[PATCH 2/6] mm: provide read-ahead helpers that take a struct kiocb
From: Jens Axboe
Date: Sun May 31 2020 - 22:25:04 EST
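Add __page_cache_sync_readahead() and __page_cache_async_readahead(), which take a struct kiocb instead of a struct file, and turn the existing page_cache_sync_readahead()/page_cache_async_readahead() into thin wrappers around them. Callers that already have a kiocb can then pass it directly to the readahead code. No functional changes intended.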
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
---
mm/internal.h | 10 +++++++
mm/readahead.c | 79 +++++++++++++++++++++++++++++++-------------------
2 files changed, 59 insertions(+), 30 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 1d051cbadf1a..c486d675af41 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -63,6 +63,16 @@ static inline unsigned long ra_submit(struct file_ra_state *ra,
ra->start, ra->size, ra->async_size);
}
+void __page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct kiocb *kiocb, pgoff_t offset,
+ unsigned long req_size);
+
+void __page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct kiocb *kiocb,
+ struct page *page, pgoff_t offset,
+ unsigned long req_size);
+
/**
* page_evictable - test whether a page is evictable
* @page: the page to test
diff --git a/mm/readahead.c b/mm/readahead.c
index 657206f6318d..54a41dae4fea 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -493,6 +493,29 @@ ondemand_readahead(struct address_space *mapping,
return ra_submit(ra, mapping, kiocb);
}
+void __page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct kiocb *kiocb,
+ pgoff_t offset, unsigned long req_size)
+{
+ struct file *filp = kiocb->ki_filp;
+
+ /* no read-ahead */
+ if (!ra->ra_pages)
+ return;
+
+ if (blk_cgroup_congested())
+ return;
+
+ /* be dumb */
+ if (filp && (filp->f_mode & FMODE_RANDOM)) {
+ force_page_cache_readahead(mapping, kiocb, offset, req_size);
+ return;
+ }
+
+ /* do read-ahead */
+ ondemand_readahead(mapping, ra, kiocb, false, offset, req_size);
+}
+
/**
* page_cache_sync_readahead - generic file readahead
* @mapping: address_space which holds the pagecache and I/O vectors
@@ -513,23 +536,40 @@ void page_cache_sync_readahead(struct address_space *mapping,
{
struct kiocb kiocb = { .ki_filp = filp, };
+ __page_cache_sync_readahead(mapping, ra, &kiocb, offset, req_size);
+}
+EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
+
+void
+__page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra, struct kiocb *kiocb,
+ struct page *page, pgoff_t offset,
+ unsigned long req_size)
+{
/* no read-ahead */
if (!ra->ra_pages)
return;
- if (blk_cgroup_congested())
+ /*
+ * Same bit is used for PG_readahead and PG_reclaim.
+ */
+ if (PageWriteback(page))
return;
- /* be dumb */
- if (filp && (filp->f_mode & FMODE_RANDOM)) {
- force_page_cache_readahead(mapping, &kiocb, offset, req_size);
+ ClearPageReadahead(page);
+
+ /*
+ * Defer asynchronous read-ahead on IO congestion.
+ */
+ if (inode_read_congested(mapping->host))
+ return;
+
+ if (blk_cgroup_congested())
return;
- }
/* do read-ahead */
- ondemand_readahead(mapping, ra, &kiocb, false, offset, req_size);
+ ondemand_readahead(mapping, ra, kiocb, true, offset, req_size);
}
-EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
/**
* page_cache_async_readahead - file readahead for marked pages
@@ -554,29 +594,8 @@ page_cache_async_readahead(struct address_space *mapping,
{
struct kiocb kiocb = { .ki_filp = filp, };
- /* no read-ahead */
- if (!ra->ra_pages)
- return;
-
- /*
- * Same bit is used for PG_readahead and PG_reclaim.
- */
- if (PageWriteback(page))
- return;
-
- ClearPageReadahead(page);
-
- /*
- * Defer asynchronous read-ahead on IO congestion.
- */
- if (inode_read_congested(mapping->host))
- return;
-
- if (blk_cgroup_congested())
- return;
-
- /* do read-ahead */
- ondemand_readahead(mapping, ra, &kiocb, true, offset, req_size);
+ __page_cache_async_readahead(mapping, ra, &kiocb, page, offset,
+ req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
--
2.26.2
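
For reference (not part of the patch): a rough sketch of how a read path that already holds a struct kiocb could call the new helpers directly, mirroring the find_get_page()/PageReadahead() pattern used by the existing buffered read code. Only the two __page_cache_*_readahead() signatures come from the hunks above; the caller, its name and its arguments are hypothetical, and since the declarations live in mm/internal.h such a caller would have to sit under mm/.

/*
 * Hypothetical caller sketch, not part of this patch: a buffered read
 * path that already has a struct kiocb can hand it straight to the
 * readahead code instead of passing only kiocb->ki_filp.
 */
static void demo_readahead(struct kiocb *iocb, struct address_space *mapping,
			   pgoff_t index, unsigned long nr_to_read)
{
	struct file_ra_state *ra = &iocb->ki_filp->f_ra;
	struct page *page;

	page = find_get_page(mapping, index);
	if (!page) {
		/* cache miss: start synchronous readahead around @index */
		__page_cache_sync_readahead(mapping, ra, iocb, index,
					    nr_to_read);
		return;
	}

	if (PageReadahead(page)) {
		/* marked page: extend the readahead window asynchronously */
		__page_cache_async_readahead(mapping, ra, iocb, page, index,
					     nr_to_read);
	}

	put_page(page);
}

The upshot of the refactoring is that the file based helpers keep wrapping the passed-in struct file in an on-stack kiocb, so existing callers continue to work unchanged while kiocb-aware callers get the new entry points.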