[32/41] Readahead changes to support large blocksize.

From: Christoph Lameter
Date: Tue Sep 11 2007 - 02:17:04 EST


Fix up readahead for large I/O operations.

Calculate forced readahead in chunks of up to the 2MB boundary; once a
single page cache page is larger than 2MB, fall back to one page per
chunk. max_sane_readahead() now takes the compound page order of the
mapping and scales its limit accordingly, so callers pass the raw page
count and the readahead code applies the limit itself.

Signed-off-by: Fengguang Wu <fengguang.wu@xxxxxxxxx>
Signed-off-by: Christoph Lameter <clameter@xxxxxxx>
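
To make the chunking arithmetic concrete, here is a minimal user-space
sketch (the page_cache_size parameter stands in for the patchset's
page_cache_size() helper; the sizes are hypothetical):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Pages per 2MB readahead chunk for a given page cache page size. */
static unsigned long chunk_pages(unsigned long page_cache_size)
{
        return DIV_ROUND_UP(2 * 1024 * 1024, page_cache_size);
}

int main(void)
{
        printf("4KB pages:  %lu per chunk\n", chunk_pages(4096));   /* 512 */
        printf("64KB pages: %lu per chunk\n", chunk_pages(65536));  /* 32 */
        /* A page larger than 2MB falls back to one page per chunk. */
        printf("4MB pages:  %lu per chunk\n", chunk_pages(4 * 1024 * 1024));
        return 0;
}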

===================================================================
---
 include/linux/mm.h |    2 +-
 mm/fadvise.c       |    4 ++--
 mm/filemap.c       |    5 ++---
 mm/madvise.c       |    2 +-
 mm/readahead.c     |   22 ++++++++++++++--------
 5 files changed, 20 insertions(+), 15 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index a8a0378..88f5ee3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1142,7 +1142,7 @@ void page_cache_async_readahead(struct address_space *mapping,
                         pgoff_t offset,
                         unsigned long size);
 
-unsigned long max_sane_readahead(unsigned long nr);
+unsigned long max_sane_readahead(unsigned long nr, int order);
 
 /* Do stack extension */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 804c2a9..378dcdc 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -86,10 +86,10 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                nrpages = end_index - start_index + 1;
                if (!nrpages)
                        nrpages = ~0UL;
-
+
                ret = force_page_cache_readahead(mapping, file,
                                                start_index,
-                                               max_sane_readahead(nrpages));
+                                               nrpages);
                if (ret > 0)
                        ret = 0;
                break;
diff --git a/mm/filemap.c b/mm/filemap.c
index 2cc5a8c..d6e1f40 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1237,8 +1237,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;
 
-       force_page_cache_readahead(mapping, filp, index,
-                       max_sane_readahead(nr));
+       force_page_cache_readahead(mapping, filp, index, nr);
        return 0;
 }

@@ -1373,7 +1372,7 @@ retry_find:
                count_vm_event(PGMAJFAULT);
        }
        did_readaround = 1;
-       ra_pages = max_sane_readahead(file->f_ra.ra_pages);
+       ra_pages = file->f_ra.ra_pages;
        if (ra_pages) {
                pgoff_t start = 0;
 
diff --git a/mm/madvise.c b/mm/madvise.c
index 93ee375..f54e179 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -124,7 +124,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
        end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
        force_page_cache_readahead(file->f_mapping,
-                       file, start, max_sane_readahead(end - start));
+                       file, start, end - start);
        return 0;
 }

diff --git a/mm/readahead.c b/mm/readahead.c
index 39bf45d..2cbda72 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -44,7 +44,8 @@ EXPORT_SYMBOL_GPL(default_backing_dev_info);
 void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
-       ra->ra_pages = mapping->backing_dev_info->ra_pages;
+       ra->ra_pages = DIV_ROUND_UP(mapping->backing_dev_info->ra_pages,
+                                       page_cache_size(mapping));
        ra->prev_index = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -84,7 +85,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        put_pages_list(pages);
                        break;
                }
-               task_io_account_read(PAGE_CACHE_SIZE);
+               task_io_account_read(page_cache_size(mapping));
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
@@ -151,7 +152,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
        if (isize == 0)
                goto out;
 
-       end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
+       end_index = page_cache_index(mapping, isize - 1);
 
        /*
         * Preallocate as many pages as we will need.
@@ -204,10 +205,12 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;
 
+       nr_to_read = max_sane_readahead(nr_to_read, mapping_order(mapping));
        while (nr_to_read) {
                int err;
 
-               unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
+               unsigned long this_chunk = DIV_ROUND_UP(2 * 1024 * 1024,
+                                               page_cache_size(mapping));
 
                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
@@ -237,17 +240,20 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
        if (bdi_read_congested(mapping->backing_dev_info))
                return -1;
 
+       nr_to_read = max_sane_readahead(nr_to_read, mapping_order(mapping));
        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
 }
 
 /*
- * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
+ * Given a desired number of page order readahead pages, return a
  * sensible upper limit.
  */
-unsigned long max_sane_readahead(unsigned long nr)
+unsigned long max_sane_readahead(unsigned long nr, int order)
 {
-       return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
-                       + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
+       unsigned long base_pages = node_page_state(numa_node_id(), NR_INACTIVE)
+                       + node_page_state(numa_node_id(), NR_FREE_PAGES);
+
+       return min(nr, (base_pages / 2) >> order);
 }
 
 /*
--
1.5.2.5
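
To see what the new max_sane_readahead() limit works out to, a small
user-space sketch of the same arithmetic (base_pages is a hypothetical
NR_INACTIVE + NR_FREE_PAGES sum; order is the compound page order of
the mapping):

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/*
 * Mirror of the patched limit: half of base_pages, expressed in page
 * cache pages of the given order, so the budget measured in base
 * pages stays the same for every order.
 */
static unsigned long sane_limit(unsigned long nr, unsigned long base_pages,
                                int order)
{
        return min(nr, (base_pages / 2) >> order);
}

int main(void)
{
        unsigned long base_pages = 4096;

        printf("order 0: %lu pages\n", sane_limit(~0UL, base_pages, 0)); /* 2048 */
        /* order 4 pages are 16x larger: 128 of them cover the same
         * 2048 base pages. */
        printf("order 4: %lu pages\n", sane_limit(~0UL, base_pages, 4));
        return 0;
}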
