[PATCH 1/4] shmem: simplify shmem_unlock_mapping

From: Konstantin Khlebnikov
Date: Fri Feb 10 2012 - 14:42:26 EST


find_get_pages() can now skip an unlimited count of exceptional entries,
so shmem_find_get_pages_and_swap() is not required there any more.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxx>
---
mm/shmem.c | 12 ++----------
1 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/mm/shmem.c b/mm/shmem.c
index 269d049..4af8e85 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -397,7 +397,6 @@ static void shmem_deswap_pagevec(struct pagevec *pvec)
void shmem_unlock_mapping(struct address_space *mapping)
{
struct pagevec pvec;
- pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index = 0;

pagevec_init(&pvec, 0);
@@ -405,16 +404,9 @@ void shmem_unlock_mapping(struct address_space *mapping)
* Minor point, but we might as well stop if someone else SHM_LOCKs it.
*/
while (!mapping_unevictable(mapping)) {
- /*
- * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
- * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
- */
- pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
- PAGEVEC_SIZE, pvec.pages, indices);
- if (!pvec.nr)
+ if (!pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE))
break;
- index = indices[pvec.nr - 1] + 1;
- shmem_deswap_pagevec(&pvec);
+ index = pvec.pages[pvec.nr - 1]->index + 1;
check_move_unevictable_pages(pvec.pages, pvec.nr);
pagevec_release(&pvec);
cond_resched();

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/