[PATCH] mm: unmap_kernel_range_atomic

From: Minchan Kim
Date: Mon Nov 16 2020 - 12:38:40 EST


unmap_kernel_range() used to be an atomic operation, and zsmalloc has been
relying on that by calling it from atomic context in zs_unmap_object().
However, commit e47110e90584 ("mm/vunmap: add cond_resched() in
vunmap_pmd_range") made it non-atomic by adding a cond_resched() call.
This causes zram decompression failures by corrupting the compressed
buffer in atomic context.
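
For reference, the failing path looks roughly like this (a simplified
sketch of the call chain touched by this patch, with intermediate page
table levels omitted):

	zs_unmap_object()		/* per-CPU mapping area, preemption disabled */
	  __zs_unmap_object()
	    unmap_kernel_range(addr, PAGE_SIZE * 2)
	      ...
	        vunmap_pmd_range()
	          cond_resched()	/* may sleep - not allowed here */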

This patch introduces unmap_kernel_range_atomic(), which works only for
ranges less than PMD_SIZE so that the cond_resched() call is never reached.
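
A rough caller-side sketch of the intended usage (illustrative only; the
actual caller is the zsmalloc hunk below). The range must not exceed
PMD_SIZE, otherwise the WARN_ON() in the helper fires:

	/* atomic context, e.g. between get_cpu_var()/put_cpu_var() */
	unmap_kernel_range_atomic(addr, PAGE_SIZE * 2);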

Signed-off-by: Minchan Kim <minchan@xxxxxxxxxx>
---
 include/linux/vmalloc.h |  2 ++
 mm/vmalloc.c            | 23 +++++++++++++++++++++--
 mm/zsmalloc.c           |  2 +-
 3 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 938eaf9517e2..36b1ecc2d014 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -180,6 +180,7 @@ int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+extern void unmap_kernel_range_atomic(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
struct vm_struct *vm = find_vm_area(addr);
@@ -200,6 +201,7 @@ unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
+#define unmap_kernel_range_atomic unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d7075ad340aa..714e5425dc45 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -88,6 +88,7 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
pmd_t *pmd;
unsigned long next;
int cleared;
+ bool check_resched = (end - addr) > PMD_SIZE;

pmd = pmd_offset(pud, addr);
do {
@@ -102,8 +103,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
if (pmd_none_or_clear_bad(pmd))
continue;
vunmap_pte_range(pmd, addr, next, mask);
-
- cond_resched();
+ if (check_resched)
+ cond_resched();
} while (pmd++, addr = next, addr != end);
}

@@ -2024,6 +2025,24 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
flush_tlb_kernel_range(addr, end);
}

+/**
+ * unmap_kernel_range_atomic - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range() but may be called from atomic context.
+ * @size should be less than PMD_SIZE.
+ */
+void unmap_kernel_range_atomic(unsigned long addr, unsigned long size)
+{
+ unsigned long end = addr + size;
+
+ flush_cache_vunmap(addr, end);
+ WARN_ON(size > PMD_SIZE);
+ unmap_kernel_range_noflush(addr, size);
+ flush_tlb_kernel_range(addr, end);
+}
+
static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
struct vmap_area *va, unsigned long flags, const void *caller)
{
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 662ee420706f..9decc7634852 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1154,7 +1154,7 @@ static inline void __zs_unmap_object(struct mapping_area *area,
{
unsigned long addr = (unsigned long)area->vm_addr;

- unmap_kernel_range(addr, PAGE_SIZE * 2);
+ unmap_kernel_range_atomic(addr, PAGE_SIZE * 2);
}

#else /* CONFIG_ZSMALLOC_PGTABLE_MAPPING */
--
2.29.2.299.gdc1121823c-goog