[PATCH 5.6 122/194] bpf: Fix bug in mmap() implementation for BPF array map
From: Greg Kroah-Hartman
Date: Mon May 18 2020 - 14:10:36 EST
[ Upstream commit 333291ce5055f2039afc907badaf5b66bc1adfdc ]
The mmap() subsystem allows a user-space application to memory-map a region
with an initial page offset. This wasn't taken into account in the initial
implementation of BPF array memory-mapping, which ignored the requested page
shift and so mapped the wrong pages into user-space. This patch fixes this
gap and adds a test for that scenario.
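To illustrate the affected path, here is a minimal user-space sketch (not
part of the patch; map_second_page() is a hypothetical helper, and map_fd
is assumed to refer to a BPF_MAP_TYPE_ARRAY created with BPF_F_MMAPABLE
whose data spans at least two pages). Before this fix, the offset passed
to mmap() was silently ignored, so the mapping exposed the first page of
the array instead of the requested one:

  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  /* Sketch only: maps the *second* page of the map's data region. */
  static int map_second_page(int map_fd)
  {
          long page_size = sysconf(_SC_PAGE_SIZE);
          void *p;

          /* Request a one-page mapping starting at offset = 1 page. */
          p = mmap(NULL, page_size, PROT_READ, MAP_SHARED,
                   map_fd, page_size /* offset = 1 page */);
          if (p == MAP_FAILED) {
                  perror("mmap");
                  return -1;
          }
          /* ... use the data, then unmap ... */
          munmap(p, page_size);
          return 0;
  }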
Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
Signed-off-by: Andrii Nakryiko <andriin@xxxxxx>
Signed-off-by: Alexei Starovoitov <ast@xxxxxxxxxx>
Acked-by: Yonghong Song <yhs@xxxxxx>
Link: https://lore.kernel.org/bpf/20200512235925.3817805-1-andriin@xxxxxx
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
kernel/bpf/arraymap.c | 7 ++++++-
tools/testing/selftests/bpf/prog_tests/mmap.c | 9 +++++++++
2 files changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 95d77770353c9..1d6120fd5ba68 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -486,7 +486,12 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
 	if (!(map->map_flags & BPF_F_MMAPABLE))
 		return -EINVAL;
 
-	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
+	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
+	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
+		return -EINVAL;
+
+	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
+				   vma->vm_pgoff + pgoff);
 }
 
 const struct bpf_map_ops array_map_ops = {
diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
index 16a814eb4d645..b0e789678aa46 100644
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -197,6 +197,15 @@ void test_mmap(void)
 	CHECK_FAIL(map_data->val[far] != 3 * 321);
 
 	munmap(tmp2, 4 * page_size);
+
+	/* map all 4 pages, but with pg_off=1 page, should fail */
+	tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
+		    data_map_fd, page_size /* initial page shift */);
+	if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
+		munmap(tmp1, 4 * page_size);
+		goto cleanup;
+	}
+
 cleanup:
 	if (bss_mmaped)
 		CHECK_FAIL(munmap(bss_mmaped, bss_sz));
--
2.20.1