[PATCH v2 bpf-next 01/14] bpf: add ability to charge bpf maps memory dynamically

From: Roman Gushchin
Date: Thu Jul 05 2018 - 16:52:45 EST


This commit extends the existing bpf map memory charging API
to support dynamic charging/uncharging.

This is required to account for the memory used by maps whose
entries are created dynamically after map initialization.
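
As an illustration, a map implementation that allocates its elements
on demand could pair the two new helpers roughly as follows. This is
a minimal sketch only; example_map_alloc_elem() and elem_pages are
hypothetical names used for illustration and are not part of this
series:

	/* Hypothetical example, not part of this patch: allocate
	 * elem_pages worth of memory for a new map element and
	 * account it against the map owner's RLIMIT_MEMLOCK.
	 */
	static int example_map_alloc_elem(struct bpf_map *map, u32 elem_pages)
	{
		void *elem;
		int err;

		/* charge against user->locked_vm before allocating */
		err = bpf_map_charge_memlock(map, elem_pages);
		if (err)
			return err;

		elem = bpf_map_area_alloc((size_t)elem_pages << PAGE_SHIFT,
					  map->numa_node);
		if (!elem) {
			/* roll the charge back on the failure path */
			bpf_map_uncharge_memlock(map, elem_pages);
			return -ENOMEM;
		}

		/* ... insert elem into the map ... */
		return 0;
	}

The charge is taken before the allocation and dropped on every error
path, so map->pages stays consistent with what is actually charged to
user->locked_vm.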

Signed-off-by: Roman Gushchin <guro@xxxxxx>
Cc: Alexei Starovoitov <ast@xxxxxxxxxx>
Cc: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Acked-by: Martin KaFai Lau <kafai@xxxxxx>
---
include/linux/bpf.h | 2 ++
kernel/bpf/syscall.c | 53 +++++++++++++++++++++++++++++++++++++---------------
2 files changed, 40 insertions(+), 15 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8827e797ff97..3d1933707374 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -434,6 +434,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index d10ecd78105f..cee452a19538 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -181,32 +181,55 @@ int bpf_map_precharge_memlock(u32 pages)
 	return 0;
 }
 
-static int bpf_map_charge_memlock(struct bpf_map *map)
+static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 {
-	struct user_struct *user = get_current_user();
-	unsigned long memlock_limit;
+	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
-	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) {
+		atomic_long_sub(pages, &user->locked_vm);
+		return -EPERM;
+	}
+	return 0;
+}
 
-	atomic_long_add(map->pages, &user->locked_vm);
+static int bpf_map_init_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = get_current_user();
+	int ret;
 
-	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
-		atomic_long_sub(map->pages, &user->locked_vm);
+	ret = bpf_charge_memlock(user, map->pages);
+	if (ret) {
 		free_uid(user);
-		return -EPERM;
+		return ret;
 	}
 	map->user = user;
-	return 0;
+	return ret;
 }
 
-static void bpf_map_uncharge_memlock(struct bpf_map *map)
+static void bpf_map_release_memlock(struct bpf_map *map)
 {
 	struct user_struct *user = map->user;
-
-	atomic_long_sub(map->pages, &user->locked_vm);
+	atomic_long_sub(map->pages, &map->user->locked_vm);
 	free_uid(user);
 }
 
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
+{
+	int ret;
+
+	ret = bpf_charge_memlock(map->user, pages);
+	if (ret)
+		return ret;
+	map->pages += pages;
+	return ret;
+}
+
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
+{
+	atomic_long_sub(pages, &map->user->locked_vm);
+	map->pages -= pages;
+}
+
 static int bpf_map_alloc_id(struct bpf_map *map)
 {
 	int id;
@@ -256,7 +279,7 @@ static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
 
-	bpf_map_uncharge_memlock(map);
+	bpf_map_release_memlock(map);
 	security_bpf_map_free(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
@@ -492,7 +515,7 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		goto free_map_nouncharge;
 
-	err = bpf_map_charge_memlock(map);
+	err = bpf_map_init_memlock(map);
 	if (err)
 		goto free_map_sec;
 
@@ -515,7 +538,7 @@ static int map_create(union bpf_attr *attr)
 	return err;
 
 free_map:
-	bpf_map_uncharge_memlock(map);
+	bpf_map_release_memlock(map);
 free_map_sec:
 	security_bpf_map_free(map);
 free_map_nouncharge:
--
2.14.4