[PATCH 1/3] livepatch: Add klp_object and klp_func iterators

From: Jason Baron
Date: Wed Jul 19 2017 - 13:27:27 EST


In preparation for introducing atomic replace, introduce iterators for klp_func
and klp_object, such that objects and functions can be dynamically allocated
(needed for atomic replace). Note that this patch is careful not to grow the
size of klp_func, as that is the most common data structure. This patch is
intended to be a functional no-op until atomic replace is introduced.
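
For illustration, a call site that walks all objects and functions now passes
on-stack iterators to the macros. A minimal sketch of the intended usage
follows (the function below is hypothetical and only for illustration; the
macros and iterator types are the ones added by this patch):

	static void example_walk(struct klp_patch *patch)
	{
		struct klp_object *obj;
		struct klp_func *func;
		struct obj_iter o_iter;
		struct func_iter f_iter;

		/*
		 * Walks both the static patch->objs[] array and any
		 * dynamically allocated entries linked on patch->obj_list
		 * and obj->func_list.
		 */
		klp_for_each_object(patch, obj, &o_iter)
			klp_for_each_func(obj, func, &f_iter)
				pr_debug("%s\n", func->old_name);
	}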

Signed-off-by: Jason Baron <jbaron@xxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Jessica Yu <jeyu@xxxxxxxxxx>
Cc: Jiri Kosina <jikos@xxxxxxxxxx>
Cc: Miroslav Benes <mbenes@xxxxxxx>
Cc: Petr Mladek <pmladek@xxxxxxxx>
---
include/linux/livepatch.h | 106 ++++++++++++++++++++++++++++++++++++++++--
kernel/livepatch/core.c | 25 +++++++---
kernel/livepatch/patch.c | 9 ++--
kernel/livepatch/transition.c | 18 ++++---
4 files changed, 137 insertions(+), 21 deletions(-)

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 194991e..5038337 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
+#include <linux/list.h>

#if IS_ENABLED(CONFIG_LIVEPATCH)

@@ -88,10 +89,23 @@ struct klp_func {
};

/**
+ * struct klp_func_no_op - internal object used to link no_op functions, which
+ *			    avoids the need to bloat struct klp_func
+ * @orig_func: embeds struct klp_func
+ * @func_entry: used to link struct klp_func_no_op to struct klp_object
+ */
+struct klp_func_no_op {
+ struct klp_func orig_func;
+ struct list_head func_entry;
+};
+
+/**
* struct klp_object - kernel object structure for live patching
* @name: module name (or NULL for vmlinux)
* @funcs: function entries for functions to be patched in the object
* @kobj: kobject for sysfs resources
+ * @func_list: head of list for struct klp_func_no_op
+ * @obj_entry: used to link struct klp_object to struct klp_patch
* @mod: kernel module associated with the patched object
* (NULL for vmlinux)
* @patched: the object's funcs have been added to the klp_ops list
@@ -103,6 +117,8 @@ struct klp_object {

/* internal */
struct kobject kobj;
+ struct list_head func_list;
+ struct list_head obj_entry;
struct module *mod;
bool patched;
};
@@ -114,6 +130,7 @@ struct klp_object {
* @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
+ * @obj_list: head of list for dynamically allocated struct klp_object
* @enabled: the patch is enabled (but operation may be incomplete)
* @finish: for waiting till it is safe to remove the patch module
*/
@@ -126,17 +143,96 @@ struct klp_patch {
/* internal */
struct list_head list;
struct kobject kobj;
+ struct list_head obj_list;
bool enabled;
struct completion finish;
};

-#define klp_for_each_object(patch, obj) \
+struct obj_iter {
+ struct klp_object *obj;
+ struct list_head *obj_list_head;
+ struct list_head *obj_list_pos;
+};
+
+static inline struct klp_object *obj_iter_next(struct obj_iter *iter)
+{
+ struct klp_object *obj;
+
+ if (iter->obj->funcs || iter->obj->name) {
+ obj = iter->obj;
+ iter->obj++;
+ } else {
+ if (iter->obj_list_pos == iter->obj_list_head) {
+ obj = NULL;
+ } else {
+ obj = list_entry(iter->obj_list_pos, struct klp_object,
+ obj_entry);
+ iter->obj_list_pos = iter->obj_list_pos->next;
+ }
+ }
+
+ return obj;
+}
+
+static inline struct klp_object *obj_iter_init(struct klp_patch *patch,
+ struct obj_iter *iter)
+{
+ iter->obj = patch->objs;
+ iter->obj_list_head = &patch->obj_list;
+ iter->obj_list_pos = iter->obj_list_head->next;
+
+ return obj_iter_next(iter);
+}
+
+#define klp_for_each_object(patch, obj, iter) \
+ for (obj = obj_iter_init(patch, iter); obj; obj = obj_iter_next(iter))
+
+#define klp_for_each_object_core(patch, obj) \
for (obj = patch->objs; obj->funcs || obj->name; obj++)

-#define klp_for_each_func(obj, func) \
- for (func = obj->funcs; \
- func->old_name || func->new_func || func->old_sympos; \
- func++)
+struct func_iter {
+ struct klp_func *func;
+ struct list_head *func_list_head;
+ struct list_head *func_list_pos;
+};
+
+static inline struct klp_func *func_iter_next(struct func_iter *iter)
+{
+ struct klp_func *func;
+ struct klp_func_no_op *func_no_op;
+
+ if (iter->func->old_name || iter->func->new_func ||
+ iter->func->old_sympos) {
+ func = iter->func;
+ iter->func++;
+ } else {
+ if (iter->func_list_pos == iter->func_list_head) {
+ func = NULL;
+ } else {
+ func_no_op = list_entry(iter->func_list_pos,
+ struct klp_func_no_op,
+ func_entry);
+ func = &func_no_op->orig_func;
+ iter->func_list_pos = iter->func_list_pos->next;
+ }
+ }
+
+ return func;
+}
+
+static inline struct klp_func *func_iter_init(struct klp_object *obj,
+ struct func_iter *iter)
+{
+ iter->func = obj->funcs;
+ iter->func_list_head = &obj->func_list;
+ iter->func_list_pos = iter->func_list_head->next;
+
+ return func_iter_next(iter);
+}
+
+#define klp_for_each_func(obj, func, iter) \
+ for (func = func_iter_init(obj, iter); func; \
+ func = func_iter_next(iter))

int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index b9628e4..e63f478 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -346,6 +346,7 @@ EXPORT_SYMBOL_GPL(klp_disable_patch);
static int __klp_enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
+ struct obj_iter o_iter;
int ret;

if (klp_transition_patch)
@@ -384,7 +385,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
*/
smp_wmb();

- klp_for_each_object(patch, obj) {
+ klp_for_each_object(patch, obj, &o_iter) {
if (!klp_is_object_loaded(obj))
continue;

@@ -571,10 +572,11 @@ static void klp_free_funcs_limited(struct klp_object *obj,
static void klp_free_object_loaded(struct klp_object *obj)
{
struct klp_func *func;
+ struct func_iter f_iter;

obj->mod = NULL;

- klp_for_each_func(obj, func)
+ klp_for_each_func(obj, func, &f_iter)
func->old_addr = 0;
}

@@ -630,6 +632,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
struct klp_object *obj)
{
struct klp_func *func;
+ struct func_iter f_iter;
int ret;

module_disable_ro(patch->mod);
@@ -642,7 +645,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
arch_klp_init_object_loaded(patch, obj);
module_enable_ro(patch->mod, true);

- klp_for_each_func(obj, func) {
+ klp_for_each_func(obj, func, &f_iter) {
ret = klp_find_object_symbol(obj->name, func->old_name,
func->old_sympos,
&func->old_addr);
@@ -672,6 +675,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
struct klp_func *func;
+ struct func_iter f_iter;
int ret;
const char *name;

@@ -689,7 +693,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
if (ret)
return ret;

- klp_for_each_func(obj, func) {
+ klp_for_each_func(obj, func, &f_iter) {
ret = klp_init_func(obj, func);
if (ret)
goto free;
@@ -712,6 +716,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
static int klp_init_patch(struct klp_patch *patch)
{
struct klp_object *obj;
+ struct obj_iter o_iter;
int ret;

if (!patch->objs)
@@ -729,7 +734,11 @@ static int klp_init_patch(struct klp_patch *patch)
return ret;
}

- klp_for_each_object(patch, obj) {
+ INIT_LIST_HEAD(&patch->obj_list);
+ klp_for_each_object_core(patch, obj)
+ INIT_LIST_HEAD(&obj->func_list);
+
+ klp_for_each_object(patch, obj, &o_iter) {
ret = klp_init_object(patch, obj);
if (ret)
goto free;
@@ -835,6 +844,7 @@ int klp_module_coming(struct module *mod)
int ret;
struct klp_patch *patch;
struct klp_object *obj;
+ struct obj_iter o_iter;

if (WARN_ON(mod->state != MODULE_STATE_COMING))
return -EINVAL;
@@ -848,7 +858,7 @@ int klp_module_coming(struct module *mod)
mod->klp_alive = true;

list_for_each_entry(patch, &klp_patches, list) {
- klp_for_each_object(patch, obj) {
+ klp_for_each_object(patch, obj, &o_iter) {
if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
continue;

@@ -904,6 +914,7 @@ void klp_module_going(struct module *mod)
{
struct klp_patch *patch;
struct klp_object *obj;
+ struct obj_iter o_iter;

if (WARN_ON(mod->state != MODULE_STATE_GOING &&
mod->state != MODULE_STATE_COMING))
@@ -918,7 +929,7 @@ void klp_module_going(struct module *mod)
mod->klp_alive = false;

list_for_each_entry(patch, &klp_patches, list) {
- klp_for_each_object(patch, obj) {
+ klp_for_each_object(patch, obj, &o_iter) {
if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
continue;

diff --git a/kernel/livepatch/patch.c b/kernel/livepatch/patch.c
index 52c4e90..1cfdabc 100644
--- a/kernel/livepatch/patch.c
+++ b/kernel/livepatch/patch.c
@@ -238,8 +238,9 @@ static int klp_patch_func(struct klp_func *func)
void klp_unpatch_object(struct klp_object *obj)
{
struct klp_func *func;
+ struct func_iter f_iter;

- klp_for_each_func(obj, func)
+ klp_for_each_func(obj, func, &f_iter)
if (func->patched)
klp_unpatch_func(func);

@@ -249,12 +250,13 @@ void klp_unpatch_object(struct klp_object *obj)
int klp_patch_object(struct klp_object *obj)
{
struct klp_func *func;
+ struct func_iter f_iter;
int ret;

if (WARN_ON(obj->patched))
return -EINVAL;

- klp_for_each_func(obj, func) {
+ klp_for_each_func(obj, func, &f_iter) {
ret = klp_patch_func(func);
if (ret) {
klp_unpatch_object(obj);
@@ -269,8 +271,9 @@ int klp_patch_object(struct klp_object *obj)
void klp_unpatch_objects(struct klp_patch *patch)
{
struct klp_object *obj;
+ struct obj_iter o_iter;

- klp_for_each_object(patch, obj)
+ klp_for_each_object(patch, obj, &o_iter)
if (obj->patched)
klp_unpatch_object(obj);
}
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index b004a1f..e112826 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -81,6 +81,8 @@ static void klp_complete_transition(void)
struct task_struct *g, *task;
unsigned int cpu;
bool immediate_func = false;
+ struct obj_iter o_iter;
+ struct func_iter f_iter;

if (klp_target_state == KLP_UNPATCHED) {
/*
@@ -101,8 +103,8 @@ static void klp_complete_transition(void)
if (klp_transition_patch->immediate)
goto done;

- klp_for_each_object(klp_transition_patch, obj) {
- klp_for_each_func(obj, func) {
+ klp_for_each_object(klp_transition_patch, obj, &o_iter) {
+ klp_for_each_func(obj, func, &f_iter) {
func->transition = false;
if (func->immediate)
immediate_func = true;
@@ -244,6 +246,8 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
struct stack_trace trace;
struct klp_object *obj;
struct klp_func *func;
+ struct obj_iter o_iter;
+ struct func_iter f_iter;
int ret;

trace.skip = 0;
@@ -259,10 +263,10 @@ static int klp_check_stack(struct task_struct *task, char *err_buf)
return ret;
}

- klp_for_each_object(klp_transition_patch, obj) {
+ klp_for_each_object(klp_transition_patch, obj, &o_iter) {
if (!obj->patched)
continue;
- klp_for_each_func(obj, func) {
+ klp_for_each_func(obj, func, &f_iter) {
ret = klp_check_stack_func(func, &trace);
if (ret) {
snprintf(err_buf, STACK_ERR_BUF_SIZE,
@@ -470,6 +474,8 @@ void klp_init_transition(struct klp_patch *patch, int state)
unsigned int cpu;
struct klp_object *obj;
struct klp_func *func;
+ struct obj_iter o_iter;
+ struct func_iter f_iter;
int initial_state = !state;

WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);
@@ -531,8 +537,8 @@ void klp_init_transition(struct klp_patch *patch, int state)
* When unpatching, the funcs are already in the func_stack and so are
* already visible to the ftrace handler.
*/
- klp_for_each_object(patch, obj)
- klp_for_each_func(obj, func)
+ klp_for_each_object(patch, obj, &o_iter)
+ klp_for_each_func(obj, func, &f_iter)
func->transition = true;
}

--
2.6.1