[RFC kgr on klp 8/9] livepatch: add kgraft-like patching

From: Jiri Slaby
Date: Mon May 04 2015 - 07:41:37 EST


This adds a simplified kGraft as a consistency model. kGraft builds
on Live Kernel Patching, but adds an RCU-like update of code that does
not require stopping the kernel.

Like Live Kernel Patches, a kGraft patch is a kernel module and fully
relies on the in-kernel module loader to link the new code with the
kernel.

While kGraft is, by choice, limited to replacing whole functions and
the constants they reference, this does not significantly limit the set
of code patches that can be applied.

IRQs are not handled specially in this patch; they inherit the flag
from the process they interrupted. This can easily be addressed by
porting the relevant pieces of code from SUSE's kGraft implementation,
which was not yet based on LKP.

Kthreads are ignored in this patch too; proper parking needs to be
implemented for all of them first. That is a work in progress and will
gradually improve the situation around the kthread freezer/parker,
which needs some care anyway. For now, kthreads always call the old
code, but they do not block finalization: they never get the
in_progress flag set, and the stub routes them to the old code
unconditionally.
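
"Proper parking" refers to the kthread_park() machinery: a kthread
that parks at a well-defined point would give kGraft a known-safe state
at which to switch it over. As a rough illustration only -- do_work()
is a made-up placeholder and kGraft does not hook the park point yet --
a patchable kthread main loop would follow this idiom:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static int my_kthread_fn(void *data)
	{
		while (!kthread_should_stop()) {
			if (kthread_should_park())
				kthread_parkme();	/* known-safe point */

			do_work();		/* hypothetical payload */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}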

The operations are as follows; a sketch of the per-task in_progress
helpers they rely on follows the STUB list:
ENABLE
1) !(stub registered) => register stub
2) state = KLP_PREPARED
3) write_lock(klp_kgr_state_lock)
4) add func to klp_ops
5) state = KLP_ASYNC_ENABLED
6) set each task's in_progress flag
7) write_unlock(klp_kgr_state_lock)
=== async ===
8) state = KLP_ENABLED

DISABLE
1) write_lock(klp_kgr_state_lock)
2) state = KLP_ASYNC_DISABLED
3) set each task's in_progress flag
4) write_unlock(klp_kgr_state_lock)
=== async ===
5) remove func from klp_ops
6) no more funcs in klp_ops => unregister stub
7) state = KLP_DISABLED

STUB
1) fstate == KLP_DISABLED || KLP_PREPARED => go_old = true
2) fstate == KLP_ASYNC_ENABLED => go_old = (task in_progress?)
3) fstate == KLP_ENABLED => go_old = false
4) fstate == KLP_ASYNC_DISABLED => go_old = !(task in_progress?)
5) go_old => take the next fn in the klp_ops list
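
For reference, the per-task in_progress flag tested in STUB steps 2 and
4 is only consumed by this patch; the helpers manipulating it are
expected to come from an earlier patch of this series. A minimal sketch
of what they are assumed to look like, wrapping the TIF_KGR_IN_PROGRESS
thread_info flag mentioned in a locking comment below (the clearing
helper and its call site are assumptions, not part of this patch):

	/* sketch only -- the real helpers live outside this patch */
	static inline void klp_kgraft_mark_task_in_progress(struct task_struct *p)
	{
		set_tsk_thread_flag(p, TIF_KGR_IN_PROGRESS);
	}

	static inline bool klp_kgraft_task_in_progress(struct task_struct *p)
	{
		return test_tsk_thread_flag(p, TIF_KGR_IN_PROGRESS);
	}

	/*
	 * Assumed to run from a per-task safe point (e.g. the syscall
	 * entry/exit slow path); once a task crosses it, the stub sends
	 * that task to the new code for good.
	 */
	static inline void klp_kgraft_mark_task_safe(struct task_struct *p)
	{
		clear_tsk_thread_flag(p, TIF_KGR_IN_PROGRESS);
	}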

Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
---
include/linux/livepatch.h | 17 ++++
kernel/livepatch/Makefile | 2 +-
kernel/livepatch/cmodel-kgraft.c | 181 +++++++++++++++++++++++++++++++++++
kernel/livepatch/core.c | 119 ++++++++++++++++++++---
samples/livepatch/livepatch-sample.c | 2 +-
5 files changed, 308 insertions(+), 13 deletions(-)
create mode 100644 kernel/livepatch/cmodel-kgraft.c

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index add2b1bd1cce..dcf5500b3999 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -29,6 +29,7 @@

#include <asm/livepatch.h>

+struct klp_patch;
struct klp_func;

/**
@@ -37,20 +38,28 @@ struct klp_func;
enum klp_cmodel_id {
KLP_CM_INVALID = 0,
KLP_CM_SIMPLE, /* LEAVE_FUNCTION and SWITCH_FUNCTION */
+ KLP_CM_KGRAFT, /* LEAVE_KERNEL and SWITCH_THREAD */
};

/**
* struct klp_cmodel - implementation of a consistency model
* @id: id of this model (from enum klp_cmodel_id)
+ * @async_finish: cmodel finishes asynchronously
* @list: member of klp_cmodel_list
* @stub: what to use as an ftrace handler (annotate with notrace!)
+ * @pre_patch: hook to run before patching starts (optional)
+ * @post_patch: hook to run after patching finishes (optional)
*/
struct klp_cmodel {
const enum klp_cmodel_id id;
+ bool async_finish;
struct list_head list;

void (*stub)(struct list_head *func_stack, struct klp_func *func,
struct pt_regs *regs);
+
+ void (*pre_patch)(struct klp_patch *);
+ void (*post_patch)(struct klp_patch *);
};

/**
@@ -58,6 +67,8 @@ struct klp_cmodel {
* @KLP_DISABLED: completely disabled
* @KLP_ENABLED: completely enabled (applied)
* @KLP_PREPARED: being applied
+ * @KLP_ASYNC_DISABLED: in the process of disabling (will become @KLP_DISABLED)
+ * @KLP_ASYNC_ENABLED: in the process of enabling (will become @KLP_ENABLED)
*
* @KLP_DISABLED & @KLP_ENABLED are part of the /sys ABI
*/
@@ -65,6 +76,10 @@ enum klp_state {
KLP_DISABLED,
KLP_ENABLED,
KLP_PREPARED,
+
+ KLP_ASYNC = 0x100,
+ KLP_ASYNC_DISABLED = KLP_DISABLED | KLP_ASYNC,
+ KLP_ASYNC_ENABLED = KLP_ENABLED | KLP_ASYNC,
};

/**
@@ -184,8 +199,10 @@ int klp_register_patch(struct klp_patch *);
int klp_unregister_patch(struct klp_patch *);
int klp_enable_patch(struct klp_patch *);
int klp_disable_patch(struct klp_patch *);
+void klp_async_patch_done(void);

void klp_init_cmodel_simple(void);
+void klp_init_cmodel_kgraft(void);
void klp_register_cmodel(struct klp_cmodel *);

#endif /* CONFIG_LIVEPATCH */
diff --git a/kernel/livepatch/Makefile b/kernel/livepatch/Makefile
index 926533777247..0c7fd8361dc3 100644
--- a/kernel/livepatch/Makefile
+++ b/kernel/livepatch/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_LIVEPATCH) += livepatch.o

-livepatch-objs := core.o cmodel-simple.o
+livepatch-objs := core.o cmodel-simple.o cmodel-kgraft.o
diff --git a/kernel/livepatch/cmodel-kgraft.c b/kernel/livepatch/cmodel-kgraft.c
new file mode 100644
index 000000000000..196b08823f73
--- /dev/null
+++ b/kernel/livepatch/cmodel-kgraft.c
@@ -0,0 +1,181 @@
+/*
+ * cmodel-kgraft.c - KLP kGraft Consistency Model
+ *
+ * Copyright (C) 2015 SUSE
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/ftrace.h>
+#include <linux/livepatch.h>
+#include <linux/rculist.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+
+#define KGRAFT_TIMEOUT 2
+
+static void klp_kgraft_work_fn(struct work_struct *work);
+
+static struct workqueue_struct *klp_kgraft_wq;
+static DECLARE_DELAYED_WORK(klp_kgraft_work, klp_kgraft_work_fn);
+/*
+ * This lock protects manipulation of func->state and the
+ * TIF_KGR_IN_PROGRESS flag when a patch is being added or removed. The
+ * kGraft stub has to see both values in a consistent state for the
+ * whole patch and all threads.
+ */
+static DEFINE_RWLOCK(klp_kgr_state_lock);
+
+static void notrace klp_kgraft_stub(struct list_head *func_stack,
+		struct klp_func *func, struct pt_regs *regs)
+{
+	unsigned long flags;
+	bool go_old;
+
+	if (current->flags & PF_KTHREAD)
+		return;
+
+	/*
+	 * The corresponding write lock is taken only when functions are moved
+	 * to the _ASYNC_ states and the _IN_PROGRESS flag is set for all threads.
+	 */
+	read_lock_irqsave(&klp_kgr_state_lock, flags);
+
+	switch (func->state) {
+	case KLP_DISABLED:
+	case KLP_PREPARED:
+		go_old = true;
+		break;
+	case KLP_ASYNC_ENABLED:
+		go_old = klp_kgraft_task_in_progress(current);
+		break;
+	case KLP_ENABLED:
+		go_old = false;
+		break;
+	case KLP_ASYNC_DISABLED:
+		go_old = !klp_kgraft_task_in_progress(current);
+		break;
+	/* no default: missing states are caught at compile time! */
+	}
+
+	read_unlock_irqrestore(&klp_kgr_state_lock, flags);
+
+	if (go_old)
+		func = list_entry_rcu(list_next_rcu(&func->stack_node),
+				      struct klp_func, stack_node);
+
+	/* did we hit the bottom => run the original */
+	if (&func->stack_node != func_stack)
+		klp_arch_set_pc(regs, (unsigned long)func->new_func);
+}
+
+static void klp_kgraft_pre_patch(struct klp_patch *patch)
+	__acquires(&klp_kgr_state_lock)
+{
+	write_lock_irq(&klp_kgr_state_lock);
+}
+
+static bool klp_kgraft_still_patching(void)
+{
+	struct task_struct *p;
+	bool failed = false;
+
+	/*
+	 * We do not need to take klp_kgr_state_lock here.
+	 * Any race will just delay finalization.
+	 */
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		if (klp_kgraft_task_in_progress(p)) {
+			failed = true;
+			break;
+		}
+	}
+	read_unlock(&tasklist_lock);
+	return failed;
+}
+
+static void klp_kgraft_work_fn(struct work_struct *work)
+{
+	static bool printed;
+
+	if (klp_kgraft_still_patching()) {
+		if (!printed) {
+			pr_info("kGraft still in progress after timeout, will keep trying every %d seconds\n",
+				KGRAFT_TIMEOUT);
+			printed = true;
+		}
+		/* recheck again later */
+		queue_delayed_work(klp_kgraft_wq, &klp_kgraft_work,
+				   KGRAFT_TIMEOUT * HZ);
+		return;
+	}
+
+	/*
+	 * Victory, patching finished; put everything back in shape
+	 * with as little performance impact as possible.
+	 */
+	klp_async_patch_done();
+	pr_info("kGraft succeeded\n");
+
+	printed = false;
+}
+
+static void klp_kgraft_handle_processes(void)
+{
+	struct task_struct *p;
+
+	read_lock(&tasklist_lock);
+	for_each_process(p) {
+		/* kthreads cannot be patched yet */
+		if (p->flags & PF_KTHREAD)
+			continue;
+
+		klp_kgraft_mark_task_in_progress(p);
+	}
+	read_unlock(&tasklist_lock);
+}
+
+static void klp_kgraft_post_patch(struct klp_patch *patch)
+	__releases(&klp_kgr_state_lock)
+{
+	klp_kgraft_handle_processes();
+	write_unlock_irq(&klp_kgr_state_lock);
+
+	/*
+	 * give everyone time to exit the kernel, and check after a while
+	 */
+	queue_delayed_work(klp_kgraft_wq, &klp_kgraft_work,
+			   KGRAFT_TIMEOUT * HZ);
+}
+
+static struct klp_cmodel kgraft_model = {
+	.id = KLP_CM_KGRAFT,
+	.async_finish = true,
+	.stub = klp_kgraft_stub,
+	.pre_patch = klp_kgraft_pre_patch,
+	.post_patch = klp_kgraft_post_patch,
+};
+
+void klp_init_cmodel_kgraft(void)
+{
+	klp_kgraft_wq = create_singlethread_workqueue("kgraft");
+	if (!klp_kgraft_wq) {
+		pr_err("kGraft: cannot allocate a work queue, aborting!\n");
+		return;
+	}
+
+	klp_register_cmodel(&kgraft_model);
+}
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 5e89ea74cadb..8f6fa8c8f593 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -58,6 +58,12 @@ struct klp_ops {
*/
static DEFINE_MUTEX(klp_mutex);

+/*
+ * The patch currently in progress. Access only under klp_mutex.
+ * Only async cmodels need to know this.
+ */
+static struct klp_patch *klp_async_patch;
+
static LIST_HEAD(klp_patches);
static LIST_HEAD(klp_ops);
static LIST_HEAD(klp_cmodel_list);
@@ -330,6 +336,27 @@ static void notrace klp_ftrace_handler(unsigned long ip,
rcu_read_unlock();
}

+static void klp_patch_change_state(struct klp_patch *patch, enum klp_state from,
+ enum klp_state to)
+{
+ struct klp_object *obj;
+ struct klp_func *func;
+
+ if (patch->state != from)
+ return;
+
+ klp_for_each_object(patch, obj)
+ if (klp_is_object_loaded(obj) && obj->state == from) {
+ klp_for_each_func(obj, func)
+ if (func->state == from)
+ func->state = to;
+
+ obj->state = to;
+ }
+
+ patch->state = to;
+}
+
static void klp_disable_func(struct klp_func *func, enum klp_state dstate)
{
struct klp_ops *ops;
@@ -467,10 +494,24 @@ static void klp_enable_object(struct klp_object *obj, enum klp_state dstate)
obj->state = dstate;
}

-static int __klp_disable_patch(struct klp_patch *patch)
+static void klp_disable_patch_real(struct klp_patch *patch)
{
struct klp_object *obj;

+ klp_for_each_object(patch, obj) {
+ if (obj->state == KLP_PREPARED || obj->state == KLP_ENABLED ||
+ obj->state == KLP_ASYNC_DISABLED)
+ klp_disable_object(obj, KLP_DISABLED);
+ }
+
+ patch->state = KLP_DISABLED;
+}
+
+static int __klp_disable_patch(struct klp_patch *patch)
+{
+ if (klp_async_patch)
+ return -EBUSY;
+
/* enforce stacking: only the last enabled patch can be disabled */
if (!list_is_last(&patch->list, &klp_patches) &&
list_next_entry(patch, list)->state == KLP_ENABLED)
@@ -478,12 +519,23 @@ static int __klp_disable_patch(struct klp_patch *patch)

pr_notice("disabling patch '%s'\n", patch->mod->name);

- klp_for_each_object(patch, obj) {
- if (obj->state == KLP_PREPARED || obj->state == KLP_ENABLED)
- klp_disable_object(obj, KLP_DISABLED);
+ /*
+ * Do only fast and non-blocking operations between the pre_patch
+ * and post_patch callbacks: they might take a lock and thereby
+ * block patched functions!
+ */
+ if (patch->cmodel->pre_patch)
+ patch->cmodel->pre_patch(patch);
+
+ if (patch->cmodel->async_finish) {
+ klp_async_patch = patch;
+ klp_patch_change_state(patch, KLP_ENABLED, KLP_ASYNC_DISABLED);
+ } else {
+ klp_disable_patch_real(patch);
}

- patch->state = KLP_DISABLED;
+ if (patch->cmodel->post_patch)
+ patch->cmodel->post_patch(patch);

return 0;
}
@@ -520,14 +572,35 @@ err:
}
EXPORT_SYMBOL_GPL(klp_disable_patch);

+/**
+ * klp_async_patch_done - finish the asynchronous patch in progress
+ */
+void klp_async_patch_done(void)
+{
+ mutex_lock(&klp_mutex);
+ if (klp_async_patch->state == KLP_ASYNC_ENABLED) {
+ klp_patch_change_state(klp_async_patch, KLP_ASYNC_ENABLED,
+ KLP_ENABLED);
+ } else {
+ klp_disable_patch_real(klp_async_patch);
+ }
+
+ klp_async_patch = NULL;
+ mutex_unlock(&klp_mutex);
+}
+
static int __klp_enable_patch(struct klp_patch *patch)
{
struct klp_object *obj;
- int ret;
+ enum klp_state dstate;
+ int ret = 0;

if (WARN_ON(patch->state != KLP_DISABLED))
return -EINVAL;

+ if (klp_async_patch)
+ return -EBUSY;
+
/* enforce stacking: only the first disabled patch can be enabled */
if (patch->list.prev != &klp_patches &&
list_prev_entry(patch, list)->state == KLP_DISABLED)
@@ -547,19 +620,35 @@ static int __klp_enable_patch(struct klp_patch *patch)
goto unregister;
}

+ /*
+ * Do only fast and non-blocking operations between the pre_patch
+ * and post_patch callbacks: they might take a lock and thereby
+ * block patched functions!
+ */
+ if (patch->cmodel->pre_patch)
+ patch->cmodel->pre_patch(patch);
+
+ dstate = patch->cmodel->async_finish ? KLP_ASYNC_ENABLED : KLP_ENABLED;
+
klp_for_each_object(patch, obj) {
if (!klp_is_object_loaded(obj))
continue;

- klp_enable_object(obj, KLP_ENABLED);
+ klp_enable_object(obj, dstate);
}

- patch->state = KLP_ENABLED;
+ if (patch->cmodel->async_finish)
+ klp_async_patch = patch;

- return 0;
+ patch->state = dstate;
+
+ if (patch->cmodel->post_patch)
+ patch->cmodel->post_patch(patch);

unregister:
- WARN_ON(__klp_disable_patch(patch));
+ if (ret)
+ klp_disable_patch_real(patch);
+
return ret;
}

@@ -948,6 +1037,7 @@ static void klp_module_notify_coming(struct klp_patch *patch,
{
struct module *pmod = patch->mod;
struct module *mod = obj->mod;
+ enum klp_state dstate;
int ret;

ret = klp_init_object_loaded(patch, obj);
@@ -964,7 +1054,13 @@ static void klp_module_notify_coming(struct klp_patch *patch,
if (ret)
goto err;

- klp_enable_object(obj, KLP_ENABLED);
+ /*
+ * Put the object into the ASYNC state in case we are in a transition.
+ * The new module can still affect the behaviour of running processes.
+ */
+ dstate = klp_async_patch ? klp_async_patch->state : KLP_ENABLED;
+
+ klp_enable_object(obj, dstate);

return;
err:
@@ -1047,6 +1143,7 @@ static int klp_init(void)
}

klp_init_cmodel_simple();
+ klp_init_cmodel_kgraft();

ret = register_module_notifier(&klp_module_nb);
if (ret)
diff --git a/samples/livepatch/livepatch-sample.c b/samples/livepatch/livepatch-sample.c
index 48621de040db..25289083deac 100644
--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -63,7 +63,7 @@ static struct klp_object objs[] = {
static struct klp_patch patch = {
.mod = THIS_MODULE,
.objs = objs,
- .cmodel_id = KLP_CM_SIMPLE,
+ .cmodel_id = KLP_CM_KGRAFT,
};

static int livepatch_init(void)
--
2.3.5
