[RFT PATCH 08/13] kprobes: Use workqueue for reclaiming kprobe insn cache pages

From: Masami Hiramatsu
Date: Thu Jan 16 2020 - 09:45:52 EST


Use a workqueue for reclaiming kprobe insn cache pages. This splits the
heaviest part of the reclaim, the synchronize_rcu_tasks() wait, off the
kprobe unregistration path and into a background worker.
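
The pattern is to embed a work_struct in the cache, kick it from the
hot path, and do the mutex-protected reclaim in the worker. A condensed,
self-contained sketch of that pattern (identifiers here are illustrative,
not the ones used in the patch):

	#include <linux/kernel.h>
	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	struct insn_cache {
		struct mutex mutex;
		int nr_garbage;
		struct work_struct work;	/* deferred reclaim */
	};

	/* Runs in process context on the system workqueue. */
	static void insn_cache_gc(struct work_struct *work)
	{
		struct insn_cache *c = container_of(work, typeof(*c), work);

		mutex_lock(&c->mutex);
		/* ... wait for RCU tasks and free dirty slots here ... */
		c->nr_garbage = 0;
		mutex_unlock(&c->mutex);
	}

	static struct insn_cache cache = {
		.mutex = __MUTEX_INITIALIZER(cache.mutex),
		.work  = __WORK_INITIALIZER(cache.work, insn_cache_gc),
	};

	/* Hot path: kick the worker instead of reclaiming synchronously. */
	static void kick_insn_cache_gc(struct insn_cache *c)
	{
		if (!work_pending(&c->work))
			schedule_work(&c->work);
	}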

Signed-off-by: Masami Hiramatsu <mhiramat@xxxxxxxxxx>
---
include/linux/kprobes.h |  2 ++
kernel/kprobes.c        | 29 ++++++++++++++++++-----------
2 files changed, 20 insertions(+), 11 deletions(-)
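
Note for testers: reclaim is now asynchronous, so anything that wants to
observe the cache state after a reclaim must wait for the worker first.
A minimal sketch of forcing the deferred GC to completion (this snippet
is not part of the patch; flush_work() is the stock workqueue API):

	/* Queue the GC if it is not already pending, then wait for it. */
	schedule_work(&kprobe_insn_slots.work);
	flush_work(&kprobe_insn_slots.work);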

diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 04bdaf01112c..0f832817fca3 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -245,6 +245,7 @@ struct kprobe_insn_cache {
struct list_head pages; /* list of kprobe_insn_page */
size_t insn_size; /* size of instruction slot */
int nr_garbage;
+ struct work_struct work;
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
@@ -254,6 +255,7 @@ extern void __free_insn_slot(struct kprobe_insn_cache *c,
/* sleep-less address checking routine */
extern bool __is_insn_slot_addr(struct kprobe_insn_cache *c,
unsigned long addr);
+void kprobe_insn_cache_gc(struct work_struct *work);

#define DEFINE_INSN_CACHE_OPS(__name) \
extern struct kprobe_insn_cache kprobe_##__name##_slots; \
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 09b0e33bc845..a9114923da4c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -126,8 +126,15 @@ struct kprobe_insn_cache kprobe_insn_slots = {
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
.nr_garbage = 0,
+ .work = __WORK_INITIALIZER(kprobe_insn_slots.work,
+ kprobe_insn_cache_gc),
};
-static int collect_garbage_slots(struct kprobe_insn_cache *c);
+
+static void kick_kprobe_insn_cache_gc(struct kprobe_insn_cache *c)
+{
+ if (!work_pending(&c->work))
+ schedule_work(&c->work);
+}

/**
* __get_insn_slot() - Find a slot on an executable page for an instruction.
@@ -140,7 +147,6 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)

/* Since the slot array is not protected by rcu, we need a mutex */
mutex_lock(&c->mutex);
- retry:
list_for_each_entry(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
int i;
@@ -158,11 +164,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
}
}

- /* If there are any garbage slots, collect it and try again. */
- if (c->nr_garbage && collect_garbage_slots(c) == 0)
- goto retry;
-
- /* All out of space. Need to allocate a new page. */
+ /* All out of space. Need to allocate a new page. */
kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
if (!kip)
goto out;
@@ -213,10 +215,12 @@ static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
return 0;
}

-static int collect_garbage_slots(struct kprobe_insn_cache *c)
+void kprobe_insn_cache_gc(struct work_struct *work)
{
+ struct kprobe_insn_cache *c = container_of(work, typeof(*c), work);
struct kprobe_insn_page *kip, *next;

+ mutex_lock(&c->mutex);
/* Ensure no-one is running on the garbages. */
synchronize_rcu_tasks();

@@ -226,12 +230,13 @@ static int collect_garbage_slots(struct kprobe_insn_cache *c)
continue;
kip->ngarbage = 0; /* we will collect all garbages */
for (i = 0; i < slots_per_page(c); i++) {
- if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
+ if (kip->slot_used[i] == SLOT_DIRTY &&
+ collect_one_slot(kip, i))
break;
}
}
c->nr_garbage = 0;
- return 0;
+ mutex_unlock(&c->mutex);
}

void __free_insn_slot(struct kprobe_insn_cache *c,
@@ -259,7 +264,7 @@ void __free_insn_slot(struct kprobe_insn_cache *c,
kip->slot_used[idx] = SLOT_DIRTY;
kip->ngarbage++;
if (++c->nr_garbage > slots_per_page(c))
- collect_garbage_slots(c);
+ kick_kprobe_insn_cache_gc(c);
} else {
collect_one_slot(kip, idx);
}
@@ -299,6 +304,8 @@ struct kprobe_insn_cache kprobe_optinsn_slots = {
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
.nr_garbage = 0,
+ .work = __WORK_INITIALIZER(kprobe_optinsn_slots.work,
+ kprobe_insn_cache_gc),
};
#endif
#endif
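
A note on the design choice: with the retry loop gone from
__get_insn_slot(), an allocation that finds all pages full no longer
reclaims dirty slots inline but allocates a fresh page, trading a little
transient memory for keeping synchronize_rcu_tasks() off the
unregistration path; the queued worker frees the garbage shortly
afterwards. The work_pending() check in kick_kprobe_insn_cache_gc() is
only an optimization, since queueing an already-pending work item is a
no-op anyway.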