[RFT PATCH 07/13] kprobes: Use normal list traversal API if a mutex is held

From: Masami Hiramatsu
Date: Thu Jan 16 2020 - 09:45:30 EST


Use the normal list traversal API instead of the rcu_read_lock(),
RCU list traversal and rcu_read_unlock() combination in the
kprobe_insn_cache methods, because those methods already hold the
mutex that protects the list.

Signed-off-by: Masami Hiramatsu <mhiramat@xxxxxxxxxx>
---
kernel/kprobes.c | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)

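For reference, below is a minimal sketch of the pattern this patch relies
on (the example_cache/example_node/example_find names are made up for
illustration, they are not kernel code): the c->pages list is only ever
modified with c->mutex held, so a reader that also holds the mutex sees a
stable list and the plain list_for_each_entry() is enough. The
rcu_read_lock()/list_for_each_entry_rcu()/rcu_read_unlock() pattern is
only required for readers that walk the list without taking the mutex.

#include <linux/list.h>
#include <linux/mutex.h>

struct example_node {
	struct list_head list;
	int key;
};

struct example_cache {
	struct mutex mutex;		/* serializes every update of ->pages */
	struct list_head pages;		/* modified only with ->mutex held */
};

static struct example_node *example_find(struct example_cache *c, int key)
{
	struct example_node *n;

	mutex_lock(&c->mutex);
	/* Mutex held: writers are excluded, so no RCU read lock is needed. */
	list_for_each_entry(n, &c->pages, list) {
		if (n->key == key) {
			mutex_unlock(&c->mutex);
			return n;
		}
	}
	mutex_unlock(&c->mutex);
	return NULL;
}

A reader that cannot take the mutex (e.g. from a context where sleeping is
not allowed) would still need rcu_read_lock() and list_for_each_entry_rcu(),
which is why only the mutex-protected paths are converted here.
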
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 848c14e92ccc..09b0e33bc845 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -141,8 +141,7 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 	/* Since the slot array is not protected by rcu, we need a mutex */
 	mutex_lock(&c->mutex);
  retry:
-	rcu_read_lock();
-	list_for_each_entry_rcu(kip, &c->pages, list) {
+	list_for_each_entry(kip, &c->pages, list) {
 		if (kip->nused < slots_per_page(c)) {
 			int i;
 			for (i = 0; i < slots_per_page(c); i++) {
@@ -150,7 +149,6 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
 					slot = kip->insns + (i * c->insn_size);
-					rcu_read_unlock();
 					goto out;
 				}
 			}
@@ -159,7 +157,6 @@ kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
 			WARN_ON(1);
 		}
 	}
-	rcu_read_unlock();
 
 	/* If there are any garbage slots, collect it and try again. */
 	if (c->nr_garbage && collect_garbage_slots(c) == 0)
@@ -244,8 +241,7 @@ void __free_insn_slot(struct kprobe_insn_cache *c,
 	long idx;
 
 	mutex_lock(&c->mutex);
-	rcu_read_lock();
-	list_for_each_entry_rcu(kip, &c->pages, list) {
+	list_for_each_entry(kip, &c->pages, list) {
 		idx = ((long)slot - (long)kip->insns) /
 			(c->insn_size * sizeof(kprobe_opcode_t));
 		if (idx >= 0 && idx < slots_per_page(c))
@@ -255,7 +251,6 @@ void __free_insn_slot(struct kprobe_insn_cache *c,
 	WARN_ON(1);
 	kip = NULL;
 out:
-	rcu_read_unlock();
 	/* Mark and sweep: this may sleep */
 	if (kip) {
 		/* Check double free */