[PATCH 1/2] lockdep: lock_set_lock_cmp_fn()

From: Kent Overstreet
Date: Fri Feb 17 2023 - 22:21:36 EST


This implements a new interface to lockdep, lock_set_lock_cmp_fn(),
for defining an ordering when taking multiple locks of the same type.

This provides a more rigorous mechanism than the subclass mechanism;
the hope is that it will eventually be able to replace subclasses.
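
For example (hypothetical usage; the per-node structure and the
comparison function below are illustrative, not part of this patch),
a subsystem that takes several per-node locks of the same class could
declare that they must be acquired in address order:

  static int pernode_lock_cmp_fn(const struct lockdep_map *a,
                                 const struct lockdep_map *b)
  {
          /*
           * The dep_maps are embedded in the nodes, so comparing the
           * map addresses orders the nodes themselves; a negative
           * return value tells lockdep that holding 'a' while
           * acquiring 'b' is the valid order:
           */
          return a < b ? -1 : a > b ? 1 : 0;
  }

  ...
  spin_lock_init(&node->lock);
  lock_set_lock_cmp_fn(&node->lock, pernode_lock_cmp_fn);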

Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx> (maintainer:LOCKING PRIMITIVES)
Cc: Ingo Molnar <mingo@xxxxxxxxxx> (maintainer:LOCKING PRIMITIVES)
---
 include/linux/lockdep.h       |  8 ++++++
 include/linux/lockdep_types.h |  6 +++++
 kernel/locking/lockdep.c      | 51 ++++++++++++++++++++++++++++++++++-
3 files changed, 64 insertions(+), 1 deletion(-)
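
A note on semantics, for reviewers: as implemented in check_deadlock()
and check_prev_add() below, cmp_fn(prev->instance, next->instance) < 0
marks the same-class acquisition order as valid; zero or a positive
return value still produces a deadlock report. A minimal sketch of the
locking pattern this permits (two_lock() and struct foo are
illustrative only, assuming their lock class was given an
address-ordering cmp_fn as in the commit message example):

  static void two_lock(struct foo *a, struct foo *b)
  {
          /* Take the pair in the order the class's cmp_fn accepts: */
          if (a > b)
                  swap(a, b);

          spin_lock(&a->lock);
          spin_lock(&b->lock);    /* same class, cmp_fn < 0: no report */

          spin_unlock(&b->lock);
          spin_unlock(&a->lock);
  }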

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1f1099dac3..a4673a6f7a 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -662,4 +662,12 @@ lockdep_rcu_suspicious(const char *file, const int line, const char *s)
}
#endif

+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn fn);
+
+#define lock_set_lock_cmp_fn(lock, fn) lockdep_set_lock_cmp_fn(&(lock)->dep_map, fn)
+#else
+#define lock_set_lock_cmp_fn(lock, fn) do {} while (0)
+#endif
+
#endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index d22430840b..813467dccb 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -84,6 +84,10 @@ struct lock_trace;

#define LOCKSTAT_POINTS 4

+struct lockdep_map;
+typedef int (*lock_cmp_fn)(const struct lockdep_map *a,
+			   const struct lockdep_map *b);
+
 /*
  * The lock-class itself. The order of the structure members matters.
  * reinit_class() zeroes the key member and all subsequent members.
@@ -109,6 +113,8 @@ struct lock_class {
 	struct list_head		locks_after, locks_before;
 
 	const struct lockdep_subclass_key *key;
+	lock_cmp_fn			cmp_fn;
+
 	unsigned int			subclass;
 	unsigned int			dep_gen_id;

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index e3375bc40d..b9e759743e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2143,6 +2143,8 @@ check_path(struct held_lock *target, struct lock_list *src_entry,
 	return ret;
}

+static void print_deadlock_bug(struct task_struct *, struct held_lock *, struct held_lock *);
+
 /*
  * Prove that the dependency graph starting at <src> can not
  * lead to <target>. If it can, there is a circle when adding
@@ -2174,7 +2176,10 @@ check_noncircular(struct held_lock *src, struct held_lock *target,
 			*trace = save_trace();
 		}
 
-		print_circular_bug(&src_entry, target_entry, src, target);
+		if (src->class_idx == target->class_idx)
+			print_deadlock_bug(current, src, target);
+		else
+			print_circular_bug(&src_entry, target_entry, src, target);
 	}
 
 	return ret;
@@ -2968,6 +2973,8 @@ static void
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
 {
+	struct lock_class *class = hlock_class(prev);
+
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return;

@@ -2982,6 +2989,10 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
+	if (class->cmp_fn)
+		pr_warn("and the lock comparison function returns %i:\n",
+			class->cmp_fn(prev->instance, next->instance));
+
 	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
@@ -3003,6 +3014,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
static int
check_deadlock(struct task_struct *curr, struct held_lock *next)
{
+	struct lock_class *class;
 	struct held_lock *prev;
 	struct held_lock *nest = NULL;
 	int i;
@@ -3023,6 +3035,12 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
 		if ((next->read == 2) && prev->read)
 			continue;
 
+		class = hlock_class(prev);
+
+		if (class->cmp_fn &&
+		    class->cmp_fn(prev->instance, next->instance) < 0)
+			continue;
+
 		/*
 		 * We're holding the nest_lock, which serializes this lock's
 		 * nesting behaviour.
@@ -3084,6 +3102,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return 2;
 	}
 
+	if (prev->class_idx == next->class_idx) {
+		struct lock_class *class = hlock_class(prev);
+
+		if (class->cmp_fn &&
+		    class->cmp_fn(prev->instance, next->instance) < 0)
+			return 2;
+	}
+
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
 	 * create a circular dependency in the graph. (We do this by
@@ -6597,3 +6623,26 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	dump_stack();
}
EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+
+#ifdef CONFIG_PROVE_LOCKING
+void lockdep_set_lock_cmp_fn(struct lockdep_map *lock, lock_cmp_fn fn)
+{
+	struct lock_class *class = lock->class_cache[0];
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	lockdep_recursion_inc();
+
+	if (!class)
+		class = register_lock_class(lock, 0, 0);
+
+	WARN_ON(class && class->cmp_fn && class->cmp_fn != fn);
+
+	if (class)
+		class->cmp_fn = fn;
+
+	lockdep_recursion_finish();
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_set_lock_cmp_fn);
+#endif
--
2.39.2