Re: [PATCH RFC] drivers/core: Replace lockdep_set_novalidate_class() with unique class keys

From: Kent Overstreet
Date: Sat Feb 11 2023 - 18:08:28 EST


On Sat, Feb 11, 2023 at 06:06:28PM -0500, Kent Overstreet wrote:
> Yeah, what bcache really needs (and presumably dev->mutex as well) is just
> to disable lockdep checking for self-deadlock of that lock type, since it's
> got its own deadlock avoidance and the subclass thing isn't good enough.
>
> I've got a patch that should do what we want, replying from my other account
> with it.
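
Usage would look something like the below -- a rough sketch only; the
btree_node struct and init function are made up for illustration (the
real bcache call site would be wherever its btree node locks get
initialized):

	struct btree_node {
		struct mutex		lock;
	};

	static void btree_node_init(struct btree_node *b)
	{
		mutex_init(&b->lock);
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		/*
		 * bcache does its own deadlock avoidance for btree node
		 * locks, so tell lockdep not to treat nested same-class
		 * acquisitions as self-deadlock; ordering against other
		 * lock classes is still checked.
		 */
		lockdep_set_no_check_recursion(&b->lock.dep_map);
	#endif
	}
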
-- >8 --
Subject: [PATCH] locking/lockdep: Add lockdep_set_no_check_recursion()

This adds a helper that tells lockdep not to check for recursive
locking (self-deadlock) within a lock class, while still checking lock
ordering w.r.t. other lock classes.
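
For example, with a class marked this way, holding two locks of the
same class at once no longer produces a "possible recursive locking
detected" report, while dependencies against other classes are still
recorded and checked (the locks below are purely illustrative, not
part of this patch):

	mutex_lock(&a->lock);		/* a and b are in the marked class */
	mutex_lock(&b->lock);		/* same class: no longer reported */

	mutex_lock(&journal_lock);	/* different class: the marked class
					 * -> journal_lock dependency is still
					 * recorded, so taking the two in the
					 * opposite order elsewhere is still
					 * reported */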

Signed-off-by: Kent Overstreet <kent.overstreet@xxxxxxxxx>
---
 include/linux/lockdep.h       |  6 ++++++
 include/linux/lockdep_types.h |  2 +-
 kernel/locking/lockdep.c      | 26 ++++++++++++++++++++++++++
 3 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index e027c504b7..66f28553c6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -666,4 +666,10 @@ lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 }
 #endif
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void lockdep_set_no_check_recursion(struct lockdep_map *map);
+#else
+static inline void lockdep_set_no_check_recursion(struct lockdep_map *map) {}
+#endif
+
 #endif /* __LINUX_LOCKDEP_H */
diff --git a/include/linux/lockdep_types.h b/include/linux/lockdep_types.h
index d22430840b..506e769b4a 100644
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -128,7 +128,7 @@ struct lock_class {
 	u8				wait_type_inner;
 	u8				wait_type_outer;
 	u8				lock_type;
-	/* u8				hole; */
+	u8				no_check_recursion;
 
 #ifdef CONFIG_LOCK_STAT
 	unsigned long			contention_point[LOCKSTAT_POINTS];
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index cae7d5f0ad..47ffb8df11 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3023,6 +3023,9 @@ check_deadlock(struct task_struct *curr, struct held_lock *next)
 		if ((next->read == 2) && prev->read)
 			continue;
 
+		if (hlock_class(next)->no_check_recursion)
+			continue;
+
 		/*
 		 * We're holding the nest_lock, which serializes this lock's
 		 * nesting behaviour.
@@ -3084,6 +3087,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		return 2;
 	}
 
+	if (hlock_class(prev) == hlock_class(next) &&
+	    hlock_class(prev)->no_check_recursion)
+		return 2;
+
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
 	 * create a circular dependency in the graph. (We do this by
@@ -6617,3 +6624,22 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void lockdep_set_no_check_recursion(struct lockdep_map *lock)
+{
+	struct lock_class *class = lock->class_cache[0];
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	lockdep_recursion_inc();
+
+	if (!class)
+		class = register_lock_class(lock, 0, 0);
+	if (class)
+		class->no_check_recursion = true;
+	lockdep_recursion_finish();
+	raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lockdep_set_no_check_recursion);
+#endif
--
2.39.1