[patch 4/6] brlock: introduce special brlocks

From: npiggin
Date: Thu Oct 15 2009 - 02:06:17 EST


This patch introduces special brlocks; these can only be used as global
locks, and they use some preprocessor trickery to allow us to retain a more
optimal per-cpu lock implementation. We don't bother working around
lockdep yet.

Signed-off-by: Nick Piggin <npiggin@xxxxxxx>
---
include/linux/brlock.h | 112 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 112 insertions(+)

Index: linux-2.6/include/linux/brlock.h
===================================================================
--- /dev/null
+++ linux-2.6/include/linux/brlock.h
@@ -0,0 +1,112 @@
+/*
+ * Specialised big-reader spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * Copyright 2009, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_BRLOCK_H
+#define __LINUX_BRLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_LOCKDEP)
+#define DECLARE_BRLOCK(name) \
+ DECLARE_PER_CPU(spinlock_t, name##_lock); /* one spinlock per possible CPU */ \
+ static inline void name##_lock_init(void) { \
+ int i; \
+ for_each_possible_cpu(i) { \
+ spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ spin_lock_init(lock); \
+ } \
+ } \
+ static inline void name##_rlock(void) { /* reader: take only this CPU's lock; get_cpu_var disables preemption */ \
+ spinlock_t *lock; \
+ lock = &get_cpu_var(name##_lock); \
+ spin_lock(lock); \
+ } \
+ static inline void name##_runlock(void) { /* pairs with name##_rlock; put_cpu_var re-enables preemption */ \
+ spinlock_t *lock; \
+ lock = &__get_cpu_var(name##_lock); \
+ spin_unlock(lock); \
+ put_cpu_var(name##_lock); \
+ } \
+ extern void name##_wlock(void); /* writer side takes every CPU's lock; bodies supplied by DEFINE_BRLOCK */ \
+ extern void name##_wunlock(void); \
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) { \
+ int ret; \
+ spinlock_t *lock; \
+ lock = &get_cpu_var(name##_lock); \
+ ret = atomic_dec_and_lock(a, lock); \
+ if (!ret) /* failure: drop per-cpu ref; on success lock stays held, caller releases via name##_runlock */ \
+ put_cpu_var(name##_lock); \
+ return ret; \
+ } \
+ extern int name##_atomic_dec_and_wlock__failed(atomic_t *a); \
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) { \
+ if (atomic_add_unless(a, -1, 1)) /* fast path: lockless decrement unless the count would hit zero */ \
+ return 0; \
+ return name##_atomic_dec_and_wlock__failed(a); \
+ }
+
+#define DEFINE_BRLOCK(name) \
+ DEFINE_PER_CPU(spinlock_t, name##_lock); \
+ void name##_wlock(void) { \
+ int i; \
+ for_each_possible_cpu(i) { /* possible, not online: readers lock their own CPU's lock, so a CPU onlined while the write lock is held must already be covered */ \
+ spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ spin_lock(lock); /* always acquired in cpu-id order, so concurrent writers cannot deadlock */ \
+ } \
+ } \
+ void name##_wunlock(void) { \
+ int i; \
+ for_each_possible_cpu(i) { /* must match the set locked by name##_wlock */ \
+ spinlock_t *lock; \
+ lock = &per_cpu(name##_lock, i); \
+ spin_unlock(lock); \
+ } \
+ } \
+ int name##_atomic_dec_and_wlock__failed(atomic_t *a) { /* slow path for name##_atomic_dec_and_wlock */ \
+ name##_wlock(); \
+ if (!atomic_dec_and_test(a)) { \
+ name##_wunlock(); \
+ return 0; \
+ } \
+ return 1; /* count hit zero: return with the write lock held */ \
+ }
+
+#else
+
+#define DECLARE_BRLOCK(name) \
+ extern spinlock_t name##_lock; /* extern: the definition belongs to DEFINE_BRLOCK, not every including unit */ \
+ static inline void name##_lock_init(void) { \
+ spin_lock_init(&name##_lock); \
+ } \
+ static inline void name##_rlock(void) { \
+ spin_lock(&name##_lock); \
+ } \
+ static inline void name##_runlock(void) { \
+ spin_unlock(&name##_lock); \
+ } \
+ static inline void name##_wlock(void) { \
+ spin_lock(&name##_lock); \
+ } \
+ static inline void name##_wunlock(void) { \
+ spin_unlock(&name##_lock); \
+ } \
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) { \
+ return atomic_dec_and_lock(a, &name##_lock); \
+ } \
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) { \
+ return atomic_dec_and_lock(a, &name##_lock); \
+ }
+
+#define DEFINE_BRLOCK(name) \
+ spinlock_t name##_lock /* no static initializer: name##_lock_init() must run before first use */
+#endif
+
+#endif


--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/