Re: RFC: patch to allow lock-free traversal of lists with insertion

From: Paul E. McKenney (mckenney@eng4.beaverton.ibm.com)
Date: Wed Oct 10 2001 - 20:56:26 EST


> On Wed, Oct 10, 2001 at 02:47:05PM -0700, Paul McKenney wrote:
> > > I don't think it's actually all that bad. There won't be all
> > > that many places that require the rmbdd, and they'll pretty
> > > much exactly correspond to the places in which you have to put
> > > wmb for all architectures anyway.
> >
> > Just to make sure I understand... This rmbdd() would use IPIs to
> > get all the CPUs' caches synchronized, right?
>
> Err, I see your confusion now.
>
> "Correspond" meaning "for every wmb needed on the writer side,
> there is likely an rmb needed on the reader side in a similar
> place".

Fair enough!

Here are two patches. The wmbdd patch has been modified to use
the lighter-weight SPARC instruction, as suggested by Dave Miller.
The rmbdd patch adds an rmbdd() primitive that is rmb() on Alpha
and a nop on all other architectures. I believe this rmbdd()
primitive is what Richard is looking for.
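
To make the intended pairing concrete, here is a rough sketch of
lock-free traversal of a singly-linked list with concurrent insertion,
written against the rmbdd patch. It is not part of either patch; the
names (struct elem, list_head, insert(), search()) are made up for
illustration, and insertions are assumed to be serialized by some lock
that is not shown here.

        struct elem {                           /* illustrative only */
                int data;
                struct elem *next;
        };
        struct elem *list_head;                 /* traversed without locks */

        void insert(struct elem *e, int data)   /* caller holds list lock */
        {
                e->data = data;
                e->next = list_head;
                smp_wmb();              /* order initialization before publication */
                list_head = e;
        }

        int search(int key)                     /* lock-free reader */
        {
                struct elem *p;

                for (p = list_head; p != NULL; p = p->next) {
                        smp_rmbdd();    /* rmb() on Alpha, nop elsewhere:
                                           orders the dependent loads of
                                           p->data and p->next after the
                                           load of the pointer itself */
                        if (p->data == key)
                                return 1;
                }
                return 0;
        }

On Alpha, smp_rmbdd() expands to rmb(), so the reader pays for the
ordering; on every other architecture it compiles away and the data
dependency does the work.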

Please pass on any comments or criticisms. I am particularly
interested in comments from people with PA-RISC and MIPS
expertise, as I am not 100% sure that I have interpreted
the PA-RISC architecture manual correctly, and I do not yet
have a MIPS manual. I do not believe that these architectures
need the Alpha treatment, but then again, I didn't think
that Alpha needed the Alpha treatment when I first encountered
it -- and I am quite clearly not the only one! ;-)

                                        Thanx, Paul

PS. An updated explanation of why this is needed may be found
     at http://lse.sourceforge.net/locking/wmbdd.html
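
For comparison, under the wmbdd patch the write side would instead
shoulder the full cost and the reader would need no barrier at all.
Again a rough sketch only, reusing the illustrative struct elem and
list_head from the sketch above:

        void insert(struct elem *e, int data)   /* caller holds list lock */
        {
                e->data = data;
                e->next = list_head;
                smp_wmbdd();    /* smp_global_mb() on Alpha: IPI the other
                                   CPUs and wait for their "mb"s; ordinary
                                   wmb()-style ordering everywhere else */
                list_head = e;
        }

        /* Readers then simply follow list_head and p->next with no
           barriers, relying on the data dependency alone. */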

diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-alpha/system.h linux-2.4.10.rmbdd/include/asm-alpha/system.h
--- linux-2.4.10/include/asm-alpha/system.h Sun Aug 12 10:38:47 2001
+++ linux-2.4.10.rmbdd/include/asm-alpha/system.h Wed Oct 10 16:49:11 2001
@@ -148,16 +148,21 @@
 #define rmb() \
 __asm__ __volatile__("mb": : :"memory")
 
+#define rmbdd() \
+__asm__ __volatile__("mb": : :"memory")
+
 #define wmb() \
 __asm__ __volatile__("wmb": : :"memory")
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() barrier()
 #define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-arm/system.h linux-2.4.10.rmbdd/include/asm-arm/system.h
--- linux-2.4.10/include/asm-arm/system.h Mon Nov 27 17:07:59 2000
+++ linux-2.4.10.rmbdd/include/asm-arm/system.h Wed Oct 10 18:18:12 2001
@@ -38,6 +38,7 @@
 
 #define mb() __asm__ __volatile__ ("" : : : "memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() mb()
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
@@ -67,12 +68,14 @@
 
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 
 #else
 
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 
 #define cli() __cli()
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-cris/system.h linux-2.4.10.rmbdd/include/asm-cris/system.h
--- linux-2.4.10/include/asm-cris/system.h Tue May 1 16:05:00 2001
+++ linux-2.4.10.rmbdd/include/asm-cris/system.h Wed Oct 10 18:19:04 2001
@@ -143,15 +143,18 @@
 
 #define mb() __asm__ __volatile__ ("" : : : "memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() mb()
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-i386/system.h linux-2.4.10.rmbdd/include/asm-i386/system.h
--- linux-2.4.10/include/asm-i386/system.h Sun Sep 23 10:31:01 2001
+++ linux-2.4.10.rmbdd/include/asm-i386/system.h Wed Oct 10 17:00:57 2001
@@ -284,15 +284,18 @@
  */
 #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() __asm__ __volatile__ ("": : :"memory")
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-ia64/system.h linux-2.4.10.rmbdd/include/asm-ia64/system.h
--- linux-2.4.10/include/asm-ia64/system.h Tue Jul 31 10:30:09 2001
+++ linux-2.4.10.rmbdd/include/asm-ia64/system.h Wed Oct 10 17:01:09 2001
@@ -85,6 +85,9 @@
  * stores and that all following stores will be
  * visible only after all previous stores.
  * rmb(): Like wmb(), but for reads.
+ * rmbdd(): Like rmb(), but only for pairs of loads where
+ * the second load depends on the value loaded
+ * by the first.
  * mb(): wmb()/rmb() combo, i.e., all previous memory
  * accesses are visible before all subsequent
  * accesses and vice versa. This is also known as
@@ -98,15 +101,18 @@
  */
 #define mb() __asm__ __volatile__ ("mf" ::: "memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() mb()
 
 #ifdef CONFIG_SMP
 # define smp_mb() mb()
 # define smp_rmb() rmb()
+# define smp_rmbdd() rmbdd()
 # define smp_wmb() wmb()
 #else
 # define smp_mb() barrier()
 # define smp_rmb() barrier()
+# define smp_rmbdd() do { } while(0)
 # define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-m68k/system.h linux-2.4.10.rmbdd/include/asm-m68k/system.h
--- linux-2.4.10/include/asm-m68k/system.h Mon Jun 11 19:15:27 2001
+++ linux-2.4.10.rmbdd/include/asm-m68k/system.h Wed Oct 10 17:01:15 2001
@@ -80,12 +80,14 @@
 #define nop() do { asm volatile ("nop"); barrier(); } while (0)
 #define mb() barrier()
 #define rmb() barrier()
+#define rmbdd() do { } while(0)
 #define wmb() barrier()
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-mips/system.h linux-2.4.10.rmbdd/include/asm-mips/system.h
--- linux-2.4.10/include/asm-mips/system.h Sun Sep 9 10:43:01 2001
+++ linux-2.4.10.rmbdd/include/asm-mips/system.h Wed Oct 10 17:01:26 2001
@@ -150,6 +150,7 @@
 
 #include <asm/wbflush.h>
 #define rmb() do { } while(0)
+#define rmbdd() do { } while(0)
 #define wmb() wbflush()
 #define mb() wbflush()
 
@@ -166,6 +167,7 @@
         : /* no input */ \
         : "memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() mb()
 
 #endif /* CONFIG_CPU_HAS_WB */
@@ -173,10 +175,12 @@
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-mips64/system.h linux-2.4.10.rmbdd/include/asm-mips64/system.h
--- linux-2.4.10/include/asm-mips64/system.h Wed Jul 4 11:50:39 2001
+++ linux-2.4.10.rmbdd/include/asm-mips64/system.h Wed Oct 10 17:01:41 2001
@@ -147,15 +147,18 @@
         : /* no input */ \
         : "memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() mb()
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-parisc/system.h linux-2.4.10.rmbdd/include/asm-parisc/system.h
--- linux-2.4.10/include/asm-parisc/system.h Wed Dec 6 11:46:39 2000
+++ linux-2.4.10.rmbdd/include/asm-parisc/system.h Wed Oct 10 17:04:07 2001
@@ -50,6 +50,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() wmb()
 #else
 /* This is simply the barrier() macro from linux/kernel.h but when serial.c
@@ -58,6 +59,7 @@
  */
 #define smp_mb() __asm__ __volatile__("":::"memory");
 #define smp_rmb() __asm__ __volatile__("":::"memory");
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() __asm__ __volatile__("":::"memory");
 #endif
 
@@ -122,6 +124,7 @@
 
 #define mb() __asm__ __volatile__ ("sync" : : :"memory")
 #define wmb() mb()
+#define rmbdd() do { } while(0)
 
 extern unsigned long __xchg(unsigned long, unsigned long *, int);
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-ppc/system.h linux-2.4.10.rmbdd/include/asm-ppc/system.h
--- linux-2.4.10/include/asm-ppc/system.h Tue Aug 28 06:58:33 2001
+++ linux-2.4.10.rmbdd/include/asm-ppc/system.h Wed Oct 10 18:19:43 2001
@@ -24,6 +24,8 @@
  *
  * mb() prevents loads and stores being reordered across this point.
  * rmb() prevents loads being reordered across this point.
+ * rmbdd() prevents data-dependent loads being reordered across this point
+ * (nop on PPC).
  * wmb() prevents stores being reordered across this point.
  *
  * We can use the eieio instruction for wmb, but since it doesn't
@@ -32,6 +34,7 @@
  */
 #define mb() __asm__ __volatile__ ("sync" : : : "memory")
 #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+#define rmbdd() do { } while(0)
 #define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
 
 #define set_mb(var, value) do { var = value; mb(); } while (0)
@@ -40,10 +43,12 @@
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() __asm__ __volatile__("": : :"memory")
 #define smp_rmb() __asm__ __volatile__("": : :"memory")
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() __asm__ __volatile__("": : :"memory")
 #endif /* CONFIG_SMP */
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-s390/system.h linux-2.4.10.rmbdd/include/asm-s390/system.h
--- linux-2.4.10/include/asm-s390/system.h Wed Jul 25 14:12:02 2001
+++ linux-2.4.10.rmbdd/include/asm-s390/system.h Wed Oct 10 18:20:31 2001
@@ -117,9 +117,11 @@
 # define SYNC_OTHER_CORES(x) eieio()
 #define mb() eieio()
 #define rmb() eieio()
+#define rmbdd() do { } while(0)
 #define wmb() eieio()
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #define smp_mb__before_clear_bit() smp_mb()
 #define smp_mb__after_clear_bit() smp_mb()
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-s390x/system.h linux-2.4.10.rmbdd/include/asm-s390x/system.h
--- linux-2.4.10/include/asm-s390x/system.h Wed Jul 25 14:12:03 2001
+++ linux-2.4.10.rmbdd/include/asm-s390x/system.h Wed Oct 10 17:04:45 2001
@@ -130,9 +130,11 @@
 # define SYNC_OTHER_CORES(x) eieio()
 #define mb() eieio()
 #define rmb() eieio()
+#define rmbdd() do { } while(0)
 #define wmb() eieio()
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #define smp_mb__before_clear_bit() smp_mb()
 #define smp_mb__after_clear_bit() smp_mb()
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-sh/system.h linux-2.4.10.rmbdd/include/asm-sh/system.h
--- linux-2.4.10/include/asm-sh/system.h Sat Sep 8 12:29:09 2001
+++ linux-2.4.10.rmbdd/include/asm-sh/system.h Wed Oct 10 17:05:07 2001
@@ -88,15 +88,18 @@
 
 #define mb() __asm__ __volatile__ ("": : :"memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() __asm__ __volatile__ ("": : :"memory")
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() barrier()
 #endif
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-sparc/system.h linux-2.4.10.rmbdd/include/asm-sparc/system.h
--- linux-2.4.10/include/asm-sparc/system.h Tue Oct 3 09:24:41 2000
+++ linux-2.4.10.rmbdd/include/asm-sparc/system.h Wed Oct 10 16:59:44 2001
@@ -277,11 +277,13 @@
 /* XXX Change this if we ever use a PSO mode kernel. */
 #define mb() __asm__ __volatile__ ("" : : : "memory")
 #define rmb() mb()
+#define rmbdd() do { } while(0)
 #define wmb() mb()
 #define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
 #define set_wmb(__var, __value) set_mb(__var, __value)
 #define smp_mb() __asm__ __volatile__("":::"memory");
 #define smp_rmb() __asm__ __volatile__("":::"memory");
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() __asm__ __volatile__("":::"memory");
 
 #define nop() __asm__ __volatile__ ("nop");
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-sparc64/system.h linux-2.4.10.rmbdd/include/asm-sparc64/system.h
--- linux-2.4.10/include/asm-sparc64/system.h Fri Sep 7 11:01:20 2001
+++ linux-2.4.10.rmbdd/include/asm-sparc64/system.h Wed Oct 10 17:00:12 2001
@@ -99,6 +99,7 @@
 #define mb() \
         membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad");
 #define rmb() membar("#LoadLoad")
+#define rmbdd() do { } while(0)
 #define wmb() membar("#StoreStore")
 #define set_mb(__var, __value) \
         do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
@@ -108,10 +109,12 @@
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
+#define smp_rmbdd() rmbdd()
 #define smp_wmb() wmb()
 #else
 #define smp_mb() __asm__ __volatile__("":::"memory");
 #define smp_rmb() __asm__ __volatile__("":::"memory");
+#define smp_rmbdd() do { } while(0)
 #define smp_wmb() __asm__ __volatile__("":::"memory");
 #endif
 

diff -urN -X /home/mckenney/dontdiff linux-2.4.10/arch/alpha/kernel/smp.c linux-2.4.10.wmbdd/arch/alpha/kernel/smp.c
--- linux-2.4.10/arch/alpha/kernel/smp.c Thu Sep 13 15:21:32 2001
+++ linux-2.4.10.wmbdd/arch/alpha/kernel/smp.c Mon Oct 8 18:31:18 2001
@@ -63,8 +63,20 @@
         IPI_RESCHEDULE,
         IPI_CALL_FUNC,
         IPI_CPU_STOP,
+        IPI_MB,
 };
 
+/* Global and per-CPU state for global MB shootdown. */
+static struct {
+        spinlock_t mutex;
+        unsigned long need_mb;  /* bitmask of CPUs that need to do "mb". */
+        long curgen;            /* Each "generation" is a group of requests */
+        long maxgen;            /* that is handled by one set of "mb"s. */
+} mb_global_data __cacheline_aligned = { SPIN_LOCK_UNLOCKED, 0, 1, 0 };
+static struct {
+        long mygen ____cacheline_aligned;
+} mb_data[NR_CPUS] __cacheline_aligned;
+
 spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;
 
 /* Set to a secondary's cpuid when it comes online. */
@@ -772,6 +784,41 @@
         goto again;
 }
 
+/*
+ * Execute an "mb" instruction in response to an IPI_MB. Also directly
+ * called by smp_global_mb(). If this is the last CPU to respond to
+ * an smp_global_mb(), then check to see if an additional generation of
+ * requests needs to be satisfied.
+ */
+
+void
+handle_mb_ipi(void)
+{
+        int this_cpu = smp_processor_id();
+        unsigned long this_cpu_mask = 1UL << this_cpu;
+        unsigned long flags;
+        unsigned long to_whom = cpu_present_mask ^ this_cpu_mask;
+
+        /* Avoid lock contention when extra IPIs arrive (due to race) and
+           when waiting for global mb shootdown. */
+        if ((mb_global_data.need_mb & this_cpu_mask) == 0) {
+                return;
+        }
+        spin_lock_irqsave(&mb_global_data.mutex, flags); /* implied mb */
+        if ((mb_global_data.need_mb & this_cpu_mask) == 0) {
+                spin_unlock_irqrestore(&mb_global_data.mutex, flags);
+                return;
+        }
+        mb_global_data.need_mb &= ~this_cpu_mask;
+        if (mb_global_data.need_mb == 0) {
+                if (++mb_global_data.curgen - mb_global_data.maxgen <= 0) {
+                        mb_global_data.need_mb = to_whom;
+                        send_ipi_message(to_whom, IPI_MB);
+                }
+        }
+        spin_unlock_irqrestore(&mb_global_data.mutex, flags); /* implied mb */
+}
+
 void
 handle_ipi(struct pt_regs *regs)
 {
@@ -825,6 +872,9 @@
                 else if (which == IPI_CPU_STOP) {
                         halt();
                 }
+                else if (which == IPI_MB) {
+                        handle_mb_ipi();
+                }
                 else {
                         printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
                                this_cpu, which);
@@ -860,6 +910,58 @@
                 printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
 #endif
         send_ipi_message(to_whom, IPI_CPU_STOP);
+}
+
+/*
+ * Execute an "mb" instruction, then force all other CPUs to execute "mb"
+ * instructions. Does not block. Once this function returns, the caller
+ * is guaranteed that all of its memory writes preceding the call to
+ * smp_global_mb() will be seen by all CPUs as preceding all memory
+ * writes following the call to smp_global_mb().
+ *
+ * For example, if CPU 0 does:
+ *      a.data = 1;
+ *      smp_global_mb();
+ *      p = &a;
+ * and CPU 1 does:
+ *      d = p->data;
+ * where a.data is initially garbage and p initially points to another
+ * structure with the "data" field being zero, then CPU 1 will be
+ * guaranteed to have "d" set to either 0 or 1, never garbage.
+ *
+ * Note that the Alpha "wmb" instruction is -not- sufficient!!! If CPU 0
+ * were to replace the smp_global_mb() with a wmb(), then CPU 1 could end
+ * up with garbage in "d"!
+ *
+ * This function sends IPIs to all other CPUs, then spins waiting for
+ * them to receive the IPI and execute an "mb" instruction. While
+ * spinning, this function -must- respond to other CPUs executing
+ * smp_global_mb() concurrently, otherwise, deadlock would result.
+ */
+
+void
+smp_global_mb(void)
+{
+        int this_cpu = smp_processor_id();
+        unsigned long this_cpu_mask = 1UL << this_cpu;
+        unsigned long flags;
+        unsigned long to_whom = cpu_present_mask ^ this_cpu_mask;
+
+        spin_lock_irqsave(&mb_global_data.mutex, flags); /* implied mb */
+        if (mb_global_data.curgen - mb_global_data.maxgen <= 0) {
+                mb_global_data.maxgen = mb_global_data.curgen + 1;
+        } else {
+                mb_global_data.maxgen = mb_global_data.curgen;
+                mb_global_data.need_mb = to_whom;
+                send_ipi_message(to_whom, IPI_MB);
+        }
+        mb_data[this_cpu].mygen = mb_global_data.maxgen;
+        spin_unlock_irqrestore(&mb_global_data.mutex, flags);
+        while (mb_data[this_cpu].mygen - mb_global_data.curgen >= 0) {
+                handle_mb_ipi();
+                barrier();
+        }
+
 }
 
 /*
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-alpha/system.h linux-2.4.10.wmbdd/include/asm-alpha/system.h
--- linux-2.4.10/include/asm-alpha/system.h Sun Aug 12 10:38:47 2001
+++ linux-2.4.10.wmbdd/include/asm-alpha/system.h Mon Oct 8 18:31:18 2001
@@ -151,14 +151,21 @@
 #define wmb() \
 __asm__ __volatile__("wmb": : :"memory")
 
+#define mbdd() smp_mbdd()
+#define wmbdd() smp_wmbdd()
+
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() smp_global_mb()
+#define smp_wmbdd() smp_mbdd()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 #endif
 
 #define set_mb(var, value) \
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-arm/system.h linux-2.4.10.wmbdd/include/asm-arm/system.h
--- linux-2.4.10/include/asm-arm/system.h Mon Nov 27 17:07:59 2000
+++ linux-2.4.10.wmbdd/include/asm-arm/system.h Mon Oct 8 18:31:18 2001
@@ -39,6 +39,8 @@
 #define mb() __asm__ __volatile__ ("" : : : "memory")
 #define rmb() mb()
 #define wmb() mb()
+#define mbdd() mb()
+#define wmbdd() wmb()
 #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
 
 #define prepare_to_switch() do { } while(0)
@@ -68,12 +70,16 @@
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mbdd()
+#define smp_wmbdd() wmbdd()
 
 #else
 
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 
 #define cli() __cli()
 #define sti() __sti()
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-cris/system.h linux-2.4.10.wmbdd/include/asm-cris/system.h
--- linux-2.4.10/include/asm-cris/system.h Tue May 1 16:05:00 2001
+++ linux-2.4.10.wmbdd/include/asm-cris/system.h Mon Oct 8 18:31:18 2001
@@ -144,15 +144,21 @@
 #define mb() __asm__ __volatile__ ("" : : : "memory")
 #define rmb() mb()
 #define wmb() mb()
+#define mbdd() mb()
+#define wmbdd() wmb()
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mbdd()
+#define smp_wmbdd() wmbdd()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 #endif
 
 #define iret()
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-i386/system.h linux-2.4.10.wmbdd/include/asm-i386/system.h
--- linux-2.4.10/include/asm-i386/system.h Sun Sep 23 10:31:01 2001
+++ linux-2.4.10.wmbdd/include/asm-i386/system.h Mon Oct 8 18:31:18 2001
@@ -285,15 +285,21 @@
 #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
 #define rmb() mb()
 #define wmb() __asm__ __volatile__ ("": : :"memory")
+#define mbdd() mb()
+#define wmbdd() wmb()
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mbdd()
+#define smp_wmbdd() wmbdd()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 #endif
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-ia64/system.h linux-2.4.10.wmbdd/include/asm-ia64/system.h
--- linux-2.4.10/include/asm-ia64/system.h Tue Jul 31 10:30:09 2001
+++ linux-2.4.10.wmbdd/include/asm-ia64/system.h Mon Oct 8 18:31:18 2001
@@ -84,11 +84,36 @@
  * like regions are visible before any subsequent
  * stores and that all following stores will be
  * visible only after all previous stores.
- * rmb(): Like wmb(), but for reads.
+ * In common code, any reads that depend on this
+ * ordering must be separated by an mb() or rmb().
+ * rmb(): Guarantees that all preceding loads to memory-
+ * like regions are executed before any subsequent
+ * loads.
  * mb(): wmb()/rmb() combo, i.e., all previous memory
  * accesses are visible before all subsequent
  * accesses and vice versa. This is also known as
- * a "fence."
+ * a "fence." Again, in common code, any reads that
+ * depend on the order of writes must themselves be
+ * separated by an mb() or rmb().
+ * wmbdd(): Guarantees that all preceding stores to memory-
+ * like regions are visible before any subsequent
+ * stores and that all following stores will be
+ * visible only after all previous stores.
+ * In common code, any reads that depend on this
+ * ordering either must be separated by an mb()
+ * or rmb(), or the later reads must depend on
+ * data loaded by the earlier reads. For an example
+ * of the latter, consider "p->next". The read of
+ * the "next" field depends on the read of the
+ * pointer "p".
+ * mbdd(): wmb()/rmb() combo, i.e., all previous memory
+ * accesses are visible before all subsequent
+ * accesses and vice versa. This is also known as
+ * a "fence." Again, in common code, any reads that
+ * depend on the order of writes must themselves be
+ * separated by an mb() or rmb(), or there must be
+ * a data dependency that forces the second to
+ * wait until the first completes.
  *
  * Note: "mb()" and its variants cannot be used as a fence to order
  * accesses to memory mapped I/O registers. For that, mf.a needs to
@@ -99,15 +124,21 @@
 #define mb() __asm__ __volatile__ ("mf" ::: "memory")
 #define rmb() mb()
 #define wmb() mb()
+#define mbdd() mb()
+#define wmbdd() mb()
 
 #ifdef CONFIG_SMP
 # define smp_mb() mb()
 # define smp_rmb() rmb()
 # define smp_wmb() wmb()
+# define smp_mbdd() mbdd()
+# define smp_wmbdd() wmbdd()
 #else
 # define smp_mb() barrier()
 # define smp_rmb() barrier()
 # define smp_wmb() barrier()
+# define smp_mbdd() barrier()
+# define smp_wmbdd() barrier()
 #endif
 
 /*
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-m68k/system.h linux-2.4.10.wmbdd/include/asm-m68k/system.h
--- linux-2.4.10/include/asm-m68k/system.h Mon Jun 11 19:15:27 2001
+++ linux-2.4.10.wmbdd/include/asm-m68k/system.h Mon Oct 8 18:31:18 2001
@@ -81,12 +81,16 @@
 #define mb() barrier()
 #define rmb() barrier()
 #define wmb() barrier()
+#define mbdd() barrier()
+#define wmbdd() barrier()
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-mips/system.h linux-2.4.10.wmbdd/include/asm-mips/system.h
--- linux-2.4.10/include/asm-mips/system.h Sun Sep 9 10:43:01 2001
+++ linux-2.4.10.wmbdd/include/asm-mips/system.h Mon Oct 8 18:31:18 2001
@@ -152,6 +152,8 @@
 #define rmb() do { } while(0)
 #define wmb() wbflush()
 #define mb() wbflush()
+#define wmbdd() wbflush()
+#define mbdd() wbflush()
 
 #else /* CONFIG_CPU_HAS_WB */
 
@@ -167,6 +169,8 @@
         : "memory")
 #define rmb() mb()
 #define wmb() mb()
+#define wmbdd() mb()
+#define mbdd() mb()
 
 #endif /* CONFIG_CPU_HAS_WB */
 
@@ -174,10 +178,14 @@
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mbdd()
+#define smp_wmbdd() wmbdd()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 #endif
 
 #define set_mb(var, value) \
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-mips64/system.h linux-2.4.10.wmbdd/include/asm-mips64/system.h
--- linux-2.4.10/include/asm-mips64/system.h Wed Jul 4 11:50:39 2001
+++ linux-2.4.10.wmbdd/include/asm-mips64/system.h Mon Oct 8 18:31:18 2001
@@ -148,15 +148,21 @@
         : "memory")
 #define rmb() mb()
 #define wmb() mb()
+#define mbdd() mb()
+#define wmbdd() mb()
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mbdd()
+#define smp_wmbdd() wmbdd()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 #endif
 
 #define set_mb(var, value) \
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-parisc/system.h linux-2.4.10.wmbdd/include/asm-parisc/system.h
--- linux-2.4.10/include/asm-parisc/system.h Wed Dec 6 11:46:39 2000
+++ linux-2.4.10.wmbdd/include/asm-parisc/system.h Mon Oct 8 18:31:18 2001
@@ -51,6 +51,8 @@
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() rmb()
+#define smp_wmbdd() wmb()
 #else
 /* This is simply the barrier() macro from linux/kernel.h but when serial.c
  * uses tqueue.h uses smp_mb() defined using barrier(), linux/kernel.h
@@ -59,6 +61,8 @@
 #define smp_mb() __asm__ __volatile__("":::"memory");
 #define smp_rmb() __asm__ __volatile__("":::"memory");
 #define smp_wmb() __asm__ __volatile__("":::"memory");
+#define smp_mbdd() __asm__ __volatile__("":::"memory");
+#define smp_wmbdd() __asm__ __volatile__("":::"memory");
 #endif
 
 /* interrupt control */
@@ -122,6 +126,8 @@
 
 #define mb() __asm__ __volatile__ ("sync" : : :"memory")
 #define wmb() mb()
+#define mbdd() mb()
+#define wmbdd() mb()
 
 extern unsigned long __xchg(unsigned long, unsigned long *, int);
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-ppc/system.h linux-2.4.10.wmbdd/include/asm-ppc/system.h
--- linux-2.4.10/include/asm-ppc/system.h Tue Aug 28 06:58:33 2001
+++ linux-2.4.10.wmbdd/include/asm-ppc/system.h Mon Oct 8 18:31:18 2001
@@ -33,6 +33,8 @@
 #define mb() __asm__ __volatile__ ("sync" : : : "memory")
 #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb() __asm__ __volatile__ ("eieio" : : : "memory")
+#define mbdd() mb()
+#define wmbdd() wmb()
 
 #define set_mb(var, value) do { var = value; mb(); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
@@ -41,10 +43,14 @@
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mb()
+#define smp_wmbdd() wmb()
 #else
 #define smp_mb() __asm__ __volatile__("": : :"memory")
 #define smp_rmb() __asm__ __volatile__("": : :"memory")
 #define smp_wmb() __asm__ __volatile__("": : :"memory")
+#define smp_mbdd() __asm__ __volatile__("": : :"memory")
+#define smp_wmbdd() __asm__ __volatile__("": : :"memory")
 #endif /* CONFIG_SMP */
 
 #ifdef __KERNEL__
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-s390/system.h linux-2.4.10.wmbdd/include/asm-s390/system.h
--- linux-2.4.10/include/asm-s390/system.h Wed Jul 25 14:12:02 2001
+++ linux-2.4.10.wmbdd/include/asm-s390/system.h Mon Oct 8 18:31:18 2001
@@ -118,9 +118,13 @@
 #define mb() eieio()
 #define rmb() eieio()
 #define wmb() eieio()
+#define mbdd() mb()
+#define wmbdd() wmb()
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mb()
+#define smp_wmbdd() wmb()
 #define smp_mb__before_clear_bit() smp_mb()
 #define smp_mb__after_clear_bit() smp_mb()
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-s390x/system.h linux-2.4.10.wmbdd/include/asm-s390x/system.h
--- linux-2.4.10/include/asm-s390x/system.h Wed Jul 25 14:12:03 2001
+++ linux-2.4.10.wmbdd/include/asm-s390x/system.h Mon Oct 8 18:31:19 2001
@@ -131,9 +131,13 @@
 #define mb() eieio()
 #define rmb() eieio()
 #define wmb() eieio()
+#define mbdd() mb()
+#define wmbdd() wmb()
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mb()
+#define smp_wmbdd() wmb()
 #define smp_mb__before_clear_bit() smp_mb()
 #define smp_mb__after_clear_bit() smp_mb()
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-sh/system.h linux-2.4.10.wmbdd/include/asm-sh/system.h
--- linux-2.4.10/include/asm-sh/system.h Sat Sep 8 12:29:09 2001
+++ linux-2.4.10.wmbdd/include/asm-sh/system.h Mon Oct 8 18:31:19 2001
@@ -89,15 +89,21 @@
 #define mb() __asm__ __volatile__ ("": : :"memory")
 #define rmb() mb()
 #define wmb() __asm__ __volatile__ ("": : :"memory")
+#define mbdd() mb()
+#define wmbdd() wmb()
 
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mb()
+#define smp_wmbdd() wmb()
 #else
 #define smp_mb() barrier()
 #define smp_rmb() barrier()
 #define smp_wmb() barrier()
+#define smp_mbdd() barrier()
+#define smp_wmbdd() barrier()
 #endif
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-sparc/system.h linux-2.4.10.wmbdd/include/asm-sparc/system.h
--- linux-2.4.10/include/asm-sparc/system.h Tue Oct 3 09:24:41 2000
+++ linux-2.4.10.wmbdd/include/asm-sparc/system.h Mon Oct 8 18:31:19 2001
@@ -278,11 +278,15 @@
 #define mb() __asm__ __volatile__ ("" : : : "memory")
 #define rmb() mb()
 #define wmb() mb()
+#define mbdd() mb()
+#define wmbdd() wmb()
 #define set_mb(__var, __value) do { __var = __value; mb(); } while(0)
 #define set_wmb(__var, __value) set_mb(__var, __value)
 #define smp_mb() __asm__ __volatile__("":::"memory");
 #define smp_rmb() __asm__ __volatile__("":::"memory");
 #define smp_wmb() __asm__ __volatile__("":::"memory");
+#define smp_mbdd() __asm__ __volatile__("":::"memory");
+#define smp_wmbdd() __asm__ __volatile__("":::"memory");
 
 #define nop() __asm__ __volatile__ ("nop");
 
diff -urN -X /home/mckenney/dontdiff linux-2.4.10/include/asm-sparc64/system.h linux-2.4.10.wmbdd/include/asm-sparc64/system.h
--- linux-2.4.10/include/asm-sparc64/system.h Fri Sep 7 11:01:20 2001
+++ linux-2.4.10.wmbdd/include/asm-sparc64/system.h Wed Oct 10 16:43:21 2001
@@ -100,6 +100,8 @@
         membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad");
 #define rmb() membar("#LoadLoad")
 #define wmb() membar("#StoreStore")
+#define mbdd() mb()
+#define wmbdd() wmb()
 #define set_mb(__var, __value) \
         do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
 #define set_wmb(__var, __value) \
@@ -109,10 +111,14 @@
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
+#define smp_mbdd() mbdd()
+#define smp_wmbdd() wmbdd()
 #else
 #define smp_mb() __asm__ __volatile__("":::"memory");
 #define smp_rmb() __asm__ __volatile__("":::"memory");
 #define smp_wmb() __asm__ __volatile__("":::"memory");
+#define smp_mbdd() __asm__ __volatile__("":::"memory");
+#define smp_wmbdd() __asm__ __volatile__("":::"memory");
 #endif
 
 #define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
-