Re: RCU scaling on large systems

From: William Lee Irwin III
Date: Sat May 01 2004 - 16:21:53 EST


On Sat, May 01, 2004 at 07:08:05AM -0500, Jack Steiner wrote:
> On a 512p idle 2.6.5 system, each cpu spends ~6% of the time in the kernel
> RCU code. The time is spent contending for shared cache lines.

Would something like this help with the cacheline contention? Instead of
all 512 cpus doing read-modify-writes on the one shared rcu_cpu_mask, it
uses the per_cpu data areas to hold per-cpu booleans for needing switches,
so a cpu noting its own quiescent state only writes a cpu-local cacheline.
Untested/uncompiled.

The global lock (rcu_ctrlblk.mutex) is unfortunately still there.
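
In outline (a sketch only; the real interfaces are in the patch below),
the hot "this cpu passed a quiescent state" path goes from a
read-modify-write on one shared cpumask cacheline to a store into the
clearing cpu's own per_cpu area; only the batch-completion check still
touches remote lines, and only read-only:

#include <linux/percpu.h>
#include <asm/atomic.h>

struct rcu_data {
	/* ... existing per-cpu RCU state ... */
	atomic_t need_switch;	/* this cpu still owes a quiescent state */
};
DEFINE_PER_CPU(struct rcu_data, rcu_data);

/* hot path: cpu-local store, no shared cacheline to bounce */
static inline void rcu_clear_need_switch(int cpu)
{
	atomic_set(&per_cpu(rcu_data, cpu).need_switch, 0);
}

/* completion check: read-only scan of remote lines, O(NR_CPUS) */
static inline int rcu_any_cpu_need_switch(void)
{
	int cpu;
	for_each_online_cpu(cpu) {
		if (atomic_read(&per_cpu(rcu_data, cpu).need_switch))
			return 1;
	}
	return 0;
}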


-- wli

Index: wli-2.6.6-rc3-mm1/include/linux/rcupdate.h
===================================================================
--- wli-2.6.6-rc3-mm1.orig/include/linux/rcupdate.h 2004-04-03 19:36:52.000000000 -0800
+++ wli-2.6.6-rc3-mm1/include/linux/rcupdate.h 2004-05-01 14:15:09.000000000 -0700
@@ -41,6 +41,9 @@
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
+#include <asm/atomic.h>
+
+#define RCU_CPU_SCATTER (NR_CPUS > 128)

/**
* struct rcu_head - callback structure for use with RCU
@@ -68,8 +71,10 @@
spinlock_t mutex; /* Guard this struct */
long curbatch; /* Current batch number. */
long maxbatch; /* Max requested batch number. */
+#if !RCU_CPU_SCATTER
cpumask_t rcu_cpu_mask; /* CPUs that need to switch in order */
/* for current batch to proceed. */
+#endif
};

/* Is batch a before batch b ? */
@@ -96,6 +101,9 @@
long batch; /* Batch # for current RCU batch */
struct list_head nxtlist;
struct list_head curlist;
+#if RCU_CPU_SCATTER
+ atomic_t need_switch;
+#endif
};

DECLARE_PER_CPU(struct rcu_data, rcu_data);
@@ -109,13 +117,39 @@

#define RCU_QSCTR_INVALID 0

+#if RCU_CPU_SCATTER
+#define rcu_need_switch(cpu) (!!atomic_read(&per_cpu(rcu_data, cpu).need_switch))
+#define rcu_clear_need_switch(cpu) atomic_set(&per_cpu(rcu_data, cpu).need_switch, 0)
+static inline int rcu_any_cpu_need_switch(void)
+{
+ int cpu;
+ for_each_online_cpu(cpu) {
+ if (rcu_need_switch(cpu))
+ return 1;
+ }
+ return 0;
+}
+
+static inline void rcu_set_need_switch_cpumask(cpumask_t cpumask)
+{
+ int cpu;
+ for_each_cpu_mask(cpu, cpumask)
+ atomic_set(&per_cpu(rcu_data, cpu).need_switch, 1);
+}
+#else
+#define rcu_need_switch(cpu) cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask)
+#define rcu_clear_need_switch(cpu) cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask)
+#define rcu_any_cpu_need_switch() (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
+#define rcu_set_need_switch_cpumask(x) do { rcu_ctrlblk.rcu_cpu_mask = (x); } while (0)
+#endif
+
static inline int rcu_pending(int cpu)
{
if ((!list_empty(&RCU_curlist(cpu)) &&
rcu_batch_before(RCU_batch(cpu), rcu_ctrlblk.curbatch)) ||
(list_empty(&RCU_curlist(cpu)) &&
!list_empty(&RCU_nxtlist(cpu))) ||
- cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
+ rcu_need_switch(cpu))
return 1;
else
return 0;
Index: wli-2.6.6-rc3-mm1/kernel/rcupdate.c
===================================================================
--- wli-2.6.6-rc3-mm1.orig/kernel/rcupdate.c 2004-04-30 15:05:53.000000000 -0700
+++ wli-2.6.6-rc3-mm1/kernel/rcupdate.c 2004-05-01 13:47:05.000000000 -0700
@@ -46,10 +46,19 @@
#include <linux/cpu.h>

/* Definition for rcupdate control block. */
-struct rcu_ctrlblk rcu_ctrlblk =
- { .mutex = SPIN_LOCK_UNLOCKED, .curbatch = 1,
- .maxbatch = 1, .rcu_cpu_mask = CPU_MASK_NONE };
+struct rcu_ctrlblk rcu_ctrlblk = {
+ .mutex = SPIN_LOCK_UNLOCKED,
+ .curbatch = 1,
+ .maxbatch = 1,
+#if !RCU_CPU_SCATTER
+ .rcu_cpu_mask = CPU_MASK_NONE
+#endif
+};
+#if RCU_CPU_SCATTER
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { .need_switch = ATOMIC_INIT(0), };
+#else
DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+#endif

/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
@@ -109,13 +118,14 @@
rcu_ctrlblk.maxbatch = newbatch;
}
if (rcu_batch_before(rcu_ctrlblk.maxbatch, rcu_ctrlblk.curbatch) ||
- !cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
+ rcu_any_cpu_need_switch()) {
return;
}
/* Can't change, since spin lock held. */
active = idle_cpu_mask;
cpus_complement(active);
- cpus_and(rcu_ctrlblk.rcu_cpu_mask, cpu_online_map, active);
+ cpus_and(active, cpu_online_map, active);
+ rcu_set_need_switch_cpumask(active);
}

/*
@@ -127,7 +137,7 @@
{
int cpu = smp_processor_id();

- if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
+ if (!rcu_need_switch(cpu))
return;

/*
@@ -143,12 +153,12 @@
return;

spin_lock(&rcu_ctrlblk.mutex);
- if (!cpu_isset(cpu, rcu_ctrlblk.rcu_cpu_mask))
+ if (!rcu_need_switch(cpu))
goto out_unlock;

- cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
+ rcu_clear_need_switch(cpu);
RCU_last_qsctr(cpu) = RCU_QSCTR_INVALID;
- if (!cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
+ if (rcu_any_cpu_need_switch())
goto out_unlock;

rcu_ctrlblk.curbatch++;
@@ -186,11 +196,11 @@
* it here
*/
spin_lock_irq(&rcu_ctrlblk.mutex);
- if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask))
+ if (!rcu_any_cpu_need_switch())
goto unlock;

- cpu_clear(cpu, rcu_ctrlblk.rcu_cpu_mask);
- if (cpus_empty(rcu_ctrlblk.rcu_cpu_mask)) {
+ rcu_clear_need_switch(cpu);
+ if (!rcu_any_cpu_need_switch()) {
rcu_ctrlblk.curbatch++;
/* We may avoid calling start batch if
* we are starting the batch only
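
One caveat with the scatter path: rcu_any_cpu_need_switch() turns the old
O(1) cpus_empty() test into a walk over every online cpu's per_cpu area,
still under the global lock. If that scan itself hurts at 512p, a further
refinement (hypothetical, not in the patch above) would be to keep a count
of cpus still owing a quiescent state, so batch completion stays O(1) and
each cpu dirties the shared counter line only once per batch:

/*
 * Hypothetical refinement, not part of the patch above. Everything
 * here runs under rcu_ctrlblk.mutex, so the ordering stays simple.
 */
static atomic_t rcu_cpus_pending = ATOMIC_INIT(0);

static inline void rcu_set_need_switch_cpumask(cpumask_t cpumask)
{
	int cpu;
	for_each_cpu_mask(cpu, cpumask) {
		atomic_set(&per_cpu(rcu_data, cpu).need_switch, 1);
		atomic_inc(&rcu_cpus_pending);
	}
}

static inline void rcu_clear_need_switch(int cpu)
{
	/* decrement only on a 1 -> 0 transition */
	if (atomic_read(&per_cpu(rcu_data, cpu).need_switch)) {
		atomic_set(&per_cpu(rcu_data, cpu).need_switch, 0);
		atomic_dec(&rcu_cpus_pending);
	}
}

#define rcu_any_cpu_need_switch() (atomic_read(&rcu_cpus_pending) != 0)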