[tip:core/locking] x86/smp: Limit spinlock delay on virtual machines

From: tip-bot for Rik van Riel
Date: Wed Feb 13 2013 - 07:12:24 EST


Commit-ID: 12b682864a336d72bfd507244649bd1066d90e43
Gitweb: http://git.kernel.org/tip/12b682864a336d72bfd507244649bd1066d90e43
Author: Rik van Riel <riel@xxxxxxxxxx>
AuthorDate: Thu, 7 Feb 2013 16:24:49 -0500
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Wed, 13 Feb 2013 09:07:21 +0100

x86/smp: Limit spinlock delay on virtual machines

Modern Intel and AMD CPUs will trap to the host when the guest
is spinning on a spinlock, allowing the host to schedule in
something else.

This effectively means the host is taking care of spinlock
backoff for virtual machines. It also means that doing the
spinlock backoff in the guest anyway can lead to totally
unpredictable results, extremely large backoffs, and
performance regressions.

To prevent those problems, we limit the spinlock backoff
delay, when running in a virtual machine, to a small value.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxx>
Reviewed-by: Raghavendra K T <raghavendra.kt@xxxxxxxxxxxxxxxxxx>
Cc: aquini@xxxxxxxxxx
Cc: eric.dumazet@xxxxxxxxx
Cc: lwoodman@xxxxxxxxxx
Cc: knoel@xxxxxxxxxx
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <a.p.zijlstra@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/20130207162449.0292685a@xxxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Tested-by: Chegu Vinod <chegu_vinod@xxxxxx>
---
arch/x86/include/asm/processor.h | 2 ++
arch/x86/kernel/cpu/hypervisor.c | 2 ++
arch/x86/kernel/smp.c | 21 +++++++++++++++++++--
3 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 888184b..2856972 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -158,9 +158,11 @@ extern __u32 cpu_caps_set[NCAPINTS];
#ifdef CONFIG_SMP
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu) per_cpu(cpu_info, cpu)
+extern void init_guest_spinlock_delay(void);
#else
#define cpu_info boot_cpu_data
#define cpu_data(cpu) boot_cpu_data
+static inline void init_guest_spinlock_delay(void) {}
#endif

extern const struct seq_operations cpuinfo_op;
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index a8f8fa9..4a53724 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -76,6 +76,8 @@ void __init init_hypervisor_platform(void)

init_hypervisor(&boot_cpu_data);

+ init_guest_spinlock_delay();
+
if (x86_hyper->init_platform)
x86_hyper->init_platform();
}
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 8e94469..73be656 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -116,8 +116,25 @@ static bool smp_no_nmi_ipi = false;
#define DELAY_SHIFT 8
#define DELAY_FIXED_1 (1<<DELAY_SHIFT)
#define MIN_SPINLOCK_DELAY (1 * DELAY_FIXED_1)
-#define MAX_SPINLOCK_DELAY (16000 * DELAY_FIXED_1)
+#define MAX_SPINLOCK_DELAY_NATIVE (16000 * DELAY_FIXED_1)
+#define MAX_SPINLOCK_DELAY_GUEST (16 * DELAY_FIXED_1)
#define DELAY_HASH_SHIFT 6
+
+/*
+ * Modern Intel and AMD CPUs tell the hypervisor when a guest is
+ * spinning excessively on a spinlock. The hypervisor will then
+ * schedule something else, effectively taking care of the backoff
+ * for us. Doing our own backoff on top of the hypervisor's pause
+ * loop exit handling can lead to excessively long delays, and
+ * performance degradations. Limit the spinlock delay in virtual
+ * machines to a smaller value. Called from init_hypervisor_platform().
+ */
+static int __read_mostly max_spinlock_delay = MAX_SPINLOCK_DELAY_NATIVE;
+void __init init_guest_spinlock_delay(void)
+{
+ max_spinlock_delay = MAX_SPINLOCK_DELAY_GUEST;
+}
+
struct delay_entry {
u32 hash;
u32 delay;
@@ -171,7 +188,7 @@ void ticket_spin_lock_wait(arch_spinlock_t *lock, struct __raw_tickets inc)
}

/* Aggressively increase delay, to minimize lock accesses. */
- if (delay < MAX_SPINLOCK_DELAY)
+ if (delay < max_spinlock_delay)
delay += DELAY_FIXED_1 / 7;

loops = (delay * waiters_ahead) >> DELAY_SHIFT;
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/