[PATCH 1/3] x86 msr: msr goto extension support

From: Kan Liang
Date: Thu Jul 31 2014 - 13:30:30 EST


From: Kan Liang <kan.liang@xxxxxxxxx>

Currently, {rd,wr}msrl_safe can handle the exception caused by accessing a
specific MSR.
However, it introduces an extra conditional branch for testing errors, which
hurts the performance of the "fast" path.
The newly implemented {rd,wr}msrl_goto macros not only handle the exception
caused by accessing a specific MSR, but also take advantage of the asm goto
extension to avoid that performance impact.

The asm goto extension is supported by GCC 4.5 and later versions. If the
compiler doesn't support the goto extension, _safe is used in place of _goto.
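
For example, a caller can keep its error handling completely out of line
(hypothetical usage sketch, not part of this patch; the MSR chosen here is
arbitrary):

	static u64 read_aperf(void)
	{
		u64 val;

		/* On a faulting access, control jumps straight to fail: */
		rdmsrl_goto(MSR_IA32_APERF, val, fail);
		return val;

	fail:
		/* Slow path, reached only if the rdmsr faulted */
		return 0;
	}

With rdmsrl_safe() the same caller would need an explicit
"if (rdmsrl_safe(...))" test on every call, even when no fault occurs.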

Signed-off-by: Kan Liang <kan.liang@xxxxxxxxx>
---
arch/x86/include/asm/msr.h | 60 +++++++++++++++++++++++++++++++++++++++++
arch/x86/include/asm/paravirt.h | 18 +++++++++++++
2 files changed, 78 insertions(+)

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index de36f22..55438da 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -203,6 +203,66 @@ do { \

#define rdtscpll(val, aux) (val) = native_read_tscp(&(aux))

+#ifdef CC_HAVE_ASM_GOTO
+
+/*
+ * The _goto versions are rdmsrl/wrmsrl with exception handling.
+ * The advantage over _safe is that a fault jumps directly to the
+ * exception handling code, so there is no error test in the "fast" path.
+ *
+ * Since asm goto does not support outputs, protect the output
+ * registers with clobbers and read them back immediately.
+ */
+#define rdmsrl_goto(msr, result, fail_label) \
+do { \
+ DECLARE_ARGS(val, low, high); \
+ asm_volatile_goto("2: rdmsr\n" \
+ "1:\n\t" \
+ _ASM_EXTABLE(2b, %l[fail_label]) \
+ : /* No outputs. */ \
+ : "c" (msr) \
+ : "%rax", "%rdx" \
+ : fail_label); \
+ asm volatile ("" \
+ : EAX_EDX_RET(val, low, high) \
+ : ); \
+ result = EAX_EDX_VAL(val, low, high); \
+} while (0)
+
+#define wrmsrl_goto(msr, val, fail_label) \
+do { \
+ unsigned low, high; \
+ low = (u32)(val); \
+ high = (u32)((val) >> 32); \
+ asm_volatile_goto("2: wrmsr\n" \
+ "1:\n\t" \
+ _ASM_EXTABLE(2b, %l[fail_label]) \
+ : /* No outputs. */ \
+ : "c" (msr), "a" (low), "d" (high) \
+ : "memory" \
+ : fail_label); \
+} while (0)
+
+#else /* CC_HAVE_ASM_GOTO */
+
+/*
+ * If the compiler doesn't support asm goto, fall back to _safe.
+ */
+#define rdmsrl_goto(msr, result, fail_label) \
+do { \
+ if (rdmsrl_safe(msr, &result)) \
+ goto fail_label; \
+} while (0)
+
+#define wrmsrl_goto(msr, result, fail_label) \
+do { \
+ if (wrmsr_safe((msr), (u32)(result), \
+ (u32)((result) >> 32))) \
+ goto fail_label; \
+} while (0)
+
+#endif /* CC_HAVE_ASM_GOTO */
+
#endif /* !CONFIG_PARAVIRT */

#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val), \
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cd6e161..1fa18a1 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -174,6 +174,24 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
return err;
}

+/*
+ * TODO: paravirt does not support _goto yet.
+ * The fallback below does not affect current performance, since
+ * rdmsrl/wrmsrl already end up calling the _safe versions under paravirt.
+ */
+#define rdmsrl_goto(msr, result, fail_label) \
+do { \
+ if (rdmsrl_safe(msr, &result)) \
+ goto fail_label; \
+} while (0)
+
+#define wrmsrl_goto(msr, result, fail_label) \
+do { \
+ if (wrmsr_safe((msr), (u32)(result), \
+ (u32)((result) >> 32))) \
+ goto fail_label; \
+} while (0)
+
static inline u64 paravirt_read_tsc(void)
{
return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
--
1.8.3.1
