[PATCH 206/208] x86/fpu: Add CONFIG_X86_DEBUG_FPU=y FPU debugging code

From: Ingo Molnar
Date: Tue May 05 2015 - 14:01:36 EST


There are various internal FPU state debugging checks that never
trigger in practice, but which are useful for FPU code development.

Separate these out into CONFIG_X86_DEBUG_FPU=y, and also add a
couple of new ones.

The size difference is about 0.5K of code on defconfig:

text data bss filename
15028906 2578816 1638400 vmlinux
15029430 2578816 1638400 vmlinux

( Keep this enabled by default until the new FPU code is debugged. )

Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: Fenghua Yu <fenghua.yu@xxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
arch/x86/Kconfig.debug | 12 ++++++++++++
arch/x86/include/asm/fpu/internal.h | 17 ++++++++++++++++-
arch/x86/kernel/fpu/core.c | 18 +++++++++---------
arch/x86/kernel/fpu/init.c | 12 +++++++++++-
arch/x86/kernel/fpu/xstate.c | 11 ++++++++++-
5 files changed, 58 insertions(+), 12 deletions(-)

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 72484a645f05..2fd3ebbb4e33 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -332,4 +332,16 @@ config X86_DEBUG_STATIC_CPU_HAS

If unsure, say N.

+config X86_DEBUG_FPU
+ bool "Debug the x86 FPU code"
+ depends on DEBUG_KERNEL
+ default y
+ ---help---
+ If this option is enabled then there will be extra sanity
+ checks and (boot time) debug printouts added to the kernel.
+ This debugging adds some small amount of runtime overhead
+ to the kernel.
+
+ If unsure, say N.
+
endmenu
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index a4c1b7dbf70e..d2a281bd5f45 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -59,6 +59,15 @@ extern void fpu__clear(struct fpu *fpu);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);

+/*
+ * Debugging facility:
+ */
+#ifdef CONFIG_X86_DEBUG_FPU
+# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
+#else
+# define WARN_ON_FPU(x) ({ (void)(x); 0; })
+#endif
+
DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
@@ -296,6 +305,8 @@ static inline void __fpregs_deactivate_hw(void)
/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
+ WARN_ON_FPU(!fpu->fpregs_active);
+
fpu->fpregs_active = 0;
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
@@ -303,6 +314,8 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
+ WARN_ON_FPU(fpu->fpregs_active);
+
fpu->fpregs_active = 1;
this_cpu_write(fpu_fpregs_owner_ctx, fpu);
}
@@ -433,8 +446,10 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
if (fpu_switch.preload) {
- if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
+ if (unlikely(copy_fpstate_to_fpregs(new_fpu))) {
+ WARN_ON_FPU(1);
fpu__clear(new_fpu);
+ }
}
}

diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 421a98103820..9df2a09f1bbe 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -38,13 +38,13 @@ DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
- WARN_ON(this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
- WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu));
+ WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
this_cpu_write(in_kernel_fpu, false);
}

@@ -109,7 +109,7 @@ void __kernel_fpu_begin(void)
{
struct fpu *fpu = &current->thread.fpu;

- WARN_ON_ONCE(!irq_fpu_usable());
+ WARN_ON_FPU(!irq_fpu_usable());

kernel_fpu_disable();

@@ -127,7 +127,7 @@ void __kernel_fpu_end(void)
struct fpu *fpu = &current->thread.fpu;

if (fpu->fpregs_active) {
- if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
+ if (WARN_ON_FPU(copy_fpstate_to_fpregs(fpu)))
fpu__clear(fpu);
} else {
__fpregs_deactivate_hw();
@@ -187,7 +187,7 @@ EXPORT_SYMBOL_GPL(irq_ts_restore);
*/
void fpu__save(struct fpu *fpu)
{
- WARN_ON(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);

preempt_disable();
if (fpu->fpregs_active) {
@@ -233,7 +233,7 @@ EXPORT_SYMBOL_GPL(fpstate_init);
*/
static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- WARN_ON(src_fpu != &current->thread.fpu);
+ WARN_ON_FPU(src_fpu != &current->thread.fpu);

/*
* Don't let 'init optimized' areas of the XSAVE area
@@ -284,7 +284,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
*/
void fpu__activate_curr(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu);
+ WARN_ON_FPU(fpu != &current->thread.fpu);

if (!fpu->fpstate_active) {
fpstate_init(&fpu->state);
@@ -321,7 +321,7 @@ EXPORT_SYMBOL_GPL(fpu__activate_curr);
*/
void fpu__activate_stopped(struct fpu *child_fpu)
{
- WARN_ON_ONCE(child_fpu == &current->thread.fpu);
+ WARN_ON_FPU(child_fpu == &current->thread.fpu);

if (child_fpu->fpstate_active) {
child_fpu->last_cpu = -1;
@@ -407,7 +407,7 @@ static inline void copy_init_fpstate_to_fpregs(void)
*/
void fpu__clear(struct fpu *fpu)
{
- WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+ WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

if (!use_eager_fpu()) {
/* FPU state will be reallocated lazily at the first use. */
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index a9e506a99a83..e9f1d6e62146 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -143,6 +143,11 @@ EXPORT_SYMBOL_GPL(xstate_size);
*/
static void __init fpu__init_system_xstate_size_legacy(void)
{
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
/*
* Note that xstate_size might be overwriten later during
* fpu__init_system_xstate().
@@ -214,7 +219,12 @@ __setup("eagerfpu=", eager_fpu_setup);
*/
static void __init fpu__init_system_ctx_switch(void)
{
- WARN_ON(current->thread.fpu.fpstate_active);
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
+ WARN_ON_FPU(current->thread.fpu.fpstate_active);
current_thread_info()->status = 0;

/* Auto enable eagerfpu for xsaveopt */
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 201f08feb259..5724098adf1b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -262,6 +262,11 @@ static void __init setup_xstate_comp(void)
*/
static void __init setup_init_fpu_buf(void)
{
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;
+
if (!cpu_has_xsave)
return;

@@ -317,6 +322,10 @@ static void __init init_xstate_size(void)
void __init fpu__init_system_xstate(void)
{
unsigned int eax, ebx, ecx, edx;
+ static int on_boot_cpu = 1;
+
+ WARN_ON_FPU(!on_boot_cpu);
+ on_boot_cpu = 0;

if (!cpu_has_xsave) {
pr_info("x86/fpu: Legacy x87 FPU detected.\n");
@@ -324,7 +333,7 @@ void __init fpu__init_system_xstate(void)
}

if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
- WARN(1, "x86/fpu: XSTATE_CPUID missing!\n");
+ WARN_ON_FPU(1);
return;
}

--
2.1.0

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/