Re: [PATCH v2 1/4] x86/fpu: Add kernel_fpu_begin_mask() to selectively initialize state

From: Sean Christopherson
Date: Tue Jan 19 2021 - 14:37:26 EST


On Tue, Jan 19, 2021, Andy Lutomirski wrote:
> diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
> index eb86a2b831b1..d4a71596c41e 100644
> --- a/arch/x86/kernel/fpu/core.c
> +++ b/arch/x86/kernel/fpu/core.c
> @@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
> }
> EXPORT_SYMBOL(copy_fpregs_to_fpstate);
>
> -void kernel_fpu_begin(void)
> +void kernel_fpu_begin_mask(unsigned int kfpu_mask)
> {
> preempt_disable();
>
> @@ -141,13 +141,18 @@ void kernel_fpu_begin(void)
> }
> __cpu_invalidate_fpregs_state();
>
> - if (boot_cpu_has(X86_FEATURE_XMM))
> - ldmxcsr(MXCSR_DEFAULT);
> + /* Put sane initial values into the control registers. */
> + if (likely(kfpu_mask & KFPU_MXCSR)) {
> + if (boot_cpu_has(X86_FEATURE_XMM))
> + ldmxcsr(MXCSR_DEFAULT);
> + }
>
> - if (boot_cpu_has(X86_FEATURE_FPU))
> - asm volatile ("fninit");
> + if (unlikely(kfpu_mask & KFPU_387)) {
> + if (boot_cpu_has(X86_FEATURE_FPU))
> + asm volatile ("fninit");
> + }

Why not collapse each nested pair into a single if statement? Easier on the eyes (IMO),
and would generate a smaller diff.

if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
ldmxcsr(MXCSR_DEFAULT);

if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
asm volatile ("fninit");

> }
> -EXPORT_SYMBOL_GPL(kernel_fpu_begin);
> +EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
>
> void kernel_fpu_end(void)
> {
> --
> 2.29.2
>