Re: [PATCH v3 9/9] KVM: x86: Add XSAVE Support for Architectural LBRs

From: Sean Christopherson
Date: Wed Mar 03 2021 - 14:07:17 EST


On Wed, Mar 03, 2021, Like Xu wrote:
> diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
> index 034708a3df20..ec4593e0ee6d 100644
> --- a/arch/x86/kvm/vmx/vmx.c
> +++ b/arch/x86/kvm/vmx/vmx.c
> @@ -7268,6 +7268,8 @@ static __init void vmx_set_cpu_caps(void)
>  	supported_xss = 0;
>  	if (!cpu_has_vmx_xsaves())
>  		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);
> +	else if (kvm_cpu_cap_has(X86_FEATURE_ARCH_LBR))
> +		supported_xss |= XFEATURE_MASK_LBR;
>
>  	/* CPUID 0x80000001 */
>  	if (!cpu_has_vmx_rdtscp())
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index d773836ceb7a..bca2e318ff24 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -10433,6 +10433,8 @@ int kvm_arch_hardware_setup(void *opaque)
>
>  	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
>  		supported_xss = 0;
> +	else
> +		supported_xss &= host_xss;

Not your fault by any means, but I would prefer to have matching logic for XSS
and XCR0. The existing clearing of supported_xss here is pointless. E.g. I'd
prefer something like the following (the net effect is sketched after the diff),
though Paolo may have a different opinion.

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6d7e760fdfa0..c781034463e5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7244,12 +7244,15 @@ static __init void vmx_set_cpu_caps(void)
 		kvm_cpu_cap_clear(X86_FEATURE_INVPCID);
 	if (vmx_pt_mode_is_host_guest())
 		kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
+	if (!cpu_has_vmx_arch_lbr()) {
+		kvm_cpu_cap_clear(X86_FEATURE_ARCH_LBR);
+		supported_xss &= ~XFEATURE_MASK_LBR;
+	}
 
 	if (vmx_umip_emulated())
 		kvm_cpu_cap_set(X86_FEATURE_UMIP);
 
 	/* CPUID 0xD.1 */
-	supported_xss = 0;
 	if (!cpu_has_vmx_xsaves())
 		kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7b0adebec1ef..5f9eb1f5b840 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -205,6 +205,8 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
 				| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
 				| XFEATURE_MASK_PKRU)
 
+#define KVM_SUPPORTED_XSS	XFEATURE_MASK_LBR
+
 u64 __read_mostly host_efer;
 EXPORT_SYMBOL_GPL(host_efer);

@@ -8037,6 +8039,11 @@ int kvm_arch_init(void *opaque)
 		supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
 	}
 
+	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
+		rdmsrl(MSR_IA32_XSS, host_xss);
+		supported_xss = host_xss & KVM_SUPPORTED_XSS;
+	}
+
 	if (pi_inject_timer == -1)
 		pi_inject_timer = housekeeping_enabled(HK_FLAG_TIMER);
 #ifdef CONFIG_X86_64
#ifdef CONFIG_X86_64
@@ -10412,9 +10419,6 @@ int kvm_arch_hardware_setup(void *opaque)

 	rdmsrl_safe(MSR_EFER, &host_efer);
 
-	if (boot_cpu_has(X86_FEATURE_XSAVES))
-		rdmsrl(MSR_IA32_XSS, host_xss);
-
 	r = ops->hardware_setup();
 	if (r != 0)
 		return r;
@@ -10422,9 +10426,6 @@ int kvm_arch_hardware_setup(void *opaque)
 	memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
 	kvm_ops_static_call_update();
 
-	if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
-		supported_xss = 0;
-
 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
 	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
 #undef __kvm_cpu_cap_has
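
To spell out the net effect: host_xss gets read exactly once, in kvm_arch_init(),
and masked with KVM's whitelist, mirroring the existing XCR0 handling; vendor
code then only _clears_ bits it can't virtualize. Roughly, kvm_arch_init() would
end up looking like this (a sketch from memory, the surrounding context may not
match your tree exactly):

	/* Snapshot host XCR0 and advertise only the features KVM supports. */
	if (boot_cpu_has(X86_FEATURE_XSAVE)) {
		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
		supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
	}

	/* Same pattern for XSS: snapshot once, apply KVM's mask. */
	if (boot_cpu_has(X86_FEATURE_XSAVES)) {
		rdmsrl(MSR_IA32_XSS, host_xss);
		supported_xss = host_xss & KVM_SUPPORTED_XSS;
	}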