Re: [PATCH AUTOSEL for 4.14 12/67] KVM: x86: add support for emulating UMIP

From: Paolo Bonzini
Date: Thu Mar 08 2018 - 01:46:50 EST

----- Original Message -----
> From: "Sasha Levin" <Alexander.Levin@xxxxxxxxxxxxx>
> To: linux-kernel@xxxxxxxxxxxxxxx, stable@xxxxxxxxxxxxxxx
> Cc: "Paolo Bonzini" <pbonzini@xxxxxxxxxx>, "Sasha Levin" <Alexander.Levin@xxxxxxxxxxxxx>
> Sent: Thursday, March 8, 2018 5:57:36 AM
> Subject: [PATCH AUTOSEL for 4.14 12/67] KVM: x86: add support for emulating UMIP
>
> From: Paolo Bonzini <pbonzini@xxxxxxxxxx>
>
> [ Upstream commit 66336cab3531d3325ebde36a04725dddd0c42cb5 ]
>
> The User-Mode Instruction Prevention feature present in recent Intel
> processors prevents a group of instructions (sgdt, sidt, sldt, smsw, and
> str) from being executed with CPL > 0; if one of them is executed, a
> general protection fault is issued.
>
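As a side note, this behaviour is easy to observe from guest user space.
A minimal test sketch (illustrative only; it assumes the guest kernel does
not itself emulate sgdt/sidt/smsw for user space) could look like:

/*
 * Illustrative sketch: execute the five UMIP-affected instructions at
 * CPL 3 and report whether each one raises #GP (seen as SIGSEGV).
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf env;

static void on_sigsegv(int sig)
{
	(void)sig;
	siglongjmp(env, 1);
}

#define TRY(name, insn)						\
	do {							\
		if (sigsetjmp(env, 1) == 0) {			\
			insn;					\
			printf("%-4s executed\n", name);	\
		} else {					\
			printf("%-4s faulted (#GP)\n", name);	\
		}						\
	} while (0)

int main(void)
{
	struct { unsigned short limit; unsigned long base; }
		__attribute__((packed)) dt;
	unsigned short sel;
	unsigned long msw;
	struct sigaction sa = { .sa_handler = on_sigsegv };

	sigaction(SIGSEGV, &sa, NULL);

	TRY("sgdt", asm volatile("sgdt %0" : "=m"(dt)));
	TRY("sidt", asm volatile("sidt %0" : "=m"(dt)));
	TRY("sldt", asm volatile("sldt %0" : "=r"(sel)));
	TRY("str",  asm volatile("str %0"  : "=r"(sel)));
	TRY("smsw", asm volatile("smsw %0" : "=r"(msw)));
	return 0;
}

Compiled with gcc and run in the guest, every line should report a fault
once UMIP is both advertised and enforced.
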
> UMIP instructions in general are also able to trigger vmexits, so we can
> actually emulate UMIP on older processors. This commit sets up the
> infrastructure so that kvm-intel.ko and kvm-amd.ko can set the UMIP
> feature bit for CPUID even if the feature is not actually available
> in hardware.
>
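Whether the bit actually reaches the guest is easy to check from guest
user space by reading CPUID leaf 7; a minimal sketch, with UMIP being
bit 2 of ECX in CPUID.(EAX=7,ECX=0):

/*
 * Minimal sketch: report whether the (possibly emulated) UMIP bit is
 * advertised to the guest in CPUID.(EAX=7,ECX=0):ECX[2].
 */
#include <stdio.h>

int main(void)
{
	unsigned int eax = 7, ebx, ecx = 0, edx;

	asm volatile("cpuid"
		     : "+a"(eax), "=b"(ebx), "+c"(ecx), "=d"(edx));

	printf("CPUID.7.0:ECX = 0x%08x, UMIP %s\n",
	       ecx, (ecx & (1u << 2)) ? "advertised" : "not advertised");
	return 0;
}
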
> Reviewed-by: Wanpeng Li <wanpeng.li@xxxxxxxxxxx>
> Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
> Signed-off-by: Sasha Levin <alexander.levin@xxxxxxxxxxxxx>

This is not enough (there were a couple of bugs, plus you have not
added the changes needed to handle SLDT, STR, etc. in the
emulator). But since this is a feature, why is it being backported
to 4.14, especially without any testing??
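
For reference, the emulator-side piece amounts to a CR4.UMIP / CPL check
in the handlers for sgdt, sidt, sldt, smsw and str. A rough sketch of the
shape (hypothetical helper name, not the actual upstream follow-ups):

/*
 * Rough sketch only: emulated UMIP means the instruction handlers must
 * inject #GP when CR4.UMIP is set and the guest is running at CPL > 0.
 */
static int check_umip(struct x86_emulate_ctxt *ctxt)
{
	if ((ctxt->ops->get_cr(ctxt, 4) & X86_CR4_UMIP) &&
	    ctxt->ops->cpl(ctxt) > 0)
		return emulate_gp(ctxt, 0);

	return X86EMUL_CONTINUE;
}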

Thanks,

Paolo

> ---
> arch/x86/include/asm/kvm_host.h | 1 +
> arch/x86/kvm/cpuid.c | 2 ++
> arch/x86/kvm/svm.c | 6 ++++++
> arch/x86/kvm/vmx.c | 6 ++++++
> 4 files changed, 15 insertions(+)
>
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 4f8b80199672..52ecf9b2f61e 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -1004,6 +1004,7 @@ struct kvm_x86_ops {
> void (*handle_external_intr)(struct kvm_vcpu *vcpu);
> bool (*mpx_supported)(void);
> bool (*xsaves_supported)(void);
> + bool (*umip_emulated)(void);
>
> int (*check_nested_events)(struct kvm_vcpu *vcpu, bool external_intr);
>
> diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
> index 13f5d4217e4f..f3fc225f5ebb 100644
> --- a/arch/x86/kvm/cpuid.c
> +++ b/arch/x86/kvm/cpuid.c
> @@ -325,6 +325,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
> unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
> unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
> unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
> + unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
>
> /* cpuid 1.edx */
> const u32 kvm_cpuid_1_edx_x86_features =
> @@ -476,6 +477,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
> entry->ebx |= F(TSC_ADJUST);
> entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
> cpuid_mask(&entry->ecx, CPUID_7_ECX);
> + entry->ecx |= f_umip;
> /* PKU is not yet implemented for shadow paging. */
> if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
> entry->ecx &= ~F(PKU);
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index e0bc3ad0f6cd..8ea19bf09202 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -5322,6 +5322,11 @@ static bool svm_xsaves_supported(void)
> return false;
> }
>
> +static bool svm_umip_emulated(void)
> +{
> + return false;
> +}
> +
> static bool svm_has_wbinvd_exit(void)
> {
> return true;
> @@ -5633,6 +5638,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
> .invpcid_supported = svm_invpcid_supported,
> .mpx_supported = svm_mpx_supported,
> .xsaves_supported = svm_xsaves_supported,
> + .umip_emulated = svm_umip_emulated,
>
> .set_supported_cpuid = svm_set_supported_cpuid,
>
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 5ffde16253cb..924d88d5ca35 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -9212,6 +9212,11 @@ static bool vmx_xsaves_supported(void)
> SECONDARY_EXEC_XSAVES;
> }
>
> +static bool vmx_umip_emulated(void)
> +{
> + return false;
> +}
> +
> static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
> {
> u32 exit_intr_info;
> @@ -12252,6 +12257,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
> .handle_external_intr = vmx_handle_external_intr,
> .mpx_supported = vmx_mpx_supported,
> .xsaves_supported = vmx_xsaves_supported,
> + .umip_emulated = vmx_umip_emulated,
>
> .check_nested_events = vmx_check_nested_events,
>
> --
> 2.14.1
>