Re: [PATCH 04/10] KVM: x86: do not go through ctxt->ops when emulating rsm
From: Sean Christopherson
Date: Fri Oct 28 2022 - 17:46:51 EST
On Fri, Oct 28, 2022, kernel test robot wrote:
> All warnings (new ones prefixed by >>):
>
> arch/x86/kvm/smm.c: In function 'emulator_leave_smm':
> >> arch/x86/kvm/smm.c:503:33: warning: unused variable 'efer' [-Wunused-variable]
> 503 | unsigned long cr0, cr4, efer;
> | ^~~~
> >> arch/x86/kvm/smm.c:503:28: warning: unused variable 'cr4' [-Wunused-variable]
> 503 | unsigned long cr0, cr4, efer;
> | ^~~
>
>
> vim +/efer +503 arch/x86/kvm/smm.c
>
> e2881deb76e87c Paolo Bonzini 2022-10-27 499
> e2881deb76e87c Paolo Bonzini 2022-10-27 500 int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
> e2881deb76e87c Paolo Bonzini 2022-10-27 501 {
> e2881deb76e87c Paolo Bonzini 2022-10-27 502 struct kvm_vcpu *vcpu = ctxt->vcpu;
> e2881deb76e87c Paolo Bonzini 2022-10-27 @503 unsigned long cr0, cr4, efer;
> e2881deb76e87c Paolo Bonzini 2022-10-27 504 char buf[512];
> e2881deb76e87c Paolo Bonzini 2022-10-27 505 u64 smbase;
> e2881deb76e87c Paolo Bonzini 2022-10-27 506 int ret;
> e2881deb76e87c Paolo Bonzini 2022-10-27 507
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 508 smbase = vcpu->arch.smbase;
> e2881deb76e87c Paolo Bonzini 2022-10-27 509
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 510 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 511 if (ret < 0)
> e2881deb76e87c Paolo Bonzini 2022-10-27 512 return X86EMUL_UNHANDLEABLE;
> e2881deb76e87c Paolo Bonzini 2022-10-27 513
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 514 if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 515 static_call(kvm_x86_set_nmi_mask)(vcpu, false);
> e2881deb76e87c Paolo Bonzini 2022-10-27 516
> e2881deb76e87c Paolo Bonzini 2022-10-27 517 kvm_smm_changed(vcpu, false);
> e2881deb76e87c Paolo Bonzini 2022-10-27 518
> e2881deb76e87c Paolo Bonzini 2022-10-27 519 /*
> e2881deb76e87c Paolo Bonzini 2022-10-27 520 * Get back to real mode, to prepare a safe state in which to load
> e2881deb76e87c Paolo Bonzini 2022-10-27 521 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
> e2881deb76e87c Paolo Bonzini 2022-10-27 522 * supports long mode.
> e2881deb76e87c Paolo Bonzini 2022-10-27 523 */
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 524 #ifdef CONFIG_X86_64
> 9842725ac4bb94 Paolo Bonzini 2022-10-27 525 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
My suggestion from the previous version[*] should still work. Copy+pasted below
for convenience; it may or may not apply cleanly.
[*] https://lore.kernel.org/all/Y0nO0quQnVFQruPM@xxxxxxxxxx
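The reason IS_ENABLED() cures the robot's -Wunused-variable complaints: the
long-mode branches are still parsed and compiled (and then dead-code
eliminated), so as far as the compiler is concerned 'cr4' and 'efer' are
always used.  A minimal standalone sketch, outside of KVM, with CONFIG_FOO
standing in for CONFIG_X86_64 and a plain 0/1 macro approximating
IS_ENABLED():

#include <stdio.h>

#define CONFIG_FOO 0	/* flip to 1 to emulate a 64-bit build */

static void with_ifdef(void)
{
	int x = 42;
#if CONFIG_FOO
	printf("%d\n", x);
#endif
	/*
	 * With CONFIG_FOO=0 the only use of 'x' is preprocessed away,
	 * so the compiler warns: unused variable 'x'.
	 */
}

static void with_is_enabled(void)
{
	int x = 42;

	/*
	 * The branch is always parsed and type-checked, so 'x' counts as
	 * used; when CONFIG_FOO is 0 the compiler folds the condition and
	 * eliminates the call, same end result as the #ifdef.
	 */
	if (CONFIG_FOO)
		printf("%d\n", x);
}

int main(void)
{
	with_ifdef();
	with_is_enabled();
	return 0;
}

Note that the final #ifdef in the diff stays put: rsm_load_state_64() itself
is only compiled for CONFIG_X86_64, so the call still needs a preprocessor
guard.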
---
arch/x86/kvm/smm.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 41ca128478fc..740fca1cf3a3 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -500,6 +500,8 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
+	bool is_64bit_vcpu = IS_ENABLED(CONFIG_X86_64) &&
+			     guest_cpuid_has(vcpu, X86_FEATURE_LM);
 	unsigned long cr0, cr4, efer;
 	char buf[512];
 	u64 smbase;
@@ -521,8 +523,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 	 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
 	 * supports long mode.
 	 */
-#ifdef CONFIG_X86_64
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+	if (is_64bit_vcpu) {
 		struct kvm_segment cs_desc;
 
 		/* Zero CR4.PCIDE before CR0.PG. */
@@ -536,15 +537,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		cs_desc.s = cs_desc.g = cs_desc.present = 1;
 		kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
 	}
-#endif
 
 	/* For the 64-bit case, this will clear EFER.LMA. */
 	cr0 = kvm_read_cr0(vcpu);
 	if (cr0 & X86_CR0_PE)
 		kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-#ifdef CONFIG_X86_64
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+	if (is_64bit_vcpu) {
 		/* Clear CR4.PAE before clearing EFER.LME. */
 		cr4 = kvm_read_cr4(vcpu);
 		if (cr4 & X86_CR4_PAE)
@@ -554,7 +553,6 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		efer = 0;
 		kvm_set_msr(vcpu, MSR_EFER, efer);
 	}
-#endif
 
 	/*
 	 * Give leave_smm() a chance to make ISA-specific changes to the vCPU
@@ -565,7 +563,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 
 #ifdef CONFIG_X86_64
-	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
+	if (is_64bit_vcpu)
 		return rsm_load_state_64(ctxt, buf);
 	else
 #endif
base-commit: 8b86d27cc60a150252b04989de818ad4ec85f899
--