[PATCH v2 1/4] KVM: x86: move the handling of KVM_REQ_GET_NESTED_STATE_PAGES into a common function
From: Mingwei Zhang
Date: Sun Aug 28 2022 - 18:25:57 EST
Create a common function to handle KVM requests in the vcpu_run loop. KVM
implicitly assumes that the virtual APIC page is present and mapped into
the kernel address space when it executes vmx_guest_apic_has_interrupt().
However, with demand paging that assumption no longer holds, because the
KVM_REQ_GET_NESTED_STATE_PAGES request is not processed before entering
vcpu_block().

Fix this by getting the vmcs12 pages before inspecting the guest's APIC
page. With this fix, the handling of KVM_REQ_GET_NESTED_STATE_PAGES
becomes a common code path for both vcpu_enter_guest() and vcpu_block(),
so move that code into a common helper function to avoid duplication.
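
For reference, the ordering problem can be illustrated outside the kernel
with a minimal user-space sketch: if the "is this vCPU runnable?" check
consults state that only the deferred request handler populates, running
the check before draining requests dereferences memory that was never
mapped. All names below (toy_vcpu, toy_get_nested_state_pages, and so on)
are made up for illustration and do not exist in KVM; this is a toy model
of the ordering, not kernel code.

/* Toy user-space model of the request ordering this patch enforces. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_vcpu {
    bool pending_get_pages;   /* stands in for KVM_REQ_GET_NESTED_STATE_PAGES */
    int *apic_page;           /* stands in for the mapped virtual APIC page */
};

/* Stands in for get_nested_state_pages(): maps what the blocking path needs. */
static bool toy_get_nested_state_pages(struct toy_vcpu *vcpu)
{
    vcpu->apic_page = calloc(1, sizeof(*vcpu->apic_page));
    return vcpu->apic_page != NULL;
}

/* Stands in for vmx_guest_apic_has_interrupt(): assumes the page is mapped. */
static bool toy_guest_apic_has_interrupt(struct toy_vcpu *vcpu)
{
    return *vcpu->apic_page != 0;   /* would crash if the page was never mapped */
}

/* Stands in for the common helper added by this patch. */
static int toy_handle_common_requests(struct toy_vcpu *vcpu)
{
    if (vcpu->pending_get_pages) {
        vcpu->pending_get_pages = false;
        if (!toy_get_nested_state_pages(vcpu))
            return 0;
    }
    return 1;
}

int main(void)
{
    struct toy_vcpu vcpu = { .pending_get_pages = true };

    /*
     * Draining the request first is what makes the "blocked" check safe:
     * calling toy_guest_apic_has_interrupt() before this point would
     * dereference a NULL apic_page.
     */
    if (toy_handle_common_requests(&vcpu) <= 0)
        return 1;

    printf("interrupt pending: %d\n", toy_guest_apic_has_interrupt(&vcpu));
    free(vcpu.apic_page);
    return 0;
}

The same ordering is what kvm_vcpu_handle_common_requests() enforces in
vcpu_run() below: drain the request before anything on the blocking path
looks at the virtual APIC page.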
Cc: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
Cc: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
Originally-by: Oliver Upton <oupton@xxxxxxxxxx>
Signed-off-by: Oliver Upton <oupton@xxxxxxxxxx>
Signed-off-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
---
arch/x86/kvm/x86.c | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d7374d768296..3dcaac8f0584 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10261,12 +10261,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
r = -EIO;
goto out;
}
- if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
- if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
- r = 0;
- goto out;
- }
- }
if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
kvm_mmu_free_obsolete_roots(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
@@ -10666,6 +10660,23 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
!vcpu->arch.apf.halted);
}
+static int kvm_vcpu_handle_common_requests(struct kvm_vcpu *vcpu)
+{
+ if (kvm_request_pending(vcpu)) {
+ /*
+ * Get the vmcs12 pages before checking for interrupts that
+ * might unblock the guest if L1 is using virtual-interrupt
+ * delivery.
+ */
+ if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+ if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu)))
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
/* Called within kvm->srcu read side. */
static int vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -10681,6 +10692,12 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
* this point can start executing an instruction.
*/
vcpu->arch.at_instruction_boundary = false;
+
+ /* Process common requests regardless of vcpu state. */
+ r = kvm_vcpu_handle_common_requests(vcpu);
+ if (r <= 0)
+ break;
+
if (kvm_vcpu_running(vcpu)) {
r = vcpu_enter_guest(vcpu);
} else {
--
2.37.2.672.g94769d06f0-goog