[PATCH v5 10/12] KVM: X86: Rename and move the function vmx_handle_memory_failure to x86.c

From: Babu Moger
Date: Wed Aug 26 2020 - 15:15:52 EST


Handling of kvm_read/write_guest_virt*() errors is not VMX specific, so
move it from vmx.c to the common x86 code, where both VMX and SVM can
use it.

Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
---
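For reference (not part of the patch), the call pattern this rename
enables outside of VMX. The handler name and operand fetch below are
hypothetical, but the error handling mirrors the VMX call sites
converted in this patch:

	/* Illustrative SVM-side user of the now-common helper. */
	static int svm_read_guest_operand(struct kvm_vcpu *vcpu, gva_t gva)
	{
		struct x86_exception e;
		u64 operand;
		int r;

		r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
		if (r != X86EMUL_CONTINUE)
			return kvm_handle_memory_failure(vcpu, r, &e);

		/* ... consume the operand ... */
		return 1;	/* resume the guest */
	}
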
 arch/x86/kvm/vmx/nested.c | 12 ++++++------
 arch/x86/kvm/vmx/vmx.c    | 29 +----------------------------
 arch/x86/kvm/vmx/vmx.h    |  2 --
 arch/x86/kvm/x86.c        | 28 ++++++++++++++++++++++++++++
 arch/x86/kvm/x86.h        |  2 ++
 5 files changed, 37 insertions(+), 36 deletions(-)

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 23b58c28a1c9..28becd22d9d9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4688,7 +4688,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,

 	r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
 	if (r != X86EMUL_CONTINUE) {
-		*ret = vmx_handle_memory_failure(vcpu, r, &e);
+		*ret = kvm_handle_memory_failure(vcpu, r, &e);
 		return -EINVAL;
 	}

@@ -4995,7 +4995,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
 		if (r != X86EMUL_CONTINUE)
-			return vmx_handle_memory_failure(vcpu, r, &e);
+			return kvm_handle_memory_failure(vcpu, r, &e);
 	}

 	return nested_vmx_succeed(vcpu);
@@ -5068,7 +5068,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			return 1;
 		r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
 		if (r != X86EMUL_CONTINUE)
-			return vmx_handle_memory_failure(vcpu, r, &e);
+			return kvm_handle_memory_failure(vcpu, r, &e);
 	}

 	field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
@@ -5230,7 +5230,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
 					sizeof(gpa_t), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	return nested_vmx_succeed(vcpu);
 }
@@ -5283,7 +5283,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 		return 1;
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	/*
 	 * Nested EPT roots are always held through guest_mmu,
@@ -5365,7 +5365,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return 1;
 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	if (operand.vpid >> 16)
 		return nested_vmx_fail(vcpu,
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 46ba2e03a892..b15b4c6e3b46 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1598,33 +1598,6 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	return 1;
 }

-/*
- * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
- * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
- * indicates whether exit to userspace is needed.
- */
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-			      struct x86_exception *e)
-{
-	if (r == X86EMUL_PROPAGATE_FAULT) {
-		kvm_inject_emulated_page_fault(vcpu, e);
-		return 1;
-	}
-
-	/*
-	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
-	 * while handling a VMX instruction KVM could've handled the request
-	 * correctly by exiting to userspace and performing I/O but there
-	 * doesn't seem to be a real use-case behind such requests, just return
-	 * KVM_EXIT_INTERNAL_ERROR for now.
-	 */
-	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
-	vcpu->run->internal.ndata = 0;
-
-	return 0;
-}
-
 /*
  * Recognizes a pending MTF VM-exit and records the nested state for later
  * delivery.
@@ -5558,7 +5531,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)

 	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
 	if (r != X86EMUL_CONTINUE)
-		return vmx_handle_memory_failure(vcpu, r, &e);
+		return kvm_handle_memory_failure(vcpu, r, &e);

 	if (operand.pcid >> 12 != 0) {
 		kvm_inject_gp(vcpu, 0);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 26175a4759fa..7c578564a8fc 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -354,8 +354,6 @@ struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
 void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
 void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
 int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
-int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
-			      struct x86_exception *e);

 #define POSTED_INTR_ON 0
 #define POSTED_INTR_SN 1
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 539ea1cd6020..5d7930ecdddc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10763,6 +10763,34 @@ void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
 }
 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);

+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e)
+{
+	if (r == X86EMUL_PROPAGATE_FAULT) {
+		kvm_inject_emulated_page_fault(vcpu, e);
+		return 1;
+	}
+
+	/*
+	 * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+	 * while handling a VMX instruction, KVM could have handled the request
+	 * correctly by exiting to userspace and performing I/O, but there
+	 * doesn't seem to be a real use case for such requests; just return
+	 * KVM_EXIT_INTERNAL_ERROR for now.
+	 */
+	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+	vcpu->run->internal.ndata = 0;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 995ab696dcf0..d3a41144eb30 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -372,6 +372,8 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 int kvm_spec_ctrl_test_value(u64 value);
 int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
+int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+			      struct x86_exception *e);

 #define KVM_MSR_RET_INVALID 2
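
Note on the return convention, since callers return the helper's result
directly from their exit handlers: 1 means the fault was injected and
the vCPU can keep running, 0 means KVM exits to userspace with the
internal-error state filled in above. A VMM would observe that path
roughly as below (sketch; the constants and kvm_run fields are from the
UAPI <linux/kvm.h>):

	#include <linux/kvm.h>

	/* Sketch: after KVM_RUN returns, detect the exit-to-userspace path. */
	static int vmm_saw_emulation_error(const struct kvm_run *run)
	{
		return run->exit_reason == KVM_EXIT_INTERNAL_ERROR &&
		       run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION;
	}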