This patch adds the functions needed to do a nested page
table walk, translating an l2_gva into an l1_gpa.
Signed-off-by: Joerg Roedel <joerg.roedel@xxxxxxx>
---
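[Not part of the patch: a short, self-contained toy model of the
translation being emulated, for reviewers who want to see the
mechanics in isolation. One-level page tables, 256-byte pages;
none of the names below are KVM code.]

#include <stdint.h>
#include <stdio.h>

/* Toy model: 256-byte pages, one-level page tables on both sides. */
#define PAGE_SHIFT 8
#define PAGE_SIZE  (1u << PAGE_SHIFT)
#define NPAGES     16

static uint8_t phys[NPAGES * PAGE_SIZE]; /* L1 "physical" memory       */
static uint8_t l1_pt[NPAGES];            /* l2_gpa page -> l1_gpa page */

/* The nested step: an l2_gpa is itself translated through the L1 table. */
static uint32_t l2_gpa_to_l1_gpa(uint32_t l2_gpa)
{
	return (l1_pt[l2_gpa >> PAGE_SHIFT] << PAGE_SHIFT) |
	       (l2_gpa & (PAGE_SIZE - 1));
}

/*
 * Two dimensional walk: the L2 page table itself lives at an l2_gpa
 * (l2_pt_gpa), so even fetching its entries needs the nested step.
 */
static uint32_t l2_gva_to_l1_gpa(uint32_t l2_pt_gpa, uint32_t l2_gva)
{
	uint32_t entry_gpa = l2_pt_gpa + (l2_gva >> PAGE_SHIFT);
	uint8_t  pte       = phys[l2_gpa_to_l1_gpa(entry_gpa)];
	uint32_t l2_gpa    = ((uint32_t)pte << PAGE_SHIFT) |
			     (l2_gva & (PAGE_SIZE - 1));

	return l2_gpa_to_l1_gpa(l2_gpa);
}

int main(void)
{
	l1_pt[0] = 5;              /* L2 page table (l2_gpa page 0) -> l1 page 5 */
	l1_pt[2] = 7;              /* l2_gpa page 2 -> l1 page 7                 */
	phys[(5 << PAGE_SHIFT) + 3] = 2; /* L2 pte: l2_gva page 3 -> l2_gpa page 2 */

	/* l2_gva 0x3ab -> l2_gpa 0x2ab -> l1_gpa 0x7ab */
	printf("l1_gpa = 0x%x\n", l2_gva_to_l1_gpa(0, 0x3ab));
	return 0;
}

The point is the double use of l2_gpa_to_l1_gpa(): once for each table
entry fetched and once for the final address; that double translation
is what makes the walk two dimensional.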
 arch/x86/include/asm/kvm_host.h |    4 ++++
 arch/x86/kvm/mmu.c              |    8 ++++++++
 arch/x86/kvm/paging_tmpl.h      |   31 +++++++++++++++++++++++++++++++
3 files changed, 43 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ccdbd2f..3cbfb51 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -287,6 +287,10 @@ struct kvm_vcpu_arch {
 	bool tpr_access_reporting;
 
 	struct kvm_mmu mmu;
+
+	/* Used for two dimensional paging emulation */
+	struct kvm_mmu nested_mmu;
+
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
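[For context: the gva_to_gpa callback of this new context is what the
functions added below get plugged into, selected by the paging mode of
the L2 guest. A sketch of that wiring, following the merged code rather
than anything in this patch; is_paging/is_long_mode/is_pae are existing
KVM helpers.]

/* Sketch only: how nested_mmu.gva_to_gpa is presumably selected. */
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	if (!is_paging(vcpu))
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	else if (is_long_mode(vcpu) || is_pae(vcpu))
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	else
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
}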
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4e0bfdb..8bd40b5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2144,6 +2144,14 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
 	return vaddr;
 }
 
+static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
+					 u32 access, u32 *error)
+{
+	if (error)
+		*error = 0;
+
+	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, error);
+}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
+static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
+				      u32 access, u32 *error)
+{
+	struct guest_walker walker;
+	gpa_t gpa = UNMAPPED_GVA;
+	int r;
+
+	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr,
+				    !!(access & PFERR_WRITE_MASK),
+				    !!(access & PFERR_USER_MASK),
+				    !!(access & PFERR_FETCH_MASK));
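[For reference, FNAME(walk_addr_nested), used above but not visible in
the quoted hunk, is a thin wrapper that points the generic page table
walker at the new nested_mmu context. A sketch along the lines of the
merged series; walk_addr_generic is assumed from there, not from this
patch.]

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   int write_fault, int user_fault,
				   int fetch_fault)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, write_fault, user_fault,
					fetch_fault);
}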