[RFC PATCH 08/13] KVM: MMU: Introduce level info in PFERR code

From: isaku . yamahata
Date: Sun Aug 07 2022 - 18:32:37 EST


From: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>

For TDX, an EPT violation can happen during TDG.MEM.PAGE.ACCEPT,
and TDG.MEM.PAGE.ACCEPT carries the page level at which the TD guest
wants to accept the page.

1. KVM can map it with 4KB page while TD guest wants to accept 2MB page.

The TD guest will get TDX_PAGE_SIZE_MISMATCH and it should retry the
accept at 4KB size.

2. KVM can map it with 2MB page while TD guest wants to accept 4KB page.

KVM needs to honor it because
a) there is no way to tell the guest that KVM maps it at 2MB size, and
b) the guest accepts it at 4KB size since it knows some other 4KB page
in the same 2MB range will be used as a shared page.

For case 2, KVM needs to pass the desired page level to the MMU's
page fault handler. Use bits 31:29 of the KVM PF error code for this
purpose.

Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 3 +++
arch/x86/kvm/mmu/mmu.c | 5 +++++
arch/x86/kvm/vmx/common.h | 6 +++++-
arch/x86/kvm/vmx/tdx.c | 15 ++++++++++++++-
arch/x86/kvm/vmx/tdx.h | 19 +++++++++++++++++++
arch/x86/kvm/vmx/vmx.c | 2 +-
6 files changed, 47 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2bdb1de9bce0..c01bde832de2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -251,6 +251,8 @@ enum x86_intercept_stage;
#define PFERR_FETCH_BIT 4
#define PFERR_PK_BIT 5
#define PFERR_SGX_BIT 15
+#define PFERR_LEVEL_START_BIT 29
+#define PFERR_LEVEL_END_BIT 31
#define PFERR_GUEST_FINAL_BIT 32
#define PFERR_GUEST_PAGE_BIT 33
#define PFERR_IMPLICIT_ACCESS_BIT 48
@@ -262,6 +264,7 @@ enum x86_intercept_stage;
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
#define PFERR_PK_MASK (1U << PFERR_PK_BIT)
#define PFERR_SGX_MASK (1U << PFERR_SGX_BIT)
+#define PFERR_LEVEL_MASK GENMASK_ULL(PFERR_LEVEL_END_BIT, PFERR_LEVEL_START_BIT)
#define PFERR_GUEST_FINAL_MASK (1ULL << PFERR_GUEST_FINAL_BIT)
#define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT)
#define PFERR_IMPLICIT_ACCESS (1ULL << PFERR_IMPLICIT_ACCESS_BIT)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a03aa609a0da..ba21503fa46f 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4451,6 +4451,11 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
+ u8 err_level = (fault->error_code & PFERR_LEVEL_MASK) >> PFERR_LEVEL_START_BIT;
+
+ if (err_level)
+ fault->max_level = min(fault->max_level, err_level);
+
/*
* If the guest's MTRRs may be used to compute the "real" memtype,
* restrict the mapping level to ensure KVM uses a consistent memtype
diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
index fd5ed3c0f894..f512eaa458a2 100644
--- a/arch/x86/kvm/vmx/common.h
+++ b/arch/x86/kvm/vmx/common.h
@@ -78,7 +78,8 @@ static inline void vmx_handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu,
}

static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
- unsigned long exit_qualification)
+ unsigned long exit_qualification,
+ int err_page_level)
{
u64 error_code;

@@ -98,6 +99,9 @@ static inline int __vmx_handle_ept_violation(struct kvm_vcpu *vcpu, gpa_t gpa,
error_code |= (exit_qualification & EPT_VIOLATION_GVA_TRANSLATED) != 0 ?
PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;

+ if (err_page_level > 0)
+ error_code |= (err_page_level << PFERR_LEVEL_START_BIT) & PFERR_LEVEL_MASK;
+
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}

diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index cdd421fb5024..81d88b1e63ac 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -1765,7 +1765,20 @@ void tdx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,

static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
{
+ union tdx_ext_exit_qualification ext_exit_qual;
unsigned long exit_qual;
+ int err_page_level = 0;
+
+ ext_exit_qual.full = tdexit_ext_exit_qual(vcpu);
+
+ if (ext_exit_qual.type >= NUM_EXT_EXIT_QUAL) {
+ pr_err("EPT violation at gpa 0x%lx, with invalid ext exit qualification type 0x%x\n",
+ tdexit_gpa(vcpu), ext_exit_qual.type);
+ kvm_vm_bugged(vcpu->kvm);
+ return 0;
+ } else if (ext_exit_qual.type == EXT_EXIT_QUAL_ACCEPT) {
+ err_page_level = ext_exit_qual.req_sept_level + 1;
+ }

if (kvm_is_private_gpa(vcpu->kvm, tdexit_gpa(vcpu))) {
/*
@@ -1792,7 +1805,7 @@ static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
}

trace_kvm_page_fault(tdexit_gpa(vcpu), exit_qual);
- return __vmx_handle_ept_violation(vcpu, tdexit_gpa(vcpu), exit_qual);
+ return __vmx_handle_ept_violation(vcpu, tdexit_gpa(vcpu), exit_qual, err_page_level);
}

static int tdx_handle_ept_misconfig(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 8284cce0d385..3400563a2254 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -79,6 +79,25 @@ union tdx_exit_reason {
u64 full;
};

+union tdx_ext_exit_qualification {
+ struct {
+ u64 type : 4;
+ u64 reserved0 : 28;
+ u64 req_sept_level : 3;
+ u64 err_sept_level : 3;
+ u64 err_sept_state : 8;
+ u64 err_sept_is_leaf : 1;
+ u64 reserved1 : 17;
+ };
+ u64 full;
+};
+
+enum tdx_ext_exit_qualification_type {
+ EXT_EXIT_QUAL_NONE,
+ EXT_EXIT_QUAL_ACCEPT,
+ NUM_EXT_EXIT_QUAL,
+};
+
struct vcpu_tdx {
struct kvm_vcpu vcpu;

diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index e5aa805f6db4..6ba3eded55a7 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5646,7 +5646,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
return kvm_emulate_instruction(vcpu, 0);

- return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification);
+ return __vmx_handle_ept_violation(vcpu, gpa, exit_qualification, 0);
}

static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
--
2.25.1