[PATCH 16/16] KVM: arm64: pkvm: Unshare guest structs during teardown

From: Quentin Perret
Date: Wed Oct 13 2021 - 11:59:36 EST


Make use of the newly introduced unshare hypercall during guest teardown
to unmap guest-related data structures from the hyp stage-1. As the hyp
must be handed back exactly the ranges that were shared with it, also keep
track of the kernel addresses of the thread_info and fpsimd state shared
from kvm_arch_vcpu_run_map_fp(), in addition to their hyp VAs, so that the
same ranges can be unshared again later.
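
For illustration, a minimal sketch of the intended pairing between
kvm_share_hyp() and the new kvm_unshare_hyp(), mirroring what the fpsimd
and teardown paths below do. The two helpers and their names are
hypothetical and only show the pattern; they are not part of this patch:

	/* Share the current task's thread_info with hyp, remember both VAs. */
	static int example_share_ti(struct kvm_vcpu *vcpu)
	{
		struct thread_info *ti = &current->thread_info;
		int ret;

		ret = kvm_share_hyp(ti, ti + 1);
		if (ret)
			return ret;

		/* hyp VA for use at EL2, kernel VA for the later unshare. */
		vcpu->arch.host_thread_info = kern_hyp_va(ti);
		vcpu->arch.kern_thread_info = ti;

		return 0;
	}

	/* Unmap the pages from the hyp stage-1 when the vcpu goes away. */
	static void example_unshare_ti(struct kvm_vcpu *vcpu)
	{
		struct thread_info *ti = vcpu->arch.kern_thread_info;

		/* Nothing to do for a vcpu that never shared anything. */
		if (ti)
			kvm_unshare_hyp(ti, ti + 1);
	}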

Signed-off-by: Quentin Perret <qperret@xxxxxxxxxx>
---
 arch/arm64/include/asm/kvm_host.h |  2 ++
 arch/arm64/include/asm/kvm_mmu.h  |  1 +
 arch/arm64/kvm/arm.c              |  2 ++
 arch/arm64/kvm/fpsimd.c           | 10 ++++++++--
 arch/arm64/kvm/mmu.c              | 16 ++++++++++++++++
 arch/arm64/kvm/reset.c            | 13 ++++++++++++-
 6 files changed, 41 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index f8be56d5342b..8b61cdcd1b29 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -322,6 +322,8 @@ struct kvm_vcpu_arch {

struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
+ struct thread_info *kern_thread_info;
+ struct user_fpsimd_state *kern_fpsimd_state;

struct {
/* {Break,watch}point registers */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 185d0f62b724..81839e9a8a24 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -151,6 +151,7 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
+void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f2e74635332b..f11c51db6fe6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -188,6 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
}
}
atomic_set(&kvm->online_vcpus, 0);
+
+ kvm_unshare_hyp(kvm, kvm + 1);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 2fe1128d9f3d..67059daf4d26 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -28,23 +28,29 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
int ret;

- struct thread_info *ti = &current->thread_info;
- struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
+ struct thread_info *ti = vcpu->arch.kern_thread_info;
+ struct user_fpsimd_state *fpsimd = vcpu->arch.kern_fpsimd_state;

/*
* Make sure the host task thread flags and fpsimd state are
* visible to hyp:
*/
+ kvm_unshare_hyp(ti, ti + 1);
+ ti = &current->thread_info;
ret = kvm_share_hyp(ti, ti + 1);
if (ret)
goto error;

+ kvm_unshare_hyp(fpsimd, fpsimd + 1);
+ fpsimd = &current->thread.uw.fpsimd_state;
ret = kvm_share_hyp(fpsimd, fpsimd + 1);
if (ret)
goto error;

vcpu->arch.host_thread_info = kern_hyp_va(ti);
vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
+ vcpu->arch.kern_thread_info = ti;
+ vcpu->arch.kern_fpsimd_state = fpsimd;
error:
return ret;
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index bc9865a8c988..f01b0e49e262 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -300,6 +300,22 @@ int kvm_share_hyp(void *from, void *to)
nr_pages);
}

+void kvm_unshare_hyp(void *from, void *to)
+{
+ phys_addr_t start, end;
+ u64 nr_pages;
+
+ if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
+ return;
+
+ start = ALIGN_DOWN(kvm_kaddr_to_phys(from), PAGE_SIZE);
+ end = PAGE_ALIGN(kvm_kaddr_to_phys(to));
+ nr_pages = (end - start) >> PAGE_SHIFT;
+
+ WARN_ON(kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, __phys_to_pfn(start),
+ nr_pages));
+}
+
/**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5ce36b0a3343..e3e9c9e1f1c8 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -141,7 +141,18 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- kfree(vcpu->arch.sve_state);
+ struct user_fpsimd_state *fpsimd = vcpu->arch.kern_fpsimd_state;
+ struct thread_info *ti = vcpu->arch.kern_thread_info;
+ void *sve_state = vcpu->arch.sve_state;
+
+ kvm_unshare_hyp(vcpu, vcpu + 1);
+ if (ti)
+ kvm_unshare_hyp(ti, ti + 1);
+ if (fpsimd)
+ kvm_unshare_hyp(fpsimd, fpsimd + 1);
+ if (sve_state && vcpu->arch.has_run_once)
+ kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
+ kfree(sve_state);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
--
2.33.0.882.g93a45727a2-goog