[RFC PATCH 20/28] KVM: arm64: Handle Realm PSCI requests

From: Steven Price
Date: Fri Jan 27 2023 - 06:42:21 EST


The RMM needs to be informed of the target REC when a PSCI call is made
with an MPIDR argument, i.e. PSCI_CPU_ON and PSCI_AFFINITY_INFO. Resolve
the MPIDR to the target VCPU and provide both the calling and target RECs
to the RMM so that it can complete the PSCI request.

Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
 arch/arm64/include/asm/kvm_rme.h |  1 +
 arch/arm64/kvm/psci.c            | 23 +++++++++++++++++++++++
 arch/arm64/kvm/rme.c             | 13 +++++++++++++
 3 files changed, 37 insertions(+)
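
For reference, the realm_psci_complete() helper added below depends on an
rmi_psci_complete() wrapper introduced elsewhere in this series. A minimal
sketch of what such a wrapper could look like is shown here, assuming the
SMCCC-based RMI calling convention; the SMC_RMI_PSCI_COMPLETE function ID
value is a placeholder, not the one defined by the RMM specification:

/* Illustrative sketch only; not part of this patch. */
#include <linux/arm-smccc.h>

#define SMC_RMI_PSCI_COMPLETE	0xc4000199	/* placeholder function ID */

static inline int rmi_psci_complete(unsigned long calling_rec_phys,
				    unsigned long target_rec_phys)
{
	struct arm_smccc_res res;

	/*
	 * Pass the physical addresses of the calling and target REC pages
	 * so that the RMM can complete the pending PSCI request on the
	 * realm's behalf.
	 */
	arm_smccc_1_1_invoke(SMC_RMI_PSCI_COMPLETE, calling_rec_phys,
			     target_rec_phys, &res);

	return res.a0;
}

realm_psci_complete() in the rme.c hunk below simply turns a non-zero RMI
status from this call into -EINVAL.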

diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
index 303e4a5e5704..2254e28c855e 100644
--- a/arch/arm64/include/asm/kvm_rme.h
+++ b/arch/arm64/include/asm/kvm_rme.h
@@ -65,6 +65,7 @@ int realm_map_non_secure(struct realm *realm,
 int realm_set_ipa_state(struct kvm_vcpu *vcpu,
 			unsigned long addr, unsigned long end,
 			unsigned long ripas);
+int realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target);
 
 #define RME_RTT_BLOCK_LEVEL	2
 #define RME_RTT_MAX_LEVEL	3
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 7fbc4c1b9df0..e2061cab9b26 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -76,6 +76,10 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	 */
 	if (!vcpu)
 		return PSCI_RET_INVALID_PARAMS;
+
+	if (vcpu_is_rec(vcpu))
+		realm_psci_complete(source_vcpu, vcpu);
+
 	if (!kvm_arm_vcpu_stopped(vcpu)) {
 		if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
 			return PSCI_RET_ALREADY_ON;
@@ -135,6 +139,25 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
 	/* Ignore other bits of target affinity */
 	target_affinity &= target_affinity_mask;
 
+	if (vcpu_is_rec(vcpu)) {
+		struct kvm_vcpu *target_vcpu;
+
+		/* RMM supports only zero affinity level */
+		if (lowest_affinity_level != 0)
+			return PSCI_RET_INVALID_PARAMS;
+
+		target_vcpu = kvm_mpidr_to_vcpu(kvm, target_affinity);
+		if (!target_vcpu)
+			return PSCI_RET_INVALID_PARAMS;
+
+		/*
+		 * Provide the references of running and target RECs to the RMM
+		 * so that the RMM can complete the PSCI request.
+		 */
+		realm_psci_complete(vcpu, target_vcpu);
+		return PSCI_RET_SUCCESS;
+	}
+
 	/*
 	 * If one or more VCPU matching target affinity are running
 	 * then ON else OFF
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index 3d46191798e5..6ac50481a138 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -126,6 +126,19 @@ static void free_delegated_page(struct realm *realm, phys_addr_t phys)
 	free_page((unsigned long)phys_to_virt(phys));
 }
 
+int realm_psci_complete(struct kvm_vcpu *calling, struct kvm_vcpu *target)
+{
+	int ret;
+
+	ret = rmi_psci_complete(virt_to_phys(calling->arch.rec.rec_page),
+				virt_to_phys(target->arch.rec.rec_page));
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
 static void realm_destroy_undelegate_range(struct realm *realm,
 					   unsigned long ipa,
 					   unsigned long addr,
--
2.34.1