[PATCH v7 13/22] KVM: arm64: Support EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall

From: Gavin Shan
Date: Fri May 27 2022 - 04:06:01 EST


This supports the EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercalls.
Execution resumes from the previously interrupted context when the
EVENT_COMPLETE hypercall is received.

However, execution resumes from the specified address when the
EVENT_COMPLETE_AND_RESUME hypercall is received. In this case, the
context is switched as below.

* x0 to x17 are restored from the interrupted context.

* SPSR_EL1 is set to PSTATE of the interrupted context.

* ELR_EL1 is set to PC of the interrupted context.

* PSTATE has the nRW bit cleared and the mode forced to EL1h, with
the D/A/I/F bits set.

* PC is set to the resume address, specified in the first argument
of EVENT_COMPLETE_AND_RESUME hypercall.

Signed-off-by: Gavin Shan <gshan@xxxxxxxxxx>
---
arch/arm64/kvm/sdei.c | 54 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 54 insertions(+)

diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 36a72c1750fc..0bea1b2f9452 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -85,6 +85,54 @@ static unsigned long event_context(struct kvm_vcpu *vcpu)
return ctxt->regs[param_id];
}

+static void event_complete(struct kvm_vcpu *vcpu, bool resume)
+{
+ struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+ struct kvm_sdei_event_context *ctxt = &vsdei->ctxt;
+ unsigned long pstate, resume_addr = smccc_get_arg(vcpu, 1);
+ unsigned int num, i;
+
+ num = find_next_bit(&vsdei->running, KVM_NR_SDEI_EVENTS, 0);
+ if (num >= KVM_NR_SDEI_EVENTS)
+ return;
+
+ /* Restore the volatile GPRs (x0 - x17) saved from the interrupted context */
+ for (i = 0; i < ARRAY_SIZE(ctxt->regs); i++)
+ vcpu_set_reg(vcpu, i, ctxt->regs[i]);
+
+ /*
+ * For COMPLETE_AND_RESUME, make the interrupted context look like
+ * a taken exception and branch to the resume address:
+ *
+ * SPSR_EL1: PSTATE of the interrupted context
+ * ELR_EL1: PC of the interrupted context
+ * PSTATE: nRW bit cleared, mode forced to EL1h, D/A/I/F masked
+ * PC: the resume address (first hypercall argument)
+ */
+ if (resume) {
+ if (has_vhe()) {
+ write_sysreg_el1(ctxt->pstate, SYS_SPSR);
+ write_sysreg_s(ctxt->pc, SYS_ELR_EL12);
+ } else {
+ __vcpu_sys_reg(vcpu, SPSR_EL1) = ctxt->pstate;
+ __vcpu_sys_reg(vcpu, ELR_EL1) = ctxt->pc;
+ }
+
+ pstate = ctxt->pstate;
+ pstate &= ~(PSR_MODE32_BIT | PSR_MODE_MASK);
+ pstate |= (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT |
+ PSR_F_BIT | PSR_MODE_EL1h);
+ *vcpu_cpsr(vcpu) = pstate;
+ *vcpu_pc(vcpu) = resume_addr;
+ } else {
+ *vcpu_cpsr(vcpu) = ctxt->pstate;
+ *vcpu_pc(vcpu) = ctxt->pc;
+ }
+
+ /* The event is no longer being handled on this vCPU */
+ clear_bit(num, &vsdei->running);
+}
+
static unsigned long event_unregister(struct kvm_vcpu *vcpu)
{
struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
@@ -234,6 +282,12 @@ int kvm_sdei_call(struct kvm_vcpu *vcpu)
case SDEI_1_0_FN_SDEI_EVENT_CONTEXT:
ret = event_context(vcpu);
break;
+ case SDEI_1_0_FN_SDEI_EVENT_COMPLETE:
+ event_complete(vcpu, false);
+ break;
+ case SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME:
+ event_complete(vcpu, true);
+ break;
case SDEI_1_0_FN_SDEI_EVENT_UNREGISTER:
ret = event_unregister(vcpu);
break;
--
2.23.0