[PATCH v2 06/29] LoongArch: KVM: Implement vcpu create and destroy interface

From: Tianrui Zhao
Date: Mon Feb 20 2023 - 01:58:24 EST


Implement the vcpu create and destroy interfaces. On creation, save
per-vcpu information such as the guest exception entry and the
enter-guest function pointer into the vcpu arch structure, initialize
the vcpu timer, and set the guest address translation mode.

Signed-off-by: Tianrui Zhao <zhaotianrui@xxxxxxxxxxx>
---
arch/loongarch/kvm/vcpu.c | 98 +++++++++++++++++++++++++++++++++++++++
1 file changed, 98 insertions(+)
create mode 100644 arch/loongarch/kvm/vcpu.c

diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
new file mode 100644
index 000000000..4d355bcff
--- /dev/null
+++ b/arch/loongarch/kvm/vcpu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+#include <asm/kvm_host.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+{
+	return 0;
+}
+
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+	int i;
+	unsigned long timer_hz;
+	struct loongarch_csrs *csr;
+	struct kvm_context *kvm_context = per_cpu_ptr(vcpu->kvm->arch.vmcs, 0);
+
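+	/* No vPID has been allocated to this vCPU on any physical CPU yet */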
+	for_each_possible_cpu(i)
+		vcpu->arch.vpid[i] = 0;
+
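+	/* Software timer that wakes up this vCPU, see kvm_swtimer_wakeup() */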
+	hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+	vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
+	vcpu->arch.fpu_enabled = true;
+	vcpu->kvm->arch.online_vcpus = vcpu->vcpu_id + 1;
+
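+	/* Record the exception entry and enter-guest/exit-handler hooks */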
+	vcpu->arch.guest_eentry = (unsigned long)kvm_context->kvm_eentry;
+	vcpu->arch.vcpu_run = kvm_context->kvm_enter_guest;
+	vcpu->arch.handle_exit = _kvm_handle_exit;
+	vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL);
+	if (!vcpu->arch.csr)
+		return -ENOMEM;
+
+	/*
+	 * All KVM exceptions share one exception entry, and the host <-> guest
+	 * switch also switches the ECFG.VS field, so keep the host ECFG.VS here.
+	 */
+	vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS);
+
+	/* The vCPU has not been scheduled or run on any physical CPU yet */
+	vcpu->arch.last_sched_cpu = -1;
+	vcpu->arch.last_exec_cpu = -1;
+
+	/*
+	 * Initialize guest register state to valid architectural reset state.
+	 */
+	timer_hz = calc_const_freq();
+	kvm_init_timer(vcpu, timer_hz);
+
+	/* Start the guest in direct address (DA) translation mode */
+	csr = vcpu->arch.csr;
+	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA);
+
+	/* Set cpuid */
+	kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id);
+
+	/* Start with no pending virtual guest interrupts */
+	csr->csrs[LOONGARCH_CSR_GINTC] = 0;
+
+	return 0;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+	int cpu;
+	struct kvm_context *context;
+
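+	/* Stop the software timer and free the per-vCPU allocations */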
+	hrtimer_cancel(&vcpu->arch.swtimer);
+	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+	kfree(vcpu->arch.csr);
+
+	/*
+	 * If the VCPU is freed and reused as another VCPU, we don't want the
+	 * matching pointer wrongly hanging around in last_vcpu.
+	 */
+	for_each_possible_cpu(cpu) {
+		context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
+		if (context->last_vcpu == vcpu)
+			context->last_vcpu = NULL;
+	}
+}
--
2.31.1