[PATCH v2 20/29] LoongArch: KVM: Implement handle csr exception

From: Tianrui Zhao
Date: Mon Feb 20 2023 - 01:58:51 EST


Implement KVM handling of LoongArch vCPU exits caused by guest CSR read,
write and exchange instructions. The guest CSR values are emulated in
software with the loongarch_csrs structure.

Signed-off-by: Tianrui Zhao <zhaotianrui@xxxxxxxxxxx>
---
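Note for reviewers: _kvm_getcsr()/_kvm_setcsr() are only declared in
kvm_csr.h by this patch. Below is a rough sketch of how the
GET_HW_GCSR/GET_SW_GCSR helpers are meant to compose in _kvm_getcsr().
It is illustrative only: the split between hardware-backed and
software-only CSR ids shown here, and the -EINVAL fallback, are
assumptions, not part of this patch.

  static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force)
  {
          struct loongarch_csrs *csr = vcpu->arch.csr;

          /* CSRs assumed to stay live in hardware GCSRs while the vCPU runs */
          GET_HW_GCSR(id, LOONGARCH_CSR_CRMD, v);
          GET_HW_GCSR(id, LOONGARCH_CSR_ESTAT, v);

          /* everything else is emulated purely in software */
          GET_SW_GCSR(csr, id, LOONGARCH_CSR_IMPCTL1, v);

          /* "force" is ignored in this sketch; unknown ids are rejected */
          return -EINVAL;
  }

Each helper expands to an "if (csrid == id) { ...; return 0; }" block, so
the first matching id returns directly and the fall-through path reports an
unknown CSR.
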
arch/loongarch/include/asm/kvm_csr.h | 89 +++++++++++++++++++++++
arch/loongarch/kvm/exit.c | 101 +++++++++++++++++++++++++++
2 files changed, 190 insertions(+)
create mode 100644 arch/loongarch/include/asm/kvm_csr.h
create mode 100644 arch/loongarch/kvm/exit.c

diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
new file mode 100644
index 000000000..44fcd724c
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_CSR_H__
+#define __ASM_LOONGARCH_KVM_CSR_H__
+#include <asm/loongarch.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+
+#define kvm_read_hw_gcsr(id) gcsr_read(id)
+#define kvm_write_hw_gcsr(csr, id, val) gcsr_write(val, id)
+
+int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force);
+int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v, int force);
+
+int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+static inline void kvm_save_hw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ csr->csrs[gid] = gcsr_read(gid);
+}
+
+static inline void kvm_restore_hw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ gcsr_write(csr->csrs[gid], gid);
+}
+
+static inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ return csr->csrs[gid];
+}
+
+static inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val)
+{
+ csr->csrs[gid] = val;
+}
+
+static inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val)
+{
+ csr->csrs[gid] |= val;
+}
+
+static inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long mask,
+ unsigned long val)
+{
+ unsigned long _mask = mask;
+
+ csr->csrs[gid] &= ~_mask;
+ csr->csrs[gid] |= val & _mask;
+}
+
+
+#define GET_HW_GCSR(id, csrid, v) \
+ do { \
+ if (csrid == id) { \
+ *v = (long)kvm_read_hw_gcsr(csrid); \
+ return 0; \
+ } \
+ } while (0)
+
+#define GET_SW_GCSR(csr, id, csrid, v) \
+ do { \
+ if (csrid == id) { \
+ *v = kvm_read_sw_gcsr(csr, id); \
+ return 0; \
+ } \
+ } while (0)
+
+#define SET_HW_GCSR(csr, id, csrid, v) \
+ do { \
+ if (csrid == id) { \
+ kvm_write_hw_gcsr(csr, csrid, *v); \
+ return 0; \
+ } \
+ } while (0)
+
+#define SET_SW_GCSR(csr, id, csrid, v) \
+ do { \
+ if (csrid == id) { \
+ kvm_write_sw_gcsr(csr, csrid, *v); \
+ return 0; \
+ } \
+ } while (0)
+
+#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
new file mode 100644
index 000000000..dc37827d9
--- /dev/null
+++ b/arch/loongarch/kvm/exit.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/vmalloc.h>
+#include <asm/fpu.h>
+#include <asm/inst.h>
+#include <asm/time.h>
+#include <asm/tlb.h>
+#include <asm/loongarch.h>
+#include <asm/numa.h>
+#include <asm/kvm_vcpu.h>
+#include <asm/kvm_csr.h>
+#include <linux/kvm_host.h>
+#include <asm/mmzone.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+static unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+ unsigned long val = 0;
+
+ if (csrid < 4096)
+ val = kvm_read_sw_gcsr(csr, csrid);
+ else
+ pr_warn_once("Unsupport csrread 0x%x with pc %lx\n",
+ csrid, vcpu->arch.pc);
+ return val;
+}
+
+static void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid,
+ unsigned long val)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (csrid < 4096)
+ kvm_write_sw_gcsr(csr, csrid, val);
+ else
+ pr_warn_once("Unsupport csrwrite 0x%x with pc %lx\n",
+ csrid, vcpu->arch.pc);
+}
+
+static void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
+ unsigned long csr_mask, unsigned long val)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (csrid < 4096) {
+ unsigned long orig;
+
+ orig = kvm_read_sw_gcsr(csr, csrid);
+ orig &= ~csr_mask;
+ orig |= val & csr_mask;
+ kvm_write_sw_gcsr(csr, csrid, orig);
+ } else {
+ pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n",
+ csrid, vcpu->arch.pc);
+ }
+}
+
+static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+ unsigned int rd, rj, csrid;
+ unsigned long csr_mask;
+ unsigned long val = 0;
+
+ /*
+ * CSR instruction operands: rd, rj and a csr id
+ * rj = 0 means csrrd
+ * rj = 1 means csrwr
+ * rj != 0,1 means csrxchg, with rj holding the exchange mask
+ */
+ rd = inst.reg2csr_format.rd;
+ rj = inst.reg2csr_format.rj;
+ csrid = inst.reg2csr_format.csr;
+
+ /* Process CSR ops */
+ if (rj == 0) {
+ /* process csrrd */
+ val = _kvm_emu_read_csr(vcpu, csrid);
+ vcpu->arch.gprs[rd] = val;
+ } else if (rj == 1) {
+ /* process csrwr */
+ val = vcpu->arch.gprs[rd];
+ _kvm_emu_write_csr(vcpu, csrid, val);
+ } else {
+ /* process csrxchg */
+ val = vcpu->arch.gprs[rd];
+ csr_mask = vcpu->arch.gprs[rj];
+ _kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
+ }
+
+ return EMULATE_DONE;
+}
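
Not part of the diff above, but for context: _kvm_handle_csr() returns
EMULATE_DONE without touching the guest PC, so the caller is expected to
fetch the trapping instruction and step the PC itself. A minimal sketch of
such a dispatcher follows; the handler name, the vcpu->arch.badi field, the
0x04 opcode-group check and EMULATE_FAIL are all assumptions here, not part
of this patch.

  static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
  {
          larch_inst inst;
          int er = EMULATE_FAIL;

          /* the trapping instruction word is assumed to be saved in BADI */
          inst.word = vcpu->arch.badi;

          /* bits 31:24 == 0x04 are assumed to select the CSR instruction group */
          if (((inst.word >> 24) & 0xff) == 0x04)
                  er = _kvm_handle_csr(vcpu, inst);

          if (er == EMULATE_DONE)
                  vcpu->arch.pc += 4;     /* step over the emulated instruction */

          return er;
  }
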
--
2.31.1