[PATCH v7 20/30] LoongArch: KVM: Implement handle csr exception
From: Tianrui Zhao
Date: Mon Apr 17 2023 - 07:02:41 EST
Implement KVM handling of the LoongArch vCPU exit caused by reading or
writing a CSR, using the loongarch_csr structure to emulate the
registers in software.
Signed-off-by: Tianrui Zhao <zhaotianrui@xxxxxxxxxxx>
---
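Note for reviewers (illustration only, not part of the patch): the new
_kvm_handle_csr() helper is expected to be reached from the GSPR exit
path roughly as sketched below. The dispatch function, the use of
vcpu->arch.badi to fetch the faulting instruction, and the 0x4 opcode
group for csrrd/csrwr/csrxchg are assumptions based on later patches in
this series and may differ in detail:

	static int _kvm_trap_handle_gspr(struct kvm_vcpu *vcpu)
	{
		int er = EMULATE_FAIL;
		larch_inst inst;

		/* assumed: the trapping instruction word is latched in badi */
		inst.word = vcpu->arch.badi;
		switch ((inst.word >> 24) & 0xff) {
		case 0x4: /* assumed opcode group for csrrd/csrwr/csrxchg */
			er = _kvm_handle_csr(vcpu, inst);
			break;
		default:
			break;
		}
		return er;
	}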
arch/loongarch/include/asm/kvm_csr.h | 58 ++++++++++++++++
arch/loongarch/kvm/exit.c | 98 ++++++++++++++++++++++++++++
2 files changed, 156 insertions(+)
create mode 100644 arch/loongarch/include/asm/kvm_csr.h
create mode 100644 arch/loongarch/kvm/exit.c
diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h
new file mode 100644
index 000000000000..36c91652d6bb
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_csr.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_CSR_H__
+#define __ASM_LOONGARCH_KVM_CSR_H__
+#include <asm/loongarch.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_vcpu.h>
+#include <linux/uaccess.h>
+#include <linux/kvm_host.h>
+
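+/* Access guest CSRs that live in hardware via the GCSR instructions */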
+#define kvm_read_hw_gcsr(id) gcsr_read(id)
+#define kvm_write_hw_gcsr(csr, id, val) gcsr_write(val, id)
+
+int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *v);
+int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 v);
+
+int _kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu);
+
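+/*
+ * Sync a single guest CSR between its hardware home and the software
+ * image in csr->csrs[]
+ */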
+static __always_inline void kvm_save_hw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ csr->csrs[gid] = gcsr_read(gid);
+}
+
+static __always_inline void kvm_restore_hw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ gcsr_write(csr->csrs[gid], gid);
+}
+
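+/* Accessors for the software CSR image backing emulated registers */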
+static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid)
+{
+ return csr->csrs[gid];
+}
+
+static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long val)
+{
+ csr->csrs[gid] = val;
+}
+
+static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long val)
+{
+ csr->csrs[gid] |= val;
+}
+
+static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
+ int gid, unsigned long mask,
+ unsigned long val)
+{
+ csr->csrs[gid] &= ~mask;
+ csr->csrs[gid] |= val & mask;
+}
+#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c
new file mode 100644
index 000000000000..508cbce31aa5
--- /dev/null
+++ b/arch/loongarch/kvm/exit.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/preempt.h>
+#include <linux/vmalloc.h>
+#include <asm/fpu.h>
+#include <asm/inst.h>
+#include <asm/time.h>
+#include <asm/tlb.h>
+#include <asm/loongarch.h>
+#include <asm/numa.h>
+#include <asm/kvm_vcpu.h>
+#include <asm/kvm_csr.h>
+#include <linux/kvm_host.h>
+#include <asm/mmzone.h>
+#include "trace.h"
+
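+/*
+ * Only software-emulated (SW_GCSR) registers are backed here; reads of
+ * anything else warn once and return 0.
+ */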
+static unsigned long _kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+ unsigned long val = 0;
+
+ if (csrid < 4096 && (get_gcsr_flag(csrid) & SW_GCSR))
+ val = kvm_read_sw_gcsr(csr, csrid);
+ else
+ pr_warn_once("Unsupport csrread 0x%x with pc %lx\n",
+ csrid, vcpu->arch.pc);
+ return val;
+}
+
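+/* Writes to registers that are not software emulated warn once and are dropped */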
+static void _kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid,
+ unsigned long val)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (csrid < 4096 && (get_gcsr_flag(csrid) & SW_GCSR))
+ kvm_write_sw_gcsr(csr, csrid, val);
+ else
+ pr_warn_once("Unsupport csrwrite 0x%x with pc %lx\n",
+ csrid, vcpu->arch.pc);
+}
+
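+/* csrxchg: replace the CSR bits selected by csr_mask with those in val */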
+static void _kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid,
+ unsigned long csr_mask, unsigned long val)
+{
+ struct loongarch_csrs *csr = vcpu->arch.csr;
+
+ if (csrid < 4096 && (get_gcsr_flag(csrid) & SW_GCSR)) {
+ unsigned long orig;
+
+ orig = kvm_read_sw_gcsr(csr, csrid);
+ orig &= ~csr_mask;
+ orig |= val & csr_mask;
+ kvm_write_sw_gcsr(csr, csrid, orig);
+ } else {
+ pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n",
+ csrid, vcpu->arch.pc);
+ }
+}
+
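+/* Decode and emulate a trapped csrrd/csrwr/csrxchg instruction */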
+static int _kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst)
+{
+ unsigned int rd, rj, csrid;
+ unsigned long csr_mask;
+ unsigned long val = 0;
+
+ /*
+ * CSR instruction decoding: rd holds the value, the csr field names
+ * the register, and rj selects the operation:
+ * rj == 0 means csrrd
+ * rj == 1 means csrwr
+ * rj != 0,1 means csrxchg (rj holds the mask)
+ */
+ rd = inst.reg2csr_format.rd;
+ rj = inst.reg2csr_format.rj;
+ csrid = inst.reg2csr_format.csr;
+
+ /* Process CSR ops */
+ if (rj == 0) {
+ /* process csrrd */
+ val = _kvm_emu_read_csr(vcpu, csrid);
+ vcpu->arch.gprs[rd] = val;
+ } else if (rj == 1) {
+ /* process csrwr */
+ val = vcpu->arch.gprs[rd];
+ _kvm_emu_write_csr(vcpu, csrid, val);
+ } else {
+ /* process csrxchg */
+ val = vcpu->arch.gprs[rd];
+ csr_mask = vcpu->arch.gprs[rj];
+ _kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val);
+ }
+
+ return EMULATE_DONE;
+}
--
2.31.1