// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_timer.h>
#include <asm/kvm_vcpu_sbi.h>

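/*
 * SBI TIME extension: handles the set_timer call, which programs the
 * next virtual timer event for the calling VCPU.
 */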
static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	u64 next_cycle;

	if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
		return -EINVAL;

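	/* On RV32, the 64-bit timer value is split across a0 (low) and a1 (high). */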
#if __riscv_xlen == 32
	next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
#else
	next_cycle = (u64)cp->a0;
#endif
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};

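/*
 * SBI IPI extension: handles the send_ipi call by injecting a virtual
 * supervisor software interrupt into each targeted VCPU.
 */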
static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				   unsigned long *out_val,
				   struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	unsigned long i;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;

	if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
		return -EINVAL;

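	/* Per the SBI spec, a hart mask base of -1 means all harts are targeted. */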
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
				continue;
		}
		ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
		if (ret < 0)
			break;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
	.extid_start = SBI_EXT_IPI,
	.extid_end = SBI_EXT_IPI,
	.handler = kvm_sbi_ext_ipi_handler,
};

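/*
 * SBI RFENCE extension: forwards remote fence requests from the guest
 * to the corresponding KVM guest-TLB flush helpers.
 */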
static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long *out_val,
				      struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;

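	/*
	 * a0/a1 carry the hart mask and base; a2/a3 carry the virtual
	 * address range (start, size); a4 carries the ASID where used.
	 */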
	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		kvm_riscv_fence_i(vcpu->kvm, hbase, hmask);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_all(vcpu->kvm, hbase, hmask);
		else
			kvm_riscv_hfence_vvma_gva(vcpu->kvm, hbase, hmask,
						  cp->a2, cp->a3, PAGE_SHIFT);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		if (cp->a2 == 0 && cp->a3 == 0)
			kvm_riscv_hfence_vvma_asid_all(vcpu->kvm,
						       hbase, hmask, cp->a4);
		else
			kvm_riscv_hfence_vvma_asid_gva(vcpu->kvm,
						       hbase, hmask,
						       cp->a2, cp->a3,
						       PAGE_SHIFT, cp->a4);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
		/*
		 * Until nested virtualization is implemented, the
		 * SBI HFENCE calls should be treated as NOPs
		 */
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
	.extid_start = SBI_EXT_RFENCE,
	.extid_end = SBI_EXT_RFENCE,
	.handler = kvm_sbi_ext_rfence_handler,
};

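/*
 * SBI SRST extension: translates guest system_reset calls into KVM
 * system events that are delivered to userspace.
 */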
static int kvm_sbi_ext_srst_handler(struct kvm_vcpu *vcpu,
				    struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long funcid = cp->a6;
	u32 reason = cp->a1;
	u32 type = cp->a0;
	int ret = 0;

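	/* System reset (type in a0, reason in a1) is the only SRST function handled. */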
	switch (funcid) {
	case SBI_EXT_SRST_RESET:
		switch (type) {
		case SBI_SRST_RESET_TYPE_SHUTDOWN:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_SHUTDOWN,
						reason);
			*exit = true;
			break;
		case SBI_SRST_RESET_TYPE_COLD_REBOOT:
		case SBI_SRST_RESET_TYPE_WARM_REBOOT:
			kvm_riscv_vcpu_sbi_system_reset(vcpu, run,
						KVM_SYSTEM_EVENT_RESET,
						reason);
			*exit = true;
			break;
		default:
			ret = -EOPNOTSUPP;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
	.extid_start = SBI_EXT_SRST,
	.extid_end = SBI_EXT_SRST,
	.handler = kvm_sbi_ext_srst_handler,
};