// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *	Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

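/*
 * WFI is matched against all 32 bits because only the canonical
 * encoding (funct12 = 0x105, rs1 = x0, rd = x0, SYSTEM opcode) is a
 * valid WFI; anything else must be forwarded as an illegal instruction.
 */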
#define INSN_MASK_WFI		0xffffffff
#define INSN_MATCH_WFI		0x10500073

#define INSN_MATCH_LB		0x3
#define INSN_MASK_LB		0x707f
#define INSN_MATCH_LH		0x1003
#define INSN_MASK_LH		0x707f
#define INSN_MATCH_LW		0x2003
#define INSN_MASK_LW		0x707f
#define INSN_MATCH_LD		0x3003
#define INSN_MASK_LD		0x707f
#define INSN_MATCH_LBU		0x4003
#define INSN_MASK_LBU		0x707f
#define INSN_MATCH_LHU		0x5003
#define INSN_MASK_LHU		0x707f
#define INSN_MATCH_LWU		0x6003
#define INSN_MASK_LWU		0x707f
#define INSN_MATCH_SB		0x23
#define INSN_MASK_SB		0x707f
#define INSN_MATCH_SH		0x1023
#define INSN_MASK_SH		0x707f
#define INSN_MATCH_SW		0x2023
#define INSN_MASK_SW		0x707f
#define INSN_MATCH_SD		0x3023
#define INSN_MASK_SD		0x707f

#define INSN_MATCH_C_LD		0x6000
#define INSN_MASK_C_LD		0xe003
#define INSN_MATCH_C_SD		0xe000
#define INSN_MASK_C_SD		0xe003
#define INSN_MATCH_C_LW		0x4000
#define INSN_MASK_C_LW		0xe003
#define INSN_MATCH_C_SW		0xc000
#define INSN_MASK_C_SW		0xe003
#define INSN_MATCH_C_LDSP	0x6002
#define INSN_MASK_C_LDSP	0xe003
#define INSN_MATCH_C_SDSP	0xe002
#define INSN_MASK_C_SDSP	0xe003
#define INSN_MATCH_C_LWSP	0x4002
#define INSN_MASK_C_LWSP	0xe003
#define INSN_MATCH_C_SWSP	0xc002
#define INSN_MASK_C_SWSP	0xe003

#define INSN_16BIT_MASK		0x3

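/*
 * Encodings whose two lowest bits are both 1 are 32-bit instructions;
 * any other value of insn[1:0] denotes a 16-bit compressed (RVC)
 * instruction, which is all INSN_IS_16BIT() needs to check here.
 */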
#define INSN_IS_16BIT(insn)	(((insn) & INSN_16BIT_MASK) != INSN_16BIT_MASK)

#define INSN_LEN(insn)		(INSN_IS_16BIT(insn) ? 2 : 4)

#ifdef CONFIG_64BIT
#define LOG_REGBYTES		3
#else
#define LOG_REGBYTES		2
#endif
#define REGBYTES		(1 << LOG_REGBYTES)

#define SH_RD			7
#define SH_RS1			15
#define SH_RS2			20
#define SH_RS2C			2

#define RV_X(x, s, n)		(((x) >> (s)) & ((1 << (n)) - 1))
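/*
 * RVC immediates are stored scrambled: C.LW, for example, keeps
 * offset[5:3] in insn[12:10] and offset[2|6] in insn[6:5], which
 * RVC_LW_IMM() reassembles as (insn[6] << 2) | (insn[12:10] << 3) |
 * (insn[5] << 6). The 3-bit rd'/rs2' fields of compressed encodings
 * can only name x8-x15, hence the "8 +" in RVC_RS1S()/RVC_RS2S().
 */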
#define RVC_LW_IMM(x)		((RV_X(x, 6, 1) << 2) | \
				 (RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 1) << 6))
#define RVC_LD_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 5, 2) << 6))
#define RVC_LWSP_IMM(x)		((RV_X(x, 4, 3) << 2) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 2) << 6))
#define RVC_LDSP_IMM(x)		((RV_X(x, 5, 2) << 3) | \
				 (RV_X(x, 12, 1) << 5) | \
				 (RV_X(x, 2, 3) << 6))
#define RVC_SWSP_IMM(x)		((RV_X(x, 9, 4) << 2) | \
				 (RV_X(x, 7, 2) << 6))
#define RVC_SDSP_IMM(x)		((RV_X(x, 10, 3) << 3) | \
				 (RV_X(x, 7, 3) << 6))
#define RVC_RS1S(insn)		(8 + RV_X(insn, SH_RD, 3))
#define RVC_RS2S(insn)		(8 + RV_X(insn, SH_RS2C, 3))
#define RVC_RS2(insn)		RV_X(insn, SH_RS2C, 5)

#define SHIFT_RIGHT(x, y)		\
	((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))

#define REG_MASK			\
	((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))

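/*
 * REG_OFFSET() shifts the 5-bit register field directly into a byte
 * offset instead of extracting it and multiplying by REGBYTES.
 * Worked example on 64-bit: for rd = x10 in bits 11:7,
 * SHIFT_RIGHT(insn, 7 - 3) leaves 10 * 8 in bits 7:3 and REG_MASK
 * discards everything else, yielding the offset of x10 in the GPR
 * array at the start of struct kvm_cpu_context.
 */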
#define REG_OFFSET(insn, pos)		\
	(SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)

#define REG_PTR(insn, pos, regs)	\
	((ulong *)((ulong)(regs) + REG_OFFSET(insn, pos)))

#define GET_RM(insn)		(((insn) >> 12) & 7)

#define GET_RS1(insn, regs)	(*REG_PTR(insn, SH_RS1, regs))
#define GET_RS2(insn, regs)	(*REG_PTR(insn, SH_RS2, regs))
#define GET_RS1S(insn, regs)	(*REG_PTR(RVC_RS1S(insn), 0, regs))
#define GET_RS2S(insn, regs)	(*REG_PTR(RVC_RS2S(insn), 0, regs))
#define GET_RS2C(insn, regs)	(*REG_PTR(insn, SH_RS2C, regs))
#define GET_SP(regs)		(*REG_PTR(2, 0, regs))
#define SET_RD(insn, regs, val)	(*REG_PTR(insn, SH_RD, regs) = (val))
#define IMM_I(insn)		((s32)(insn) >> 20)
#define IMM_S(insn)		(((s32)(insn) >> 25 << 5) | \
				 (s32)(((insn) >> 7) & 0x1f))
#define MASK_FUNCT3		0x7000

static int truly_illegal_insn(struct kvm_vcpu *vcpu,
			      struct kvm_run *run,
			      ulong insn)
{
	struct kvm_cpu_trap utrap = { 0 };

	/* Redirect trap to Guest VCPU */
	utrap.sepc = vcpu->arch.guest_context.sepc;
	utrap.scause = EXC_INST_ILLEGAL;
	utrap.stval = insn;
	kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);

	return 1;
}

static int system_opcode_insn(struct kvm_vcpu *vcpu,
			      struct kvm_run *run,
			      ulong insn)
{
	if ((insn & INSN_MASK_WFI) == INSN_MATCH_WFI) {
		vcpu->stat.wfi_exit_stat++;
		kvm_riscv_vcpu_wfi(vcpu);
		vcpu->arch.guest_context.sepc += INSN_LEN(insn);
		return 1;
	}

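	/*
	 * Any other SYSTEM-opcode instruction that raised a
	 * virtual-instruction trap is not emulated here; inject it
	 * back into the guest as an illegal instruction.
	 */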
	return truly_illegal_insn(vcpu, run, insn);
}

static int virtual_inst_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			      struct kvm_cpu_trap *trap)
{
	unsigned long insn = trap->stval;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct;

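	/*
	 * stval normally holds the trapping virtual instruction, but an
	 * implementation may legally write zero instead; in that case
	 * re-read the instruction from guest memory at sepc.
	 */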
	if (unlikely(INSN_IS_16BIT(insn))) {
		if (insn == 0) {
			ct = &vcpu->arch.guest_context;
			insn = kvm_riscv_vcpu_unpriv_read(vcpu, true,
							  ct->sepc,
							  &utrap);
			if (utrap.scause) {
				utrap.sepc = ct->sepc;
				kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
				return 1;
			}
		}
		if (INSN_IS_16BIT(insn))
			return truly_illegal_insn(vcpu, run, insn);
	}

	switch ((insn & INSN_OPCODE_MASK) >> INSN_OPCODE_SHIFT) {
	case INSN_OPCODE_SYSTEM:
		return system_opcode_insn(vcpu, run, insn);
	default:
		return truly_illegal_insn(vcpu, run, insn);
	}
}

static int emulate_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
			unsigned long fault_addr, unsigned long htinst)
{
	u8 data_buf[8];
	unsigned long insn;
	int shift = 0, len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
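		/*
		 * Per the H-extension spec, bit[1] of a transformed
		 * standard instruction is 1 for a 32-bit original and
		 * 0 for a 16-bit one. OR-ing in INSN_16BIT_MASK makes
		 * the transformed value decode as a normal 32-bit
		 * encoding either way.
		 */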
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

	/* Decode length of MMIO and shift */
	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LB) == INSN_MATCH_LB) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LBU) == INSN_MATCH_LBU) {
		len = 1;
		shift = 8 * (sizeof(ulong) - len);
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
		len = 4;
#endif
	} else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
		len = 2;
		shift = 8 * (sizeof(ulong) - len);
	} else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		shift = 8 * (sizeof(ulong) - len);
#endif
	} else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
		insn = RVC_RS2S(insn) << SH_RD;
	} else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		shift = 8 * (sizeof(ulong) - len);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = shift;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = false;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_addr, len, data_buf)) {
		/* Successfully handled MMIO access in the kernel so resume */
		memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}
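
/*
 * Userspace side of the KVM_EXIT_MMIO read path above (a minimal
 * sketch, not part of this file; dev_read() is a placeholder for the
 * VMM's device model):
 *
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 *	if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write) {
 *		dev_read(run->mmio.phys_addr, run->mmio.len,
 *			 run->mmio.data);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 *
 * On re-entry, kvm_riscv_vcpu_mmio_return() writes run->mmio.data to
 * the destination register and advances sepc.
 */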

static int emulate_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
			 unsigned long fault_addr, unsigned long htinst)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong data;
	unsigned long insn;
	int len = 0, insn_len = 0;
	struct kvm_cpu_trap utrap = { 0 };
	struct kvm_cpu_context *ct = &vcpu->arch.guest_context;

	/* Determine trapped instruction */
	if (htinst & 0x1) {
		/*
		 * Bit[0] == 1 implies trapped instruction value is
		 * transformed instruction or custom instruction.
		 */
		insn = htinst | INSN_16BIT_MASK;
		insn_len = (htinst & BIT(1)) ? INSN_LEN(insn) : 2;
	} else {
		/*
		 * Bit[0] == 0 implies trapped instruction value is
		 * zero or special value.
		 */
		insn = kvm_riscv_vcpu_unpriv_read(vcpu, true, ct->sepc,
						  &utrap);
		if (utrap.scause) {
			/* Redirect trap if we failed to read instruction */
			utrap.sepc = ct->sepc;
			kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
			return 1;
		}
		insn_len = INSN_LEN(insn);
	}

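	/*
	 * Chained assignment truncates the source register to every
	 * access width up front; the decode below either uses the
	 * matching one or overrides it for compressed encodings.
	 */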
	data = GET_RS2(insn, &vcpu->arch.guest_context);
	data8 = data16 = data32 = data64 = data;

	if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
		len = 4;
	} else if ((insn & INSN_MASK_SB) == INSN_MATCH_SB) {
		len = 1;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#ifdef CONFIG_64BIT
	} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
		len = 8;
		data64 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 8;
		data64 = GET_RS2C(insn, &vcpu->arch.guest_context);
#endif
	} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
		len = 4;
		data32 = GET_RS2S(insn, &vcpu->arch.guest_context);
	} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		data32 = GET_RS2C(insn, &vcpu->arch.guest_context);
	} else {
		return -EOPNOTSUPP;
	}

	/* Fault address should be aligned to length of MMIO */
	if (fault_addr & (len - 1))
		return -EIO;

	/* Save instruction decode info */
	vcpu->arch.mmio_decode.insn = insn;
	vcpu->arch.mmio_decode.insn_len = insn_len;
	vcpu->arch.mmio_decode.shift = 0;
	vcpu->arch.mmio_decode.len = len;
	vcpu->arch.mmio_decode.return_handled = 0;

	/* Copy data to kvm_run instance */
	switch (len) {
	case 1:
		*((u8 *)run->mmio.data) = data8;
		break;
	case 2:
		*((u16 *)run->mmio.data) = data16;
		break;
	case 4:
		*((u32 *)run->mmio.data) = data32;
		break;
	case 8:
		*((u64 *)run->mmio.data) = data64;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Update MMIO details in kvm_run struct */
	run->mmio.is_write = true;
	run->mmio.phys_addr = fault_addr;
	run->mmio.len = len;

	/* Try to handle MMIO access in the kernel */
	if (!kvm_io_bus_write(vcpu, KVM_MMIO_BUS,
			      fault_addr, len, run->mmio.data)) {
		/* Successfully handled MMIO access in the kernel so resume */
		vcpu->stat.mmio_exit_kernel++;
		kvm_riscv_vcpu_mmio_return(vcpu, run);
		return 1;
	}

	/* Exit to userspace for MMIO emulation */
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;

	return 0;
}

static int gstage_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
			     struct kvm_cpu_trap *trap)
{
	struct kvm_memory_slot *memslot;
	unsigned long hva, fault_addr;
	bool writable;
	gfn_t gfn;
	int ret;

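	/*
	 * htval holds the faulting guest-physical address shifted right
	 * by two; the low two bits are recovered from stval.
	 */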
	fault_addr = (trap->htval << 2) | (trap->stval & 0x3);
	gfn = fault_addr >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);

	if (kvm_is_error_hva(hva) ||
	    (trap->scause == EXC_STORE_GUEST_PAGE_FAULT && !writable)) {
		switch (trap->scause) {
		case EXC_LOAD_GUEST_PAGE_FAULT:
			return emulate_load(vcpu, run, fault_addr,
					    trap->htinst);
		case EXC_STORE_GUEST_PAGE_FAULT:
			return emulate_store(vcpu, run, fault_addr,
					     trap->htinst);
		default:
			return -EOPNOTSUPP;
		}
	}

	ret = kvm_riscv_gstage_map(vcpu, memslot, fault_addr, hva,
				   trap->scause == EXC_STORE_GUEST_PAGE_FAULT);
	if (ret < 0)
		return ret;

	return 1;
}

/**
 * kvm_riscv_vcpu_wfi -- Emulate wait for interrupt (WFI) behaviour
 *
 * @vcpu: The VCPU pointer
 */
void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	if (!kvm_arch_vcpu_runnable(vcpu)) {
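		/*
		 * Drop the memslot SRCU read lock while the VCPU is
		 * blocked so that memslot updates are not stalled for
		 * the whole sleep.
		 */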
		kvm_vcpu_srcu_read_unlock(vcpu);
		kvm_vcpu_halt(vcpu);
		kvm_vcpu_srcu_read_lock(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}
}

/**
 * kvm_riscv_vcpu_unpriv_read -- Read machine word from Guest memory
 *
 * @vcpu: The VCPU pointer
 * @read_insn: Flag representing whether we are reading instruction
 * @guest_addr: Guest address to read
 * @trap: Output pointer to trap details
 */
unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
					 bool read_insn,
					 unsigned long guest_addr,
					 struct kvm_cpu_trap *trap)
{
	register unsigned long taddr asm("a0") = (unsigned long)trap;
	register unsigned long ttmp asm("a1");
	register unsigned long val asm("t0");
	register unsigned long tmp asm("t1");
	register unsigned long addr asm("t2") = guest_addr;
	unsigned long flags;
	unsigned long old_stvec, old_hstatus;

	local_irq_save(flags);

	old_hstatus = csr_swap(CSR_HSTATUS, vcpu->arch.guest_context.hstatus);
	old_stvec = csr_swap(CSR_STVEC, (ulong)&__kvm_riscv_unpriv_trap);

	if (read_insn) {
		/*
		 * HLVX.HU instruction
		 * 0110010 00011 rs1 100 rd 1110011
		 */
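		/*
		 * Fetch the first halfword; if its low two bits are 11
		 * the instruction is 32-bit, so fetch the second
		 * halfword as well and merge it in.
		 */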
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
			/*
			 * HLVX.HU %[val], (%[addr])
			 * HLVX.HU t0, (t2)
			 * 0110010 00011 00111 100 00101 1110011
			 */
			".word 0x6433c2f3\n"
			"andi %[tmp], %[val], 3\n"
			"addi %[tmp], %[tmp], -3\n"
			"bne %[tmp], zero, 2f\n"
			"addi %[addr], %[addr], 2\n"
			/*
			 * HLVX.HU %[tmp], (%[addr])
			 * HLVX.HU t1, (t2)
			 * 0110010 00011 00111 100 00110 1110011
			 */
			".word 0x6433c373\n"
			"sll %[tmp], %[tmp], 16\n"
			"add %[val], %[val], %[tmp]\n"
			"2:\n"
			".option pop"
		: [val] "=&r" (val), [tmp] "=&r" (tmp),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp),
		  [addr] "+&r" (addr) : : "memory");

		if (trap->scause == EXC_LOAD_PAGE_FAULT)
			trap->scause = EXC_INST_PAGE_FAULT;
	} else {
		/*
		 * HLV.D instruction
		 * 0110110 00000 rs1 100 rd 1110011
		 *
		 * HLV.W instruction
		 * 0110100 00000 rs1 100 rd 1110011
		 */
		asm volatile ("\n"
			".option push\n"
			".option norvc\n"
			"add %[ttmp], %[taddr], 0\n"
#ifdef CONFIG_64BIT
			/*
			 * HLV.D %[val], (%[addr])
			 * HLV.D t0, (t2)
			 * 0110110 00000 00111 100 00101 1110011
			 */
			".word 0x6c03c2f3\n"
#else
			/*
			 * HLV.W %[val], (%[addr])
			 * HLV.W t0, (t2)
			 * 0110100 00000 00111 100 00101 1110011
			 */
			".word 0x6803c2f3\n"
#endif
			".option pop"
		: [val] "=&r" (val),
		  [taddr] "+&r" (taddr), [ttmp] "+&r" (ttmp)
		: [addr] "r" (addr) : "memory");
	}

	csr_write(CSR_STVEC, old_stvec);
	csr_write(CSR_HSTATUS, old_hstatus);

	local_irq_restore(flags);

	return val;
}

/**
 * kvm_riscv_vcpu_trap_redirect -- Redirect trap to Guest
 *
 * @vcpu: The VCPU pointer
 * @trap: Trap details
 */
void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
				  struct kvm_cpu_trap *trap)
{
	unsigned long vsstatus = csr_read(CSR_VSSTATUS);

	/* Change Guest SSTATUS.SPP bit */
	vsstatus &= ~SR_SPP;
	if (vcpu->arch.guest_context.sstatus & SR_SPP)
		vsstatus |= SR_SPP;

	/* Change Guest SSTATUS.SPIE bit */
	vsstatus &= ~SR_SPIE;
	if (vsstatus & SR_SIE)
		vsstatus |= SR_SPIE;

	/* Clear Guest SSTATUS.SIE bit */
	vsstatus &= ~SR_SIE;

	/* Update Guest SSTATUS */
	csr_write(CSR_VSSTATUS, vsstatus);

	/* Update Guest SCAUSE, STVAL, and SEPC */
	csr_write(CSR_VSCAUSE, trap->scause);
	csr_write(CSR_VSTVAL, trap->stval);
	csr_write(CSR_VSEPC, trap->sepc);

	/* Set Guest PC to Guest exception vector */
	vcpu->arch.guest_context.sepc = csr_read(CSR_VSTVEC);
}

/**
 * kvm_riscv_vcpu_mmio_return -- Handle MMIO loads after user space emulation
 *			      or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	u8 data8;
	u16 data16;
	u32 data32;
	u64 data64;
	ulong insn;
	int len, shift;

	if (vcpu->arch.mmio_decode.return_handled)
		return 0;

	vcpu->arch.mmio_decode.return_handled = 1;
	insn = vcpu->arch.mmio_decode.insn;

	if (run->mmio.is_write)
		goto done;

	len = vcpu->arch.mmio_decode.len;
	shift = vcpu->arch.mmio_decode.shift;

	switch (len) {
	case 1:
		data8 = *((u8 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data8 << shift >> shift);
		break;
	case 2:
		data16 = *((u16 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data16 << shift >> shift);
		break;
	case 4:
		data32 = *((u32 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data32 << shift >> shift);
		break;
	case 8:
		data64 = *((u64 *)run->mmio.data);
		SET_RD(insn, &vcpu->arch.guest_context,
		       (ulong)data64 << shift >> shift);
		break;
	default:
		return -EOPNOTSUPP;
	}

done:
	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += vcpu->arch.mmio_decode.insn_len;

	return 0;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
			struct kvm_cpu_trap *trap)
{
	int ret;

	/* If we got host interrupt then do nothing */
	if (trap->scause & CAUSE_IRQ_FLAG)
		return 1;

	/* Handle guest traps */
	ret = -EFAULT;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	switch (trap->scause) {
	case EXC_VIRTUAL_INST_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = virtual_inst_fault(vcpu, run, trap);
		break;
	case EXC_INST_GUEST_PAGE_FAULT:
	case EXC_LOAD_GUEST_PAGE_FAULT:
	case EXC_STORE_GUEST_PAGE_FAULT:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = gstage_page_fault(vcpu, run, trap);
		break;
	case EXC_SUPERVISOR_SYSCALL:
		if (vcpu->arch.guest_context.hstatus & HSTATUS_SPV)
			ret = kvm_riscv_vcpu_sbi_ecall(vcpu, run);
		break;
	default:
		break;
	}

	/* Print details in case of error */
	if (ret < 0) {
		kvm_err("VCPU exit error %d\n", ret);
		kvm_err("SEPC=0x%lx SSTATUS=0x%lx HSTATUS=0x%lx\n",
			vcpu->arch.guest_context.sepc,
			vcpu->arch.guest_context.sstatus,
			vcpu->arch.guest_context.hstatus);
		kvm_err("SCAUSE=0x%lx STVAL=0x%lx HTVAL=0x%lx HTINST=0x%lx\n",
			trap->scause, trap->stval, trap->htval, trap->htinst);
	}

	return ret;
}