// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2020
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/page-states.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

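/*
 * Handle an intercept for a runtime-instrumentation instruction: if the
 * guest may use facility 64, enable RI lazily in the SIE block and retry
 * the instruction; otherwise inject an operation exception.
 */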
static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

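/*
 * Dispatch the 0xaa opcode space: the low nibble of the IPA selects the
 * function; codes 0 through 4 are treated as runtime-instrumentation
 * instructions.
 */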
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

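/*
 * Handle an intercept for a guarded-storage instruction: if the guest may
 * use facility 133, load the guarded-storage control block, enable GS in
 * the SIE block and retry the instruction; otherwise inject an operation
 * exception.
 */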
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

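/*
 * Dispatch the 0xe3 opcode space: only the two guarded-storage
 * instructions (function codes 0x49 and 0x4d) are handled here.
 */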
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	/*
	 * To set the TOD clock the kvm lock must be taken, but the vcpu lock
	 * is already held in handle_set_clock. The usual lock order is the
	 * opposite. As SCK is deprecated and should not be used in several
	 * cases, for example when the multiple epoch facility or TOD clock
	 * steering facility is installed (see Principles of Operation), a
	 * slow path can be used. If the lock cannot be taken via try_lock,
	 * the instruction will be retried via -EAGAIN at a later point in
	 * time.
	 */
	if (!kvm_s390_try_set_tod_clock(vcpu->kvm, &gtod)) {
		kvm_s390_retry_instr(vcpu);
		return -EAGAIN;
	}

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

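/* Handle SPX (SET PREFIX) interception */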
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

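/* Handle STPX (STORE PREFIX) interception */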
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

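/* Handle STAP (STORE CPU ADDRESS) interception */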
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

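/*
 * Enable storage-key handling for the guest on first use of a key-related
 * instruction: convert the mm to use storage keys, drop the keyless-subset
 * state and choose between storage-key facility interpretation and
 * intercepting ISKE/SSKE/RRBE.
 */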
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* Already enabled? */
	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

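/* Handle ISKE (INSERT STORAGE KEY EXTENDED) interception */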
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

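/* Handle RRBE (RESET REFERENCE BIT EXTENDED) interception */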
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	mmap_read_lock(current->mm);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			mmap_read_unlock(current->mm);
			goto retry;
		}
	}
	mmap_read_unlock(current->mm);
	if (rc == -EFAULT)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (rc < 0)
		return rc;
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
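/* Handle SSKE (SET STORAGE KEY EXTENDED) interception */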
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		mmap_read_lock(current->mm);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		mmap_read_unlock(current->mm);
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (rc == -EAGAIN)
			continue;
		if (rc < 0)
			return rc;
		start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

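/*
 * Handle instructions intercepted because another CPU holds the IPTE lock:
 * wait until the lock is free, then retry the instruction.
 */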
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

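/* Handle TB (TEST BLOCK) interception */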
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

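/* Handle TPI (TEST PENDING INTERRUPTION) interception */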
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

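/* Handle TSCH (TEST SUBCHANNEL) interception */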
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

/*
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu having issued the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available: return
 * response code 01 to the guest and set CC to 3.
 * Otherwise return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
	struct ap_queue_status status = {};
	crypto_hook pqap_hook;
	unsigned long reg0;
	int ret;
	uint8_t fc;

	/* Verify that the AP instructions are available */
	if (!ap_instructions_available())
		return -EOPNOTSUPP;
	/* Verify that the guest is allowed to use AP instructions */
	if (!(vcpu->arch.sie_block->eca & ECA_APIE))
		return -EOPNOTSUPP;
	/*
	 * The only possibly intercepted functions when AP instructions are
	 * available for the guest are AQIC and TAPQ with the t bit set.
	 * Since we do not set IC.3 (FIII), we currently will only intercept
	 * the AQIC function code.
	 * Note: running nested under z/VM can result in intercepts for other
	 * function codes, e.g. PQAP(QCI). We do not support this and bail out.
	 */
	reg0 = vcpu->run->s.regs.gprs[0];
	fc = (reg0 >> 24) & 0xff;
	if (fc != 0x03)
		return -EOPNOTSUPP;

	/* PQAP instruction is allowed for guest kernel only */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Common PQAP instruction specification exceptions */
	/* bits 41-47 must all be zeros */
	if (reg0 & 0x007f0000UL)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APFT not installed and T bit set */
	if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* APXA not installed and APID greater than 64 or APQI greater than 16 */
	if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* AQIC function code specific exception */
	/* facility 65 not present for AQIC function code */
	if (!test_kvm_facility(vcpu->kvm, 65))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/*
	 * If the hook callback is registered, there will be a pointer to the
	 * hook function pointer in the kvm_s390_crypto structure. Lock the
	 * owner, retrieve the hook function pointer and call the hook.
	 */
	down_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	if (vcpu->kvm->arch.crypto.pqap_hook) {
		pqap_hook = *vcpu->kvm->arch.crypto.pqap_hook;
		ret = pqap_hook(vcpu);
		if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
			kvm_s390_set_psw_cc(vcpu, 3);
		up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
		return ret;
	}
	up_read(&vcpu->kvm->arch.crypto.pqap_hook_rwsem);
	/*
	 * A vfio_driver must register a hook.
	 * No hook means no driver to enable the SIE CRYCB and no queues.
	 * We send this response to the guest.
	 */
	status.response_code = 0x01;
	memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

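/* Handle STFL (STORE FACILITY LIST) interception */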
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

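/*
 * Check a guest PSW: reject unassigned mask bits, an address that does not
 * fit the addressing mode, the invalid EA-only mode and odd addresses.
 */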
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

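/* Handle LPSW (LOAD PSW) interception */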
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

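/* Handle LPSWE (LOAD PSW EXTENDED) interception */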
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

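/* Handle STIDP (STORE CPU ID) interception */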
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

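/*
 * For STSI 3.2.2 (hypervisor configuration): shift down any entries
 * reported by a lower-level hypervisor and insert KVM's own guest entry
 * at the front of the list.
 */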
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

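/* Handle STSI (STORE SYSTEM INFORMATION) interception */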
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00 ||
	    vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (!kvm_s390_pv_cpu_is_protected(vcpu) && (operand2 & 0xfff))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL_ACCOUNT);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}
	if (kvm_s390_pv_cpu_is_protected(vcpu)) {
		memcpy((void *)sida_origin(vcpu->arch.sie_block), (void *)mem,
		       PAGE_SIZE);
		rc = 0;
	} else {
		rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	}
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

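/* Dispatch the 0xb2xx instruction intercepts to their handlers */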
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xaf:
		return handle_pqap(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

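/* Handle EPSW (EXTRACT PSW) interception */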
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

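/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */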
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/*
		 * only support 2G frame size if EDAT2 is available and we are
		 * not in 24-bit addressing mode
		 */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			mmap_read_lock(current->mm);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			mmap_read_unlock(current->mm);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_lock, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

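/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception */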
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			mmap_write_lock(vcpu->kvm->mm);
			vcpu->kvm->mm->context.uses_cmm = 1;
			mmap_write_unlock(vcpu->kvm->mm);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		mmap_read_lock(vcpu->kvm->mm);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		mmap_read_unlock(vcpu->kvm->mm);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	mmap_read_lock(gmap->mm);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	mmap_read_unlock(gmap->mm);
	return 0;
}

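/* Dispatch the 0xb9xx instruction intercepts to their handlers */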
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

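/* Handle LCTL (LOAD CONTROL) interception */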
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

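/* Handle STCTL (STORE CONTROL) interception */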
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

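/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */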
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

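/* Handle STCTG (STORE CONTROL, 64-bit) interception */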
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

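/* Handle TPROT (TEST PROTECTION) interception */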
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address, operand2;
	unsigned long gpa;
	u8 access_key;
	bool writable;
	int ret, cc;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address, &operand2, &ar, NULL);
	access_key = (operand2 & 0xf0) >> 4;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);

	ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
					       GACC_STORE, access_key);
	if (ret == 0) {
		gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	} else if (ret == PGM_PROTECTION) {
		writable = false;
		/* Write protected? Try again with read-only... */
		ret = guest_translate_address_with_key(vcpu, address, ar, &gpa,
						       GACC_FETCH, access_key);
	}
	if (ret >= 0) {
		cc = -1;

		/* Fetching permitted; storing permitted */
		if (ret == 0 && writable)
			cc = 0;
		/* Fetching permitted; storing not permitted */
		else if (ret == 0 && !writable)
			cc = 1;
		/* Fetching not permitted; storing not permitted */
		else if (ret == PGM_PROTECTION)
			cc = 2;
		/* Translation not available */
		else if (ret != PGM_ADDRESSING && ret != PGM_TRANSLATION_SPEC)
			cc = 3;

		if (cc != -1) {
			kvm_s390_set_psw_cc(vcpu, cc);
			ret = 0;
		} else {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		}
	}

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

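/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */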
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

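/* Handle PTFF (PERFORM TIMING FACILITY FUNCTION) interception */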
static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}