/*
 * priv.c - handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include "gaccess.h"
#include "kvm-s390.h"

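/*
 * Most handlers below decode an S-format storage operand from the
 * instruction parameter block (ipb) of the SIE control block: the top
 * four bits of the second instruction halfword select the base
 * register, the following twelve bits hold the displacement.
 */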
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* get the value */
	if (get_guest_u32(vcpu, operand2, &address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

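	/*
	 * The prefix designates an 8KB area: keep only the bits that
	 * address an 8KB-aligned block and probe both 4KB pages of the
	 * new prefix area before switching to it.
	 */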
	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
out:
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	/* must be word boundary */
	if (operand2 & 3) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* get the value */
	if (put_guest_u32(vcpu, operand2, address)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
out:
	return 0;
}

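/*
 * STORE CPU ADDRESS: write the 16-bit CPU address (the vcpu id) to the
 * halfword-aligned second operand.
 */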
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 useraddr;
	int rc;

	vcpu->stat.instruction_stap++;
	useraddr = disp2;
	if (base2)
		useraddr += vcpu->run->s.regs.gprs[base2];

	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
out:
	return 0;
}

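/*
 * Storage-key instructions are not emulated here: back the PSW up by
 * the 4-byte instruction length so the guest re-executes the operation.
 */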
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;
	vcpu->arch.sie_block->gpsw.addr -= 4;
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

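/*
 * Channel I/O is not emulated in the kernel: STSCH and CHSC report
 * condition code 3 to the guest.  The condition code occupies bits
 * 18-19 of the PSW, i.e. bits 44-45 counted from the least significant
 * end of the mask.
 */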
static int handle_stsch(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_stsch++;
	VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

static int handle_chsc(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_chsc++;
	VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
	return 0;
}

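/*
 * STORE FACILITY LIST: copy a masked version of the host facility bits
 * into the guest's lowcore, hiding facilities that are not virtualized.
 */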
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	unsigned int facility_list;
	int rc;

	vcpu->stat.instruction_stfl++;
	/* only pass the facility bits, which we can handle */
	facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   &facility_list, sizeof(facility_list));
	if (rc == -EFAULT)
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	else
		VCPU_EVENT(vcpu, 5, "store facility list value %x",
			   facility_list);
	return 0;
}

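/*
 * STORE CPU ID: write the prepared CPU identification doubleword
 * (vcpu->arch.stidp_data) to the doubleword-aligned second operand.
 */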
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
	if (rc == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
	return 0;
}

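/*
 * STSI 3.2.2 (VM-level CPU description): take the result provided by
 * the layer below and insert an entry for this KVM instance at index 0
 * (online vcpu count, capability adjustment factor 1000, EBCDIC name
 * and control program identifier), shifting any existing entries up.
 */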
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	int cpus = 0;
	int n;

	spin_lock(&fi->lock);
	for (n = 0; n < KVM_MAX_VCPUS; n++)
		if (fi->local_int[n])
			cpus++;
	spin_unlock(&fi->lock);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2) == -ENOSYS)
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

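/*
 * STORE SYSTEM INFORMATION: function code and selectors come from guest
 * registers 0 and 1.  Function code 0 only reports the highest supported
 * function code (3) in register 0; codes 1-3 fill a page and copy it to
 * the page-aligned operand.  Anything else yields condition code 3.
 */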
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	operand2 = disp2;
	if (base2)
		operand2 += vcpu->run->s.regs.gprs[base2];

	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	free_page(mem);
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}

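/*
 * Dispatch table for intercepted B2xx instructions, indexed by the low
 * byte of the opcode found in the IPA field of the SIE control block.
 * Opcodes without a handler are passed on to userspace.
 */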
static intercept_handler_t priv_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x34] = handle_stsch,
	[0x5f] = handle_chsc,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * Many B2 instructions are privileged.  We first check for the
	 * privileged ones that we can handle in the kernel.  If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.  Anything else goes to userspace.
	 */
	handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
	int base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	int disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	int base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	int disp2 = vcpu->arch.sie_block->ipb & 0x0fff;
	u64 address1 = disp1 + (base1 ? vcpu->run->s.regs.gprs[base1] : 0);
	u64 address2 = disp2 + (base2 ? vcpu->run->s.regs.gprs[base2] : 0);
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;


	/* we must resolve the address without holding the mmap semaphore.
	 * This is ok since the userspace hypervisor is not supposed to change
	 * the mapping while the guest queries the memory. Otherwise the guest
	 * might crash or get wrong info anyway. */
	user_address = (unsigned long) __guestaddr_to_user(vcpu, address1);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, user_address);
	if (!vma) {
		up_read(&current->mm->mmap_sem);
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

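	/*
	 * TPROT condition codes: leave CC 0 when the host mapping allows
	 * both fetch and store, set CC 1 when it is read-only and CC 2
	 * when it permits neither read nor write.
	 */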
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}