/* SPDX-License-Identifier: GPL-2.0 */
/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2014
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include "kvm-s390.h"

/**
 * _kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @prefix: guest prefix
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra by applying the given prefix.
 */
static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}
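
/*
 * Worked example (editor's sketch, not part of the original header): with
 * a prefix of 0x20000 and 4 KiB pages, the lowest two real pages and the
 * two pages starting at the prefix swap places in absolute storage:
 *
 *	_kvm_s390_real_to_abs(0x20000, 0x1000)  == 0x21000
 *	_kvm_s390_real_to_abs(0x20000, 0x21000) == 0x1000
 *	_kvm_s390_real_to_abs(0x20000, 0x50000) == 0x50000	(unchanged)
 */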

/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu: guest virtual cpu
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
{
	return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
}

/**
 * _kvm_s390_logical_to_effective - convert guest logical to effective address
 * @psw: psw of the guest
 * @ga: guest logical address
 *
 * Convert a guest logical address to an effective address by applying the
 * rules of the addressing mode defined by bits 31 and 32 of the given PSW
 * (extended/basic addressing mode).
 *
 * Depending on the addressing mode, the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
 * mode) of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
							   unsigned long ga)
{
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
		return ga;
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}
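
/*
 * Editor's illustration (not part of the original header): the masks simply
 * truncate the logical address to the width of the addressing mode, e.g.
 * for ga == 0xdeadbeefcafebabe:
 *
 *	64-bit mode: 0xdeadbeefcafebabe	(unchanged)
 *	31-bit mode: 0x4afebabe		(ga & ((1UL << 31) - 1))
 *	24-bit mode: 0xfebabe		(ga & ((1UL << 24) - 1))
 */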

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
							  unsigned long ga)
{
	return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
}

/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpu);			\
	__typeof__(*(gra)) __x = (x);				\
	unsigned long __gpa;					\
								\
	__gpa = (unsigned long)(gra);				\
	__gpa += kvm_s390_get_prefix(__vcpu);			\
	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));\
})
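
/*
 * Usage sketch (editor's example; __LC_PGM_INT_CODE and PGM_SPECIFICATION
 * are assumed to come from asm/lowcore.h and asm/kvm_host.h): the lowcore
 * offset is passed as a typed pointer, so that __typeof__(*(gra)) selects
 * the width of the store:
 *
 *	rc = put_guest_lc(vcpu, PGM_SPECIFICATION,
 *			  (u16 *)__LC_PGM_INT_CODE);
 *	if (rc)
 *		return rc;	(rc == -EFAULT, guest mapping is broken)
 */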

/**
 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @gra: vcpu's destination guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		  unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}
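
/*
 * Editor's sketch of a lowcore round-trip (__LC_EXT_OLD_PSW and
 * __LC_EXT_NEW_PSW are assumed to come from asm/lowcore.h): interrupt
 * delivery typically saves the current PSW to the lowcore and loads the
 * new one from it:
 *
 *	psw_t new_psw;
 *
 *	rc = write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
 *			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
 *	if (!rc)
 *		rc = read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
 *				   &new_psw, sizeof(psw_t));
 */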

enum gacc_mode {
	GACC_FETCH,
	GACC_STORE,
	GACC_IFETCH,
};

int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key);

int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key);

int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key);

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key);

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode);

/**
 * write_guest_with_key - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 * @access_key: access key the storage key needs to match
 *
 * Copy @len bytes from @data (kernel space) to @ga (guest address).
 * In order to copy data to guest space the PSW of the vcpu is inspected:
 * If DAT is off data will be copied to guest real or absolute memory.
 * If DAT is on data will be copied to the address space as specified by
 * the address space bits of the PSW:
 * Primary, secondary, home space or access register mode.
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
 * In addition low address, DAT and key protection checks are performed before
 * copying any data.
 *
 * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
 * In case of an access exception (e.g. protection exception) pgm will contain
 * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
 * will inject a correct exception into the guest.
 * If no access exception happened, the contents of pgm are undefined when
 * this function returns.
 *
 * Returns: - zero on success
 *	    - a negative value if e.g. the guest mapping is broken or in
 *	      case of out-of-memory. In this case the contents of pgm are
 *	      undefined. Also parts of @data may have been copied to guest
 *	      space.
 *	    - a positive value if an access exception happened. In this case
 *	      the returned value is the program interruption code and the
 *	      contents of pgm may be used to inject an exception into the
 *	      guest. No data has been copied to guest space.
 *
 * Note: in case an access exception is recognized no data has been copied to
 *	 guest space (this is also true, if the data to be copied would cross
 *	 one or more page boundaries in guest space).
 *	 Therefore this function may be used for nullifying and suppressing
 *	 instruction emulation.
 *	 It may also be used for terminating instructions, if it is undefined
 *	 whether data has been changed in guest space in case of an exception.
 */
static inline __must_check
int write_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			 void *data, unsigned long len, u8 access_key)
{
	return access_guest_with_key(vcpu, ga, ar, data, len, GACC_STORE,
				     access_key);
}
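
/*
 * Editor's sketch of the intended error handling (kvm_s390_inject_prog_cond()
 * is declared in kvm-s390.h): a positive return value is a program
 * interruption code that can be turned into a guest exception, a negative
 * one indicates a broken guest mapping or out-of-memory:
 *
 *	rc = write_guest_with_key(vcpu, ga, ar, data, len, key);
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */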

/**
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * The behaviour of write_guest is identical to write_guest_with_key, except
 * that the PSW access key is used instead of an explicit argument.
 */
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
		unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return write_guest_with_key(vcpu, ga, ar, data, len, access_key);
}

/**
 * read_guest_with_key - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 * @access_key: access key the storage key needs to match
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest_with_key is identical to write_guest_with_key,
 * except that data will be copied from guest space to kernel space.
 */
static inline __must_check
int read_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			void *data, unsigned long len, u8 access_key)
{
	return access_guest_with_key(vcpu, ga, ar, data, len, GACC_FETCH,
				     access_key);
}

/**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest is identical to read_guest_with_key, except
 * that the PSW access key is used instead of an explicit argument.
 */
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
	       unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return read_guest_with_key(vcpu, ga, ar, data, len, access_key);
}

/**
 * read_guest_instr - copy instruction data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from the given address (guest space) to @data (kernel
 * space).
 *
 * The behaviour of read_guest_instr is identical to read_guest, except that
 * instruction data will be read from primary space when in home-space or
 * address-space mode.
 */
static inline __must_check
int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		     unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return access_guest_with_key(vcpu, ga, 0, data, len, GACC_IFETCH,
				     access_key);
}
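
/*
 * Editor's sketch: fetch the two instruction bytes at the current PSW
 * address, e.g. to re-decode an intercepted instruction:
 *
 *	u16 opcode;
 *
 *	rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr,
 *			      &opcode, sizeof(opcode));
 *	if (rc)
 *		return kvm_s390_inject_prog_cond(vcpu, rc);
 */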

/**
 * write_guest_abs - copy data from kernel space to guest space absolute
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		    unsigned long len)
{
	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_abs - copy data from guest space absolute to kernel space
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		   unsigned long len)
{
	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

/**
 * write_guest_real - copy data from kernel space to guest space real
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gra (guest real address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		     unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_STORE);
}

/**
 * read_guest_real - copy data from guest space real to kernel space
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gra (guest real address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		    unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_FETCH);
}

void ipte_lock(struct kvm_vcpu *vcpu);
void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

/* MVPG PEI indication bits */
#define PEI_DAT_PROT 2
#define PEI_NOT_PTE 4

int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
			  unsigned long saddr, unsigned long *datptr);

#endif /* __KVM_S390_GACCESS_H */