/*
 *  include/asm-s390/uaccess.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1


/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })


#define KERNEL_DS	MAKE_MM_SEG(0)
#define USER_DS		MAKE_MM_SEG(1)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	({ asm volatile("sar   4,%0" : : "a" ((x).ar4)); \
			   current->addr_limit = (x); })

#define segment_eq(a,b)	((a).ar4 == (b).ar4)

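/*
 * Usage sketch (illustrative, not part of this header): code that needs
 * to feed a kernel buffer to a routine expecting a user pointer widens
 * the segment temporarily.  The names here are hypothetical:
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	err = some_routine_that_calls_copy_from_user(kbuf, len);
 *	set_fs(old_fs);
 *
 * On s390, set_fs() also reloads access register 4, which the copy
 * routines below use to address the secondary (user) space.
 */
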

#define __access_ok(addr,size)	(1)

#define access_ok(type,addr,size)	__access_ok(addr,size)

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise. */
extern unsigned long search_exception_table(unsigned long);
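
/*
 * Illustrative sketch (hypothetical names, not the actual fault handler
 * code): when an instruction inside one of the routines below faults,
 * the page fault path looks the faulting address up in the table and,
 * if an entry exists, resumes at the fixup stub instead of killing the
 * task:
 *
 *	unsigned long fixup = search_exception_table(faulting_address);
 *	if (fixup)
 *		resume_at(fixup);	// jump to the out-of-line fixup code
 *	else
 *		handle_real_fault();	// no fixup: treat as a genuine fault
 */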


/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

extern inline int __put_user_asm_8(__u64 x, void *ptr)
{
	int err;

	__asm__ __volatile__ ( "   sr    %1,%1\n"
			       "   la    2,%2\n"
			       "   la    4,%0\n"
			       "   sacf  512\n"
			       "0: mvc   0(8,4),0(2)\n"
			       "   sacf  0\n"
			       "1:\n"
			       ".section .fixup,\"ax\"\n"
			       "2: sacf  0\n"
			       "   lhi   %1,%h3\n"
			       "   bras  4,3f\n"
			       "   .long 1b\n"
			       "3: l     4,0(4)\n"
			       "   br    4\n"
			       ".previous\n"
			       ".section __ex_table,\"a\"\n"
			       "   .align 4\n"
			       "   .long 0b,2b\n"
			       ".previous"
			       /* was "=m" (*((__u32*) ptr)): the mvc stores
			          8 bytes, so describe all of them */
			       : "=m" (*((__u64 *) ptr)), "=&d" (err)
			       : "m" (x), "K" (-EFAULT)
			       : "cc", "2", "4" );
	return err;
}

extern inline int __put_user_asm_4(__u32 x, void *ptr)
{
	int err;

	__asm__ __volatile__ ( "   sr    %1,%1\n"
			       "   la    4,%0\n"
			       "   sacf  512\n"
			       "0: st    %2,0(4)\n"
			       "   sacf  0\n"
			       "1:\n"
			       ".section .fixup,\"ax\"\n"
			       "2: sacf  0\n"
			       "   lhi   %1,%h3\n"
			       "   bras  4,3f\n"
			       "   .long 1b\n"
			       "3: l     4,0(4)\n"
			       "   br    4\n"
			       ".previous\n"
			       ".section __ex_table,\"a\"\n"
			       "   .align 4\n"
			       "   .long 0b,2b\n"
			       ".previous"
			       : "=m" (*((__u32 *) ptr)), "=&d" (err)
			       : "d" (x), "K" (-EFAULT)
			       : "cc", "4" );
	return err;
}

extern inline int __put_user_asm_2(__u16 x, void *ptr)
{
	int err;

	__asm__ __volatile__ ( "   sr    %1,%1\n"
			       "   la    4,%0\n"
			       "   sacf  512\n"
			       "0: sth   %2,0(4)\n"
			       "   sacf  0\n"
			       "1:\n"
			       ".section .fixup,\"ax\"\n"
			       "2: sacf  0\n"
			       "   lhi   %1,%h3\n"
			       "   bras  4,3f\n"
			       "   .long 1b\n"
			       "3: l     4,0(4)\n"
			       "   br    4\n"
			       ".previous\n"
			       ".section __ex_table,\"a\"\n"
			       "   .align 4\n"
			       "   .long 0b,2b\n"
			       ".previous"
			       : "=m" (*((__u16 *) ptr)), "=&d" (err)
			       : "d" (x), "K" (-EFAULT)
			       : "cc", "4" );
	return err;
}

extern inline int __put_user_asm_1(__u8 x, void *ptr)
{
	int err;

	__asm__ __volatile__ ( "   sr    %1,%1\n"
			       "   la    4,%0\n"
			       "   sacf  512\n"
			       "0: stc   %2,0(4)\n"
			       "   sacf  0\n"
			       "1:\n"
			       ".section .fixup,\"ax\"\n"
			       "2: sacf  0\n"
			       "   lhi   %1,%h3\n"
			       "   bras  4,3f\n"
			       "   .long 1b\n"
			       "3: l     4,0(4)\n"
			       "   br    4\n"
			       ".previous\n"
			       ".section __ex_table,\"a\"\n"
			       "   .align 4\n"
			       "   .long 0b,2b\n"
			       ".previous"
			       : "=m" (*((__u8 *) ptr)), "=&d" (err)
			       : "d" (x), "K" (-EFAULT)
			       : "cc", "4" );
	return err;
}

/*
 * (u8)(u32) ... ouch, but that's the only way we can suppress the
 * warnings when compiling binfmt_elf.c
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err;						\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
		__pu_err = __put_user_asm_1((__u8)(__u32) __x,	\
					    __pu_addr);		\
		break;						\
	case 2:							\
		__pu_err = __put_user_asm_2((__u16)(__u32) __x,	\
					    __pu_addr);		\
		break;						\
	case 4:							\
		__pu_err = __put_user_asm_4((__u32) __x,	\
					    __pu_addr);		\
		break;						\
	case 8:							\
		__pu_err = __put_user_asm_8((__u64) __x,	\
					    __pu_addr);		\
		break;						\
	default:						\
		__pu_err = __put_user_bad();			\
		break;						\
	}							\
	__pu_err;						\
})

#define put_user(x, ptr)	__put_user(x, ptr)

extern int __put_user_bad(void);
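
/*
 * Example (illustrative): handing a single value back to user space, as
 * an ioctl handler might; "argp" is a hypothetical user pointer.
 *
 *	int status = 0;
 *	if (put_user(status, (int *) argp))
 *		return -EFAULT;
 *
 * sizeof(int) == 4, so the switch in __put_user() expands to a call to
 * __put_user_asm_4().
 */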

#define __get_user_asm_8(x, ptr, err)					\
({									\
	__asm__ __volatile__ ( "   sr    %1,%1\n"			\
			       "   la    2,%0\n"			\
			       "   la    4,%2\n"			\
			       "   sacf  512\n"				\
			       "0: mvc   0(8,2),0(4)\n"			\
			       "   sacf  0\n"				\
			       "1:\n"					\
			       ".section .fixup,\"ax\"\n"		\
			       "2: sacf  0\n"				\
			       "   lhi   %1,%h3\n"			\
			       "   bras  4,3f\n"			\
			       "   .long 1b\n"				\
			       "3: l     4,0(4)\n"			\
			       "   br    4\n"				\
			       ".previous\n"				\
			       ".section __ex_table,\"a\"\n"		\
			       "   .align 4\n"				\
			       "   .long 0b,2b\n"			\
			       ".previous"				\
			       : "=m" (x), "=&d" (err)			\
			       : "m" (*(const __u64 *)(ptr)), "K" (-EFAULT) \
			       : "cc", "2", "4" );			\
})

#define __get_user_asm_4(x, ptr, err)					\
({									\
	__asm__ __volatile__ ( "   sr    %1,%1\n"			\
			       "   la    4,%2\n"			\
			       "   sacf  512\n"				\
			       "0: l     %0,0(4)\n"			\
			       "   sacf  0\n"				\
			       "1:\n"					\
			       ".section .fixup,\"ax\"\n"		\
			       "2: sacf  0\n"				\
			       "   lhi   %1,%h3\n"			\
			       "   bras  4,3f\n"			\
			       "   .long 1b\n"				\
			       "3: l     4,0(4)\n"			\
			       "   br    4\n"				\
			       ".previous\n"				\
			       ".section __ex_table,\"a\"\n"		\
			       "   .align 4\n"				\
			       "   .long 0b,2b\n"			\
			       ".previous"				\
			       : "=d" (x), "=&d" (err)			\
			       : "m" (*(const __u32 *)(ptr)), "K" (-EFAULT) \
			       : "cc", "4" );				\
})

#define __get_user_asm_2(x, ptr, err)					\
({									\
	__asm__ __volatile__ ( "   sr    %1,%1\n"			\
			       "   la    4,%2\n"			\
			       "   sacf  512\n"				\
			       "0: lh    %0,0(4)\n"			\
			       "   sacf  0\n"				\
			       "1:\n"					\
			       ".section .fixup,\"ax\"\n"		\
			       "2: sacf  0\n"				\
			       "   lhi   %1,%h3\n"			\
			       "   bras  4,3f\n"			\
			       "   .long 1b\n"				\
			       "3: l     4,0(4)\n"			\
			       "   br    4\n"				\
			       ".previous\n"				\
			       ".section __ex_table,\"a\"\n"		\
			       "   .align 4\n"				\
			       "   .long 0b,2b\n"			\
			       ".previous"				\
			       : "=d" (x), "=&d" (err)			\
			       : "m" (*(const __u16 *)(ptr)), "K" (-EFAULT) \
			       : "cc", "4" );				\
})

#define __get_user_asm_1(x, ptr, err)					\
({									\
	__asm__ __volatile__ ( "   sr    %1,%1\n"			\
			       "   la    4,%2\n"			\
			       "   sr    %0,%0\n"			\
			       "   sacf  512\n"				\
			       "0: ic    %0,0(4)\n"			\
			       "   sacf  0\n"				\
			       "1:\n"					\
			       ".section .fixup,\"ax\"\n"		\
			       "2: sacf  0\n"				\
			       "   lhi   %1,%h3\n"			\
			       "   bras  4,3f\n"			\
			       "   .long 1b\n"				\
			       "3: l     4,0(4)\n"			\
			       "   br    4\n"				\
			       ".previous\n"				\
			       ".section __ex_table,\"a\"\n"		\
			       "   .align 4\n"				\
			       "   .long 0b,2b\n"			\
			       ".previous"				\
			       : "=d" (x), "=&d" (err)			\
			       : "m" (*(const __u8 *)(ptr)), "K" (-EFAULT) \
			       : "cc", "4" );				\
})

#define __get_user(x, ptr)					\
({								\
	__typeof__(ptr) __gu_addr = (ptr);			\
	__typeof__(*(ptr)) __x;					\
	int __gu_err;						\
	switch (sizeof(*(__gu_addr))) {				\
	case 1:							\
		__get_user_asm_1(__x, __gu_addr, __gu_err);	\
		break;						\
	case 2:							\
		__get_user_asm_2(__x, __gu_addr, __gu_err);	\
		break;						\
	case 4:							\
		__get_user_asm_4(__x, __gu_addr, __gu_err);	\
		break;						\
	case 8:							\
		__get_user_asm_8(__x, __gu_addr, __gu_err);	\
		break;						\
	default:						\
		__x = 0;					\
		__gu_err = __get_user_bad();			\
		break;						\
	}							\
	(x) = __x;						\
	__gu_err;						\
})

#define get_user(x, ptr)	__get_user(x, ptr)

extern int __get_user_bad(void);
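
/*
 * Example (illustrative): fetching a single value from user space;
 * "argp" is a hypothetical user pointer.
 *
 *	int request;
 *	if (get_user(request, (int *) argp))
 *		return -EFAULT;
 *
 * Note that __access_ok() is always 1 on this architecture: protection
 * comes from running the access through the secondary address space,
 * not from an address-range check.
 */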

/*
 * The access registers are set up so that register 4 addresses the
 * secondary (user) space and register 2 the primary (kernel) space.
 */

extern long __copy_to_user_asm(const void *from, long n, const void *to);

#define __copy_to_user(to, from, n)				\
({								\
	__copy_to_user_asm(from, n, to);			\
})

#define copy_to_user(to, from, n)				\
({								\
	long err = 0;						\
	__typeof__(n) __n = (n);				\
	if (__access_ok(to, __n)) {				\
		err = __copy_to_user_asm(from, __n, to);	\
	} else							\
		err = __n;					\
	err;							\
})
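
/*
 * Example (illustrative): copying a kernel structure out to user space;
 * "ubuf" and "kinfo" are hypothetical.  A non-zero result is the number
 * of bytes that could not be copied:
 *
 *	if (copy_to_user(ubuf, &kinfo, sizeof(kinfo)))
 *		return -EFAULT;
 */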

extern long __copy_from_user_asm(void *to, long n, const void *from);

#define __copy_from_user(to, from, n)				\
({								\
	__copy_from_user_asm(to, n, from);			\
})

#define copy_from_user(to, from, n)				\
({								\
	long err = 0;						\
	__typeof__(n) __n = (n);				\
	if (__access_ok(from, __n)) {				\
		err = __copy_from_user_asm(to, __n, from);	\
	} else							\
		err = __n;					\
	err;							\
})
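
/*
 * Example (illustrative): reading a user-supplied argument block into a
 * kernel buffer; "uarg" and struct sample_args are hypothetical:
 *
 *	struct sample_args args;
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 */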

/*
 * Copy a null terminated string from userspace.
 */

static inline long
__strncpy_from_user(char *dst, const char *src, long count)
{
	int len;
	__asm__ __volatile__ ( "   slr   %0,%0\n"
			       "   lr    2,%1\n"
			       "   lr    4,%2\n"
			       "   slr   3,3\n"
			       "   sacf  512\n"
			       "0: ic    3,0(%0,4)\n"
			       "1: stc   3,0(%0,2)\n"
			       "   ltr   3,3\n"
			       "   jz    2f\n"
			       "   ahi   %0,1\n"
			       "   clr   %0,%3\n"
			       "   jl    0b\n"
			       "2: sacf  0\n"
			       ".section .fixup,\"ax\"\n"
			       "3: lhi   %0,%h4\n"
			       "   basr  3,0\n"
			       "   l     3,4f-.(3)\n"
			       "   br    3\n"
			       "4: .long 2b\n"
			       ".previous\n"
			       ".section __ex_table,\"a\"\n"
			       "   .align 4\n"
			       "   .long 0b,3b\n"
			       "   .long 1b,3b\n"
			       ".previous"
			       : "=&a" (len)
			       : "a" (dst), "d" (src), "d" (count),
			         "K" (-EFAULT)
			       : "2", "3", "4", "memory", "cc" );
	return len;
}

static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
	long res = -EFAULT;
	if (access_ok(VERIFY_READ, src, 1))
		res = __strncpy_from_user(dst, src, count);
	return res;
}
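
/*
 * Example (illustrative): copying a short name from user space; "uname"
 * is a hypothetical user pointer.  A negative result means a fault;
 * otherwise the result is the string length (or the count if no
 * terminator was found within it):
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (len < 0)
 *		return -EFAULT;
 *	name[len] = '\0';
 */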


/*
 * Return the size of a string (including the terminating 0).
 *
 * Return 0 for error.
 */
static inline unsigned long
strnlen_user(const char *src, unsigned long n)
{
	__asm__ __volatile__ ( "   alr   %0,%1\n"
			       "   slr   0,0\n"
			       "   lr    4,%1\n"
			       "   sacf  512\n"
			       "0: srst  %0,4\n"
			       "   jo    0b\n"
			       "   slr   %0,%1\n"
			       "   ahi   %0,1\n"
			       "   sacf  0\n"
			       "1:\n"
			       ".section .fixup,\"ax\"\n"
			       "2: sacf  0\n"
			       "   slr   %0,%0\n"
			       "   bras  4,3f\n"
			       "   .long 1b\n"
			       "3: l     4,0(4)\n"
			       "   br    4\n"
			       ".previous\n"
			       ".section __ex_table,\"a\"\n"
			       "   .align 4\n"
			       "   .long 0b,2b\n"
			       ".previous"
			       : "+&a" (n) : "d" (src)
			       : "cc", "0", "4" );
	return n;
}
#define strlen_user(str)	strnlen_user(str, ~0UL)
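
/*
 * Example (illustrative): sizing a user string before allocating space
 * for it; "ustr" is a hypothetical user pointer.  A result of 0 means
 * the scan faulted:
 *
 *	unsigned long len = strnlen_user(ustr, PAGE_SIZE);
 *	if (len == 0)
 *		return -EFAULT;
 */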

/*
 * Zero Userspace
 */

extern long __clear_user_asm(void *to, long n);

#define __clear_user(to, n)					\
({								\
	__clear_user_asm(to, n);				\
})

static inline unsigned long
clear_user(void *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __clear_user(to, n);
	return n;
}
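
/*
 * Example (illustrative): zero-filling the tail of a user buffer, e.g.
 * the BSS padding written by binfmt code; "ubuf" and "pad" are
 * hypothetical.  The result is the number of bytes left uncleared:
 *
 *	if (clear_user(ubuf, pad))
 *		return -EFAULT;
 */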


#endif /* __S390_UACCESS_H */