#ifndef __i386_UACCESS_H
#define __i386_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/config.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })


#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
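
/*
 * Example (illustrative, not part of this header): the classic pattern
 * for temporarily widening the address limit so that kernel buffers can
 * be passed to code expecting user pointers.  do_read() and kern_buf
 * are hypothetical names.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);			// disable the limit check
 *	err = do_read(file, kern_buf, len);	// kern_buf is a kernel pointer
 *	set_fs(old_fs);				// always restore the old limit
 */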

extern int __verify_write(const void *, unsigned long);

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
 *
 * This needs 33-bit arithmetic. We have a carry...
 */
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr),"g" ((int)(size)),"g" (current->addr_limit.seg)); \
	flag; })
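
/*
 * Illustrative C rendition of the check above (a sketch, mirroring the
 * asm; not used by the kernel).  The asm folds both tests into one
 * carry chain:
 *
 *	unsigned long sum = (unsigned long)(addr) + (size);
 *	unsigned long limit = current->addr_limit.seg;
 *	// nonzero = invalid: the addition wrapped, or the end is past the limit
 *	flag = (sum < (unsigned long)(addr)) || (sum > limit);
 */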

#ifdef CONFIG_X86_WP_WORKS_OK

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)

#else

#define access_ok(type,addr,size) ( (__range_ok(addr,size) == 0) && \
			 ((type) == VERIFY_READ || boot_cpu_data.wp_works_ok || \
			 segment_eq(get_fs(),KERNEL_DS) || \
			  __verify_write((void *)(addr),(size))))

#endif
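
/*
 * Example (illustrative): validating a user pointer once before a
 * series of raw accesses; uarg and struct my_req are hypothetical.
 *
 *	struct my_req req;
 *	if (!access_ok(VERIFY_READ, uarg, sizeof(req)))
 *		return -EFAULT;
 *	// the range is plausible; the access itself may still fault
 */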

/**
 * verify_area: - Obsolete, use access_ok()
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * This function has been replaced by access_ok().
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns zero if the memory block may be valid, -EFAULT
 * if it is definitely invalid.
 *
 * See access_ok() for more details.
 */
static inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}


/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if exception not found and fixup otherwise.  */
extern unsigned long search_exception_table(unsigned long);
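
/*
 * Sketch of how the page fault path consumes this table (simplified):
 * if the faulting instruction has an entry, execution resumes at its
 * fixup address instead of oopsing.
 *
 *	unsigned long fixup = search_exception_table(regs->eip);
 *	if (fixup) {
 *		regs->eip = fixup;	// jump to the out-of-line fixup code
 *		return;
 *	}
 *	// otherwise this is a genuine kernel fault
 */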


/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to use pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern void __get_user_1(void);
extern void __get_user_2(void);
extern void __get_user_4(void);

#define __get_user_x(size,ret,x,ptr) \
	__asm__ __volatile__("call __get_user_" #size \
		:"=a" (ret),"=d" (x) \
		:"0" (ptr))


/* Careful: we have to cast the result to the type of the pointer for sign reasons */
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr)							\
({	int __ret_gu,__val_gu;						\
	switch(sizeof (*(ptr))) {					\
	case 1:  __get_user_x(1,__ret_gu,__val_gu,ptr); break;		\
	case 2:  __get_user_x(2,__ret_gu,__val_gu,ptr); break;		\
	case 4:  __get_user_x(4,__ret_gu,__val_gu,ptr); break;		\
	default: __get_user_x(X,__ret_gu,__val_gu,ptr); break;		\
	}								\
	(x) = (__typeof__(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
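
/*
 * Example (illustrative): fetching one int from user space; uaddr is a
 * hypothetical user pointer.
 *
 *	int val;
 *	if (get_user(val, (int *)uaddr))
 *		return -EFAULT;	// nonzero return means the access faulted
 */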

extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

extern void __put_user_bad(void);


/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)							\
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))

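/*
 * Example (illustrative): storing a result back through a user-supplied
 * pointer; resultp is a hypothetical (int *) user pointer.
 *
 *	if (put_user(val, resultp))
 *		return -EFAULT;
 */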

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))


/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
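
/*
 * Example (illustrative): check a range once with access_ok(), then use
 * the unchecked variants for repeated accesses; uptr and n are
 * hypothetical.
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, n * sizeof(int)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		if (__put_user(i, uptr + i))
 *			return -EFAULT;
 */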

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})


#define __put_user_check(x,ptr,size)			\
({							\
	long __pu_err = -EFAULT;			\
	__typeof__(*(ptr)) *__pu_addr = (ptr);		\
	if (access_ok(VERIFY_WRITE,__pu_addr,size))	\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_u64(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	movl %%eax,0(%2)\n"			\
		"2:	movl %%edx,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	movl %3,%0\n"				\
		"	jmp 3b\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 4\n"				\
		"	.long 1b,4b\n"				\
		"	.long 2b,4b\n"				\
		".previous"					\
		: "=r"(err)					\
		: "A" (x), "r" (addr), "i"(-EFAULT), "0"(err))

#define __put_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	  case 1: __put_user_asm(x,ptr,retval,"b","b","iq"); break;	\
	  case 2: __put_user_asm(x,ptr,retval,"w","w","ir"); break;	\
	  case 4: __put_user_asm(x,ptr,retval,"l","","ir"); break;	\
	  case 8: __put_user_u64(x,ptr,retval); break;			\
	  default: __put_user_bad();					\
	}								\
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype)	\
	__asm__ __volatile__(					\
		"1:	mov"itype" %"rtype"1,%2\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	movl %3,%0\n"				\
		"	jmp 2b\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 4\n"				\
		"	.long 1b,3b\n"				\
		".previous"					\
		: "=r"(err)					\
		: ltype (x), "m"(__m(addr)), "i"(-EFAULT), "0"(err))


#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val,(ptr),(size),__gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

extern long __get_user_bad(void);

#define __get_user_size(x,ptr,size,retval)				\
do {									\
	retval = 0;							\
	switch (size) {							\
	  case 1: __get_user_asm(x,ptr,retval,"b","b","=q"); break;	\
	  case 2: __get_user_asm(x,ptr,retval,"w","w","=r"); break;	\
	  case 4: __get_user_asm(x,ptr,retval,"l","","=r"); break;	\
	  default: (x) = __get_user_bad();				\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype)	\
	__asm__ __volatile__(					\
		"1:	mov"itype" %2,%"rtype"1\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	movl %3,%0\n"				\
		"	xor"itype" %"rtype"1,%"rtype"1\n"	\
		"	jmp 2b\n"				\
		".previous\n"					\
		".section __ex_table,\"a\"\n"			\
		"	.align 4\n"				\
		"	.long 1b,3b\n"				\
		".previous"					\
		: "=r"(err), ltype (x)				\
		: "m"(__m(addr)), "i"(-EFAULT), "0"(err))


/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
#define __copy_user(to,from,size)					\
do {									\
	int __d0, __d1;							\
	__asm__ __volatile__(						\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 0b,3b\n"					\
		"	.long 1b,2b\n"					\
		".previous"						\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1)		\
		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)	\
		: "memory");						\
} while (0)

#define __copy_user_zeroing(to,from,size)				\
do {									\
	int __d0, __d1;							\
	__asm__ __volatile__(						\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"4:	pushl %0\n"					\
		"	pushl %%eax\n"					\
		"	xorl %%eax,%%eax\n"				\
		"	rep; stosb\n"					\
		"	popl %%eax\n"					\
		"	popl %0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 0b,3b\n"					\
		"	.long 1b,4b\n"					\
		".previous"						\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1)		\
		: "r"(size & 3), "0"(size / 4), "1"(to), "2"(from)	\
		: "memory");						\
} while (0)
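
/*
 * Illustrative C rendition of the copy loops above (a sketch): copy
 * size/4 longwords with rep movsl, then the size&3 leftover bytes with
 * rep movsb.  On a fault, the zeroing variant also clears the uncopied
 * tail of the destination.
 *
 *	long *ld = (long *)to, *ls = (long *)from;
 *	unsigned long longs = size / 4;
 *	while (longs--) *ld++ = *ls++;
 *	char *d = (char *)ld, *s = (char *)ls;
 *	unsigned long bytes = size & 3;
 *	while (bytes--) *d++ = *s++;
 */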

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long
__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user_zeroing(to,from,n);
	return n;
}

static inline unsigned long
__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__copy_user(to,from,n);
	return n;
}


/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)			\
do {								\
	int __d0, __d1;						\
	switch (size & 3) {					\
	default:						\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:\n"					\
			".section .fixup,\"ax\"\n"		\
			"2:	shl $2,%0\n"			\
			"	jmp 1b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,2b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	case 1:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsb\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	shl $2,%0\n"			\
			"4:	incl %0\n"			\
			"	jmp 2b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,3b\n"			\
			"	.long 1b,4b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	case 2:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsw\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	shl $2,%0\n"			\
			"4:	addl $2,%0\n"			\
			"	jmp 2b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,3b\n"			\
			"	.long 1b,4b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	case 3:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsw\n"			\
			"2:	movsb\n"			\
			"3:\n"					\
			".section .fixup,\"ax\"\n"		\
			"4:	shl $2,%0\n"			\
			"5:	addl $2,%0\n"			\
			"6:	incl %0\n"			\
			"	jmp 3b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,4b\n"			\
			"	.long 1b,5b\n"			\
			"	.long 2b,6b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	}							\
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)		\
do {								\
	int __d0, __d1;						\
	switch (size & 3) {					\
	default:						\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:\n"					\
			".section .fixup,\"ax\"\n"		\
			"2:	pushl %0\n"			\
			"	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	rep; stosl\n"			\
			"	popl %%eax\n"			\
			"	popl %0\n"			\
			"	shl $2,%0\n"			\
			"	jmp 1b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,2b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	case 1:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsb\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	pushl %0\n"			\
			"	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	rep; stosl\n"			\
			"	stosb\n"			\
			"	popl %%eax\n"			\
			"	popl %0\n"			\
			"	shl $2,%0\n"			\
			"	incl %0\n"			\
			"	jmp 2b\n"			\
			"4:	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	stosb\n"			\
			"	popl %%eax\n"			\
			"	incl %0\n"			\
			"	jmp 2b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,3b\n"			\
			"	.long 1b,4b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	case 2:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsw\n"			\
			"2:\n"					\
			".section .fixup,\"ax\"\n"		\
			"3:	pushl %0\n"			\
			"	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	rep; stosl\n"			\
			"	stosw\n"			\
			"	popl %%eax\n"			\
			"	popl %0\n"			\
			"	shl $2,%0\n"			\
			"	addl $2,%0\n"			\
			"	jmp 2b\n"			\
			"4:	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	stosw\n"			\
			"	popl %%eax\n"			\
			"	addl $2,%0\n"			\
			"	jmp 2b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,3b\n"			\
			"	.long 1b,4b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	case 3:							\
		__asm__ __volatile__(				\
			"0:	rep; movsl\n"			\
			"1:	movsw\n"			\
			"2:	movsb\n"			\
			"3:\n"					\
			".section .fixup,\"ax\"\n"		\
			"4:	pushl %0\n"			\
			"	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	rep; stosl\n"			\
			"	stosw\n"			\
			"	stosb\n"			\
			"	popl %%eax\n"			\
			"	popl %0\n"			\
			"	shl $2,%0\n"			\
			"	addl $3,%0\n"			\
			"	jmp 2b\n"			\
			"5:	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	stosw\n"			\
			"	stosb\n"			\
			"	popl %%eax\n"			\
			"	addl $3,%0\n"			\
			"	jmp 2b\n"			\
			"6:	pushl %%eax\n"			\
			"	xorl %%eax,%%eax\n"		\
			"	stosb\n"			\
			"	popl %%eax\n"			\
			"	incl %0\n"			\
			"	jmp 3b\n"			\
			".previous\n"				\
			".section __ex_table,\"a\"\n"		\
			"	.align 4\n"			\
			"	.long 0b,4b\n"			\
			"	.long 1b,5b\n"			\
			"	.long 2b,6b\n"			\
			".previous"				\
			: "=c"(size), "=&S" (__d0), "=&D" (__d1)\
			: "1"(from), "2"(to), "0"(size/4)	\
			: "memory");				\
		break;						\
	}							\
} while (0)

unsigned long __generic_copy_to_user(void *, const void *, unsigned long);
unsigned long __generic_copy_from_user(void *, const void *, unsigned long);

static inline unsigned long
__constant_copy_to_user(void *to, const void *from, unsigned long n)
{
	prefetch(from);
	if (access_ok(VERIFY_WRITE, to, n))
		__constant_copy_user(to,from,n);
	return n;
}

static inline unsigned long
__constant_copy_from_user(void *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		__constant_copy_user_zeroing(to,from,n);
	else
		memset(to, 0, n);
	return n;
}

static inline unsigned long
__constant_copy_to_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user(to,from,n);
	return n;
}

static inline unsigned long
__constant_copy_from_user_nocheck(void *to, const void *from, unsigned long n)
{
	__constant_copy_user_zeroing(to,from,n);
	return n;
}

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to,from,n)				\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_to_user((to),(from),(n)) :	\
	 __generic_copy_to_user((to),(from),(n)))
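
/*
 * Example (illustrative): returning a kernel buffer to user space;
 * ubuf, kbuf and len are hypothetical.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;	// nonzero = bytes left uncopied
 */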

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to,from,n)			\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_from_user((to),(from),(n)) :	\
	 __generic_copy_from_user((to),(from),(n)))
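
/*
 * Example (illustrative): reading a structure from user space; uarg and
 * struct my_args are hypothetical.  On a partial fault the uncopied
 * tail of the destination has already been zeroed.
 *
 *	struct my_args a;
 *	if (copy_from_user(&a, uarg, sizeof(a)))
 *		return -EFAULT;
 */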

/**
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to,from,n)			\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_to_user_nocheck((to),(from),(n)) :	\
	 __generic_copy_to_user_nocheck((to),(from),(n)))

/**
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to,from,n)			\
	(__builtin_constant_p(n) ?			\
	 __constant_copy_from_user_nocheck((to),(from),(n)) :	\
	 __generic_copy_from_user_nocheck((to),(from),(n)))

long strncpy_from_user(char *dst, const char *src, long count);
long __strncpy_from_user(char *dst, const char *src, long count);

/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

long strnlen_user(const char *str, long n);
unsigned long clear_user(void *mem, unsigned long len);
unsigned long __clear_user(void *mem, unsigned long len);
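
/*
 * Example (illustrative): copying a user-supplied path string and
 * clearing a user buffer; upath and ubuf are hypothetical.
 *
 *	char name[64];
 *	long len = strncpy_from_user(name, upath, sizeof(name) - 1);
 *	if (len < 0)
 *		return -EFAULT;		// fault while reading the string
 *	name[len] = '\0';
 *
 *	if (clear_user(ubuf, n))
 *		return -EFAULT;		// nonzero = bytes left unzeroed
 */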

#endif /* __i386_UACCESS_H */