/*
 * User address space access functions.
 * The non-inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include <asm/mmx.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}
#define movsl_is_ok(a1, a2, n) \
	__movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
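
/*
 * Worked example (hypothetical addresses; movsl_mask.mask is assumed
 * to be 7 here, its actual value is set during CPU setup): with
 * a1 = 0x1003 and a2 = 0x2000, (a1 ^ a2) & 7 == 3, i.e. source and
 * destination disagree in their low address bits, so "rep; movsl"
 * would run misaligned on one side; for n >= 64, __movsl_is_ok()
 * returns 0 and the unrolled Intel copy is used instead.  With
 * a1 = 0x1008 and a2 = 0x2010, (a1 ^ a2) & 7 == 0 and movsl is OK.
 */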

/*
 * Zero Userspace
 */

#define __do_clear_user(addr,size)					\
do {									\
	int __d0;							\
	might_fault();							\
	__asm__ __volatile__(						\
		"0:	rep; stosl\n"					\
		"	movl %2,%0\n"					\
		"1:	rep; stosb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"3:	lea 0(%2,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		_ASM_EXTABLE(0b,3b)					\
		_ASM_EXTABLE(1b,2b)					\
		: "=&c"(size), "=&D" (__d0)				\
		: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0));	\
} while (0)
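
/*
 * Worked example (hypothetical size): for size = 10 the asm above
 * stores 10 / 4 = 2 dwords with "rep; stosl" and then 10 & 3 = 2
 * trailing bytes with "rep; stosb".  If the dword loop faults with
 * one dword still to go, the fixup at 3: computes the bytes left as
 * %2 + %0 * 4 = 2 + 1 * 4 = 6, which becomes the "could not clear"
 * return value of the callers below.
 */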

/**
 * clear_user: - Zero a block of memory in user space.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n))
		__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(clear_user);

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	__do_clear_user(to, n);
	return n;
}
EXPORT_SYMBOL(__clear_user);
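
/*
 * Usage sketch (hypothetical caller; "ubuf", "copied" and "len" are
 * made-up names): a driver zeroing the tail of a partially filled
 * user buffer would write
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 *
 * __clear_user() is the same minus the access_ok() check, for callers
 * that have already validated the range themselves.
 */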

/**
 * strnlen_user: - Get the size of a string in user space.
 * @s: The string to measure.
 * @n: The maximum valid length
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 * If the string is too long, returns a value greater than @n.
 */
long strnlen_user(const char __user *s, long n)
{
	unsigned long mask = -__addr_ok(s);
	unsigned long res, tmp;

	might_fault();

	__asm__ __volatile__(
		"	testl %0, %0\n"
		"	jz 3f\n"
		"	andl %0,%%ecx\n"
		"0:	repne; scasb\n"
		"	setne %%al\n"
		"	subl %%ecx,%0\n"
		"	addl %0,%%eax\n"
		"1:\n"
		".section .fixup,\"ax\"\n"
		"2:	xorl %%eax,%%eax\n"
		"	jmp 1b\n"
		"3:	movb $1,%%al\n"
		"	jmp 1b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.align 4\n"
		"	.long 0b,2b\n"
		".previous"
		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
		:"0" (n), "1" (s), "2" (0), "3" (mask)
		:"cc");
	return res & mask;
}
EXPORT_SYMBOL(strnlen_user);
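
/*
 * Usage sketch (hypothetical caller; "name" is a made-up user
 * pointer):
 *
 *	long len = strnlen_user(name, PATH_MAX);
 *	if (!len)
 *		return -EFAULT;		(faulted while scanning)
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;	(no NUL within PATH_MAX bytes)
 *
 * len counts the terminating NUL, so the string body is len - 1 bytes.
 */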

#ifdef CONFIG_X86_INTEL_USERCOPY
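/*
 * All of the __copy_user*_intel() variants below share one structure:
 * the main loop moves 64 bytes per iteration through %eax/%edx in
 * eight 8-byte load/store pairs, and the lone loads from 32(%4) and
 * 64(%4) at the top pull the next cache lines in early, acting as a
 * software prefetch.  The remaining size % 64 bytes are finished with
 * "rep; movsl" plus "rep; movsb", and the exception-table fixups
 * convert a fault into a count of bytes not copied (the _zeroing
 * variants additionally clear the uncopied tail).
 */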
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		       "       .align 2,0x90\n"
		       "1:     movl 32(%4), %%eax\n"
		       "       cmpl $67, %0\n"
		       "       jbe 3f\n"
		       "2:     movl 64(%4), %%eax\n"
		       "       .align 2,0x90\n"
		       "3:     movl 0(%4), %%eax\n"
		       "4:     movl 4(%4), %%edx\n"
		       "5:     movl %%eax, 0(%3)\n"
		       "6:     movl %%edx, 4(%3)\n"
		       "7:     movl 8(%4), %%eax\n"
		       "8:     movl 12(%4),%%edx\n"
		       "9:     movl %%eax, 8(%3)\n"
		       "10:    movl %%edx, 12(%3)\n"
		       "11:    movl 16(%4), %%eax\n"
		       "12:    movl 20(%4), %%edx\n"
		       "13:    movl %%eax, 16(%3)\n"
		       "14:    movl %%edx, 20(%3)\n"
		       "15:    movl 24(%4), %%eax\n"
		       "16:    movl 28(%4), %%edx\n"
		       "17:    movl %%eax, 24(%3)\n"
		       "18:    movl %%edx, 28(%3)\n"
		       "19:    movl 32(%4), %%eax\n"
		       "20:    movl 36(%4), %%edx\n"
		       "21:    movl %%eax, 32(%3)\n"
		       "22:    movl %%edx, 36(%3)\n"
		       "23:    movl 40(%4), %%eax\n"
		       "24:    movl 44(%4), %%edx\n"
		       "25:    movl %%eax, 40(%3)\n"
		       "26:    movl %%edx, 44(%3)\n"
		       "27:    movl 48(%4), %%eax\n"
		       "28:    movl 52(%4), %%edx\n"
		       "29:    movl %%eax, 48(%3)\n"
		       "30:    movl %%edx, 52(%3)\n"
		       "31:    movl 56(%4), %%eax\n"
		       "32:    movl 60(%4), %%edx\n"
		       "33:    movl %%eax, 56(%3)\n"
		       "34:    movl %%edx, 60(%3)\n"
		       "       addl $-64, %0\n"
		       "       addl $64, %4\n"
		       "       addl $64, %3\n"
		       "       cmpl $63, %0\n"
		       "       ja  1b\n"
		       "35:    movl  %0, %%eax\n"
		       "       shrl  $2, %0\n"
		       "       andl  $3, %%eax\n"
		       "       cld\n"
		       "99:    rep; movsl\n"
		       "36:    movl %%eax, %0\n"
		       "37:    rep; movsb\n"
		       "100:\n"
		       ".section .fixup,\"ax\"\n"
		       "101:   lea 0(%%eax,%0,4),%0\n"
		       "       jmp 100b\n"
		       ".previous\n"
		       ".section __ex_table,\"a\"\n"
		       "       .align 4\n"
		       "       .long 1b,100b\n"
		       "       .long 2b,100b\n"
		       "       .long 3b,100b\n"
		       "       .long 4b,100b\n"
		       "       .long 5b,100b\n"
		       "       .long 6b,100b\n"
		       "       .long 7b,100b\n"
		       "       .long 8b,100b\n"
		       "       .long 9b,100b\n"
		       "       .long 10b,100b\n"
		       "       .long 11b,100b\n"
		       "       .long 12b,100b\n"
		       "       .long 13b,100b\n"
		       "       .long 14b,100b\n"
		       "       .long 15b,100b\n"
		       "       .long 16b,100b\n"
		       "       .long 17b,100b\n"
		       "       .long 18b,100b\n"
		       "       .long 19b,100b\n"
		       "       .long 20b,100b\n"
		       "       .long 21b,100b\n"
		       "       .long 22b,100b\n"
		       "       .long 23b,100b\n"
		       "       .long 24b,100b\n"
		       "       .long 25b,100b\n"
		       "       .long 26b,100b\n"
		       "       .long 27b,100b\n"
		       "       .long 28b,100b\n"
		       "       .long 29b,100b\n"
		       "       .long 30b,100b\n"
		       "       .long 31b,100b\n"
		       "       .long 32b,100b\n"
		       "       .long 33b,100b\n"
		       "       .long 34b,100b\n"
		       "       .long 35b,100b\n"
		       "       .long 36b,100b\n"
		       "       .long 37b,100b\n"
		       "       .long 99b,101b\n"
		       ".previous"
		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
		       :  "1"(to), "2"(from), "0"(size)
		       : "eax", "edx", "memory");
	return size;
}

static unsigned long
__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
{
	int d0, d1;
	__asm__ __volatile__(
		       "        .align 2,0x90\n"
		       "0:      movl 32(%4), %%eax\n"
		       "        cmpl $67, %0\n"
		       "        jbe 2f\n"
		       "1:      movl 64(%4), %%eax\n"
		       "        .align 2,0x90\n"
		       "2:      movl 0(%4), %%eax\n"
		       "21:     movl 4(%4), %%edx\n"
		       "        movl %%eax, 0(%3)\n"
		       "        movl %%edx, 4(%3)\n"
		       "3:      movl 8(%4), %%eax\n"
		       "31:     movl 12(%4),%%edx\n"
		       "        movl %%eax, 8(%3)\n"
		       "        movl %%edx, 12(%3)\n"
		       "4:      movl 16(%4), %%eax\n"
		       "41:     movl 20(%4), %%edx\n"
		       "        movl %%eax, 16(%3)\n"
		       "        movl %%edx, 20(%3)\n"
		       "10:     movl 24(%4), %%eax\n"
		       "51:     movl 28(%4), %%edx\n"
		       "        movl %%eax, 24(%3)\n"
		       "        movl %%edx, 28(%3)\n"
		       "11:     movl 32(%4), %%eax\n"
		       "61:     movl 36(%4), %%edx\n"
		       "        movl %%eax, 32(%3)\n"
		       "        movl %%edx, 36(%3)\n"
		       "12:     movl 40(%4), %%eax\n"
		       "71:     movl 44(%4), %%edx\n"
		       "        movl %%eax, 40(%3)\n"
		       "        movl %%edx, 44(%3)\n"
		       "13:     movl 48(%4), %%eax\n"
		       "81:     movl 52(%4), %%edx\n"
		       "        movl %%eax, 48(%3)\n"
		       "        movl %%edx, 52(%3)\n"
		       "14:     movl 56(%4), %%eax\n"
		       "91:     movl 60(%4), %%edx\n"
		       "        movl %%eax, 56(%3)\n"
		       "        movl %%edx, 60(%3)\n"
		       "        addl $-64, %0\n"
		       "        addl $64, %4\n"
		       "        addl $64, %3\n"
		       "        cmpl $63, %0\n"
		       "        ja  0b\n"
		       "5:      movl  %0, %%eax\n"
		       "        shrl  $2, %0\n"
		       "        andl $3, %%eax\n"
		       "        cld\n"
		       "6:      rep; movsl\n"
		       "        movl %%eax,%0\n"
		       "7:      rep; movsb\n"
		       "8:\n"
		       ".section .fixup,\"ax\"\n"
		       "9:      lea 0(%%eax,%0,4),%0\n"
		       "16:     pushl %0\n"
		       "        pushl %%eax\n"
		       "        xorl %%eax,%%eax\n"
		       "        rep; stosb\n"
		       "        popl %%eax\n"
		       "        popl %0\n"
		       "        jmp 8b\n"
		       ".previous\n"
		       ".section __ex_table,\"a\"\n"
		       "	.align 4\n"
		       "	.long 0b,16b\n"
		       "	.long 1b,16b\n"
		       "	.long 2b,16b\n"
		       "	.long 21b,16b\n"
		       "	.long 3b,16b\n"
		       "	.long 31b,16b\n"
		       "	.long 4b,16b\n"
		       "	.long 41b,16b\n"
		       "	.long 10b,16b\n"
		       "	.long 51b,16b\n"
		       "	.long 11b,16b\n"
		       "	.long 61b,16b\n"
		       "	.long 12b,16b\n"
		       "	.long 71b,16b\n"
		       "	.long 13b,16b\n"
		       "	.long 81b,16b\n"
		       "	.long 14b,16b\n"
		       "	.long 91b,16b\n"
		       "	.long 6b,9b\n"
		       "        .long 7b,16b\n"
		       ".previous"
		       : "=&c"(size), "=&D" (d0), "=&S" (d1)
		       :  "1"(to), "2"(from), "0"(size)
		       : "eax", "edx", "memory");
	return size;
}

/*
 * Non-temporal-hint version of __copy_user_zeroing_intel.  It is cache
 * aware: the movnti stores below bypass the cache so a large copy does
 * not evict the caller's working set, and the sfence afterwards orders
 * the weakly-ordered non-temporal stores before any later stores.
 * hyoshiok@miraclelinux.com
 */

static unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
	       "        .align 2,0x90\n"
	       "0:      movl 32(%4), %%eax\n"
	       "        cmpl $67, %0\n"
	       "        jbe 2f\n"
	       "1:      movl 64(%4), %%eax\n"
	       "        .align 2,0x90\n"
	       "2:      movl 0(%4), %%eax\n"
	       "21:     movl 4(%4), %%edx\n"
	       "        movnti %%eax, 0(%3)\n"
	       "        movnti %%edx, 4(%3)\n"
	       "3:      movl 8(%4), %%eax\n"
	       "31:     movl 12(%4),%%edx\n"
	       "        movnti %%eax, 8(%3)\n"
	       "        movnti %%edx, 12(%3)\n"
	       "4:      movl 16(%4), %%eax\n"
	       "41:     movl 20(%4), %%edx\n"
	       "        movnti %%eax, 16(%3)\n"
	       "        movnti %%edx, 20(%3)\n"
	       "10:     movl 24(%4), %%eax\n"
	       "51:     movl 28(%4), %%edx\n"
	       "        movnti %%eax, 24(%3)\n"
	       "        movnti %%edx, 28(%3)\n"
	       "11:     movl 32(%4), %%eax\n"
	       "61:     movl 36(%4), %%edx\n"
	       "        movnti %%eax, 32(%3)\n"
	       "        movnti %%edx, 36(%3)\n"
	       "12:     movl 40(%4), %%eax\n"
	       "71:     movl 44(%4), %%edx\n"
	       "        movnti %%eax, 40(%3)\n"
	       "        movnti %%edx, 44(%3)\n"
	       "13:     movl 48(%4), %%eax\n"
	       "81:     movl 52(%4), %%edx\n"
	       "        movnti %%eax, 48(%3)\n"
	       "        movnti %%edx, 52(%3)\n"
	       "14:     movl 56(%4), %%eax\n"
	       "91:     movl 60(%4), %%edx\n"
	       "        movnti %%eax, 56(%3)\n"
	       "        movnti %%edx, 60(%3)\n"
	       "        addl $-64, %0\n"
	       "        addl $64, %4\n"
	       "        addl $64, %3\n"
	       "        cmpl $63, %0\n"
	       "        ja  0b\n"
	       "        sfence \n"
	       "5:      movl  %0, %%eax\n"
	       "        shrl  $2, %0\n"
	       "        andl $3, %%eax\n"
	       "        cld\n"
	       "6:      rep; movsl\n"
	       "        movl %%eax,%0\n"
	       "7:      rep; movsb\n"
	       "8:\n"
	       ".section .fixup,\"ax\"\n"
	       "9:      lea 0(%%eax,%0,4),%0\n"
	       "16:     pushl %0\n"
	       "        pushl %%eax\n"
	       "        xorl %%eax,%%eax\n"
	       "        rep; stosb\n"
	       "        popl %%eax\n"
	       "        popl %0\n"
	       "        jmp 8b\n"
	       ".previous\n"
	       ".section __ex_table,\"a\"\n"
	       "	.align 4\n"
	       "	.long 0b,16b\n"
	       "	.long 1b,16b\n"
	       "	.long 2b,16b\n"
	       "	.long 21b,16b\n"
	       "	.long 3b,16b\n"
	       "	.long 31b,16b\n"
	       "	.long 4b,16b\n"
	       "	.long 41b,16b\n"
	       "	.long 10b,16b\n"
	       "	.long 51b,16b\n"
	       "	.long 11b,16b\n"
	       "	.long 61b,16b\n"
	       "	.long 12b,16b\n"
	       "	.long 71b,16b\n"
	       "	.long 13b,16b\n"
	       "	.long 81b,16b\n"
	       "	.long 14b,16b\n"
	       "	.long 91b,16b\n"
	       "	.long 6b,9b\n"
	       "        .long 7b,16b\n"
	       ".previous"
	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
	       :  "1"(to), "2"(from), "0"(size)
	       : "eax", "edx", "memory");
	return size;
}

static unsigned long __copy_user_intel_nocache(void *to,
				const void __user *from, unsigned long size)
{
	int d0, d1;

	__asm__ __volatile__(
	       "        .align 2,0x90\n"
	       "0:      movl 32(%4), %%eax\n"
	       "        cmpl $67, %0\n"
	       "        jbe 2f\n"
	       "1:      movl 64(%4), %%eax\n"
	       "        .align 2,0x90\n"
	       "2:      movl 0(%4), %%eax\n"
	       "21:     movl 4(%4), %%edx\n"
	       "        movnti %%eax, 0(%3)\n"
	       "        movnti %%edx, 4(%3)\n"
	       "3:      movl 8(%4), %%eax\n"
	       "31:     movl 12(%4),%%edx\n"
	       "        movnti %%eax, 8(%3)\n"
	       "        movnti %%edx, 12(%3)\n"
	       "4:      movl 16(%4), %%eax\n"
	       "41:     movl 20(%4), %%edx\n"
	       "        movnti %%eax, 16(%3)\n"
	       "        movnti %%edx, 20(%3)\n"
	       "10:     movl 24(%4), %%eax\n"
	       "51:     movl 28(%4), %%edx\n"
	       "        movnti %%eax, 24(%3)\n"
	       "        movnti %%edx, 28(%3)\n"
	       "11:     movl 32(%4), %%eax\n"
	       "61:     movl 36(%4), %%edx\n"
	       "        movnti %%eax, 32(%3)\n"
	       "        movnti %%edx, 36(%3)\n"
	       "12:     movl 40(%4), %%eax\n"
	       "71:     movl 44(%4), %%edx\n"
	       "        movnti %%eax, 40(%3)\n"
	       "        movnti %%edx, 44(%3)\n"
	       "13:     movl 48(%4), %%eax\n"
	       "81:     movl 52(%4), %%edx\n"
	       "        movnti %%eax, 48(%3)\n"
	       "        movnti %%edx, 52(%3)\n"
	       "14:     movl 56(%4), %%eax\n"
	       "91:     movl 60(%4), %%edx\n"
	       "        movnti %%eax, 56(%3)\n"
	       "        movnti %%edx, 60(%3)\n"
	       "        addl $-64, %0\n"
	       "        addl $64, %4\n"
	       "        addl $64, %3\n"
	       "        cmpl $63, %0\n"
	       "        ja  0b\n"
	       "        sfence \n"
	       "5:      movl  %0, %%eax\n"
	       "        shrl  $2, %0\n"
	       "        andl $3, %%eax\n"
	       "        cld\n"
	       "6:      rep; movsl\n"
	       "        movl %%eax,%0\n"
	       "7:      rep; movsb\n"
	       "8:\n"
	       ".section .fixup,\"ax\"\n"
	       "9:      lea 0(%%eax,%0,4),%0\n"
	       "16:     jmp 8b\n"
	       ".previous\n"
	       ".section __ex_table,\"a\"\n"
	       "	.align 4\n"
	       "	.long 0b,16b\n"
	       "	.long 1b,16b\n"
	       "	.long 2b,16b\n"
	       "	.long 21b,16b\n"
	       "	.long 3b,16b\n"
	       "	.long 31b,16b\n"
	       "	.long 4b,16b\n"
	       "	.long 41b,16b\n"
	       "	.long 10b,16b\n"
	       "	.long 51b,16b\n"
	       "	.long 11b,16b\n"
	       "	.long 61b,16b\n"
	       "	.long 12b,16b\n"
	       "	.long 71b,16b\n"
	       "	.long 13b,16b\n"
	       "	.long 81b,16b\n"
	       "	.long 14b,16b\n"
	       "	.long 91b,16b\n"
	       "	.long 6b,9b\n"
	       "        .long 7b,16b\n"
	       ".previous"
	       : "=&c"(size), "=&D" (d0), "=&S" (d1)
	       :  "1"(to), "2"(from), "0"(size)
	       : "eax", "edx", "memory");
	return size;
}

#else

/*
 * Leave these declared but undefined.  There should not be any
 * references to them.
 */
unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
					unsigned long size);
unsigned long __copy_user_intel(void __user *to, const void *from,
					unsigned long size);
unsigned long __copy_user_zeroing_intel_nocache(void *to,
				const void __user *from, unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary-sized copy.  */
#define __copy_user(to, from, size)					\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	cmp  $7,%0\n"					\
		"	jbe  1f\n"					\
		"	movl %1,%0\n"					\
		"	negl %0\n"					\
		"	andl $7,%0\n"					\
		"	subl %0,%3\n"					\
		"4:	rep; movsb\n"					\
		"	movl %3,%0\n"					\
		"	shrl $2,%0\n"					\
		"	andl $3,%3\n"					\
		"	.align 2,0x90\n"				\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5:	addl %3,%0\n"					\
		"	jmp 2b\n"					\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 4b,5b\n"					\
		"	.long 0b,3b\n"					\
		"	.long 1b,2b\n"					\
		".previous"						\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)
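
/*
 * Worked example (hypothetical values): for to = 0x1005 and
 * size = 100, the prologue above copies (-0x1005) & 7 = 3 bytes with
 * "rep; movsb" so that the destination becomes 8-byte aligned, then
 * moves (100 - 3) >> 2 = 24 dwords with "rep; movsl" and the final
 * (100 - 3) & 3 = 1 byte with "rep; movsb".
 */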

#define __copy_user_zeroing(to, from, size)				\
do {									\
	int __d0, __d1, __d2;						\
	__asm__ __volatile__(						\
		"	cmp  $7,%0\n"					\
		"	jbe  1f\n"					\
		"	movl %1,%0\n"					\
		"	negl %0\n"					\
		"	andl $7,%0\n"					\
		"	subl %0,%3\n"					\
		"4:	rep; movsb\n"					\
		"	movl %3,%0\n"					\
		"	shrl $2,%0\n"					\
		"	andl $3,%3\n"					\
		"	.align 2,0x90\n"				\
		"0:	rep; movsl\n"					\
		"	movl %3,%0\n"					\
		"1:	rep; movsb\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"5:	addl %3,%0\n"					\
		"	jmp 6f\n"					\
		"3:	lea 0(%3,%0,4),%0\n"				\
		"6:	pushl %0\n"					\
		"	pushl %%eax\n"					\
		"	xorl %%eax,%%eax\n"				\
		"	rep; stosb\n"					\
		"	popl %%eax\n"					\
		"	popl %0\n"					\
		"	jmp 2b\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.align 4\n"					\
		"	.long 4b,5b\n"					\
		"	.long 0b,3b\n"					\
		"	.long 1b,6b\n"					\
		".previous"						\
		: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)	\
		: "3"(size), "0"(size), "1"(to), "2"(from)		\
		: "memory");						\
} while (0)

unsigned long __copy_to_user_ll(void __user *to, const void *from,
				unsigned long n)
{
#ifndef CONFIG_X86_WP_WORKS_OK
	if (unlikely(boot_cpu_data.wp_works_ok == 0) &&
			((unsigned long)to) < TASK_SIZE) {
		/*
		 * When we are in an atomic section (see
		 * mm/filemap.c:file_read_actor), return the full
		 * length to take the slow path.
		 */
		if (in_atomic())
			return n;

		/*
		 * CPU does not honor the WP bit when writing
		 * from supervisor mode, and due to preemption or SMP,
		 * the page tables can change at any time.
		 * Do it manually.	Manfred <manfred@colorfullife.com>
		 */
		while (n) {
			unsigned long offset = ((unsigned long)to)%PAGE_SIZE;
			unsigned long len = PAGE_SIZE - offset;
			int retval;
			struct page *pg;
			void *maddr;

			if (len > n)
				len = n;

survive:
			down_read(&current->mm->mmap_sem);
			retval = get_user_pages(current, current->mm,
					(unsigned long)to, 1, 1, 0, &pg, NULL);

			if (retval == -ENOMEM && is_global_init(current)) {
				up_read(&current->mm->mmap_sem);
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto survive;
			}

			if (retval != 1) {
				up_read(&current->mm->mmap_sem);
				break;
			}

			maddr = kmap_atomic(pg);
			memcpy(maddr + offset, from, len);
			kunmap_atomic(maddr);
			set_page_dirty_lock(pg);
			put_page(pg);
			up_read(&current->mm->mmap_sem);

			from += len;
			to += len;
			n -= len;
		}
		return n;
	}
#endif
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);

unsigned long __copy_from_user_ll(void *to, const void __user *from,
					unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user_zeroing(to, from, n);
	else
		n = __copy_user_zeroing_intel(to, from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);

unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
					 unsigned long n)
{
	if (movsl_is_ok(to, from, n))
		__copy_user(to, from, n);
	else
		n = __copy_user_intel((void __user *)to,
				      (const void *)from, n);
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);

unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_zeroing_intel_nocache(to, from, n);
	else
		__copy_user_zeroing(to, from, n);
#else
	__copy_user_zeroing(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
					unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	if (n > 64 && cpu_has_xmm2)
		n = __copy_user_intel_nocache(to, from, n);
	else
		__copy_user(to, from, n);
#else
	__copy_user(to, from, n);
#endif
	return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
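
/*
 * Usage sketch (hypothetical caller; "kbuf", "ubuf" and "count" are
 * made-up names): the nocache variants are normally reached through
 * the __copy_from_user_nocache() wrapper and help when a large user
 * buffer is read once, e.g. into a staging buffer, so the copy does
 * not evict the caller's working set:
 *
 *	if (__copy_from_user_nocache(kbuf, ubuf, count))
 *		return -EFAULT;
 */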

/**
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
unsigned long
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
EXPORT_SYMBOL(copy_to_user);
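
/*
 * Usage sketch (hypothetical ioctl handler; "argp" and struct
 * foo_status are made-up names):
 *
 *	struct foo_status status = { .ready = 1 };
 *	if (copy_to_user(argp, &status, sizeof(status)))
 *		return -EFAULT;
 */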

/**
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}
EXPORT_SYMBOL(_copy_from_user);
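
/*
 * Usage sketch (hypothetical caller; "argp" and struct foo_req are
 * made-up names).  Note the zero padding described above: on a partial
 * fault the kernel buffer still ends up fully initialized, so it
 * cannot leak stack garbage even when the caller returns -EFAULT:
 *
 *	struct foo_req req;
 *	if (copy_from_user(&req, argp, sizeof(req)))
 *		return -EFAULT;
 */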

void copy_from_user_overflow(void)
{
	WARN(1, "Buffer overflow detected!\n");
}
EXPORT_SYMBOL(copy_from_user_overflow);