/* $Id: uaccess.h,v 1.1.1.1.2.4 2002/08/28 16:52:43 gniibe Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999 Niibe Yutaka
 *
 * Based on:
 * MIPS implementation version 1.15 by
 * Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 * and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })

#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS MAKE_MM_SEG(0x80000000)

#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
#define set_fs(x) (current->addr_limit=(x))

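/*
 * Typical usage pattern (a sketch, not part of this header): lift the
 * range check temporarily so the user-access helpers can operate on a
 * kernel pointer, and always restore the old limit afterwards.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... access a kernel buffer via get_user()/copy_to_user() ...
 *	set_fs(old_fs);
 */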
#define segment_eq(a,b) ((a).seg == (b).seg)

#define __addr_ok(addr) ((unsigned long)(addr) < (current->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic.  We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum > addr_limit) flag = true;
 *
 * A non-zero flag means the range is NOT ok.
 */
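/*
 * Worked example with USER_DS (addr_limit.seg == 0x80000000):
 *
 *   addr = 0x7ffffff0, size = 0x20:
 *     addc: sum = 0x80000010, no carry; cmp/hi: sum > limit
 *     -> flag != 0, range rejected.
 *
 *   addr = 0xfffffff0, size = 0x20:
 *     addc wraps: sum = 0x00000010 with the carry set
 *     -> the carry is rotated into flag, range rejected.
 */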
#define __range_ok(addr,size) ({ \
	unsigned long flag,sum; \
	__asm__("clrt; addc %3, %1; movt %0; cmp/hi %4, %1; rotcl %0" \
		:"=&r" (flag), "=r" (sum) \
		:"1" (addr), "r" ((int)(size)), "r" (current->addr_limit.seg) \
		:"t"); \
	flag; })

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)

static inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
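/*
 * Example (a sketch; 'uptr' and 'struct foo' are placeholders):
 * validating a user pointer once before a series of accesses,
 * e.g. at the top of an ioctl handler.
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, sizeof(struct foo)))
 *		return -EFAULT;
 */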

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr)))
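/*
 * Example (a sketch; 'arg' stands for a user-supplied pointer): both
 * macros evaluate to 0 on success and -EFAULT on a fault, so the
 * usual pattern is
 *
 *	int val;
 *
 *	if (get_user(val, (int *)arg))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int *)arg))
 *		return -EFAULT;
 */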

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
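/*
 * Example (a sketch; 'u' is a placeholder user pointer): one
 * access_ok() call can cover several unchecked accesses to the
 * same object.
 *
 *	if (!access_ok(VERIFY_WRITE, u, 2 * sizeof(int)))
 *		return -EFAULT;
 *	__put_user(a, &u[0]);
 *	__put_user(b, &u[1]);
 */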

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
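/*
 * Notes on the implementation below:
 *
 * - __large_struct exists only so the "m" constraints get an operand
 *   that formally spans a large region; GCC then does not assume the
 *   asm touches just a single word through __m().
 *
 * - The empty __asm__("" : "=r" (var)) statements emit no code; they
 *   merely mark 'var' as written, silencing "may be used
 *   uninitialized" warnings on the error paths.
 */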

#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	__asm__("":"=r" (__gu_err)); \
	switch (size) { \
	case 1: __get_user_asm("b"); break; \
	case 2: __get_user_asm("w"); break; \
	case 4: __get_user_asm("l"); break; \
	default: __get_user_unknown(); break; \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; })

#define __get_user_check(x,ptr,size) ({ \
	long __gu_err = -EFAULT; \
	__typeof__(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__gu_addr = (long) (ptr); \
	if (__access_ok(__gu_addr,size)) { \
		switch (size) { \
		case 1: __get_user_asm("b"); break; \
		case 2: __get_user_asm("w"); break; \
		case 4: __get_user_asm("l"); break; \
		default: __get_user_unknown(); break; \
		} \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; })

#define __get_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn " %2, %1\n\t" \
	"mov #0, %0\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov #0, %1\n\t" \
	"mov.l 4f, %0\n\t" \
	"jmp @%0\n\t" \
	" mov %3, %0\n" \
	"4: .long 2b\n\t" \
	".previous\n" \
	".section __ex_table,\"a\"\n\t" \
	".long 1b, 3b\n\t" \
	".previous" \
	:"=&r" (__gu_err), "=&r" (__gu_val) \
	:"m" (__m(__gu_addr)), "i" (-EFAULT)); })

extern void __get_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __put_user_asm("b"); break; \
	case 2: __put_user_asm("w"); break; \
	case 4: __put_user_asm("l"); break; \
	case 8: __put_user_u64(__pu_val,__pu_addr,__pu_err); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; })

#define __put_user_check(x,ptr,size) ({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	if (__access_ok(__pu_addr,size)) { \
		switch (size) { \
		case 1: __put_user_asm("b"); break; \
		case 2: __put_user_asm("w"); break; \
		case 4: __put_user_asm("l"); break; \
		case 8: __put_user_u64(__pu_val,__pu_addr,__pu_err); break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; })

#define __put_user_asm(insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn " %1, %2\n\t" \
	"mov #0, %0\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l 4f, %0\n\t" \
	"jmp @%0\n\t" \
	" mov %3, %0\n" \
	"4: .long 2b\n\t" \
	".previous\n" \
	".section __ex_table,\"a\"\n\t" \
	".long 1b, 3b\n\t" \
	".previous" \
	:"=&r" (__pu_err) \
	:"r" (__pu_val), "m" (__m(__pu_addr)), "i" (-EFAULT) \
	:"memory"); })

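/*
 * Operand modifiers used below (an assumption based on GCC's SH back
 * end): %R1/%S1 select the least/most significant word of the
 * register pair holding 'val', and %T2 refers to the word following
 * the memory operand %2.  The two byte orders therefore differ only
 * in which half of the 64-bit value is stored first.
 */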
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l %R1,%2\n\t" \
	"mov.l %S1,%T2\n\t" \
	"mov #0,%0\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l 4f,%0\n\t" \
	"jmp @%0\n\t" \
	" mov %3,%0\n" \
	"4: .long 2b\n\t" \
	".previous\n" \
	".section __ex_table,\"a\"\n\t" \
	".long 1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l %S1,%2\n\t" \
	"mov.l %R1,%T2\n\t" \
	"mov #0,%0\n" \
	"2:\n" \
	".section .fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l 4f,%0\n\t" \
	"jmp @%0\n\t" \
	" mov %3,%0\n" \
	"4: .long 2b\n\t" \
	".previous\n" \
	".section __ex_table,\"a\"\n\t" \
	".long 1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#endif

extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })
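/*
 * Example (a sketch; 'buf', 'kbuf' and 'len' are placeholders): a
 * non-zero return value is the number of bytes left uncopied, so the
 * usual driver pattern is
 *
 *	if (copy_to_user(buf, kbuf, len))
 *		return -EFAULT;
 */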

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

/*
 * Clear the area and return the number of bytes NOT cleared
 * (0 unless a fault occurs).
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
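/*
 * Example (a sketch; 'buf', 'done' and 'len' are placeholders):
 * zero-filling the unwritten tail of a user buffer.
 *
 *	if (clear_user(buf + done, len - done))
 *		return -EFAULT;
 */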

static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b @%2+, %1\n\t"
		"cmp/eq #0, %1\n\t"
		"bt/s 2f\n"
		"1:\n"
		"mov.b %1, @%3\n\t"
		"dt %7\n\t"
		"bf/s 9b\n\t"
		" add #1, %3\n\t"
		"2:\n\t"
		"sub %7, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l 5f, %1\n\t"
		"jmp @%1\n\t"
		" mov %8, %0\n\t"
		".balign 4\n"
		"5: .long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .balign 4\n"
		" .long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} \
	__sfu_res; })
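/*
 * Example (a sketch; 'uname' is a placeholder user pointer): a
 * negative return value means the source range was not accessible.
 *
 *	char name[32];
 *	long n;
 *
 *	n = strncpy_from_user(name, uname, sizeof(name));
 *	if (n < 0)
 *		return -EFAULT;
 */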

/*
 * Return the size of a string (including the terminating 0!)
 */
static __inline__ long __strnlen_user(const char *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq %4, %0\n\t"
		"bt 2f\n"
		"1:\t"
		"mov.b @(%0,%3), %1\n\t"
		"tst %1, %1\n\t"
		"bf/s 9b\n\t"
		" add #1, %0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l 4f, %1\n\t"
		"jmp @%1\n\t"
		" mov %5, %0\n"
		".balign 4\n"
		"4: .long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .balign 4\n"
		" .long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n), "i" (-EFAULT)
		: "t");
	return res;
}

static __inline__ long strnlen_user(const char *s, long n)
{
	if (!access_ok(VERIFY_READ, s, n))
		return 0;
	else
		return __strnlen_user(s, n);
}

static __inline__ long strlen_user(const char *s)
{
	if (!access_ok(VERIFY_READ, s, 0))
		return 0;
	else
		return __strnlen_user(s, ~0UL >> 1);
}
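/*
 * Example (a sketch; 'ustr' is a placeholder user pointer): the value
 * returned by strnlen_user() counts the terminating NUL, and 0 means
 * the range was not accessible at all.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (len <= 0)
 *		return -EFAULT;
 */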

struct exception_table_entry
{
	unsigned long insn, fixup;
};

/* Returns 0 if no fixup is found for the faulting address,
   and the fixup unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);

/* Returns the new pc.  On SuperH the fixup unit is itself the new pc,
   so map_reg and pc are unused here.  */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
	fixup_unit; \
})

#endif /* __ASM_SH_UACCESS_H */