#ifndef __ASM_SH64_UACCESS_H
#define __ASM_SH64_UACCESS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/uaccess.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 * Based on:
 *	MIPS implementation version 1.15 by
 *	Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *	and i386 version.
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->addr_limit)
#define set_fs(x)	(current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr)	((unsigned long)(addr) < (current->addr_limit.seg))
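
/*
 * Typical use of the segment override above (a minimal sketch of the usual
 * save/override/restore pattern; the variables and the called routine are
 * hypothetical, not part of this header):
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	err = routine_taking_user_pointers(kernel_buffer);
 *	set_fs(old_fs);
 */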

/*
 * Uhhuh, this needs 33-bit arithmetic.  We have a carry..
 *
 *	sum := addr + size;  carry? --> flag = true;
 *	if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current->addr_limit.seg)) ? 0 : 1)

#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
#define __access_ok(addr,size) (__range_ok(addr,size) == 0)

extern inline int verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
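
/*
 * Minimal usage sketch for the checks above (hypothetical code; `uptr' and
 * `count' are caller-supplied values, not part of this header):
 *
 *	if (!access_ok(VERIFY_WRITE, uptr, count))
 *		return -EFAULT;
 *
 * verify_area() wraps the same test but reports it as 0 / -EFAULT.
 */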

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As with the MIPS port, kernel and user data share the same address
 * space, so we can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))
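
/*
 * Usage sketch (hypothetical code, not part of this header; `user_ptr' is
 * assumed to be an int pointer into user space):
 *
 *	int value;
 *
 *	if (get_user(value, (int *) user_ptr))
 *		return -EFAULT;
 *	if (put_user(value + 1, (int *) user_ptr))
 *		return -EFAULT;
 *
 * The access size is taken from the pointer type; a non-zero return value
 * means the access faulted.
 */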

/*
 * The "__xxx" versions do not do address space checking; they are useful
 * when doing multiple accesses to the same area (the caller has to do the
 * checks by hand with "access_ok()").
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
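
/*
 * Usage sketch for the unchecked variants (hypothetical code; a single
 * access_ok() covers both subsequent accesses through `uptr'):
 *
 *	int a, b;
 *
 *	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
 *		return -EFAULT;
 *	__get_user(a, &uptr[0]);
 *	__get_user(b, &uptr[1]);
 */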

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  They can be optimized for the
 * common case of simply returning from the function xxx_ret is used in.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })
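
/*
 * Usage sketch (hypothetical; the enclosing function must be able to
 * return `-EFAULT', since the macro expands to a plain return):
 *
 *	put_user_ret(value, (int *) user_ptr, -EFAULT);
 *
 * Execution continues past this point only if the store succeeded.
 */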

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * The empty asm statements in the macros below mark the error/value
 * variables as initialised, which keeps the compiler from warning about
 * possibly uninitialised use without generating any code.
 */
#define __get_user_nocheck(x,ptr,size) ({ \
	long __gu_err; \
	__typeof(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__asm__("":"=r" (__gu_err)); \
	__gu_addr = (long) (ptr); \
	switch (size) { \
	case 1: __gu_err = __get_user_asm_b((void *) &__gu_val, __gu_addr); break; \
	case 2: __gu_err = __get_user_asm_w((void *) &__gu_val, __gu_addr); break; \
	case 4: __gu_err = __get_user_asm_l((void *) &__gu_val, __gu_addr); break; \
	case 8: __gu_err = __get_user_asm_q((void *) &__gu_val, __gu_addr); break; \
	default: __get_user_unknown(); break; \
	} \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; })

#define __get_user_check(x,ptr,size) ({ \
	long __gu_err; \
	__typeof(*(ptr)) __gu_val; \
	long __gu_addr; \
	__asm__("":"=r" (__gu_val)); \
	__asm__("":"=r" (__gu_err)); \
	__gu_addr = (long) (ptr); \
	if (__access_ok(__gu_addr,size)) { \
		switch (size) { \
		case 1: __gu_err = __get_user_asm_b((void *) &__gu_val, __gu_addr); break; \
		case 2: __gu_err = __get_user_asm_w((void *) &__gu_val, __gu_addr); break; \
		case 4: __gu_err = __get_user_asm_l((void *) &__gu_val, __gu_addr); break; \
		case 8: __gu_err = __get_user_asm_q((void *) &__gu_val, __gu_addr); break; \
		default: __get_user_unknown(); break; \
		} \
	} else \
		__gu_err = -EFAULT; \
	x = (__typeof__(*(ptr))) __gu_val; \
	__gu_err; })

extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);

#define __put_user_nocheck(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	switch (size) { \
	case 1: __pu_err = __put_user_asm_b((void *) &__pu_val, __pu_addr); break; \
	case 2: __pu_err = __put_user_asm_w((void *) &__pu_val, __pu_addr); break; \
	case 4: __pu_err = __put_user_asm_l((void *) &__pu_val, __pu_addr); break; \
	case 8: __pu_err = __put_user_asm_q((void *) &__pu_val, __pu_addr); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; })

#define __put_user_check(x,ptr,size) ({ \
	long __pu_err; \
	__typeof__(*(ptr)) __pu_val; \
	long __pu_addr; \
	__pu_val = (x); \
	__pu_addr = (long) (ptr); \
	__asm__("":"=r" (__pu_err)); \
	if (__access_ok(__pu_addr,size)) { \
		switch (size) { \
		case 1: __pu_err = __put_user_asm_b((void *) &__pu_val, __pu_addr); break; \
		case 2: __pu_err = __put_user_asm_w((void *) &__pu_val, __pu_addr); break; \
		case 4: __pu_err = __put_user_asm_l((void *) &__pu_val, __pu_addr); break; \
		case 8: __pu_err = __put_user_asm_q((void *) &__pu_val, __pu_addr); break; \
		default: __put_user_unknown(); break; \
		} \
	} else \
		__pu_err = -EFAULT; \
	__pu_err; })

extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  Returns the number of bytes NOT copied. */
/* XXX: should be split into an aligned 4-byte bulk copy plus the remainder. */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);

#define copy_to_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long) __copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })
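
/*
 * Usage sketch (hypothetical driver code; `ubuf', `kbuf' and `len' are
 * assumed): copy a kernel buffer out to user space and report -EFAULT on
 * any partial copy.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 */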

#define copy_to_user_ret(to,from,n,retval) ({ \
	if (copy_to_user(to,from,n)) \
		return retval; \
})

#define __copy_to_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({ \
	if (__copy_to_user(to,from,n)) \
		return retval; \
})

#define copy_from_user(to,from,n) ({ \
	void *__copy_to = (void *) (to); \
	void *__copy_from = (void *) (from); \
	__kernel_size_t __copy_size = (__kernel_size_t) (n); \
	__kernel_size_t __copy_res; \
	if (__copy_size && __access_ok((unsigned long) __copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size; \
	__copy_res; })
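
/*
 * Usage sketch (hypothetical; mirrors the copy_to_user() example above):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */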

#define copy_from_user_ret(to,from,n,retval) ({ \
	if (copy_from_user(to,from,n)) \
		return retval; \
})

#define __copy_from_user(to,from,n) \
	__copy_user((void *)(to), \
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({ \
	if (__copy_from_user(to,from,n)) \
		return retval; \
})

/* XXX: Not sure this works well.
   Should be split into an aligned 4-byte bulk clear plus the remainder. */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({ \
	void * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
	__cl_size; })
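
/*
 * Usage sketch (hypothetical): zero a user buffer, failing if any byte
 * could not be cleared.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */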

extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);

#define strncpy_from_user(dest,src,count) ({ \
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count); \
	long __sfu_res = -EFAULT; \
	if (__access_ok(__sfu_src, __sfu_count)) { \
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	} \
	__sfu_res; })
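
/*
 * Usage sketch (hypothetical; `uname' is a user-space string pointer).
 * A negative result indicates a fault:
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return -EFAULT;
 */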

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Return the size of a string (including the ending 0!)
 */
extern long __strnlen_user(const char *__s, long __n);

extern __inline__ long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}
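
/*
 * Usage sketch (hypothetical; `ustr' is a user-space string pointer):
 * strnlen_user() returns 0 for a bad address, otherwise the string length
 * including the terminating NUL.
 *
 *	long len = strnlen_user(ustr, PAGE_SIZE);
 *
 *	if (len == 0)
 *		return -EFAULT;
 */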

struct exception_table_entry
{
	unsigned long insn, fixup;
};
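
/*
 * Entries are normally emitted alongside the faulting instruction by the
 * user-access routines themselves.  A generic, illustrative sketch of the
 * usual Linux pattern (the real tables live in the arch's assembler and
 * fixup sources):
 *
 *	.section __ex_table, "a"
 *	.long	faulting_insn, fixup_code
 *	.previous
 */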

/* Returns 0 if exception not found and fixup.unit otherwise.  */
extern unsigned long search_exception_table(unsigned long addr);

/* Returns the new pc */
#define fixup_exception(map_reg, fixup_unit, pc) \
({ \
	fixup_unit; \
})

#endif /* __ASM_SH64_UACCESS_H */