#ifndef _ASM_IA64_UACCESS_H
#define _ASM_IA64_UACCESS_H

/*
 * This file defines various macros to transfer memory areas across
 * the user/kernel boundary.  This needs to be done carefully because
 * this code is executed in kernel mode and uses user-specified
 * addresses.  Thus, we need to be careful not to let the user trick
 * us into accessing kernel memory that would normally be
 * inaccessible.  This code is also fairly performance sensitive,
 * so we want to spend as little time doing safety checks as
 * possible.
 *
 * To make matters a bit more interesting, these macros are sometimes
 * also called from within the kernel itself, in which case the address
 * validity check must be skipped.  The get_fs() macro tells us what
 * to do: if get_fs()==USER_DS, checking is performed; if
 * get_fs()==KERNEL_DS, checking is bypassed.
 *
 * Note that even if the memory area specified by the user is in a
 * valid address range, it is still possible that we'll get a page
 * fault while accessing it.  This is handled by filling out an
 * exception handler fixup entry for each instruction that has the
 * potential to fault.  When such a fault occurs, the page fault
 * handler checks whether the faulting instruction has a fixup
 * associated with it and, if so, sets r8 to -EFAULT, clears r9 to 0,
 * and then resumes execution at the continuation point.
 *
 * Based on <asm-alpha/uaccess.h>.
 *
 * Copyright (C) 1998, 1999, 2001, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/errno.h>
#include <linux/sched.h>

#include <asm/pgtable.h>

/*
 * For historical reasons, the following macros are grossly misnamed:
 */
#define KERNEL_DS	((mm_segment_t) { ~0UL })		/* cf. access_ok() */
#define USER_DS		((mm_segment_t) { TASK_SIZE-1 })	/* cf. access_ok() */

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_ds()  (KERNEL_DS)
#define get_fs()  (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)
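
/*
 * Illustrative sketch only (not part of this interface): kernel code that
 * needs to pass a kernel pointer through the user-access routines usually
 * widens the address limit temporarily and restores it afterwards.  The
 * variable names below are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);			// bypass the address-limit check
 *	err = copy_from_user(&kbuf, kptr, sizeof(kbuf));
 *	set_fs(old_fs);				// always restore the old limit
 */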

/*
 * When accessing user memory, we need to make sure the entire area really is in
 * user-level space.  In order to do this efficiently, we make sure that the page at
 * address TASK_SIZE is never valid.  We also need to make sure that the address doesn't
 * point inside the virtually mapped linear page table.
 */
#define __access_ok(addr,size,segment)						\
	likely(((unsigned long) (addr)) <= (segment).seg			\
	       && ((segment).seg == KERNEL_DS.seg				\
		   || REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define access_ok(type,addr,size)	__access_ok((addr),(size),get_fs())

static inline int
verify_area (int type, const void *addr, unsigned long size)
{
	return access_ok(type,addr,size) ? 0 : -EFAULT;
}
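
/*
 * Usage sketch (hypothetical caller, for illustration only): validate the
 * whole range once with access_ok()/verify_area() and then use the
 * unchecked __copy_from_user()/__get_user() variants for the individual
 * accesses.
 *
 *	if (!access_ok(VERIFY_READ, uptr, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, uptr, len))
 *		return -EFAULT;
 */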

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr)	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr)	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))
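
/*
 * Usage sketch (hypothetical helper, for illustration only): both macros
 * evaluate to 0 on success and -EFAULT on failure; get_user() additionally
 * assigns the fetched value to its first argument (which is left as 0
 * after a fault).
 *
 *	static int bump_user_counter (int *uptr)	// hypothetical
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		return put_user(val + 1, uptr);
 *	}
 */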

extern void __get_user_unknown (void);

#define __get_user_nocheck(x,ptr,size)				\
({								\
	register long __gu_err asm ("r8") = 0;			\
	register long __gu_val asm ("r9") = 0;			\
	switch (size) {						\
	case 1: __get_user_8(ptr); break;			\
	case 2: __get_user_16(ptr); break;			\
	case 4: __get_user_32(ptr); break;			\
	case 8: __get_user_64(ptr); break;			\
	default: __get_user_unknown(); break;			\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size,segment)			\
({								\
	register long __gu_err asm ("r8") = -EFAULT;		\
	register long __gu_val asm ("r9") = 0;			\
	const __typeof__(*(ptr)) *__gu_addr = (ptr);		\
	if (__access_ok((long)__gu_addr,size,segment)) {	\
		__gu_err = 0;					\
		switch (size) {					\
		case 1: __get_user_8(__gu_addr); break;		\
		case 2: __get_user_16(__gu_addr); break;	\
		case 4: __get_user_32(__gu_addr); break;	\
		case 8: __get_user_64(__gu_addr); break;	\
		default: __get_user_unknown(); break;		\
		}						\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/* We need to declare the __ex_table section before we can use it in .xdata.  */
asm (".section \"__ex_table\", \"a\"\n\t.previous");

#if __GNUC__ >= 3
# define GAS_HAS_LOCAL_TAGS	/* define if gas supports local tags a la [1:] */
#endif

#ifdef GAS_HAS_LOCAL_TAGS
# define _LL "[1:]"
#else
# define _LL "1:"
#endif

#define __get_user_64(addr)									\
	asm ("\n"_LL"\tld8 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

#define __get_user_32(addr)									\
	asm ("\n"_LL"\tld4 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

#define __get_user_16(addr)									\
	asm ("\n"_LL"\tld2 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

#define __get_user_8(addr)									\
	asm ("\n"_LL"\tld1 %0=%2%P2\t// %0 and %1 get overwritten by exception handler\n"	\
	     "\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)+4\n"				\
	     _LL										\
	     : "=r"(__gu_val), "=r"(__gu_err) : "m"(__m(addr)), "1"(__gu_err));

extern void __put_user_unknown (void);

#define __put_user_nocheck(x,ptr,size)				\
({								\
	register long __pu_err asm ("r8") = 0;			\
	switch (size) {						\
	case 1: __put_user_8(x,ptr); break;			\
	case 2: __put_user_16(x,ptr); break;			\
	case 4: __put_user_32(x,ptr); break;			\
	case 8: __put_user_64(x,ptr); break;			\
	default: __put_user_unknown(); break;			\
	}							\
	__pu_err;						\
})

#define __put_user_check(x,ptr,size,segment)			\
({								\
	register long __pu_err asm ("r8") = -EFAULT;		\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
	if (__access_ok((long)__pu_addr,size,segment)) {	\
		__pu_err = 0;					\
		switch (size) {					\
		case 1: __put_user_8(x,__pu_addr); break;	\
		case 2: __put_user_16(x,__pu_addr); break;	\
		case 4: __put_user_32(x,__pu_addr); break;	\
		case 8: __put_user_64(x,__pu_addr); break;	\
		default: __put_user_unknown(); break;		\
		}						\
	}							\
	__pu_err;						\
})

/*
 * The "__put_user_xx()" macros tell gcc they read from memory
 * instead of writing: this is because they do not write to
 * any memory gcc knows about, so there are no aliasing issues.
 */
#define __put_user_64(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst8 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

#define __put_user_32(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst4 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

#define __put_user_16(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst2 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

#define __put_user_8(x,addr)								\
	asm volatile (									\
		"\n"_LL"\tst1 %1=%r2%P1\t// %0 gets overwritten by exception handler\n"	\
		"\t.xdata4 \"__ex_table\", @gprel(1b), @gprel(1f)\n"			\
		_LL									\
		: "=r"(__pu_err) : "m"(__m(addr)), "rO"(x), "0"(__pu_err))

/*
 * Complex access routines
 */
extern unsigned long __copy_user (void *to, const void *from, unsigned long count);

#define __copy_to_user(to,from,n)	__copy_user((to), (from), (n))
#define __copy_from_user(to,from,n)	__copy_user((to), (from), (n))

#define copy_to_user(to,from,n)		__copy_tofrom_user((to), (from), (n), 1)
#define copy_from_user(to,from,n)	__copy_tofrom_user((to), (from), (n), 0)

#define __copy_tofrom_user(to,from,n,check_to)							\
({												\
	void *__cu_to = (to);									\
	const void *__cu_from = (from);								\
	long __cu_len = (n);									\
												\
	if (__access_ok((long) ((check_to) ? __cu_to : __cu_from), __cu_len, get_fs())) {	\
		__cu_len = __copy_user(__cu_to, __cu_from, __cu_len);				\
	}											\
	__cu_len;										\
})
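
/*
 * Usage sketch (hypothetical, for illustration only): like __copy_user(),
 * copy_to_user() and copy_from_user() return the number of bytes that could
 * NOT be transferred, so zero means complete success.
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */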

extern unsigned long __do_clear_user (void *, unsigned long);

#define __clear_user(to,n)		\
({					\
	__do_clear_user(to,n);		\
})

#define clear_user(to,n)					\
({								\
	unsigned long __cu_len = (n);				\
	if (__access_ok((long) to, __cu_len, get_fs())) {	\
		__cu_len = __do_clear_user(to, __cu_len);	\
	}							\
	__cu_len;						\
})


/*
 * Returns: -EFAULT if exception before terminator, N if the entire
 * buffer filled, else strlen.
 */
extern long __strncpy_from_user (char *to, const char *from, long to_len);

#define strncpy_from_user(to,from,n)					\
({									\
	const char *__sfu_from = (from);				\
	long __sfu_ret = -EFAULT;					\
	if (__access_ok((long) __sfu_from, 0, get_fs()))		\
		__sfu_ret = __strncpy_from_user((to), __sfu_from, (n));	\
	__sfu_ret;							\
})
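
/*
 * Usage sketch (hypothetical, for illustration only), following the return
 * convention documented above: a negative value means a fault, a value
 * equal to the buffer size means the buffer was filled without finding a
 * terminator, anything else is the length of the copied string.
 *
 *	len = strncpy_from_user(kname, uname, sizeof(kname));
 *	if (len < 0)
 *		return -EFAULT;
 *	if (len == sizeof(kname))
 *		return -ENAMETOOLONG;
 */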

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
extern unsigned long __strlen_user (const char *);

#define strlen_user(str)				\
({							\
	const char *__su_str = (str);			\
	unsigned long __su_ret = 0;			\
	if (__access_ok((long) __su_str, 0, get_fs()))	\
		__su_ret = __strlen_user(__su_str);	\
	__su_ret;					\
})

/*
 * Returns: 0 if exception before NUL or reaching the supplied limit
 * (N), a value greater than N if the limit would be exceeded, else
 * strlen.
 */
extern unsigned long __strnlen_user (const char *, long);

#define strnlen_user(str, len)				\
({							\
	const char *__su_str = (str);			\
	unsigned long __su_ret = 0;			\
	if (__access_ok((long) __su_str, 0, get_fs()))	\
		__su_ret = __strnlen_user(__su_str, len);	\
	__su_ret;					\
})
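
/*
 * Usage sketch (hypothetical, for illustration only): strnlen_user() is the
 * bounded variant; a return of 0 means the string was not accessible at
 * all, and a value greater than the limit means the string is too long.
 *
 *	n = strnlen_user(uname, PATH_MAX);	// PATH_MAX used as an example limit
 *	if (n == 0)
 *		return -EFAULT;
 *	if (n > PATH_MAX)
 *		return -ENAMETOOLONG;
 */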

struct exception_table_entry {
	int addr;	/* gp-relative address of insn this fixup is for */
	int cont;	/* gp-relative continuation address; if bit 2 is set, r9 is set to 0 */
};

struct exception_fixup {
	unsigned long cont;	/* continuation point (bit 2: clear r9 if set) */
};

extern struct exception_fixup search_exception_table (unsigned long addr);
extern void handle_exception (struct pt_regs *regs, struct exception_fixup fixup);

#ifdef GAS_HAS_LOCAL_TAGS
#define SEARCH_EXCEPTION_TABLE(regs) search_exception_table(regs->cr_iip + ia64_psr(regs)->ri);
#else
#define SEARCH_EXCEPTION_TABLE(regs) search_exception_table(regs->cr_iip);
#endif

static inline int
done_with_exception (struct pt_regs *regs)
{
	struct exception_fixup fix;
	fix = SEARCH_EXCEPTION_TABLE(regs);
	if (fix.cont) {
		handle_exception(regs, fix);
		return 1;
	}
	return 0;
}
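
/*
 * Sketch of the intended caller (illustrative only): when a fault occurs in
 * kernel mode, the page fault handler described at the top of this file
 * gives the exception table a chance to fix things up before treating the
 * access as fatal.
 *
 *	if (done_with_exception(regs))
 *		return;		// fixup found: r8/r9 patched, execution resumes
 *	// otherwise this is a genuine kernel bug
 */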

#endif /* _ASM_IA64_UACCESS_H */