/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy. They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to. They must not fetch or store anything
 * outside of those areas. Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from. All data past to + size - N must be left unmodified.
 * If copying succeeds, the return value must be 0. If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied. In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen. The interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that. They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead. Out of those, the __... ones are inlined. Plain
 * copy_{to,from}_user() might or might not be inlined. If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of a short
 * copy. Neither __copy_from_user() nor __copy_from_user_inatomic() zero
 * anything at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
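
/*
 * To illustrate the contract above with made-up numbers: if
 * raw_copy_from_user(to, from, 128) returns 32, the first 96 bytes at to
 * now hold the data fetched from userspace and the final 32 bytes of the
 * destination are untouched - there is no zero-padding at this level.
 *
 *	n = raw_copy_from_user(to, from, 128);
 *	// n == 0:  all 128 bytes were copied
 *	// n == 32: to[0..95] is valid, to[96..127] is unmodified
 */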

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. The caller must check the
 * specified block with access_ok() before calling this function. The
 * caller should also make sure the user space address is pinned, so the
 * copy cannot page-fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
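
/*
 * A minimal sketch of how the __ variants are meant to be called (the
 * function and struct below are illustrative, not part of this header):
 * the access_ok() check is the caller's responsibility.
 *
 *	static int send_reply(struct reply __user *ureply,
 *			      const struct reply *r)
 *	{
 *		if (!access_ok(ureply, sizeof(*ureply)))
 *			return -EFAULT;
 *		if (__copy_to_user(ureply, r, sizeof(*r)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */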

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
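
/*
 * Typical calling pattern (an illustrative sketch, not part of this
 * header): a non-zero return value is the number of bytes left uncopied,
 * and is almost always translated into -EFAULT.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */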

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
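
/*
 * Callers treat the result like the other copy helpers: a non-zero return
 * is the number of bytes that were not copied, here because poisoned
 * memory was consumed. A hedged sketch of a typical caller:
 *
 *	rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		return -EIO;	// len - rem bytes made it across
 */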

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * Make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * Make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
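
/*
 * Sketch of the intended pairing (illustrative): with page faults
 * disabled, the *_inatomic copy variants fail fast instead of sleeping,
 * and the caller can retry from a sleepable context.
 *
 *	pagefault_disable();
 *	left = __copy_from_user_inatomic(dst, usrc, len);
 *	pagefault_enable();
 *	if (left)
 *		// fall back to copy_from_user() outside the atomic region
 */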

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, or the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
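
/*
 * Example (illustrative): peeking at kernel memory that may be unmapped,
 * e.g. from a debugging or tracing path.
 *
 *	unsigned long val;
 *
 *	if (get_kernel_nofault(val, (unsigned long *)addr))
 *		return -EFAULT;
 */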

#ifndef user_access_begin
#define user_access_begin(ptr, len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) unsafe_op_wrap(__copy_to_user(d, s, l), e)
#define unsafe_copy_from_user(d, s, l, e) unsafe_op_wrap(__copy_from_user(d, s, l), e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
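
/*
 * The unsafe_*() accessors amortize a single user_access_begin()/
 * user_access_end() pair over several accesses. A minimal sketch
 * (the struct and field names are illustrative):
 *
 *	if (!user_access_begin(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uarg->a, efault);
 *	unsafe_get_user(b, &uarg->b, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
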
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif /* __LINUX_UACCESS_H__ */