// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#include "swap.h"

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
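
/*
 * Illustrative sketch (not part of the kernel source): a typical caller pairs
 * kstrdup_const() with kfree_const() so that string literals living in
 * .rodata are neither copied nor freed.  The struct and function names below
 * are made up for the example.
 *
 *	struct widget {
 *		const char *name;
 *	};
 *
 *	static int widget_set_name(struct widget *w, const char *name)
 *	{
 *		w->name = kstrdup_const(name, GFP_KERNEL);
 *		return w->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void widget_destroy(struct widget *w)
 *	{
 *		kfree_const(w->name);	// safe for both .rodata and kmalloc'ed copies
 *	}
 */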

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);
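
/*
 * Illustrative sketch (not part of this file): callers of memdup_user() and
 * vmemdup_user() get an ERR_PTR() on failure rather than NULL, so the result
 * is checked with IS_ERR()/PTR_ERR() and freed with kfree() or kvfree()
 * respectively.  "user_ptr" and "len" are hypothetical caller arguments.
 *
 *	void *buf = vmemdup_user(user_ptr, len);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kvfree(buf);
 */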

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *	   random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
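
/*
 * Worked example (illustrative, assuming 4KiB pages): with start = 0x10400
 * and range = 0x5000, start is first rounded up to 0x11000 and range shrinks
 * to 0x4400, which leaves 4 whole pages to choose from; the result is then
 * one of 0x11000, 0x12000, 0x13000 or 0x14000, all of which lie inside the
 * original [start, start + range).
 */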

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
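
/*
 * Illustrative sketch (not part of this file): a driver that pins user pages
 * typically charges them with account_locked_vm() first and undoes the charge
 * when the pages are released.  "npages" is a hypothetical page count.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;	// would exceed RLIMIT_MEMLOCK
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */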

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		kmalloc_flags &= ~__GFP_NOFAIL;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub-page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
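
/*
 * Illustrative sketch (not part of this file): kvmalloc_node() is usually
 * reached through the kvmalloc()/kvzalloc() wrappers.  Because the buffer may
 * come from either kmalloc or vmalloc, it must be released with kvfree().
 * "size" is a hypothetical caller-supplied length.
 *
 *	buf = kvmalloc(size, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kvfree(buf);
 */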

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
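
/*
 * Illustrative sketch (not part of this file): the *_array/*calloc helpers
 * exist so that the n * size multiplication is overflow-checked rather than
 * open-coded by the caller.  "n" is a hypothetical, possibly user-controlled
 * element count.
 *
 *	struct item *items = vcalloc(n, sizeof(*items));	// NULL if n * size overflows
 *
 *	if (!items)
 *		return -ENOMEM;
 *	...
 *	vfree(items);
 */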

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	return folio_raw_mapping(page_folio(page));
}

/**
 * folio_mapped - Is this folio mapped into userspace?
 * @folio: The folio.
 *
 * Return: True if any page in this folio is referenced by user page tables.
 */
bool folio_mapped(struct folio *folio)
{
	long i, nr;

	if (!folio_test_large(folio))
		return atomic_read(&folio->_mapcount) >= 0;
	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
		return true;
	if (folio_test_hugetlb(folio))
		return false;

	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++) {
		if (atomic_read(&folio_page(folio, i)->_mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(folio_mapped);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio_swap_entry(folio));

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains the total number of mappings
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

/**
 * folio_mapcount() - Calculate the number of mappings of this folio.
 * @folio: The folio.
 *
 * A large folio tracks both how many times the entire folio is mapped,
 * and how many times each individual page in the folio is mapped.
 * This function calculates the total number of times the folio is
 * mapped.
 *
 * Return: The number of times this folio is mapped.
 */
int folio_mapcount(struct folio *folio)
{
	int i, compound, nr, ret;

	if (likely(!folio_test_large(folio)))
		return atomic_read(&folio->_mapcount) + 1;

	compound = folio_entire_mapcount(folio);
	if (folio_test_hugetlb(folio))
		return compound;
	ret = compound;
	nr = folio_nr_pages(folio);
	for (i = 0; i < nr; i++)
		ret += atomic_read(&folio_page(folio, i)->_mapcount) + 1;
	/* File pages have compound_mapcount included in _mapcount */
	if (!folio_test_anon(folio))
		return ret - compound * nr;
	if (folio_test_double_map(folio))
		ret -= nr;
	return ret;
}

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though the user usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in one cacheline and not in a cacheline shared
 * with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for a big
 * platform like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	pr_warn_ratelimited("%s: pid: %d, comm: %s, not enough memory for the allocation\n",
			    __func__, current->pid, current->comm);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *	    to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif
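
/*
 * Illustrative sketch (not part of this file): because mem_dump_obj() prints
 * with pr_cont(), the caller emits the preamble itself, e.g. from an error
 * path ("obj" being whatever pointer is under suspicion):
 *
 *	pr_err("unexpected object %px:", obj);
 *	mem_dump_obj(obj);
 */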

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- e.g., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allow a subsystem to
 * synchronize with such drivers, ensuring that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() are used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
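
/*
 * Illustrative sketch (not part of this file): a driver taking a page offline
 * brackets the PageOffline() transition with page_offline_begin()/end() so
 * that PFN walkers which called page_offline_freeze() cannot observe the page
 * content mid-transition.  "page" is a hypothetical page owned by the driver.
 *
 *	page_offline_begin();
 *	__SetPageOffline(page);
 *	// make the page inaccessible in the hypervisor
 *	page_offline_end();
 */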

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif