/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		((slab_flags_t __force)0x00001000U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  begin:
 *   rcu_read_lock();
 *   obj = lockless_lookup(key);
 *   if (obj) {
 *     if (!try_get_ref(obj)) { // might fail for free objects
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *
 *     if (obj->key != key) { // not the object we expected
 *       put_ref(obj);
 *       rcu_read_unlock();
 *       goto begin;
 *     }
 *   }
 *   rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
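 * As an illustration, a minimal ctor sketch (struct foo, its lock field,
 * and foo_ctor are hypothetical names, not existing kernel symbols):
 *
 *	static void foo_ctor(void *addr)
 *	{
 *		struct foo *foo = addr;
 *
 *		spin_lock_init(&foo->lock);
 *	}
 *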
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches; these should be very rare, discussed with
 *   the slab maintainers first, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		((slab_flags_t __force)0x01000000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	((slab_flags_t __force)0x20000000U)
#else
#define SLAB_SKIP_KFENCE	0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#else
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0)
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you,
 * e.g., add ____cacheline_aligned_in_smp to the struct declaration,
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
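
/*
 * A minimal usage sketch (struct foo and foo_cachep are hypothetical
 * names used for illustration only):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * With SLAB_PANIC set, the return value does not need a NULL check.
 */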

/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
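
/*
 * For example (hypothetical names), a cache whose objects may have only
 * their "data" field copied with copy_to_user()/copy_from_user() under
 * CONFIG_HARDENED_USERCOPY could be created as:
 *
 *	foo_cachep = KMEM_CACHE_USERCOPY(foo, SLAB_HWCACHE_ALIGN, data);
 */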

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
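
/*
 * With the scope-based cleanup helper above (see <linux/cleanup.h>), an
 * allocation can be freed automatically when it goes out of scope. A
 * sketch (buf and len are hypothetical):
 *
 *	void *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 */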

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * SLAB and SLUB directly allocate requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)
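
/*
 * For example, kmalloc_index(100) evaluates to 7 at compile time, so a
 * constant 100-byte request is served from the 128-byte kmalloc cache,
 * while an 80-byte request hits the 96-byte cache (index 1) when
 * KMALLOC_MIN_SIZE <= 32.
 */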

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
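
/*
 * A short sketch of the bulk API (s is an existing cache; error handling
 * beyond the failed-allocation check is elided). kmem_cache_alloc_bulk()
 * returns the number of objects allocated, which is 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free_bulk(s, ARRAY_SIZE(objs), objs);
 */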

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;

void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
		    __assume_kmalloc_alignment __alloc_size(3);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size) __assume_kmalloc_alignment
						__alloc_size(4);
void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
					      __alloc_size(1);

void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
							     __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. When @size is a power of two, the alignment is also guaranteed
 * to be at least @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually fail
 *	if it cannot be granted.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);

		index = kmalloc_index(size);
		return kmalloc_trace(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc(size, flags);
}
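
/*
 * A typical call sequence (sketch; buf and len are hypothetical):
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */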

static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large_node(size, flags, node);

		index = kmalloc_index(size);
		return kmalloc_node_trace(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}
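
/*
 * kmalloc_array() guards against n * size overflowing. A sketch
 * (items and nr are hypothetical):
 *
 *	items = kmalloc_array(nr, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 */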

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
								      size_t new_n,
								      size_t new_size,
								      gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
				  unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
				    _RET_IP_)

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it (i.e. the wrapper's caller)
 * for slab leak tracking, instead of just the immediate caller.
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(size, flags) \
	__kmalloc_node_track_caller(size, flags, \
				    NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							   int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __realloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);
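
/*
 * kvmalloc() tries kmalloc() first and falls back to vmalloc() for larger
 * or fragmented requests, so the result must always be released with
 * kvfree(), never plain kfree(). A sketch (p and size are hypothetical):
 *
 *	p = kvmalloc(size, GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 *	...
 *	kvfree(p);
 */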

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);
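
/*
 * For instance (sketch; buf and count are hypothetical), a buffer that may
 * later grow into its bucket's slack space can be sized up front:
 *
 *	size_t alloc = kmalloc_size_roundup(count);
 *
 *	buf = kmalloc(alloc, GFP_KERNEL);
 */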

void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */