Lines matching references to cachep in mm/slab.c (the SLAB allocator). Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks a line where cachep is a function parameter and "local" a local-variable declaration.
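Throughout the file, cachep is the conventional name for a struct kmem_cache pointer, and the entries below index every line that touches one, from the debug helpers (dbg_redzone1, poison_obj) through the allocation path (____cache_alloc, cache_alloc_refill) to the free path (__cache_free, cache_flusharray). For orientation, here is a minimal consumer-side sketch of the public API those internals back; the cache name, struct layout, and error handling are illustrative assumptions, not taken from the listing.

/*
 * Illustrative sketch only: consumer-side use of the kmem_cache API whose
 * internals are indexed below. "my_obj" and "my_obj_cache" are made-up
 * names, not from mm/slab.c.
 */
#include <linux/module.h>
#include <linux/slab.h>

struct my_obj {				/* hypothetical payload */
	int id;
	char payload[120];
};

static struct kmem_cache *my_obj_cache;	/* the "cachep" a consumer holds */

static int __init my_obj_init(void)
{
	struct my_obj *obj;

	my_obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_obj_cache)
		return -ENOMEM;

	/* kmem_cache_alloc() reaches ____cache_alloc()/cache_alloc_refill(). */
	obj = kmem_cache_alloc(my_obj_cache, GFP_KERNEL);
	if (obj) {
		obj->id = 1;
		/* kmem_cache_free() reaches __cache_free()/cache_flusharray(). */
		kmem_cache_free(my_obj_cache, obj);
	}
	return 0;
}

static void __exit my_obj_exit(void)
{
	/* All objects must be freed before the cache is destroyed. */
	kmem_cache_destroy(my_obj_cache);
}

module_init(my_obj_init);
module_exit(my_obj_exit);
MODULE_LICENSE("GPL");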
212 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
214 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list);
215 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
218 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
220 static inline void fixup_slab_list(struct kmem_cache *cachep,
242 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
245 list_splice(&get_node(cachep, nodeid)->slab, listp); \
248 #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \ argument
250 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
251 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
252 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
328 static int obj_offset(struct kmem_cache *cachep) in obj_offset() argument
330 return cachep->obj_offset; in obj_offset()
333 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp) in dbg_redzone1() argument
335 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone1()
336 return (unsigned long long *) (objp + obj_offset(cachep) - in dbg_redzone1()
340 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp) in dbg_redzone2() argument
342 BUG_ON(!(cachep->flags & SLAB_RED_ZONE)); in dbg_redzone2()
343 if (cachep->flags & SLAB_STORE_USER) in dbg_redzone2()
344 return (unsigned long long *)(objp + cachep->size - in dbg_redzone2()
347 return (unsigned long long *) (objp + cachep->size - in dbg_redzone2()
351 static void **dbg_userword(struct kmem_cache *cachep, void *objp) in dbg_userword() argument
353 BUG_ON(!(cachep->flags & SLAB_STORE_USER)); in dbg_userword()
354 return (void **)(objp + cachep->size - BYTES_PER_WORD); in dbg_userword()
360 #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
361 #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;}) argument
362 #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;}) argument
393 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) in cpu_cache_get() argument
395 return this_cpu_ptr(cachep->cpu_cache); in cpu_cache_get()
437 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg) argument
439 static void __slab_error(const char *function, struct kmem_cache *cachep, in __slab_error() argument
443 function, cachep->name, msg); in __slab_error()
552 static noinline void cache_free_pfmemalloc(struct kmem_cache *cachep, in cache_free_pfmemalloc() argument
560 n = get_node(cachep, slab_node); in cache_free_pfmemalloc()
563 free_block(cachep, &objp, 1, slab_node, &list); in cache_free_pfmemalloc()
566 slabs_destroy(cachep, &list); in cache_free_pfmemalloc()
604 #define drain_alien_cache(cachep, alien) do { } while (0) argument
605 #define reap_alien(cachep, n) do { } while (0) argument
617 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
680 static void __drain_alien_cache(struct kmem_cache *cachep, in __drain_alien_cache() argument
684 struct kmem_cache_node *n = get_node(cachep, node); in __drain_alien_cache()
696 free_block(cachep, ac->entry, ac->avail, node, list); in __drain_alien_cache()
705 static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n) in reap_alien() argument
718 __drain_alien_cache(cachep, ac, node, &list); in reap_alien()
720 slabs_destroy(cachep, &list); in reap_alien()
726 static void drain_alien_cache(struct kmem_cache *cachep, in drain_alien_cache() argument
741 __drain_alien_cache(cachep, ac, i, &list); in drain_alien_cache()
743 slabs_destroy(cachep, &list); in drain_alien_cache()
748 static int __cache_free_alien(struct kmem_cache *cachep, void *objp, in __cache_free_alien() argument
756 n = get_node(cachep, node); in __cache_free_alien()
757 STATS_INC_NODEFREES(cachep); in __cache_free_alien()
763 STATS_INC_ACOVERFLOW(cachep); in __cache_free_alien()
764 __drain_alien_cache(cachep, ac, slab_node, &list); in __cache_free_alien()
768 slabs_destroy(cachep, &list); in __cache_free_alien()
770 n = get_node(cachep, slab_node); in __cache_free_alien()
772 free_block(cachep, &objp, 1, slab_node, &list); in __cache_free_alien()
774 slabs_destroy(cachep, &list); in __cache_free_alien()
779 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp) in cache_free_alien() argument
790 return __cache_free_alien(cachep, objp, node, slab_node); in cache_free_alien()
803 static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp) in init_cache_node() argument
812 n = get_node(cachep, node); in init_cache_node()
815 n->free_limit = (1 + nr_cpus_node(node)) * cachep->batchcount + in init_cache_node()
816 cachep->num; in init_cache_node()
828 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in init_cache_node()
831 (1 + nr_cpus_node(node)) * cachep->batchcount + cachep->num; in init_cache_node()
838 cachep->node[node] = n; in init_cache_node()
856 struct kmem_cache *cachep; in init_cache_node_node() local
858 list_for_each_entry(cachep, &slab_caches, list) { in init_cache_node_node()
859 ret = init_cache_node(cachep, node, GFP_KERNEL); in init_cache_node_node()
868 static int setup_kmem_cache_node(struct kmem_cache *cachep, in setup_kmem_cache_node() argument
879 new_alien = alloc_alien_cache(node, cachep->limit, gfp); in setup_kmem_cache_node()
884 if (cachep->shared) { in setup_kmem_cache_node()
886 cachep->shared * cachep->batchcount, 0xbaadf00d, gfp); in setup_kmem_cache_node()
891 ret = init_cache_node(cachep, node, gfp); in setup_kmem_cache_node()
895 n = get_node(cachep, node); in setup_kmem_cache_node()
898 free_block(cachep, n->shared->entry, in setup_kmem_cache_node()
915 slabs_destroy(cachep, &list); in setup_kmem_cache_node()
938 struct kmem_cache *cachep; in cpuup_canceled() local
943 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
949 n = get_node(cachep, node); in cpuup_canceled()
956 n->free_limit -= cachep->batchcount; in cpuup_canceled()
959 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
960 free_block(cachep, nc->entry, nc->avail, node, &list); in cpuup_canceled()
970 free_block(cachep, shared->entry, in cpuup_canceled()
982 drain_alien_cache(cachep, alien); in cpuup_canceled()
987 slabs_destroy(cachep, &list); in cpuup_canceled()
994 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_canceled()
995 n = get_node(cachep, node); in cpuup_canceled()
998 drain_freelist(cachep, n, INT_MAX); in cpuup_canceled()
1004 struct kmem_cache *cachep; in cpuup_prepare() local
1022 list_for_each_entry(cachep, &slab_caches, list) { in cpuup_prepare()
1023 err = setup_kmem_cache_node(cachep, node, GFP_KERNEL, false); in cpuup_prepare()
1093 struct kmem_cache *cachep; in drain_cache_node_node() local
1096 list_for_each_entry(cachep, &slab_caches, list) { in drain_cache_node_node()
1099 n = get_node(cachep, node); in drain_cache_node_node()
1103 drain_freelist(cachep, n, INT_MAX); in drain_cache_node_node()
1150 static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list, in init_list() argument
1164 MAKE_ALL_LISTS(cachep, ptr, nodeid); in init_list()
1165 cachep->node[nodeid] = ptr; in init_list()
1172 static void __init set_up_node(struct kmem_cache *cachep, int index) in set_up_node() argument
1177 cachep->node[node] = &init_kmem_cache_node[index + node]; in set_up_node()
1178 cachep->node[node]->next_reap = jiffies + in set_up_node()
1180 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in set_up_node()
1271 struct kmem_cache *cachep; in kmem_cache_init_late() local
1275 list_for_each_entry(cachep, &slab_caches, list) in kmem_cache_init_late()
1276 if (enable_cpucache(cachep, GFP_NOWAIT)) in kmem_cache_init_late()
1313 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid) in slab_out_of_memory() argument
1328 cachep->name, cachep->size, cachep->gfporder); in slab_out_of_memory()
1330 for_each_kmem_cache_node(cachep, node, n) { in slab_out_of_memory()
1341 (total_slabs * cachep->num) - free_objs, in slab_out_of_memory()
1342 total_slabs * cachep->num); in slab_out_of_memory()
1355 static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages() argument
1361 flags |= cachep->allocflags; in kmem_getpages()
1363 folio = (struct folio *) __alloc_pages_node(nodeid, flags, cachep->gfporder); in kmem_getpages()
1365 slab_out_of_memory(cachep, flags, nodeid); in kmem_getpages()
1371 account_slab(slab, cachep->gfporder, cachep, flags); in kmem_getpages()
1383 static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab) in kmem_freepages() argument
1385 int order = cachep->gfporder; in kmem_freepages()
1396 unaccount_slab(slab, order, cachep); in kmem_freepages()
1402 struct kmem_cache *cachep; in kmem_rcu_free() local
1406 cachep = slab->slab_cache; in kmem_rcu_free()
1408 kmem_freepages(cachep, slab); in kmem_rcu_free()
1412 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep) in is_debug_pagealloc_cache() argument
1414 if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) && in is_debug_pagealloc_cache()
1415 (cachep->size % PAGE_SIZE) == 0) in is_debug_pagealloc_cache()
1422 static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) in slab_kernel_map() argument
1424 if (!is_debug_pagealloc_cache(cachep)) in slab_kernel_map()
1427 __kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); in slab_kernel_map()
1431 static inline void slab_kernel_map(struct kmem_cache *cachep, void *objp, in slab_kernel_map() argument
1436 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val) in poison_obj() argument
1438 int size = cachep->object_size; in poison_obj()
1439 addr = &((char *)addr)[obj_offset(cachep)]; in poison_obj()
1477 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) in print_objinfo() argument
1482 if (cachep->flags & SLAB_RED_ZONE) { in print_objinfo()
1484 *dbg_redzone1(cachep, objp), in print_objinfo()
1485 *dbg_redzone2(cachep, objp)); in print_objinfo()
1488 if (cachep->flags & SLAB_STORE_USER) in print_objinfo()
1489 pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); in print_objinfo()
1490 realobj = (char *)objp + obj_offset(cachep); in print_objinfo()
1491 size = cachep->object_size; in print_objinfo()
1501 static void check_poison_obj(struct kmem_cache *cachep, void *objp) in check_poison_obj() argument
1507 if (is_debug_pagealloc_cache(cachep)) in check_poison_obj()
1510 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1511 size = cachep->object_size; in check_poison_obj()
1523 print_tainted(), cachep->name, in check_poison_obj()
1525 print_objinfo(cachep, objp, 0); in check_poison_obj()
1547 objnr = obj_to_index(cachep, slab, objp); in check_poison_obj()
1549 objp = index_to_obj(cachep, slab, objnr - 1); in check_poison_obj()
1550 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1552 print_objinfo(cachep, objp, 2); in check_poison_obj()
1554 if (objnr + 1 < cachep->num) { in check_poison_obj()
1555 objp = index_to_obj(cachep, slab, objnr + 1); in check_poison_obj()
1556 realobj = (char *)objp + obj_offset(cachep); in check_poison_obj()
1558 print_objinfo(cachep, objp, 2); in check_poison_obj()
1565 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1570 if (OBJFREELIST_SLAB(cachep) && cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1571 poison_obj(cachep, slab->freelist - obj_offset(cachep), in slab_destroy_debugcheck()
1575 for (i = 0; i < cachep->num; i++) { in slab_destroy_debugcheck()
1576 void *objp = index_to_obj(cachep, slab, i); in slab_destroy_debugcheck()
1578 if (cachep->flags & SLAB_POISON) { in slab_destroy_debugcheck()
1579 check_poison_obj(cachep, objp); in slab_destroy_debugcheck()
1580 slab_kernel_map(cachep, objp, 1); in slab_destroy_debugcheck()
1582 if (cachep->flags & SLAB_RED_ZONE) { in slab_destroy_debugcheck()
1583 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1584 slab_error(cachep, "start of a freed object was overwritten"); in slab_destroy_debugcheck()
1585 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in slab_destroy_debugcheck()
1586 slab_error(cachep, "end of a freed object was overwritten"); in slab_destroy_debugcheck()
1591 static void slab_destroy_debugcheck(struct kmem_cache *cachep, in slab_destroy_debugcheck() argument
1606 static void slab_destroy(struct kmem_cache *cachep, struct slab *slab) in slab_destroy() argument
1611 slab_destroy_debugcheck(cachep, slab); in slab_destroy()
1612 if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU)) in slab_destroy()
1615 kmem_freepages(cachep, slab); in slab_destroy()
1621 if (OFF_SLAB(cachep)) in slab_destroy()
1629 static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) in slabs_destroy() argument
1635 slab_destroy(cachep, slab); in slabs_destroy()
1653 static size_t calculate_slab_order(struct kmem_cache *cachep, in calculate_slab_order() argument
1694 if (freelist_cache_size > cachep->size / 2) in calculate_slab_order()
1699 cachep->num = num; in calculate_slab_order()
1700 cachep->gfporder = gfporder; in calculate_slab_order()
1728 struct kmem_cache *cachep, int entries, int batchcount) in alloc_kmem_cache_cpus() argument
1748 static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) in setup_cpu_cache() argument
1751 return enable_cpucache(cachep, gfp); in setup_cpu_cache()
1753 cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); in setup_cpu_cache()
1754 if (!cachep->cpu_cache) in setup_cpu_cache()
1762 set_up_node(cachep, SIZE_NODE); in setup_cpu_cache()
1767 cachep->node[node] = kmalloc_node( in setup_cpu_cache()
1769 BUG_ON(!cachep->node[node]); in setup_cpu_cache()
1770 kmem_cache_node_init(cachep->node[node]); in setup_cpu_cache()
1774 cachep->node[numa_mem_id()]->next_reap = in setup_cpu_cache()
1776 ((unsigned long)cachep) % REAPTIMEOUT_NODE; in setup_cpu_cache()
1778 cpu_cache_get(cachep)->avail = 0; in setup_cpu_cache()
1779 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1780 cpu_cache_get(cachep)->batchcount = 1; in setup_cpu_cache()
1781 cpu_cache_get(cachep)->touched = 0; in setup_cpu_cache()
1782 cachep->batchcount = 1; in setup_cpu_cache()
1783 cachep->limit = BOOT_CPUCACHE_ENTRIES; in setup_cpu_cache()
1797 struct kmem_cache *cachep; in __kmem_cache_alias() local
1799 cachep = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
1800 if (cachep) { in __kmem_cache_alias()
1801 cachep->refcount++; in __kmem_cache_alias()
1807 cachep->object_size = max_t(int, cachep->object_size, size); in __kmem_cache_alias()
1809 return cachep; in __kmem_cache_alias()
1812 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, in set_objfreelist_slab_cache() argument
1817 cachep->num = 0; in set_objfreelist_slab_cache()
1824 if (unlikely(slab_want_init_on_free(cachep))) in set_objfreelist_slab_cache()
1827 if (cachep->ctor || flags & SLAB_TYPESAFE_BY_RCU) in set_objfreelist_slab_cache()
1830 left = calculate_slab_order(cachep, size, in set_objfreelist_slab_cache()
1832 if (!cachep->num) in set_objfreelist_slab_cache()
1835 if (cachep->num * sizeof(freelist_idx_t) > cachep->object_size) in set_objfreelist_slab_cache()
1838 cachep->colour = left / cachep->colour_off; in set_objfreelist_slab_cache()
1843 static bool set_off_slab_cache(struct kmem_cache *cachep, in set_off_slab_cache() argument
1848 cachep->num = 0; in set_off_slab_cache()
1861 left = calculate_slab_order(cachep, size, flags | CFLGS_OFF_SLAB); in set_off_slab_cache()
1862 if (!cachep->num) in set_off_slab_cache()
1869 if (left >= cachep->num * sizeof(freelist_idx_t)) in set_off_slab_cache()
1872 cachep->colour = left / cachep->colour_off; in set_off_slab_cache()
1877 static bool set_on_slab_cache(struct kmem_cache *cachep, in set_on_slab_cache() argument
1882 cachep->num = 0; in set_on_slab_cache()
1884 left = calculate_slab_order(cachep, size, flags); in set_on_slab_cache()
1885 if (!cachep->num) in set_on_slab_cache()
1888 cachep->colour = left / cachep->colour_off; in set_on_slab_cache()
1916 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) in __kmem_cache_create() argument
1921 unsigned int size = cachep->size; in __kmem_cache_create()
1954 if (ralign < cachep->align) { in __kmem_cache_create()
1955 ralign = cachep->align; in __kmem_cache_create()
1963 cachep->align = ralign; in __kmem_cache_create()
1964 cachep->colour_off = cache_line_size(); in __kmem_cache_create()
1966 if (cachep->colour_off < cachep->align) in __kmem_cache_create()
1967 cachep->colour_off = cachep->align; in __kmem_cache_create()
1982 cachep->obj_offset += sizeof(unsigned long long); in __kmem_cache_create()
1997 kasan_cache_create(cachep, &size, &flags); in __kmem_cache_create()
1999 size = ALIGN(size, cachep->align); in __kmem_cache_create()
2005 size = ALIGN(SLAB_OBJ_MIN_SIZE, cachep->align); in __kmem_cache_create()
2016 size >= 256 && cachep->object_size > cache_line_size()) { in __kmem_cache_create()
2020 if (set_off_slab_cache(cachep, tmp_size, flags)) { in __kmem_cache_create()
2022 cachep->obj_offset += tmp_size - size; in __kmem_cache_create()
2030 if (set_objfreelist_slab_cache(cachep, size, flags)) { in __kmem_cache_create()
2035 if (set_off_slab_cache(cachep, size, flags)) { in __kmem_cache_create()
2040 if (set_on_slab_cache(cachep, size, flags)) in __kmem_cache_create()
2046 cachep->freelist_size = cachep->num * sizeof(freelist_idx_t); in __kmem_cache_create()
2047 cachep->flags = flags; in __kmem_cache_create()
2048 cachep->allocflags = __GFP_COMP; in __kmem_cache_create()
2050 cachep->allocflags |= GFP_DMA; in __kmem_cache_create()
2052 cachep->allocflags |= GFP_DMA32; in __kmem_cache_create()
2054 cachep->allocflags |= __GFP_RECLAIMABLE; in __kmem_cache_create()
2055 cachep->size = size; in __kmem_cache_create()
2056 cachep->reciprocal_buffer_size = reciprocal_value(size); in __kmem_cache_create()
2065 (cachep->flags & SLAB_POISON) && in __kmem_cache_create()
2066 is_debug_pagealloc_cache(cachep)) in __kmem_cache_create()
2067 cachep->flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER); in __kmem_cache_create()
2070 err = setup_cpu_cache(cachep, gfp); in __kmem_cache_create()
2072 __kmem_cache_release(cachep); in __kmem_cache_create()
2095 static void check_spinlock_acquired(struct kmem_cache *cachep) in check_spinlock_acquired() argument
2099 assert_spin_locked(&get_node(cachep, numa_mem_id())->list_lock); in check_spinlock_acquired()
2103 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node) in check_spinlock_acquired_node() argument
2107 assert_spin_locked(&get_node(cachep, node)->list_lock); in check_spinlock_acquired_node()
2119 static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac, in drain_array_locked() argument
2131 free_block(cachep, ac->entry, tofree, node, list); in drain_array_locked()
2138 struct kmem_cache *cachep = arg; in do_drain() local
2145 ac = cpu_cache_get(cachep); in do_drain()
2146 n = get_node(cachep, node); in do_drain()
2148 free_block(cachep, ac->entry, ac->avail, node, &list); in do_drain()
2151 slabs_destroy(cachep, &list); in do_drain()
2154 static void drain_cpu_caches(struct kmem_cache *cachep) in drain_cpu_caches() argument
2160 on_each_cpu(do_drain, cachep, 1); in drain_cpu_caches()
2162 for_each_kmem_cache_node(cachep, node, n) in drain_cpu_caches()
2164 drain_alien_cache(cachep, n->alien); in drain_cpu_caches()
2166 for_each_kmem_cache_node(cachep, node, n) { in drain_cpu_caches()
2168 drain_array_locked(cachep, n->shared, node, true, &list); in drain_cpu_caches()
2171 slabs_destroy(cachep, &list); in drain_cpu_caches()
2227 int __kmem_cache_shrink(struct kmem_cache *cachep) in __kmem_cache_shrink() argument
2233 drain_cpu_caches(cachep); in __kmem_cache_shrink()
2236 for_each_kmem_cache_node(cachep, node, n) { in __kmem_cache_shrink()
2237 drain_freelist(cachep, n, INT_MAX); in __kmem_cache_shrink()
2245 int __kmem_cache_shutdown(struct kmem_cache *cachep) in __kmem_cache_shutdown() argument
2247 return __kmem_cache_shrink(cachep); in __kmem_cache_shutdown()
2250 void __kmem_cache_release(struct kmem_cache *cachep) in __kmem_cache_release() argument
2255 cache_random_seq_destroy(cachep); in __kmem_cache_release()
2257 free_percpu(cachep->cpu_cache); in __kmem_cache_release()
2260 for_each_kmem_cache_node(cachep, i, n) { in __kmem_cache_release()
2264 cachep->node[i] = NULL; in __kmem_cache_release()
2282 static void *alloc_slabmgmt(struct kmem_cache *cachep, in alloc_slabmgmt() argument
2292 if (OBJFREELIST_SLAB(cachep)) in alloc_slabmgmt()
2294 else if (OFF_SLAB(cachep)) { in alloc_slabmgmt()
2296 freelist = kmalloc_node(cachep->freelist_size, in alloc_slabmgmt()
2300 freelist = addr + (PAGE_SIZE << cachep->gfporder) - in alloc_slabmgmt()
2301 cachep->freelist_size; in alloc_slabmgmt()
2318 static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab) in cache_init_objs_debug() argument
2323 for (i = 0; i < cachep->num; i++) { in cache_init_objs_debug()
2324 void *objp = index_to_obj(cachep, slab, i); in cache_init_objs_debug()
2326 if (cachep->flags & SLAB_STORE_USER) in cache_init_objs_debug()
2327 *dbg_userword(cachep, objp) = NULL; in cache_init_objs_debug()
2329 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2330 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2331 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_init_objs_debug()
2338 if (cachep->ctor && !(cachep->flags & SLAB_POISON)) { in cache_init_objs_debug()
2339 kasan_unpoison_object_data(cachep, in cache_init_objs_debug()
2340 objp + obj_offset(cachep)); in cache_init_objs_debug()
2341 cachep->ctor(objp + obj_offset(cachep)); in cache_init_objs_debug()
2343 cachep, objp + obj_offset(cachep)); in cache_init_objs_debug()
2346 if (cachep->flags & SLAB_RED_ZONE) { in cache_init_objs_debug()
2347 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2348 slab_error(cachep, "constructor overwrote the end of an object"); in cache_init_objs_debug()
2349 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE) in cache_init_objs_debug()
2350 slab_error(cachep, "constructor overwrote the start of an object"); in cache_init_objs_debug()
2353 if (cachep->flags & SLAB_POISON) { in cache_init_objs_debug()
2354 poison_obj(cachep, objp, POISON_FREE); in cache_init_objs_debug()
2355 slab_kernel_map(cachep, objp, 0); in cache_init_objs_debug()
2377 struct kmem_cache *cachep, in freelist_state_initialize() argument
2387 if (!cachep->random_seq) { in freelist_state_initialize()
2391 state->list = cachep->random_seq; in freelist_state_initialize()
2418 static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab) in shuffle_freelist() argument
2420 unsigned int objfreelist = 0, i, rand, count = cachep->num; in shuffle_freelist()
2427 precomputed = freelist_state_initialize(&state, cachep, count); in shuffle_freelist()
2430 if (OBJFREELIST_SLAB(cachep)) { in shuffle_freelist()
2435 slab->freelist = index_to_obj(cachep, slab, objfreelist) + in shuffle_freelist()
2436 obj_offset(cachep); in shuffle_freelist()
2459 if (OBJFREELIST_SLAB(cachep)) in shuffle_freelist()
2460 set_free_obj(slab, cachep->num - 1, objfreelist); in shuffle_freelist()
2465 static inline bool shuffle_freelist(struct kmem_cache *cachep, in shuffle_freelist() argument
2472 static void cache_init_objs(struct kmem_cache *cachep, in cache_init_objs() argument
2479 cache_init_objs_debug(cachep, slab); in cache_init_objs()
2482 shuffled = shuffle_freelist(cachep, slab); in cache_init_objs()
2484 if (!shuffled && OBJFREELIST_SLAB(cachep)) { in cache_init_objs()
2485 slab->freelist = index_to_obj(cachep, slab, cachep->num - 1) + in cache_init_objs()
2486 obj_offset(cachep); in cache_init_objs()
2489 for (i = 0; i < cachep->num; i++) { in cache_init_objs()
2490 objp = index_to_obj(cachep, slab, i); in cache_init_objs()
2491 objp = kasan_init_slab_obj(cachep, objp); in cache_init_objs()
2494 if (DEBUG == 0 && cachep->ctor) { in cache_init_objs()
2495 kasan_unpoison_object_data(cachep, objp); in cache_init_objs()
2496 cachep->ctor(objp); in cache_init_objs()
2497 kasan_poison_object_data(cachep, objp); in cache_init_objs()
2505 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slab) in slab_get_obj() argument
2509 objp = index_to_obj(cachep, slab, get_free_obj(slab, slab->active)); in slab_get_obj()
2515 static void slab_put_obj(struct kmem_cache *cachep, in slab_put_obj() argument
2518 unsigned int objnr = obj_to_index(cachep, slab, objp); in slab_put_obj()
2523 for (i = slab->active; i < cachep->num; i++) { in slab_put_obj()
2526 cachep->name, objp); in slab_put_obj()
2533 slab->freelist = objp + obj_offset(cachep); in slab_put_obj()
2542 static struct slab *cache_grow_begin(struct kmem_cache *cachep, in cache_grow_begin() argument
2559 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); in cache_grow_begin()
2570 slab = kmem_getpages(cachep, local_flags, nodeid); in cache_grow_begin()
2575 n = get_node(cachep, slab_node); in cache_grow_begin()
2579 if (n->colour_next >= cachep->colour) in cache_grow_begin()
2583 if (offset >= cachep->colour) in cache_grow_begin()
2586 offset *= cachep->colour_off; in cache_grow_begin()
2596 freelist = alloc_slabmgmt(cachep, slab, offset, in cache_grow_begin()
2598 if (OFF_SLAB(cachep) && !freelist) in cache_grow_begin()
2601 slab->slab_cache = cachep; in cache_grow_begin()
2604 cache_init_objs(cachep, slab); in cache_grow_begin()
2612 kmem_freepages(cachep, slab); in cache_grow_begin()
2619 static void cache_grow_end(struct kmem_cache *cachep, struct slab *slab) in cache_grow_end() argument
2630 n = get_node(cachep, slab_nid(slab)); in cache_grow_end()
2638 fixup_slab_list(cachep, n, slab, &list); in cache_grow_end()
2640 STATS_INC_GROWN(cachep); in cache_grow_end()
2641 n->free_objects += cachep->num - slab->active; in cache_grow_end()
2644 fixup_objfreelist_debug(cachep, &list); in cache_grow_end()
2685 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp, in cache_free_debugcheck() argument
2691 BUG_ON(virt_to_cache(objp) != cachep); in cache_free_debugcheck()
2693 objp -= obj_offset(cachep); in cache_free_debugcheck()
2697 if (cachep->flags & SLAB_RED_ZONE) { in cache_free_debugcheck()
2698 verify_redzone_free(cachep, objp); in cache_free_debugcheck()
2699 *dbg_redzone1(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2700 *dbg_redzone2(cachep, objp) = RED_INACTIVE; in cache_free_debugcheck()
2702 if (cachep->flags & SLAB_STORE_USER) in cache_free_debugcheck()
2703 *dbg_userword(cachep, objp) = (void *)caller; in cache_free_debugcheck()
2705 objnr = obj_to_index(cachep, slab, objp); in cache_free_debugcheck()
2707 BUG_ON(objnr >= cachep->num); in cache_free_debugcheck()
2708 BUG_ON(objp != index_to_obj(cachep, slab, objnr)); in cache_free_debugcheck()
2710 if (cachep->flags & SLAB_POISON) { in cache_free_debugcheck()
2711 poison_obj(cachep, objp, POISON_FREE); in cache_free_debugcheck()
2712 slab_kernel_map(cachep, objp, 0); in cache_free_debugcheck()
2722 static inline void fixup_objfreelist_debug(struct kmem_cache *cachep, in fixup_objfreelist_debug() argument
2730 objp = next - obj_offset(cachep); in fixup_objfreelist_debug()
2732 poison_obj(cachep, objp, POISON_FREE); in fixup_objfreelist_debug()
2737 static inline void fixup_slab_list(struct kmem_cache *cachep, in fixup_slab_list() argument
2743 if (slab->active == cachep->num) { in fixup_slab_list()
2745 if (OBJFREELIST_SLAB(cachep)) { in fixup_slab_list()
2748 if (cachep->flags & SLAB_POISON) { in fixup_slab_list()
2825 static noinline void *cache_alloc_pfmemalloc(struct kmem_cache *cachep, in cache_alloc_pfmemalloc() argument
2842 obj = slab_get_obj(cachep, slab); in cache_alloc_pfmemalloc()
2845 fixup_slab_list(cachep, n, slab, &list); in cache_alloc_pfmemalloc()
2848 fixup_objfreelist_debug(cachep, &list); in cache_alloc_pfmemalloc()
2857 static __always_inline int alloc_block(struct kmem_cache *cachep, in alloc_block() argument
2864 BUG_ON(slab->active >= cachep->num); in alloc_block()
2866 while (slab->active < cachep->num && batchcount--) { in alloc_block()
2867 STATS_INC_ALLOCED(cachep); in alloc_block()
2868 STATS_INC_ACTIVE(cachep); in alloc_block()
2869 STATS_SET_HIGH(cachep); in alloc_block()
2871 ac->entry[ac->avail++] = slab_get_obj(cachep, slab); in alloc_block()
2877 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags) in cache_alloc_refill() argument
2889 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2899 n = get_node(cachep, node); in cache_alloc_refill()
2921 check_spinlock_acquired(cachep); in cache_alloc_refill()
2923 batchcount = alloc_block(cachep, ac, slab, batchcount); in cache_alloc_refill()
2924 fixup_slab_list(cachep, n, slab, &list); in cache_alloc_refill()
2931 fixup_objfreelist_debug(cachep, &list); in cache_alloc_refill()
2937 void *obj = cache_alloc_pfmemalloc(cachep, n, flags); in cache_alloc_refill()
2943 slab = cache_grow_begin(cachep, gfp_exact_node(flags), node); in cache_alloc_refill()
2949 ac = cpu_cache_get(cachep); in cache_alloc_refill()
2951 alloc_block(cachep, ac, slab, batchcount); in cache_alloc_refill()
2952 cache_grow_end(cachep, slab); in cache_alloc_refill()
2963 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, in cache_alloc_debugcheck_after() argument
2966 WARN_ON_ONCE(cachep->ctor && (flags & __GFP_ZERO)); in cache_alloc_debugcheck_after()
2969 if (cachep->flags & SLAB_POISON) { in cache_alloc_debugcheck_after()
2970 check_poison_obj(cachep, objp); in cache_alloc_debugcheck_after()
2971 slab_kernel_map(cachep, objp, 1); in cache_alloc_debugcheck_after()
2972 poison_obj(cachep, objp, POISON_INUSE); in cache_alloc_debugcheck_after()
2974 if (cachep->flags & SLAB_STORE_USER) in cache_alloc_debugcheck_after()
2975 *dbg_userword(cachep, objp) = (void *)caller; in cache_alloc_debugcheck_after()
2977 if (cachep->flags & SLAB_RED_ZONE) { in cache_alloc_debugcheck_after()
2978 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || in cache_alloc_debugcheck_after()
2979 *dbg_redzone2(cachep, objp) != RED_INACTIVE) { in cache_alloc_debugcheck_after()
2980 slab_error(cachep, "double free, or memory outside object was overwritten"); in cache_alloc_debugcheck_after()
2982 objp, *dbg_redzone1(cachep, objp), in cache_alloc_debugcheck_after()
2983 *dbg_redzone2(cachep, objp)); in cache_alloc_debugcheck_after()
2985 *dbg_redzone1(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
2986 *dbg_redzone2(cachep, objp) = RED_ACTIVE; in cache_alloc_debugcheck_after()
2989 objp += obj_offset(cachep); in cache_alloc_debugcheck_after()
2990 if (cachep->ctor && cachep->flags & SLAB_POISON) in cache_alloc_debugcheck_after()
2991 cachep->ctor(objp); in cache_alloc_debugcheck_after()
3002 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) in ____cache_alloc() argument
3009 ac = cpu_cache_get(cachep); in ____cache_alloc()
3014 STATS_INC_ALLOCHIT(cachep); in ____cache_alloc()
3018 STATS_INC_ALLOCMISS(cachep); in ____cache_alloc()
3019 objp = cache_alloc_refill(cachep, flags); in ____cache_alloc()
3024 ac = cpu_cache_get(cachep); in ____cache_alloc()
3046 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags) in alternate_node_alloc() argument
3053 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD)) in alternate_node_alloc()
3058 return ____cache_alloc_node(cachep, flags, nid_alloc); in alternate_node_alloc()
3137 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, in ____cache_alloc_node() argument
3146 n = get_node(cachep, nodeid); in ____cache_alloc_node()
3155 check_spinlock_acquired_node(cachep, nodeid); in ____cache_alloc_node()
3157 STATS_INC_NODEALLOCS(cachep); in ____cache_alloc_node()
3158 STATS_INC_ACTIVE(cachep); in ____cache_alloc_node()
3159 STATS_SET_HIGH(cachep); in ____cache_alloc_node()
3161 BUG_ON(slab->active == cachep->num); in ____cache_alloc_node()
3163 obj = slab_get_obj(cachep, slab); in ____cache_alloc_node()
3166 fixup_slab_list(cachep, n, slab, &list); in ____cache_alloc_node()
3169 fixup_objfreelist_debug(cachep, &list); in ____cache_alloc_node()
3174 slab = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid); in ____cache_alloc_node()
3177 obj = slab_get_obj(cachep, slab); in ____cache_alloc_node()
3179 cache_grow_end(cachep, slab); in ____cache_alloc_node()
3181 return obj ? obj : fallback_alloc(cachep, flags); in ____cache_alloc_node()
3185 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid) in __do_cache_alloc() argument
3192 objp = alternate_node_alloc(cachep, flags); in __do_cache_alloc()
3202 objp = ____cache_alloc(cachep, flags); in __do_cache_alloc()
3205 objp = ____cache_alloc(cachep, flags); in __do_cache_alloc()
3206 } else if (!get_node(cachep, nodeid)) { in __do_cache_alloc()
3208 objp = fallback_alloc(cachep, flags); in __do_cache_alloc()
3217 objp = ____cache_alloc_node(cachep, flags, nodeid); in __do_cache_alloc()
3224 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags, int nodeid __maybe_unused) in __do_cache_alloc() argument
3226 return ____cache_alloc(cachep, flags); in __do_cache_alloc()
3232 slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, in slab_alloc_node() argument
3241 cachep = slab_pre_alloc_hook(cachep, lru, &objcg, 1, flags); in slab_alloc_node()
3242 if (unlikely(!cachep)) in slab_alloc_node()
3245 objp = kfence_alloc(cachep, orig_size, flags); in slab_alloc_node()
3250 objp = __do_cache_alloc(cachep, flags, nodeid); in slab_alloc_node()
3252 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); in slab_alloc_node()
3254 init = slab_want_init_on_alloc(flags, cachep); in slab_alloc_node()
3257 slab_post_alloc_hook(cachep, objcg, flags, 1, &objp, init); in slab_alloc_node()
3262 slab_alloc(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags, in slab_alloc() argument
3265 return slab_alloc_node(cachep, lru, flags, NUMA_NO_NODE, orig_size, in slab_alloc()
3273 static void free_block(struct kmem_cache *cachep, void **objpp, in free_block() argument
3277 struct kmem_cache_node *n = get_node(cachep, node); in free_block()
3290 check_spinlock_acquired_node(cachep, node); in free_block()
3291 slab_put_obj(cachep, slab, objp); in free_block()
3292 STATS_DEC_ACTIVE(cachep); in free_block()
3308 n->free_objects -= cachep->num; in free_block()
3317 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac) in cache_flusharray() argument
3327 n = get_node(cachep, node); in cache_flusharray()
3342 free_block(cachep, ac->entry, batchcount, node, &list); in cache_flusharray()
3354 STATS_SET_FREEABLE(cachep, i); in cache_flusharray()
3360 slabs_destroy(cachep, &list); in cache_flusharray()
3367 static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp, in __cache_free() argument
3372 memcg_slab_free_hook(cachep, virt_to_slab(objp), &objp, 1); in __cache_free()
3375 kmemleak_free_recursive(objp, cachep->flags); in __cache_free()
3385 init = slab_want_init_on_free(cachep); in __cache_free()
3387 memset(objp, 0, cachep->object_size); in __cache_free()
3389 if (kasan_slab_free(cachep, objp, init)) in __cache_free()
3393 if (!(cachep->flags & SLAB_TYPESAFE_BY_RCU)) in __cache_free()
3394 __kcsan_check_access(objp, cachep->object_size, in __cache_free()
3397 ___cache_free(cachep, objp, caller); in __cache_free()
3400 void ___cache_free(struct kmem_cache *cachep, void *objp, in ___cache_free() argument
3403 struct array_cache *ac = cpu_cache_get(cachep); in ___cache_free()
3406 kmemleak_free_recursive(objp, cachep->flags); in ___cache_free()
3407 objp = cache_free_debugcheck(cachep, objp, caller); in ___cache_free()
3416 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp)) in ___cache_free()
3420 STATS_INC_FREEHIT(cachep); in ___cache_free()
3422 STATS_INC_FREEMISS(cachep); in ___cache_free()
3423 cache_flusharray(cachep, ac); in ___cache_free()
3430 cache_free_pfmemalloc(cachep, slab, objp); in ___cache_free()
3439 void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, in __kmem_cache_alloc_lru() argument
3442 void *ret = slab_alloc(cachep, lru, flags, cachep->object_size, _RET_IP_); in __kmem_cache_alloc_lru()
3444 trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, NUMA_NO_NODE); in __kmem_cache_alloc_lru()
3459 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) in kmem_cache_alloc() argument
3461 return __kmem_cache_alloc_lru(cachep, NULL, flags); in kmem_cache_alloc()
3465 void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru, in kmem_cache_alloc_lru() argument
3468 return __kmem_cache_alloc_lru(cachep, lru, flags); in kmem_cache_alloc_lru()
3535 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) in kmem_cache_alloc_node() argument
3537 void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_); in kmem_cache_alloc_node()
3539 trace_kmem_cache_alloc(_RET_IP_, ret, cachep, flags, nodeid); in kmem_cache_alloc_node()
3545 void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, in __kmem_cache_alloc_node() argument
3549 return slab_alloc_node(cachep, NULL, flags, nodeid, in __kmem_cache_alloc_node()
3556 struct kmem_cache *cachep; in __kmem_obj_info() local
3562 cachep = slab->slab_cache; in __kmem_obj_info()
3563 kpp->kp_slab_cache = cachep; in __kmem_obj_info()
3564 objp = object - obj_offset(cachep); in __kmem_obj_info()
3565 kpp->kp_data_offset = obj_offset(cachep); in __kmem_obj_info()
3567 objnr = obj_to_index(cachep, slab, objp); in __kmem_obj_info()
3568 objp = index_to_obj(cachep, slab, objnr); in __kmem_obj_info()
3570 if (DEBUG && cachep->flags & SLAB_STORE_USER) in __kmem_obj_info()
3571 kpp->kp_ret = *dbg_userword(cachep, objp); in __kmem_obj_info()
3576 void __do_kmem_cache_free(struct kmem_cache *cachep, void *objp, in __do_kmem_cache_free() argument
3582 debug_check_no_locks_freed(objp, cachep->object_size); in __do_kmem_cache_free()
3583 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) in __do_kmem_cache_free()
3584 debug_check_no_obj_freed(objp, cachep->object_size); in __do_kmem_cache_free()
3585 __cache_free(cachep, objp, caller); in __do_kmem_cache_free()
3589 void __kmem_cache_free(struct kmem_cache *cachep, void *objp, in __kmem_cache_free() argument
3592 __do_kmem_cache_free(cachep, objp, caller); in __kmem_cache_free()
3603 void kmem_cache_free(struct kmem_cache *cachep, void *objp) in kmem_cache_free() argument
3605 cachep = cache_from_obj(cachep, objp); in kmem_cache_free()
3606 if (!cachep) in kmem_cache_free()
3609 trace_kmem_cache_free(_RET_IP_, objp, cachep); in kmem_cache_free()
3610 __do_kmem_cache_free(cachep, objp, _RET_IP_); in kmem_cache_free()
3655 static int setup_kmem_cache_nodes(struct kmem_cache *cachep, gfp_t gfp) in setup_kmem_cache_nodes() argument
3662 ret = setup_kmem_cache_node(cachep, node, gfp, true); in setup_kmem_cache_nodes()
3671 if (!cachep->list.next) { in setup_kmem_cache_nodes()
3675 n = get_node(cachep, node); in setup_kmem_cache_nodes()
3680 cachep->node[node] = NULL; in setup_kmem_cache_nodes()
3689 static int do_tune_cpucache(struct kmem_cache *cachep, int limit, in do_tune_cpucache() argument
3695 cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); in do_tune_cpucache()
3699 prev = cachep->cpu_cache; in do_tune_cpucache()
3700 cachep->cpu_cache = cpu_cache; in do_tune_cpucache()
3709 cachep->batchcount = batchcount; in do_tune_cpucache()
3710 cachep->limit = limit; in do_tune_cpucache()
3711 cachep->shared = shared; in do_tune_cpucache()
3723 n = get_node(cachep, node); in do_tune_cpucache()
3725 free_block(cachep, ac->entry, ac->avail, node, &list); in do_tune_cpucache()
3727 slabs_destroy(cachep, &list); in do_tune_cpucache()
3732 return setup_kmem_cache_nodes(cachep, gfp); in do_tune_cpucache()
3736 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp) in enable_cpucache() argument
3743 err = cache_random_seq_create(cachep, cachep->num, gfp); in enable_cpucache()
3756 if (cachep->size > 131072) in enable_cpucache()
3758 else if (cachep->size > PAGE_SIZE) in enable_cpucache()
3760 else if (cachep->size > 1024) in enable_cpucache()
3762 else if (cachep->size > 256) in enable_cpucache()
3777 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1) in enable_cpucache()
3789 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp); in enable_cpucache()
3793 cachep->name, -err); in enable_cpucache()
3802 static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *n, in drain_array() argument
3819 drain_array_locked(cachep, ac, node, false, &list); in drain_array()
3822 slabs_destroy(cachep, &list); in drain_array()
3894 void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo) in get_slabinfo() argument
3902 for_each_kmem_cache_node(cachep, node, n) { in get_slabinfo()
3915 num_objs = total_slabs * cachep->num; in get_slabinfo()
3924 sinfo->limit = cachep->limit; in get_slabinfo()
3925 sinfo->batchcount = cachep->batchcount; in get_slabinfo()
3926 sinfo->shared = cachep->shared; in get_slabinfo()
3927 sinfo->objects_per_slab = cachep->num; in get_slabinfo()
3928 sinfo->cache_order = cachep->gfporder; in get_slabinfo()
3931 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep) in slabinfo_show_stats() argument
3935 unsigned long high = cachep->high_mark; in slabinfo_show_stats()
3936 unsigned long allocs = cachep->num_allocations; in slabinfo_show_stats()
3937 unsigned long grown = cachep->grown; in slabinfo_show_stats()
3938 unsigned long reaped = cachep->reaped; in slabinfo_show_stats()
3939 unsigned long errors = cachep->errors; in slabinfo_show_stats()
3940 unsigned long max_freeable = cachep->max_freeable; in slabinfo_show_stats()
3941 unsigned long node_allocs = cachep->node_allocs; in slabinfo_show_stats()
3942 unsigned long node_frees = cachep->node_frees; in slabinfo_show_stats()
3943 unsigned long overflows = cachep->node_overflow; in slabinfo_show_stats()
3952 unsigned long allochit = atomic_read(&cachep->allochit); in slabinfo_show_stats()
3953 unsigned long allocmiss = atomic_read(&cachep->allocmiss); in slabinfo_show_stats()
3954 unsigned long freehit = atomic_read(&cachep->freehit); in slabinfo_show_stats()
3955 unsigned long freemiss = atomic_read(&cachep->freemiss); in slabinfo_show_stats()
3978 struct kmem_cache *cachep; in slabinfo_write() local
3997 list_for_each_entry(cachep, &slab_caches, list) { in slabinfo_write()
3998 if (!strcmp(cachep->name, kbuf)) { in slabinfo_write()
4003 res = do_tune_cpucache(cachep, limit, in slabinfo_write()
4028 struct kmem_cache *cachep; in __check_heap_object() local
4035 cachep = slab->slab_cache; in __check_heap_object()
4036 objnr = obj_to_index(cachep, slab, (void *)ptr); in __check_heap_object()
4037 BUG_ON(objnr >= cachep->num); in __check_heap_object()
4043 offset = ptr - index_to_obj(cachep, slab, objnr) - obj_offset(cachep); in __check_heap_object()
4046 if (offset >= cachep->useroffset && in __check_heap_object()
4047 offset - cachep->useroffset <= cachep->usersize && in __check_heap_object()
4048 n <= cachep->useroffset - offset + cachep->usersize) in __check_heap_object()
4051 usercopy_abort("SLAB object", cachep->name, to_user, offset, n); in __check_heap_object()
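The slabinfo_write() entries above (source lines 3978-4003), which call do_tune_cpucache(), sit behind the writable /proc/slabinfo interface: a line of the form "<cache-name> <limit> <batchcount> <shared>" retunes a cache's per-CPU array caches. A hedged userspace sketch follows; the cache name and values are placeholders, the write needs root, and it only takes effect on a kernel built with this SLAB implementation.

/*
 * Illustrative only: retune one kmem_cache's per-CPU array caches via
 * /proc/slabinfo, which slabinfo_write() parses before calling
 * do_tune_cpucache(). The values are examples, not recommendations.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *cmd = "dentry 120 60 8\n";	/* name limit batchcount shared */
	FILE *f = fopen("/proc/slabinfo", "w");

	if (!f) {
		perror("fopen /proc/slabinfo");
		return 1;
	}
	if (fwrite(cmd, 1, strlen(cmd), f) != strlen(cmd))
		perror("write /proc/slabinfo");
	fclose(f);
	return 0;
}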