
Searched refs:slab (Results 1 – 25 of 92) sorted by relevance


/linux-6.6.21/mm/
slab.h
42 struct slab { struct
67 struct slab *next; argument
105 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl)) argument
113 static_assert(sizeof(struct slab) <= sizeof(struct page));
115 static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
128 const struct folio *: (const struct slab *)(folio), \
129 struct folio *: (struct slab *)(folio)))
143 const struct slab *: (const struct folio *)s, \
144 struct slab *: (struct folio *)s))
159 const struct page *: (const struct slab *)(p), \
[all …]
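The mm/slab.h hits above show the key trick: struct slab reuses the memory of struct page, with SLAB_MATCH-style static_asserts pinning the shared field offsets and a _Generic macro converting folio pointers to slab pointers without losing constness. A minimal userspace sketch of the same pattern, assuming simplified stand-in structs (the real definitions live in mm/slab.h):

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins: struct slab overlays struct page's layout. */
    struct page  { unsigned long flags; void *mapping; };
    struct folio { struct page page; };
    struct slab  { unsigned long __page_flags; void *slab_cache; };

    /* Type-generic cast modeled on folio_slab(): _Generic picks the
     * const-correct cast, which a plain cast macro could not do. */
    #define folio_slab(folio) _Generic((folio),             \
        const struct folio *: (const struct slab *)(folio), \
        struct folio *: (struct slab *)(folio))

    /* Layout checks in the spirit of the SLAB_MATCH static_asserts. */
    static_assert(offsetof(struct page, flags) ==
                  offsetof(struct slab, __page_flags), "flags offset");
    static_assert(sizeof(struct slab) <= sizeof(struct page), "slab fits");

    int main(void)
    {
        struct folio f = { .page = { .flags = 0x1 } };
        struct slab *s = folio_slab(&f);

        printf("slab flags: %lx\n", s->__page_flags);
        return 0;
    }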
slub.c
207 struct slab **slab; member
512 static __always_inline void slab_lock(struct slab *slab) in slab_lock() argument
514 struct page *page = slab_page(slab); in slab_lock()
520 static __always_inline void slab_unlock(struct slab *slab) in slab_unlock() argument
522 struct page *page = slab_page(slab); in slab_unlock()
529 __update_freelist_fast(struct slab *slab, in __update_freelist_fast() argument
537 return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full); in __update_freelist_fast()
544 __update_freelist_slow(struct slab *slab, in __update_freelist_slow() argument
550 slab_lock(slab); in __update_freelist_slow()
551 if (slab->freelist == freelist_old && in __update_freelist_slow()
[all …]
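The slub.c hits show SLUB's two freelist-update paths: a lock-free fast path that compare-exchanges the {freelist pointer, counter} pair as one double-width word, and a slow path that takes slab_lock() and compares the fields by hand when that cmpxchg is unavailable. A hedged userspace model of the fast path, shrunk to a 32-bit index plus 32-bit counter so the pair fits one 64-bit atomic (the kernel's freelist_aba_t carries a full pointer):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Freelist head packed with a transaction counter: one
     * compare-exchange updates both, which defeats ABA. */
    typedef union {
        struct { uint32_t head; uint32_t counter; };
        uint64_t full;
    } freelist_aba_t;

    static _Atomic uint64_t freelist_counter;

    /* Modeled on __update_freelist_fast() in mm/slub.c. */
    static bool update_freelist_fast(uint32_t old_head, uint32_t new_head)
    {
        freelist_aba_t old, next;

        old.full = atomic_load(&freelist_counter);
        if (old.head != old_head)
            return false;                   /* freelist changed under us */
        next.head = new_head;
        next.counter = old.counter + 1;     /* counter bump defeats ABA */
        return atomic_compare_exchange_strong(&freelist_counter,
                                              &old.full, next.full);
    }

    int main(void)
    {
        freelist_aba_t init = { .head = 7, .counter = 0 };

        atomic_store(&freelist_counter, init.full);
        printf("first update: %s\n", update_freelist_fast(7, 9) ? "ok" : "retry");
        printf("stale update: %s\n", update_freelist_fast(7, 9) ? "ok" : "retry");
        return 0;
    }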
slab.c
221 struct kmem_cache_node *n, struct slab *slab,
241 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
244 list_splice(&get_node(cachep, nodeid)->slab, listp); \
375 const struct slab *slab, unsigned int idx) in index_to_obj() argument
377 return slab->s_mem + cache->size * idx; in index_to_obj()
552 struct slab *slab, void *objp) in cache_free_pfmemalloc() argument
558 slab_node = slab_nid(slab); in cache_free_pfmemalloc()
1348 static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages()
1352 struct slab *slab; in kmem_getpages() local
1362 slab = folio_slab(folio); in kmem_getpages()
[all …]
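index_to_obj() in the slab.c hits is pure arithmetic: objects sit back to back from s_mem, so object i lives at s_mem + size * i. A tiny userspace model (field names mirror the snippet; the sizes are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { size_t size; };
    struct slab  { char *s_mem; };

    /* Modeled on index_to_obj() in mm/slab.c. */
    static void *index_to_obj(struct cache *cache, struct slab *slab,
                              unsigned int idx)
    {
        return slab->s_mem + cache->size * idx;
    }

    int main(void)
    {
        struct cache c = { .size = 64 };
        struct slab s = { .s_mem = malloc(64 * 8) };

        printf("obj 0: %p, obj 3: %p\n",
               index_to_obj(&c, &s, 0), index_to_obj(&c, &s, 3));
        free(s.s_mem);
        return 0;
    }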
slab_common.c
551 static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in kmem_obj_info() argument
553 if (__kfence_obj_info(kpp, object, slab)) in kmem_obj_info()
555 __kmem_obj_info(kpp, object, slab); in kmem_obj_info()
577 struct slab *slab; in kmem_dump_obj() local
583 slab = virt_to_slab(object); in kmem_dump_obj()
584 if (WARN_ON_ONCE(!slab)) { in kmem_dump_obj()
588 kmem_obj_info(&kp, object, slab); in kmem_dump_obj()
1059 struct slab *slab; in kfree() local
1073 slab = folio_slab(folio); in kfree()
1074 s = slab->slab_cache; in kfree()
/linux-6.6.21/include/linux/
slub_def.h
58 struct slab *slab; /* The slab from which we are allocating */ member
60 struct slab *partial; /* Partially allocated frozen slabs */
172 static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab, in nearest_obj() argument
174 void *object = x - (x - slab_address(slab)) % cache->size; in nearest_obj()
175 void *last_object = slab_address(slab) + in nearest_obj()
176 (slab->objects - 1) * cache->size; in nearest_obj()
192 const struct slab *slab, void *obj) in obj_to_index() argument
196 return __obj_to_index(cache, slab_address(slab), obj); in obj_to_index()
200 const struct slab *slab) in objs_per_slab() argument
202 return slab->objects; in objs_per_slab()
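nearest_obj() and obj_to_index() above invert that layout: an arbitrary interior address is rounded down by its offset within the object, then clamped to the last valid object. A userspace sketch of the arithmetic (the kernel's __obj_to_index() actually uses a precomputed reciprocal divide rather than plain division):

    #include <stdio.h>
    #include <stdlib.h>

    struct cache { size_t size; };
    struct slab  { char *base; unsigned int objects; };

    /* Modeled on nearest_obj() in slub_def.h. */
    static void *nearest_obj(struct cache *c, struct slab *s, char *x)
    {
        char *object = x - (x - s->base) % c->size;
        char *last_object = s->base + (s->objects - 1) * c->size;

        return object > last_object ? last_object : object;
    }

    /* Modeled on obj_to_index(); plain division instead of reciprocal. */
    static unsigned int obj_to_index(struct cache *c, struct slab *s, void *obj)
    {
        return (unsigned int)(((char *)obj - s->base) / c->size);
    }

    int main(void)
    {
        struct cache c = { .size = 32 };
        struct slab s = { .base = malloc(32 * 4), .objects = 4 };
        char *inside = s.base + 70;     /* somewhere inside object 2 */
        void *obj = nearest_obj(&c, &s, inside);

        printf("object %u starts at %p\n", obj_to_index(&c, &s, obj), obj);
        free(s.base);
        return 0;
    }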
slab_def.h
91 static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab, in nearest_obj() argument
94 void *object = x - (x - slab->s_mem) % cache->size; in nearest_obj()
95 void *last_object = slab->s_mem + (cache->num - 1) * cache->size; in nearest_obj()
110 const struct slab *slab, void *obj) in obj_to_index() argument
112 u32 offset = (obj - slab->s_mem); in obj_to_index()
117 const struct slab *slab) in objs_per_slab() argument
119 if (is_kfence_address(slab_address(slab))) in objs_per_slab()
kfence.h
221 bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
243 static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kfence_obj_info() argument
kasan.h
13 struct slab;
125 void __kasan_poison_slab(struct slab *slab);
126 static __always_inline void kasan_poison_slab(struct slab *slab) in kasan_poison_slab() argument
129 __kasan_poison_slab(slab); in kasan_poison_slab()
244 static inline void kasan_poison_slab(struct slab *slab) {} in kasan_poison_slab() argument
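The kasan.h hits show the kernel's usual hook shape: an __always_inline wrapper performs a cheap runtime check before calling the out-of-line __kasan_* implementation, and when the feature is compiled out the wrapper collapses to an empty static inline, so call sites cost nothing. A sketch of that shape, with FEATURE_ON standing in for CONFIG_KASAN:

    #include <stdio.h>

    #define FEATURE_ON 1    /* stand-in for CONFIG_KASAN */

    struct slab { int dummy; };

    #if FEATURE_ON
    static int feature_enabled(void) { return 1; }  /* cheap runtime check */

    static void __poison_slab(struct slab *slab)    /* out-of-line work */
    {
        printf("poisoning slab %p\n", (void *)slab);
    }

    static inline void poison_slab(struct slab *slab)
    {
        if (feature_enabled())
            __poison_slab(slab);
    }
    #else
    static inline void poison_slab(struct slab *slab) {}  /* free when off */
    #endif

    int main(void)
    {
        struct slab s;

        poison_slab(&s);
        return 0;
    }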
/linux-6.6.21/tools/mm/
slabinfo.c
54 struct slabinfo *slab; member
357 if (a->slab == find && in find_one_alias()
1143 a->slab = s; in link_slabs()
1162 if (!show_single_ref && a->slab->refs == 1) in alias()
1167 if (strcmp(a->slab->name, active) == 0) { in alias()
1172 printf("\n%-12s <- %s", a->slab->name, a->name); in alias()
1173 active = a->slab->name; in alias()
1176 printf("%-15s -> %s\n", a->name, a->slab->name); in alias()
1206 static int slab_mismatch(char *slab) in slab_mismatch() argument
1208 return regexec(&pattern, slab, 0, NULL, 0); in slab_mismatch()
[all …]
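slab_mismatch() in tools/mm/slabinfo.c is a thin wrapper over POSIX regexec(): the pattern is compiled once up front, and a nonzero return means the cache name did not match. A self-contained sketch; the "^kmalloc-" pattern is invented for the demo:

    #include <regex.h>
    #include <stdio.h>

    static regex_t pattern;

    /* Mirrors slab_mismatch(): 0 means the name matches the pattern. */
    static int slab_mismatch(const char *slab)
    {
        return regexec(&pattern, slab, 0, NULL, 0);
    }

    int main(void)
    {
        if (regcomp(&pattern, "^kmalloc-", REG_EXTENDED | REG_NOSUB))
            return 1;
        printf("kmalloc-64: %s\n", slab_mismatch("kmalloc-64") ? "mismatch" : "match");
        printf("dentry:     %s\n", slab_mismatch("dentry") ? "mismatch" : "match");
        regfree(&pattern);
        return 0;
    }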
/linux-6.6.21/Documentation/ABI/testing/
sysfs-kernel-slab
1 What: /sys/kernel/slab
7 The /sys/kernel/slab directory contains a snapshot of the
13 What: /sys/kernel/slab/<cache>/aliases
22 What: /sys/kernel/slab/<cache>/align
31 What: /sys/kernel/slab/<cache>/alloc_calls
42 What: /sys/kernel/slab/<cache>/alloc_fastpath
53 What: /sys/kernel/slab/<cache>/alloc_from_partial
59 The alloc_from_partial file shows how many times a cpu slab has
60 been full and has been refilled using a slab from the list
65 What: /sys/kernel/slab/<cache>/alloc_refill
[all …]
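These sysfs files are plain text, one value per read. A hedged example of reading one documented counter from userspace; the cache name "dentry" is illustrative, and the statistics files only exist on kernels built with CONFIG_SLUB_STATS:

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/sys/kernel/slab/dentry/alloc_from_partial", "r");

        if (!f) {
            perror("fopen");
            return 1;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("alloc_from_partial: %s", buf);
        fclose(f);
        return 0;
    }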
/linux-6.6.21/scripts/gdb/linux/
slab.py
38 def slab_folio(slab): argument
39 return slab.cast(gdb.lookup_type("struct folio").pointer())
41 def slab_address(slab): argument
43 folio = slab_folio(slab)
155 def __fill_map(obj_map, cache, slab): argument
156 p = slab['freelist']
157 addr = slab_address(slab)
165 for slab in lists.list_for_each_entry(slab_list, slab_ptr_type, "slab_list"):
167 __fill_map(obj_map, cache, slab)
168 addr = slab_address(slab)
[all …]
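__fill_map() in the gdb script walks the slab's freelist, a chain of next-pointers threaded through the free objects themselves, marking each visited object free in obj_map; everything not visited is live. The same walk as a userspace C model (the sizes and hand-built freelist are invented for the demo):

    #include <stdio.h>
    #include <string.h>

    #define OBJ_SIZE 32
    #define NR_OBJS  4

    int main(void)
    {
        _Alignas(void *) char slab[OBJ_SIZE * NR_OBJS];
        char obj_map[NR_OBJS];
        void **freelist;

        /* Thread a freelist through the free objects: 3 -> 1 -> NULL. */
        memset(slab, 0, sizeof(slab));
        *(void **)(slab + 3 * OBJ_SIZE) = slab + 1 * OBJ_SIZE;
        *(void **)(slab + 1 * OBJ_SIZE) = NULL;
        freelist = (void **)(slab + 3 * OBJ_SIZE);

        /* Walk it, as __fill_map() does with slab['freelist']. */
        memset(obj_map, 0, sizeof(obj_map));
        for (void *p = freelist; p; p = *(void **)p)
            obj_map[((char *)p - slab) / OBJ_SIZE] = 1;

        for (int i = 0; i < NR_OBJS; i++)
            printf("obj %d: %s\n", i, obj_map[i] ? "free" : "allocated");
        return 0;
    }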
/linux-6.6.21/mm/kasan/
common.c
33 struct slab *kasan_addr_to_slab(const void *addr) in kasan_addr_to_slab()
125 void __kasan_poison_slab(struct slab *slab) in __kasan_poison_slab() argument
127 struct page *page = slab_page(slab); in __kasan_poison_slab()
292 struct slab *slab = folio_slab(folio); in __kasan_slab_free_mempool() local
294 ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); in __kasan_slab_free_mempool()
424 struct slab *slab; in __kasan_krealloc() local
436 slab = virt_to_slab(object); in __kasan_krealloc()
439 if (unlikely(!slab)) in __kasan_krealloc()
442 return ____kasan_kmalloc(slab->slab_cache, object, size, flags); in __kasan_krealloc()
report.c
485 struct slab *slab; in complete_report_info() local
493 slab = kasan_addr_to_slab(addr); in complete_report_info()
494 if (slab) { in complete_report_info()
495 info->cache = slab->slab_cache; in complete_report_info()
496 info->object = nearest_obj(info->cache, slab, addr); in complete_report_info()
generic.c
477 struct slab *slab = kasan_addr_to_slab(addr); in __kasan_record_aux_stack() local
482 if (is_kfence_address(addr) || !slab) in __kasan_record_aux_stack()
485 cache = slab->slab_cache; in __kasan_record_aux_stack()
486 object = nearest_obj(cache, slab, addr); in __kasan_record_aux_stack()
/linux-6.6.21/tools/cgroup/
memcg_slabinfo.py
73 for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
75 nr_objs += fn(slab)
79 def count_free(slab): argument
80 return slab.objects - slab.inuse
195 for slab in for_each_slab(prog):
196 objcg_vec_raw = slab.memcg_data.value_()
199 cache = slab.slab_cache
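count_free() in the drgn script is simply objects - inuse, summed over every slab on a node's partial list. A minimal C model of that accounting (the two-slab list is fabricated):

    #include <stdio.h>

    struct slab {
        unsigned int objects, inuse;
        struct slab *next;
    };

    /* Mirrors count_free() above. */
    static unsigned int count_free(const struct slab *slab)
    {
        return slab->objects - slab->inuse;
    }

    int main(void)
    {
        struct slab s2 = { .objects = 16, .inuse = 16, .next = NULL };
        struct slab s1 = { .objects = 16, .inuse = 3,  .next = &s2 };
        unsigned int nr_free = 0;

        for (const struct slab *s = &s1; s; s = s->next)
            nr_free += count_free(s);
        printf("free objects on partial list: %u\n", nr_free);
        return 0;
    }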
/linux-6.6.21/lib/
sg_pool.c
13 struct kmem_cache *slab; member
150 sgp->slab = kmem_cache_create(sgp->name, size, 0, in sg_pool_init()
152 if (!sgp->slab) { in sg_pool_init()
159 sgp->slab); in sg_pool_init()
174 kmem_cache_destroy(sgp->slab); in sg_pool_init()
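sg_pool.c shows the standard kmem_cache lifecycle: create the cache at init, allocate and free objects from it during operation, and destroy it at teardown, by which point every object must have been freed. A minimal kernel-module-style sketch of that lifecycle; the struct, sizes, and names are illustrative, not taken from sg_pool.c:

    #include <linux/module.h>
    #include <linux/slab.h>

    struct my_obj { int id; char payload[48]; };    /* illustrative */

    static struct kmem_cache *my_cache;

    static int __init my_init(void)
    {
        /* name, object size, alignment, flags, constructor */
        my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
                                     0, SLAB_HWCACHE_ALIGN, NULL);
        if (!my_cache)
            return -ENOMEM;

        /* Allocate one object and give it straight back. */
        struct my_obj *obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
        if (obj)
            kmem_cache_free(my_cache, obj);
        return 0;
    }

    static void __exit my_exit(void)
    {
        kmem_cache_destroy(my_cache);   /* all objects must be freed first */
    }

    module_init(my_init);
    module_exit(my_exit);
    MODULE_LICENSE("GPL");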
/linux-6.6.21/Documentation/mm/
slub.rst
7 slab caches. SLUB always includes full debugging but it is off by default.
38 slub_debug=<Debug-Options>,<slab name1>,<slab name2>,...
45 of the first "select slabs" block that matches the slab's name are applied.
57 caused higher minimum slab orders
70 end of the slab name, in order to cover all slabs with the same prefix. For
76 Red zoning and tracking may realign the slab. We can just apply sanity checks
81 Debugging options may require the minimum possible slab order to increase as
83 sizes). This has a higher likelihood of resulting in slab allocation errors
89 You can apply different options to different lists of slab names, using blocks
97 debugged by specifying global debug options followed by a list of slab names
[all …]
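For concreteness (option letters as defined in the full document: F sanity checks, Z red zoning, U user tracking), a block list such as slub_debug=Z,dentry;U,kmalloc-* enables red zoning for dentry and user tracking for the kmalloc caches only, while slub_debug=FZ;-,zs_handle enables F and Z globally but uses "-" to opt the zs_handle cache out.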
/linux-6.6.21/Documentation/translations/zh_CN/dev-tools/
kasan.rst
64 Generic KASAN supports detection in all of slab, page_alloc, vmap, vmalloc, stack, and global memory
67 Software tag-based KASAN supports slab, page_alloc, vmalloc, and stack memory.
69 Hardware tag-based KASAN supports slab, page_alloc, and non-executable vmalloc memory.
71 For slab, both software KASAN modes support the SLUB and SLAB allocators, while hardware tag-based
89 To include alloc and free stack traces of affected slab objects in reports, enable
140 BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan]
196 flags: 0x200000000000100(slab)
211 stack trace, a stack trace of where the accessed memory was allocated (if a slab object was accessed), and the object's
213 a description of the slab object and information about the accessed memory page.
231 Note that KASAN bug titles (such as ``slab-out-of-bounds`` or ``use-after-free``)
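The slab-out-of-bounds report header quoted above comes from the kmalloc_oob_right case in the test_kasan module. A minimal kernel-style sketch of the bug class it exercises (size and names are illustrative):

    #include <linux/slab.h>

    static void oob_demo(void)
    {
        char *p = kmalloc(123, GFP_KERNEL);

        if (!p)
            return;
        /* One byte past the 123-byte object: with KASAN enabled the
         * write below is reported as slab-out-of-bounds. */
        p[123] = 'x';
        kfree(p);
    }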
/linux-6.6.21/net/dccp/
ccid.c
81 struct kmem_cache *slab; in ccid_kmem_cache_create() local
88 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, in ccid_kmem_cache_create()
90 return slab; in ccid_kmem_cache_create()
93 static void ccid_kmem_cache_destroy(struct kmem_cache *slab) in ccid_kmem_cache_destroy() argument
95 kmem_cache_destroy(slab); in ccid_kmem_cache_destroy()
/linux-6.6.21/mm/kfence/
core.c
396 struct slab *slab; in kfence_guarded_alloc() local
464 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
465 slab->slab_cache = cache; in kfence_guarded_alloc()
467 slab->objects = 1; in kfence_guarded_alloc()
469 slab->s_mem = addr; in kfence_guarded_alloc()
596 struct slab *slab = page_slab(nth_page(pages, i)); in kfence_init_pool() local
601 __folio_set_slab(slab_folio(slab)); in kfence_init_pool()
603 slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg | in kfence_init_pool()
648 struct slab *slab = page_slab(nth_page(pages, i)); in kfence_init_pool() local
653 slab->memcg_data = 0; in kfence_init_pool()
[all …]
/linux-6.6.21/tools/perf/Documentation/
perf-kmem.txt
47 Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
49 pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
51 mode selection options - i.e. --slab, --page, --alloc and/or --caller.
60 --slab::
/linux-6.6.21/include/net/
request_sock.h
30 struct kmem_cache *slab; member
92 req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); in reqsk_alloc()
98 kmem_cache_free(ops->slab, req); in reqsk_alloc()
123 kmem_cache_free(req->rsk_ops->slab, req); in __reqsk_free()
/linux-6.6.21/tools/testing/scatterlist/
Makefile
17 … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
31 @touch linux/slab.h
/linux-6.6.21/Documentation/translations/zh_CN/mm/
split_page_table_lock.rst
62 make sure the architecture doesn't use the slab allocator for page table allocation: slab uses page->slab_cache for its pages
/linux-6.6.21/Documentation/translations/zh_CN/core-api/
mm-api.rst
49 include/linux/slab.h
51 mm/slab.c
