
Searched refs:slab (Results 1 – 25 of 90) sorted by relevance


/linux-6.1.9/mm/
slab.h
9 struct slab {
30 struct slab *next; argument
67 static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl)) argument
78 static_assert(sizeof(struct slab) <= sizeof(struct page));
90 const struct folio *: (const struct slab *)(folio), \
91 struct folio *: (struct slab *)(folio)))
105 const struct slab *: (const struct folio *)s, \
106 struct slab *: (struct folio *)s))
121 const struct page *: (const struct slab *)(p), \
122 struct page *: (struct slab *)(p)))
[all …]
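
The slab.h hits above show the core trick of the 6.1 mm rework: struct slab is a type-punned view of struct page, with static_asserts pinning the overlapping field offsets and _Generic-based helpers (folio_slab, slab_folio, page_slab) casting between the views. A minimal userspace sketch of that overlay idea, with illustrative field names rather than the kernel's full layout; note the kernel also builds with -fno-strict-aliasing, which is what makes this punning well-defined there:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct page {                    /* the generic memory descriptor */
        unsigned long flags;
        void *payload;
    };

    struct slab {                    /* specialized view of the same bytes */
        unsigned long __page_flags;  /* must overlap page->flags */
        void *freelist;
    };

    /* The same guarantees mm/slab.h encodes for every shared field. */
    static_assert(offsetof(struct page, flags) ==
                  offsetof(struct slab, __page_flags), "flags must overlap");
    static_assert(sizeof(struct slab) <= sizeof(struct page),
                  "slab view must fit in a page descriptor");

    static struct slab *page_slab(struct page *p)
    {
        return (struct slab *)p;     /* guarded by the asserts above */
    }

    int main(void)
    {
        struct page pg = { .flags = 1, .payload = NULL };
        printf("flags via the slab view: %lu\n",
               page_slab(&pg)->__page_flags);
        return 0;
    }
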
slub.c
200 struct slab **slab; member
483 static __always_inline void slab_lock(struct slab *slab) in slab_lock() argument
485 struct page *page = slab_page(slab); in slab_lock()
491 static __always_inline void slab_unlock(struct slab *slab) in slab_unlock() argument
493 struct page *page = slab_page(slab); in slab_unlock()
506 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab, in __cmpxchg_double_slab() argument
516 if (cmpxchg_double(&slab->freelist, &slab->counters, in __cmpxchg_double_slab()
523 slab_lock(slab); in __cmpxchg_double_slab()
524 if (slab->freelist == freelist_old && in __cmpxchg_double_slab()
525 slab->counters == counters_old) { in __cmpxchg_double_slab()
[all …]
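
__cmpxchg_double_slab() above updates freelist and counters as a single unit: a double-word cmpxchg when the CPU supports it, otherwise compare-and-store under slab_lock(). A sketch of that pattern with C11 atomics, where the compile-time HAVE_DOUBLE_CAS switch stands in for system_has_cmpxchg_double() and all names are illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    #define HAVE_DOUBLE_CAS 1        /* chosen once, never mixed at runtime */

    struct slab_state {
        void *freelist;              /* first free object in the slab */
        unsigned long counters;      /* packed inuse/objects/frozen bits */
    };

    struct slab {
        _Atomic struct slab_state state;  /* both words change together */
        pthread_mutex_t lock;             /* fallback path, cf. slab_lock() */
    };

    static bool try_update(struct slab *s, struct slab_state old,
                           struct slab_state new)
    {
    #if HAVE_DOUBLE_CAS
        /* Fast path: compare and swap freelist+counters as one unit. */
        return atomic_compare_exchange_strong(&s->state, &old, new);
    #else
        /* Slow path: per-slab lock, then field-by-field comparison,
         * the same shape as the slab_lock() fallback above. */
        bool ok = false;
        pthread_mutex_lock(&s->lock);
        struct slab_state cur = atomic_load(&s->state);
        if (cur.freelist == old.freelist && cur.counters == old.counters) {
            atomic_store(&s->state, new);
            ok = true;
        }
        pthread_mutex_unlock(&s->lock);
        return ok;
    #endif
    }

    int main(void)
    {
        struct slab s = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct slab_state old = atomic_load(&s.state);
        struct slab_state new = { .freelist = &s, .counters = old.counters + 1 };
        return try_update(&s, old, new) ? 0 : 1;
    }

SLUB decides which path a cache uses once, at creation time; mixing a locked update with a lockless one on the same slab would race, which is why the sketch makes it a compile-time choice.
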
slab.c
221 struct kmem_cache_node *n, struct slab *slab,
242 #define MAKE_LIST(cachep, listp, slab, nodeid) \ argument
245 list_splice(&get_node(cachep, nodeid)->slab, listp); \
376 const struct slab *slab, unsigned int idx) in index_to_obj() argument
378 return slab->s_mem + cache->size * idx; in index_to_obj()
553 struct slab *slab, void *objp) in cache_free_pfmemalloc() argument
559 slab_node = slab_nid(slab); in cache_free_pfmemalloc()
1355 static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, in kmem_getpages()
1359 struct slab *slab; in kmem_getpages() local
1369 slab = folio_slab(folio); in kmem_getpages()
[all …]
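
index_to_obj() above is plain arithmetic: SLAB lays objects out back to back from slab->s_mem, so an index maps to s_mem + cache->size * idx, and the inverse divides the object's offset by the size (which the kernel's obj_to_index() speeds up with reciprocal_divide()). The same arithmetic worked through with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t s_mem = 0x1000;    /* address of the slab's first object */
        unsigned int size = 192;     /* cache->size, incl. alignment padding */

        /* forward: index -> object address (index_to_obj) */
        unsigned int idx = 3;
        uintptr_t obj = s_mem + (uintptr_t)size * idx;      /* 0x1000 + 576 */

        /* inverse: object address -> index (cf. obj_to_index) */
        unsigned int back = (unsigned int)((obj - s_mem) / size);

        printf("object %u lives at %#lx\n", back, (unsigned long)obj);
        return 0;
    }
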
slob.c
108 static inline int slob_page_free(struct slab *slab) in slob_page_free() argument
110 return PageSlobFree(slab_page(slab)); in slob_page_free()
113 static void set_slob_page_free(struct slab *slab, struct list_head *list) in set_slob_page_free() argument
115 list_add(&slab->slab_list, list); in set_slob_page_free()
116 __SetPageSlobFree(slab_page(slab)); in set_slob_page_free()
119 static inline void clear_slob_page_free(struct slab *slab) in clear_slob_page_free() argument
121 list_del(&slab->slab_list); in clear_slob_page_free()
122 __ClearPageSlobFree(slab_page(slab)); in clear_slob_page_free()
237 static void *slob_page_alloc(struct slab *sp, size_t size, int align, in slob_page_alloc()
305 struct slab *sp; in slob_alloc()
[all …]
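
The slob.c hits show SLOB's bookkeeping: pages that still have free units sit on a list, and a page flag (SlobFree) mirrors list membership so slob_page_free() can test it without walking the list. A toy model of that pairing, assuming a hand-rolled intrusive list and a plain bool in place of the page flag:

    #include <stdbool.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    struct slab {
        bool slob_free;              /* models __SetPageSlobFree and friends */
        struct list_head slab_list;
    };

    static bool slob_page_free(struct slab *s) { return s->slob_free; }

    static void set_slob_page_free(struct slab *s, struct list_head *list)
    {
        list_add(&s->slab_list, list);
        s->slob_free = true;
    }

    static void clear_slob_page_free(struct slab *s)
    {
        list_del(&s->slab_list);
        s->slob_free = false;
    }

    int main(void)
    {
        struct list_head free_list = { &free_list, &free_list };
        struct slab s = { 0 };
        set_slob_page_free(&s, &free_list);
        clear_slob_page_free(&s);
        return slob_page_free(&s);   /* 0: no longer on the free list */
    }
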
slab_common.c
546 static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in kmem_obj_info() argument
548 if (__kfence_obj_info(kpp, object, slab)) in kmem_obj_info()
550 __kmem_obj_info(kpp, object, slab); in kmem_obj_info()
572 struct slab *slab; in kmem_dump_obj() local
578 slab = virt_to_slab(object); in kmem_dump_obj()
579 if (WARN_ON_ONCE(!slab)) { in kmem_dump_obj()
583 kmem_obj_info(&kp, object, slab); in kmem_dump_obj()
991 struct slab *slab; in kfree() local
1005 slab = folio_slab(folio); in kfree()
1006 s = slab->slab_cache; in kfree()
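
kfree() above recovers the owning cache from the pointer alone: virt_to_slab()/folio_slab() map the address to its page metadata, and slab->slab_cache names the cache. A userspace model of the same lookup, assuming 4 KiB pages and a header at the start of each page (every name here is illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL

    struct kmem_cache { const char *name; size_t object_size; };

    struct page_hdr {                /* lives at the start of each "slab" page */
        struct kmem_cache *slab_cache;
    };

    static void *arena_alloc(struct kmem_cache *c)
    {
        /* one aligned page: header first, then a single object */
        char *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!page)
            return NULL;
        ((struct page_hdr *)page)->slab_cache = c;
        return page + sizeof(struct page_hdr);
    }

    static struct kmem_cache *owner_of(void *obj)   /* cf. virt_to_slab() */
    {
        struct page_hdr *h = (void *)((uintptr_t)obj & ~(PAGE_SIZE - 1));
        return h->slab_cache;
    }

    int main(void)
    {
        struct kmem_cache cache = { "demo-cache", 64 };
        void *obj = arena_alloc(&cache);
        if (!obj)
            return 1;
        printf("object belongs to %s\n", owner_of(obj)->name);
        free((char *)obj - sizeof(struct page_hdr));
        return 0;
    }
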
/linux-6.1.9/include/linux/
slub_def.h
51 struct slab *slab; /* The slab from which we are allocating */ member
53 struct slab *partial; /* Partially allocated frozen slabs */
160 static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab, in nearest_obj() argument
162 void *object = x - (x - slab_address(slab)) % cache->size; in nearest_obj()
163 void *last_object = slab_address(slab) + in nearest_obj()
164 (slab->objects - 1) * cache->size; in nearest_obj()
180 const struct slab *slab, void *obj) in obj_to_index() argument
184 return __obj_to_index(cache, slab_address(slab), obj); in obj_to_index()
188 const struct slab *slab) in objs_per_slab() argument
190 return slab->objects; in objs_per_slab()
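
nearest_obj() above rounds an arbitrary address inside a slab down to the start of the object containing it: subtract the offset from slab_address() modulo cache->size, then clamp into the valid object range. The same arithmetic with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t base = 0x2000;       /* slab_address(slab) */
        unsigned int size = 256;       /* cache->size */
        unsigned int objects = 16;     /* slab->objects */
        uintptr_t last = base + (uintptr_t)(objects - 1) * size;

        uintptr_t x = 0x2345;          /* arbitrary address inside the slab */
        uintptr_t obj = x - (x - base) % size;     /* -> 0x2300 */

        if (obj < base)                /* clamp, as nearest_obj() does */
            obj = base;
        if (obj > last)
            obj = last;

        printf("nearest object starts at %#lx\n", (unsigned long)obj);
        return 0;
    }
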
slab_def.h
89 static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab, in nearest_obj() argument
92 void *object = x - (x - slab->s_mem) % cache->size; in nearest_obj()
93 void *last_object = slab->s_mem + (cache->num - 1) * cache->size; in nearest_obj()
108 const struct slab *slab, void *obj) in obj_to_index() argument
110 u32 offset = (obj - slab->s_mem); in obj_to_index()
115 const struct slab *slab) in objs_per_slab() argument
117 if (is_kfence_address(slab_address(slab))) in objs_per_slab()
kfence.h
220 bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
242 static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab) in __kfence_obj_info() argument
kasan.h
13 struct slab;
138 void __kasan_poison_slab(struct slab *slab);
139 static __always_inline void kasan_poison_slab(struct slab *slab) in kasan_poison_slab() argument
142 __kasan_poison_slab(slab); in kasan_poison_slab()
255 static inline void kasan_poison_slab(struct slab *slab) {} in kasan_poison_slab() argument
/linux-6.1.9/tools/vm/
slabinfo.c
54 struct slabinfo *slab; member
355 if (a->slab == find && in find_one_alias()
1141 a->slab = s; in link_slabs()
1160 if (!show_single_ref && a->slab->refs == 1) in alias()
1165 if (strcmp(a->slab->name, active) == 0) { in alias()
1170 printf("\n%-12s <- %s", a->slab->name, a->name); in alias()
1171 active = a->slab->name; in alias()
1174 printf("%-15s -> %s\n", a->name, a->slab->name); in alias()
1204 static int slab_mismatch(char *slab) in slab_mismatch() argument
1206 return regexec(&pattern, slab, 0, NULL, 0); in slab_mismatch()
[all …]
/linux-6.1.9/Documentation/ABI/testing/
sysfs-kernel-slab
1 What: /sys/kernel/slab
7 The /sys/kernel/slab directory contains a snapshot of the
13 What: /sys/kernel/slab/<cache>/aliases
22 What: /sys/kernel/slab/<cache>/align
31 What: /sys/kernel/slab/<cache>/alloc_calls
42 What: /sys/kernel/slab/<cache>/alloc_fastpath
53 What: /sys/kernel/slab/<cache>/alloc_from_partial
59 The alloc_from_partial file shows how many times a cpu slab has
60 been full and it has been refilled by using a slab from the list
65 What: /sys/kernel/slab/<cache>/alloc_refill
[all …]
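
Each attribute documented above is a small text file under /sys/kernel/slab/<cache>/. A short reader for a few of them; "kmalloc-64" is only an example cache name, and the directory exists only on kernels using SLUB:

    #include <stdio.h>

    static void show(const char *cache, const char *attr)
    {
        char path[256], buf[128];

        snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s", cache, attr);
        FILE *f = fopen(path, "r");
        if (!f) {
            perror(path);
            return;
        }
        if (fgets(buf, sizeof(buf), f))
            printf("%-12s %s", attr, buf);   /* values end in '\n' already */
        fclose(f);
    }

    int main(void)
    {
        show("kmalloc-64", "aliases");
        show("kmalloc-64", "align");
        show("kmalloc-64", "object_size");
        return 0;
    }
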
/linux-6.1.9/mm/kasan/
common.c
33 struct slab *kasan_addr_to_slab(const void *addr) in kasan_addr_to_slab()
125 void __kasan_poison_slab(struct slab *slab) in __kasan_poison_slab() argument
127 struct page *page = slab_page(slab); in __kasan_poison_slab()
289 struct slab *slab = folio_slab(folio); in __kasan_slab_free_mempool() local
291 ____kasan_slab_free(slab->slab_cache, ptr, ip, false, false); in __kasan_slab_free_mempool()
421 struct slab *slab; in __kasan_krealloc() local
433 slab = virt_to_slab(object); in __kasan_krealloc()
436 if (unlikely(!slab)) in __kasan_krealloc()
439 return ____kasan_kmalloc(slab->slab_cache, object, size, flags); in __kasan_krealloc()
report.c
405 struct slab *slab; in complete_report_info() local
413 slab = kasan_addr_to_slab(addr); in complete_report_info()
414 if (slab) { in complete_report_info()
415 info->cache = slab->slab_cache; in complete_report_info()
416 info->object = nearest_obj(info->cache, slab, addr); in complete_report_info()
generic.c
466 struct slab *slab = kasan_addr_to_slab(addr); in __kasan_record_aux_stack() local
471 if (is_kfence_address(addr) || !slab) in __kasan_record_aux_stack()
474 cache = slab->slab_cache; in __kasan_record_aux_stack()
475 object = nearest_obj(cache, slab, addr); in __kasan_record_aux_stack()
/linux-6.1.9/tools/cgroup/
memcg_slabinfo.py
73 for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
75 nr_objs += fn(slab)
79 def count_free(slab): argument
80 return slab.objects - slab.inuse
195 for slab in for_each_slab(prog):
196 objcg_vec_raw = slab.memcg_data.value_()
199 cache = slab.slab_cache
/linux-6.1.9/lib/
sg_pool.c
13 struct kmem_cache *slab; member
150 sgp->slab = kmem_cache_create(sgp->name, size, 0, in sg_pool_init()
152 if (!sgp->slab) { in sg_pool_init()
159 sgp->slab); in sg_pool_init()
174 kmem_cache_destroy(sgp->slab); in sg_pool_init()
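
sg_pool.c above (and ccid.c further down) follow the standard kmem_cache lifecycle: kmem_cache_create() a named cache once, kmem_cache_alloc()/kmem_cache_free() objects from it, kmem_cache_destroy() it on teardown. A minimal module-style sketch of that lifecycle; the "demo" names are placeholders, not kernel code:

    #include <linux/module.h>
    #include <linux/slab.h>

    struct demo_obj {
        int id;
        char payload[56];
    };

    static struct kmem_cache *demo_cache;

    static int __init demo_init(void)
    {
        struct demo_obj *obj;

        demo_cache = kmem_cache_create("demo_cache", sizeof(struct demo_obj),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        if (!demo_cache)
            return -ENOMEM;

        obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
        if (obj) {
            obj->id = 1;
            kmem_cache_free(demo_cache, obj);
        }
        return 0;
    }

    static void __exit demo_exit(void)
    {
        kmem_cache_destroy(demo_cache);  /* all objects must be freed first */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
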
/linux-6.1.9/Documentation/mm/
slub.rst
9 slab caches. SLUB always includes full debugging but it is off by default.
40 slub_debug=<Debug-Options>,<slab name1>,<slab name2>,...
47 of the first "select slabs" blocks that matches the slab's name are applied.
59 caused higher minimum slab orders
72 end of the slab name, in order to cover all slabs with the same prefix. For
78 Red zoning and tracking may realign the slab. We can just apply sanity checks
83 Debugging options may require the minimum possible slab order to increase as
85 sizes). This has a higher likelihood of resulting in slab allocation errors
91 You can apply different options to different lists of slab names, using blocks
99 debugged by specifying global debug options followed by a list of slab names
[all …]
/linux-6.1.9/Documentation/translations/zh_CN/dev-tools/
kasan.rst
64 Generic KASAN supports bug detection in all of slab, page_alloc, vmap, vmalloc, stack and global memory
67 Software tag-based KASAN supports slab, page_alloc, vmalloc and stack memory.
69 Hardware tag-based KASAN supports slab, page_alloc and non-executable vmalloc memory.
71 For slab, both software KASAN modes support the SLUB and SLAB allocators, while hardware tag-based
89 To include alloc and free stack traces of affected slab objects in reports, enable
99 BUG: KASAN: slab-out-of-bounds in kmalloc_oob_right+0xa8/0xbc [test_kasan]
155 flags: 0x200000000000100(slab)
170 stack trace, the stack trace of where the accessed memory was allocated (in case a slab object was accessed), and the object's
172 a description of the slab object, and information about the accessed memory page.
190 Note that KASAN bug titles (such as ``slab-out-of-bounds`` or ``use-after-free``)
/linux-6.1.9/mm/kfence/
core.c
361 struct slab *slab; in kfence_guarded_alloc() local
429 slab = virt_to_slab((void *)meta->addr); in kfence_guarded_alloc()
430 slab->slab_cache = cache; in kfence_guarded_alloc()
432 slab->objects = 1; in kfence_guarded_alloc()
434 slab->s_mem = addr; in kfence_guarded_alloc()
560 struct slab *slab = page_slab(&pages[i]); in kfence_init_pool() local
569 __folio_set_slab(slab_folio(slab)); in kfence_init_pool()
571 slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg | in kfence_init_pool()
637 struct slab *slab = virt_to_slab(p); in kfence_init_pool_early() local
639 if (!slab) in kfence_init_pool_early()
[all …]
/linux-6.1.9/net/dccp/
ccid.c
81 struct kmem_cache *slab; in ccid_kmem_cache_create() local
88 slab = kmem_cache_create(slab_name_fmt, sizeof(struct ccid) + obj_size, 0, in ccid_kmem_cache_create()
90 return slab; in ccid_kmem_cache_create()
93 static void ccid_kmem_cache_destroy(struct kmem_cache *slab) in ccid_kmem_cache_destroy() argument
95 kmem_cache_destroy(slab); in ccid_kmem_cache_destroy()
/linux-6.1.9/tools/perf/Documentation/
perf-kmem.txt
47 Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
49 pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
51 mode selection options - i.e. --slab, --page, --alloc and/or --caller.
60 --slab::
/linux-6.1.9/include/net/
request_sock.h
30 struct kmem_cache *slab; member
92 req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); in reqsk_alloc()
98 kmem_cache_free(ops->slab, req); in reqsk_alloc()
123 kmem_cache_free(req->rsk_ops->slab, req); in __reqsk_free()
/linux-6.1.9/tools/testing/selftests/cgroup/
test_kmem.c
165 long current, slab, anon, file, kernel_stack, pagetables, percpu, sock, sum; in test_kmem_memcg_deletion() local
183 slab = cg_read_key_long(parent, "memory.stat", "slab "); in test_kmem_memcg_deletion()
190 if (current < 0 || slab < 0 || anon < 0 || file < 0 || in test_kmem_memcg_deletion()
194 sum = slab + anon + file + kernel_stack + pagetables + percpu + sock; in test_kmem_memcg_deletion()
200 printf("slab = %ld\n", slab); in test_kmem_memcg_deletion()
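
test_kmem.c above reads per-key values out of cgroup v2's memory.stat via cg_read_key_long(); note the trailing space in "slab " that keeps it from matching slab_reclaimable. A standalone sketch of that lookup, assuming a cgroup v2 mount at /sys/fs/cgroup:

    #include <stdio.h>
    #include <string.h>

    static long read_key_long(const char *path, const char *key)
    {
        char line[256];
        long val = -1;
        FILE *f = fopen(path, "r");

        if (!f)
            return -1;
        while (fgets(line, sizeof(line), f)) {
            if (strncmp(line, key, strlen(key)) == 0) {
                sscanf(line + strlen(key), "%ld", &val);
                break;
            }
        }
        fclose(f);
        return val;
    }

    int main(void)
    {
        long slab = read_key_long("/sys/fs/cgroup/memory.stat", "slab ");
        printf("slab = %ld\n", slab);
        return slab < 0;
    }
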
/linux-6.1.9/tools/testing/scatterlist/
Makefile
17 … $(OFILES) scatterlist.c linux/scatterlist.h linux/highmem.h linux/kmemleak.h linux/slab.h asm/io.h
31 @touch linux/slab.h
/linux-6.1.9/Documentation/translations/zh_CN/mm/
split_page_table_lock.rst
62 Make sure the architecture does not use the slab allocator to allocate page tables: slab uses page->slab_cache for its pages
