Lines matching references to the identifier s in mm/slub.c (the Linux SLUB allocator). In most hits s is the struct kmem_cache being operated on; each entry shows the source line number, the matching line, and the enclosing function, with the reference classified as argument, local, or member.

212 static inline bool kmem_cache_debug(struct kmem_cache *s)  in kmem_cache_debug()  argument
214 return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS); in kmem_cache_debug()
217 static inline bool slub_debug_orig_size(struct kmem_cache *s) in slub_debug_orig_size() argument
219 return (kmem_cache_debug_flags(s, SLAB_STORE_USER) && in slub_debug_orig_size()
220 (s->flags & SLAB_KMALLOC)); in slub_debug_orig_size()
223 void *fixup_red_left(struct kmem_cache *s, void *p) in fixup_red_left() argument
225 if (kmem_cache_debug_flags(s, SLAB_RED_ZONE)) in fixup_red_left()
226 p += s->red_left_pad; in fixup_red_left()
231 static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) in kmem_cache_has_cpu_partial() argument
234 return !kmem_cache_debug(s); in kmem_cache_has_cpu_partial()
322 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; } in sysfs_slab_add() argument
323 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p) in sysfs_slab_alias() argument
330 static inline void debugfs_slab_add(struct kmem_cache *s) { } in debugfs_slab_add() argument
333 static inline void stat(const struct kmem_cache *s, enum stat_item si) in stat() argument
340 raw_cpu_inc(s->cpu_slab->stat[si]); in stat()
374 static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s, in freelist_ptr_encode() argument
380 encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr); in freelist_ptr_encode()
387 static inline void *freelist_ptr_decode(const struct kmem_cache *s, in freelist_ptr_decode() argument
393 decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr)); in freelist_ptr_decode()
400 static inline void *get_freepointer(struct kmem_cache *s, void *object) in get_freepointer() argument
406 ptr_addr = (unsigned long)object + s->offset; in get_freepointer()
408 return freelist_ptr_decode(s, p, ptr_addr); in get_freepointer()
412 static void prefetch_freepointer(const struct kmem_cache *s, void *object) in prefetch_freepointer() argument
414 prefetchw(object + s->offset); in prefetch_freepointer()
429 static inline void *get_freepointer_safe(struct kmem_cache *s, void *object) in get_freepointer_safe() argument
435 return get_freepointer(s, object); in get_freepointer_safe()
438 freepointer_addr = (unsigned long)object + s->offset; in get_freepointer_safe()
440 return freelist_ptr_decode(s, p, freepointer_addr); in get_freepointer_safe()
443 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) in set_freepointer() argument
445 unsigned long freeptr_addr = (unsigned long)object + s->offset; in set_freepointer()
452 *(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr); in set_freepointer()
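The freelist_ptr_encode()/freelist_ptr_decode() and get/set_freepointer() hits above show SLUB's hardened free-pointer scheme: the next-free pointer stored inside an object is XORed with a per-cache random value and with a byte-swapped copy of the address it is stored at. The following is an illustrative userspace sketch of that round trip, not kernel code; cache_random stands in for s->random, encode()/decode()/swab_ul() are sketch helpers, and a 64-bit unsigned long is assumed so swab() can be modelled with __builtin_bswap64().

#include <stdio.h>

/* Stand-in for the per-cache s->random canary. */
static const unsigned long cache_random = 0x9e3779b97f4a7c15UL;

/* Model of the kernel's swab() on an unsigned long (64-bit assumed). */
static unsigned long swab_ul(unsigned long x)
{
	return __builtin_bswap64(x);
}

/* Obfuscate a free pointer before storing it at ptr_addr. */
static unsigned long encode(void *ptr, unsigned long ptr_addr)
{
	return (unsigned long)ptr ^ cache_random ^ swab_ul(ptr_addr);
}

/* Recover the original pointer from the stored value. */
static void *decode(unsigned long stored, unsigned long ptr_addr)
{
	return (void *)(stored ^ cache_random ^ swab_ul(ptr_addr));
}

int main(void)
{
	int object;	/* pretend this is the next free object         */
	int slot;	/* pretend &slot is where the pointer is stored */
	unsigned long addr = (unsigned long)&slot;
	unsigned long stored = encode(&object, addr);

	printf("stored  %#lx\n", stored);
	printf("decoded %p (expect %p)\n", decode(stored, addr), (void *)&object);
	return 0;
}

Because the storage address is mixed in, the same next pointer encodes differently in every slot.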
487 static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) in slub_set_cpu_partial() argument
491 s->cpu_partial = nr_objects; in slub_set_cpu_partial()
499 nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo)); in slub_set_cpu_partial()
500 s->cpu_partial_slabs = nr_slabs; in slub_set_cpu_partial()
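slub_set_cpu_partial() above converts the requested cap on per-CPU partial objects into a cap on partial slabs: twice the object budget, rounded up to whole slabs. A worked example with hypothetical numbers (32 objects per slab, a 30-object budget), using the same DIV_ROUND_UP() definition as the kernel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int objs_per_slab = 32;	/* hypothetical oo_objects(s->oo) */
	unsigned int nr_objects = 30;		/* requested cpu_partial budget   */
	/* As in slub_set_cpu_partial(): twice the object budget,
	 * rounded up to whole slabs. */
	unsigned int nr_slabs = DIV_ROUND_UP(nr_objects * 2, objs_per_slab);

	printf("cpu_partial_slabs = %u\n", nr_slabs);	/* prints 2 */
	return 0;
}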
504 slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects) in slub_set_cpu_partial() argument
569 static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab, in __slab_update_freelist() argument
579 if (s->flags & __CMPXCHG_DOUBLE) { in __slab_update_freelist()
590 stat(s, CMPXCHG_DOUBLE_FAIL); in __slab_update_freelist()
593 pr_info("%s %s: cmpxchg double redo ", n, s->name); in __slab_update_freelist()
599 static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab, in slab_update_freelist() argument
606 if (s->flags & __CMPXCHG_DOUBLE) { in slab_update_freelist()
621 stat(s, CMPXCHG_DOUBLE_FAIL); in slab_update_freelist()
624 pr_info("%s %s: cmpxchg double redo ", n, s->name); in slab_update_freelist()
634 static void __fill_map(unsigned long *obj_map, struct kmem_cache *s, in __fill_map() argument
642 for (p = slab->freelist; p; p = get_freepointer(s, p)) in __fill_map()
643 set_bit(__obj_to_index(s, addr, p), obj_map); in __fill_map()
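__fill_map() above records every object reachable from slab->freelist in a bitmap so that later debug passes can tell free objects from allocated ones. A minimal userspace sketch of the same walk, with made-up object size and count, a plain (unhardened) first-word free pointer, and ordinary division where the kernel's __obj_to_index() uses a precomputed reciprocal:

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   32		/* hypothetical s->size       */
#define NR_OBJECTS 8		/* hypothetical slab->objects */

/* A fake slab: the first word of each free object holds the next pointer. */
static unsigned char slab[NR_OBJECTS * OBJ_SIZE];

static unsigned int obj_to_index(const void *base, const void *p)
{
	return (unsigned int)(((const unsigned char *)p -
			       (const unsigned char *)base) / OBJ_SIZE);
}

int main(void)
{
	void *freelist = NULL;
	unsigned long obj_map = 0;	/* one bit per object, like obj_map */
	void *p;
	int i;

	/* Chain objects 1, 3 and 5 onto the freelist. */
	for (i = 5; i >= 1; i -= 2) {
		void *obj = slab + i * OBJ_SIZE;
		memcpy(obj, &freelist, sizeof(freelist));
		freelist = obj;
	}

	/* The __fill_map() walk: mark everything on the freelist as free. */
	for (p = freelist; p; memcpy(&p, p, sizeof(p)))
		obj_map |= 1UL << obj_to_index(slab, p);

	for (i = 0; i < NR_OBJECTS; i++)
		printf("object %d: %s\n", i,
		       (obj_map >> i) & 1 ? "free" : "allocated");
	return 0;
}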
666 static inline unsigned int size_from_object(struct kmem_cache *s) in size_from_object() argument
668 if (s->flags & SLAB_RED_ZONE) in size_from_object()
669 return s->size - s->red_left_pad; in size_from_object()
671 return s->size; in size_from_object()
674 static inline void *restore_red_left(struct kmem_cache *s, void *p) in restore_red_left() argument
676 if (s->flags & SLAB_RED_ZONE) in restore_red_left()
677 p -= s->red_left_pad; in restore_red_left()
715 static inline int check_valid_pointer(struct kmem_cache *s, in check_valid_pointer() argument
725 object = restore_red_left(s, object); in check_valid_pointer()
726 if (object < base || object >= base + slab->objects * s->size || in check_valid_pointer()
727 (object - base) % s->size) { in check_valid_pointer()
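check_valid_pointer() above accepts an object address only if it lies inside the slab and starts exactly on an object boundary (after undoing the left redzone shift via restore_red_left()). A small sketch of just that arithmetic, with made-up base, object size and object count:

#include <stdbool.h>
#include <stdio.h>

/* The test from check_valid_pointer(): within [base, base + objects*size)
 * and the offset from base is a whole multiple of the object size. */
static bool valid_object(unsigned long base, unsigned int size,
			 unsigned int objects, unsigned long object)
{
	if (object < base || object >= base + (unsigned long)objects * size)
		return false;
	return (object - base) % size == 0;
}

int main(void)
{
	unsigned long base = 0x1000;	/* hypothetical slab_address() */
	unsigned int size = 64;		/* hypothetical s->size        */
	unsigned int objects = 16;	/* hypothetical slab->objects  */

	printf("%d\n", valid_object(base, size, objects, 0x1040));	/* 1               */
	printf("%d\n", valid_object(base, size, objects, 0x1041));	/* 0: mid-object   */
	printf("%d\n", valid_object(base, size, objects, 0x1400));	/* 0: past the end */
	return 0;
}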
746 static inline bool freeptr_outside_object(struct kmem_cache *s) in freeptr_outside_object() argument
748 return s->offset >= s->inuse; in freeptr_outside_object()
755 static inline unsigned int get_info_end(struct kmem_cache *s) in get_info_end() argument
757 if (freeptr_outside_object(s)) in get_info_end()
758 return s->inuse + sizeof(void *); in get_info_end()
760 return s->inuse; in get_info_end()
763 static struct track *get_track(struct kmem_cache *s, void *object, in get_track() argument
768 p = object + get_info_end(s); in get_track()
792 static void set_track_update(struct kmem_cache *s, void *object, in set_track_update() argument
796 struct track *p = get_track(s, object, alloc); in set_track_update()
807 static __always_inline void set_track(struct kmem_cache *s, void *object, in set_track() argument
812 set_track_update(s, object, alloc, addr, handle); in set_track()
815 static void init_tracking(struct kmem_cache *s, void *object) in init_tracking() argument
819 if (!(s->flags & SLAB_STORE_USER)) in init_tracking()
822 p = get_track(s, object, TRACK_ALLOC); in init_tracking()
826 static void print_track(const char *s, struct track *t, unsigned long pr_time) in print_track() argument
834 s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid); in print_track()
844 void print_tracking(struct kmem_cache *s, void *object) in print_tracking() argument
847 if (!(s->flags & SLAB_STORE_USER)) in print_tracking()
850 print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time); in print_tracking()
851 print_track("Freed", get_track(s, object, TRACK_FREE), pr_time); in print_tracking()
869 static inline void set_orig_size(struct kmem_cache *s, in set_orig_size() argument
874 if (!slub_debug_orig_size(s)) in set_orig_size()
884 if (kasan_metadata_size(s, true) > orig_size) in set_orig_size()
885 orig_size = s->object_size; in set_orig_size()
888 p += get_info_end(s); in set_orig_size()
894 static inline unsigned int get_orig_size(struct kmem_cache *s, void *object) in get_orig_size() argument
898 if (!slub_debug_orig_size(s)) in get_orig_size()
899 return s->object_size; in get_orig_size()
901 p += get_info_end(s); in get_orig_size()
907 void skip_orig_size_check(struct kmem_cache *s, const void *object) in skip_orig_size_check() argument
909 set_orig_size(s, (void *)object, s->object_size); in skip_orig_size_check()
912 static void slab_bug(struct kmem_cache *s, char *fmt, ...) in slab_bug() argument
921 pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf); in slab_bug()
927 static void slab_fix(struct kmem_cache *s, char *fmt, ...) in slab_fix() argument
938 pr_err("FIX %s: %pV\n", s->name, &vaf); in slab_fix()
942 static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p) in print_trailer() argument
947 print_tracking(s, p); in print_trailer()
952 p, p - addr, get_freepointer(s, p)); in print_trailer()
954 if (s->flags & SLAB_RED_ZONE) in print_trailer()
955 print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, in print_trailer()
956 s->red_left_pad); in print_trailer()
961 min_t(unsigned int, s->object_size, PAGE_SIZE)); in print_trailer()
962 if (s->flags & SLAB_RED_ZONE) in print_trailer()
963 print_section(KERN_ERR, "Redzone ", p + s->object_size, in print_trailer()
964 s->inuse - s->object_size); in print_trailer()
966 off = get_info_end(s); in print_trailer()
968 if (s->flags & SLAB_STORE_USER) in print_trailer()
971 if (slub_debug_orig_size(s)) in print_trailer()
974 off += kasan_metadata_size(s, false); in print_trailer()
976 if (off != size_from_object(s)) in print_trailer()
979 size_from_object(s) - off); in print_trailer()
984 static void object_err(struct kmem_cache *s, struct slab *slab, in object_err() argument
990 slab_bug(s, "%s", reason); in object_err()
991 print_trailer(s, slab, object); in object_err()
995 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
998 if ((s->flags & SLAB_CONSISTENCY_CHECKS) && in freelist_corrupted()
999 !check_valid_pointer(s, slab, nextfree) && freelist) { in freelist_corrupted()
1000 object_err(s, slab, *freelist, "Freechain corrupt"); in freelist_corrupted()
1002 slab_fix(s, "Isolate corrupted freechain"); in freelist_corrupted()
1009 static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab, in slab_err() argument
1021 slab_bug(s, "%s", buf); in slab_err()
1027 static void init_object(struct kmem_cache *s, void *object, u8 val) in init_object() argument
1030 unsigned int poison_size = s->object_size; in init_object()
1032 if (s->flags & SLAB_RED_ZONE) { in init_object()
1033 memset(p - s->red_left_pad, val, s->red_left_pad); in init_object()
1035 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { in init_object()
1041 poison_size = get_orig_size(s, object); in init_object()
1045 if (s->flags & __OBJECT_POISON) { in init_object()
1050 if (s->flags & SLAB_RED_ZONE) in init_object()
1051 memset(p + poison_size, val, s->inuse - poison_size); in init_object()
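Taken together with the check_object() lines further down, init_object() implies the debug layout of a free object: both redzones are filled with the red value, the object body is filled with POISON_FREE, and the final object byte gets POISON_END. The sketch below reproduces that layout in userspace; the poison constants are the classic values from include/linux/poison.h, the sizes are made up, and this is an illustration rather than the kernel routine.

#include <stdio.h>
#include <string.h>

#define POISON_FREE       0x6b
#define POISON_END        0xa5
#define SLUB_RED_INACTIVE 0xbb

#define RED_LEFT_PAD 8		/* hypothetical s->red_left_pad */
#define OBJECT_SIZE  16		/* hypothetical s->object_size  */
#define INUSE        24		/* hypothetical s->inuse        */

int main(void)
{
	/* One debug-layout slot: [left redzone][object][right redzone]. */
	unsigned char slot[RED_LEFT_PAD + INUSE];
	unsigned char *p = slot + RED_LEFT_PAD;	/* object start, as in init_object() */
	int i;

	/* Redzones take the "inactive" red value... */
	memset(p - RED_LEFT_PAD, SLUB_RED_INACTIVE, RED_LEFT_PAD);
	memset(p + OBJECT_SIZE, SLUB_RED_INACTIVE, INUSE - OBJECT_SIZE);

	/* ...and the object body is poisoned, with a distinct final byte. */
	memset(p, POISON_FREE, OBJECT_SIZE - 1);
	p[OBJECT_SIZE - 1] = POISON_END;

	for (i = 0; i < (int)sizeof(slot); i++)
		printf("%02x%s", slot[i], (i + 1) % 8 ? " " : "\n");
	return 0;
}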
1054 static void restore_bytes(struct kmem_cache *s, char *message, u8 data, in restore_bytes() argument
1057 slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data); in restore_bytes()
1061 static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab, in check_bytes_and_report() argument
1082 slab_bug(s, "%s overwritten", what); in check_bytes_and_report()
1086 print_trailer(s, slab, object); in check_bytes_and_report()
1090 restore_bytes(s, what, value, fault, end); in check_bytes_and_report()
1133 static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p) in check_pad_bytes() argument
1135 unsigned long off = get_info_end(s); /* The end of info */ in check_pad_bytes()
1137 if (s->flags & SLAB_STORE_USER) { in check_pad_bytes()
1141 if (s->flags & SLAB_KMALLOC) in check_pad_bytes()
1145 off += kasan_metadata_size(s, false); in check_pad_bytes()
1147 if (size_from_object(s) == off) in check_pad_bytes()
1150 return check_bytes_and_report(s, slab, p, "Object padding", in check_pad_bytes()
1151 p + off, POISON_INUSE, size_from_object(s) - off); in check_pad_bytes()
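check_bytes_and_report(), check_pad_bytes() and restore_bytes() above form SLUB's detect, report and repair pattern: scan a range for a byte that no longer holds the expected poison value and, if one is found, refill the range so subsequent checks are not flooded by the same corruption. A hedged userspace sketch of that pattern; the kernel scans with memchr_inv(), for which check_bytes() below is a naive stand-in, and POISON_INUSE is the padding poison from include/linux/poison.h.

#include <stdio.h>
#include <string.h>

#define POISON_INUSE 0x5a

/* Return the first byte that differs from c, or NULL if the whole range
 * still holds the expected value (what memchr_inv() does in the kernel). */
static unsigned char *check_bytes(unsigned char *start, unsigned char c,
				  size_t bytes)
{
	size_t i;

	for (i = 0; i < bytes; i++)
		if (start[i] != c)
			return start + i;
	return NULL;
}

int main(void)
{
	unsigned char pad[16];
	unsigned char *fault;

	memset(pad, POISON_INUSE, sizeof(pad));
	pad[5] = 0x42;				/* simulate a padding overwrite */

	fault = check_bytes(pad, POISON_INUSE, sizeof(pad));
	if (fault) {
		printf("padding overwritten at offset %td\n", fault - pad);
		/* The restore_bytes() step: repair the range and carry on. */
		memset(fault, POISON_INUSE, sizeof(pad) - (fault - pad));
	}
	return 0;
}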
1155 static void slab_pad_check(struct kmem_cache *s, struct slab *slab) in slab_pad_check() argument
1164 if (!(s->flags & SLAB_POISON)) in slab_pad_check()
1170 remainder = length % s->size; in slab_pad_check()
1183 slab_err(s, slab, "Padding overwritten. 0x%p-0x%p @offset=%tu", in slab_pad_check()
1187 restore_bytes(s, "slab padding", POISON_INUSE, fault, end); in slab_pad_check()
1190 static int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1194 u8 *endobject = object + s->object_size; in check_object()
1197 if (s->flags & SLAB_RED_ZONE) { in check_object()
1198 if (!check_bytes_and_report(s, slab, object, "Left Redzone", in check_object()
1199 object - s->red_left_pad, val, s->red_left_pad)) in check_object()
1202 if (!check_bytes_and_report(s, slab, object, "Right Redzone", in check_object()
1203 endobject, val, s->inuse - s->object_size)) in check_object()
1206 if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) { in check_object()
1207 orig_size = get_orig_size(s, object); in check_object()
1209 if (s->object_size > orig_size && in check_object()
1210 !check_bytes_and_report(s, slab, object, in check_object()
1212 val, s->object_size - orig_size)) { in check_object()
1217 if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { in check_object()
1218 check_bytes_and_report(s, slab, p, "Alignment padding", in check_object()
1220 s->inuse - s->object_size); in check_object()
1224 if (s->flags & SLAB_POISON) { in check_object()
1225 if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) && in check_object()
1226 (!check_bytes_and_report(s, slab, p, "Poison", p, in check_object()
1227 POISON_FREE, s->object_size - 1) || in check_object()
1228 !check_bytes_and_report(s, slab, p, "End Poison", in check_object()
1229 p + s->object_size - 1, POISON_END, 1))) in check_object()
1234 check_pad_bytes(s, slab, p); in check_object()
1237 if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE) in check_object()
1245 if (!check_valid_pointer(s, slab, get_freepointer(s, p))) { in check_object()
1246 object_err(s, slab, p, "Freepointer corrupt"); in check_object()
1252 set_freepointer(s, p, NULL); in check_object()
1258 static int check_slab(struct kmem_cache *s, struct slab *slab) in check_slab() argument
1263 slab_err(s, slab, "Not a valid slab page"); in check_slab()
1267 maxobj = order_objects(slab_order(slab), s->size); in check_slab()
1269 slab_err(s, slab, "objects %u > max %u", in check_slab()
1274 slab_err(s, slab, "inuse %u > max %u", in check_slab()
1279 slab_pad_check(s, slab); in check_slab()
1287 static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search) in on_freelist() argument
1298 if (!check_valid_pointer(s, slab, fp)) { in on_freelist()
1300 object_err(s, slab, object, in on_freelist()
1302 set_freepointer(s, object, NULL); in on_freelist()
1304 slab_err(s, slab, "Freepointer corrupt"); in on_freelist()
1307 slab_fix(s, "Freelist cleared"); in on_freelist()
1313 fp = get_freepointer(s, object); in on_freelist()
1317 max_objects = order_objects(slab_order(slab), s->size); in on_freelist()
1322 slab_err(s, slab, "Wrong number of objects. Found %d but should be %d", in on_freelist()
1325 slab_fix(s, "Number of objects adjusted"); in on_freelist()
1328 slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d", in on_freelist()
1331 slab_fix(s, "Object count adjusted"); in on_freelist()
1336 static void trace(struct kmem_cache *s, struct slab *slab, void *object, in trace() argument
1339 if (s->flags & SLAB_TRACE) { in trace()
1341 s->name, in trace()
1348 s->object_size); in trace()
1357 static void add_full(struct kmem_cache *s, in add_full() argument
1360 if (!(s->flags & SLAB_STORE_USER)) in add_full()
1367 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab) in remove_full() argument
1369 if (!(s->flags & SLAB_STORE_USER)) in remove_full()
1381 static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects) in inc_slabs_node() argument
1383 struct kmem_cache_node *n = get_node(s, node); in inc_slabs_node()
1396 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects) in dec_slabs_node() argument
1398 struct kmem_cache_node *n = get_node(s, node); in dec_slabs_node()
1405 static void setup_object_debug(struct kmem_cache *s, void *object) in setup_object_debug() argument
1407 if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)) in setup_object_debug()
1410 init_object(s, object, SLUB_RED_INACTIVE); in setup_object_debug()
1411 init_tracking(s, object); in setup_object_debug()
1415 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) in setup_slab_debug() argument
1417 if (!kmem_cache_debug_flags(s, SLAB_POISON)) in setup_slab_debug()
1425 static inline int alloc_consistency_checks(struct kmem_cache *s, in alloc_consistency_checks() argument
1428 if (!check_slab(s, slab)) in alloc_consistency_checks()
1431 if (!check_valid_pointer(s, slab, object)) { in alloc_consistency_checks()
1432 object_err(s, slab, object, "Freelist Pointer check fails"); in alloc_consistency_checks()
1436 if (!check_object(s, slab, object, SLUB_RED_INACTIVE)) in alloc_consistency_checks()
1442 static noinline bool alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1445 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in alloc_debug_processing()
1446 if (!alloc_consistency_checks(s, slab, object)) in alloc_debug_processing()
1451 trace(s, slab, object, 1); in alloc_debug_processing()
1452 set_orig_size(s, object, orig_size); in alloc_debug_processing()
1453 init_object(s, object, SLUB_RED_ACTIVE); in alloc_debug_processing()
1463 slab_fix(s, "Marking all objects used"); in alloc_debug_processing()
1470 static inline int free_consistency_checks(struct kmem_cache *s, in free_consistency_checks() argument
1473 if (!check_valid_pointer(s, slab, object)) { in free_consistency_checks()
1474 slab_err(s, slab, "Invalid object pointer 0x%p", object); in free_consistency_checks()
1478 if (on_freelist(s, slab, object)) { in free_consistency_checks()
1479 object_err(s, slab, object, "Object already free"); in free_consistency_checks()
1483 if (!check_object(s, slab, object, SLUB_RED_ACTIVE)) in free_consistency_checks()
1486 if (unlikely(s != slab->slab_cache)) { in free_consistency_checks()
1488 slab_err(s, slab, "Attempt to free object(0x%p) outside of slab", in free_consistency_checks()
1495 object_err(s, slab, object, in free_consistency_checks()
1716 static inline void setup_object_debug(struct kmem_cache *s, void *object) {} in setup_object_debug() argument
1718 void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {} in setup_slab_debug() argument
1720 static inline bool alloc_debug_processing(struct kmem_cache *s, in alloc_debug_processing() argument
1723 static inline bool free_debug_processing(struct kmem_cache *s, in free_debug_processing() argument
1727 static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {} in slab_pad_check() argument
1728 static inline int check_object(struct kmem_cache *s, struct slab *slab, in check_object() argument
1731 static inline void set_track(struct kmem_cache *s, void *object, in set_track() argument
1733 static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n, in add_full() argument
1735 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, in remove_full() argument
1748 static inline void inc_slabs_node(struct kmem_cache *s, int node, in inc_slabs_node() argument
1750 static inline void dec_slabs_node(struct kmem_cache *s, int node, in dec_slabs_node() argument
1754 static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab, in freelist_corrupted() argument
1766 static __always_inline bool slab_free_hook(struct kmem_cache *s, in slab_free_hook() argument
1769 kmemleak_free_recursive(x, s->flags); in slab_free_hook()
1770 kmsan_slab_free(s, x); in slab_free_hook()
1772 debug_check_no_locks_freed(x, s->object_size); in slab_free_hook()
1774 if (!(s->flags & SLAB_DEBUG_OBJECTS)) in slab_free_hook()
1775 debug_check_no_obj_freed(x, s->object_size); in slab_free_hook()
1778 if (!(s->flags & SLAB_TYPESAFE_BY_RCU)) in slab_free_hook()
1779 __kcsan_check_access(x, s->object_size, in slab_free_hook()
1794 memset(kasan_reset_tag(x), 0, s->object_size); in slab_free_hook()
1795 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0; in slab_free_hook()
1796 memset((char *)kasan_reset_tag(x) + s->inuse, 0, in slab_free_hook()
1797 s->size - s->inuse - rsize); in slab_free_hook()
1800 return kasan_slab_free(s, x, init); in slab_free_hook()
1803 static inline bool slab_free_freelist_hook(struct kmem_cache *s, in slab_free_freelist_hook() argument
1813 slab_free_hook(s, next, false); in slab_free_freelist_hook()
1823 next = get_freepointer(s, object); in slab_free_freelist_hook()
1826 if (!slab_free_hook(s, object, slab_want_init_on_free(s))) { in slab_free_freelist_hook()
1828 set_freepointer(s, object, *head); in slab_free_freelist_hook()
1847 static void *setup_object(struct kmem_cache *s, void *object) in setup_object() argument
1849 setup_object_debug(s, object); in setup_object()
1850 object = kasan_init_slab_obj(s, object); in setup_object()
1851 if (unlikely(s->ctor)) { in setup_object()
1852 kasan_unpoison_object_data(s, object); in setup_object()
1853 s->ctor(object); in setup_object()
1854 kasan_poison_object_data(s, object); in setup_object()
1889 static int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1891 unsigned int count = oo_objects(s->oo); in init_cache_random_seq()
1895 if (s->random_seq) in init_cache_random_seq()
1898 err = cache_random_seq_create(s, count, GFP_KERNEL); in init_cache_random_seq()
1901 s->name); in init_cache_random_seq()
1906 if (s->random_seq) { in init_cache_random_seq()
1910 s->random_seq[i] *= s->size; in init_cache_random_seq()
1918 struct kmem_cache *s; in init_freelist_randomization() local
1922 list_for_each_entry(s, &slab_caches, list) in init_freelist_randomization()
1923 init_cache_random_seq(s); in init_freelist_randomization()
1929 static void *next_freelist_entry(struct kmem_cache *s, struct slab *slab, in next_freelist_entry() argument
1941 idx = s->random_seq[*pos]; in next_freelist_entry()
1951 static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
1958 if (slab->objects < 2 || !s->random_seq) in shuffle_freelist()
1961 freelist_count = oo_objects(s->oo); in shuffle_freelist()
1964 page_limit = slab->objects * s->size; in shuffle_freelist()
1965 start = fixup_red_left(s, slab_address(slab)); in shuffle_freelist()
1968 cur = next_freelist_entry(s, slab, &pos, start, page_limit, in shuffle_freelist()
1970 cur = setup_object(s, cur); in shuffle_freelist()
1974 next = next_freelist_entry(s, slab, &pos, start, page_limit, in shuffle_freelist()
1976 next = setup_object(s, next); in shuffle_freelist()
1977 set_freepointer(s, cur, next); in shuffle_freelist()
1980 set_freepointer(s, cur, NULL); in shuffle_freelist()
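shuffle_freelist() above links a fresh slab's objects in the order given by the cache's precomputed random sequence, which init_cache_random_seq() has already scaled from object indexes to byte offsets (random_seq[i] *= s->size). The sketch below imitates that with a hard-coded permutation, made-up sizes and plain first-word free pointers; it is illustrative only.

#include <stdio.h>
#include <string.h>

#define OBJ_SIZE   32		/* hypothetical s->size       */
#define NR_OBJECTS 8		/* hypothetical slab->objects */

static unsigned char slab[NR_OBJECTS * OBJ_SIZE];

int main(void)
{
	/* A made-up random sequence of object indexes, scaled to byte offsets
	 * the way init_cache_random_seq() does. */
	unsigned int random_seq[NR_OBJECTS] = { 3, 0, 6, 1, 7, 4, 2, 5 };
	void *head, *cur;
	unsigned int i;

	for (i = 0; i < NR_OBJECTS; i++)
		random_seq[i] *= OBJ_SIZE;

	/* Link the objects in randomized order, as shuffle_freelist() does:
	 * each object's first word points at the next entry in the sequence. */
	head = cur = slab + random_seq[0];
	for (i = 1; i < NR_OBJECTS; i++) {
		void *next = slab + random_seq[i];
		memcpy(cur, &next, sizeof(next));	/* set_freepointer() stand-in */
		cur = next;
	}
	memset(cur, 0, sizeof(void *));			/* terminate the freelist */

	/* Walk the shuffled freelist and print the allocation order. */
	for (cur = head; cur; memcpy(&cur, cur, sizeof(cur)))
		printf("object %td\n", ((unsigned char *)cur - slab) / OBJ_SIZE);
	return 0;
}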
1985 static inline int init_cache_random_seq(struct kmem_cache *s) in init_cache_random_seq() argument
1990 static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab) in shuffle_freelist() argument
1996 static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) in allocate_slab() argument
1999 struct kmem_cache_order_objects oo = s->oo; in allocate_slab()
2007 flags |= s->allocflags; in allocate_slab()
2014 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min)) in allocate_slab()
2019 oo = s->min; in allocate_slab()
2028 stat(s, ORDER_FALLBACK); in allocate_slab()
2035 account_slab(slab, oo_order(oo), s, flags); in allocate_slab()
2037 slab->slab_cache = s; in allocate_slab()
2043 setup_slab_debug(s, slab, start); in allocate_slab()
2045 shuffle = shuffle_freelist(s, slab); in allocate_slab()
2048 start = fixup_red_left(s, start); in allocate_slab()
2049 start = setup_object(s, start); in allocate_slab()
2052 next = p + s->size; in allocate_slab()
2053 next = setup_object(s, next); in allocate_slab()
2054 set_freepointer(s, p, next); in allocate_slab()
2057 set_freepointer(s, p, NULL); in allocate_slab()
2063 static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node) in new_slab() argument
2068 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO)); in new_slab()
2070 return allocate_slab(s, in new_slab()
2074 static void __free_slab(struct kmem_cache *s, struct slab *slab) in __free_slab() argument
2086 unaccount_slab(slab, order, s); in __free_slab()
2097 static void free_slab(struct kmem_cache *s, struct slab *slab) in free_slab() argument
2099 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) { in free_slab()
2102 slab_pad_check(s, slab); in free_slab()
2103 for_each_object(p, s, slab_address(slab), slab->objects) in free_slab()
2104 check_object(s, slab, p, SLUB_RED_INACTIVE); in free_slab()
2107 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) in free_slab()
2110 __free_slab(s, slab); in free_slab()
2113 static void discard_slab(struct kmem_cache *s, struct slab *slab) in discard_slab() argument
2115 dec_slabs_node(s, slab_nid(slab), slab->objects); in discard_slab()
2116 free_slab(s, slab); in discard_slab()
2153 static void *alloc_single_from_partial(struct kmem_cache *s, in alloc_single_from_partial() argument
2161 slab->freelist = get_freepointer(s, object); in alloc_single_from_partial()
2164 if (!alloc_debug_processing(s, slab, object, orig_size)) { in alloc_single_from_partial()
2171 add_full(s, n, slab); in alloc_single_from_partial()
2182 static void *alloc_single_from_new_slab(struct kmem_cache *s, in alloc_single_from_new_slab() argument
2186 struct kmem_cache_node *n = get_node(s, nid); in alloc_single_from_new_slab()
2192 slab->freelist = get_freepointer(s, object); in alloc_single_from_new_slab()
2195 if (!alloc_debug_processing(s, slab, object, orig_size)) in alloc_single_from_new_slab()
2206 add_full(s, n, slab); in alloc_single_from_new_slab()
2210 inc_slabs_node(s, nid, slab->objects); in alloc_single_from_new_slab()
2222 static inline void *acquire_slab(struct kmem_cache *s, in acquire_slab() argument
2250 if (!__slab_update_freelist(s, slab, in acquire_slab()
2262 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2264 static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab, in put_cpu_partial() argument
2272 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, in get_partial_node() argument
2296 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { in get_partial_node()
2297 object = alloc_single_from_partial(s, n, slab, in get_partial_node()
2304 t = acquire_slab(s, n, slab, object == NULL); in get_partial_node()
2310 stat(s, ALLOC_FROM_PARTIAL); in get_partial_node()
2313 put_cpu_partial(s, slab, 0); in get_partial_node()
2314 stat(s, CPU_PARTIAL_NODE); in get_partial_node()
2318 if (!kmem_cache_has_cpu_partial(s) in get_partial_node()
2319 || partial_slabs > s->cpu_partial_slabs / 2) in get_partial_node()
2333 static void *get_any_partial(struct kmem_cache *s, struct partial_context *pc) in get_any_partial() argument
2361 if (!s->remote_node_defrag_ratio || in get_any_partial()
2362 get_cycles() % 1024 > s->remote_node_defrag_ratio) in get_any_partial()
2371 n = get_node(s, zone_to_nid(zone)); in get_any_partial()
2374 n->nr_partial > s->min_partial) { in get_any_partial()
2375 object = get_partial_node(s, n, pc); in get_any_partial()
2396 static void *get_partial(struct kmem_cache *s, int node, struct partial_context *pc) in get_partial() argument
2404 object = get_partial_node(s, get_node(s, searchnode), pc); in get_partial()
2408 return get_any_partial(s, pc); in get_partial()
2451 const struct kmem_cache *s, unsigned long tid) in note_cmpxchg_failure() argument
2454 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
2456 pr_info("%s %s: cmpxchg redo ", n, s->name); in note_cmpxchg_failure()
2471 stat(s, CMPXCHG_DOUBLE_CPU_FAIL); in note_cmpxchg_failure()
2474 static void init_kmem_cache_cpus(struct kmem_cache *s) in init_kmem_cache_cpus() argument
2480 c = per_cpu_ptr(s->cpu_slab, cpu); in init_kmem_cache_cpus()
2492 static void deactivate_slab(struct kmem_cache *s, struct slab *slab, in deactivate_slab() argument
2496 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in deactivate_slab()
2506 stat(s, DEACTIVATE_REMOTE_FREES); in deactivate_slab()
2517 nextfree = get_freepointer(s, freelist_iter); in deactivate_slab()
2524 if (freelist_corrupted(s, slab, &freelist_iter, nextfree)) in deactivate_slab()
2555 set_freepointer(s, freelist_tail, old.freelist); in deactivate_slab()
2562 if (!new.inuse && n->nr_partial >= s->min_partial) { in deactivate_slab()
2576 if (!slab_update_freelist(s, slab, in deactivate_slab()
2589 stat(s, tail); in deactivate_slab()
2591 stat(s, DEACTIVATE_EMPTY); in deactivate_slab()
2592 discard_slab(s, slab); in deactivate_slab()
2593 stat(s, FREE_SLAB); in deactivate_slab()
2595 stat(s, DEACTIVATE_FULL); in deactivate_slab()
2600 static void __unfreeze_partials(struct kmem_cache *s, struct slab *partial_slab) in __unfreeze_partials() argument
2613 n2 = get_node(s, slab_nid(slab)); in __unfreeze_partials()
2633 } while (!__slab_update_freelist(s, slab, in __unfreeze_partials()
2638 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) { in __unfreeze_partials()
2643 stat(s, FREE_ADD_PARTIAL); in __unfreeze_partials()
2654 stat(s, DEACTIVATE_EMPTY); in __unfreeze_partials()
2655 discard_slab(s, slab); in __unfreeze_partials()
2656 stat(s, FREE_SLAB); in __unfreeze_partials()
2663 static void unfreeze_partials(struct kmem_cache *s) in unfreeze_partials() argument
2668 local_lock_irqsave(&s->cpu_slab->lock, flags); in unfreeze_partials()
2669 partial_slab = this_cpu_read(s->cpu_slab->partial); in unfreeze_partials()
2670 this_cpu_write(s->cpu_slab->partial, NULL); in unfreeze_partials()
2671 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in unfreeze_partials()
2674 __unfreeze_partials(s, partial_slab); in unfreeze_partials()
2677 static void unfreeze_partials_cpu(struct kmem_cache *s, in unfreeze_partials_cpu() argument
2686 __unfreeze_partials(s, partial_slab); in unfreeze_partials_cpu()
2696 static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain) in put_cpu_partial() argument
2703 local_lock_irqsave(&s->cpu_slab->lock, flags); in put_cpu_partial()
2705 oldslab = this_cpu_read(s->cpu_slab->partial); in put_cpu_partial()
2708 if (drain && oldslab->slabs >= s->cpu_partial_slabs) { in put_cpu_partial()
2726 this_cpu_write(s->cpu_slab->partial, slab); in put_cpu_partial()
2728 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in put_cpu_partial()
2731 __unfreeze_partials(s, slab_to_unfreeze); in put_cpu_partial()
2732 stat(s, CPU_PARTIAL_DRAIN); in put_cpu_partial()
2738 static inline void unfreeze_partials(struct kmem_cache *s) { } in unfreeze_partials() argument
2739 static inline void unfreeze_partials_cpu(struct kmem_cache *s, in unfreeze_partials_cpu() argument
2744 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c) in flush_slab() argument
2750 local_lock_irqsave(&s->cpu_slab->lock, flags); in flush_slab()
2759 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in flush_slab()
2762 deactivate_slab(s, slab, freelist); in flush_slab()
2763 stat(s, CPUSLAB_FLUSH); in flush_slab()
2767 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) in __flush_cpu_slab() argument
2769 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2778 deactivate_slab(s, slab, freelist); in __flush_cpu_slab()
2779 stat(s, CPUSLAB_FLUSH); in __flush_cpu_slab()
2782 unfreeze_partials_cpu(s, c); in __flush_cpu_slab()
2787 struct kmem_cache *s; member
2798 struct kmem_cache *s; in flush_cpu_slab() local
2804 s = sfw->s; in flush_cpu_slab()
2805 c = this_cpu_ptr(s->cpu_slab); in flush_cpu_slab()
2808 flush_slab(s, c); in flush_cpu_slab()
2810 unfreeze_partials(s); in flush_cpu_slab()
2813 static bool has_cpu_slab(int cpu, struct kmem_cache *s) in has_cpu_slab() argument
2815 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
2823 static void flush_all_cpus_locked(struct kmem_cache *s) in flush_all_cpus_locked() argument
2833 if (!has_cpu_slab(cpu, s)) { in flush_all_cpus_locked()
2839 sfw->s = s; in flush_all_cpus_locked()
2853 static void flush_all(struct kmem_cache *s) in flush_all() argument
2856 flush_all_cpus_locked(s); in flush_all()
2866 struct kmem_cache *s; in slub_cpu_dead() local
2869 list_for_each_entry(s, &slab_caches, list) in slub_cpu_dead()
2870 __flush_cpu_slab(s, cpu); in slub_cpu_dead()
2876 static inline void flush_all_cpus_locked(struct kmem_cache *s) { } in flush_all_cpus_locked() argument
2877 static inline void flush_all(struct kmem_cache *s) { } in flush_all() argument
2878 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { } in __flush_cpu_slab() argument
2907 static inline bool free_debug_processing(struct kmem_cache *s, in free_debug_processing() argument
2915 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
2916 if (!check_slab(s, slab)) in free_debug_processing()
2921 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n", in free_debug_processing()
2931 if (s->flags & SLAB_CONSISTENCY_CHECKS) { in free_debug_processing()
2932 if (!free_consistency_checks(s, slab, object, addr)) in free_debug_processing()
2936 if (s->flags & SLAB_STORE_USER) in free_debug_processing()
2937 set_track_update(s, object, TRACK_FREE, addr, handle); in free_debug_processing()
2938 trace(s, slab, object, 0); in free_debug_processing()
2940 init_object(s, object, SLUB_RED_INACTIVE); in free_debug_processing()
2944 object = get_freepointer(s, object); in free_debug_processing()
2951 slab_err(s, slab, "Bulk free expected %d objects but found %d\n", in free_debug_processing()
2959 slab_fix(s, "Object at 0x%p not freed", object); in free_debug_processing()
2983 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) in slab_out_of_memory() argument
2996 s->name, s->object_size, s->size, oo_order(s->oo), in slab_out_of_memory()
2997 oo_order(s->min)); in slab_out_of_memory()
2999 if (oo_order(s->min) > get_order(s->object_size)) in slab_out_of_memory()
3001 s->name); in slab_out_of_memory()
3003 for_each_kmem_cache_node(s, node, n) { in slab_out_of_memory()
3018 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { } in slab_out_of_memory() argument
3031 __update_cpu_freelist_fast(struct kmem_cache *s, in __update_cpu_freelist_fast() argument
3038 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full, in __update_cpu_freelist_fast()
3050 static inline void *get_freelist(struct kmem_cache *s, struct slab *slab) in get_freelist() argument
3056 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); in get_freelist()
3068 } while (!__slab_update_freelist(s, slab, in get_freelist()
3095 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in ___slab_alloc() argument
3103 stat(s, ALLOC_SLOWPATH); in ___slab_alloc()
3128 stat(s, ALLOC_NODE_MISMATCH); in ___slab_alloc()
3142 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3144 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3151 freelist = get_freelist(s, slab); in ___slab_alloc()
3156 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3157 stat(s, DEACTIVATE_BYPASS); in ___slab_alloc()
3161 stat(s, ALLOC_REFILL); in ___slab_alloc()
3165 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock)); in ___slab_alloc()
3173 c->freelist = get_freepointer(s, freelist); in ___slab_alloc()
3175 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3180 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3182 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3189 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3190 deactivate_slab(s, slab, freelist); in ___slab_alloc()
3195 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3197 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3201 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3208 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3209 stat(s, CPU_PARTIAL_ALLOC); in ___slab_alloc()
3218 freelist = get_partial(s, node, &pc); in ___slab_alloc()
3222 slub_put_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3223 slab = new_slab(s, gfpflags, node); in ___slab_alloc()
3224 c = slub_get_cpu_ptr(s->cpu_slab); in ___slab_alloc()
3227 slab_out_of_memory(s, gfpflags, node); in ___slab_alloc()
3231 stat(s, ALLOC_SLAB); in ___slab_alloc()
3233 if (kmem_cache_debug(s)) { in ___slab_alloc()
3234 freelist = alloc_single_from_new_slab(s, slab, orig_size); in ___slab_alloc()
3239 if (s->flags & SLAB_STORE_USER) in ___slab_alloc()
3240 set_track(s, freelist, TRACK_ALLOC, addr); in ___slab_alloc()
3254 inc_slabs_node(s, slab_nid(slab), slab->objects); in ___slab_alloc()
3258 if (kmem_cache_debug(s)) { in ___slab_alloc()
3264 if (s->flags & SLAB_STORE_USER) in ___slab_alloc()
3265 set_track(s, freelist, TRACK_ALLOC, addr); in ___slab_alloc()
3275 deactivate_slab(s, slab, get_freepointer(s, freelist)); in ___slab_alloc()
3281 local_lock_irqsave(&s->cpu_slab->lock, flags); in ___slab_alloc()
3290 local_unlock_irqrestore(&s->cpu_slab->lock, flags); in ___slab_alloc()
3292 deactivate_slab(s, flush_slab, flush_freelist); in ___slab_alloc()
3294 stat(s, CPUSLAB_FLUSH); in ___slab_alloc()
3308 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, in __slab_alloc() argument
3319 c = slub_get_cpu_ptr(s->cpu_slab); in __slab_alloc()
3322 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size); in __slab_alloc()
3324 slub_put_cpu_ptr(s->cpu_slab); in __slab_alloc()
3329 static __always_inline void *__slab_alloc_node(struct kmem_cache *s, in __slab_alloc_node() argument
3350 c = raw_cpu_ptr(s->cpu_slab); in __slab_alloc_node()
3375 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size); in __slab_alloc_node()
3377 void *next_object = get_freepointer_safe(s, object); in __slab_alloc_node()
3393 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) { in __slab_alloc_node()
3394 note_cmpxchg_failure("slab_alloc", s, tid); in __slab_alloc_node()
3397 prefetch_freepointer(s, next_object); in __slab_alloc_node()
3398 stat(s, ALLOC_FASTPATH); in __slab_alloc_node()
3404 static void *__slab_alloc_node(struct kmem_cache *s, in __slab_alloc_node() argument
3414 object = get_partial(s, node, &pc); in __slab_alloc_node()
3419 slab = new_slab(s, gfpflags, node); in __slab_alloc_node()
3421 slab_out_of_memory(s, gfpflags, node); in __slab_alloc_node()
3425 object = alloc_single_from_new_slab(s, slab, orig_size); in __slab_alloc_node()
3435 static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s, in maybe_wipe_obj_freeptr() argument
3438 if (unlikely(slab_want_init_on_free(s)) && obj) in maybe_wipe_obj_freeptr()
3439 memset((void *)((char *)kasan_reset_tag(obj) + s->offset), in maybe_wipe_obj_freeptr()
3453 static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru, in slab_alloc_node() argument
3460 s = slab_pre_alloc_hook(s, lru, &objcg, 1, gfpflags); in slab_alloc_node()
3461 if (!s) in slab_alloc_node()
3464 object = kfence_alloc(s, orig_size, gfpflags); in slab_alloc_node()
3468 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size); in slab_alloc_node()
3470 maybe_wipe_obj_freeptr(s, object); in slab_alloc_node()
3471 init = slab_want_init_on_alloc(gfpflags, s); in slab_alloc_node()
3478 slab_post_alloc_hook(s, objcg, gfpflags, 1, &object, init, orig_size); in slab_alloc_node()
3483 static __fastpath_inline void *slab_alloc(struct kmem_cache *s, struct list_lru *lru, in slab_alloc() argument
3486 return slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, addr, orig_size); in slab_alloc()
3490 void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, in __kmem_cache_alloc_lru() argument
3493 void *ret = slab_alloc(s, lru, gfpflags, _RET_IP_, s->object_size); in __kmem_cache_alloc_lru()
3495 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE); in __kmem_cache_alloc_lru()
3500 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) in kmem_cache_alloc() argument
3502 return __kmem_cache_alloc_lru(s, NULL, gfpflags); in kmem_cache_alloc()
3506 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, in kmem_cache_alloc_lru() argument
3509 return __kmem_cache_alloc_lru(s, lru, gfpflags); in kmem_cache_alloc_lru()
3513 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, in __kmem_cache_alloc_node() argument
3517 return slab_alloc_node(s, NULL, gfpflags, node, in __kmem_cache_alloc_node()
3521 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) in kmem_cache_alloc_node() argument
3523 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); in kmem_cache_alloc_node()
3525 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node); in kmem_cache_alloc_node()
3532 struct kmem_cache *s, struct slab *slab, in free_to_partial_list() argument
3536 struct kmem_cache_node *n = get_node(s, slab_nid(slab)); in free_to_partial_list()
3542 if (s->flags & SLAB_STORE_USER) in free_to_partial_list()
3547 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) { in free_to_partial_list()
3552 set_freepointer(s, tail, prior); in free_to_partial_list()
3560 if (slab->inuse == 0 && n->nr_partial >= s->min_partial) in free_to_partial_list()
3565 remove_full(s, n, slab); in free_to_partial_list()
3568 stat(s, FREE_ADD_PARTIAL); in free_to_partial_list()
3572 stat(s, FREE_REMOVE_PARTIAL); in free_to_partial_list()
3581 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects); in free_to_partial_list()
3587 stat(s, FREE_SLAB); in free_to_partial_list()
3588 free_slab(s, slab_free); in free_to_partial_list()
3600 static void __slab_free(struct kmem_cache *s, struct slab *slab, in __slab_free() argument
3612 stat(s, FREE_SLOWPATH); in __slab_free()
3617 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) { in __slab_free()
3618 free_to_partial_list(s, slab, head, tail, cnt, addr); in __slab_free()
3629 set_freepointer(s, tail, prior); in __slab_free()
3635 if (kmem_cache_has_cpu_partial(s) && !prior) { in __slab_free()
3647 n = get_node(s, slab_nid(slab)); in __slab_free()
3661 } while (!slab_update_freelist(s, slab, in __slab_free()
3673 stat(s, FREE_FROZEN); in __slab_free()
3679 put_cpu_partial(s, slab, 1); in __slab_free()
3680 stat(s, CPU_PARTIAL_FREE); in __slab_free()
3686 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) in __slab_free()
3693 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) { in __slab_free()
3694 remove_full(s, n, slab); in __slab_free()
3696 stat(s, FREE_ADD_PARTIAL); in __slab_free()
3707 stat(s, FREE_REMOVE_PARTIAL); in __slab_free()
3710 remove_full(s, n, slab); in __slab_free()
3714 stat(s, FREE_SLAB); in __slab_free()
3715 discard_slab(s, slab); in __slab_free()
3734 static __always_inline void do_slab_free(struct kmem_cache *s, in do_slab_free() argument
3750 c = raw_cpu_ptr(s->cpu_slab); in do_slab_free()
3757 __slab_free(s, slab, head, tail_obj, cnt, addr); in do_slab_free()
3764 set_freepointer(s, tail_obj, freelist); in do_slab_free()
3766 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) { in do_slab_free()
3767 note_cmpxchg_failure("slab_free", s, tid); in do_slab_free()
3772 local_lock(&s->cpu_slab->lock); in do_slab_free()
3773 c = this_cpu_ptr(s->cpu_slab); in do_slab_free()
3775 local_unlock(&s->cpu_slab->lock); in do_slab_free()
3781 set_freepointer(s, tail_obj, freelist); in do_slab_free()
3785 local_unlock(&s->cpu_slab->lock); in do_slab_free()
3787 stat(s, FREE_FASTPATH); in do_slab_free()
3790 static void do_slab_free(struct kmem_cache *s, in do_slab_free() argument
3796 __slab_free(s, slab, head, tail_obj, cnt, addr); in do_slab_free()
3800 static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab, in slab_free() argument
3804 memcg_slab_free_hook(s, slab, p, cnt); in slab_free()
3809 if (slab_free_freelist_hook(s, &head, &tail, &cnt)) in slab_free()
3810 do_slab_free(s, slab, head, tail, cnt, addr); in slab_free()
3820 void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller) in __kmem_cache_free() argument
3822 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, caller); in __kmem_cache_free()
3825 void kmem_cache_free(struct kmem_cache *s, void *x) in kmem_cache_free() argument
3827 s = cache_from_obj(s, x); in kmem_cache_free()
3828 if (!s) in kmem_cache_free()
3830 trace_kmem_cache_free(_RET_IP_, x, s); in kmem_cache_free()
3831 slab_free(s, virt_to_slab(x), x, NULL, &x, 1, _RET_IP_); in kmem_cache_free()
3840 struct kmem_cache *s; member
3856 int build_detached_freelist(struct kmem_cache *s, size_t size, in build_detached_freelist() argument
3866 if (!s) { in build_detached_freelist()
3875 df->s = df->slab->slab_cache; in build_detached_freelist()
3878 df->s = cache_from_obj(s, object); /* Support for memcg */ in build_detached_freelist()
3889 set_freepointer(df->s, object, NULL); in build_detached_freelist()
3897 set_freepointer(df->s, object, df->freelist); in build_detached_freelist()
3915 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p) in kmem_cache_free_bulk() argument
3923 size = build_detached_freelist(s, size, p, &df); in kmem_cache_free_bulk()
3927 slab_free(df.s, df.slab, df.freelist, df.tail, &p[size], df.cnt, in kmem_cache_free_bulk()
3934 static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, in __kmem_cache_alloc_bulk() argument
3946 c = slub_get_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
3947 local_lock_irqsave(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
3950 void *object = kfence_alloc(s, s->object_size, flags); in __kmem_cache_alloc_bulk()
3968 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
3974 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, in __kmem_cache_alloc_bulk()
3975 _RET_IP_, c, s->object_size); in __kmem_cache_alloc_bulk()
3979 c = this_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
3980 maybe_wipe_obj_freeptr(s, p[i]); in __kmem_cache_alloc_bulk()
3982 local_lock_irqsave(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
3986 c->freelist = get_freepointer(s, object); in __kmem_cache_alloc_bulk()
3988 maybe_wipe_obj_freeptr(s, p[i]); in __kmem_cache_alloc_bulk()
3991 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags); in __kmem_cache_alloc_bulk()
3992 slub_put_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
3997 slub_put_cpu_ptr(s->cpu_slab); in __kmem_cache_alloc_bulk()
3998 slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size); in __kmem_cache_alloc_bulk()
3999 kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
4004 static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, in __kmem_cache_alloc_bulk() argument
4010 void *object = kfence_alloc(s, s->object_size, flags); in __kmem_cache_alloc_bulk()
4017 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE, in __kmem_cache_alloc_bulk()
4018 _RET_IP_, s->object_size); in __kmem_cache_alloc_bulk()
4022 maybe_wipe_obj_freeptr(s, p[i]); in __kmem_cache_alloc_bulk()
4028 slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size); in __kmem_cache_alloc_bulk()
4029 kmem_cache_free_bulk(s, i, p); in __kmem_cache_alloc_bulk()
4035 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, in kmem_cache_alloc_bulk() argument
4045 s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags); in kmem_cache_alloc_bulk()
4046 if (unlikely(!s)) in kmem_cache_alloc_bulk()
4049 i = __kmem_cache_alloc_bulk(s, flags, size, p, objcg); in kmem_cache_alloc_bulk()
4056 slab_post_alloc_hook(s, objcg, flags, size, p, in kmem_cache_alloc_bulk()
4057 slab_want_init_on_alloc(flags, s), s->object_size); in kmem_cache_alloc_bulk()
4216 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
4226 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), in alloc_kmem_cache_cpus()
4229 if (!s->cpu_slab) in alloc_kmem_cache_cpus()
4232 init_kmem_cache_cpus(s); in alloc_kmem_cache_cpus()
4237 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s) in alloc_kmem_cache_cpus() argument
4290 static void free_kmem_cache_nodes(struct kmem_cache *s) in free_kmem_cache_nodes() argument
4295 for_each_kmem_cache_node(s, node, n) { in free_kmem_cache_nodes()
4296 s->node[node] = NULL; in free_kmem_cache_nodes()
4301 void __kmem_cache_release(struct kmem_cache *s) in __kmem_cache_release() argument
4303 cache_random_seq_destroy(s); in __kmem_cache_release()
4305 free_percpu(s->cpu_slab); in __kmem_cache_release()
4307 free_kmem_cache_nodes(s); in __kmem_cache_release()
4310 static int init_kmem_cache_nodes(struct kmem_cache *s) in init_kmem_cache_nodes() argument
4325 free_kmem_cache_nodes(s); in init_kmem_cache_nodes()
4330 s->node[node] = n; in init_kmem_cache_nodes()
4335 static void set_cpu_partial(struct kmem_cache *s) in set_cpu_partial() argument
4353 if (!kmem_cache_has_cpu_partial(s)) in set_cpu_partial()
4355 else if (s->size >= PAGE_SIZE) in set_cpu_partial()
4357 else if (s->size >= 1024) in set_cpu_partial()
4359 else if (s->size >= 256) in set_cpu_partial()
4364 slub_set_cpu_partial(s, nr_objects); in set_cpu_partial()
4372 static int calculate_sizes(struct kmem_cache *s) in calculate_sizes() argument
4374 slab_flags_t flags = s->flags; in calculate_sizes()
4375 unsigned int size = s->object_size; in calculate_sizes()
4392 !s->ctor) in calculate_sizes()
4393 s->flags |= __OBJECT_POISON; in calculate_sizes()
4395 s->flags &= ~__OBJECT_POISON; in calculate_sizes()
4403 if ((flags & SLAB_RED_ZONE) && size == s->object_size) in calculate_sizes()
4411 s->inuse = size; in calculate_sizes()
4413 if (slub_debug_orig_size(s) || in calculate_sizes()
4415 ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) || in calculate_sizes()
4416 s->ctor) { in calculate_sizes()
4431 s->offset = size; in calculate_sizes()
4439 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *)); in calculate_sizes()
4456 kasan_cache_create(s, &size, &s->flags); in calculate_sizes()
4468 s->red_left_pad = sizeof(void *); in calculate_sizes()
4469 s->red_left_pad = ALIGN(s->red_left_pad, s->align); in calculate_sizes()
4470 size += s->red_left_pad; in calculate_sizes()
4479 size = ALIGN(size, s->align); in calculate_sizes()
4480 s->size = size; in calculate_sizes()
4481 s->reciprocal_size = reciprocal_value(size); in calculate_sizes()
4487 s->allocflags = 0; in calculate_sizes()
4489 s->allocflags |= __GFP_COMP; in calculate_sizes()
4491 if (s->flags & SLAB_CACHE_DMA) in calculate_sizes()
4492 s->allocflags |= GFP_DMA; in calculate_sizes()
4494 if (s->flags & SLAB_CACHE_DMA32) in calculate_sizes()
4495 s->allocflags |= GFP_DMA32; in calculate_sizes()
4497 if (s->flags & SLAB_RECLAIM_ACCOUNT) in calculate_sizes()
4498 s->allocflags |= __GFP_RECLAIMABLE; in calculate_sizes()
4503 s->oo = oo_make(order, size); in calculate_sizes()
4504 s->min = oo_make(get_order(size), size); in calculate_sizes()
4506 return !!oo_objects(s->oo); in calculate_sizes()
4509 static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) in kmem_cache_open() argument
4511 s->flags = kmem_cache_flags(s->size, flags, s->name); in kmem_cache_open()
4513 s->random = get_random_long(); in kmem_cache_open()
4516 if (!calculate_sizes(s)) in kmem_cache_open()
4523 if (get_order(s->size) > get_order(s->object_size)) { in kmem_cache_open()
4524 s->flags &= ~DEBUG_METADATA_FLAGS; in kmem_cache_open()
4525 s->offset = 0; in kmem_cache_open()
4526 if (!calculate_sizes(s)) in kmem_cache_open()
4532 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) { in kmem_cache_open()
4534 s->flags |= __CMPXCHG_DOUBLE; in kmem_cache_open()
4542 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2); in kmem_cache_open()
4543 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial); in kmem_cache_open()
4545 set_cpu_partial(s); in kmem_cache_open()
4548 s->remote_node_defrag_ratio = 1000; in kmem_cache_open()
4553 if (init_cache_random_seq(s)) in kmem_cache_open()
4557 if (!init_kmem_cache_nodes(s)) in kmem_cache_open()
4560 if (alloc_kmem_cache_cpus(s)) in kmem_cache_open()
4564 __kmem_cache_release(s); in kmem_cache_open()
4568 static void list_slab_objects(struct kmem_cache *s, struct slab *slab, in list_slab_objects() argument
4575 slab_err(s, slab, text, s->name); in list_slab_objects()
4578 __fill_map(object_map, s, slab); in list_slab_objects()
4580 for_each_object(p, s, addr, slab->objects) { in list_slab_objects()
4582 if (!test_bit(__obj_to_index(s, addr, p), object_map)) { in list_slab_objects()
4584 print_tracking(s, p); in list_slab_objects()
4596 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) in free_partial() argument
4608 list_slab_objects(s, slab, in free_partial()
4615 discard_slab(s, slab); in free_partial()
4618 bool __kmem_cache_empty(struct kmem_cache *s) in __kmem_cache_empty() argument
4623 for_each_kmem_cache_node(s, node, n) in __kmem_cache_empty()
4632 int __kmem_cache_shutdown(struct kmem_cache *s) in __kmem_cache_shutdown() argument
4637 flush_all_cpus_locked(s); in __kmem_cache_shutdown()
4639 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_shutdown()
4640 free_partial(s, n); in __kmem_cache_shutdown()
4655 struct kmem_cache *s = slab->slab_cache; in __kmem_obj_info() local
4660 kpp->kp_slab_cache = s; in __kmem_obj_info()
4664 objp = restore_red_left(s, objp0); in __kmem_obj_info()
4668 objnr = obj_to_index(s, slab, objp); in __kmem_obj_info()
4670 objp = base + s->size * objnr; in __kmem_obj_info()
4672 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size in __kmem_obj_info()
4673 || (objp - base) % s->size) || in __kmem_obj_info()
4674 !(s->flags & SLAB_STORE_USER)) in __kmem_obj_info()
4677 objp = fixup_red_left(s, objp); in __kmem_obj_info()
4678 trackp = get_track(s, objp, TRACK_ALLOC); in __kmem_obj_info()
4693 trackp = get_track(s, objp, TRACK_FREE); in __kmem_obj_info()
4750 struct kmem_cache *s; in __check_heap_object() local
4757 s = slab->slab_cache; in __check_heap_object()
4768 offset = (ptr - slab_address(slab)) % s->size; in __check_heap_object()
4771 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) { in __check_heap_object()
4772 if (offset < s->red_left_pad) in __check_heap_object()
4774 s->name, to_user, offset, n); in __check_heap_object()
4775 offset -= s->red_left_pad; in __check_heap_object()
4779 if (offset >= s->useroffset && in __check_heap_object()
4780 offset - s->useroffset <= s->usersize && in __check_heap_object()
4781 n <= s->useroffset - offset + s->usersize) in __check_heap_object()
4784 usercopy_abort("SLUB object", s->name, to_user, offset, n); in __check_heap_object()
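__check_heap_object() above enforces the hardened-usercopy whitelist: after compensating for the left redzone, the copied range must fall entirely inside the cache's usercopy window of usersize bytes starting at useroffset, otherwise the copy is aborted. A small sketch of just that predicate with a hypothetical 64-byte window at offset 16; the unsigned wrap-around in the last term behaves the same way as in the kernel check.

#include <stdbool.h>
#include <stdio.h>

/* The window test from __check_heap_object(): copying n bytes starting at
 * offset within the object is allowed only inside the usercopy region. */
static bool usercopy_ok(unsigned long offset, unsigned long n,
			unsigned long useroffset, unsigned long usersize)
{
	return offset >= useroffset &&
	       offset - useroffset <= usersize &&
	       n <= useroffset - offset + usersize;
}

int main(void)
{
	unsigned long useroffset = 16, usersize = 64;	/* hypothetical window */

	printf("%d\n", usercopy_ok(16, 64, useroffset, usersize));	/* 1: whole window  */
	printf("%d\n", usercopy_ok(32, 16, useroffset, usersize));	/* 1: inside        */
	printf("%d\n", usercopy_ok(8,  8,  useroffset, usersize));	/* 0: before start  */
	printf("%d\n", usercopy_ok(32, 64, useroffset, usersize));	/* 0: runs past end */
	return 0;
}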
4799 static int __kmem_cache_do_shrink(struct kmem_cache *s) in __kmem_cache_do_shrink() argument
4811 for_each_kmem_cache_node(s, node, n) { in __kmem_cache_do_shrink()
4836 dec_slabs_node(s, node, slab->objects); in __kmem_cache_do_shrink()
4852 free_slab(s, slab); in __kmem_cache_do_shrink()
4861 int __kmem_cache_shrink(struct kmem_cache *s) in __kmem_cache_shrink() argument
4863 flush_all(s); in __kmem_cache_shrink()
4864 return __kmem_cache_do_shrink(s); in __kmem_cache_shrink()
4869 struct kmem_cache *s; in slab_mem_going_offline_callback() local
4872 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_offline_callback()
4873 flush_all_cpus_locked(s); in slab_mem_going_offline_callback()
4874 __kmem_cache_do_shrink(s); in slab_mem_going_offline_callback()
4908 struct kmem_cache *s; in slab_mem_going_online_callback() local
4926 list_for_each_entry(s, &slab_caches, list) { in slab_mem_going_online_callback()
4931 if (get_node(s, nid)) in slab_mem_going_online_callback()
4944 s->node[nid] = n; in slab_mem_going_online_callback()
4996 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); in bootstrap() local
4999 memcpy(s, static_cache, kmem_cache->object_size); in bootstrap()
5006 __flush_cpu_slab(s, smp_processor_id()); in bootstrap()
5007 for_each_kmem_cache_node(s, node, n) { in bootstrap()
5011 p->slab_cache = s; in bootstrap()
5015 p->slab_cache = s; in bootstrap()
5018 list_add(&s->list, &slab_caches); in bootstrap()
5019 return s; in bootstrap()
5089 struct kmem_cache *s; in __kmem_cache_alias() local
5091 s = find_mergeable(size, align, flags, name, ctor); in __kmem_cache_alias()
5092 if (s) { in __kmem_cache_alias()
5093 if (sysfs_slab_alias(s, name)) in __kmem_cache_alias()
5096 s->refcount++; in __kmem_cache_alias()
5102 s->object_size = max(s->object_size, size); in __kmem_cache_alias()
5103 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *))); in __kmem_cache_alias()
5106 return s; in __kmem_cache_alias()
5109 int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) in __kmem_cache_create() argument
5113 err = kmem_cache_open(s, flags); in __kmem_cache_create()
5121 err = sysfs_slab_add(s); in __kmem_cache_create()
5123 __kmem_cache_release(s); in __kmem_cache_create()
5127 if (s->flags & SLAB_STORE_USER) in __kmem_cache_create()
5128 debugfs_slab_add(s); in __kmem_cache_create()
5146 static void validate_slab(struct kmem_cache *s, struct slab *slab, in validate_slab() argument
5152 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL)) in validate_slab()
5156 __fill_map(obj_map, s, slab); in validate_slab()
5157 for_each_object(p, s, addr, slab->objects) { in validate_slab()
5158 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ? in validate_slab()
5161 if (!check_object(s, slab, p, val)) in validate_slab()
5166 static int validate_slab_node(struct kmem_cache *s, in validate_slab_node() argument
5176 validate_slab(s, slab, obj_map); in validate_slab_node()
5181 s->name, count, n->nr_partial); in validate_slab_node()
5185 if (!(s->flags & SLAB_STORE_USER)) in validate_slab_node()
5189 validate_slab(s, slab, obj_map); in validate_slab_node()
5194 s->name, count, node_nr_slabs(n)); in validate_slab_node()
5203 long validate_slab_cache(struct kmem_cache *s) in validate_slab_cache() argument
5210 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in validate_slab_cache()
5214 flush_all(s); in validate_slab_cache()
5215 for_each_kmem_cache_node(s, node, n) in validate_slab_cache()
5216 count += validate_slab_node(s, n, obj_map); in validate_slab_cache()
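
The validate path flushes per-CPU slabs, then for every slab on every node it checks the slab and its freelist, fills a bitmap of which object indexes are free (__fill_map), and verifies each object against the state the bitmap predicts (check_object), comparing the counts against nr_partial and node_nr_slabs. A toy userspace version of the "mark the freelist, then sweep every slot" idea, with invented helpers and an 'A'/'F' encoding standing in for object state:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NR_OBJECTS 8

/*
 * Sketch: objects[] records what each slot actually is ('F' free,
 * 'A' allocated); freelist_idx[]/nr_free describe the freelist.
 * Validation marks every freelist entry in a bitmap and then checks
 * that the per-slot state agrees with it, as validate_slab() does
 * with __fill_map() and check_object().
 */
static bool validate_toy_slab(const char objects[NR_OBJECTS],
                              const unsigned int *freelist_idx,
                              unsigned int nr_free)
{
        bool is_free[NR_OBJECTS];
        unsigned int i;

        memset(is_free, 0, sizeof(is_free));
        for (i = 0; i < nr_free; i++)
                is_free[freelist_idx[i]] = true;        /* cf. __fill_map() */

        for (i = 0; i < NR_OBJECTS; i++) {
                char expected = is_free[i] ? 'F' : 'A';

                if (objects[i] != expected) {
                        printf("slot %u: expected %c, found %c\n",
                               i, expected, objects[i]);
                        return false;
                }
        }
        return true;
}

int main(void)
{
        char good[NR_OBJECTS] = { 'A', 'F', 'A', 'A', 'F', 'A', 'A', 'A' };
        char bad[NR_OBJECTS]  = { 'A', 'A', 'A', 'A', 'F', 'A', 'A', 'A' };
        unsigned int freelist[] = { 1, 4 };

        printf("good: %d\n", validate_toy_slab(good, freelist, 2)); /* 1 */
        printf("bad:  %d\n", validate_toy_slab(bad, freelist, 2));  /* 0 */
        return 0;
}
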
5280 static int add_location(struct loc_track *t, struct kmem_cache *s, in add_location() argument
5289 unsigned int waste = s->object_size - orig_size; in add_location()
5372 static void process_slab(struct loc_track *t, struct kmem_cache *s, in process_slab() argument
5380 __fill_map(obj_map, s, slab); in process_slab()
5382 for_each_object(p, s, addr, slab->objects) in process_slab()
5383 if (!test_bit(__obj_to_index(s, addr, p), obj_map)) in process_slab()
5384 add_location(t, s, get_track(s, p, alloc), in process_slab()
5385 is_alloc ? get_orig_size(s, p) : in process_slab()
5386 s->object_size); in process_slab()
5406 static ssize_t show_slab_objects(struct kmem_cache *s, in show_slab_objects() argument
5423 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
5475 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
5492 for_each_kmem_cache_node(s, node, n) { in show_slab_objects()
5523 ssize_t (*show)(struct kmem_cache *s, char *buf);
5524 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
5533 static ssize_t slab_size_show(struct kmem_cache *s, char *buf) in slab_size_show() argument
5535 return sysfs_emit(buf, "%u\n", s->size); in slab_size_show()
5539 static ssize_t align_show(struct kmem_cache *s, char *buf) in align_show() argument
5541 return sysfs_emit(buf, "%u\n", s->align); in align_show()
5545 static ssize_t object_size_show(struct kmem_cache *s, char *buf) in object_size_show() argument
5547 return sysfs_emit(buf, "%u\n", s->object_size); in object_size_show()
5551 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf) in objs_per_slab_show() argument
5553 return sysfs_emit(buf, "%u\n", oo_objects(s->oo)); in objs_per_slab_show()
5557 static ssize_t order_show(struct kmem_cache *s, char *buf) in order_show() argument
5559 return sysfs_emit(buf, "%u\n", oo_order(s->oo)); in order_show()
5563 static ssize_t min_partial_show(struct kmem_cache *s, char *buf) in min_partial_show() argument
5565 return sysfs_emit(buf, "%lu\n", s->min_partial); in min_partial_show()
5568 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf, in min_partial_store() argument
5578 s->min_partial = min; in min_partial_store()
5583 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf) in cpu_partial_show() argument
5587 nr_partial = s->cpu_partial; in cpu_partial_show()
5593 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf, in cpu_partial_store() argument
5602 if (objects && !kmem_cache_has_cpu_partial(s)) in cpu_partial_store()
5605 slub_set_cpu_partial(s, objects); in cpu_partial_store()
5606 flush_all(s); in cpu_partial_store()
5611 static ssize_t ctor_show(struct kmem_cache *s, char *buf) in ctor_show() argument
5613 if (!s->ctor) in ctor_show()
5615 return sysfs_emit(buf, "%pS\n", s->ctor); in ctor_show()
5619 static ssize_t aliases_show(struct kmem_cache *s, char *buf) in aliases_show() argument
5621 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1); in aliases_show()
5625 static ssize_t partial_show(struct kmem_cache *s, char *buf) in partial_show() argument
5627 return show_slab_objects(s, buf, SO_PARTIAL); in partial_show()
5631 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf) in cpu_slabs_show() argument
5633 return show_slab_objects(s, buf, SO_CPU); in cpu_slabs_show()
5637 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf) in objects_partial_show() argument
5639 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS); in objects_partial_show()
5643 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf) in slabs_cpu_partial_show() argument
5654 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5662 objects = (slabs * oo_objects(s->oo)) / 2; in slabs_cpu_partial_show()
5669 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu)); in slabs_cpu_partial_show()
5672 objects = (slabs * oo_objects(s->oo)) / 2; in slabs_cpu_partial_show()
5684 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf) in reclaim_account_show() argument
5686 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT)); in reclaim_account_show()
5690 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf) in hwcache_align_show() argument
5692 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN)); in hwcache_align_show()
5697 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) in cache_dma_show() argument
5699 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA)); in cache_dma_show()
5705 static ssize_t usersize_show(struct kmem_cache *s, char *buf) in usersize_show() argument
5707 return sysfs_emit(buf, "%u\n", s->usersize); in usersize_show()
5712 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) in destroy_by_rcu_show() argument
5714 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); in destroy_by_rcu_show()
5719 static ssize_t slabs_show(struct kmem_cache *s, char *buf) in slabs_show() argument
5721 return show_slab_objects(s, buf, SO_ALL); in slabs_show()
5725 static ssize_t total_objects_show(struct kmem_cache *s, char *buf) in total_objects_show() argument
5727 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL); in total_objects_show()
5731 static ssize_t objects_show(struct kmem_cache *s, char *buf) in objects_show() argument
5733 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS); in objects_show()
5737 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf) in sanity_checks_show() argument
5739 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS)); in sanity_checks_show()
5743 static ssize_t trace_show(struct kmem_cache *s, char *buf) in trace_show() argument
5745 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE)); in trace_show()
5749 static ssize_t red_zone_show(struct kmem_cache *s, char *buf) in red_zone_show() argument
5751 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE)); in red_zone_show()
5756 static ssize_t poison_show(struct kmem_cache *s, char *buf) in poison_show() argument
5758 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON)); in poison_show()
5763 static ssize_t store_user_show(struct kmem_cache *s, char *buf) in store_user_show() argument
5765 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER)); in store_user_show()
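
From slab_size_show() onward the listing is the sysfs attribute table: each attribute provides a show (and optionally a store) callback taking the kmem_cache and a buffer, and the flag-backed attributes reduce to emitting "!!(s->flags & SLAB_<FLAG>)" through sysfs_emit(). The same pattern as a userspace sketch, with snprintf standing in for sysfs_emit() and invented flag names:

#include <stdio.h>

/* Invented flag bits standing in for SLAB_* cache flags. */
#define TOY_RED_ZONE    0x1u
#define TOY_POISON      0x2u
#define TOY_STORE_USER  0x4u

struct toy_cache {
        unsigned int flags;
};

/*
 * Same shape as the *_show() callbacks above: format a single "0\n"
 * or "1\n" into the caller's buffer and return the byte count.
 */
static int flag_show(const struct toy_cache *s, char *buf, size_t len,
                     unsigned int flag)
{
        return snprintf(buf, len, "%d\n", !!(s->flags & flag));
}

int main(void)
{
        struct toy_cache c = { .flags = TOY_RED_ZONE | TOY_STORE_USER };
        char buf[16];

        flag_show(&c, buf, sizeof(buf), TOY_POISON);
        printf("poison:     %s", buf);  /* 0 */
        flag_show(&c, buf, sizeof(buf), TOY_STORE_USER);
        printf("store_user: %s", buf);  /* 1 */
        return 0;
}
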
5770 static ssize_t validate_show(struct kmem_cache *s, char *buf) in validate_show() argument
5775 static ssize_t validate_store(struct kmem_cache *s, in validate_store() argument
5780 if (buf[0] == '1' && kmem_cache_debug(s)) { in validate_store()
5781 ret = validate_slab_cache(s); in validate_store()
5792 static ssize_t failslab_show(struct kmem_cache *s, char *buf) in failslab_show() argument
5794 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB)); in failslab_show()
5797 static ssize_t failslab_store(struct kmem_cache *s, const char *buf, in failslab_store() argument
5800 if (s->refcount > 1) in failslab_store()
5804 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB); in failslab_store()
5806 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB); in failslab_store()
5813 static ssize_t shrink_show(struct kmem_cache *s, char *buf) in shrink_show() argument
5818 static ssize_t shrink_store(struct kmem_cache *s, in shrink_store() argument
5822 kmem_cache_shrink(s); in shrink_store()
5830 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf) in remote_node_defrag_ratio_show() argument
5832 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10); in remote_node_defrag_ratio_show()
5835 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, in remote_node_defrag_ratio_store() argument
5847 s->remote_node_defrag_ratio = ratio * 10; in remote_node_defrag_ratio_store()
5855 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si) in show_stat() argument
5866 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5887 static void clear_stat(struct kmem_cache *s, enum stat_item si) in clear_stat() argument
5892 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
5896 static ssize_t text##_show(struct kmem_cache *s, char *buf) \
5898 return show_stat(s, buf, si); \
5900 static ssize_t text##_store(struct kmem_cache *s, \
5905 clear_stat(s, si); \
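
The text##_show/text##_store lines are the expansion body of the statistics macro (STAT_ATTR() in slub.c): for every stat_item it token-pastes a show callback that reports the summed per-CPU counters via show_stat() and a store callback that clears them via clear_stat(). A self-contained userspace version of that generate-two-functions-per-item trick, with invented counter names:

#include <stdio.h>

#define NR_CPUS 4

enum stat_item { ALLOC_FAST, ALLOC_SLOW, NR_STATS };

/* Toy "per-CPU" counters: one row per CPU, one column per item. */
static unsigned int cpu_stat[NR_CPUS][NR_STATS];

static unsigned int show_stat(enum stat_item si)
{
        unsigned int sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                sum += cpu_stat[cpu][si];       /* cf. summing the per-CPU stat[si] */
        return sum;
}

static void clear_stat(enum stat_item si)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                cpu_stat[cpu][si] = 0;
}

/* Token-pasting macro in the spirit of the kernel's STAT_ATTR(si, text). */
#define STAT_ATTR(si, text)                             \
static unsigned int text##_show(void)                  \
{                                                       \
        return show_stat(si);                           \
}                                                       \
static void text##_store(void)                          \
{                                                       \
        clear_stat(si);                                 \
}

STAT_ATTR(ALLOC_FAST, alloc_fast)
STAT_ATTR(ALLOC_SLOW, alloc_slow)

int main(void)
{
        cpu_stat[0][ALLOC_FAST] = 5;
        cpu_stat[3][ALLOC_FAST] = 7;
        cpu_stat[1][ALLOC_SLOW] = 2;

        printf("alloc_fast=%u alloc_slow=%u\n",
               alloc_fast_show(), alloc_slow_show());   /* 12 2 */
        alloc_fast_store();                             /* writing clears the counters */
        printf("alloc_fast=%u\n", alloc_fast_show());   /* 0 */
        (void)alloc_slow_store;                         /* silence unused-function warning */
        return 0;
}
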
5939 static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf) in skip_kfence_show() argument
5941 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE)); in skip_kfence_show()
5944 static ssize_t skip_kfence_store(struct kmem_cache *s, in skip_kfence_store() argument
5950 s->flags &= ~SLAB_SKIP_KFENCE; in skip_kfence_store()
5952 s->flags |= SLAB_SKIP_KFENCE; in skip_kfence_store()
6046 struct kmem_cache *s; in slab_attr_show() local
6049 s = to_slab(kobj); in slab_attr_show()
6054 return attribute->show(s, buf); in slab_attr_show()
6062 struct kmem_cache *s; in slab_attr_store() local
6065 s = to_slab(kobj); in slab_attr_store()
6070 return attribute->store(s, buf, len); in slab_attr_store()
6090 static inline struct kset *cache_kset(struct kmem_cache *s) in cache_kset() argument
6101 static char *create_unique_id(struct kmem_cache *s) in create_unique_id() argument
6117 if (s->flags & SLAB_CACHE_DMA) in create_unique_id()
6119 if (s->flags & SLAB_CACHE_DMA32) in create_unique_id()
6121 if (s->flags & SLAB_RECLAIM_ACCOUNT) in create_unique_id()
6123 if (s->flags & SLAB_CONSISTENCY_CHECKS) in create_unique_id()
6125 if (s->flags & SLAB_ACCOUNT) in create_unique_id()
6129 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size); in create_unique_id()
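
create_unique_id() builds the sysfs directory name for a mergeable cache: a ':' prefix, one character per relevant flag (DMA, DMA32, reclaim accounting, consistency checks, memcg accounting), a separator, and the object size as a zero-padded decimal, all bounded by ID_STR_LENGTH. A simplified userspace sketch of that string construction; the specific flag letters and the unconditional '-' are illustrative rather than a faithful copy of the kernel routine.

#include <stdio.h>

/* Invented flag bits standing in for the SLAB_* flags tested above. */
#define TOY_CACHE_DMA           0x01u
#define TOY_CACHE_DMA32         0x02u
#define TOY_RECLAIM_ACCOUNT     0x04u
#define TOY_CONSISTENCY_CHECKS  0x08u
#define TOY_ACCOUNT             0x10u

#define ID_STR_LENGTH 32

/*
 * Sketch of the unique-id construction: ':' + one letter per set flag
 * + '-' + zero-padded object size, bounded by the buffer length.
 */
static void build_unique_id(char *name, unsigned int flags, unsigned int size)
{
        char *p = name;

        *p++ = ':';
        if (flags & TOY_CACHE_DMA)
                *p++ = 'd';
        if (flags & TOY_CACHE_DMA32)
                *p++ = 'D';
        if (flags & TOY_RECLAIM_ACCOUNT)
                *p++ = 'a';
        if (flags & TOY_CONSISTENCY_CHECKS)
                *p++ = 'F';
        if (flags & TOY_ACCOUNT)
                *p++ = 'A';
        *p++ = '-';
        snprintf(p, ID_STR_LENGTH - (p - name), "%07u", size);
}

int main(void)
{
        char name[ID_STR_LENGTH];

        build_unique_id(name, TOY_RECLAIM_ACCOUNT | TOY_ACCOUNT, 192);
        printf("%s\n", name);   /* :aA-0000192 */
        return 0;
}
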
6139 static int sysfs_slab_add(struct kmem_cache *s) in sysfs_slab_add() argument
6143 struct kset *kset = cache_kset(s); in sysfs_slab_add()
6144 int unmergeable = slab_unmergeable(s); in sysfs_slab_add()
6156 sysfs_remove_link(&slab_kset->kobj, s->name); in sysfs_slab_add()
6157 name = s->name; in sysfs_slab_add()
6163 name = create_unique_id(s); in sysfs_slab_add()
6168 s->kobj.kset = kset; in sysfs_slab_add()
6169 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name); in sysfs_slab_add()
6173 err = sysfs_create_group(&s->kobj, &slab_attr_group); in sysfs_slab_add()
6179 sysfs_slab_alias(s, s->name); in sysfs_slab_add()
6186 kobject_del(&s->kobj); in sysfs_slab_add()
6190 void sysfs_slab_unlink(struct kmem_cache *s) in sysfs_slab_unlink() argument
6193 kobject_del(&s->kobj); in sysfs_slab_unlink()
6196 void sysfs_slab_release(struct kmem_cache *s) in sysfs_slab_release() argument
6199 kobject_put(&s->kobj); in sysfs_slab_release()
6207 struct kmem_cache *s; member
6214 static int sysfs_slab_alias(struct kmem_cache *s, const char *name) in sysfs_slab_alias() argument
6223 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name); in sysfs_slab_alias()
6230 al->s = s; in sysfs_slab_alias()
6240 struct kmem_cache *s; in slab_sysfs_init() local
6254 list_for_each_entry(s, &slab_caches, list) { in slab_sysfs_init()
6255 err = sysfs_slab_add(s); in slab_sysfs_init()
6258 s->name); in slab_sysfs_init()
6265 err = sysfs_slab_alias(al->s, al->name); in slab_sysfs_init()
6394 struct kmem_cache *s = file_inode(filep)->i_private; in slab_debug_trace_open() local
6400 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL); in slab_debug_trace_open()
6417 for_each_kmem_cache_node(s, node, n) { in slab_debug_trace_open()
6426 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()
6428 process_slab(t, s, slab, alloc, obj_map); in slab_debug_trace_open()
6456 static void debugfs_slab_add(struct kmem_cache *s) in debugfs_slab_add() argument
6463 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root); in debugfs_slab_add()
6466 slab_cache_dir, s, &slab_debugfs_fops); in debugfs_slab_add()
6469 slab_cache_dir, s, &slab_debugfs_fops); in debugfs_slab_add()
6472 void debugfs_slab_release(struct kmem_cache *s) in debugfs_slab_release() argument
6474 debugfs_lookup_and_remove(s->name, slab_debugfs_root); in debugfs_slab_release()
6479 struct kmem_cache *s; in slab_debugfs_init() local
6483 list_for_each_entry(s, &slab_caches, list) in slab_debugfs_init()
6484 if (s->flags & SLAB_STORE_USER) in slab_debugfs_init()
6485 debugfs_slab_add(s); in slab_debugfs_init()
6496 void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo) in get_slabinfo() argument
6504 for_each_kmem_cache_node(s, node, n) { in get_slabinfo()
6514 sinfo->objects_per_slab = oo_objects(s->oo); in get_slabinfo()
6515 sinfo->cache_order = oo_order(s->oo); in get_slabinfo()
6518 void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s) in slabinfo_show_stats() argument