Lines matching refs:class in mm/zsmalloc.c (per-match source line numbers shown)
258 unsigned int class:CLASS_BITS + 1; member
502 *class_idx = zspage->class; in get_zspage_mapping()
508 return pool->size_class[zspage->class]; in zspage_class()
515 zspage->class = class_idx; in set_zspage_mapping()
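The three matches above show how a zspage records its size class: the class bitfield indexes pool->size_class[], and a pair of accessors read and write it. A minimal sketch, assuming the companion fullness bitfield that the accessors also touch (only the class accesses are visible in the matches):

static void get_zspage_mapping(struct zspage *zspage,
                               unsigned int *class_idx,
                               enum fullness_group *fullness)
{
        *class_idx = zspage->class;             /* source line 502 */
        *fullness = zspage->fullness;           /* assumed companion field */
}

static struct size_class *zspage_class(struct zs_pool *pool,
                                       struct zspage *zspage)
{
        return pool->size_class[zspage->class]; /* source line 508 */
}

static void set_zspage_mapping(struct zspage *zspage,
                               unsigned int class_idx,
                               enum fullness_group fullness)
{
        zspage->class = class_idx;              /* source line 515 */
        zspage->fullness = fullness;            /* assumed companion field */
}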
538 static inline void class_stat_inc(struct size_class *class, in class_stat_inc() argument
541 class->stats.objs[type] += cnt; in class_stat_inc()
545 static inline void class_stat_dec(struct size_class *class, in class_stat_dec() argument
548 class->stats.objs[type] -= cnt; in class_stat_dec()
552 static inline unsigned long zs_stat_get(struct size_class *class, in zs_stat_get() argument
555 return class->stats.objs[type]; in zs_stat_get()
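The stat helpers above are thin, lock-protected wrappers over a per-class counter array. A self-contained model; the enum values come from the zs_stats_size_show() matches below, while NR_ZS_STAT_TYPE as the array-size terminator is an assumption:

enum zs_stat_type {
        CLASS_ALMOST_FULL,
        CLASS_ALMOST_EMPTY,
        OBJ_ALLOCATED,
        OBJ_USED,
        NR_ZS_STAT_TYPE,        /* assumed terminator */
};

struct zs_size_stat {
        unsigned long objs[NR_ZS_STAT_TYPE];
};

struct size_class {
        struct zs_size_stat stats;
        /* size, index, objs_per_zspage, lock, fullness lists, ... */
};

/* Callers hold class->lock, so plain additions suffice. */
static inline void class_stat_inc(struct size_class *class,
                                  int type, unsigned long cnt)
{
        class->stats.objs[type] += cnt;
}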
575 static unsigned long zs_can_compact(struct size_class *class);
581 struct size_class *class; in zs_stats_size_show() local
595 class = pool->size_class[i]; in zs_stats_size_show()
597 if (class->index != i) in zs_stats_size_show()
600 spin_lock(&class->lock); in zs_stats_size_show()
601 class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); in zs_stats_size_show()
602 class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); in zs_stats_size_show()
603 obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_stats_size_show()
604 obj_used = zs_stat_get(class, OBJ_USED); in zs_stats_size_show()
605 freeable = zs_can_compact(class); in zs_stats_size_show()
606 spin_unlock(&class->lock); in zs_stats_size_show()
608 objs_per_zspage = class->objs_per_zspage; in zs_stats_size_show()
610 class->pages_per_zspage; in zs_stats_size_show()
614 i, class->size, class_almost_full, class_almost_empty, in zs_stats_size_show()
616 class->pages_per_zspage, freeable); in zs_stats_size_show()
680 static enum fullness_group get_fullness_group(struct size_class *class, in get_fullness_group() argument
687 objs_per_zspage = class->objs_per_zspage; in get_fullness_group()
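get_fullness_group() buckets a zspage by how many of its objects are in use; get_zspage_inuse() is confirmed by the zspage_full() match further down. A sketch; the 3/4 boundary between the two ALMOST groups is recalled from the kernel's fullness_threshold_frac and should be treated as an assumption:

static enum fullness_group get_fullness_group(struct size_class *class,
                                              struct zspage *zspage)
{
        int inuse = get_zspage_inuse(zspage);
        int objs_per_zspage = class->objs_per_zspage;   /* source line 687 */

        if (inuse == 0)
                return ZS_EMPTY;
        if (inuse == objs_per_zspage)
                return ZS_FULL;
        if (inuse <= 3 * objs_per_zspage / 4)           /* assumed threshold */
                return ZS_ALMOST_EMPTY;
        return ZS_ALMOST_FULL;
}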
707 static void insert_zspage(struct size_class *class, in insert_zspage() argument
713 class_stat_inc(class, fullness, 1); in insert_zspage()
714 head = list_first_entry_or_null(&class->fullness_list[fullness], in insert_zspage()
723 list_add(&zspage->list, &class->fullness_list[fullness]); in insert_zspage()
730 static void remove_zspage(struct size_class *class, in remove_zspage() argument
734 VM_BUG_ON(list_empty(&class->fullness_list[fullness])); in remove_zspage()
737 class_stat_dec(class, fullness, 1); in remove_zspage()
749 static enum fullness_group fix_fullness_group(struct size_class *class, in fix_fullness_group() argument
756 newfg = get_fullness_group(class, zspage); in fix_fullness_group()
760 remove_zspage(class, zspage, currfg); in fix_fullness_group()
761 insert_zspage(class, zspage, newfg); in fix_fullness_group()
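fix_fullness_group() re-files a zspage when an allocation or free moves it between buckets: compute the new group, and only if it differs from the current one, unlink from the old list and insert into the new. Reconstructed from the remove/insert pair above; the closing set_zspage_mapping() call is inferred from the accessors at the top of the listing:

static enum fullness_group fix_fullness_group(struct size_class *class,
                                              struct zspage *zspage)
{
        unsigned int class_idx;
        enum fullness_group currfg, newfg;

        get_zspage_mapping(zspage, &class_idx, &currfg);
        newfg = get_fullness_group(class, zspage);      /* source line 756 */
        if (newfg == currfg)
                return newfg;

        remove_zspage(class, zspage, currfg);           /* source line 760 */
        insert_zspage(class, zspage, newfg);            /* source line 761 */
        set_zspage_mapping(zspage, class_idx, newfg);   /* inferred */
        return newfg;
}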
910 static void __free_zspage(struct zs_pool *pool, struct size_class *class, in __free_zspage() argument
919 assert_spin_locked(&class->lock); in __free_zspage()
937 class_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage); in __free_zspage()
938 atomic_long_sub(class->pages_per_zspage, in __free_zspage()
942 static void free_zspage(struct zs_pool *pool, struct size_class *class, in free_zspage() argument
958 remove_zspage(class, zspage, ZS_EMPTY); in free_zspage()
959 __free_zspage(pool, class, zspage); in free_zspage()
963 static void init_zspage(struct size_class *class, struct zspage *zspage) in init_zspage() argument
979 while ((off += class->size) < PAGE_SIZE) { in init_zspage()
981 link += class->size / sizeof(*link); in init_zspage()
1007 static void create_page_chain(struct size_class *class, struct zspage *zspage, in create_page_chain() argument
1013 int nr_pages = class->pages_per_zspage; in create_page_chain()
1030 if (unlikely(class->objs_per_zspage == 1 && in create_page_chain()
1031 class->pages_per_zspage == 1)) in create_page_chain()
1044 struct size_class *class, in alloc_zspage() argument
1057 for (i = 0; i < class->pages_per_zspage; i++) { in alloc_zspage()
1074 create_page_chain(class, zspage, pages); in alloc_zspage()
1075 init_zspage(class, zspage); in alloc_zspage()
1081 static struct zspage *find_get_zspage(struct size_class *class) in find_get_zspage() argument
1087 zspage = list_first_entry_or_null(&class->fullness_list[i], in find_get_zspage()
1203 static bool zspage_full(struct size_class *class, struct zspage *zspage) in zspage_full() argument
1205 return get_zspage_inuse(zspage) == class->objs_per_zspage; in zspage_full()
1237 struct size_class *class; in zs_map_object() local
1264 class = zspage_class(pool, zspage); in zs_map_object()
1265 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_map_object()
1270 if (off + class->size <= PAGE_SIZE) { in zs_map_object()
1282 ret = __zs_map_object(area, pages, off, class->size); in zs_map_object()
1298 struct size_class *class; in zs_unmap_object() local
1304 class = zspage_class(pool, zspage); in zs_unmap_object()
1305 off = (class->size * obj_idx) & ~PAGE_MASK; in zs_unmap_object()
1308 if (off + class->size <= PAGE_SIZE) in zs_unmap_object()
1317 __zs_unmap_object(area, pages, off, class->size); in zs_unmap_object()
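Both zs_map_object() and zs_unmap_object() locate an object with (class->size * obj_idx) & ~PAGE_MASK and take a slower copy path when the object straddles a page boundary. A runnable userspace illustration of that arithmetic; the size and index values are made up:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long size = 176;       /* hypothetical class->size */
        unsigned long obj_idx = 70;     /* hypothetical object index */
        unsigned long off = (size * obj_idx) & ~PAGE_MASK;

        if (off + size <= PAGE_SIZE)
                printf("fits in one page at offset %lu\n", off);
        else
                printf("straddles a page boundary at offset %lu\n", off);
        return 0;
}

Here off comes out to 4032, so the 176-byte object crosses into the next page and the access would go through the per-CPU copy buffer rather than a direct mapping.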
1350 struct size_class *class; in obj_malloc() local
1356 class = pool->size_class[zspage->class]; in obj_malloc()
1360 offset = obj * class->size; in obj_malloc()
1400 struct size_class *class; in zs_malloc() local
1413 class = pool->size_class[get_size_class_index(size)]; in zs_malloc()
1416 spin_lock(&class->lock); in zs_malloc()
1417 zspage = find_get_zspage(class); in zs_malloc()
1421 fix_fullness_group(class, zspage); in zs_malloc()
1423 class_stat_inc(class, OBJ_USED, 1); in zs_malloc()
1424 spin_unlock(&class->lock); in zs_malloc()
1429 spin_unlock(&class->lock); in zs_malloc()
1431 zspage = alloc_zspage(pool, class, gfp); in zs_malloc()
1437 spin_lock(&class->lock); in zs_malloc()
1439 newfg = get_fullness_group(class, zspage); in zs_malloc()
1440 insert_zspage(class, zspage, newfg); in zs_malloc()
1441 set_zspage_mapping(zspage, class->index, newfg); in zs_malloc()
1443 atomic_long_add(class->pages_per_zspage, in zs_malloc()
1445 class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage); in zs_malloc()
1446 class_stat_inc(class, OBJ_USED, 1); in zs_malloc()
1450 spin_unlock(&class->lock); in zs_malloc()
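Pieced together from the matches above, zs_malloc() first tries an existing zspage with free slots under class->lock, and only drops the lock to allocate a fresh zspage when none is found. A condensed sketch; handle management and error paths are elided, and pool->pages_allocated completes the truncated atomic_long_add() match by assumption:

        spin_lock(&class->lock);
        zspage = find_get_zspage(class);                /* fast path */
        if (zspage) {
                obj = obj_malloc(pool, zspage, handle);
                fix_fullness_group(class, zspage);
                class_stat_inc(class, OBJ_USED, 1);
                spin_unlock(&class->lock);
                return obj;
        }
        spin_unlock(&class->lock);      /* page allocation may sleep */

        zspage = alloc_zspage(pool, class, gfp);
        spin_lock(&class->lock);
        obj = obj_malloc(pool, zspage, handle);
        newfg = get_fullness_group(class, zspage);
        insert_zspage(class, zspage, newfg);
        set_zspage_mapping(zspage, class->index, newfg);
        atomic_long_add(class->pages_per_zspage,
                        &pool->pages_allocated);        /* assumed counter */
        class_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
        class_stat_inc(class, OBJ_USED, 1);
        spin_unlock(&class->lock);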
1487 struct size_class *class; in zs_free() local
1501 class = zspage_class(pool, zspage); in zs_free()
1502 spin_lock(&class->lock); in zs_free()
1505 obj_free(class->size, obj); in zs_free()
1506 class_stat_dec(class, OBJ_USED, 1); in zs_free()
1507 fullness = fix_fullness_group(class, zspage); in zs_free()
1511 free_zspage(pool, class, zspage); in zs_free()
1513 spin_unlock(&class->lock); in zs_free()
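zs_free() is the mirror image: release the slot, drop OBJ_USED, re-file the zspage, and free it outright if it just became empty. A condensed sketch with the handle lookup and migration locking elided; the ZS_EMPTY test is inferred from the free_zspage() call at source line 1511:

        class = zspage_class(pool, zspage);
        spin_lock(&class->lock);

        obj_free(class->size, obj);
        class_stat_dec(class, OBJ_USED, 1);
        fullness = fix_fullness_group(class, zspage);
        if (fullness == ZS_EMPTY)
                free_zspage(pool, class, zspage);

        spin_unlock(&class->lock);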
1518 static void zs_object_copy(struct size_class *class, unsigned long dst, in zs_object_copy() argument
1528 s_size = d_size = class->size; in zs_object_copy()
1533 s_off = (class->size * s_objidx) & ~PAGE_MASK; in zs_object_copy()
1534 d_off = (class->size * d_objidx) & ~PAGE_MASK; in zs_object_copy()
1536 if (s_off + class->size > PAGE_SIZE) in zs_object_copy()
1539 if (d_off + class->size > PAGE_SIZE) in zs_object_copy()
1550 if (written == class->size) in zs_object_copy()
1571 s_size = class->size - written; in zs_object_copy()
1579 d_size = class->size - written; in zs_object_copy()
1592 static unsigned long find_alloced_obj(struct size_class *class, in find_alloced_obj() argument
1601 offset += class->size * index; in find_alloced_obj()
1607 offset += class->size; in find_alloced_obj()
1629 static int migrate_zspage(struct zs_pool *pool, struct size_class *class, in migrate_zspage() argument
1640 handle = find_alloced_obj(class, s_page, &obj_idx); in migrate_zspage()
1650 if (zspage_full(class, get_zspage(d_page))) { in migrate_zspage()
1657 zs_object_copy(class, free_obj, used_obj); in migrate_zspage()
1660 obj_free(class->size, used_obj); in migrate_zspage()
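migrate_zspage() drains a source zspage into a destination: scan for the next live object, stop when the destination fills up, otherwise allocate a slot there, copy, and free the original. A sketch of the loop; helpers not among the matches (get_next_page, handle_to_obj, record_obj) are recalled from the kernel source and should be treated as assumptions:

        while (1) {
                handle = find_alloced_obj(class, s_page, &obj_idx);
                if (!handle) {
                        s_page = get_next_page(s_page); /* assumed helper */
                        if (!s_page)
                                break;          /* source fully drained */
                        obj_idx = 0;
                        continue;
                }

                if (zspage_full(class, get_zspage(d_page))) {
                        ret = -ENOMEM;          /* destination is full */
                        break;
                }

                used_obj = handle_to_obj(handle);               /* assumed */
                free_obj = obj_malloc(pool, get_zspage(d_page), handle);
                zs_object_copy(class, free_obj, used_obj);
                obj_idx++;
                record_obj(handle, free_obj);                   /* assumed */
                obj_free(class->size, used_obj);
        }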
1670 static struct zspage *isolate_zspage(struct size_class *class, bool source) in isolate_zspage() argument
1682 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]], in isolate_zspage()
1685 remove_zspage(class, zspage, fg[i]); in isolate_zspage()
1700 static enum fullness_group putback_zspage(struct size_class *class, in putback_zspage() argument
1705 fullness = get_fullness_group(class, zspage); in putback_zspage()
1706 insert_zspage(class, zspage, fullness); in putback_zspage()
1707 set_zspage_mapping(zspage, class->index, fullness); in putback_zspage()
1799 static void replace_sub_page(struct size_class *class, struct zspage *zspage, in replace_sub_page() argument
1815 create_page_chain(class, zspage, pages); in replace_sub_page()
1845 struct size_class *class; in zs_page_migrate() local
1874 class = zspage_class(pool, zspage); in zs_page_migrate()
1879 spin_lock(&class->lock); in zs_page_migrate()
1894 addr += class->size) { in zs_page_migrate()
1906 replace_sub_page(class, zspage, newpage, page); in zs_page_migrate()
1912 spin_unlock(&class->lock); in zs_page_migrate()
1954 struct size_class *class; in async_free_zspage() local
1963 class = pool->size_class[i]; in async_free_zspage()
1964 if (class->index != i) in async_free_zspage()
1967 spin_lock(&class->lock); in async_free_zspage()
1968 list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); in async_free_zspage()
1969 spin_unlock(&class->lock); in async_free_zspage()
1978 class = pool->size_class[class_idx]; in async_free_zspage()
1979 spin_lock(&class->lock); in async_free_zspage()
1980 __free_zspage(pool, class, zspage); in async_free_zspage()
1981 spin_unlock(&class->lock); in async_free_zspage()
2019 static unsigned long zs_can_compact(struct size_class *class) in zs_can_compact() argument
2022 unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); in zs_can_compact()
2023 unsigned long obj_used = zs_stat_get(class, OBJ_USED); in zs_can_compact()
2029 obj_wasted /= class->objs_per_zspage; in zs_can_compact()
2031 return obj_wasted * class->pages_per_zspage; in zs_can_compact()
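zs_can_compact() converts the allocated-versus-used gap into whole freeable zspages: leftover objects divided by objects per zspage, times pages per zspage. Reconstructed from the matches; the underflow guard is recalled from the kernel source and is an assumption here:

static unsigned long zs_can_compact(struct size_class *class)
{
        unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
        unsigned long obj_used = zs_stat_get(class, OBJ_USED);
        unsigned long obj_wasted;

        if (obj_allocated <= obj_used)          /* assumed race guard */
                return 0;

        obj_wasted = obj_allocated - obj_used;
        obj_wasted /= class->objs_per_zspage;   /* whole zspages only */

        return obj_wasted * class->pages_per_zspage;
}

Worked example: with objs_per_zspage = 28 and pages_per_zspage = 2, a class holding 280 allocated but only 150 used objects has (280 - 150) / 28 = 4 compactable zspages, i.e. 8 freeable pages.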
2035 struct size_class *class) in __zs_compact() argument
2045 spin_lock(&class->lock); in __zs_compact()
2046 while ((src_zspage = isolate_zspage(class, true))) { in __zs_compact()
2050 if (!zs_can_compact(class)) in __zs_compact()
2056 while ((dst_zspage = isolate_zspage(class, false))) { in __zs_compact()
2064 if (!migrate_zspage(pool, class, &cc)) in __zs_compact()
2067 putback_zspage(class, dst_zspage); in __zs_compact()
2078 putback_zspage(class, dst_zspage); in __zs_compact()
2081 if (putback_zspage(class, src_zspage) == ZS_EMPTY) { in __zs_compact()
2083 free_zspage(pool, class, src_zspage); in __zs_compact()
2084 pages_freed += class->pages_per_zspage; in __zs_compact()
2087 spin_unlock(&class->lock); in __zs_compact()
2091 spin_lock(&class->lock); in __zs_compact()
2095 putback_zspage(class, src_zspage); in __zs_compact()
2099 spin_unlock(&class->lock); in __zs_compact()
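__zs_compact() pairs nearly-empty source zspages with nearly-full destinations under class->lock, stopping once the class can no longer yield a whole zspage. A skeleton reconstructed from the matches; get_first_page() is an assumed helper, and the lock-drop/cond_resched() between iterations (source lines 2087-2091) is elided:

        spin_lock(&class->lock);
        while ((src_zspage = isolate_zspage(class, true))) {
                if (!zs_can_compact(class))
                        break;                  /* nothing left to reclaim */

                cc.obj_idx = 0;
                cc.s_page = get_first_page(src_zspage); /* assumed helper */

                while ((dst_zspage = isolate_zspage(class, false))) {
                        cc.d_page = get_first_page(dst_zspage);
                        if (!migrate_zspage(pool, class, &cc))
                                break;          /* source fully drained */
                        putback_zspage(class, dst_zspage);
                }

                if (!dst_zspage)
                        break;                  /* no destination slots */

                putback_zspage(class, dst_zspage);
                if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
                        free_zspage(pool, class, src_zspage);
                        pages_freed += class->pages_per_zspage;
                }
        }
        spin_unlock(&class->lock);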
2108 struct size_class *class; in zs_compact() local
2112 class = pool->size_class[i]; in zs_compact()
2113 if (class->index != i) in zs_compact()
2115 pages_freed += __zs_compact(pool, class); in zs_compact()
2150 struct size_class *class; in zs_shrinker_count() local
2156 class = pool->size_class[i]; in zs_shrinker_count()
2157 if (class->index != i) in zs_shrinker_count()
2160 pages_to_free += zs_can_compact(class); in zs_shrinker_count()
2220 struct size_class *class; in zs_create_pool() local
2266 class = kzalloc(sizeof(struct size_class), GFP_KERNEL); in zs_create_pool()
2267 if (!class) in zs_create_pool()
2270 class->size = size; in zs_create_pool()
2271 class->index = i; in zs_create_pool()
2272 class->pages_per_zspage = pages_per_zspage; in zs_create_pool()
2273 class->objs_per_zspage = objs_per_zspage; in zs_create_pool()
2274 spin_lock_init(&class->lock); in zs_create_pool()
2275 pool->size_class[i] = class; in zs_create_pool()
2278 INIT_LIST_HEAD(&class->fullness_list[fullness]); in zs_create_pool()
2280 prev_class = class; in zs_create_pool()
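The prev_class bookkeeping here exists so that adjacent size classes which would pack pages identically can share a single struct size_class; the aliased slots are what every iterator's class->index != i check skips. A sketch of the merge test, which sits at the top of the loop in the real code; the exact guard is an assumption:

        if (prev_class &&
            prev_class->pages_per_zspage == pages_per_zspage &&
            prev_class->objs_per_zspage == objs_per_zspage) {
                /* alias this slot; class->index keeps the first i */
                pool->size_class[i] = prev_class;
                continue;
        }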
2312 struct size_class *class = pool->size_class[i]; in zs_destroy_pool() local
2314 if (!class) in zs_destroy_pool()
2317 if (class->index != i) in zs_destroy_pool()
2321 if (!list_empty(&class->fullness_list[fg])) { in zs_destroy_pool()
2323 class->size, fg); in zs_destroy_pool()
2326 kfree(class); in zs_destroy_pool()