Lines matching refs: zhdr (uses of the struct z3fold_header pointer in the z3fold allocator, mm/z3fold.c)

240 static inline void z3fold_page_lock(struct z3fold_header *zhdr)  in z3fold_page_lock()  argument
242 spin_lock(&zhdr->page_lock); in z3fold_page_lock()
246 static inline int z3fold_page_trylock(struct z3fold_header *zhdr) in z3fold_page_trylock() argument
248 return spin_trylock(&zhdr->page_lock); in z3fold_page_trylock()
252 static inline void z3fold_page_unlock(struct z3fold_header *zhdr) in z3fold_page_unlock() argument
254 spin_unlock(&zhdr->page_lock); in z3fold_page_unlock()
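
The three helpers above wrap a spinlock embedded in the z3fold_header that sits at the start of each page, so two pages can be worked on concurrently without any pool-wide lock. A minimal userspace model of the pattern, using pthread spinlocks as a stand-in for the kernel's spin_lock() family (the struct layout is invented for this sketch; compile with -lpthread):

    #include <pthread.h>
    #include <stdbool.h>

    /* Stand-in for the header at the start of every z3fold page; only
     * the per-page lock matters for this sketch. */
    struct z3fold_header {
        pthread_spinlock_t page_lock;
        /* chunk counters, buddy list, refcount ... elided */
    };

    static void z3fold_page_lock(struct z3fold_header *zhdr)
    {
        pthread_spin_lock(&zhdr->page_lock);
    }

    static bool z3fold_page_trylock(struct z3fold_header *zhdr)
    {
        return pthread_spin_trylock(&zhdr->page_lock) == 0;
    }

    static void z3fold_page_unlock(struct z3fold_header *zhdr)
    {
        pthread_spin_unlock(&zhdr->page_lock);
    }

    int main(void)
    {
        struct z3fold_header zhdr;

        pthread_spin_init(&zhdr.page_lock, PTHREAD_PROCESS_PRIVATE);
        if (z3fold_page_trylock(&zhdr))
            z3fold_page_unlock(&zhdr);
        z3fold_page_lock(&zhdr);
        z3fold_page_unlock(&zhdr);
        pthread_spin_destroy(&zhdr.page_lock);
        return 0;
    }
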
261 struct z3fold_header *zhdr; in get_z3fold_header() local
271 zhdr = (struct z3fold_header *)(addr & PAGE_MASK); in get_z3fold_header()
272 locked = z3fold_page_trylock(zhdr); in get_z3fold_header()
275 struct page *page = virt_to_page(zhdr); in get_z3fold_header()
279 z3fold_page_unlock(zhdr); in get_z3fold_header()
284 zhdr = (struct z3fold_header *)(handle & PAGE_MASK); in get_z3fold_header()
287 return zhdr; in get_z3fold_header()
290 static inline void put_z3fold_header(struct z3fold_header *zhdr) in put_z3fold_header() argument
292 struct page *page = virt_to_page(zhdr); in put_z3fold_header()
295 z3fold_page_unlock(zhdr); in put_z3fold_header()
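
get_z3fold_header() turns a handle back into the header of the page it lives on, and put_z3fold_header() drops the page lock taken on the way in. Below is a minimal model of the address arithmetic only, assuming an LP64 target and 4 KiB pages; the real function also takes the slots read lock, trylocks the page, and retries while the page is under migration. handle_to_header() is a name invented for this sketch:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT    12                 /* 4 KiB pages (assumed) */
    #define PAGE_SIZE     (1UL << PAGE_SHIFT)
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define PAGE_HEADLESS 0      /* bit 0 marks a headless handle */

    /* A headless handle is the page address itself with bit 0 set; any
     * other handle is a pointer to a slot whose value encodes the page
     * address plus per-buddy bits.  Either way, masking with PAGE_MASK
     * lands on the z3fold_header at the start of the page. */
    static uintptr_t handle_to_header(uintptr_t handle)
    {
        uintptr_t addr = handle;

        if (!(handle & (1UL << PAGE_HEADLESS)))
            addr = *(uintptr_t *)handle;     /* follow the slot */

        return addr & PAGE_MASK;
    }

    int main(void)
    {
        uintptr_t page = 0x7f0000042000UL;   /* hypothetical page base */
        uintptr_t slot = page | 0x14;        /* encoded buddy handle */

        assert(handle_to_header((uintptr_t)&slot) == page);
        assert(handle_to_header(page | (1UL << PAGE_HEADLESS)) == page);
        return 0;
    }
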
298 static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr) in free_handle() argument
316 if (zhdr->slots != slots) in free_handle()
317 zhdr->foreign_handles--; in free_handle()
331 if (zhdr->slots == slots) in free_handle()
332 zhdr->slots = NULL; in free_handle()
389 struct z3fold_header *zhdr = page_address(page); in init_z3fold_page() local
400 return zhdr; in init_z3fold_page()
406 memset(zhdr, 0, sizeof(*zhdr)); in init_z3fold_page()
407 spin_lock_init(&zhdr->page_lock); in init_z3fold_page()
408 kref_init(&zhdr->refcount); in init_z3fold_page()
409 zhdr->cpu = -1; in init_z3fold_page()
410 zhdr->slots = slots; in init_z3fold_page()
411 zhdr->pool = pool; in init_z3fold_page()
412 INIT_LIST_HEAD(&zhdr->buddy); in init_z3fold_page()
413 INIT_WORK(&zhdr->work, compact_page_work); in init_z3fold_page()
414 return zhdr; in init_z3fold_page()
429 static inline int __idx(struct z3fold_header *zhdr, enum buddy bud) in __idx() argument
431 return (bud + zhdr->first_num) & BUDDY_MASK; in __idx()
438 static unsigned long __encode_handle(struct z3fold_header *zhdr, in __encode_handle() argument
442 unsigned long h = (unsigned long)zhdr; in __encode_handle()
453 idx = __idx(zhdr, bud); in __encode_handle()
456 h |= (zhdr->last_chunks << BUDDY_SHIFT); in __encode_handle()
464 static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud) in encode_handle() argument
466 return __encode_handle(zhdr, zhdr->slots, bud); in encode_handle()
488 struct z3fold_header *zhdr; in handle_to_buddy() local
496 zhdr = (struct z3fold_header *)(addr & PAGE_MASK); in handle_to_buddy()
497 return (addr - zhdr->first_num) & BUDDY_MASK; in handle_to_buddy()
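
__idx() rotates the buddy number by first_num so that, after compaction promotes the middle buddy to FIRST and bumps first_num, old handles still decode to the right buddy; __encode_handle() packs the page address, the LAST buddy's chunk count (above BUDDY_SHIFT), and that index into one word, and handle_to_buddy() undoes it. A runnable worked example of the round trip, with BUDDY_MASK and BUDDY_SHIFT as in mm/z3fold.c, an arbitrary page address, and 4 KiB pages assumed:

    #include <assert.h>

    #define BUDDY_MASK  0x3UL   /* low two bits: rotated buddy index */
    #define BUDDY_SHIFT 2       /* LAST's chunk count sits above them */
    #define OFFSET_MASK 0xfffUL /* in-page bits, 4 KiB pages assumed */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    /* __idx(): rotate the buddy number by first_num. */
    static int idx(int first_num, enum buddy bud)
    {
        return (int)(((unsigned)bud + (unsigned)first_num) & BUDDY_MASK);
    }

    int main(void)
    {
        unsigned long page_addr = 0x7f0000042000UL; /* page-aligned */
        int first_num = 1;         /* e.g. bumped once by compaction */
        unsigned long last_chunks = 5;

        /* encode, as __encode_handle() does for the LAST buddy */
        unsigned long addr = page_addr + idx(first_num, LAST);
        addr |= last_chunks << BUDDY_SHIFT;

        /* decode, as handle_to_buddy() does */
        assert(((addr - first_num) & BUDDY_MASK) == LAST);

        /* decode, as handle_to_chunks() does: the idx bits shift out */
        assert(((addr & OFFSET_MASK) >> BUDDY_SHIFT) == last_chunks);
        return 0;
    }
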
500 static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr) in zhdr_to_pool() argument
502 return zhdr->pool; in zhdr_to_pool()
505 static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) in __release_z3fold_page() argument
507 struct page *page = virt_to_page(zhdr); in __release_z3fold_page()
508 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in __release_z3fold_page()
510 WARN_ON(!list_empty(&zhdr->buddy)); in __release_z3fold_page()
519 z3fold_page_unlock(zhdr); in __release_z3fold_page()
522 list_add(&zhdr->buddy, &pool->stale); in __release_z3fold_page()
531 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, in release_z3fold_page_locked() local
533 WARN_ON(z3fold_page_trylock(zhdr)); in release_z3fold_page_locked()
534 __release_z3fold_page(zhdr, true); in release_z3fold_page_locked()
539 struct z3fold_header *zhdr = container_of(ref, struct z3fold_header, in release_z3fold_page_locked_list() local
541 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in release_z3fold_page_locked_list()
544 list_del_init(&zhdr->buddy); in release_z3fold_page_locked_list()
547 WARN_ON(z3fold_page_trylock(zhdr)); in release_z3fold_page_locked_list()
548 __release_z3fold_page(zhdr, true); in release_z3fold_page_locked_list()
557 struct z3fold_header *zhdr = list_first_entry(&pool->stale, in free_pages_work() local
559 struct page *page = virt_to_page(zhdr); in free_pages_work()
561 list_del(&zhdr->buddy); in free_pages_work()
565 cancel_work_sync(&zhdr->work); in free_pages_work()
577 static int num_free_chunks(struct z3fold_header *zhdr) in num_free_chunks() argument
585 if (zhdr->middle_chunks != 0) { in num_free_chunks()
586 int nfree_before = zhdr->first_chunks ? in num_free_chunks()
587 0 : zhdr->start_middle - ZHDR_CHUNKS; in num_free_chunks()
588 int nfree_after = zhdr->last_chunks ? in num_free_chunks()
590 (zhdr->start_middle + zhdr->middle_chunks); in num_free_chunks()
593 nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; in num_free_chunks()
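
num_free_chunks() reports the largest contiguous run of free chunks in a page, which is what indexes the unbuddied lists. A userspace model with assumed geometry (4 KiB pages, 64-byte chunks, a one-chunk header, so TOTAL_CHUNKS = 64 and NCHUNKS = 63; the real values are architecture-dependent):

    #include <stdio.h>

    #define TOTAL_CHUNKS 64  /* 4 KiB page / 64 B chunks (assumed) */
    #define ZHDR_CHUNKS   1  /* chunks taken by the header (assumed) */
    #define NCHUNKS      (TOTAL_CHUNKS - ZHDR_CHUNKS)

    static int max(int a, int b) { return a > b ? a : b; }

    /* With a middle buddy present, the usable space is the larger of
     * the hole below it (unless FIRST is taken) and the hole above it
     * (unless LAST is taken); with no middle buddy the remainder is
     * one contiguous hole. */
    static int num_free_chunks(int first, int middle, int last,
                               int start_middle)
    {
        if (middle) {
            int before = first ? 0 : start_middle - ZHDR_CHUNKS;
            int after  = last ? 0 :
                         TOTAL_CHUNKS - (start_middle + middle);
            return max(before, after);
        }
        return NCHUNKS - first - last;
    }

    int main(void)
    {
        /* FIRST = 10 chunks, MIDDLE = 8 chunks at chunk 30, LAST free:
         * the hole above MIDDLE is 64 - 38 = 26 chunks. */
        printf("%d\n", num_free_chunks(10, 8, 0, 30)); /* 26 */
        return 0;
    }
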
599 struct z3fold_header *zhdr) in add_to_unbuddied() argument
601 if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || in add_to_unbuddied()
602 zhdr->middle_chunks == 0) { in add_to_unbuddied()
604 int freechunks = num_free_chunks(zhdr); in add_to_unbuddied()
609 list_add(&zhdr->buddy, &unbuddied[freechunks]); in add_to_unbuddied()
611 zhdr->cpu = smp_processor_id(); in add_to_unbuddied()
616 static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks) in get_free_buddy() argument
620 if (zhdr->middle_chunks) { in get_free_buddy()
621 if (!zhdr->first_chunks && in get_free_buddy()
622 chunks <= zhdr->start_middle - ZHDR_CHUNKS) in get_free_buddy()
624 else if (!zhdr->last_chunks) in get_free_buddy()
627 if (!zhdr->first_chunks) in get_free_buddy()
629 else if (!zhdr->last_chunks) in get_free_buddy()
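
get_free_buddy() picks which of the three slots a request of the given chunk count should land in, returning HEADLESS when nothing fits. A model with the buddy counters passed as plain ints (ZHDR_CHUNKS = 1 assumed, as above):

    #define ZHDR_CHUNKS 1  /* assumed, as above */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    /* With a middle buddy present, FIRST is usable only if the request
     * fits below start_middle, and LAST is the fallback; with no
     * middle buddy the order is FIRST, LAST, MIDDLE.  HEADLESS means
     * nothing fits and the caller must take a fresh page. */
    static enum buddy get_free_buddy(int first, int middle, int last,
                                     int start_middle, int chunks)
    {
        if (middle) {
            if (!first && chunks <= start_middle - ZHDR_CHUNKS)
                return FIRST;
            if (!last)
                return LAST;
            return HEADLESS;
        }
        if (!first)
            return FIRST;
        if (!last)
            return LAST;
        return MIDDLE;
    }

    int main(void)
    {
        /* MIDDLE at chunk 30: a 10-chunk request fits below it. */
        return get_free_buddy(0, 8, 0, 30, 10) == FIRST ? 0 : 1;
    }
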
638 static inline void *mchunk_memmove(struct z3fold_header *zhdr, in mchunk_memmove() argument
641 void *beg = zhdr; in mchunk_memmove()
643 beg + (zhdr->start_middle << CHUNK_SHIFT), in mchunk_memmove()
644 zhdr->middle_chunks << CHUNK_SHIFT); in mchunk_memmove()
647 static inline bool buddy_single(struct z3fold_header *zhdr) in buddy_single() argument
649 return !((zhdr->first_chunks && zhdr->middle_chunks) || in buddy_single()
650 (zhdr->first_chunks && zhdr->last_chunks) || in buddy_single()
651 (zhdr->middle_chunks && zhdr->last_chunks)); in buddy_single()
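
buddy_single() is a pairwise check: it is true exactly when no two of the three chunk counters are non-zero at once, i.e. at most one buddy is in use, which is what makes the page a candidate for compact_single_buddy(). The same predicate in isolation:

    #include <stdbool.h>

    /* True iff at most one of the three buddies is in use: no pair of
     * counters is simultaneously non-zero. */
    static bool buddy_single(int first, int middle, int last)
    {
        return !((first && middle) || (first && last) ||
                 (middle && last));
    }

    int main(void)
    {
        return (buddy_single(10, 0, 0) && !buddy_single(10, 0, 5))
               ? 0 : 1;
    }
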
654 static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr) in compact_single_buddy() argument
656 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in compact_single_buddy()
657 void *p = zhdr; in compact_single_buddy()
661 int first_idx = __idx(zhdr, FIRST); in compact_single_buddy()
662 int middle_idx = __idx(zhdr, MIDDLE); in compact_single_buddy()
663 int last_idx = __idx(zhdr, LAST); in compact_single_buddy()
670 if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) { in compact_single_buddy()
672 sz = zhdr->first_chunks << CHUNK_SHIFT; in compact_single_buddy()
673 old_handle = (unsigned long)&zhdr->slots->slot[first_idx]; in compact_single_buddy()
674 moved_chunks = &zhdr->first_chunks; in compact_single_buddy()
675 } else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) { in compact_single_buddy()
676 p += zhdr->start_middle << CHUNK_SHIFT; in compact_single_buddy()
677 sz = zhdr->middle_chunks << CHUNK_SHIFT; in compact_single_buddy()
678 old_handle = (unsigned long)&zhdr->slots->slot[middle_idx]; in compact_single_buddy()
679 moved_chunks = &zhdr->middle_chunks; in compact_single_buddy()
680 } else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) { in compact_single_buddy()
681 p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT); in compact_single_buddy()
682 sz = zhdr->last_chunks << CHUNK_SHIFT; in compact_single_buddy()
683 old_handle = (unsigned long)&zhdr->slots->slot[last_idx]; in compact_single_buddy()
684 moved_chunks = &zhdr->last_chunks; in compact_single_buddy()
696 if (WARN_ON(new_zhdr == zhdr)) in compact_single_buddy()
721 write_lock(&zhdr->slots->lock); in compact_single_buddy()
727 write_unlock(&zhdr->slots->lock); in compact_single_buddy()
747 static int z3fold_compact_page(struct z3fold_header *zhdr) in z3fold_compact_page() argument
749 struct page *page = virt_to_page(zhdr); in z3fold_compact_page()
757 if (zhdr->middle_chunks == 0) in z3fold_compact_page()
760 if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { in z3fold_compact_page()
762 mchunk_memmove(zhdr, ZHDR_CHUNKS); in z3fold_compact_page()
763 zhdr->first_chunks = zhdr->middle_chunks; in z3fold_compact_page()
764 zhdr->middle_chunks = 0; in z3fold_compact_page()
765 zhdr->start_middle = 0; in z3fold_compact_page()
766 zhdr->first_num++; in z3fold_compact_page()
774 if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 && in z3fold_compact_page()
775 zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >= in z3fold_compact_page()
777 mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS); in z3fold_compact_page()
778 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; in z3fold_compact_page()
780 } else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 && in z3fold_compact_page()
781 TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle in z3fold_compact_page()
782 + zhdr->middle_chunks) >= in z3fold_compact_page()
784 unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks - in z3fold_compact_page()
785 zhdr->middle_chunks; in z3fold_compact_page()
786 mchunk_memmove(zhdr, new_start); in z3fold_compact_page()
787 zhdr->start_middle = new_start; in z3fold_compact_page()
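
z3fold_compact_page() closes gaps inside one page: a lone middle buddy is slid down to become FIRST (bumping first_num so stale handles keep decoding correctly), and otherwise the middle buddy is moved against FIRST or LAST when the gap is large enough. A runnable model of the underlying move, mchunk_memmove(), on a simulated page, with the same assumed chunk geometry as above:

    #include <stdio.h>
    #include <string.h>

    #define CHUNK_SHIFT   6   /* 64-byte chunks (assumed) */
    #define TOTAL_CHUNKS 64
    #define ZHDR_CHUNKS   1

    /* Slide the middle buddy so that it starts at chunk dst; memmove
     * is used because source and destination may overlap. */
    static void mchunk_memmove(char *page, int start_middle,
                               int middle_chunks, int dst)
    {
        memmove(page + (dst << CHUNK_SHIFT),
                page + (start_middle << CHUNK_SHIFT),
                middle_chunks << CHUNK_SHIFT);
    }

    int main(void)
    {
        static char page[TOTAL_CHUNKS << CHUNK_SHIFT];

        /* lone MIDDLE of 4 chunks at chunk 20: pull it down to chunk
         * ZHDR_CHUNKS, where z3fold_compact_page() relabels it FIRST
         * (and bumps first_num so old handles still decode). */
        memset(page + (20 << CHUNK_SHIFT), 'M', 4 << CHUNK_SHIFT);
        mchunk_memmove(page, 20, 4, ZHDR_CHUNKS);
        printf("%c\n", page[ZHDR_CHUNKS << CHUNK_SHIFT]); /* M */
        return 0;
    }
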
794 static void do_compact_page(struct z3fold_header *zhdr, bool locked) in do_compact_page() argument
796 struct z3fold_pool *pool = zhdr_to_pool(zhdr); in do_compact_page()
799 page = virt_to_page(zhdr); in do_compact_page()
801 WARN_ON(z3fold_page_trylock(zhdr)); in do_compact_page()
803 z3fold_page_lock(zhdr); in do_compact_page()
805 z3fold_page_unlock(zhdr); in do_compact_page()
809 list_del_init(&zhdr->buddy); in do_compact_page()
812 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) in do_compact_page()
817 z3fold_page_unlock(zhdr); in do_compact_page()
821 if (!zhdr->foreign_handles && buddy_single(zhdr) && in do_compact_page()
822 zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) { in do_compact_page()
823 if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) { in do_compact_page()
825 z3fold_page_unlock(zhdr); in do_compact_page()
830 z3fold_compact_page(zhdr); in do_compact_page()
831 add_to_unbuddied(pool, zhdr); in do_compact_page()
833 z3fold_page_unlock(zhdr); in do_compact_page()
838 struct z3fold_header *zhdr = container_of(w, struct z3fold_header, in compact_page_work() local
841 do_compact_page(zhdr, false); in compact_page_work()
848 struct z3fold_header *zhdr = NULL; in __z3fold_alloc() local
860 zhdr = list_first_entry_or_null(READ_ONCE(l), in __z3fold_alloc()
863 if (!zhdr) in __z3fold_alloc()
868 if (unlikely(zhdr != list_first_entry(READ_ONCE(l), in __z3fold_alloc()
870 !z3fold_page_trylock(zhdr)) { in __z3fold_alloc()
872 zhdr = NULL; in __z3fold_alloc()
878 list_del_init(&zhdr->buddy); in __z3fold_alloc()
879 zhdr->cpu = -1; in __z3fold_alloc()
882 page = virt_to_page(zhdr); in __z3fold_alloc()
885 z3fold_page_unlock(zhdr); in __z3fold_alloc()
886 zhdr = NULL; in __z3fold_alloc()
899 kref_get(&zhdr->refcount); in __z3fold_alloc()
904 if (!zhdr) { in __z3fold_alloc()
915 zhdr = list_first_entry_or_null(READ_ONCE(l), in __z3fold_alloc()
918 if (!zhdr || !z3fold_page_trylock(zhdr)) { in __z3fold_alloc()
920 zhdr = NULL; in __z3fold_alloc()
923 list_del_init(&zhdr->buddy); in __z3fold_alloc()
924 zhdr->cpu = -1; in __z3fold_alloc()
927 page = virt_to_page(zhdr); in __z3fold_alloc()
930 z3fold_page_unlock(zhdr); in __z3fold_alloc()
931 zhdr = NULL; in __z3fold_alloc()
936 kref_get(&zhdr->refcount); in __z3fold_alloc()
941 if (zhdr && !zhdr->slots) { in __z3fold_alloc()
942 zhdr->slots = alloc_slots(pool, GFP_ATOMIC); in __z3fold_alloc()
943 if (!zhdr->slots) in __z3fold_alloc()
946 return zhdr; in __z3fold_alloc()
949 if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) { in __z3fold_alloc()
950 add_to_unbuddied(pool, zhdr); in __z3fold_alloc()
951 z3fold_page_unlock(zhdr); in __z3fold_alloc()
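
__z3fold_alloc() services an allocation from partially filled pages: each CPU keeps NCHUNKS unbuddied lists indexed by free-chunk count, and the function scans buckets chunks..NCHUNKS-1 on the local CPU first, then on the others, trylocking and re-checking each candidate. A model of just the bucket scan (pick_bucket() and the toy bucket count are invented for this sketch):

    /* Scan buckets chunks..nchunks-1 and take the first non-empty one;
     * the real code then trylocks the chosen page and re-checks it. */
    static int pick_bucket(const int nonempty[], int nchunks, int chunks)
    {
        for (int i = chunks; i < nchunks; i++)
            if (nonempty[i])
                return i;       /* page with i free chunks */
        return -1;              /* try other CPUs, then a new page */
    }

    int main(void)
    {
        int nonempty[8] = { 0, 0, 0, 1, 0, 1, 0, 0 };

        /* a 2-chunk request lands in the 3-free-chunk bucket */
        return pick_bucket(nonempty, 8, 2) == 3 ? 0 : 1;
    }
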
1071 struct z3fold_header *zhdr = NULL; in z3fold_alloc() local
1086 zhdr = __z3fold_alloc(pool, size, can_sleep); in z3fold_alloc()
1087 if (zhdr) { in z3fold_alloc()
1088 bud = get_free_buddy(zhdr, chunks); in z3fold_alloc()
1090 if (!kref_put(&zhdr->refcount, in z3fold_alloc()
1092 z3fold_page_unlock(zhdr); in z3fold_alloc()
1097 page = virt_to_page(zhdr); in z3fold_alloc()
1107 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp); in z3fold_alloc()
1108 if (!zhdr) { in z3fold_alloc()
1127 z3fold_page_lock(zhdr); in z3fold_alloc()
1131 zhdr->first_chunks = chunks; in z3fold_alloc()
1133 zhdr->last_chunks = chunks; in z3fold_alloc()
1135 zhdr->middle_chunks = chunks; in z3fold_alloc()
1136 zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; in z3fold_alloc()
1138 add_to_unbuddied(pool, zhdr); in z3fold_alloc()
1148 *handle = encode_handle(zhdr, bud); in z3fold_alloc()
1151 z3fold_page_unlock(zhdr); in z3fold_alloc()
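
In z3fold_alloc() itself, the chosen buddy dictates the layout: FIRST sits just after the header, LAST is packed against the end of the page, and a fresh MIDDLE starts right above FIRST (start_middle = first_chunks + ZHDR_CHUNKS), keeping the hole next to LAST as large as possible. A sketch of that placement switch (the struct and function names are invented; ZHDR_CHUNKS = 1 assumed):

    #define ZHDR_CHUNKS 1  /* assumed */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    struct layout { int first, middle, last, start_middle; };

    static void place(struct layout *l, enum buddy bud, int chunks)
    {
        switch (bud) {
        case FIRST:
            l->first = chunks;
            break;
        case LAST:
            l->last = chunks;
            break;
        case MIDDLE:
            l->middle = chunks;
            l->start_middle = l->first + ZHDR_CHUNKS;
            break;
        default:   /* HEADLESS: the page holds one buddy-less object */
            break;
        }
    }

    int main(void)
    {
        struct layout l = { 0, 0, 0, 0 };

        place(&l, FIRST, 10);
        place(&l, MIDDLE, 8);          /* starts at chunk 11 */
        return l.start_middle == 11 ? 0 : 1;
    }
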
1168 struct z3fold_header *zhdr; in z3fold_free() local
1173 zhdr = get_z3fold_header(handle); in z3fold_free()
1174 page = virt_to_page(zhdr); in z3fold_free()
1187 put_z3fold_header(zhdr); in z3fold_free()
1199 zhdr->first_chunks = 0; in z3fold_free()
1202 zhdr->middle_chunks = 0; in z3fold_free()
1205 zhdr->last_chunks = 0; in z3fold_free()
1210 put_z3fold_header(zhdr); in z3fold_free()
1215 free_handle(handle, zhdr); in z3fold_free()
1216 if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) in z3fold_free()
1220 put_z3fold_header(zhdr); in z3fold_free()
1225 put_z3fold_header(zhdr); in z3fold_free()
1228 if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) { in z3fold_free()
1229 zhdr->cpu = -1; in z3fold_free()
1230 kref_get(&zhdr->refcount); in z3fold_free()
1232 do_compact_page(zhdr, true); in z3fold_free()
1235 kref_get(&zhdr->refcount); in z3fold_free()
1237 queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work); in z3fold_free()
1238 put_z3fold_header(zhdr); in z3fold_free()

1280 struct z3fold_header *zhdr = NULL; in z3fold_reclaim_page() local
1302 zhdr = page_address(page); in z3fold_reclaim_page()
1320 if (!z3fold_page_trylock(zhdr)) { in z3fold_reclaim_page()
1321 zhdr = NULL; in z3fold_reclaim_page()
1329 if (zhdr->foreign_handles || in z3fold_reclaim_page()
1331 z3fold_page_unlock(zhdr); in z3fold_reclaim_page()
1332 zhdr = NULL; in z3fold_reclaim_page()
1335 list_del_init(&zhdr->buddy); in z3fold_reclaim_page()
1336 zhdr->cpu = -1; in z3fold_reclaim_page()
1338 kref_get(&zhdr->refcount); in z3fold_reclaim_page()
1342 if (!zhdr) in z3fold_reclaim_page()
1359 if (zhdr->first_chunks) in z3fold_reclaim_page()
1360 first_handle = __encode_handle(zhdr, &slots, in z3fold_reclaim_page()
1362 if (zhdr->middle_chunks) in z3fold_reclaim_page()
1363 middle_handle = __encode_handle(zhdr, &slots, in z3fold_reclaim_page()
1365 if (zhdr->last_chunks) in z3fold_reclaim_page()
1366 last_handle = __encode_handle(zhdr, &slots, in z3fold_reclaim_page()
1372 z3fold_page_unlock(zhdr); in z3fold_reclaim_page()
1374 first_handle = encode_handle(zhdr, HEADLESS); in z3fold_reclaim_page()
1405 struct z3fold_buddy_slots *slots = zhdr->slots; in z3fold_reclaim_page()
1406 z3fold_page_lock(zhdr); in z3fold_reclaim_page()
1407 if (kref_put(&zhdr->refcount, in z3fold_reclaim_page()
1420 if (list_empty(&zhdr->buddy)) in z3fold_reclaim_page()
1421 add_to_unbuddied(pool, zhdr); in z3fold_reclaim_page()
1423 z3fold_page_unlock(zhdr); in z3fold_reclaim_page()
1445 struct z3fold_header *zhdr; in z3fold_map() local
1450 zhdr = get_z3fold_header(handle); in z3fold_map()
1451 addr = zhdr; in z3fold_map()
1452 page = virt_to_page(zhdr); in z3fold_map()
1463 addr += zhdr->start_middle << CHUNK_SHIFT; in z3fold_map()
1477 zhdr->mapped_count++; in z3fold_map()
1479 put_z3fold_header(zhdr); in z3fold_map()
1490 struct z3fold_header *zhdr; in z3fold_unmap() local
1494 zhdr = get_z3fold_header(handle); in z3fold_unmap()
1495 page = virt_to_page(zhdr); in z3fold_unmap()
1503 zhdr->mapped_count--; in z3fold_unmap()
1504 put_z3fold_header(zhdr); in z3fold_unmap()
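
z3fold_map() and z3fold_unmap() translate a handle into an address inside the page and maintain mapped_count so compaction and migration leave mapped pages alone. The offset arithmetic, modeled with assumed 4 KiB pages and 64-byte chunks (buddy_offset() is a name invented for this sketch; a HEADLESS page maps from offset 0):

    #define PAGE_BYTES  4096UL  /* assumed */
    #define CHUNK_SHIFT 6       /* 64-byte chunks (assumed) */
    #define ZHDR_SIZE_ALIGNED (1UL << CHUNK_SHIFT) /* one-chunk header */

    enum buddy { HEADLESS = 0, FIRST, MIDDLE, LAST };

    /* FIRST begins right after the aligned header, MIDDLE at
     * start_middle chunks from the page base, LAST at last_chunks
     * chunks from the end of the page. */
    static unsigned long buddy_offset(enum buddy bud, int start_middle,
                                      int last_chunks)
    {
        switch (bud) {
        case FIRST:
            return ZHDR_SIZE_ALIGNED;
        case MIDDLE:
            return (unsigned long)start_middle << CHUNK_SHIFT;
        case LAST:
            return PAGE_BYTES -
                   ((unsigned long)last_chunks << CHUNK_SHIFT);
        default:
            return 0;
        }
    }

    int main(void)
    {
        /* MIDDLE at chunk 11 maps 11 * 64 = 704 bytes into the page */
        return buddy_offset(MIDDLE, 11, 0) == 704 ? 0 : 1;
    }
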
1520 struct z3fold_header *zhdr; in z3fold_page_isolate() local
1529 zhdr = page_address(page); in z3fold_page_isolate()
1530 z3fold_page_lock(zhdr); in z3fold_page_isolate()
1535 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) in z3fold_page_isolate()
1540 pool = zhdr_to_pool(zhdr); in z3fold_page_isolate()
1542 if (!list_empty(&zhdr->buddy)) in z3fold_page_isolate()
1543 list_del_init(&zhdr->buddy); in z3fold_page_isolate()
1548 kref_get(&zhdr->refcount); in z3fold_page_isolate()
1549 z3fold_page_unlock(zhdr); in z3fold_page_isolate()
1553 z3fold_page_unlock(zhdr); in z3fold_page_isolate()
1560 struct z3fold_header *zhdr, *new_zhdr; in z3fold_page_migrate() local
1569 zhdr = page_address(page); in z3fold_page_migrate()
1570 pool = zhdr_to_pool(zhdr); in z3fold_page_migrate()
1572 if (!z3fold_page_trylock(zhdr)) in z3fold_page_migrate()
1574 if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) { in z3fold_page_migrate()
1576 z3fold_page_unlock(zhdr); in z3fold_page_migrate()
1579 if (work_pending(&zhdr->work)) { in z3fold_page_migrate()
1580 z3fold_page_unlock(zhdr); in z3fold_page_migrate()
1584 memcpy(new_zhdr, zhdr, PAGE_SIZE); in z3fold_page_migrate()
1587 z3fold_page_unlock(zhdr); in z3fold_page_migrate()
1624 struct z3fold_header *zhdr; in z3fold_page_putback() local
1627 zhdr = page_address(page); in z3fold_page_putback()
1628 pool = zhdr_to_pool(zhdr); in z3fold_page_putback()
1630 z3fold_page_lock(zhdr); in z3fold_page_putback()
1631 if (!list_empty(&zhdr->buddy)) in z3fold_page_putback()
1632 list_del_init(&zhdr->buddy); in z3fold_page_putback()
1634 if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) in z3fold_page_putback()
1639 if (list_empty(&zhdr->buddy)) in z3fold_page_putback()
1640 add_to_unbuddied(pool, zhdr); in z3fold_page_putback()
1642 z3fold_page_unlock(zhdr); in z3fold_page_putback()