Lines matching refs: order
353 static void __free_pages_ok(struct page *page, unsigned int order,
726 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
728 int base = order; in order_to_pindex()
731 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
732 VM_BUG_ON(order != pageblock_order); in order_to_pindex()
736 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
744 int order = pindex / MIGRATE_PCPTYPES; in pindex_to_order() local
748 order = pageblock_order; in pindex_to_order()
750 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in pindex_to_order()
753 return order; in pindex_to_order()
756 static inline bool pcp_allowed_order(unsigned int order) in pcp_allowed_order() argument
758 if (order <= PAGE_ALLOC_COSTLY_ORDER) in pcp_allowed_order()
761 if (order == pageblock_order) in pcp_allowed_order()
767 static inline void free_the_page(struct page *page, unsigned int order) in free_the_page() argument
769 if (pcp_allowed_order(order)) /* Via pcp? */ in free_the_page()
770 free_unref_page(page, order); in free_the_page()
772 __free_pages_ok(page, order, FPI_NONE); in free_the_page()
796 static void prep_compound_head(struct page *page, unsigned int order) in prep_compound_head() argument
799 set_compound_order(page, order); in prep_compound_head()
813 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
816 int nr_pages = 1 << order; in prep_compound_page()
822 prep_compound_head(page, order); in prep_compound_page()
865 unsigned int order, int migratetype) in set_page_guard() argument
870 if (order >= debug_guardpage_minorder()) in set_page_guard()
875 set_page_private(page, order); in set_page_guard()
878 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
884 unsigned int order, int migratetype) in clear_page_guard() argument
893 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
897 unsigned int order, int migratetype) { return false; } in set_page_guard() argument
899 unsigned int order, int migratetype) {} in clear_page_guard() argument
960 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
962 set_page_private(page, order); in set_buddy_order()
979 int order, int migratetype) in compaction_capture() argument
981 if (!capc || order != capc->cc->order) in compaction_capture()
995 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE) in compaction_capture()
1010 int order, int migratetype) in compaction_capture() argument
1018 unsigned int order, int migratetype) in add_to_free_list() argument
1020 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
1028 unsigned int order, int migratetype) in add_to_free_list_tail() argument
1030 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
1042 unsigned int order, int migratetype) in move_to_free_list() argument
1044 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
1050 unsigned int order) in del_page_from_free_list() argument
1059 zone->free_area[order].nr_free--; in del_page_from_free_list()
1072 struct page *page, unsigned int order) in buddy_merge_likely() argument
1077 if (order >= MAX_ORDER - 2) in buddy_merge_likely()
1083 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, in buddy_merge_likely()
1113 struct zone *zone, unsigned int order, in __free_one_page() argument
1127 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
1129 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
1132 while (order < MAX_ORDER - 1) { in __free_one_page()
1133 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
1134 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
1139 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); in __free_one_page()
1143 if (unlikely(order >= pageblock_order)) { in __free_one_page()
1163 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
1165 del_page_from_free_list(buddy, zone, order); in __free_one_page()
1169 order++; in __free_one_page()
1173 set_buddy_order(page, order); in __free_one_page()
1177 else if (is_shuffle_order(order)) in __free_one_page()
1180 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
1183 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
1185 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
1189 page_reporting_notify_free(order); in __free_one_page()
1206 unsigned int order, unsigned long split_pfn_offset) in split_free_page() argument
1221 if (!PageBuddy(free_page) || buddy_order(free_page) != order) { in split_free_page()
1228 __mod_zone_freepage_state(zone, -(1UL << order), mt); in split_free_page()
1230 del_page_from_free_list(free_page, zone, order); in split_free_page()
1232 pfn < free_page_pfn + (1UL << order);) { in split_free_page()
1236 pfn ? __ffs(pfn) : order, in split_free_page()
1244 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); in split_free_page()
1402 unsigned int order, bool check_free, fpi_t fpi_flags) in free_pages_prepare() argument
1409 trace_mm_page_free(page, order); in free_pages_prepare()
1410 kmsan_free_page(page, order); in free_pages_prepare()
1412 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1418 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1419 reset_page_owner(page, order); in free_pages_prepare()
1420 page_table_check_free(page, order); in free_pages_prepare()
1428 if (unlikely(order)) { in free_pages_prepare()
1432 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1438 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1451 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1459 reset_page_owner(page, order); in free_pages_prepare()
1460 page_table_check_free(page, order); in free_pages_prepare()
1464 PAGE_SIZE << order); in free_pages_prepare()
1466 PAGE_SIZE << order); in free_pages_prepare()
1469 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1480 kasan_poison_pages(page, order, init); in free_pages_prepare()
1487 kernel_init_pages(page, 1 << order); in free_pages_prepare()
1494 arch_free_page(page, order); in free_pages_prepare()
1496 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1507 static bool free_pcp_prepare(struct page *page, unsigned int order) in free_pcp_prepare() argument
1509 return free_pages_prepare(page, order, true, FPI_NONE); in free_pcp_prepare()
1527 static bool free_pcp_prepare(struct page *page, unsigned int order) in free_pcp_prepare() argument
1530 return free_pages_prepare(page, order, true, FPI_NONE); in free_pcp_prepare()
1532 return free_pages_prepare(page, order, false, FPI_NONE); in free_pcp_prepare()
1552 unsigned int order; in free_pcppages_bulk() local
1587 order = pindex_to_order(pindex); in free_pcppages_bulk()
1588 nr_pages = 1 << order; in free_pcppages_bulk()
1609 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1610 trace_mm_page_pcpu_drain(page, order, mt); in free_pcppages_bulk()
1619 unsigned int order, in free_one_page() argument
1629 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1707 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1715 if (!free_pages_prepare(page, order, true, fpi_flags)) in __free_pages_ok()
1725 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in __free_pages_ok()
1728 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1731 void __free_pages_core(struct page *page, unsigned int order) in __free_pages_core() argument
1733 unsigned int nr_pages = 1 << order; in __free_pages_core()
1757 __free_pages_ok(page, order, FPI_TO_TAIL | FPI_SKIP_KASAN_POISON); in __free_pages_core()
1813 unsigned int order) in memblock_free_pages() argument
1817 if (!kmsan_memblock_free_pages(page, order)) { in memblock_free_pages()
1821 __free_pages_core(page, order); in memblock_free_pages()
2200 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
2202 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); in deferred_grow_zone()
2267 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
2269 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
2396 static bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
2399 for (i = 0; i < (1 << order); i++) { in check_new_pages()
2415 static inline bool check_pcp_refill(struct page *page, unsigned int order) in check_pcp_refill() argument
2418 return check_new_pages(page, order); in check_pcp_refill()
2423 static inline bool check_new_pcp(struct page *page, unsigned int order) in check_new_pcp() argument
2425 return check_new_pages(page, order); in check_new_pcp()
2433 static inline bool check_pcp_refill(struct page *page, unsigned int order) in check_pcp_refill() argument
2435 return check_new_pages(page, order); in check_pcp_refill()
2437 static inline bool check_new_pcp(struct page *page, unsigned int order) in check_new_pcp() argument
2440 return check_new_pages(page, order); in check_new_pcp()
2474 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
2485 arch_alloc_page(page, order); in post_alloc_hook()
2486 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
2493 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
2507 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
2515 kasan_unpoison_pages(page, order, init); in post_alloc_hook()
2522 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
2527 kernel_init_pages(page, 1 << order); in post_alloc_hook()
2532 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
2533 page_table_check_alloc(page, order); in post_alloc_hook()
2536 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
2539 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
2541 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
2542 prep_compound_page(page, order); in prep_new_page()
2561 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
2569 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
2575 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
2577 trace_mm_page_alloc_zone_locked(page, order, migratetype, in __rmqueue_smallest()
2578 pcp_allowed_order(order) && in __rmqueue_smallest()
2601 unsigned int order) in __rmqueue_cma_fallback() argument
2603 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
2607 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
2621 unsigned int order; in move_freepages() local
2643 order = buddy_order(page); in move_freepages()
2644 move_to_free_list(page, zone, order, migratetype); in move_freepages()
2645 pfn += 1 << order; in move_freepages()
2646 pages_moved += 1 << order; in move_freepages()
2697 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
2706 if (order >= pageblock_order) in can_steal_fallback()
2709 if (order >= pageblock_order / 2 || in can_steal_fallback()
2845 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2863 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
2930 int order; in unreserve_highatomic_pageblock() local
2944 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock()
2945 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
3005 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, in __rmqueue_fallback() argument
3010 int min_order = order; in __rmqueue_fallback()
3020 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) in __rmqueue_fallback()
3045 && current_order > order) in __rmqueue_fallback()
3054 for (current_order = order; current_order < MAX_ORDER; in __rmqueue_fallback()
3075 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
3087 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
3101 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
3107 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
3110 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
3112 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
3124 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
3133 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
3138 if (unlikely(check_pcp_refill(page, order))) in rmqueue_bulk()
3155 -(1 << order)); in rmqueue_bulk()
3164 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
3338 unsigned int order, t; in mark_free_pages() local
3363 for_each_migratetype_order(order, t) { in mark_free_pages()
3365 &zone->free_area[order].free_list[t], buddy_list) { in mark_free_pages()
3369 for (i = 0; i < (1UL << order); i++) { in mark_free_pages()
3383 unsigned int order) in free_unref_page_prepare() argument
3387 if (!free_pcp_prepare(page, order)) in free_unref_page_prepare()
3444 unsigned int order) in free_unref_page_commit() argument
3450 __count_vm_events(PGFREE, 1 << order); in free_unref_page_commit()
3451 pindex = order_to_pindex(migratetype, order); in free_unref_page_commit()
3453 pcp->count += 1 << order; in free_unref_page_commit()
3461 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); in free_unref_page_commit()
3474 void free_unref_page(struct page *page, unsigned int order) in free_unref_page() argument
3483 if (!free_unref_page_prepare(page, pfn, order)) in free_unref_page()
3496 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
3506 free_unref_page_commit(zone, pcp, page, migratetype, order); in free_unref_page()
3509 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
3592 void split_page(struct page *page, unsigned int order) in split_page() argument
3599 for (i = 1; i < (1 << order); i++) in split_page()
3601 split_page_owner(page, 1 << order); in split_page()
3602 split_page_memcg(page, 1 << order); in split_page()
3606 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
3619 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3623 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
3626 del_page_from_free_list(page, zone, order); in __isolate_free_page()
3632 if (order >= pageblock_order - 1) { in __isolate_free_page()
3633 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3646 return 1UL << order; in __isolate_free_page()
3658 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
3666 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3698 unsigned int order, unsigned int alloc_flags, in rmqueue_buddy() argument
3713 if (order > 0 && alloc_flags & ALLOC_HARDER) in rmqueue_buddy()
3714 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3716 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_buddy()
3722 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue_buddy()
3725 } while (check_new_pages(page, order)); in rmqueue_buddy()
3727 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_buddy()
3735 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
3756 batch = max(batch >> order, 2); in __rmqueue_pcplist()
3757 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
3761 pcp->count += alloced << order; in __rmqueue_pcplist()
3768 pcp->count -= 1 << order; in __rmqueue_pcplist()
3769 } while (check_new_pcp(page, order)); in __rmqueue_pcplist()
3776 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
3802 list = &pcp->lists[order_to_pindex(migratetype, order)]; in rmqueue_pcplist()
3803 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3807 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
3827 struct zone *zone, unsigned int order, in rmqueue() argument
3837 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); in rmqueue()
3839 if (likely(pcp_allowed_order(order))) { in rmqueue()
3846 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3853 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
3888 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3892 if (order < fail_page_alloc.min_order) in __should_fail_alloc_page()
3906 return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); in __should_fail_alloc_page()
3934 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3941 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3943 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
3948 unsigned int order, unsigned int alloc_flags) in __zone_watermark_unusable_free() argument
3951 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
3976 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
3985 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
4012 if (!order) in __zone_watermark_ok()
4016 for (o = order; o < MAX_ORDER; o++) { in __zone_watermark_ok()
4040 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
4043 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_ok()
4047 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
4059 if (!order) { in zone_watermark_fast()
4072 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_fast()
4081 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
4084 return __zone_watermark_ok(z, order, mark, highest_zoneidx, in zone_watermark_fast()
4091 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
4099 return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0, in zone_watermark_ok_safe()
4174 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
4245 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
4256 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4269 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4279 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
4288 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
4291 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
4297 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) in get_page_from_freelist()
4298 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
4305 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4369 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
4375 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4382 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4389 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
4397 .order = order, in __alloc_pages_may_oom()
4421 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
4430 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
4467 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4484 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4492 if (!order) in __alloc_pages_direct_compact()
4499 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
4516 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4520 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4526 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4543 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
4554 if (!order) in should_compact_retry()
4576 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
4598 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4610 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
4619 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
4624 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4633 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
4641 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4745 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4758 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4771 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4780 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4785 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4804 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4817 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4918 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4931 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
4965 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4967 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
5023 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
5027 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
5086 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
5092 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
5107 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
5109 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
5154 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
5173 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
5186 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5192 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5208 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
5219 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
5234 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
5292 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
5301 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
5306 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
5330 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
5522 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, in __alloc_pages() argument
5534 if (WARN_ON_ONCE_GFP(order >= MAX_ORDER, gfp)) in __alloc_pages()
5547 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, in __alloc_pages()
5558 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in __alloc_pages()
5571 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); in __alloc_pages()
5575 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { in __alloc_pages()
5576 __free_pages(page, order); in __alloc_pages()
5580 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in __alloc_pages()
5581 kmsan_alloc_page(page, order, alloc_gfp); in __alloc_pages()
5587 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, in __folio_alloc() argument
5590 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, in __folio_alloc()
5593 if (page && order > 1) in __folio_alloc()
5604 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
5608 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
5641 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5644 free_the_page(page, order); in __free_pages()
5646 while (order-- > 0) in __free_pages()
5647 free_the_page(page + (1 << order), order); in __free_pages()
5651 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
5655 __free_pages(virt_to_page((void *)addr), order); in free_pages()
5787 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
5795 split_page_owner(page, 1 << order); in make_alloc_exact()
5796 split_page_memcg(page, 1 << order); in make_alloc_exact()
5800 last = page + (1UL << order); in make_alloc_exact()
5824 unsigned int order = get_order(size); in alloc_pages_exact() local
5830 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
5831 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
5849 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
5855 p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
5858 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
6260 unsigned int order; in __show_free_areas() local
6272 for (order = 0; order < MAX_ORDER; order++) { in __show_free_areas()
6273 struct free_area *area = &zone->free_area[order]; in __show_free_areas()
6276 nr[order] = area->nr_free; in __show_free_areas()
6277 total += nr[order] << order; in __show_free_areas()
6279 types[order] = 0; in __show_free_areas()
6282 types[order] |= 1 << type; in __show_free_areas()
6286 for (order = 0; order < MAX_ORDER; order++) { in __show_free_areas()
6288 nr[order], K(1UL) << order); in __show_free_areas()
6289 if (nr[order]) in __show_free_areas()
6290 show_migration_types(types[order]); in __show_free_areas()
6866 unsigned int order = pgmap->vmemmap_shift; in memmap_init_compound() local
6884 prep_compound_head(head, order); in memmap_init_compound()
6933 unsigned int order, t; in zone_init_free_lists() local
6934 for_each_migratetype_order(order, t) { in zone_init_free_lists()
6935 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
6936 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
7637 unsigned int order = MAX_ORDER - 1; in set_pageblock_order() local
7644 if (HPAGE_SHIFT > PAGE_SHIFT && HUGETLB_PAGE_ORDER < order) in set_pageblock_order()
7645 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
7652 pageblock_order = order; in set_pageblock_order()
9270 int order; in alloc_contig_range() local
9275 .order = -1, in alloc_contig_range()
9344 order = 0; in alloc_contig_range()
9347 if (++order >= MAX_ORDER) { in alloc_contig_range()
9351 outer_start &= ~0UL << order; in alloc_contig_range()
9355 order = buddy_order(pfn_to_page(outer_start)); in alloc_contig_range()
9363 if (outer_start + (1UL << order) <= start) in alloc_contig_range()
9552 unsigned int order; in __offline_isolated_pages() local
9581 order = buddy_order(page); in __offline_isolated_pages()
9582 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
9583 pfn += (1 << order); in __offline_isolated_pages()
9595 unsigned int order; in is_free_buddy_page() local
9597 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
9598 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
9601 buddy_order_unsafe(page_head) >= order) in is_free_buddy_page()
9605 return order < MAX_ORDER; in is_free_buddy_page()
9652 unsigned int order; in take_page_off_buddy() local
9656 for (order = 0; order < MAX_ORDER; order++) { in take_page_off_buddy()
9657 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
9660 if (PageBuddy(page_head) && page_order >= order) { in take_page_off_buddy()