Lines Matching refs:nr_pages

232 static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)  in check_pfn_span()  argument
249 if (!IS_ALIGNED(pfn | nr_pages, min_align)) in check_pfn_span()
302 int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, in __add_pages() argument
305 const unsigned long end_pfn = pfn + nr_pages; in __add_pages()
313 VM_BUG_ON(!mhp_range_allowed(PFN_PHYS(pfn), nr_pages * PAGE_SIZE, false)); in __add_pages()
320 || vmem_altmap_offset(altmap) > nr_pages) { in __add_pages()
327 if (check_pfn_span(pfn, nr_pages)) { in __add_pages()
328 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); in __add_pages()
462 unsigned long nr_pages) in remove_pfn_range_from_zone() argument
464 const unsigned long end_pfn = start_pfn + nr_pages; in remove_pfn_range_from_zone()
489 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
495 static void __remove_section(unsigned long pfn, unsigned long nr_pages, in __remove_section() argument
504 sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); in __remove_section()
518 void __remove_pages(unsigned long pfn, unsigned long nr_pages, in __remove_pages() argument
521 const unsigned long end_pfn = pfn + nr_pages; in __remove_pages()
527 if (check_pfn_span(pfn, nr_pages)) { in __remove_pages()
528 WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); in __remove_pages()
593 static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) in online_pages_range() argument
595 const unsigned long end_pfn = start_pfn + nr_pages; in online_pages_range()
619 static void node_states_check_changes_online(unsigned long nr_pages, in node_states_check_changes_online() argument
643 unsigned long nr_pages) in resize_zone_range() argument
650 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
654 unsigned long nr_pages) in resize_pgdat_range() argument
661 pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - pgdat->node_start_pfn; in resize_pgdat_range()
688 unsigned long nr_pages, in move_pfn_range_to_zone() argument
697 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
698 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
699 resize_pgdat_range(pgdat, start_pfn, nr_pages); in move_pfn_range_to_zone()
710 if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)) in move_pfn_range_to_zone()
711 section_taint_zone_device(start_pfn + nr_pages); in move_pfn_range_to_zone()
720 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0, in move_pfn_range_to_zone()
781 unsigned long nr_pages) in auto_movable_can_online_movable() argument
825 movable_pages += nr_pages; in auto_movable_can_online_movable()
835 unsigned long nr_pages) in default_kernel_zone_for_pfn() argument
843 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
903 unsigned long nr_pages) in auto_movable_zone_for_pfn() argument
918 } else if (!group || group->d.unit_pages == nr_pages) { in auto_movable_zone_for_pfn()
919 max_pages = nr_pages; in auto_movable_zone_for_pfn()
946 nr_pages = max_pages - online_pages; in auto_movable_zone_for_pfn()
947 if (!auto_movable_can_online_movable(NUMA_NO_NODE, group, nr_pages)) in auto_movable_zone_for_pfn()
952 !auto_movable_can_online_movable(nid, group, nr_pages)) in auto_movable_zone_for_pfn()
958 return default_kernel_zone_for_pfn(nid, pfn, nr_pages); in auto_movable_zone_for_pfn()
962 unsigned long nr_pages) in default_zone_for_pfn() argument
965 nr_pages); in default_zone_for_pfn()
967 bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages); in default_zone_for_pfn()
968 bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages); in default_zone_for_pfn()
987 unsigned long nr_pages) in zone_for_pfn_range() argument
990 return default_kernel_zone_for_pfn(nid, start_pfn, nr_pages); in zone_for_pfn_range()
996 return auto_movable_zone_for_pfn(nid, group, start_pfn, nr_pages); in zone_for_pfn_range()
998 return default_zone_for_pfn(nid, start_pfn, nr_pages); in zone_for_pfn_range()
1006 long nr_pages) in adjust_present_page_count() argument
1016 zone->present_early_pages += nr_pages; in adjust_present_page_count()
1017 zone->present_pages += nr_pages; in adjust_present_page_count()
1018 zone->zone_pgdat->node_present_pages += nr_pages; in adjust_present_page_count()
1021 group->present_movable_pages += nr_pages; in adjust_present_page_count()
1023 group->present_kernel_pages += nr_pages; in adjust_present_page_count()
1026 int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, in mhp_init_memmap_on_memory() argument
1029 unsigned long end_pfn = pfn + nr_pages; in mhp_init_memmap_on_memory()
1032 ret = kasan_add_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); in mhp_init_memmap_on_memory()
1036 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE); in mhp_init_memmap_on_memory()
1038 for (i = 0; i < nr_pages; i++) in mhp_init_memmap_on_memory()
1046 if (nr_pages >= PAGES_PER_SECTION) in mhp_init_memmap_on_memory()
1052 void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages) in mhp_deinit_memmap_on_memory() argument
1054 unsigned long end_pfn = pfn + nr_pages; in mhp_deinit_memmap_on_memory()
1061 if (nr_pages >= PAGES_PER_SECTION) in mhp_deinit_memmap_on_memory()
1068 remove_pfn_range_from_zone(page_zone(pfn_to_page(pfn)), pfn, nr_pages); in mhp_deinit_memmap_on_memory()
1069 kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages)); in mhp_deinit_memmap_on_memory()
1072 int __ref online_pages(unsigned long pfn, unsigned long nr_pages, in online_pages() argument
1088 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(pfn) || in online_pages()
1089 !IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION))) in online_pages()
1095 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); in online_pages()
1098 arg.nr_pages = nr_pages; in online_pages()
1099 node_states_check_changes_online(nr_pages, zone, &arg); in online_pages()
1111 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; in online_pages()
1124 online_pages_range(pfn, nr_pages); in online_pages()
1125 adjust_present_page_count(pfn_to_page(pfn), group, nr_pages); in online_pages()
1132 undo_isolate_page_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE); in online_pages()
1157 (((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1); in online_pages()
1159 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
1734 static void node_states_check_changes_offline(unsigned long nr_pages, in node_states_check_changes_offline() argument
1754 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) in node_states_check_changes_offline()
1768 if (nr_pages >= present_pages) in node_states_check_changes_offline()
1782 unsigned long nr_pages, void *data) in count_system_ram_pages_cb() argument
1786 *nr_system_ram_pages += nr_pages; in count_system_ram_pages_cb()
1790 int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages, in offline_pages() argument
1793 const unsigned long end_pfn = start_pfn + nr_pages; in offline_pages()
1808 if (WARN_ON_ONCE(!nr_pages || !pageblock_aligned(start_pfn) || in offline_pages()
1809 !IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))) in offline_pages()
1822 walk_system_ram_range(start_pfn, nr_pages, &system_ram_pages, in offline_pages()
1824 if (system_ram_pages != nr_pages) { in offline_pages()
1860 arg.nr_pages = nr_pages; in offline_pages()
1861 node_states_check_changes_offline(nr_pages, zone, &arg); in offline_pages()
1913 pr_debug("Offlined Pages %ld\n", nr_pages); in offline_pages()
1921 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; in offline_pages()
1928 adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages); in offline_pages()
1929 adjust_present_page_count(pfn_to_page(start_pfn), group, -nr_pages); in offline_pages()
1948 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in offline_pages()