/linux-6.1.9/mm/ |
D | mmzone.c
  |   34 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
  |   39 zone = pgdat->node_zones; in next_zone()
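The two mmzone.c fragments above are the core of the zone walker: next_zone() steps through a node's embedded node_zones[] array and, at the end of it, hops to the first zone of the next online node. A minimal sketch of that shape, reconstructed from the fragments (the next_online_pgdat() fallback and NULL termination are assumptions, not quoted source):

    struct zone *next_zone(struct zone *zone)
    {
            pg_data_t *pgdat = zone->zone_pgdat;

            /* Still inside this node's node_zones[] array: advance in place. */
            if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
                    zone++;
            else {
                    /* Last zone of this node: move on to the next online node, if any. */
                    pgdat = next_online_pgdat(pgdat);
                    zone = pgdat ? pgdat->node_zones : NULL;
            }
            return zone;
    }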
|
D | shuffle.c | 157 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in __shuffle_free_memory()
|
D | memory_hotplug.c
  |  437 for (zone = pgdat->node_zones; in update_pgdat_span()
  |  438 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
  |  797 zone = pgdat->node_zones + i; in auto_movable_can_online_movable()
  |  841 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn()
  |  847 return &pgdat->node_zones[ZONE_NORMAL]; in default_kernel_zone_for_pfn()
  |  956 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in auto_movable_zone_for_pfn()
  |  966 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
  |  993 return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in zone_for_pfn_range()
  | 1168 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_present_pages()
  | 1753 present_pages += pgdat->node_zones[zt].present_pages; in node_states_check_changes_offline()
  | [all …]
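Most of these call sites use one of two idioms: walking node_zones[] with a zone pointer (lines 437-438, 1168) or indexing it by zone id (lines 797, 841, 1753). A minimal, hypothetical helper (not in the tree) showing the indexed form, in the spirit of the present_pages accumulation at line 1753:

    /* Hypothetical example: total pages physically present in one node,
     * summed across its embedded node_zones[] array. */
    static unsigned long node_present_pages_sum(pg_data_t *pgdat)
    {
            unsigned long pages = 0;
            int zid;

            for (zid = 0; zid < MAX_NR_ZONES; zid++)
                    pages += pgdat->node_zones[zid].present_pages;

            return pages;
    }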
|
D | page_owner.c
  |  691 struct zone *node_zones = pgdat->node_zones; in init_zones_in_node() local
  |  693 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in init_zones_in_node()
|
D | vmstat.c
  |  326 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
  |  985 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
  |  999 struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_event_state()
  | 1439 struct zone *node_zones = pgdat->node_zones; in walk_zones_in_node() local
  | 1442 for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in walk_zones_in_node()
  | 1651 struct zone *compare = &pgdat->node_zones[zid]; in is_zone_first_populated()
|
D | mm_init.c | 44 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
|
D | memremap.c
  |  253 zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
  |  267 memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in pagemap_range()
|
D | compaction.c
  |  382 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable()
  | 2029 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
  | 2660 zone = &pgdat->node_zones[zoneid]; in proactive_compact_node()
  | 2690 zone = &pgdat->node_zones[zoneid]; in compact_node()
  | 2801 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
  | 2837 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
|
D | page_alloc.c
  | 1664 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page()
  | 2145 zone = pgdat->node_zones + zid; in deferred_init_memmap()
  | 6002 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); in si_meminfo_node()
  | 6008 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node()
  | 6078 if (zone_managed_pages(pgdat->node_zones + zone_idx)) in node_has_managed_zones()
  | 6325 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
  | 7018 struct zone *zone = node->node_zones + j; in memmap_init()
  | 7557 struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
  | 7778 zone_init_internals(&pgdat->node_zones[z], z, nid, 0); in free_area_init_core_hotplug()
  | 7800 struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
  | [all …]
|
D | vmscan.c
  |  608 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
  | 1100 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
  | 2869 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_count()
  | 5148 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
  | 6040 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
  | 6524 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
  | 6805 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
  | 6831 zone = pgdat->node_zones + i; in pgdat_balanced()
  | 6919 zone = pgdat->node_zones + z; in kswapd_shrink_node()
  | 6953 zone = pgdat->node_zones + i; in update_reclaim_active()
  | [all …]
|
D | migrate.c
  | 2082 struct zone *zone = pgdat->node_zones + z; in migrate_balanced_pgdat()
  | 2136 if (managed_zone(pgdat->node_zones + z)) in numamigrate_isolate_page()
  | 2139 wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE); in numamigrate_isolate_page()
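The numamigrate_isolate_page() fragments (lines 2136, 2139) suggest the wake-kswapd-on-a-full-node pattern: scan the node's zones from highest to lowest, stop at the first managed one, and kick kswapd for it. A sketch of that pattern under those assumptions (the surrounding NUMA-balancing policy checks are omitted, and the helper name is hypothetical):

    /* Sketch: wake kswapd on the highest managed zone of a node that has no
     * room left for a NUMA-balancing migration of the given order. */
    static void wake_kswapd_for_node(pg_data_t *pgdat, int order)
    {
            int z;

            for (z = pgdat->nr_zones - 1; z >= 0; z--) {
                    if (managed_zone(pgdat->node_zones + z))
                            break;
            }
            if (z >= 0)
                    wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
    }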
|
D | memblock.c | 2113 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_managed_pages()
|
D | page-writeback.c
  |  277 struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory()
  |  313 z = &NODE_DATA(node)->node_zones[i]; in highmem_dirtyable_memory()
|
/linux-6.1.9/tools/testing/memblock/linux/ |
D | mmzone.h | 33 struct zone node_zones[MAX_NR_ZONES]; member
|
/linux-6.1.9/lib/ |
D | show_mem.c | 23 struct zone *zone = &pgdat->node_zones[zoneid]; in __show_mem()
|
/linux-6.1.9/include/linux/ |
D | mmzone.h
  | 1103 struct zone node_zones[MAX_NR_ZONES]; member
  | 1290 #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
  | 1429 for (zone = (first_online_pgdat())->node_zones; \
  | 1434 for (zone = (first_online_pgdat())->node_zones; \
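The include/linux/mmzone.h hits tie the pieces together: node_zones[] is the per-node zone array embedded in struct pglist_data (1103), zone_idx() recovers a zone's index by pointer arithmetic against that array (1290), and the fragments at 1429/1434 belong to the system-wide iterators built on next_zone(). A sketch of those iterators as the fragments suggest (the populated_zone() filter in the second macro is an assumption based on its usual definition):

    #define for_each_zone(zone)                             \
            for (zone = (first_online_pgdat())->node_zones; \
                 zone;                                      \
                 zone = next_zone(zone))

    #define for_each_populated_zone(zone)                   \
            for (zone = (first_online_pgdat())->node_zones; \
                 zone;                                      \
                 zone = next_zone(zone))                    \
                    if (!populated_zone(zone))              \
                            ; /* skip zones with no pages */\
                    else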
|
D | mm_inline.h | 47 __mod_zone_page_state(&pgdat->node_zones[zid], in __update_lru_size()
|
D | mm.h | 1482 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
|
/linux-6.1.9/kernel/ |
D | crash_core.c | 458 VMCOREINFO_OFFSET(pglist_data, node_zones); in crash_save_vmcoreinfo_init()
|
/linux-6.1.9/drivers/base/ |
D | memory.c | 663 zone = pgdat->node_zones + i; in early_node_zone_for_memory_block()
|
/linux-6.1.9/Documentation/admin-guide/kdump/ |
D | vmcoreinfo.rst | 150 (pglist_data, node_zones|nr_zones|node_mem_map|node_start_pfn|node_spanned_pages|node_id)
|
/linux-6.1.9/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_crat.c | 1862 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]); in kfd_fill_mem_info_for_cpu()
|
/linux-6.1.9/kernel/sched/ |
D | fair.c | 1469 struct zone *zone = pgdat->node_zones + z; in pgdat_free_space_enough()
|