/linux-6.6.21/mm/
mmzone.c
    34  if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)  in next_zone()
    39  zone = pgdat->node_zones;  in next_zone()

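The two hits in next_zone() are the core of the system-wide zone iterator: step through the current node's node_zones[] array, then roll over to the first zone of the next online node. A sketch reconstructed from these snippets; the next_online_pgdat() fallback is paraphrased from the upstream helper and may differ in detail:

    struct zone *next_zone(struct zone *zone)
    {
            pg_data_t *pgdat = zone->zone_pgdat;

            if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) {
                    zone++;                 /* next zone on this node */
            } else {
                    pgdat = next_online_pgdat(pgdat);
                    /* first zone of the next node, or NULL when the walk ends */
                    zone = pgdat ? pgdat->node_zones : NULL;
            }
            return zone;
    }

The for_each_zone() openings in include/linux/mmzone.h (lines 1581/1586 below) hand off to this function.
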
shuffle.c
    157  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in __shuffle_free_memory()

memory_hotplug.c
    504   for (zone = pgdat->node_zones;  in update_pgdat_span()
    505   zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {  in update_pgdat_span()
    859   zone = pgdat->node_zones + i;  in auto_movable_can_online_movable()
    903   struct zone *zone = &pgdat->node_zones[zid];  in default_kernel_zone_for_pfn()
    909   return &pgdat->node_zones[ZONE_NORMAL];  in default_kernel_zone_for_pfn()
    1018  return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];  in auto_movable_zone_for_pfn()
    1028  struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];  in default_zone_for_pfn()
    1055  return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];  in zone_for_pfn_range()
    1831  present_pages += pgdat->node_zones[zt].present_pages;  in node_states_check_changes_offline()
    1844  present_pages += pgdat->node_zones[ZONE_MOVABLE].present_pages;  in node_states_check_changes_offline()

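The hits at 903 and 909 show how hotplug picks a kernel zone for a new PFN range: prefer an existing kernel zone whose span already intersects the range, otherwise fall back to ZONE_NORMAL. A sketch under that reading; zone_intersects() is assumed to be the span test the loop relies on:

    static struct zone *default_kernel_zone_for_pfn(int nid,
                    unsigned long start_pfn, unsigned long nr_pages)
    {
            struct pglist_data *pgdat = NODE_DATA(nid);
            int zid;

            /* prefer a kernel zone that already spans the new range */
            for (zid = 0; zid < ZONE_NORMAL; zid++) {
                    struct zone *zone = &pgdat->node_zones[zid];

                    if (zone_intersects(zone, start_pfn, nr_pages))
                            return zone;
            }

            return &pgdat->node_zones[ZONE_NORMAL];
    }
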
show_mem.c
    101  managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);  in si_meminfo_node()
    107  struct zone *zone = &pgdat->node_zones[zone_type];  in si_meminfo_node()
    175  if (zone_managed_pages(pgdat->node_zones + zone_idx))  in node_has_managed_zones()

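si_meminfo_node() totals a node's usable memory by summing zone_managed_pages() across node_zones[]; the same idiom recurs in amdgpu_acpi.c and kfd_crat.c further down. A minimal sketch, with node_managed_pages() as a hypothetical helper name:

    static unsigned long node_managed_pages(pg_data_t *pgdat)
    {
            unsigned long managed_pages = 0;
            int zone_type;

            /* every zone of this node contributes its managed pages */
            for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                    managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);

            return managed_pages;
    }
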
page_owner.c
    693  struct zone *node_zones = pgdat->node_zones;  in init_zones_in_node()  local
    695  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {  in init_zones_in_node()

vmstat.c
    326   zone = &pgdat->node_zones[i];  in set_pgdat_percpu_threshold()
    985   struct zone *zones = NODE_DATA(node)->node_zones;  in sum_zone_node_page_state()
    999   struct zone *zones = NODE_DATA(node)->node_zones;  in sum_zone_numa_event_state()
    1451  struct zone *node_zones = pgdat->node_zones;  in walk_zones_in_node()  local
    1454  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {  in walk_zones_in_node()
    1663  struct zone *compare = &pgdat->node_zones[zid];  in is_zone_first_populated()

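walk_zones_in_node() at 1451/1454, like init_zones_in_node() in page_owner.c above, iterates by pointer arithmetic rather than by index: a struct zone * cursor bounded by zone - node_zones < MAX_NR_ZONES. A simplified sketch of the pattern; the upstream function takes a seq_file and extra flags, which are dropped here, and the visit callback is hypothetical:

    static void walk_zones_in_node(pg_data_t *pgdat,
                                   void (*visit)(struct zone *zone))
    {
            struct zone *node_zones = pgdat->node_zones;
            struct zone *zone;

            for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
                    if (!populated_zone(zone))
                            continue;       /* skip zones with no pages */
                    visit(zone);
            }
    }
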
mm_init.c
    61    zone = &pgdat->node_zones[zoneid];  in mminit_verify_zonelist()
    717   struct zone *zone = &pgdat->node_zones[zid];  in init_reserved_page()
    943   struct zone *zone = node->node_zones + j;  in memmap_init()
    1253  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {  in reset_memoryless_node_totalpages()
    1275  struct zone *zone = pgdat->node_zones + i;  in calculate_node_totalpages()
    1537  struct zone *zone = pgdat->node_zones + z;  in free_area_init_core_hotplug()
    1563  struct zone *zone = pgdat->node_zones + j;  in free_area_init_core()
    1752  struct zone *zone = &pgdat->node_zones[zone_type];  in check_for_memory()
    2223  zone = pgdat->node_zones + zid;  in deferred_init_memmap()

memremap.c
    253  zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];  in pagemap_range()
    267  memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],  in pagemap_range()

compaction.c
    433   struct zone *zone = &pgdat->node_zones[zoneid];  in reset_isolation_suitable()
    2136  zone = &pgdat->node_zones[zoneid];  in fragmentation_score_node()
    2774  zone = &pgdat->node_zones[zoneid];  in proactive_compact_node()
    2806  zone = &pgdat->node_zones[zoneid];  in compact_node()
    2918  zone = &pgdat->node_zones[zoneid];  in kcompactd_node_suitable()
    2960  zone = &pgdat->node_zones[zoneid];  in kcompactd_do_work()

vmscan.c
    661   struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];  in lruvec_lru_size()
    1179  struct zone *zone = pgdat->node_zones + i;  in skip_throttle_noprogress()
    2996  struct zone *zone = &pgdat->node_zones[z];  in prepare_scan_count()
    5374  struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;  in should_abort_scan()
    6471  struct zone *zone = &pgdat->node_zones[z];  in should_continue_reclaim()
    6959  zone = &pgdat->node_zones[i];  in allow_direct_reclaim()
    7240  zone = pgdat->node_zones + i;  in pgdat_watermark_boosted()
    7266  zone = pgdat->node_zones + i;  in pgdat_balanced()
    7355  zone = pgdat->node_zones + z;  in kswapd_shrink_node()
    7389  zone = pgdat->node_zones + i;  in update_reclaim_active()
    [all …]

migrate.c
    2471  struct zone *zone = pgdat->node_zones + z;  in migrate_balanced_pgdat()
    2522  if (managed_zone(pgdat->node_zones + z))  in numamigrate_isolate_page()
    2525  wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);  in numamigrate_isolate_page()

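The hits at 2522/2525 come from NUMA balancing's migration slow path: when the target node has no room, it scans node_zones[] from the top down for the highest managed zone and wakes kswapd on it. A simplified sketch of that scan; the surrounding watermark check is omitted and the helper name is made up here:

    static void wake_kswapd_on_highest_zone(pg_data_t *pgdat, int order)
    {
            int z;

            /* the highest managed zone governs reclaim for this node */
            for (z = pgdat->nr_zones - 1; z >= 0; z--) {
                    if (managed_zone(pgdat->node_zones + z))
                            break;
            }

            if (z >= 0)
                    wakeup_kswapd(pgdat->node_zones + z, 0, order, ZONE_MOVABLE);
    }
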
memblock.c
    2158  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in reset_node_managed_pages()

page_alloc.c
    4818  zone = pgdat->node_zones + zone_type;  in build_zonerefs_node()
    5578  struct zone *zone = pgdat->node_zones + i;  in calculate_totalreserve_pages()
    5615  struct zone *zone = &pgdat->node_zones[i];  in setup_per_zone_lowmem_reserve()
    5621  struct zone *upper_zone = &pgdat->node_zones[j];  in setup_per_zone_lowmem_reserve()
    6558  struct zone *zone = &pgdat->node_zones[ZONE_DMA];  in has_managed_dma()

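The 5615/5621 pair is a nested walk over the same array: for each zone i, every higher zone j > i contributes to zone i's lowmem_reserve[j], sized from the cumulative managed pages above i scaled by sysctl_lowmem_reserve_ratio[i]. A simplified sketch under that reading; the upstream version also clears reserves for empty zones, which is folded into the ratio check here:

    static void setup_per_zone_lowmem_reserve(void)
    {
            struct pglist_data *pgdat;
            int i, j;

            for_each_online_pgdat(pgdat) {
                    for (i = 0; i < MAX_NR_ZONES - 1; i++) {
                            struct zone *zone = &pgdat->node_zones[i];
                            int ratio = sysctl_lowmem_reserve_ratio[i];
                            unsigned long managed_pages = 0;

                            for (j = i + 1; j < MAX_NR_ZONES; j++) {
                                    struct zone *upper_zone = &pgdat->node_zones[j];

                                    /* reserve grows with everything above zone i */
                                    managed_pages += zone_managed_pages(upper_zone);
                                    zone->lowmem_reserve[j] =
                                            ratio ? managed_pages / ratio : 0;
                            }
                    }
            }
    }
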
page-writeback.c
    278  struct zone *zone = pgdat->node_zones + z;  in node_dirtyable_memory()
    314  z = &NODE_DATA(node)->node_zones[i];  in highmem_dirtyable_memory()

/linux-6.6.21/tools/testing/memblock/linux/
mmzone.h
    33  struct zone node_zones[MAX_NR_ZONES];  member

/linux-6.6.21/include/linux/
mmzone.h
    1271  struct zone node_zones[MAX_NR_ZONES];  member
    1463  #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
    1581  for (zone = (first_online_pgdat())->node_zones; \
    1586  for (zone = (first_online_pgdat())->node_zones; \

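Because node_zones[] is a plain array embedded in struct pglist_data, a zone's index is just pointer subtraction, which is what zone_idx() at 1463 encodes; the two for-loop openings at 1581/1586 belong to the for_each_zone() family of iterators, which hand off to next_zone() (see mm/mmzone.c above). A sketch of how they compose; count_populated_zones() is an illustrative caller, not kernel code:

    #define zone_idx(zone)  ((zone) - (zone)->zone_pgdat->node_zones)

    #define for_each_zone(zone)                                  \
            for (zone = (first_online_pgdat())->node_zones;      \
                 zone;                                           \
                 zone = next_zone(zone))

    /* illustrative caller: count populated zones system-wide */
    static int count_populated_zones(void)
    {
            struct zone *zone;
            int n = 0;

            for_each_zone(zone)
                    if (populated_zone(zone))
                            n++;
            return n;
    }
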
mm_inline.h
    47  __mod_zone_page_state(&pgdat->node_zones[zid],  in __update_lru_size()

mm.h
    1844  return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];  in page_zone()

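The hit at 1844 is essentially the whole of page_zone(): the node id and zone number are both decoded from page->flags, so resolving a page to its zone is two lookups into global tables. Spelled out, with the surrounding function body assumed from the snippet:

    static inline struct zone *page_zone(const struct page *page)
    {
            /* node id and zone number are both encoded in page->flags */
            return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
    }
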
/linux-6.6.21/kernel/
crash_core.c
    647  VMCOREINFO_OFFSET(pglist_data, node_zones);  in crash_save_vmcoreinfo_init()

/linux-6.6.21/drivers/base/
memory.c
    699  zone = pgdat->node_zones + i;  in early_node_zone_for_memory_block()

/linux-6.6.21/Documentation/mm/
physical_memory.rst
    211  ``node_zones``

/linux-6.6.21/drivers/gpu/drm/amd/amdgpu/
amdgpu_acpi.c
    857  zone_managed_pages(&pgdat->node_zones[zone_type]);  in amdgpu_acpi_get_numa_size()

/linux-6.6.21/Documentation/admin-guide/kdump/
vmcoreinfo.rst
    150  (pglist_data, node_zones|nr_zones|node_mem_map|node_start_pfn|node_spanned_pages|node_id)

/linux-6.6.21/drivers/gpu/drm/amd/amdkfd/
kfd_crat.c
    1620  mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);  in kfd_fill_mem_info_for_cpu()

/linux-6.6.21/kernel/sched/
fair.c
    1752  struct zone *zone = pgdat->node_zones + z;  in pgdat_free_space_enough()
|