/linux-6.6.21/mm/
D | show_mem.c |
      98  pg_data_t *pgdat = NODE_DATA(nid);  in si_meminfo_node() local
     101  managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);  in si_meminfo_node()
     103  val->sharedram = node_page_state(pgdat, NR_SHMEM);  in si_meminfo_node()
     107  struct zone *zone = &pgdat->node_zones[zone_type];  in si_meminfo_node()
     171  static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)  in node_has_managed_zones() argument
     175  if (zone_managed_pages(pgdat->node_zones + zone_idx))  in node_has_managed_zones()
     194  pg_data_t *pgdat;  in show_free_areas() local
     235  for_each_online_pgdat(pgdat) {  in show_free_areas()
     236  if (show_mem_node_skip(filter, pgdat->node_id, nodemask))  in show_free_areas()
     238  if (!node_has_managed_zones(pgdat, max_zone_idx))  in show_free_areas()
          [all …]

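Read together, the si_meminfo_node() hits above show the usual per-node accounting pattern: resolve the node's pg_data_t with NODE_DATA() and accumulate per-zone counters from pgdat->node_zones[]. The helper below is a minimal sketch of that pattern, not code from show_mem.c; the name si_node_managed_pages() is invented for illustration.

#include <linux/mmzone.h>       /* pg_data_t, NODE_DATA(), zone_managed_pages() */

/* Hypothetical helper: sum the managed pages of every zone on @nid,
 * mirroring the accumulation visible in si_meminfo_node() above. */
static unsigned long si_node_managed_pages(int nid)
{
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long managed_pages = 0;
        int zone_type;

        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);

        return managed_pages;
}
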
D | mm_init.c |
      48  pg_data_t *pgdat = NODE_DATA(nid);  in mminit_verify_zonelist() local
      60  zonelist = &pgdat->node_zonelists[listid];  in mminit_verify_zonelist()
      61  zone = &pgdat->node_zones[zoneid];  in mminit_verify_zonelist()
     653  static inline void pgdat_set_deferred_range(pg_data_t *pgdat)  in pgdat_set_deferred_range() argument
     655  pgdat->first_deferred_pfn = ULONG_MAX;  in pgdat_set_deferred_range()
     708  pg_data_t *pgdat;  in init_reserved_page() local
     714  pgdat = NODE_DATA(nid);  in init_reserved_page()
     717  struct zone *zone = &pgdat->node_zones[zid];  in init_reserved_page()
     725  static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}  in pgdat_set_deferred_range() argument
    1069  struct pglist_data *pgdat = zone->zone_pgdat;  in memmap_init_zone_device() local
          [all …]

D | vmstat.c |
     274  struct pglist_data *pgdat;  in refresh_zone_stat_thresholds() local
     280  for_each_online_pgdat(pgdat) {  in refresh_zone_stat_thresholds()
     282  per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;  in refresh_zone_stat_thresholds()
     287  struct pglist_data *pgdat = zone->zone_pgdat;  in refresh_zone_stat_thresholds() local
     299  pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;  in refresh_zone_stat_thresholds()
     300  per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold  in refresh_zone_stat_thresholds()
     317  void set_pgdat_percpu_threshold(pg_data_t *pgdat,  in set_pgdat_percpu_threshold() argument
     325  for (i = 0; i < pgdat->nr_zones; i++) {  in set_pgdat_percpu_threshold()
     326  zone = &pgdat->node_zones[i];  in set_pgdat_percpu_threshold()
     373  void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,  in __mod_node_page_state() argument
          [all …]

D | vmscan.c |
    1161  static bool skip_throttle_noprogress(pg_data_t *pgdat)  in skip_throttle_noprogress() argument
    1170  if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)  in skip_throttle_noprogress()
    1179  struct zone *zone = pgdat->node_zones + i;  in skip_throttle_noprogress()
    1194  void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)  in reclaim_throttle() argument
    1196  wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];  in reclaim_throttle()
    1225  if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {  in reclaim_throttle()
    1226  WRITE_ONCE(pgdat->nr_reclaim_start,  in reclaim_throttle()
    1227  node_page_state(pgdat, NR_THROTTLED_WRITTEN));  in reclaim_throttle()
    1234  if (skip_throttle_noprogress(pgdat)) {  in reclaim_throttle()
    1256  atomic_dec(&pgdat->nr_writeback_throttled);  in reclaim_throttle()
          [all …]

D | compaction.c |
     428  void reset_isolation_suitable(pg_data_t *pgdat)  in reset_isolation_suitable() argument
     433  struct zone *zone = &pgdat->node_zones[zoneid];  in reset_isolation_suitable()
     790  pg_data_t *pgdat = cc->zone->zone_pgdat;  in too_many_isolated() local
     795  inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +  in too_many_isolated()
     796  node_page_state(pgdat, NR_INACTIVE_ANON);  in too_many_isolated()
     797  active = node_page_state(pgdat, NR_ACTIVE_FILE) +  in too_many_isolated()
     798  node_page_state(pgdat, NR_ACTIVE_ANON);  in too_many_isolated()
     799  isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +  in too_many_isolated()
     800  node_page_state(pgdat, NR_ISOLATED_ANON);  in too_many_isolated()
     815  wake_throttle_isolated(pgdat);  in too_many_isolated()
          [all …]

D | mmzone.c |
      18  struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)  in next_online_pgdat() argument
      20  int nid = next_online_node(pgdat->node_id);  in next_online_pgdat()
      32  pg_data_t *pgdat = zone->zone_pgdat;  in next_zone() local
      34  if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)  in next_zone()
      37  pgdat = next_online_pgdat(pgdat);  in next_zone()
      38  if (pgdat)  in next_zone()
      39  zone = pgdat->node_zones;  in next_zone()

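The two helpers above are the building blocks of the node/zone iterators: next_online_pgdat() advances to the next online node, and next_zone() steps through a node's node_zones[] before moving on. Below is a hypothetical debug walk built on them; walk_all_zones() is an invented name, and populated_zone() skips zones with no present pages.

#include <linux/mmzone.h>       /* for_each_online_pgdat(), populated_zone() */
#include <linux/printk.h>       /* pr_info() */

/* Visit every populated zone of every online node via the iterators above. */
static void walk_all_zones(void)
{
        struct pglist_data *pgdat;
        int zoneid;

        for_each_online_pgdat(pgdat) {
                for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
                        struct zone *zone = &pgdat->node_zones[zoneid];

                        if (!populated_zone(zone))
                                continue;
                        pr_info("node %d zone %s\n", pgdat->node_id, zone->name);
                }
        }
}
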
D | workingset.c |
     198  static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction,  in pack_shadow() argument
     203  eviction = (eviction << NODES_SHIFT) | pgdat->node_id;  in pack_shadow()
     209  static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,  in unpack_shadow() argument
     224  *pgdat = NODE_DATA(nid);  in unpack_shadow()
     243  struct pglist_data *pgdat = folio_pgdat(folio);  in lru_gen_eviction() local
     247  lruvec = mem_cgroup_lruvec(memcg, pgdat);  in lru_gen_eviction()
     255  return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);  in lru_gen_eviction()
     268  struct pglist_data *pgdat;  in lru_gen_test_recent() local
     270  unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);  in lru_gen_test_recent()
     273  *lruvec = mem_cgroup_lruvec(memcg, pgdat);  in lru_gen_test_recent()
          [all …]

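pack_shadow()/unpack_shadow() above store the evicting node's id in the low bits of the shadow entry so that a later refault can find the right pg_data_t again. The sketch below strips that down to just the node-id part; pack_nid()/unpack_nid() are invented names, and the real entries also pack the memcg id, generation counter, and workingset bits.

#include <linux/mmzone.h>       /* pg_data_t, NODE_DATA() */
#include <linux/numa.h>         /* NODES_SHIFT */

/* Simplified: keep the node id in the low NODES_SHIFT bits of the token. */
static unsigned long pack_nid(unsigned long eviction, int nid)
{
        return (eviction << NODES_SHIFT) | nid;
}

static pg_data_t *unpack_nid(unsigned long entry, unsigned long *eviction)
{
        int nid = entry & ((1UL << NODES_SHIFT) - 1);

        *eviction = entry >> NODES_SHIFT;
        return NODE_DATA(nid);
}
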
D | memory-tiers.c |
     227  pg_data_t *pgdat;  in __node_get_memory_tier() local
     229  pgdat = NODE_DATA(node);  in __node_get_memory_tier()
     230  if (!pgdat)  in __node_get_memory_tier()
     237  return rcu_dereference_check(pgdat->memtier,  in __node_get_memory_tier()
     245  pg_data_t *pgdat;  in node_is_toptier() local
     248  pgdat = NODE_DATA(node);  in node_is_toptier()
     249  if (!pgdat)  in node_is_toptier()
     253  memtier = rcu_dereference(pgdat->memtier);  in node_is_toptier()
     267  void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  in node_get_allowed_targets() argument
     277  memtier = rcu_dereference(pgdat->memtier);  in node_get_allowed_targets()
          [all …]

D | shuffle.h |
      11  extern void __shuffle_free_memory(pg_data_t *pgdat);
      13  static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)  in shuffle_free_memory() argument
      17  __shuffle_free_memory(pgdat);  in shuffle_free_memory()
      40  static inline void shuffle_free_memory(pg_data_t *pgdat)  in shuffle_free_memory() argument

D | memory_hotplug.c |
     499  static void update_pgdat_span(struct pglist_data *pgdat)  in update_pgdat_span() argument
     504  for (zone = pgdat->node_zones;  in update_pgdat_span()
     505  zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {  in update_pgdat_span()
     523  pgdat->node_start_pfn = node_start_pfn;  in update_pgdat_span()
     524  pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;  in update_pgdat_span()
     532  struct pglist_data *pgdat = zone->zone_pgdat;  in remove_pfn_range_from_zone() local
     557  update_pgdat_span(pgdat);  in remove_pfn_range_from_zone()
     715  static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,  in resize_pgdat_range() argument
     718  unsigned long old_end_pfn = pgdat_end_pfn(pgdat);  in resize_pgdat_range()
     720  if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)  in resize_pgdat_range()
          [all …]

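update_pgdat_span() above recomputes a node's span after memory offlining as the union of its remaining zone spans; the search view elides the body between the loop and the final assignments. The sketch below reconstructs only the general idea under that assumption: recompute_pgdat_span() is an invented name, and the locking and edge-case handling of the real function are omitted.

#include <linux/mmzone.h>       /* zone_end_pfn(), MAX_NR_ZONES */
#include <linux/minmax.h>       /* min(), max() */

/* Sketch: shrink (or grow) the node span to the union of the spans of its
 * non-empty zones, then write it back to the pgdat, as lines 523-524 do. */
static void recompute_pgdat_span(struct pglist_data *pgdat)
{
        unsigned long node_start_pfn = 0, node_end_pfn = 0;
        struct zone *zone;

        for (zone = pgdat->node_zones;
             zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
                unsigned long end_pfn = zone_end_pfn(zone);

                if (!zone->spanned_pages)
                        continue;
                if (!node_end_pfn) {
                        node_start_pfn = zone->zone_start_pfn;
                        node_end_pfn = end_pfn;
                        continue;
                }
                node_start_pfn = min(node_start_pfn, zone->zone_start_pfn);
                node_end_pfn = max(node_end_pfn, end_pfn);
        }

        pgdat->node_start_pfn = node_start_pfn;
        pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
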
D | bootmem_info.c |
     102  void __init register_page_bootmem_info_node(struct pglist_data *pgdat)  in register_page_bootmem_info_node() argument
     105  int node = pgdat->node_id;  in register_page_bootmem_info_node()
     109  page = virt_to_page(pgdat);  in register_page_bootmem_info_node()
     114  pfn = pgdat->node_start_pfn;  in register_page_bootmem_info_node()
     115  end_pfn = pgdat_end_pfn(pgdat);  in register_page_bootmem_info_node()

D | page_owner.c |
     276  pg_data_t *pgdat, struct zone *zone)  in pagetypeinfo_showmixedcount_print() argument
     352  seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);  in pagetypeinfo_showmixedcount_print()
     621  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)  in init_pages_in_zone() argument
     687  pgdat->node_id, zone->name, count);  in init_pages_in_zone()
     690  static void init_zones_in_node(pg_data_t *pgdat)  in init_zones_in_node() argument
     693  struct zone *node_zones = pgdat->node_zones;  in init_zones_in_node()
     699  init_pages_in_zone(pgdat, zone);  in init_zones_in_node()
     705  pg_data_t *pgdat;  in init_early_allocated_pages() local
     707  for_each_online_pgdat(pgdat)  in init_early_allocated_pages()
     708  init_zones_in_node(pgdat);  in init_early_allocated_pages()

D | shuffle.c |
     153  void __meminit __shuffle_free_memory(pg_data_t *pgdat)  in __shuffle_free_memory() argument
     157  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in __shuffle_free_memory()

D | sparse.c |
     321  static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)  in pgdat_to_phys() argument
     324  VM_BUG_ON(pgdat != &contig_page_data);  in pgdat_to_phys()
     327  return __pa(pgdat);  in pgdat_to_phys()
     332  sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,  in sparse_early_usemaps_alloc_pgdat_section() argument
     348  goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);  in sparse_early_usemaps_alloc_pgdat_section()
     366  struct pglist_data *pgdat = NODE_DATA(nid);  in check_usemap_section_nr() local
     376  pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);  in check_usemap_section_nr()
     404  sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,  in sparse_early_usemaps_alloc_pgdat_section() argument
     407  return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);  in sparse_early_usemaps_alloc_pgdat_section()

/linux-6.6.21/include/linux/
D | memory_hotplug.h |
      30  extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
      44  memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
      48  static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)  in arch_refresh_nodedata() argument
      50  node_data[nid] = pgdat;  in arch_refresh_nodedata()
      61  static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)  in arch_refresh_nodedata() argument
     213  static inline void pgdat_kswapd_lock(pg_data_t *pgdat)  in pgdat_kswapd_lock() argument
     215  mutex_lock(&pgdat->kswapd_lock);  in pgdat_kswapd_lock()
     218  static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)  in pgdat_kswapd_unlock() argument
     220  mutex_unlock(&pgdat->kswapd_lock);  in pgdat_kswapd_unlock()
     223  static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)  in pgdat_kswapd_lock_init() argument
          [all …]

D | compaction.h |
      91  extern void reset_isolation_suitable(pg_data_t *pgdat);
     103  extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
     106  static inline void reset_isolation_suitable(pg_data_t *pgdat)  in reset_isolation_suitable() argument
     123  static inline void wakeup_kcompactd(pg_data_t *pgdat,  in wakeup_kcompactd() argument

D | memcontrol.h |
      57  pg_data_t *pgdat;  member
     728  struct pglist_data *pgdat)  in mem_cgroup_lruvec() argument
     734  lruvec = &pgdat->__lruvec;  in mem_cgroup_lruvec()
     741  mz = memcg->nodeinfo[pgdat->node_id];  in mem_cgroup_lruvec()
     749  if (unlikely(lruvec->pgdat != pgdat))  in mem_cgroup_lruvec()
     750  lruvec->pgdat = pgdat;  in mem_cgroup_lruvec()
    1158  unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
    1281  struct pglist_data *pgdat)  in mem_cgroup_lruvec() argument
    1283  return &pgdat->__lruvec;  in mem_cgroup_lruvec()
    1288  struct pglist_data *pgdat = folio_pgdat(folio);  in folio_lruvec() local
          [all …]

D | vmstat.h |
     172  static inline void node_page_state_add(long x, struct pglist_data *pgdat,  in node_page_state_add() argument
     175  atomic_long_add(x, &pgdat->vm_stat[item]);  in node_page_state_add()
     261  extern unsigned long node_page_state(struct pglist_data *pgdat,
     263  extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
     311  void set_pgdat_percpu_threshold(pg_data_t *pgdat,
     325  static inline void __mod_node_page_state(struct pglist_data *pgdat,  in __mod_node_page_state() argument
     339  node_page_state_add(delta, pgdat, item);  in __mod_node_page_state()
     348  static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)  in __inc_node_state() argument
     350  atomic_long_inc(&pgdat->vm_stat[item]);  in __inc_node_state()
     360  static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)  in __dec_node_state() argument
          [all …]

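These inline variants resolve node stat updates straight to atomic_long operations on pgdat->vm_stat[] (the SMP variants, which batch through per-CPU thresholds, live in mm/vmstat.c, line 373 above). A hypothetical caller is sketched below; account_shmem_pages() is an invented name, and it assumes the update is already serialized the way the real callers guarantee.

#include <linux/vmstat.h>       /* __mod_node_page_state(), node_page_state() */
#include <linux/printk.h>       /* pr_debug() */

/* Invented example: charge @nr shmem pages to @pgdat and read them back. */
static void account_shmem_pages(struct pglist_data *pgdat, long nr)
{
        __mod_node_page_state(pgdat, NR_SHMEM, nr);
        pr_debug("node %d shmem pages: %lu\n", pgdat->node_id,
                 node_page_state(pgdat, NR_SHMEM));
}
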
D | memory-tiers.h |
      41  void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
      49  static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  in node_get_allowed_targets() argument
      91  static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  in node_get_allowed_targets() argument

D | mmzone.h |
     552  void lru_gen_init_pgdat(struct pglist_data *pgdat);
     568  static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)  in lru_gen_init_pgdat() argument
     576  static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)  in lru_gen_init_pgdat() argument
     642  struct pglist_data *pgdat;  member
    1413  static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)  in pgdat_end_pfn() argument
    1415  return pgdat->node_start_pfn + pgdat->node_spanned_pages;  in pgdat_end_pfn()
    1420  void build_all_zonelists(pg_data_t *pgdat);
    1448  return lruvec->pgdat;  in lruvec_pgdat()
    1562  extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
    1569  #define for_each_online_pgdat(pgdat) \  argument
          [all …]

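pgdat_end_pfn() above defines the exclusive end of a node's span as node_start_pfn + node_spanned_pages. An illustrative walk over that span follows; walk_node_span() is an invented helper, and the pfn_valid() check is needed because a span may contain holes.

#include <linux/mmzone.h>       /* pg_data_t, pgdat_end_pfn() */

/* Visit each valid page-frame number spanned by @pgdat. */
static void walk_node_span(pg_data_t *pgdat)
{
        unsigned long pfn;

        for (pfn = pgdat->node_start_pfn; pfn < pgdat_end_pfn(pgdat); pfn++) {
                if (!pfn_valid(pfn))
                        continue;
                /* ... inspect pfn_to_page(pfn) here ... */
        }
}
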
D | node.h |
     119  struct pglist_data *pgdat = NODE_DATA(nid);  in register_one_node() local
     120  unsigned long start_pfn = pgdat->node_start_pfn;  in register_one_node()
     121  unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;  in register_one_node()

D | bootmem_info.h |
      21  void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
      48  static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)  in register_page_bootmem_info_node() argument

/linux-6.6.21/tools/testing/memblock/linux/
D | mmzone.h |
       8  struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
      10  #define for_each_online_pgdat(pgdat) \  argument
      11  for (pgdat = first_online_pgdat(); \
      12  pgdat; \
      13  pgdat = next_online_pgdat(pgdat))

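This stub lets memblock tests reuse the kernel's node-iteration idiom: for_each_online_pgdat() is just a for-loop over first_online_pgdat()/next_online_pgdat(), terminating when the latter returns NULL. A hypothetical test helper (count_online_nodes() is an invented name, assuming the test harness's stubbed header above):

#include <linux/mmzone.h>       /* the stubbed iterator above */

/* Count online nodes by walking the stubbed pgdat list. */
static int count_online_nodes(void)
{
        struct pglist_data *pgdat;
        int nr = 0;

        for_each_online_pgdat(pgdat)
                nr++;
        return nr;
}
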
/linux-6.6.21/drivers/base/
D | node.c |
     374  struct pglist_data *pgdat = NODE_DATA(nid);  in node_read_meminfo() local
     380  sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);  in node_read_meminfo()
     381  sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);  in node_read_meminfo()
     383  swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);  in node_read_meminfo()
     402  nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +  in node_read_meminfo()
     403  node_page_state(pgdat, NR_ACTIVE_FILE)),  in node_read_meminfo()
     404  nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +  in node_read_meminfo()
     405  node_page_state(pgdat, NR_INACTIVE_FILE)),  in node_read_meminfo()
     406  nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),  in node_read_meminfo()
     407  nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),  in node_read_meminfo()
          [all …]

/linux-6.6.21/arch/ia64/include/asm/
D | nodedata.h |
      57  #define LOCAL_DATA_ADDR(pgdat) \  argument
      58  ((struct ia64_node_data *)((u64)(pgdat) + \