Searched refs:pgdat (Results 1 – 25 of 48) sorted by relevance

/linux-6.1.9/mm/
vmstat.c
274 struct pglist_data *pgdat; in refresh_zone_stat_thresholds() local
280 for_each_online_pgdat(pgdat) { in refresh_zone_stat_thresholds()
282 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
287 struct pglist_data *pgdat = zone->zone_pgdat; in refresh_zone_stat_thresholds() local
299 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
300 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
317 void set_pgdat_percpu_threshold(pg_data_t *pgdat, in set_pgdat_percpu_threshold() argument
325 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
326 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
373 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, in __mod_node_page_state() argument
[all …]
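
The vmstat.c matches above revolve around stat_threshold, the per-cpu slack that lets counter updates batch locally before being folded into the shared node counter; refresh_zone_stat_thresholds() zeroing it forces exact accounting. A minimal userspace sketch of that batching idea, with invented names and a made-up threshold value:

/* Sketch only: plain fields stand in for the kernel's per-cpu data. */
#include <stdio.h>
#include <stdlib.h>

struct pcpu_counter {
    long shared;      /* stands in for pgdat->vm_stat[item] */
    int  threshold;   /* stands in for ->stat_threshold */
    long local;       /* stands in for the per-cpu diff */
};

static void mod_counter(struct pcpu_counter *c, long delta)
{
    c->local += delta;
    if (labs(c->local) > c->threshold) {  /* fold once past the threshold */
        c->shared += c->local;
        c->local = 0;
    }
}

int main(void)
{
    struct pcpu_counter c = { .shared = 0, .threshold = 32, .local = 0 };

    for (int i = 0; i < 100; i++)
        mod_counter(&c, 1);
    printf("batched: shared=%ld local=%ld\n", c.shared, c.local);

    c.threshold = 0;                      /* threshold 0 = exact mode */
    mod_counter(&c, 1);
    printf("exact:   shared=%ld local=%ld\n", c.shared, c.local);
    return 0;
}
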
compaction.c
377 void reset_isolation_suitable(pg_data_t *pgdat) in reset_isolation_suitable() argument
382 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable()
748 static bool too_many_isolated(pg_data_t *pgdat) in too_many_isolated() argument
754 inactive = node_page_state(pgdat, NR_INACTIVE_FILE) + in too_many_isolated()
755 node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
756 active = node_page_state(pgdat, NR_ACTIVE_FILE) + in too_many_isolated()
757 node_page_state(pgdat, NR_ACTIVE_ANON); in too_many_isolated()
758 isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + in too_many_isolated()
759 node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
763 wake_throttle_isolated(pgdat); in too_many_isolated()
[all …]
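
The compaction.c hits center on too_many_isolated(), which backs off once the pages isolated from the LRU outnumber half of what is still on the active and inactive lists. A standalone sketch of that heuristic as I read it from the snippet (the full source applies extra conditions not visible here):

#include <stdbool.h>
#include <stdio.h>

static bool too_many_isolated(unsigned long inactive,
                              unsigned long active,
                              unsigned long isolated)
{
    /* back off once isolated pages exceed half the resident LRU pages */
    return isolated > (inactive + active) / 2;
}

int main(void)
{
    printf("%d\n", too_many_isolated(1000, 1000, 900));   /* 0: keep going */
    printf("%d\n", too_many_isolated(1000, 1000, 1100));  /* 1: throttle  */
    return 0;
}
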
vmscan.c
1082 static bool skip_throttle_noprogress(pg_data_t *pgdat) in skip_throttle_noprogress() argument
1091 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
1100 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
1115 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) in reclaim_throttle() argument
1117 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
1146 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
1147 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
1148 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); in reclaim_throttle()
1155 if (skip_throttle_noprogress(pgdat)) { in reclaim_throttle()
1177 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
[all …]
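
In reclaim_throttle() above, the first task to throttle (atomic_inc_return() returning 1) snapshots NR_THROTTLED_WRITTEN into nr_reclaim_start, so the wakeup path can measure how much writeback progressed while tasks slept. A sketch of that first-sleeper-records-baseline pattern, with plain variables standing in for the kernel's atomics and wait queues:

#include <stdio.h>

static int  nr_throttled;       /* stands in for nr_writeback_throttled */
static long reclaim_start;      /* stands in for nr_reclaim_start */
static long throttled_written;  /* stands in for NR_THROTTLED_WRITTEN */

static void throttle_enter(void)
{
    if (++nr_throttled == 1)                /* only the first sleeper...   */
        reclaim_start = throttled_written;  /* ...snapshots the baseline   */
}

static void throttle_exit(void)
{
    --nr_throttled;
}

int main(void)
{
    throttle_enter();           /* first sleeper records baseline 0 */
    throttled_written += 128;   /* writeback completes while throttled */
    printf("progress: %ld pages\n", throttled_written - reclaim_start);
    throttle_exit();
    return 0;
}
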
mmzone.c
18 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) in next_online_pgdat() argument
20 int nid = next_online_node(pgdat->node_id); in next_online_pgdat()
32 pg_data_t *pgdat = zone->zone_pgdat; in next_zone() local
34 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
37 pgdat = next_online_pgdat(pgdat); in next_zone()
38 if (pgdat) in next_zone()
39 zone = pgdat->node_zones; in next_zone()
workingset.c
187 static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, in pack_shadow() argument
192 eviction = (eviction << NODES_SHIFT) | pgdat->node_id; in pack_shadow()
198 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, in unpack_shadow() argument
213 *pgdat = NODE_DATA(nid); in unpack_shadow()
232 struct pglist_data *pgdat = folio_pgdat(folio); in lru_gen_eviction() local
236 lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_eviction()
244 return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); in lru_gen_eviction()
257 struct pglist_data *pgdat; in lru_gen_refault() local
261 unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset); in lru_gen_refault()
263 if (pgdat != folio_pgdat(folio)) in lru_gen_refault()
[all …]
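
pack_shadow()/unpack_shadow() above squeeze the memcg id, the node id, and an eviction counter into one unsigned long, which is what gets stored in the page-cache slot of an evicted folio. A sketch of that bit packing, with arbitrary field widths in place of the kernel's MEM_CGROUP_ID_SHIFT and NODES_SHIFT:

#include <stdio.h>

#define MEMCG_SHIFT 16   /* illustrative field widths, not the kernel's */
#define NODES_SHIFT 6

static unsigned long pack_shadow(int memcgid, int nid, unsigned long eviction)
{
    unsigned long v = eviction;

    v = (v << NODES_SHIFT) | nid;      /* node id below the eviction count */
    v = (v << MEMCG_SHIFT) | memcgid;  /* memcg id in the low bits */
    return v;
}

static void unpack_shadow(unsigned long v, int *memcgid, int *nid,
                          unsigned long *eviction)
{
    *memcgid = v & ((1UL << MEMCG_SHIFT) - 1);
    v >>= MEMCG_SHIFT;
    *nid = v & ((1UL << NODES_SHIFT) - 1);
    *eviction = v >> NODES_SHIFT;
}

int main(void)
{
    int memcgid, nid;
    unsigned long eviction;

    unpack_shadow(pack_shadow(42, 1, 123456), &memcgid, &nid, &eviction);
    printf("memcg=%d node=%d eviction=%lu\n", memcgid, nid, eviction);
    return 0;
}
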
page_alloc.c
1654 pg_data_t *pgdat; in init_reserved_page() local
1661 pgdat = NODE_DATA(nid); in init_reserved_page()
1664 struct zone *zone = &pgdat->node_zones[zid]; in init_reserved_page()
2110 pg_data_t *pgdat = data; in deferred_init_memmap() local
2111 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in deferred_init_memmap()
2123 pgdat_resize_lock(pgdat, &flags); in deferred_init_memmap()
2124 first_init_pfn = pgdat->first_deferred_pfn; in deferred_init_memmap()
2126 pgdat_resize_unlock(pgdat, &flags); in deferred_init_memmap()
2132 BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); in deferred_init_memmap()
2133 BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); in deferred_init_memmap()
[all …]
memory-tiers.c
227 pg_data_t *pgdat; in __node_get_memory_tier() local
229 pgdat = NODE_DATA(node); in __node_get_memory_tier()
230 if (!pgdat) in __node_get_memory_tier()
237 return rcu_dereference_check(pgdat->memtier, in __node_get_memory_tier()
245 pg_data_t *pgdat; in node_is_toptier() local
248 pgdat = NODE_DATA(node); in node_is_toptier()
249 if (!pgdat) in node_is_toptier()
253 memtier = rcu_dereference(pgdat->memtier); in node_is_toptier()
267 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets) in node_get_allowed_targets() argument
277 memtier = rcu_dereference(pgdat->memtier); in node_get_allowed_targets()
[all …]
memory_hotplug.c
432 static void update_pgdat_span(struct pglist_data *pgdat) in update_pgdat_span() argument
437 for (zone = pgdat->node_zones; in update_pgdat_span()
438 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
456 pgdat->node_start_pfn = node_start_pfn; in update_pgdat_span()
457 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; in update_pgdat_span()
465 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone() local
490 update_pgdat_span(pgdat); in remove_pfn_range_from_zone()
653 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn, in resize_pgdat_range() argument
656 unsigned long old_end_pfn = pgdat_end_pfn(pgdat); in resize_pgdat_range()
658 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) in resize_pgdat_range()
[all …]
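
update_pgdat_span() above recomputes node_start_pfn and node_spanned_pages as the union of the node's zone spans, which is how the node shrinks after memory is hot-removed. A sketch of that min/max pass over hard-coded zone ranges:

#include <stdio.h>

struct zone_span { unsigned long start_pfn, spanned; };

int main(void)
{
    struct zone_span zones[] = {
        { .start_pfn = 0x1000, .spanned = 0x1000 },
        { .start_pfn = 0x4000, .spanned = 0x2000 },
        { .start_pfn = 0,      .spanned = 0 },     /* empty zone: skipped */
    };
    unsigned long node_start = ~0UL, node_end = 0;

    for (int i = 0; i < 3; i++) {
        unsigned long end;

        if (!zones[i].spanned)
            continue;
        end = zones[i].start_pfn + zones[i].spanned;
        if (zones[i].start_pfn < node_start)
            node_start = zones[i].start_pfn;
        if (end > node_end)
            node_end = end;
    }
    printf("node: start_pfn=%#lx spanned=%#lx\n",
           node_start, node_end - node_start);
    return 0;
}
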
shuffle.h
11 extern void __shuffle_free_memory(pg_data_t *pgdat);
13 static inline void __meminit shuffle_free_memory(pg_data_t *pgdat) in shuffle_free_memory() argument
17 __shuffle_free_memory(pgdat); in shuffle_free_memory()
40 static inline void shuffle_free_memory(pg_data_t *pgdat) in shuffle_free_memory() argument
bootmem_info.c
102 void __init register_page_bootmem_info_node(struct pglist_data *pgdat) in register_page_bootmem_info_node() argument
105 int node = pgdat->node_id; in register_page_bootmem_info_node()
109 page = virt_to_page(pgdat); in register_page_bootmem_info_node()
114 pfn = pgdat->node_start_pfn; in register_page_bootmem_info_node()
115 end_pfn = pgdat_end_pfn(pgdat); in register_page_bootmem_info_node()
page_owner.c
274 pg_data_t *pgdat, struct zone *zone) in pagetypeinfo_showmixedcount_print() argument
350 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name); in pagetypeinfo_showmixedcount_print()
619 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) in init_pages_in_zone() argument
685 pgdat->node_id, zone->name, count); in init_pages_in_zone()
688 static void init_zones_in_node(pg_data_t *pgdat) in init_zones_in_node() argument
691 struct zone *node_zones = pgdat->node_zones; in init_zones_in_node()
697 init_pages_in_zone(pgdat, zone); in init_zones_in_node()
703 pg_data_t *pgdat; in init_early_allocated_pages() local
705 for_each_online_pgdat(pgdat) in init_early_allocated_pages()
706 init_zones_in_node(pgdat); in init_early_allocated_pages()
shuffle.c
153 void __meminit __shuffle_free_memory(pg_data_t *pgdat) in __shuffle_free_memory() argument
157 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in __shuffle_free_memory()
sparse.c
321 static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat) in pgdat_to_phys() argument
324 VM_BUG_ON(pgdat != &contig_page_data); in pgdat_to_phys()
327 return __pa(pgdat); in pgdat_to_phys()
333 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, in sparse_early_usemaps_alloc_pgdat_section() argument
349 goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT); in sparse_early_usemaps_alloc_pgdat_section()
367 struct pglist_data *pgdat = NODE_DATA(nid); in check_usemap_section_nr() local
377 pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT); in check_usemap_section_nr()
405 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, in sparse_early_usemaps_alloc_pgdat_section() argument
408 return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id); in sparse_early_usemaps_alloc_pgdat_section()
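
The goal computation in sparse_early_usemaps_alloc_pgdat_section() above rounds the pgdat's physical address down to the start of its memory section, so the usemap is allocated in the same section as the pgdat it describes. A toy version of that alignment with an illustrative section size:

#include <stdio.h>

#define SECTION_SIZE_BITS 27   /* e.g. 128 MiB sections; arch-dependent */

int main(void)
{
    unsigned long pgdat_phys = 0x12345678UL;  /* invented address */
    unsigned long goal = pgdat_phys & ~((1UL << SECTION_SIZE_BITS) - 1);

    printf("pgdat at %#lx -> allocation goal %#lx\n", pgdat_phys, goal);
    return 0;
}
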
/linux-6.1.9/include/linux/
memory_hotplug.h
30 extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
44 memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
48 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) in arch_refresh_nodedata() argument
50 node_data[nid] = pgdat; in arch_refresh_nodedata()
61 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) in arch_refresh_nodedata() argument
211 static inline void pgdat_kswapd_lock(pg_data_t *pgdat) in pgdat_kswapd_lock() argument
213 mutex_lock(&pgdat->kswapd_lock); in pgdat_kswapd_lock()
216 static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) in pgdat_kswapd_unlock() argument
218 mutex_unlock(&pgdat->kswapd_lock); in pgdat_kswapd_unlock()
221 static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) in pgdat_kswapd_lock_init() argument
[all …]
memcontrol.h
57 pg_data_t *pgdat; member
709 struct pglist_data *pgdat) in mem_cgroup_lruvec() argument
715 lruvec = &pgdat->__lruvec; in mem_cgroup_lruvec()
722 mz = memcg->nodeinfo[pgdat->node_id]; in mem_cgroup_lruvec()
730 if (unlikely(lruvec->pgdat != pgdat)) in mem_cgroup_lruvec()
731 lruvec->pgdat = pgdat; in mem_cgroup_lruvec()
1139 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
1251 struct pglist_data *pgdat) in mem_cgroup_lruvec() argument
1253 return &pgdat->__lruvec; in mem_cgroup_lruvec()
1258 struct pglist_data *pgdat = folio_pgdat(folio); in folio_lruvec() local
[all …]
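
mem_cgroup_lruvec() above falls back to the node's built-in __lruvec when no memcg is in play, and otherwise returns the per-memcg, per-node lruvec while lazily fixing up its pgdat back-pointer. A pared-down model of that selection logic (in the real kernel, nodeinfo holds mem_cgroup_per_node structures rather than bare lruvecs):

#include <stdio.h>

struct pglist_data;
struct lruvec { struct pglist_data *pgdat; };
struct pglist_data { int node_id; struct lruvec __lruvec; };
struct mem_cgroup { struct lruvec nodeinfo[2]; };

static struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
                                        struct pglist_data *pgdat)
{
    struct lruvec *lruvec;

    if (!memcg) {                       /* memcg disabled: node fallback */
        lruvec = &pgdat->__lruvec;
    } else {
        lruvec = &memcg->nodeinfo[pgdat->node_id];
        if (lruvec->pgdat != pgdat)     /* lazy back-pointer fixup */
            lruvec->pgdat = pgdat;
    }
    return lruvec;
}

int main(void)
{
    struct pglist_data node0 = { .node_id = 0 };
    struct mem_cgroup cg = { 0 };

    printf("no memcg: %p\n", (void *)mem_cgroup_lruvec(NULL, &node0));
    printf("memcg:    %p\n", (void *)mem_cgroup_lruvec(&cg, &node0));
    return 0;
}
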
vmstat.h
166 static inline void node_page_state_add(long x, struct pglist_data *pgdat, in node_page_state_add() argument
169 atomic_long_add(x, &pgdat->vm_stat[item]); in node_page_state_add()
255 extern unsigned long node_page_state(struct pglist_data *pgdat,
257 extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
305 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
319 static inline void __mod_node_page_state(struct pglist_data *pgdat, in __mod_node_page_state() argument
333 node_page_state_add(delta, pgdat, item); in __mod_node_page_state()
342 static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) in __inc_node_state() argument
344 atomic_long_inc(&pgdat->vm_stat[item]); in __inc_node_state()
354 static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) in __dec_node_state() argument
[all …]
compaction.h
98 extern void reset_isolation_suitable(pg_data_t *pgdat);
182 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
185 static inline void reset_isolation_suitable(pg_data_t *pgdat) in reset_isolation_suitable() argument
222 static inline void wakeup_kcompactd(pg_data_t *pgdat, in wakeup_kcompactd() argument
memory-tiers.h
42 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
50 static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets) in node_get_allowed_targets() argument
92 static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets) in node_get_allowed_targets() argument
node.h
119 struct pglist_data *pgdat = NODE_DATA(nid); in register_one_node() local
120 unsigned long start_pfn = pgdat->node_start_pfn; in register_one_node()
121 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; in register_one_node()
mmzone.h
532 struct pglist_data *pgdat; member
1240 static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) in pgdat_end_pfn() argument
1242 return pgdat->node_start_pfn + pgdat->node_spanned_pages; in pgdat_end_pfn()
1247 void build_all_zonelists(pg_data_t *pgdat);
1275 return lruvec->pgdat; in lruvec_pgdat()
1410 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
1417 #define for_each_online_pgdat(pgdat) \ argument
1418 for (pgdat = first_online_pgdat(); \
1419 pgdat; \
1420 pgdat = next_online_pgdat(pgdat))
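
for_each_online_pgdat() above is just a for-loop from first_online_pgdat() until next_online_pgdat() returns NULL. A runnable sketch of the same macro shape, with stub functions faking a two-node machine:

#include <stdio.h>

struct pglist_data { int node_id; };

static struct pglist_data nodes[] = { { .node_id = 0 }, { .node_id = 1 } };

static struct pglist_data *first_online_pgdat(void)
{
    return &nodes[0];
}

static struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
    int nid = pgdat->node_id + 1;
    return nid < 2 ? &nodes[nid] : NULL;  /* NULL terminates the walk */
}

#define for_each_online_pgdat(pgdat)        \
    for (pgdat = first_online_pgdat();      \
         pgdat;                             \
         pgdat = next_online_pgdat(pgdat))

int main(void)
{
    struct pglist_data *pgdat;

    for_each_online_pgdat(pgdat)
        printf("online node %d\n", pgdat->node_id);
    return 0;
}
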
bootmem_info.h
20 void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
47 static inline void register_page_bootmem_info_node(struct pglist_data *pgdat) in register_page_bootmem_info_node() argument
/linux-6.1.9/tools/testing/memblock/linux/
mmzone.h
8 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
10 #define for_each_online_pgdat(pgdat) \ argument
11 for (pgdat = first_online_pgdat(); \
12 pgdat; \
13 pgdat = next_online_pgdat(pgdat))
/linux-6.1.9/drivers/base/
node.c
375 struct pglist_data *pgdat = NODE_DATA(nid); in node_read_meminfo() local
381 sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B); in node_read_meminfo()
382 sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B); in node_read_meminfo()
384 swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE); in node_read_meminfo()
403 nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) + in node_read_meminfo()
404 node_page_state(pgdat, NR_ACTIVE_FILE)), in node_read_meminfo()
405 nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) + in node_read_meminfo()
406 node_page_state(pgdat, NR_INACTIVE_FILE)), in node_read_meminfo()
407 nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)), in node_read_meminfo()
408 nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)), in node_read_meminfo()
[all …]
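
node_read_meminfo() above builds the per-node Active/Inactive figures by summing the anon and file LRU counters and converting pages to kilobytes (the K() macro). A sketch of that arithmetic with invented counter values and an assumed 4 KiB page size:

#include <stdio.h>

#define K(pages) ((pages) * 4UL)   /* pages -> KiB, assuming 4 KiB pages */

int main(void)
{
    /* stand-ins for node_page_state(pgdat, NR_ACTIVE_ANON) etc. */
    unsigned long active_anon = 300, active_file = 700;
    unsigned long inactive_anon = 100, inactive_file = 900;

    printf("Node 0 Active:   %8lu kB\n", K(active_anon + active_file));
    printf("Node 0 Inactive: %8lu kB\n", K(inactive_anon + inactive_file));
    return 0;
}
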
/linux-6.1.9/lib/
show_mem.c
13 pg_data_t *pgdat; in __show_mem() local
19 for_each_online_pgdat(pgdat) { in __show_mem()
23 struct zone *zone = &pgdat->node_zones[zoneid]; in __show_mem()
/linux-6.1.9/arch/ia64/include/asm/
nodedata.h
57 #define LOCAL_DATA_ADDR(pgdat) \ argument
58 ((struct ia64_node_data *)((u64)(pgdat) + \
