/linux-6.1.9/mm/ |
D | page_ext.c |
    199  base = NODE_DATA(page_to_nid(page))->node_page_ext;  in lookup_page_ext()
    208  index = pfn - round_down(node_start_pfn(page_to_nid(page)),  in lookup_page_ext()
|
D | hugetlb_cgroup.c |
    320  h_cg->nodeinfo[page_to_nid(page)]->usage[idx];  in __hugetlb_cgroup_commit_charge()
    326  WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],  in __hugetlb_cgroup_commit_charge()
    369  h_cg->nodeinfo[page_to_nid(page)]->usage[idx];  in __hugetlb_cgroup_uncharge_page()
    375  WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],  in __hugetlb_cgroup_uncharge_page()
|
D | list_lru.c |
    121  int nid = page_to_nid(virt_to_page(item));  in list_lru_add()
    145  int nid = page_to_nid(virt_to_page(item));  in list_lru_del()
|
D | migrate.c |
    574  bool f_toptier = node_is_toptier(page_to_nid(&folio->page));  in folio_migrate_flags()
    575  bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));  in folio_migrate_flags()
    1713 if (page_to_nid(page) == node)  in add_page_for_migration()
    1914 err = page_to_nid(page);  in do_pages_stat_array()
    2207 if (!node_is_toptier(page_to_nid(page)) && node_is_toptier(node))  in migrate_misplaced_page()
|
D | sparse.c |
    46   int page_to_nid(const struct page *page)  in page_to_nid() function
    50   EXPORT_SYMBOL(page_to_nid);
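
Note: the sparse.c hits are the out-of-line definition of page_to_nid(), used when NODE_NOT_IN_PAGE_FLAGS is set and the node id cannot be encoded in page->flags. A sketch of that variant as it appears around mm/sparse.c:46, recovering the node from a per-memory-section table (treat this as a sketch, not a verified copy):

    /* Out-of-line page_to_nid() (sketch of mm/sparse.c:46): with
     * NODE_NOT_IN_PAGE_FLAGS there is no room for the node id in
     * page->flags, so it is kept per memory section instead. */
    #ifdef NODE_NOT_IN_PAGE_FLAGS
    #if MAX_NUMNODES <= 256
    static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
    #else
    static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
    #endif

    int page_to_nid(const struct page *page)
    {
            return section_to_node_table[page_to_section(page)];
    }
    EXPORT_SYMBOL(page_to_nid);  /* line 50: modules need the symbol */
    #endif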
|
D | hugetlb.c |
    59   return cma_pages_valid(hugetlb_cma[page_to_nid(page)], page,  in hugetlb_cma_page()
    1280 int nid = page_to_nid(page);  in enqueue_huge_page()
    1521 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))  in free_gigantic_page()
    1596 int nid = page_to_nid(page);  in __remove_hugetlb_page()
    1663 int nid = page_to_nid(page);  in add_hugetlb_page()
    1855 int nid = page_to_nid(page);  in free_huge_page()
    2195 prep_new_huge_page(h, page, page_to_nid(page));  in alloc_fresh_huge_page()
    2410 h->surplus_huge_pages_node[page_to_nid(page)]++;  in alloc_surplus_huge_page()
    2898 int nid = page_to_nid(old_page);  in alloc_and_dissolve_huge_page()
    3195 prep_new_huge_page(h, page, page_to_nid(page));  in gather_bootmem_prealloc()
    [all …]
|
D | hugetlb_vmemmap.c | 365 int nid = page_to_nid((struct page *)start); in alloc_vmemmap_page_list()
|
D | huge_memory.c |
    565  struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));  in get_deferred_split_queue()
    575  struct pglist_data *pgdat = NODE_DATA(page_to_nid(page));  in get_deferred_split_queue()
    1503 page_nid = page_to_nid(page);  in do_huge_pmd_numa_page()
    1846 toptier = node_is_toptier(page_to_nid(page));  in change_huge_pmd()
    2828 set_shrinker_bit(memcg, page_to_nid(page),  in deferred_split_huge_page()
|
D | mempolicy.c |
    438  int nid = page_to_nid(page);  in queue_pages_required()
    922  ret = page_to_nid(p);  in lookup_node()
    2123 if (page && page_to_nid(page) == nid) {  in alloc_page_interleave()
    2572 int curnid = page_to_nid(page);  in mpol_misplaced()
|
D | mprotect.c | 152 nid = page_to_nid(page); in change_pte_range()
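
Note: the migrate.c (574/575, 2207), huge_memory.c (1846), and mprotect.c hits all feed memory tiering: page_to_nid() identifies which tier a page currently occupies. A minimal sketch of the promotion test behind migrate.c:2207; the helper name is hypothetical:

    #include <linux/mm.h>
    #include <linux/node.h>

    /* Hypothetical helper condensing the test at migrate.c:2207: promote
     * a misplaced page only when it sits on a lower tier and the target
     * node is a top (fast) tier. */
    static bool promotion_candidate(const struct page *page, int target_nid)
    {
            return !node_is_toptier(page_to_nid(page)) &&
                   node_is_toptier(target_nid);
    }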
|
D | khugepaged.c |
    1219 node = page_to_nid(page);  in hpage_collapse_scan_pmd()
    2165 node = page_to_nid(page);  in hpage_collapse_scan_file()
|
D | memory_hotplug.c | 1699 mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru)); in do_migrate_range()
|
D | ksm.c | 1957 page_to_nid(tree_page) != nid) { in unstable_tree_search_insert()
|
D | slub.c |
    5124 node_set(page_to_nid(virt_to_page(track)), l->nodes);  in add_location()
    5162 node_set(page_to_nid(virt_to_page(track)), l->nodes);  in add_location()
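
Note: list_lru.c (121/145) and slub.c (5124/5162) apply the same idiom to kernel objects rather than pages: virt_to_page() maps a linear-map address to its struct page, whose node id then selects the per-node structure. A sketch; the helper name is hypothetical and the translation is only valid for directly mapped addresses:

    #include <linux/mm.h>

    /* Hypothetical helper: node id of the memory backing a directly
     * mapped kernel object, as done in list_lru.c and slub.c. */
    static int obj_to_nid(const void *obj)
    {
            return page_to_nid(virt_to_page(obj));
    }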
|
/linux-6.1.9/include/linux/ |
D | mm.h |
    1276 extern int page_to_nid(const struct page *page);
    1278 static inline int page_to_nid(const struct page *page)  in page_to_nid() function
    1288 return page_to_nid(&folio->page);  in folio_nid()
    1378 return page_to_nid(page); /* XXX */  in page_cpupid_xchg_last()
    1388 return page_to_nid(page); /* XXX */  in page_cpupid_last()
    1482 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];  in page_zone()
    1487 return NODE_DATA(page_to_nid(page));  in page_pgdat()
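
Note: mm.h carries both declarations: the extern prototype at 1276 pairs with the sparse.c definition above, while the common case is the inline at 1278, which decodes the node id directly from the page flags. A sketch of the inline form plus page_pgdat() (1487), which layers NODE_DATA() on top:

    /* Inline page_to_nid() (sketch of include/linux/mm.h:1278): the node
     * id lives in the upper bits of page->flags. */
    static inline int page_to_nid(const struct page *page)
    {
            struct page *p = (struct page *)page;

            return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
    }

    /* page_pgdat() (1487) and page_zone() (1482) resolve the owning
     * node's pglist_data from that id via NODE_DATA(). */
    static inline pg_data_t *page_pgdat(const struct page *page)
    {
            return NODE_DATA(page_to_nid(page));
    }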
|
D | mmzone.h | 1891 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
|
/linux-6.1.9/kernel/dma/ |
D | contiguous.c | 360 if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)], in dma_free_contiguous()
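
Note: hugetlb.c (59, 1521) and contiguous.c (360) index arrays of per-node CMA areas by page_to_nid(page), so a page is validated against, or released to, the area of the node it lives on. A sketch of the release side; the array and helper names are hypothetical:

    #include <linux/cma.h>
    #include <linux/mm.h>

    /* Hypothetical per-node CMA array, mirroring hugetlb_cma[] and
     * dma_contiguous_pernuma_area[]. */
    static struct cma *my_pernuma_cma[MAX_NUMNODES];

    /* Release a page run back to the CMA area of the node it lives on. */
    static bool my_release_to_node_cma(struct page *page, unsigned long count)
    {
            return cma_release(my_pernuma_cma[page_to_nid(page)], page, count);
    }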
|
/linux-6.1.9/drivers/net/ethernet/cavium/liquidio/ |
D | octeon_network.h | 342 unlikely(page_to_nid(pg_info->page) != numa_node_id())) { in recv_buffer_recycle()
|
/linux-6.1.9/fs/proc/ |
D | task_mmu.c |
    1808 md->node[page_to_nid(page)] += nr_pages;  in gather_stats()
    1827 nid = page_to_nid(page);  in can_gather_numa_stats()
    1852 nid = page_to_nid(page);  in can_gather_numa_stats_pmd()
|
/linux-6.1.9/net/core/ |
D | page_pool.c | 254 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
|
/linux-6.1.9/drivers/net/ethernet/fungible/funeth/ |
D | funeth_rx.c | 123 rb->node = page_is_pfmemalloc(p) ? -1 : page_to_nid(p); in funeth_alloc_page()
|
/linux-6.1.9/drivers/virt/nitro_enclaves/ |
D | ne_misc_dev.c | 827 if (ne_enclave->numa_node != page_to_nid(mem_region_page)) { in ne_sanity_check_user_mem_region_page()
|
/linux-6.1.9/drivers/net/ethernet/hisilicon/hns/ |
D | hns_enet.c |
    434  if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))  in hns_nic_reuse_page()
    578  if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))  in hns_nic_poll_rx_skb()
|
/linux-6.1.9/drivers/net/ethernet/mellanox/mlx4/ |
D | en_rx.c | 499 page_to_nid(page) != numa_mem_id(); in mlx4_en_complete_rx_desc()
|
/linux-6.1.9/drivers/net/ethernet/pensando/ionic/ |
D | ionic_txrx.c | 101 if (page_to_nid(buf_info->page) != numa_mem_id()) in ionic_rx_buf_recycle()
|
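
Note: the network drivers above (liquidio, hns, mlx4, funeth, ionic) and net/core/page_pool.c all use page_to_nid() for the same RX-recycling decision: keep a buffer page only if it is local to the current node, and (in funeth and page_pool's recycle path) never cache pfmemalloc pages. A condensed sketch of that check; the helper name is hypothetical:

    #include <linux/mm.h>
    #include <linux/topology.h>

    /* Hypothetical helper condensing the drivers' recycle checks. */
    static bool rx_page_recyclable(const struct page *page)
    {
            /* pfmemalloc pages come from emergency reserves and must be
             * returned, not cached (cf. funeth_rx.c:123). */
            if (page_is_pfmemalloc(page))
                    return false;

            /* Recycle only node-local pages; remote memory costs more
             * per access than a fresh local allocation (cf. mlx4, ionic). */
            return page_to_nid(page) == numa_mem_id();
    }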