/linux-6.1.9/mm/ |
D | mempolicy.c |
      423  nodemask_t *nmask;  member
      441  return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);  in queue_pages_required()
      735  .nmask = nodes,  in queue_pages_range()
      929  static long do_get_mempolicy(int *policy, nodemask_t *nmask,  in do_get_mempolicy() argument
      946  *nmask = cpuset_current_mems_allowed;  in do_get_mempolicy()
     1006  if (nmask) {  in do_get_mempolicy()
     1008  *nmask = pol->w.user_nodemask;  in do_get_mempolicy()
     1011  get_policy_nodemask(pol, nmask);  in do_get_mempolicy()
     1064  nodemask_t nmask;  in migrate_to_node() local
     1073  nodes_clear(nmask);  in migrate_to_node()
     [all …]
|
D | memory-tiers.c |
      138  nodemask_t nmask;  in nodelist_show() local
      141  nmask = get_memtier_nodemask(to_memory_tier(dev));  in nodelist_show()
      142  ret = sysfs_emit(buf, "%*pbl\n", nodemask_pr_args(&nmask));  in nodelist_show()
|
D | memory_hotplug.c |
     1689  nodemask_t nmask = node_states[N_MEMORY];  in do_migrate_range() local
     1691  .nmask = &nmask,  in do_migrate_range()
     1706  node_clear(mtc.nid, nmask);  in do_migrate_range()
     1707  if (nodes_empty(nmask))  in do_migrate_range()
     1708  node_set(mtc.nid, nmask);  in do_migrate_range()
|
D | hugetlb.c |
     1316  nodemask_t *nmask)  in dequeue_huge_page_nodemask() argument
     1328  for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {  in dequeue_huge_page_nodemask()
     2097  gfp_t gfp_mask, int nid, nodemask_t *nmask,  in alloc_buddy_huge_page() argument
     2120  page = __alloc_pages(gfp_mask, order, nid, nmask);  in alloc_buddy_huge_page()
     2166  gfp_t gfp_mask, int nid, nodemask_t *nmask,  in alloc_fresh_huge_page() argument
     2174  page = alloc_gigantic_page(h, gfp_mask, nid, nmask);  in alloc_fresh_huge_page()
     2177  nid, nmask, node_alloc_noretry);  in alloc_fresh_huge_page()
     2378  int nid, nodemask_t *nmask)  in alloc_surplus_huge_page() argument
     2390  page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);  in alloc_surplus_huge_page()
     2419  int nid, nodemask_t *nmask)  in alloc_migrate_huge_page() argument
     [all …]
|
D | internal.h |
      801  nodemask_t *nmask;  member
|
D | migrate.c |
     1626  return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);  in alloc_migration_target()
     1642  new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);  in alloc_migration_target()
|
D | khugepaged.c |
      800  nodemask_t *nmask)  in hpage_collapse_alloc_page() argument
      802  *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);  in hpage_collapse_alloc_page()
|
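Both hugetlb.c and khugepaged.c above end up handing the nodemask straight to __alloc_pages(). A minimal sketch of that call pattern, assuming the 6.1 __alloc_pages() signature visible in the snippets; the wrapper and the node-1 example are illustrative, not from the tree:

#include <linux/gfp.h>
#include <linux/nodemask.h>

/*
 * Allocate 2^order pages, preferring 'nid' but only from nodes set in
 * *nmask; a NULL nmask means no node restriction.  Mirrors the calls in
 * alloc_buddy_huge_page() and hpage_collapse_alloc_page() above.
 */
static struct page *alloc_pages_on_nodes(gfp_t gfp, unsigned int order,
					 int nid, nodemask_t *nmask)
{
	return __alloc_pages(gfp, order, nid, nmask);
}

/* Example: restrict an order-0 allocation to node 1 only. */
static struct page *alloc_page_node1(void)
{
	nodemask_t nmask = NODE_MASK_NONE;

	node_set(1, nmask);
	return alloc_pages_on_nodes(GFP_KERNEL, 0, 1, &nmask);
}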
D | vmscan.c |
     1544  allowed_mask = mtc->nmask;  in alloc_demote_page()
     1554  mtc->nmask = NULL;  in alloc_demote_page()
     1561  mtc->nmask = allowed_mask;  in alloc_demote_page()
     1586  .nmask = &allowed_mask  in demote_folio_list()
|
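Together with the nmask member in internal.h, the migrate.c and vmscan.c hits revolve around struct migration_target_control, where nmask optionally narrows the candidate target nodes. A hedged sketch of the setup pattern seen in do_migrate_range() above; the helper name is made up and the gfp flags are an assumption:

#include <linux/gfp.h>
#include <linux/nodemask.h>
#include "internal.h"	/* struct migration_target_control { nid, nmask, gfp_mask } */

/*
 * Target any node that has memory except 'self'; if 'self' is the only
 * such node, fall back to it (the node_clear()/nodes_empty() dance from
 * do_migrate_range() above).  'nmask' must outlive the migration call.
 */
static void setup_migration_target(struct migration_target_control *mtc,
				   nodemask_t *nmask, int self)
{
	*nmask = node_states[N_MEMORY];
	node_clear(self, *nmask);
	if (nodes_empty(*nmask))
		node_set(self, *nmask);

	mtc->nid = self;
	mtc->nmask = nmask;
	mtc->gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
}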
/linux-6.1.9/tools/testing/selftests/kvm/include/ |
D | numaif.h |
       20  long get_mempolicy(int *policy, const unsigned long *nmask,  in get_mempolicy() argument
       23  return syscall(__NR_get_mempolicy, policy, nmask,  in get_mempolicy()
|
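The selftest header is only a thin wrapper around the raw syscall. For comparison, an illustrative standalone userspace program that queries the calling thread's policy and node mask the same way (plain libc, no libnuma):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

#define MAX_NODE_BITS 1024	/* comfortably above typical CONFIG_NODES_SHIFT */

int main(void)
{
	unsigned long nmask[MAX_NODE_BITS / (8 * sizeof(unsigned long))];
	int policy = -1;

	memset(nmask, 0, sizeof(nmask));
	/* flags = 0: return the calling thread's policy and its nodemask */
	if (syscall(__NR_get_mempolicy, &policy, nmask,
		    (unsigned long)MAX_NODE_BITS, NULL, 0UL) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	printf("policy=%d, first nodemask word=0x%lx\n", policy, nmask[0]);
	return 0;
}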
/linux-6.1.9/drivers/ntb/test/ |
D | ntb_pingpong.c |
      105  u64 nmask;  member
      124  if (link & pp->nmask)  in pp_find_next_peer()
      125  pidx = __ffs64(link & pp->nmask);  in pp_find_next_peer()
      307  pp->nmask = GENMASK_ULL(pcnt - 1, pidx);  in pp_init_flds()
      310  pp->in_db, pp->pmask, pp->nmask);  in pp_init_flds()
|
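Here nmask is a 64-bit mask of the peer ports numbered above the local one, so the next peer is the lowest set bit of (link & nmask), wrapping around when no higher port is linked. An illustrative userspace reimplementation of that selection, assuming fewer than 64 ports:

#include <stdint.h>

/*
 * Pick the next peer index from 'link', a bitmap of currently linked
 * ports: prefer the lowest port above 'self', otherwise wrap to the
 * lowest linked port; return -1 if nothing is linked.
 */
static int next_peer(uint64_t link, unsigned int self)
{
	uint64_t nmask = ~0ULL << (self + 1);	/* ports self+1 .. 63 */

	if (link & nmask)
		return __builtin_ctzll(link & nmask);
	if (link)
		return __builtin_ctzll(link);
	return -1;
}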
/linux-6.1.9/drivers/clk/ |
D | clk-fractional-divider.c |
       88  n = (val & fd->nmask) >> fd->nshift;  in clk_fd_recalc_rate()
      174  val &= ~(fd->mmask | fd->nmask);  in clk_fd_set_rate()
      219  fd->nmask = GENMASK(nwidth - 1, 0) << nshift;  in clk_hw_register_fractional_divider()
|
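In the fractional-divider clock code, nmask is a register field mask derived from a width and a shift (fd->nmask = GENMASK(nwidth - 1, 0) << nshift). The pack/unpack idiom in isolation, with illustrative names:

#include <stdint.h>

struct reg_field {
	uint32_t mask;		/* ((1u << width) - 1) << shift */
	unsigned int shift;
};

/* Read the field out of a register value, as in clk_fd_recalc_rate(). */
static uint32_t field_get(uint32_t val, const struct reg_field *f)
{
	return (val & f->mask) >> f->shift;
}

/* Clear the field and insert a new value, as in clk_fd_set_rate(). */
static uint32_t field_set(uint32_t val, const struct reg_field *f, uint32_t n)
{
	val &= ~f->mask;
	return val | ((n << f->shift) & f->mask);
}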
/linux-6.1.9/drivers/scsi/cxlflash/ |
D | vlun.c |
     1039  rhte->nmask = MC_RHT_NMASK;  in cxlflash_disk_virtual_open()
     1241  if (ctxi_dst->rht_start[i].nmask != 0) {  in cxlflash_disk_clone()
     1292  if (ctxi_src->rht_start[i].nmask == 0)  in cxlflash_disk_clone()
     1297  ctxi_dst->rht_start[i].nmask = ctxi_src->rht_start[i].nmask;  in cxlflash_disk_clone()
|
D | sislite.h |
      513  u8 nmask;  member
|
D | superpipe.c |
      460  if (unlikely(rhte->nmask == 0)) {  in get_rhte()
      488  if (ctxi->rht_start[i].nmask == 0) {  in rhte_checkout()
      511  rhte->nmask = 0;  in rhte_checkin()
|
/linux-6.1.9/drivers/clk/imx/ |
D | clk-composite-7ulp.c |
      108  fd->nmask = PCG_PCD_MASK;  in imx_ulp_clk_hw_composite()
|
/linux-6.1.9/Documentation/admin-guide/mm/ |
D | numa_memory_policy.rst |
      424  long set_mempolicy(int mode, const unsigned long *nmask,
      429  'nmask'. 'nmask' points to a bit mask of node ids containing at least
      440  const unsigned long *nmask, unsigned long maxnode,
      453  const unsigned long *nmask, unsigned long maxnode,
      456  mbind() installs the policy specified by (mode, nmask, maxnodes) as a
|
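As the documentation above says, from userspace nmask is a plain bit array of node ids and maxnode gives its length in bits. An illustrative standalone example that binds the calling thread to node 0 through the raw set_mempolicy() syscall (libnuma's numaif.h provides the usual wrapper):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mempolicy.h>	/* MPOL_BIND */

int main(void)
{
	unsigned long nmask[16];	/* room for 1024 node bits on a 64-bit libc */

	memset(nmask, 0, sizeof(nmask));
	nmask[0] |= 1UL << 0;		/* node 0 */

	if (syscall(__NR_set_mempolicy, MPOL_BIND, nmask,
		    8 * sizeof(nmask)) != 0) {
		perror("set_mempolicy");
		return 1;
	}
	puts("policy set: MPOL_BIND to node 0");
	return 0;
}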
/linux-6.1.9/include/linux/ |
D | hugetlb.h |
      705  nodemask_t *nmask, gfp_t gfp_mask);
     1006  nodemask_t *nmask, gfp_t gfp_mask)
|
D | syscalls.h |
      925  const unsigned long __user *nmask,
      929  unsigned long __user *nmask,
      932  asmlinkage long sys_set_mempolicy(int mode, const unsigned long __user *nmask,
|
D | clk-provider.h |
     1139  u32 nmask;  member
|
/linux-6.1.9/net/sched/ |
D | sch_api.c |
      681  unsigned int nsize, nmask, osize;  in qdisc_class_hash_grow() local
      688  nmask = nsize - 1;  in qdisc_class_hash_grow()
      699  h = qdisc_class_hash(cl->classid, nmask);  in qdisc_class_hash_grow()
      705  clhash->hashmask = nmask;  in qdisc_class_hash_grow()
|
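qdisc_class_hash_grow() sizes the new table to a power of two so that nmask = nsize - 1 turns a hash value into a bucket index with a single AND. The idiom on its own, with illustrative names:

#include <assert.h>
#include <stdint.h>

/*
 * For a power-of-two table size, 'hash & (size - 1)' is equivalent to
 * 'hash % size' without a division; that is the role nmask plays above.
 */
static unsigned int bucket_of(uint32_t hash, unsigned int nsize)
{
	unsigned int nmask = nsize - 1;

	assert((nsize & nmask) == 0);	/* nsize must be a power of two */
	return hash & nmask;
}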
/linux-6.1.9/drivers/clk/rockchip/ |
D | clk.c |
      245  div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;  in rockchip_clk_register_frac_branch()
|
/linux-6.1.9/drivers/scsi/ |
D | pmcraid.c |
      429  u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;  in pmcraid_disable_interrupts() local
      432  iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);  in pmcraid_disable_interrupts()
      456  u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);  in pmcraid_enable_interrupts()
      458  iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);  in pmcraid_enable_interrupts()
|
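In pmcraid, nmask is simply the next value of the global interrupt mask register: the current value with the global-mask bit set to disable interrupts, or cleared to enable them. A hedged sketch of that read-modify-write; the bit definition and helper are illustrative, not the driver's:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define GLOBAL_INTR_MASK_BIT	BIT(31)	/* illustrative, not pmcraid's value */

static void set_global_intr_masked(void __iomem *mask_reg, bool masked)
{
	u32 gmask = ioread32(mask_reg);
	u32 nmask = masked ? (gmask | GLOBAL_INTR_MASK_BIT)
			   : (gmask & ~GLOBAL_INTR_MASK_BIT);

	iowrite32(nmask, mask_reg);
	ioread32(mask_reg);	/* read back so the write is posted */
}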