Home
last modified time | relevance | path

Searched refs:nr_node_ids (Results 1 – 25 of 37) sorted by relevance

12

/linux-6.1.9/kernel/irq/
affinity.c:48 masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL); in alloc_node_to_cpumask()
52 for (node = 0; node < nr_node_ids; node++) { in alloc_node_to_cpumask()
70 for (node = 0; node < nr_node_ids; node++) in free_node_to_cpumask()
137 for (n = 0; n < nr_node_ids; n++) { in alloc_nodes_vectors()
156 sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]), in alloc_nodes_vectors()
227 for (n = 0; n < nr_node_ids; n++) { in alloc_nodes_vectors()
281 node_vectors = kcalloc(nr_node_ids, in __irq_build_affinity_masks()
291 for (i = 0; i < nr_node_ids; i++) { in __irq_build_affinity_masks()
/linux-6.1.9/drivers/base/
arch_numa.c:52 if (WARN_ON(node < 0 || node >= nr_node_ids)) in cpumask_of_node()
105 if (nr_node_ids == MAX_NUMNODES) in setup_node_to_cpumask_map()
109 for (node = 0; node < nr_node_ids; node++) { in setup_node_to_cpumask_map()
115 pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); in setup_node_to_cpumask_map()
278 size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); in numa_alloc_distance()
283 numa_distance_cnt = nr_node_ids; in numa_alloc_distance()
/linux-6.1.9/arch/x86/mm/
numa.c:114 if (nr_node_ids == MAX_NUMNODES) in setup_node_to_cpumask_map()
118 for (node = 0; node < nr_node_ids; node++) in setup_node_to_cpumask_map()
122 pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); in setup_node_to_cpumask_map()
909 if ((unsigned)node >= nr_node_ids) { in cpumask_of_node()
912 node, nr_node_ids); in cpumask_of_node()
/linux-6.1.9/mm/
shrinker_debug.c:54 count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); in shrinker_debugfs_count_show()
130 if (nid < 0 || nid >= nr_node_ids) in shrinker_debugfs_scan_write()
list_lru.c:343 mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); in memcg_init_list_lru_one()
571 lru->node = kcalloc(nr_node_ids, sizeof(*lru->node), GFP_KERNEL); in __list_lru_init()
memory-tiers.c:638 node_demotion = kcalloc(nr_node_ids, sizeof(struct demotion_nodes), in memory_tier_init()
ksm.c:2981 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), in merge_across_nodes_store()
2988 root_unstable_tree = buf + nr_node_ids; in merge_across_nodes_store()
2995 ksm_nr_node_ids = knob ? 1 : nr_node_ids; in merge_across_nodes_store()
mempolicy.c:1426 unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); in copy_nodes_to_user()
1430 nbytes = BITS_TO_COMPAT_LONGS(nr_node_ids) * sizeof(compat_long_t); in copy_nodes_to_user()
1438 maxnode = nr_node_ids; in copy_nodes_to_user()
1698 if (nmask != NULL && maxnode < nr_node_ids) in kernel_get_mempolicy()
slab.h:792 for (__node = 0; __node < nr_node_ids; __node++) \
hugetlb_cgroup.c:142 h_cgroup = kzalloc(struct_size(h_cgroup, nodeinfo, nr_node_ids), in hugetlb_cgroup_css_alloc()
slub.c:4851 nr_node_ids * sizeof(struct kmem_cache_node *), in kmem_cache_init()
4870 nr_cpu_ids, nr_node_ids); in kmem_cache_init()
5209 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL); in show_slab_objects()
5301 for (node = 0; node < nr_node_ids; node++) { in show_slab_objects()
/linux-6.1.9/arch/loongarch/kernel/
numa.c:112 if (nr_node_ids >= 8) in setup_per_cpu_areas()
426 loongson_sysconf.nr_nodes = nr_node_ids; in init_numa_memory()
/linux-6.1.9/arch/x86/kernel/
setup_percpu.c:126 NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); in setup_per_cpu_areas()
/linux-6.1.9/include/linux/
nodemask.h:455 extern unsigned int nr_node_ids;
497 #define nr_node_ids 1U macro
/linux-6.1.9/arch/powerpc/mm/
numa.c:78 if (nr_node_ids == MAX_NUMNODES) in setup_node_to_cpumask_map()
86 pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); in setup_node_to_cpumask_map()
183 if (nid == 0xffff || nid >= nr_node_ids) in __associativity_to_nid()
/linux-6.1.9/drivers/hv/
hv.c:133 hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask), in hv_synic_alloc()
channel_mgmt.c:761 if (numa_node == nr_node_ids) { in init_vp_index()
/linux-6.1.9/kernel/sched/
topology.c:1705 for (i = 0; i < nr_node_ids; i++) { in sched_numa_warn()
1707 for (j = 0; j < nr_node_ids; j++) { in sched_numa_warn()
1884 masks[i] = kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); in sched_init_numa()
2015 for (j = 0; j < nr_node_ids; j++) { in sched_domains_numa_masks_set()
2031 for (j = 0; j < nr_node_ids; j++) { in sched_domains_numa_masks_clear()
fair.c:1266 return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv; in task_faults_idx()
2688 nr_node_ids * sizeof(unsigned long); in task_numa_group()
2700 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) in task_numa_group()
2758 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) { in task_numa_group()
2801 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) in task_numa_free()
2816 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) in task_numa_free()
2852 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids; in task_numa_fault()
/linux-6.1.9/drivers/infiniband/sw/siw/
siw_main.c:138 int i, num_nodes = nr_node_ids; in siw_init_cpulist()
/linux-6.1.9/arch/powerpc/platforms/pseries/
hotplug-cpu.c:240 if (rc && nr_node_ids > 1) { in pseries_add_processor()
/linux-6.1.9/arch/powerpc/sysdev/xive/
common.c:1144 ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids, in xive_init_ipis()
1149 xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL); in xive_init_ipis()
/linux-6.1.9/kernel/
workqueue.c:3963 ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL); in apply_wqattrs_prepare()
4312 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); in alloc_workqueue()
5972 tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL); in wq_numa_init()
/linux-6.1.9/io_uring/
io-wq.c:1145 wq = kzalloc(struct_size(wq, wqes, nr_node_ids), GFP_KERNEL); in io_wq_create()
/linux-6.1.9/net/sunrpc/
svc.c:217 unsigned int maxpools = nr_node_ids; in svc_pool_map_init_pernode()

12