/linux-6.6.21/lib/cpumask.c
     61  *mask = kmalloc_node(cpumask_size(), flags, node);  in alloc_cpumask_var_node()
     85  *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);  in alloc_bootmem_cpumask_var()
     88  cpumask_size());  in alloc_bootmem_cpumask_var()
    109  memblock_free(mask, cpumask_size());  in free_bootmem_cpumask_var()
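These hits are the CONFIG_CPUMASK_OFFSTACK backends behind cpumask_var_t: alloc_cpumask_var_node() kmallocs cpumask_size() bytes at runtime, and the bootmem variants take and return the same number of bytes through memblock during early boot. A minimal caller-side sketch of how that allocator is normally reached; the helper name my_collect_cpus is made up for illustration:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical helper: borrow a heap cpumask, fill it, release it. */
    static int my_collect_cpus(void)
    {
            cpumask_var_t mask;

            /*
             * With CONFIG_CPUMASK_OFFSTACK this reaches alloc_cpumask_var_node()
             * and allocates cpumask_size() bytes; without it the mask lives
             * inside cpumask_var_t itself and no allocation happens.
             */
            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            cpumask_copy(mask, cpu_online_mask);
            /* ... use mask ... */

            free_cpumask_var(mask);
            return 0;
    }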
/linux-6.6.21/kernel/compat.c
    115  if (len < cpumask_size())  in compat_get_user_cpu_mask()
    116  memset(new_mask, 0, cpumask_size());  in compat_get_user_cpu_mask()
    117  else if (len > cpumask_size())  in compat_get_user_cpu_mask()
    118  len = cpumask_size();  in compat_get_user_cpu_mask()
    160  unsigned int retlen = min(len, cpumask_size());  in COMPAT_SYSCALL_DEFINE3()
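compat_get_user_cpu_mask() shows the inbound clamping idiom used whenever an affinity mask crosses in from userspace: if the user buffer is shorter than cpumask_size(), the kernel mask is zeroed first so the unread tail is well defined; if it is longer, the copy is truncated to cpumask_size(). A condensed sketch of that pattern; the helper name is illustrative, and the native equivalent is get_user_cpu_mask() in kernel/sched/core.c, listed further down:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    /* Sketch: clamp a user-supplied length before copying an affinity mask in. */
    static int copy_cpu_mask_from_user(struct cpumask *new_mask,
                                       const void __user *umask, unsigned int len)
    {
            if (len < cpumask_size())
                    memset(new_mask, 0, cpumask_size()); /* zero the bytes we will not read */
            else if (len > cpumask_size())
                    len = cpumask_size();                /* ignore bytes past the kernel mask */

            return copy_from_user(new_mask, umask, len) ? -EFAULT : 0;
    }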
/linux-6.6.21/kernel/fork.c
   3267  mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size();  in mm_cache_init()
/linux-6.6.21/arch/riscv/kernel/sys_riscv.c
    252  if (cpu_count > cpumask_size())  in do_riscv_hwprobe()
    253  cpu_count = cpumask_size();  in do_riscv_hwprobe()
/linux-6.6.21/drivers/cpuidle/cpuidle-big_little.c
    147  cpumask = kzalloc(cpumask_size(), GFP_KERNEL);  in bl_idle_driver_init()
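cpuidle-big_little, like the rps_default_mask sysctl, trace_events_filter, mvpp2 and lpfc entries further down, allocates a bare struct cpumask on the heap. Sizing the allocation with cpumask_size() rather than sizeof(struct cpumask) is what keeps this cheap under CONFIG_CPUMASK_OFFSTACK: only enough longs to cover the CPUs this boot can actually use are allocated, not the compile-time NR_CPUS worth. A minimal sketch:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Hypothetical: allocate a heap cpumask sized for this boot, freed with kfree(). */
    static struct cpumask *alloc_heap_cpumask(gfp_t gfp)
    {
            return kzalloc(cpumask_size(), gfp);
    }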
/linux-6.6.21/drivers/powercap/idle_inject.c
    337  ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);  in idle_inject_register_full()
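idle_inject co-allocates its CPU mask as trailing storage behind its own struct, and the same sizeof(*p) + cpumask_size() idiom appears further down in energy_model, openvswitch's flow table and the sched/topology structures. A generic sketch of the idiom; struct my_ctx and my_ctx_alloc are made-up names, not kernel APIs:

    #include <linux/cpumask.h>
    #include <linux/slab.h>

    /* Hypothetical context with its CPU mask in trailing storage. */
    struct my_ctx {
            int id;
            unsigned long cpumask[];        /* sized by cpumask_size() at alloc time */
    };

    static struct my_ctx *my_ctx_alloc(const struct cpumask *cpus)
    {
            struct my_ctx *ctx;

            /* One allocation covers the fixed struct plus a full-sized cpumask. */
            ctx = kzalloc(sizeof(*ctx) + cpumask_size(), GFP_KERNEL);
            if (!ctx)
                    return NULL;

            cpumask_copy(to_cpumask(ctx->cpumask), cpus);
            return ctx;
    }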
/linux-6.6.21/include/linux/mm_types.h
   1066  cid_bitmap += cpumask_size();  in mm_cidmask()
   1100  return cpumask_size();  in mm_cid_size()
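Together with the fork.c hit above (mm_size = sizeof(struct mm_struct) + cpumask_size() + mm_cid_size()), these helpers describe the trailing storage behind every mm_struct: the per-mm cpumask comes first and the concurrency-id bitmap begins cpumask_size() bytes after it. A layout sketch of that arithmetic; the _sketch name is mine, the real helper is mm_cidmask() in mm_types.h:

    #include <linux/cpumask.h>
    #include <linux/mm_types.h>

    /*
     * Trailing storage behind struct mm_struct (layout sketch):
     *   [ struct mm_struct ][ cpu_bitmap: cpumask_size() bytes ][ cid bitmap ]
     */
    static inline struct cpumask *mm_cidmask_sketch(struct mm_struct *mm)
    {
            unsigned long cid_bitmap = (unsigned long)mm_cpumask(mm);

            cid_bitmap += cpumask_size();   /* skip the per-mm cpumask */
            return (struct cpumask *)cid_bitmap;
    }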
/linux-6.6.21/include/linux/cpumask.h
    822  static inline unsigned int cpumask_size(void)  in cpumask_size() (definition)
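The cpumask.h hit is the definition every other entry in this list funnels through: cpumask_size() reports how many bytes a struct cpumask allocation needs, i.e. the mask's bit count rounded up to whole unsigned longs. A simplified restatement of what it computes; the in-tree version at line 822 picks the bit count according to its CONFIG_CPUMASK_OFFSTACK handling, so treat this as illustrative rather than a copy:

    #include <linux/bitops.h>
    #include <linux/cpumask.h>

    /* Illustrative only: bytes needed to hold one cpumask. */
    static inline unsigned int cpumask_size_sketch(void)
    {
            return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
    }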
/linux-6.6.21/kernel/sched/topology.c
    946  sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),  in build_group_from_child_sched_domain()
   1383  entry = kzalloc(sizeof(*entry) + cpumask_size(), GFP_KERNEL);  in asym_cpu_capacity_update_data()
   1905  struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);  in sched_init_numa()
   2215  sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),  in __sdt_alloc()
   2229  sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),  in __sdt_alloc()
   2238  sgc = kzalloc_node(sizeof(struct sched_group_capacity) + cpumask_size(),  in __sdt_alloc()
/linux-6.6.21/kernel/sched/core.c
   2825  int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));  in alloc_user_cpus_ptr()
   8433  if (len < cpumask_size())  in get_user_cpu_mask()
   8435  else if (len > cpumask_size())  in get_user_cpu_mask()
   8436  len = cpumask_size();  in get_user_cpu_mask()
   8517  unsigned int retlen = min(len, cpumask_size());  in SYSCALL_DEFINE3()
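The get_user_cpu_mask() hits are the native counterpart of the compat.c clamp shown earlier; line 8517, in the sched_getaffinity() syscall, is the outbound direction: copy back at most min(len, cpumask_size()) bytes and report how much was written. The s390 perf_cpum_cf and ublk entries below use the same min()/min_t() idiom. A condensed sketch of the copy-out side; the helper name is illustrative:

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/minmax.h>
    #include <linux/uaccess.h>

    /* Sketch: never copy more bytes out than the kernel mask actually holds. */
    static long copy_cpu_mask_to_user(void __user *umask, unsigned int len,
                                      const struct cpumask *mask)
    {
            unsigned int retlen = min(len, cpumask_size());

            if (copy_to_user(umask, cpumask_bits(mask), retlen))
                    return -EFAULT;

            return retlen;  /* bytes copied, which is what sched_getaffinity() returns */
    }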
/linux-6.6.21/kernel/power/energy_model.c
    212  pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);  in em_create_pd()
/linux-6.6.21/net/core/sysctl_net_core.c
     85  rps_default_mask = kzalloc(cpumask_size(), GFP_KERNEL);  in rps_default_mask_cow_alloc()
/linux-6.6.21/net/openvswitch/flow_table.c
   1201  + cpumask_size(),  in ovs_flow_init()
/linux-6.6.21/arch/s390/kernel/perf_cpum_cf.c
   1622  len = min_t(u64, start.cpumask_len, cpumask_size());  in cfset_ioctl_start()
/linux-6.6.21/drivers/soc/ti/knav_qmss_queue.c
   1253  cpumask_size(), GFP_KERNEL);  in knav_setup_queue_range()
/linux-6.6.21/kernel/trace/trace_events_filter.c
   1753  pred->mask = kzalloc(cpumask_size(), GFP_KERNEL);  in parse_pred()
/linux-6.6.21/io_uring/io_uring.c
   4310  if (len > cpumask_size())  in io_register_iowq_aff()
   4311  len = cpumask_size();  in io_register_iowq_aff()
/linux-6.6.21/drivers/block/ublk_drv.c
   2247  retlen = min_t(unsigned short, header->len, cpumask_size());  in ublk_ctrl_get_queue_affinity()
/linux-6.6.21/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
   4708  qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);  in mvpp2_irqs_init()
/linux-6.6.21/drivers/scsi/lpfc/lpfc_init.c
  12724  tmp = kzalloc(cpumask_size(), GFP_KERNEL);  in lpfc_cpuhp_get_eq()