#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/bootmem.h>

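/**
 * __first_cpu - find the first set cpu in a cpumask
 * @srcp: the cpumask pointer
 *
 * Returns NR_CPUS if no cpus are set in @srcp.
 */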
int __first_cpu(const cpumask_t *srcp)
{
	return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
}
EXPORT_SYMBOL(__first_cpu);

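/**
 * __next_cpu - find the next set cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns NR_CPUS if no further cpus are set in @srcp.
 */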
int __next_cpu(int n, const cpumask_t *srcp)
{
	return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
}
EXPORT_SYMBOL(__next_cpu);

#if NR_CPUS > 64
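/**
 * __next_cpu_nr - find the next set cpu, searching only up to nr_cpu_ids
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Like __next_cpu(), but bounded by nr_cpu_ids rather than NR_CPUS.
 * Returns nr_cpu_ids if no further cpus are set.
 */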
int __next_cpu_nr(int n, const cpumask_t *srcp)
{
	return min_t(int, nr_cpu_ids,
				find_next_bit(srcp->bits, nr_cpu_ids, n+1));
}
EXPORT_SYMBOL(__next_cpu_nr);
#endif

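/**
 * __any_online_cpu - find any online cpu in a cpumask
 * @mask: the cpumask to search
 *
 * Returns the first online cpu set in @mask, or NR_CPUS if no cpu in
 * @mask is online.
 */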
int __any_online_cpu(const cpumask_t *mask)
{
	int cpu;

	for_each_cpu_mask(cpu, *mask) {
		if (cpu_online(cpu))
			break;
	}
	return cpu;
}
EXPORT_SYMBOL(__any_online_cpu);

/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
		if (cpumask_test_cpu(n, src2p))
			break;
	return n;
}
EXPORT_SYMBOL(cpumask_next_and);

/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif
	/* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
	if (*mask) {
		unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
		unsigned int tail;
		tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
		memset(ptr + cpumask_size() - tail, 0, tail);
	}

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

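/**
 * zalloc_cpumask_var_node - allocate a zeroed struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate
 *
 * As alloc_cpumask_var_node(), but the allocated cpumask is cleared.
 */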
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, numa_node_id());
}
EXPORT_SYMBOL(alloc_cpumask_var);

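/**
 * zalloc_cpumask_var - allocate a zeroed struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * As alloc_cpumask_var(), but the allocated cpumask is cleared.
 */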
bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

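/*
 * Typical usage of the alloc/free helpers above (a sketch only; error
 * handling and the choice of allocator are caller-specific):
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpu_online_mask);
 *	...
 *	free_cpumask_var(mask);
 */
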
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = alloc_bootmem(cpumask_size());
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	free_bootmem((unsigned long)mask, cpumask_size());
}
#endif