/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>

/*
 * percpu_counter batch for local add or sub: with a batch of INT_MAX,
 * updates accumulate in the per-CPU counters and are practically never
 * folded into fbc->count by the write side itself.
 */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
			       gfp_t gfp, u32 nr_counters,
			       struct lock_class_key *key);

#define percpu_counter_init_many(fbc, value, gfp, nr_counters)		\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init_many(fbc, value, gfp, nr_counters,\
					   &__key);			\
	})

#define percpu_counter_init(fbc, value, gfp)				\
	percpu_counter_init_many(fbc, value, gfp, 1)

void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	percpu_counter_destroy_many(fbc, 1);
}
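
/*
 * Typical lifecycle (an illustrative sketch; "stats" is a hypothetical
 * caller-side counter, not part of this API):
 *
 *	static struct percpu_counter stats;
 *
 *	if (percpu_counter_init(&stats, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_add(&stats, 1);
 *	...
 *	percpu_counter_destroy(&stats);
 *
 * Arrays of counters can be set up and torn down in one call with
 * percpu_counter_init_many() and percpu_counter_destroy_many().
 */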

void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
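
/*
 * Illustrative use (the "free_blocks" counter here is hypothetical):
 * the comparison reads the approximate value first and only falls back
 * to an exact sum when the per-CPU slop could change the answer, so it
 * is cheap enough for checks like:
 *
 *	if (percpu_counter_compare(&free_blocks, nr_wanted) < 0)
 *		return -ENOSPC;
 */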

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counter and not in fbc->count until
 * the local count overflows PERCPU_COUNTER_LOCAL_BATCH. This makes counter
 * writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), has to be
 * used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
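
/*
 * Illustrative pattern (the "nr_events" counter is hypothetical): update
 * the hot path with the local variant and pay for the exact sum only on
 * the rare read side:
 *
 *	percpu_counter_add_local(&nr_events, 1);
 *	...
 *	total = percpu_counter_sum(&nr_events);
 *
 * percpu_counter_read() would miss nearly all of these updates, because
 * they stay in the per-CPU counters until they cross the local batch.
 */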

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);

	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

/*
 * Fast but approximate: returns fbc->count without folding in the
 * per-CPU deltas.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
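
/*
 * Illustrative comparison (the "nr_dirty" counter is hypothetical):
 *
 *	approx = percpu_counter_read_positive(&nr_dirty);  (cheap, clamped)
 *	exact  = percpu_counter_sum_positive(&nr_dirty);   (sums all CPUs)
 *
 * The read variant is fine for heuristics; use the sum variants when the
 * per-CPU deltas must be folded in.
 */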

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init_many(struct percpu_counter *fbc,
					   s64 amount, gfp_t gfp,
					   u32 nr_counters)
{
	u32 i;

	for (i = 0; i < nr_counters; i++)
		fbc[i].count = amount;

	return 0;
}

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	return percpu_counter_init_many(fbc, amount, gfp, 1);
}

static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
					       u32 nr_counters)
{
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	unsigned long flags;

	local_irq_save(flags);
	fbc->count += amount;
	local_irq_restore(flags);
}

/* non-SMP percpu_counter_add_local() is the same as percpu_counter_add() */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */