/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

/* percpu_counter batch for local add or sub */
#define PERCPU_COUNTER_LOCAL_BATCH	INT_MAX

#ifdef CONFIG_SMP

struct percpu_counter {
	raw_spinlock_t lock;
	s64 count;
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
			  struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)				\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, gfp, &__key);		\
	})
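
/*
 * Example (illustrative sketch, not part of the original header): typical
 * lifecycle of an SMP percpu_counter.  The counter name "nr_foo" and the
 * surrounding error handling are made up for the example.
 *
 *	struct percpu_counter nr_foo;
 *
 *	if (percpu_counter_init(&nr_foo, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_add(&nr_foo, 1);
 *	approx = percpu_counter_read(&nr_foo);	 (cheap, may be slightly stale)
 *	exact  = percpu_counter_sum(&nr_foo);	 (walks every CPU, exact)
 *	percpu_counter_destroy(&nr_foo);
 */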

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
			      s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

/*
 * With percpu_counter_add_local() and percpu_counter_sub_local(), counts
 * are accumulated in the local per-CPU counters and are not added to
 * fbc->count until the local count overflows PERCPU_COUNTER_LOCAL_BATCH.
 * This makes counter writes efficient.
 * But percpu_counter_sum(), instead of percpu_counter_read(), then needs to
 * be used to add up the counts from each CPU to account for all the local
 * counts. So percpu_counter_add_local() and percpu_counter_sub_local()
 * should be used when a counter is updated frequently and read rarely.
 */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, PERCPU_COUNTER_LOCAL_BATCH);
}
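
/*
 * Example (illustrative, not part of the original header): a counter that is
 * bumped on a hot path but only summed when statistics are reported.  The
 * name "nr_events" and the seq_file context are assumptions for the sketch.
 *
 *	percpu_counter_add_local(&nr_events, 1);	 (stays in this CPU's slot)
 *	...
 *	seq_printf(m, "events: %lld\n",
 *		   percpu_counter_sum(&nr_events));	 (folds in every CPU)
 */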

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	s64 ret = __percpu_counter_sum(fbc);
	return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter that should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	/* Prevent reloads of fbc->count */
	s64 ret = READ_ONCE(fbc->count);

	if (ret >= 0)
		return ret;
	return 0;
}
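
/*
 * Example (illustrative, not from the original header): a common pattern is
 * to test the cheap approximate value first and only pay for an exact sum
 * when the result is close to a limit.  "nr_used", "nr" and "limit" are
 * made-up names.
 *
 *	if (percpu_counter_read_positive(&nr_used) + nr > limit &&
 *	    percpu_counter_sum_positive(&nr_used) + nr > limit)
 *		return -ENOSPC;
 */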

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
	s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
				      gfp_t gfp)
{
	fbc->count = amount;
	return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	else if (fbc->count < rhs)
		return -1;
	else
		return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
	return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}

/* non-SMP percpu_counter_add_local() is the same as percpu_counter_add() */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, amount);
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
	return true;
}

static inline void percpu_counter_sync(struct percpu_counter *fbc)
{
}
#endif	/* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}

static inline void
percpu_counter_sub_local(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_local(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */