1 #ifndef _LINUX_PERCPU_COUNTER_H
2 #define _LINUX_PERCPU_COUNTER_H
3 /*
4  * A simple "approximate counter" for use in ext2 and ext3 superblocks.
5  *
6  * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
7  */
8 
9 #include <linux/spinlock.h>
10 #include <linux/smp.h>
11 #include <linux/list.h>
12 #include <linux/threads.h>
13 #include <linux/percpu.h>
14 #include <linux/types.h>
15 
16 #ifdef CONFIG_SMP
17 
/*
 * SMP flavour: a central s64 plus small per-cpu delta counters.
 */
struct percpu_counter {
	spinlock_t lock;	/* serialises updates to ->count */
	s64 count;		/* approximate aggregate; per-cpu slop not included */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-cpu deltas; NOTE(review): presumably folded
				 * into ->count once they exceed the batch size —
				 * see lib/percpu_counter.c */
};
26 
/* Default batch size, passed to __percpu_counter_add() by percpu_counter_add(). */
extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);

/*
 * Initialise @fbc to @value.  A static lock_class_key is emitted per call
 * site so that lockdep treats each initialisation site as its own class.
 */
#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})

void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
44 
/* Add @amount to @fbc using the default batch size. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
49 
percpu_counter_sum_positive(struct percpu_counter * fbc)50 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
51 {
52 	s64 ret = __percpu_counter_sum(fbc);
53 	return ret < 0 ? 0 : ret;
54 }
55 
/*
 * Accurate sum of @fbc (unlike percpu_counter_read()).  The result may
 * legitimately be negative.
 */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
60 
/*
 * Cheap, approximate read: returns only the central count and ignores
 * whatever is still pending in the per-cpu counters.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
65 
/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for a counter which should never be negative, because pending
 * per-cpu deltas are not included in ->count.  This variant clamps such
 * transient negatives for callers that require a non-negative result.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	s64 ret = fbc->count;

	barrier();		/* Prevent reloads of fbc->count */
	if (ret >= 0)
		return ret;
	/*
	 * NOTE(review): clamps to 1, not 0.  Looks deliberate (perhaps so
	 * callers don't mistake a transient dip for "empty") — confirm
	 * before changing.
	 */
	return 1;
}
80 
/* Non-zero once __percpu_counter_init() has allocated the per-cpu array. */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return (fbc->counters != NULL);
}
85 
86 #else
87 
/*
 * UP flavour: just a plain s64.  Updates disable preemption instead of
 * taking a lock (see percpu_counter_add() below).
 */
struct percpu_counter {
	s64 count;
};
91 
/* UP init: no allocation can fail, so this always returns 0. */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}
97 
/* UP counterpart of the SMP destroy: nothing was allocated, nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
101 
/* Set the counter to an exact value. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
106 
/*
 * Compare the counter against @rhs.
 * Returns 1 if the count is greater, -1 if smaller, 0 if equal.
 */
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
	if (fbc->count > rhs)
		return 1;
	if (fbc->count < rhs)
		return -1;
	return 0;
}
116 
/*
 * UP add: the read-modify-write of a 64-bit count is not atomic, so
 * disable preemption around it to keep the update consistent.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
124 
/* UP variant: @batch is meaningless without per-cpu counters and is ignored. */
static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
130 
/* UP read: the count is exact, not approximate. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
135 
/*
 * UP read_positive: no clamping needed — there is no per-cpu slop, so a
 * counter that should never be negative never reads negative here.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
140 
/* UP sum_positive: the plain read is already the exact sum. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
145 
/* UP sum: the plain read is already the exact sum. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
150 
/* UP: nothing is allocated, so a counter is always "initialized". */
static inline int percpu_counter_initialized(struct percpu_counter *fbc)
{
	return 1;
}
155 
156 #endif	/* CONFIG_SMP */
157 
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
162 
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
167 
/* Subtract @amount from the counter. */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
172 
173 #endif /* _LINUX_PERCPU_COUNTER_H */
174