/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait_cnt: Number of frees remaining before we wake up.
	 */
	atomic_t wait_cnt;

	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: Count of currently active @ws waitqueues.
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);
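
/*
 * Illustrative sketch (not part of this header): a typical lifecycle for a
 * plain sbitmap. The depth, shift, and flags below are arbitrary choices,
 * and error handling is elided.
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *
 *	nr = sbitmap_get(&sb);
 *	if (nr >= 0)
 *		sbitmap_put(&sb, nr);
 *
 *	sbitmap_free(&sb);
 */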

/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);
	return 1U << sb->shift;
}
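
/*
 * For example, with depth = 100 and shift = 6 (64 bits per word), map_nr
 * is 2: __map_depth() returns 64 for index 0 and 100 - (1 << 6) = 36 for
 * the final, partial word.
 */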

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
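
/*
 * Illustrative sketch of the split described above; the high_priority
 * flag is a hypothetical caller-side condition.
 *
 *	if (high_priority)
 *		nr = sbitmap_get(sb);
 *	else
 *		nr = sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
 */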

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @start: Where to start the iteration.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
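
/*
 * Illustrative sketch: counting set bits with a callback. Returning true
 * keeps the iteration going; the names here are hypothetical.
 *
 *	static bool count_bit(struct sbitmap *sb, unsigned int bitnr,
 *			      void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	sbitmap_for_each_set(sb, count_bit, &count);
 */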

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}
/*
 * Pairs with sbitmap_get(); this one applies both the cleared bit and the
 * allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}
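
/*
 * Worked example: on a 64-bit machine, shift starts at ilog2(64) = 6. For
 * depth = 100, 4U << 6 = 256 > 100, so shift drops to 5 (4U << 5 = 128,
 * still > 100) and then to 4 (4U << 4 = 64 <= 100), giving 16 bits per
 * word spread across DIV_ROUND_UP(100, 16) = 7 words.
 */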

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return the number of set and not cleared bits in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: The number of bits which are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_init_node().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 *
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate the wake batch
 * @sbq: Bitmap queue to recalculate the wake batch for.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this calculates the wake batch
 * from the depth. This interface is for HCTX shared tags or queue shared
 * tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);
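
/*
 * Illustrative sketch: decoding the returned mask. Each set bit plus the
 * returned offset is one allocated tag; use_tag() is a hypothetical
 * consumer.
 *
 *	unsigned int offset, bit;
 *	unsigned long mask;
 *
 *	mask = __sbitmap_queue_get_batch(sbq, 4, &offset);
 *	for_each_set_bit(bit, &mask, BITS_PER_LONG)
 *		use_tag(offset + bit);
 */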

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 *       sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
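
/*
 * Illustrative sketch: allocate a bit, use it, then free it and wake any
 * waiters. do_work() is a hypothetical consumer.
 *
 *	unsigned int cpu;
 *	int nr;
 *
 *	nr = sbitmap_queue_get(sbq, &cpu);
 *	if (nr < 0)
 *		return -EBUSY;
 *	do_work(nr);
 *	sbitmap_queue_clear(sbq, nr, cpu);
 */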

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

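/*
 * Advance @index to the next wait queue, wrapping around. The cmpxchg is
 * not retried on failure: if it fails, some other task has already
 * advanced the index, which serves the same purpose.
 */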
static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)							\
	struct sbq_wait name = {						\
		.sbq = NULL,							\
		.wait = {							\
			.private	= current,				\
			.func		= autoremove_wake_function,		\
			.entry		= LIST_HEAD_INIT((name).wait.entry),	\
		}								\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
				struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait, int state);
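
/*
 * Illustrative sketch: sleeping until a bit becomes free, in the style in
 * which callers typically combine these helpers. @wait_index is a
 * hypothetical per-user atomic_t; signal handling and timeouts are elided.
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws;
 *	int nr;
 *
 *	ws = sbq_wait_ptr(sbq, &wait_index);
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */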

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
				struct sbq_wait *sbq_wait);

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */