/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _LINUX_XARRAY_H
#define _LINUX_XARRAY_H
/*
 * eXtensible Arrays
 * Copyright (c) 2017 Microsoft Corporation
 * Author: Matthew Wilcox <willy@infradead.org>
 *
 * See Documentation/core-api/xarray.rst for how to use the XArray.
 */

#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/*
 * The bottom two bits of the entry determine how the XArray interprets
 * the contents:
 *
 * 00: Pointer entry
 * 10: Internal entry
 * x1: Value entry or tagged pointer
 *
 * Attempting to store internal entries in the XArray is a bug.
 *
 * Most internal entries are pointers to the next node in the tree.
 * The following internal entries have a special meaning:
 *
 * 0-62: Sibling entries
 * 256: Retry entry
 * 257: Zero entry
 *
 * Errors are also represented as internal entries, but use the negative
 * space (-4094 to -2). They're never stored in the slots array; only
 * returned by the normal API.
 */

#define BITS_PER_XA_VALUE	(BITS_PER_LONG - 1)

/**
 * xa_mk_value() - Create an XArray entry from an integer.
 * @v: Value to store in XArray.
 *
 * Context: Any context.
 * Return: An entry suitable for storing in the XArray.
 */
static inline void *xa_mk_value(unsigned long v)
{
	WARN_ON((long)v < 0);
	return (void *)((v << 1) | 1);
}

/**
 * xa_to_value() - Get value stored in an XArray entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value stored in the XArray entry.
 */
static inline unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

/**
 * xa_is_value() - Determine if an entry is a value.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: True if the entry is a value, false if it is a pointer.
 */
static inline bool xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}
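
/*
 * Example (illustrative sketch, not part of this header): value entries
 * round-trip through xa_mk_value()/xa_to_value(), and xa_is_value()
 * distinguishes them from ordinary pointers.
 *
 *	void *entry = xa_mk_value(42);
 *
 *	if (xa_is_value(entry))
 *		pr_info("stored value %lu\n", xa_to_value(entry));
 */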

/**
 * xa_tag_pointer() - Create an XArray entry for a tagged pointer.
 * @p: Plain pointer.
 * @tag: Tag value (0, 1 or 3).
 *
 * If the user of the XArray prefers, they can tag their pointers instead
 * of storing value entries. Three tags are available (0, 1 and 3).
 * These are distinct from the xa_mark_t as they are not replicated up
 * through the array and cannot be searched for.
 *
 * Context: Any context.
 * Return: An XArray entry.
 */
static inline void *xa_tag_pointer(void *p, unsigned long tag)
{
	return (void *)((unsigned long)p | tag);
}

/**
 * xa_untag_pointer() - Turn an XArray entry into a plain pointer.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the untagged version of the pointer.
 *
 * Context: Any context.
 * Return: A pointer.
 */
static inline void *xa_untag_pointer(void *entry)
{
	return (void *)((unsigned long)entry & ~3UL);
}

/**
 * xa_pointer_tag() - Get the tag stored in an XArray entry.
 * @entry: XArray entry.
 *
 * If you have stored a tagged pointer in the XArray, call this function
 * to get the tag of that pointer.
 *
 * Context: Any context.
 * Return: A tag.
 */
static inline unsigned int xa_pointer_tag(void *entry)
{
	return (unsigned long)entry & 3UL;
}
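
/*
 * Example (illustrative sketch; "array", "p" and struct foo are
 * hypothetical): tag a pointer on the way in, then recover the pointer
 * and tag on the way out. Tag 2 must not be used, as it would make the
 * entry look like an internal entry.
 *
 *	xa_store(&array, index, xa_tag_pointer(p, 1), GFP_KERNEL);
 *	...
 *	void *entry = xa_load(&array, index);
 *	struct foo *p = xa_untag_pointer(entry);
 *	unsigned int tag = xa_pointer_tag(entry);
 */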

/*
 * xa_mk_internal() - Create an internal entry.
 * @v: Value to turn into an internal entry.
 *
 * Internal entries are used for a number of purposes. Entries 0-255 are
 * used for sibling entries (only 0-62 are used by the current code). 256
 * is used for the retry entry. 257 is used for the reserved / zero entry.
 * Negative internal entries are used to represent errnos. Node pointers
 * are also tagged as internal entries in some situations.
 *
 * Context: Any context.
 * Return: An XArray internal entry corresponding to this value.
 */
static inline void *xa_mk_internal(unsigned long v)
{
	return (void *)((v << 2) | 2);
}

/*
 * xa_to_internal() - Extract the value from an internal entry.
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: The value which was stored in the internal entry.
 */
static inline unsigned long xa_to_internal(const void *entry)
{
	return (unsigned long)entry >> 2;
}

/*
 * xa_is_internal() - Is the entry an internal entry?
 * @entry: XArray entry.
 *
 * Context: Any context.
 * Return: %true if the entry is an internal entry.
 */
static inline bool xa_is_internal(const void *entry)
{
	return ((unsigned long)entry & 3) == 2;
}

#define XA_ZERO_ENTRY		xa_mk_internal(257)

/**
 * xa_is_zero() - Is the entry a zero entry?
 * @entry: Entry retrieved from the XArray
 *
 * The normal API will return NULL as the contents of a slot containing
 * a zero entry. You can only see zero entries by using the advanced API.
 *
 * Return: %true if the entry is a zero entry.
 */
static inline bool xa_is_zero(const void *entry)
{
	return unlikely(entry == XA_ZERO_ENTRY);
}

/**
 * xa_is_err() - Report whether an XArray operation returned an error
 * @entry: Result from calling an XArray function
 *
 * If an XArray operation cannot complete, it will return a special
 * value indicating an error. This function tells you whether an error
 * occurred; xa_err() tells you which error occurred.
 *
 * Context: Any context.
 * Return: %true if the entry indicates an error.
 */
static inline bool xa_is_err(const void *entry)
{
	return unlikely(xa_is_internal(entry) &&
			entry >= xa_mk_internal(-MAX_ERRNO));
}

/**
 * xa_err() - Turn an XArray result into an errno.
 * @entry: Result from calling an XArray function.
 *
 * If an XArray operation cannot complete, it will return a special
 * pointer value which encodes an errno. This function extracts the
 * errno from the pointer value, or returns 0 if the pointer does not
 * represent an errno.
 *
 * Context: Any context.
 * Return: A negative errno or 0.
 */
static inline int xa_err(void *entry)
{
	/* xa_to_internal() would not do sign extension. */
	if (xa_is_err(entry))
		return (long)entry >> 2;
	return 0;
}
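
/*
 * Example (illustrative sketch; "array" and "item" are hypothetical):
 * the usual way to check a store for failure. xa_err() maps a
 * successfully replaced entry (or NULL) to 0 and an error entry to its
 * negative errno.
 *
 *	void *old = xa_store(&array, index, item, GFP_KERNEL);
 *
 *	if (xa_is_err(old))
 *		return xa_err(old);	// -ENOMEM, for instance
 */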

/**
 * struct xa_limit - Represents a range of IDs.
 * @min: The lowest ID to allocate (inclusive).
 * @max: The maximum ID to allocate (inclusive).
 *
 * This structure is used either directly or via the XA_LIMIT() macro
 * to communicate the range of IDs that are valid for allocation.
 * Three common ranges are predefined for you:
 * * xa_limit_32b	- [0 - UINT_MAX]
 * * xa_limit_31b	- [0 - INT_MAX]
 * * xa_limit_16b	- [0 - USHRT_MAX]
 */
struct xa_limit {
	u32 max;
	u32 min;
};

#define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max }

#define xa_limit_32b	XA_LIMIT(0, UINT_MAX)
#define xa_limit_31b	XA_LIMIT(0, INT_MAX)
#define xa_limit_16b	XA_LIMIT(0, USHRT_MAX)

typedef unsigned __bitwise xa_mark_t;
#define XA_MARK_0		((__force xa_mark_t)0U)
#define XA_MARK_1		((__force xa_mark_t)1U)
#define XA_MARK_2		((__force xa_mark_t)2U)
#define XA_PRESENT		((__force xa_mark_t)8U)
#define XA_MARK_MAX		XA_MARK_2
#define XA_FREE_MARK		XA_MARK_0

enum xa_lock_type {
	XA_LOCK_IRQ = 1,
	XA_LOCK_BH = 2,
};

/*
 * Values for xa_flags. The radix tree stores its GFP flags in the xa_flags,
 * and we remain compatible with that.
 */
#define XA_FLAGS_LOCK_IRQ	((__force gfp_t)XA_LOCK_IRQ)
#define XA_FLAGS_LOCK_BH	((__force gfp_t)XA_LOCK_BH)
#define XA_FLAGS_TRACK_FREE	((__force gfp_t)4U)
#define XA_FLAGS_ZERO_BUSY	((__force gfp_t)8U)
#define XA_FLAGS_ALLOC_WRAPPED	((__force gfp_t)16U)
#define XA_FLAGS_ACCOUNT	((__force gfp_t)32U)
#define XA_FLAGS_MARK(mark)	((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
						(__force unsigned)(mark)))

/* ALLOC is for a normal 0-based alloc. ALLOC1 is for a 1-based alloc */
#define XA_FLAGS_ALLOC	(XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK))
#define XA_FLAGS_ALLOC1	(XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY)

/**
 * struct xarray - The anchor of the XArray.
 * @xa_lock: Lock that protects the contents of the XArray.
 *
 * To use the xarray, define it statically or embed it in your data structure.
 * It is a very small data structure, so it does not usually make sense to
 * allocate it separately and keep a pointer to it in your data structure.
 *
 * You may use the xa_lock to protect your own data structures as well.
 */
/*
 * If all of the entries in the array are NULL, @xa_head is a NULL pointer.
 * If the only non-NULL entry in the array is at index 0, @xa_head is that
 * entry. If any other entry in the array is non-NULL, @xa_head points
 * to an @xa_node.
 */
struct xarray {
	spinlock_t	xa_lock;
/* private: The rest of the data structure is not to be used directly. */
	gfp_t		xa_flags;
	void __rcu *	xa_head;
};

#define XARRAY_INIT(name, flags) {			\
	.xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock),	\
	.xa_flags = flags,				\
	.xa_head = NULL,				\
}

/**
 * DEFINE_XARRAY_FLAGS() - Define an XArray with custom flags.
 * @name: A string that names your XArray.
 * @flags: XA_FLAG values.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name and flags. It is
 * equivalent to calling xa_init_flags() on the array, but it does the
 * initialisation at compile time instead of runtime.
 */
#define DEFINE_XARRAY_FLAGS(name, flags)		\
	struct xarray name = XARRAY_INIT(name, flags)

/**
 * DEFINE_XARRAY() - Define an XArray.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of XArrays. It declares
 * and initialises an empty XArray with the chosen name. It is equivalent
 * to calling xa_init() on the array, but it does the initialisation at
 * compile time instead of runtime.
 */
#define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0)

/**
 * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC)

/**
 * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1.
 * @name: A string that names your XArray.
 *
 * This is intended for file scope definitions of allocating XArrays.
 * See also DEFINE_XARRAY().
 */
#define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1)

void *xa_load(struct xarray *, unsigned long index);
void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *xa_erase(struct xarray *, unsigned long index);
void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
		void *entry, gfp_t);
bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
void *xa_find(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
void *xa_find_after(struct xarray *xa, unsigned long *index,
		unsigned long max, xa_mark_t) __attribute__((nonnull(2)));
unsigned int xa_extract(struct xarray *, void **dst, unsigned long start,
		unsigned long max, unsigned int n, xa_mark_t);
void xa_destroy(struct xarray *);

/**
 * xa_init_flags() - Initialise an empty XArray with flags.
 * @xa: XArray.
 * @flags: XA_FLAG values.
 *
 * If you need to initialise an XArray with special flags (e.g. you need
 * to take the lock from interrupt context), use this function instead
 * of xa_init().
 *
 * Context: Any context.
 */
static inline void xa_init_flags(struct xarray *xa, gfp_t flags)
{
	spin_lock_init(&xa->xa_lock);
	xa->xa_flags = flags;
	xa->xa_head = NULL;
}

/**
 * xa_init() - Initialise an empty XArray.
 * @xa: XArray.
 *
 * An empty XArray is full of NULL entries.
 *
 * Context: Any context.
 */
static inline void xa_init(struct xarray *xa)
{
	xa_init_flags(xa, 0);
}
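
/*
 * Example (illustrative sketch; struct foo_cache is hypothetical): an
 * XArray embedded in a larger structure is initialised at runtime; a
 * file-scope array would use DEFINE_XARRAY() instead.
 *
 *	struct foo_cache {
 *		struct xarray entries;
 *	};
 *
 *	static void foo_cache_init(struct foo_cache *cache)
 *	{
 *		xa_init(&cache->entries);
 *	}
 */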

/**
 * xa_empty() - Determine if an array has any present entries.
 * @xa: XArray.
 *
 * Context: Any context.
 * Return: %true if the array contains only NULL pointers.
 */
static inline bool xa_empty(const struct xarray *xa)
{
	return xa->xa_head == NULL;
}

/**
 * xa_marked() - Inquire whether any entry in this array has a mark set
 * @xa: Array
 * @mark: Mark value
 *
 * Context: Any context.
 * Return: %true if any entry has this mark set.
 */
static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
{
	return xa->xa_flags & XA_FLAGS_MARK(mark);
}

/**
 * xa_for_each_range() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 * @last: Last index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to @last.
 *
 * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_range() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_range().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_range(xa, index, entry, start, last)	\
	for (index = start,					\
	     entry = xa_find(xa, &index, last, XA_PRESENT);	\
	     entry;						\
	     entry = xa_find_after(xa, &index, last, XA_PRESENT))

/**
 * xa_for_each_start() - Iterate over a portion of an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @start: First index to retrieve from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you
 * want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set
 * to NULL and @index will have a value less than or equal to ULONG_MAX.
 *
 * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)).
 * xa_for_each_start() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each() iterator instead.
 * The xas_for_each() iterator will expand into more inline code than
 * xa_for_each_start().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_start(xa, index, entry, start) \
	xa_for_each_range(xa, index, entry, start, ULONG_MAX)

/**
 * xa_for_each() - Iterate over present entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. You may modify @index during the iteration if you want
 * to skip or reprocess indices. It is safe to modify the array during the
 * iteration. At the end of the iteration, @entry will be set to NULL and
 * @index will have a value less than or equal to ULONG_MAX.
 *
 * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have
 * to handle your own locking with xas_for_each(), and if you have to unlock
 * after each iteration, it will also end up being O(n.log(n)). xa_for_each()
 * will spin if it hits a retry entry; if you intend to see retry entries,
 * you should use the xas_for_each() iterator instead. The xas_for_each()
 * iterator will expand into more inline code than xa_for_each().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each(xa, index, entry) \
	xa_for_each_start(xa, index, entry, 0)
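
/*
 * Example (illustrative sketch; "array" and struct foo are
 * hypothetical): visit every present entry. @index and @entry are
 * ordinary variables owned by the caller.
 *
 *	unsigned long index;
 *	struct foo *p;
 *
 *	xa_for_each(&array, index, p)
 *		pr_info("index %lu: %p\n", index, p);
 */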

/**
 * xa_for_each_marked() - Iterate over marked entries in an XArray.
 * @xa: XArray.
 * @index: Index of @entry.
 * @entry: Entry retrieved from array.
 * @filter: Selection criterion.
 *
 * During the iteration, @entry will have the value of the entry stored
 * in @xa at @index. The iteration will skip all entries in the array
 * which do not match @filter. You may modify @index during the iteration
 * if you want to skip or reprocess indices. It is safe to modify the array
 * during the iteration. At the end of the iteration, @entry will be set to
 * NULL and @index will have a value less than or equal to ULONG_MAX.
 *
 * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n).
 * You have to handle your own locking with xas_for_each_marked(), and if
 * you have to unlock after each iteration, it will also end up being
 * O(n.log(n)).
 * xa_for_each_marked() will spin if it hits a retry entry; if you intend to
 * see retry entries, you should use the xas_for_each_marked() iterator
 * instead. The xas_for_each_marked() iterator will expand into more inline
 * code than xa_for_each_marked().
 *
 * Context: Any context. Takes and releases the RCU lock.
 */
#define xa_for_each_marked(xa, index, entry, filter) \
	for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \
	     entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter))

#define xa_trylock(xa)		spin_trylock(&(xa)->xa_lock)
#define xa_lock(xa)		spin_lock(&(xa)->xa_lock)
#define xa_unlock(xa)		spin_unlock(&(xa)->xa_lock)
#define xa_lock_bh(xa)		spin_lock_bh(&(xa)->xa_lock)
#define xa_unlock_bh(xa)	spin_unlock_bh(&(xa)->xa_lock)
#define xa_lock_irq(xa)		spin_lock_irq(&(xa)->xa_lock)
#define xa_unlock_irq(xa)	spin_unlock_irq(&(xa)->xa_lock)
#define xa_lock_irqsave(xa, flags) \
				spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
				spin_unlock_irqrestore(&(xa)->xa_lock, flags)
#define xa_lock_nested(xa, subclass) \
				spin_lock_nested(&(xa)->xa_lock, subclass)
#define xa_lock_bh_nested(xa, subclass) \
				spin_lock_bh_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irq_nested(xa, subclass) \
				spin_lock_irq_nested(&(xa)->xa_lock, subclass)
#define xa_lock_irqsave_nested(xa, flags, subclass) \
		spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)

/*
 * Versions of the normal API which require the caller to hold the
 * xa_lock. If the GFP flags allow it, they will drop the lock to
 * allocate memory, then reacquire it afterwards. These functions
 * may also re-enable interrupts if the XArray flags indicate the
 * locking should be interrupt safe.
 */
void *__xa_erase(struct xarray *, unsigned long index);
void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
		void *entry, gfp_t);
int __must_check __xa_insert(struct xarray *, unsigned long index,
		void *entry, gfp_t);
int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry,
		struct xa_limit, gfp_t);
int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry,
		struct xa_limit, u32 *next, gfp_t);
void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);

/**
 * xa_store_bh() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_store_irq() - Store this entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_store() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The old entry at this index or xa_err() if an error happened.
 */
static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
		void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_store(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_erase_bh() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_bh(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_bh(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_bh(xa);

	return entry;
}

/**
 * xa_erase_irq() - Erase this entry from the XArray.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After this function returns, loading from @index will return %NULL.
 * If the index is part of a multi-index entry, all indices will be erased
 * and none of the entries will be part of a multi-index entry.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: The entry which used to be at this index.
 */
static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
{
	void *entry;

	xa_lock_irq(xa);
	entry = __xa_erase(xa, index);
	xa_unlock_irq(xa);

	return entry;
}

/**
 * xa_cmpxchg() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * If the entry at @index is the same as @old, replace it with @entry.
 * If the return value is equal to @old, then the exchange was successful.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep
 * if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock(xa);

	return curr;
}

/**
 * xa_cmpxchg_bh() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables softirqs
 * while holding the array lock.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_bh(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_bh(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_bh(xa);

	return curr;
}

/**
 * xa_cmpxchg_irq() - Conditionally replace an entry in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @old: Old value to test against.
 * @entry: New value to place in array.
 * @gfp: Memory allocation flags.
 *
 * This function is like calling xa_cmpxchg() except it disables interrupts
 * while holding the array lock.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: The old value at this index or xa_err() if an error happened.
 */
static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index,
			void *old, void *entry, gfp_t gfp)
{
	void *curr;

	might_alloc(gfp);
	xa_lock_irq(xa);
	curr = __xa_cmpxchg(xa, index, old, entry, gfp);
	xa_unlock_irq(xa);

	return curr;
}

/**
 * xa_insert() - Store this entry in the XArray unless another entry is
 * already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock(xa);

	return err;
}

/**
 * xa_insert_bh() - Store this entry in the XArray unless another entry is
 * already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_bh(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_insert_irq() - Store this entry in the XArray unless another entry is
 * already present.
 * @xa: XArray.
 * @index: Index into array.
 * @entry: New entry.
 * @gfp: Memory allocation flags.
 *
 * Inserting a NULL entry will store a reserved entry (like xa_reserve())
 * if no entry is present. Inserting will fail if a reserved entry is
 * present, even though loading from this index will return NULL.
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the store succeeded. -EBUSY if another entry was present.
 * -ENOMEM if memory could not be allocated.
 */
static inline int __must_check xa_insert_irq(struct xarray *xa,
		unsigned long index, void *entry, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_insert(xa, index, entry, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be used on an XArray initialised with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock(xa);

	return err;
}
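
/*
 * Example (illustrative sketch; "array", "p" and the id member are
 * hypothetical): allocate an ID for a new object. The array must have
 * been initialised with XA_FLAGS_ALLOC (e.g. DEFINE_XARRAY_ALLOC()).
 *
 *	u32 id;
 *	int err = xa_alloc(&array, &id, p, xa_limit_32b, GFP_KERNEL);
 *
 *	if (err)
 *		return err;	// -ENOMEM or -EBUSY
 *	p->id = id;
 */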

/**
 * xa_alloc_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be used on an XArray initialised with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 *
 * Must only be used on an XArray initialised with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 on success, -ENOMEM if memory could not be allocated or
 * -EBUSY if there are no free entries in @limit.
 */
static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
		void *entry, struct xa_limit limit, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc(xa, id, entry, limit, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be used on an XArray initialised with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
 *
 * Context: Any context. Takes and releases the xa_lock. May sleep if
 * the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock(xa);

	return err;
}
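
/*
 * Example (illustrative sketch; "array", "p" and "next_id" are
 * hypothetical): cyclic allocation remembers where the last search
 * finished, so recently freed IDs are not reused immediately.
 *
 *	static u32 next_id;
 *
 *	err = xa_alloc_cyclic(&array, &id, p, xa_limit_31b, &next_id,
 *			GFP_KERNEL);
 *	if (err < 0)
 *		return err;	// err == 1 only means the search wrapped
 */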

/**
 * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be used on an XArray initialised with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_bh(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_bh(xa);

	return err;
}

/**
 * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray.
 * @xa: XArray.
 * @id: Pointer to ID.
 * @entry: New entry.
 * @limit: Range of allocated ID.
 * @next: Pointer to next ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Finds an empty entry in @xa between @limit.min and @limit.max,
 * stores the index into the @id pointer, then stores the entry at
 * that index. A concurrent lookup will not see an uninitialised @id.
 * The search for an empty entry will start at @next and will wrap
 * around if necessary.
 *
 * Must only be used on an XArray initialised with the XA_FLAGS_ALLOC flag
 * set in xa_init_flags().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts. May sleep if the @gfp flags permit.
 * Return: 0 if the allocation succeeded without wrapping. 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated or -EBUSY if there are no free entries in @limit.
 */
static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry,
		struct xa_limit limit, u32 *next, gfp_t gfp)
{
	int err;

	might_alloc(gfp);
	xa_lock_irq(xa);
	err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp);
	xa_unlock_irq(xa);

	return err;
}

/**
 * xa_reserve() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * Ensures there is somewhere to store an entry at @index in the array.
 * If there is already something stored at @index, this function does
 * nothing. If there was nothing there, the entry is marked as reserved.
 * Loading from a reserved entry returns a %NULL pointer.
 *
 * If you do not use the entry that you have reserved, call xa_release()
 * or xa_erase() to free any unnecessary memory.
 *
 * Context: Any context. Takes and releases the xa_lock.
 * May sleep if the @gfp flags permit.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_bh() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * A softirq-disabling version of xa_reserve().
 *
 * Context: Any context. Takes and releases the xa_lock while
 * disabling softirqs.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_reserve_irq() - Reserve this index in the XArray.
 * @xa: XArray.
 * @index: Index into array.
 * @gfp: Memory allocation flags.
 *
 * An interrupt-disabling version of xa_reserve().
 *
 * Context: Process context. Takes and releases the xa_lock while
 * disabling interrupts.
 * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
 */
static inline __must_check
int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
{
	return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp));
}

/**
 * xa_release() - Release a reserved entry.
 * @xa: XArray.
 * @index: Index of entry.
 *
 * After calling xa_reserve(), you can call this function to release the
 * reservation. If the entry at @index has been stored to, this function
 * will do nothing.
 */
static inline void xa_release(struct xarray *xa, unsigned long index)
{
	xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0);
}
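
/*
 * Example (illustrative sketch; "array", "p" and "setup_failed" are
 * hypothetical): reserve an index before committing to use it. If the
 * later step fails, xa_release() undoes the reservation without
 * disturbing an entry another thread may have stored meanwhile.
 *
 *	int err = xa_reserve(&array, index, GFP_KERNEL);
 *
 *	if (err)
 *		return err;
 *	if (setup_failed)
 *		xa_release(&array, index);
 *	else
 *		xa_store(&array, index, p, GFP_KERNEL);
 */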

/* Everything below here is the Advanced API. Proceed with caution. */

/*
 * The xarray is constructed out of a set of 'chunks' of pointers. Choosing
 * the best chunk size requires some tradeoffs. A power of two recommends
 * itself so that we can walk the tree based purely on shifts and masks.
 * Generally, the larger the better; as the number of slots per level of the
 * tree increases, the less tall the tree needs to be. But that needs to be
 * balanced against the memory consumption of each node. On a 64-bit system,
 * xa_node is currently 576 bytes, and we get 7 of them per 4kB page. If we
 * doubled the number of slots per node, we'd get only 3 nodes per 4kB page.
 */
#ifndef XA_CHUNK_SHIFT
#define XA_CHUNK_SHIFT		(CONFIG_BASE_SMALL ? 4 : 6)
#endif
#define XA_CHUNK_SIZE		(1UL << XA_CHUNK_SHIFT)
#define XA_CHUNK_MASK		(XA_CHUNK_SIZE - 1)
#define XA_MAX_MARKS		3
#define XA_MARK_LONGS		DIV_ROUND_UP(XA_CHUNK_SIZE, BITS_PER_LONG)

/*
 * @count is the count of every non-NULL element in the ->slots array
 * whether that is a value entry, a retry entry, a user pointer,
 * a sibling entry or a pointer to the next level of the tree.
 * @nr_values is the count of every element in ->slots which is
 * either a value entry or a sibling of a value entry.
 */
struct xa_node {
	unsigned char	shift;		/* Bits remaining in each slot */
	unsigned char	offset;		/* Slot offset in parent */
	unsigned char	count;		/* Total entry count */
	unsigned char	nr_values;	/* Value entry count */
	struct xa_node __rcu *parent;	/* NULL at top of tree */
	struct xarray	*array;		/* The array we belong to */
	union {
		struct list_head private_list;	/* For tree user */
		struct rcu_head	rcu_head;	/* Used when freeing node */
	};
	void __rcu	*slots[XA_CHUNK_SIZE];
	union {
		unsigned long	tags[XA_MAX_MARKS][XA_MARK_LONGS];
		unsigned long	marks[XA_MAX_MARKS][XA_MARK_LONGS];
	};
};

void xa_dump(const struct xarray *);
void xa_dump_node(const struct xa_node *);

#ifdef XA_DEBUG
#define XA_BUG_ON(xa, x) do {			\
		if (x) {			\
			xa_dump(xa);		\
			BUG();			\
		}				\
	} while (0)
#define XA_NODE_BUG_ON(node, x) do {		\
		if (x) {			\
			if (node) xa_dump_node(node);	\
			BUG();			\
		}				\
	} while (0)
#else
#define XA_BUG_ON(xa, x)	do { } while (0)
#define XA_NODE_BUG_ON(node, x)	do { } while (0)
#endif

/* Private */
static inline void *xa_head(const struct xarray *xa)
{
	return rcu_dereference_check(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_head_locked(const struct xarray *xa)
{
	return rcu_dereference_protected(xa->xa_head,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_check(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_entry_locked(const struct xarray *xa,
				const struct xa_node *node, unsigned int offset)
{
	XA_NODE_BUG_ON(node, offset >= XA_CHUNK_SIZE);
	return rcu_dereference_protected(node->slots[offset],
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_check(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline struct xa_node *xa_parent_locked(const struct xarray *xa,
					const struct xa_node *node)
{
	return rcu_dereference_protected(node->parent,
						lockdep_is_held(&xa->xa_lock));
}

/* Private */
static inline void *xa_mk_node(const struct xa_node *node)
{
	return (void *)((unsigned long)node | 2);
}

/* Private */
static inline struct xa_node *xa_to_node(const void *entry)
{
	return (struct xa_node *)((unsigned long)entry - 2);
}

/* Private */
static inline bool xa_is_node(const void *entry)
{
	return xa_is_internal(entry) && (unsigned long)entry > 4096;
}

/* Private */
static inline void *xa_mk_sibling(unsigned int offset)
{
	return xa_mk_internal(offset);
}

/* Private */
static inline unsigned long xa_to_sibling(const void *entry)
{
	return xa_to_internal(entry);
}

/**
 * xa_is_sibling() - Is the entry a sibling entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a sibling entry.
 */
static inline bool xa_is_sibling(const void *entry)
{
	return IS_ENABLED(CONFIG_XARRAY_MULTI) && xa_is_internal(entry) &&
		(entry < xa_mk_sibling(XA_CHUNK_SIZE - 1));
}

#define XA_RETRY_ENTRY		xa_mk_internal(256)

/**
 * xa_is_retry() - Is the entry a retry entry?
 * @entry: Entry retrieved from the XArray
 *
 * Return: %true if the entry is a retry entry.
 */
static inline bool xa_is_retry(const void *entry)
{
	return unlikely(entry == XA_RETRY_ENTRY);
}

/**
 * xa_is_advanced() - Is the entry only permitted for the advanced API?
 * @entry: Entry to be stored in the XArray.
 *
 * Return: %true if the entry cannot be stored by the normal API.
 */
static inline bool xa_is_advanced(const void *entry)
{
	return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY);
}

/**
 * typedef xa_update_node_t - A callback function from the XArray.
 * @node: The node which is being processed
 *
 * This function is called every time the XArray updates the count of
 * present and value entries in a node. It allows advanced users to
 * maintain the private_list in the node.
 *
 * Context: The xa_lock is held and interrupts may be disabled.
 * Implementations should not drop the xa_lock, nor re-enable
 * interrupts.
 */
typedef void (*xa_update_node_t)(struct xa_node *node);

void xa_delete_node(struct xa_node *, xa_update_node_t);

/*
 * The xa_state is opaque to its users. It contains various different pieces
 * of state involved in the current operation on the XArray. It should be
 * declared on the stack and passed between the various internal routines.
 * The various elements in it should not be accessed directly, but only
 * through the provided accessor functions. The below documentation is for
 * the benefit of those working on the code, not for users of the XArray.
 *
 * @xa_node usually points to the xa_node containing the slot we're operating
 * on (and @xa_offset is the offset in the slots array). If there is a
 * single entry in the array at index 0, there are no allocated xa_nodes to
 * point to, and so we store %NULL in @xa_node. @xa_node is set to
 * the value %XAS_RESTART if the xa_state is not walked to the correct
 * position in the tree of nodes for this operation. If an error occurs
 * during an operation, it is set to an %XAS_ERROR value. If we run off the
 * end of the allocated nodes, it is set to %XAS_BOUNDS.
 */
struct xa_state {
	struct xarray *xa;
	unsigned long xa_index;
	unsigned char xa_shift;
	unsigned char xa_sibs;
	unsigned char xa_offset;
	unsigned char xa_pad;		/* Helps gcc generate better code */
	struct xa_node *xa_node;
	struct xa_node *xa_alloc;
	xa_update_node_t xa_update;
	struct list_lru *xa_lru;
};

/*
 * We encode errnos in the xas->xa_node. If an error has happened, we need to
 * drop the lock to fix it, and once we've done so the xa_state is invalid.
 */
#define XA_ERROR(errno) ((struct xa_node *)(((unsigned long)errno << 2) | 2UL))
#define XAS_BOUNDS	((struct xa_node *)1UL)
#define XAS_RESTART	((struct xa_node *)3UL)

#define __XA_STATE(array, index, shift, sibs)  {	\
	.xa = array,					\
	.xa_index = index,				\
	.xa_shift = shift,				\
	.xa_sibs = sibs,				\
	.xa_offset = 0,					\
	.xa_pad = 0,					\
	.xa_node = XAS_RESTART,				\
	.xa_alloc = NULL,				\
	.xa_update = NULL,				\
	.xa_lru = NULL,					\
}

/**
 * XA_STATE() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 *
 * Declare and initialise an xa_state on the stack.
 */
#define XA_STATE(name, array, index)			\
	struct xa_state name = __XA_STATE(array, index, 0, 0)
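
/*
 * Example (illustrative sketch; "array" is hypothetical): a minimal
 * advanced-API lookup. The xa_state lives on the stack and the walk
 * is protected only by the RCU read lock.
 *
 *	XA_STATE(xas, &array, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	rcu_read_unlock();
 */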

/**
 * XA_STATE_ORDER() - Declare an XArray operation state.
 * @name: Name of this operation state (usually xas).
 * @array: Array to operate on.
 * @index: Initial index of interest.
 * @order: Order of entry.
 *
 * Declare and initialise an xa_state on the stack. This variant of
 * XA_STATE() allows you to specify the 'order' of the element you
 * want to operate on.
 */
#define XA_STATE_ORDER(name, array, index, order)		\
	struct xa_state name = __XA_STATE(array,		\
			(index >> order) << order,		\
			order - (order % XA_CHUNK_SHIFT),	\
			(1U << (order % XA_CHUNK_SHIFT)) - 1)

#define xas_marked(xas, mark)	xa_marked((xas)->xa, (mark))
#define xas_trylock(xas)	xa_trylock((xas)->xa)
#define xas_lock(xas)		xa_lock((xas)->xa)
#define xas_unlock(xas)		xa_unlock((xas)->xa)
#define xas_lock_bh(xas)	xa_lock_bh((xas)->xa)
#define xas_unlock_bh(xas)	xa_unlock_bh((xas)->xa)
#define xas_lock_irq(xas)	xa_lock_irq((xas)->xa)
#define xas_unlock_irq(xas)	xa_unlock_irq((xas)->xa)
#define xas_lock_irqsave(xas, flags) \
				xa_lock_irqsave((xas)->xa, flags)
#define xas_unlock_irqrestore(xas, flags) \
				xa_unlock_irqrestore((xas)->xa, flags)

/**
 * xas_error() - Return an errno stored in the xa_state.
 * @xas: XArray operation state.
 *
 * Return: 0 if no error has been noted. A negative errno if one has.
 */
static inline int xas_error(const struct xa_state *xas)
{
	return xa_err(xas->xa_node);
}

/**
 * xas_set_err() - Note an error in the xa_state.
 * @xas: XArray operation state.
 * @err: Negative error number.
 *
 * Only call this function with a negative @err; zero or positive errors
 * will probably not behave the way you think they should. If you want
 * to clear the error from an xa_state, use xas_reset().
 */
static inline void xas_set_err(struct xa_state *xas, long err)
{
	xas->xa_node = XA_ERROR(err);
}

/**
 * xas_invalid() - Is the xas in a retry or error state?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas cannot be used for operations.
 */
static inline bool xas_invalid(const struct xa_state *xas)
{
	return (unsigned long)xas->xa_node & 3;
}

/**
 * xas_valid() - Is the xas a valid cursor into the array?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas can be used for operations.
 */
static inline bool xas_valid(const struct xa_state *xas)
{
	return !xas_invalid(xas);
}

/**
 * xas_is_node() - Does the xas point to a node?
 * @xas: XArray operation state.
 *
 * Return: %true if the xas currently references a node.
 */
static inline bool xas_is_node(const struct xa_state *xas)
{
	return xas_valid(xas) && xas->xa_node;
}

/* True if the pointer is something other than a node */
static inline bool xas_not_node(struct xa_node *node)
{
	return ((unsigned long)node & 3) || !node;
}

/* True if the node represents RESTART or an error */
static inline bool xas_frozen(struct xa_node *node)
{
	return (unsigned long)node & 2;
}

/* True if the node represents head-of-tree, RESTART or BOUNDS */
static inline bool xas_top(struct xa_node *node)
{
	return node <= XAS_RESTART;
}

/**
 * xas_reset() - Reset an XArray operation state.
 * @xas: XArray operation state.
 *
 * Resets the error or walk state of the @xas so future walks of the
 * array will start from the root. Use this if you have dropped the
 * xarray lock and want to reuse the xa_state.
 *
 * Context: Any context.
 */
static inline void xas_reset(struct xa_state *xas)
{
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_retry() - Retry the operation if appropriate.
 * @xas: XArray operation state.
 * @entry: Entry from xarray.
 *
 * The advanced functions may sometimes return an internal entry, such as
 * a retry entry or a zero entry. This function sets up the @xas to restart
 * the walk from the head of the array if needed.
 *
 * Context: Any context.
 * Return: true if the operation needs to be retried.
 */
static inline bool xas_retry(struct xa_state *xas, const void *entry)
{
	if (xa_is_zero(entry))
		return true;
	if (!xa_is_retry(entry))
		return false;
	xas_reset(xas);
	return true;
}
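
/*
 * Example (illustrative sketch): the canonical advanced-API load loop
 * under RCU. If the walk races with the tree being rebuilt, xas_retry()
 * resets the state and the lookup restarts from the head of the array.
 *
 *	rcu_read_lock();
 *	do {
 *		entry = xas_load(&xas);
 *	} while (xas_retry(&xas, entry));
 *	rcu_read_unlock();
 */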

void *xas_load(struct xa_state *);
void *xas_store(struct xa_state *, void *entry);
void *xas_find(struct xa_state *, unsigned long max);
void *xas_find_conflict(struct xa_state *);

bool xas_get_mark(const struct xa_state *, xa_mark_t);
void xas_set_mark(const struct xa_state *, xa_mark_t);
void xas_clear_mark(const struct xa_state *, xa_mark_t);
void *xas_find_marked(struct xa_state *, unsigned long max, xa_mark_t);
void xas_init_marks(const struct xa_state *);

bool xas_nomem(struct xa_state *, gfp_t);
void xas_destroy(struct xa_state *);
void xas_pause(struct xa_state *);

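/*
 * Typical allocation loop (sketch; 'item' is a placeholder pointer).
 * If xas_store() could not allocate memory, it sets an ENOMEM error on
 * the xa_state; xas_nomem() then allocates outside the lock and returns
 * true if the operation should be retried.
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));
 */
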
void xas_create_range(struct xa_state *);

#ifdef CONFIG_XARRAY_MULTI
int xa_get_order(struct xarray *, unsigned long index);
void xas_split(struct xa_state *, void *entry, unsigned int order);
void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t);
#else
static inline int xa_get_order(struct xarray *xa, unsigned long index)
{
	return 0;
}

static inline void xas_split(struct xa_state *xas, void *entry,
		unsigned int order)
{
	xas_store(xas, entry);
}

static inline void xas_split_alloc(struct xa_state *xas, void *entry,
		unsigned int order, gfp_t gfp)
{
}
#endif
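
/*
 * Sketch of splitting a multi-index entry, loosely modelled on how the
 * page cache uses these helpers; not a complete sequence.
 * xas_split_alloc() allocates the replacement nodes before the lock is
 * taken; xas_split() then performs the split with the xa_lock held.
 *
 *	xas_split_alloc(&xas, old_entry, old_order, GFP_KERNEL);
 *	if (!xas_error(&xas)) {
 *		xas_lock_irq(&xas);
 *		xas_split(&xas, old_entry, old_order);
 *		xas_unlock_irq(&xas);
 *	}
 */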

/**
 * xas_reload() - Refetch an entry from the xarray.
 * @xas: XArray operation state.
 *
 * Use this function to check that a previously loaded entry still has
 * the same value.  This is useful for the lockless pagecache lookup where
 * we walk the array with only the RCU lock to protect us, lock the page,
 * then check that the page hasn't moved since we looked it up.
 *
 * The caller guarantees that @xas is still valid.  If it may be in an
 * error or restart state, call xas_load() instead.
 *
 * Return: The entry at this location in the xarray.
 */
static inline void *xas_reload(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	char offset;

	if (!node)
		return xa_head(xas->xa);
	if (IS_ENABLED(CONFIG_XARRAY_MULTI)) {
		offset = (xas->xa_index >> node->shift) & XA_CHUNK_MASK;
		entry = xa_entry(xas->xa, node, offset);
		if (!xa_is_sibling(entry))
			return entry;
		offset = xa_to_sibling(entry);
	} else {
		offset = xas->xa_offset;
	}
	return xa_entry(xas->xa, node, offset);
}
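
/*
 * Example (sketch; folio locking shown minimally): the classic
 * lookup, take a reference, recheck pattern that xas_reload()
 * supports.
 *
 *	rcu_read_lock();
 * repeat:
 *	xas_reset(&xas);
 *	folio = xas_load(&xas);
 *	if (xas_retry(&xas, folio))
 *		goto repeat;
 *	if (folio && !folio_try_get(folio))
 *		goto repeat;
 *	if (folio && unlikely(folio != xas_reload(&xas))) {
 *		folio_put(folio);
 *		goto repeat;
 *	}
 *	rcu_read_unlock();
 */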

/**
 * xas_set() - Set up XArray operation state for a different index.
 * @xas: XArray operation state.
 * @index: New index into the XArray.
 *
 * Move the operation state to refer to a different index.  This will
 * have the effect of starting a walk from the top; see xas_next()
 * to move to an adjacent index.
 */
static inline void xas_set(struct xa_state *xas, unsigned long index)
{
	xas->xa_index = index;
	xas->xa_node = XAS_RESTART;
}

/**
 * xas_advance() - Skip over sibling entries.
 * @xas: XArray operation state.
 * @index: Index of last sibling entry.
 *
 * Move the operation state to refer to the last sibling entry.
 * This is useful for loops that normally want to see sibling
 * entries but sometimes want to skip them.  Use xas_set() if you
 * want to move to an index which is not part of this entry.
 */
static inline void xas_advance(struct xa_state *xas, unsigned long index)
{
	unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;

	xas->xa_index = index;
	xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
}

/**
 * xas_set_order() - Set up XArray operation state for a multislot entry.
 * @xas: XArray operation state.
 * @index: Target of the operation.
 * @order: Entry occupies 2^@order indices.
 */
static inline void xas_set_order(struct xa_state *xas, unsigned long index,
					unsigned int order)
{
#ifdef CONFIG_XARRAY_MULTI
	xas->xa_index = order < BITS_PER_LONG ? (index >> order) << order : 0;
	xas->xa_shift = order - (order % XA_CHUNK_SHIFT);
	xas->xa_sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
	xas->xa_node = XAS_RESTART;
#else
	BUG_ON(order > 0);
	xas_set(xas, index);
#endif
}
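
/*
 * Example (sketch): set up the state to treat indices 0x400-0x40f
 * (2^4 = 16 slots) as a single multi-index entry before storing.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *
 *	xas_set_order(&xas, 0x400, 4);
 */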

/**
 * xas_set_update() - Set up XArray operation state for a callback.
 * @xas: XArray operation state.
 * @update: Function to call when updating a node.
 *
 * The XArray can notify a caller after it has updated an xa_node.
 * This is advanced functionality and is only needed by the page
 * cache and swap cache.
 */
static inline void xas_set_update(struct xa_state *xas, xa_update_node_t update)
{
	xas->xa_update = update;
}

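/*
 * Record a list_lru on the operation state so that newly allocated
 * xa_nodes are accounted to it.  Only needed by callers (such as the
 * page cache's shadow-entry tracking) that shrink interior nodes via
 * a list_lru.
 */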
static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
{
	xas->xa_lru = lru;
}

/**
 * xas_next_entry() - Advance iterator to next present entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 *
 * xas_next_entry() is an inline function to optimise xarray traversal for
 * speed.  It is equivalent to calling xas_find(), and will call xas_find()
 * for all the hard cases.
 *
 * Return: The next present entry after the one currently referred to by @xas.
 */
static inline void *xas_next_entry(struct xa_state *xas, unsigned long max)
{
	struct xa_node *node = xas->xa_node;
	void *entry;

	if (unlikely(xas_not_node(node) || node->shift ||
			xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)))
		return xas_find(xas, max);

	do {
		if (unlikely(xas->xa_index >= max))
			return xas_find(xas, max);
		if (unlikely(xas->xa_offset == XA_CHUNK_MASK))
			return xas_find(xas, max);
		entry = xa_entry(xas->xa, node, xas->xa_offset + 1);
		if (unlikely(xa_is_internal(entry)))
			return xas_find(xas, max);
		xas->xa_offset++;
		xas->xa_index++;
	} while (!entry);

	return entry;
}

/* Private */
static inline unsigned int xas_find_chunk(struct xa_state *xas, bool advance,
		xa_mark_t mark)
{
	unsigned long *addr = xas->xa_node->marks[(__force unsigned)mark];
	unsigned int offset = xas->xa_offset;

	if (advance)
		offset++;
	if (XA_CHUNK_SIZE == BITS_PER_LONG) {
		if (offset < XA_CHUNK_SIZE) {
			unsigned long data = *addr & (~0UL << offset);
			if (data)
				return __ffs(data);
		}
		return XA_CHUNK_SIZE;
	}

	return find_next_bit(addr, XA_CHUNK_SIZE, offset);
}

/**
 * xas_next_marked() - Advance iterator to next marked entry.
 * @xas: XArray operation state.
 * @max: Highest index to return.
 * @mark: Mark to search for.
 *
 * xas_next_marked() is an inline function to optimise xarray traversal for
 * speed.  It is equivalent to calling xas_find_marked(), and will call
 * xas_find_marked() for all the hard cases.
 *
 * Return: The next marked entry after the one currently referred to by @xas.
 */
static inline void *xas_next_marked(struct xa_state *xas, unsigned long max,
		xa_mark_t mark)
{
	struct xa_node *node = xas->xa_node;
	void *entry;
	unsigned int offset;

	if (unlikely(xas_not_node(node) || node->shift))
		return xas_find_marked(xas, max, mark);
	offset = xas_find_chunk(xas, true, mark);
	xas->xa_offset = offset;
	xas->xa_index = (xas->xa_index & ~XA_CHUNK_MASK) + offset;
	if (xas->xa_index > max)
		return NULL;
	if (offset == XA_CHUNK_SIZE)
		return xas_find_marked(xas, max, mark);
	entry = xa_entry(xas->xa, node, offset);
	if (!entry)
		return xas_find_marked(xas, max, mark);
	return entry;
}

/*
 * If iterating while holding a lock, drop the lock and reschedule
 * every %XA_CHECK_SCHED loops.
 */
enum {
	XA_CHECK_SCHED = 4096,
};

/**
 * xas_for_each() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 *
 * The loop body will be executed for each entry present in the xarray
 * between the current xas position and @max.  @entry will be set to
 * the entry retrieved from the xarray.  It is safe to delete entries
 * from the array in the loop body.  You should hold either the RCU lock
 * or the xa_lock while iterating.  If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each(xas, entry, max) \
	for (entry = xas_find(xas, max); entry; \
	     entry = xas_next_entry(xas, max))
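
/*
 * Example (sketch; process() is a placeholder): iterate all present
 * entries under the RCU lock, skipping internal retry entries.
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		process(entry);
 *	}
 *	rcu_read_unlock();
 */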

/**
 * xas_for_each_marked() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 * @max: Maximum index to retrieve from array.
 * @mark: Mark to search for.
 *
 * The loop body will be executed for each marked entry in the xarray
 * between the current xas position and @max.  @entry will be set to
 * the entry retrieved from the xarray.  It is safe to delete entries
 * from the array in the loop body.  You should hold either the RCU lock
 * or the xa_lock while iterating.  If you need to drop the lock, call
 * xas_pause() first.
 */
#define xas_for_each_marked(xas, entry, max, mark) \
	for (entry = xas_find_marked(xas, max, mark); entry; \
	     entry = xas_next_marked(xas, max, mark))
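
/*
 * Example (sketch; process() is a placeholder): visit only the entries
 * marked with XA_MARK_0, up to index 127.
 *
 *	xas_for_each_marked(&xas, entry, 127, XA_MARK_0)
 *		process(entry);
 */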

/**
 * xas_for_each_conflict() - Iterate over a range of an XArray.
 * @xas: XArray operation state.
 * @entry: Entry retrieved from the array.
 *
 * The loop body will be executed for each entry in the XArray that
 * lies within the range specified by @xas.  If the loop terminates
 * normally, @entry will be %NULL.  The user may break out of the loop,
 * which will leave @entry set to the conflicting entry.  The caller
 * may also call xa_set_err() to exit the loop while setting an error
 * to record the reason.
 */
#define xas_for_each_conflict(xas, entry) \
	while ((entry = xas_find_conflict(xas)))
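
/*
 * Example (sketch, loosely modelled on page-cache insertion): fail the
 * store if any conflicting entry is a real pointer rather than a value
 * entry.
 *
 *	xas_for_each_conflict(&xas, entry) {
 *		if (!xa_is_value(entry)) {
 *			xas_set_err(&xas, -EEXIST);
 *			break;
 *		}
 *	}
 */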

void *__xas_next(struct xa_state *);
void *__xas_prev(struct xa_state *);

/**
 * xas_prev() - Move iterator to previous index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL.  If the @xas has never been walked,
 * it will have the effect of calling xas_load().  Otherwise one will be
 * subtracted from the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index 0, this function wraps
 * around to %ULONG_MAX.
 *
 * Return: The entry at the new index.  This may be %NULL or an internal
 * entry.
 */
static inline void *xas_prev(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == 0))
		return __xas_prev(xas);

	xas->xa_index--;
	xas->xa_offset--;
	return xa_entry(xas->xa, node, xas->xa_offset);
}

/**
 * xas_next() - Move state to next index.
 * @xas: XArray operation state.
 *
 * If the @xas was in an error state, it will remain in an error state
 * and this function will return %NULL.  If the @xas has never been walked,
 * it will have the effect of calling xas_load().  Otherwise one will be
 * added to the index and the state will be walked to the correct
 * location in the array for the next operation.
 *
 * If the iterator was referencing index %ULONG_MAX, this function wraps
 * around to 0.
 *
 * Return: The entry at the new index.  This may be %NULL or an internal
 * entry.
 */
static inline void *xas_next(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	if (unlikely(xas_not_node(node) || node->shift ||
				xas->xa_offset == XA_CHUNK_MASK))
		return __xas_next(xas);

	xas->xa_index++;
	xas->xa_offset++;
	return xa_entry(xas->xa, node, xas->xa_offset);
}
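
/*
 * Example (sketch; process() is a placeholder): after positioning the
 * state with xas_load(), step through adjacent indices without
 * restarting the walk from the top of the tree.
 *
 *	entry = xas_load(&xas);
 *	while (entry) {
 *		process(entry);
 *		entry = xas_next(&xas);
 *	}
 */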

#endif /* _LINUX_XARRAY_H */