/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Fast and scalable bitmaps.
 *
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#ifndef __LINUX_SCALE_BITMAP_H
#define __LINUX_SCALE_BITMAP_H

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/wait.h>

struct seq_file;

/**
 * struct sbitmap_word - Word in a &struct sbitmap.
 */
struct sbitmap_word {
	/**
	 * @word: word holding free bits
	 */
	unsigned long word;

	/**
	 * @cleared: word holding cleared bits
	 */
	unsigned long cleared ____cacheline_aligned_in_smp;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap - Scalable bitmap.
 *
 * A &struct sbitmap is spread over multiple cachelines to avoid ping-pong. This
 * trades off higher memory usage for better scalability.
 */
struct sbitmap {
	/**
	 * @depth: Number of bits used in the whole bitmap.
	 */
	unsigned int depth;

	/**
	 * @shift: log2(number of bits used per word)
	 */
	unsigned int shift;

	/**
	 * @map_nr: Number of words (cachelines) being used for the bitmap.
	 */
	unsigned int map_nr;

	/**
	 * @round_robin: Allocate bits in strict round-robin order.
	 */
	bool round_robin;

	/**
	 * @map: Allocated bitmap.
	 */
	struct sbitmap_word *map;

	/**
	 * @alloc_hint: Cache of last successfully allocated or freed bit.
	 *
	 * This is per-cpu, which allows multiple users to stick to different
	 * cachelines until the map is exhausted.
	 */
	unsigned int __percpu *alloc_hint;
};

#define SBQ_WAIT_QUEUES 8
#define SBQ_WAKE_BATCH 8

/**
 * struct sbq_wait_state - Wait queue in a &struct sbitmap_queue.
 */
struct sbq_wait_state {
	/**
	 * @wait: Wait queue.
	 */
	wait_queue_head_t wait;
} ____cacheline_aligned_in_smp;

/**
 * struct sbitmap_queue - Scalable bitmap with the added ability to wait on free
 * bits.
 *
 * A &struct sbitmap_queue uses multiple wait queues and rolling wakeups to
 * avoid contention on the wait queue spinlock. This ensures that we don't hit a
 * scalability wall when we run out of free bits and have to start putting tasks
 * to sleep.
 */
struct sbitmap_queue {
	/**
	 * @sb: Scalable bitmap.
	 */
	struct sbitmap sb;

	/**
	 * @wake_batch: Number of bits which must be freed before we wake up any
	 * waiters.
	 */
	unsigned int wake_batch;

	/**
	 * @wake_index: Next wait queue in @ws to wake up.
	 */
	atomic_t wake_index;

	/**
	 * @ws: Wait queues.
	 */
	struct sbq_wait_state *ws;

	/**
	 * @ws_active: count of currently active ws waitqueues
	 */
	atomic_t ws_active;

	/**
	 * @min_shallow_depth: The minimum shallow depth which may be passed to
	 * sbitmap_queue_get_shallow().
	 */
	unsigned int min_shallow_depth;

	/**
	 * @completion_cnt: Number of cleared bits passed to the wakeup
	 * function.
	 */
	atomic_t completion_cnt;

	/**
	 * @wakeup_cnt: Number of thread wake-ups issued.
	 */
	atomic_t wakeup_cnt;
};

/**
 * sbitmap_init_node() - Initialize a &struct sbitmap on a specific memory node.
 * @sb: Bitmap to initialize.
 * @depth: Number of bits to allocate.
 * @shift: Use 2^@shift bits per word in the bitmap; if a negative number is
 *         given, a good default is chosen.
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 * @round_robin: If true, be stricter about allocation order; always allocate
 *               starting from the last allocated bit. This is less efficient
 *               than the default behavior (false).
 * @alloc_hint: If true, apply percpu hint for where to start searching for
 *              a free bit.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin, bool alloc_hint);
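
/*
 * Illustrative sketch (not taken from a kernel user): a minimal
 * init/allocate/free cycle for a plain sbitmap. The depth and flags here
 * are assumptions chosen for the example.
 *
 *	struct sbitmap sb;
 *	int nr;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE,
 *			      false, true))
 *		return -ENOMEM;
 *	nr = sbitmap_get(&sb);
 *	if (nr >= 0)
 *		sbitmap_put(&sb, nr);
 *	sbitmap_free(&sb);
 *
 * Passing shift == -1 picks a sensible default word size, and
 * alloc_hint == true enables the per-cpu search hint described in
 * &struct sbitmap.
 */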

/* sbitmap internal helper */
static inline unsigned int __map_depth(const struct sbitmap *sb, int index)
{
	if (index == sb->map_nr - 1)
		return sb->depth - (index << sb->shift);
	return 1U << sb->shift;
}
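
/*
 * Worked example (numbers chosen for illustration): with depth = 100 and
 * shift = 6 (64-bit words), map_nr = 2; __map_depth() returns 64 for
 * index 0 and 100 - (1 << 6) = 36 for the final, partial word.
 */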

/**
 * sbitmap_free() - Free memory used by a &struct sbitmap.
 * @sb: Bitmap to free.
 */
static inline void sbitmap_free(struct sbitmap *sb)
{
	free_percpu(sb->alloc_hint);
	kvfree(sb->map);
	sb->map = NULL;
}

/**
 * sbitmap_resize() - Resize a &struct sbitmap.
 * @sb: Bitmap to resize.
 * @depth: New number of bits to resize to.
 *
 * Doesn't reallocate anything. It's up to the caller to ensure that the new
 * depth doesn't exceed the depth that the sb was initialized with.
 */
void sbitmap_resize(struct sbitmap *sb, unsigned int depth);

/**
 * sbitmap_get() - Try to allocate a free bit from a &struct sbitmap.
 * @sb: Bitmap to allocate from.
 *
 * This operation provides acquire barrier semantics if it succeeds.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get(struct sbitmap *sb);

/**
 * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
 * limiting the depth used from each word.
 * @sb: Bitmap to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 *
 * This rather specific operation allows for having multiple users with
 * different allocation limits. E.g., there can be a high-priority class that
 * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
 * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
 * class can only allocate half of the total bits in the bitmap, preventing it
 * from starving out the high-priority class.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth);
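
/*
 * Illustrative sketch of the two-class scheme described above (variable
 * names are hypothetical):
 *
 *	int hi = sbitmap_get(sb);
 *	int lo = sbitmap_get_shallow(sb, 1UL << (sb->shift - 1));
 *
 * With 64-bit words (shift == 6), the shallow caller can take at most 32
 * bits of any word, so it can never hold more than half of the bitmap or
 * starve the unrestricted caller.
 */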

/**
 * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: true if any bit in the bitmap is set, false otherwise.
 */
bool sbitmap_any_bit_set(const struct sbitmap *sb);

#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))

typedef bool (*sb_for_each_fn)(struct sbitmap *, unsigned int, void *);

/**
 * __sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @start: Where to start the iteration.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 *
 * This is inline even though it's non-trivial so that the function calls to the
 * callback will hopefully get optimized away.
 */
static inline void __sbitmap_for_each_set(struct sbitmap *sb,
					  unsigned int start,
					  sb_for_each_fn fn, void *data)
{
	unsigned int index;
	unsigned int nr;
	unsigned int scanned = 0;

	if (start >= sb->depth)
		start = 0;
	index = SB_NR_TO_INDEX(sb, start);
	nr = SB_NR_TO_BIT(sb, start);

	while (scanned < sb->depth) {
		unsigned long word;
		unsigned int depth = min_t(unsigned int,
					   __map_depth(sb, index) - nr,
					   sb->depth - scanned);

		scanned += depth;
		word = sb->map[index].word & ~sb->map[index].cleared;
		if (!word)
			goto next;

		/*
		 * On the first iteration of the outer loop, we need to add the
		 * bit offset back to the size of the word for find_next_bit().
		 * On all other iterations, nr is zero, so this is a noop.
		 */
		depth += nr;
		while (1) {
			nr = find_next_bit(&word, depth, nr);
			if (nr >= depth)
				break;
			if (!fn(sb, (index << sb->shift) + nr, data))
				return;

			nr++;
		}
next:
		nr = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}
}

/**
 * sbitmap_for_each_set() - Iterate over each set bit in a &struct sbitmap.
 * @sb: Bitmap to iterate over.
 * @fn: Callback. Should return true to continue or false to break early.
 * @data: Pointer to pass to callback.
 */
static inline void sbitmap_for_each_set(struct sbitmap *sb, sb_for_each_fn fn,
					void *data)
{
	__sbitmap_for_each_set(sb, 0, fn, data);
}
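
/*
 * Example callback usage (a hypothetical sketch, not a kernel user):
 * count every bit that is set and not cleared.
 *
 *	static bool count_fn(struct sbitmap *sb, unsigned int bitnr,
 *			     void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *		return true;
 *	}
 *
 *	unsigned int count = 0;
 *	sbitmap_for_each_set(sb, count_fn, &count);
 *
 * Returning false from the callback would stop the walk early.
 */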

static inline unsigned long *__sbitmap_word(struct sbitmap *sb,
					    unsigned int bitnr)
{
	return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word;
}

/* Helpers equivalent to the operations in asm/bitops.h and linux/bitmap.h */

static inline void sbitmap_set_bit(struct sbitmap *sb, unsigned int bitnr)
{
	set_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	clear_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

/*
 * This one is special, since it doesn't actually clear the bit, rather it
 * sets the corresponding bit in the ->cleared mask instead. Paired with
 * the caller doing sbitmap_deferred_clear() if a given index is full, which
 * will clear the previously freed entries in the corresponding ->word.
 */
static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
{
	unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

	set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
}
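
/*
 * An illustrative free path (sketch, relying on the behavior described
 * above) would do
 *
 *	sbitmap_deferred_clear_bit(sb, nr);
 *
 * instead of sbitmap_clear_bit(), leaving it to the allocation side to
 * fold ->cleared back into ->word once a word appears full, so the
 * expensive atomic clears are batched.
 */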

/*
 * Pairs with sbitmap_get(); this applies both the cleared bit and the
 * allocation hint.
 */
static inline void sbitmap_put(struct sbitmap *sb, unsigned int bitnr)
{
	sbitmap_deferred_clear_bit(sb, bitnr);

	if (likely(sb->alloc_hint && !sb->round_robin && bitnr < sb->depth))
		*raw_cpu_ptr(sb->alloc_hint) = bitnr;
}

static inline int sbitmap_test_bit(struct sbitmap *sb, unsigned int bitnr)
{
	return test_bit(SB_NR_TO_BIT(sb, bitnr), __sbitmap_word(sb, bitnr));
}

static inline int sbitmap_calculate_shift(unsigned int depth)
{
	int shift = ilog2(BITS_PER_LONG);

	/*
	 * If the bitmap is small, shrink the number of bits per word so
	 * we spread over a few cachelines, at least. If less than 4
	 * bits, just forget about it, it's not going to work optimally
	 * anyway.
	 */
	if (depth >= 4) {
		while ((4U << shift) > depth)
			shift--;
	}

	return shift;
}
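
/*
 * Worked example (illustrative numbers): on a 64-bit kernel, shift starts
 * at ilog2(64) = 6. For depth = 16 the loop lowers shift to 2, since
 * (4 << 2) == 16 is no longer greater than the depth, giving 4 bits per
 * word spread across 4 words instead of all 16 bits packed into one.
 */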

/**
 * sbitmap_show() - Dump &struct sbitmap information to a &struct seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_weight() - Return the number of set and not cleared bits in a
 * &struct sbitmap.
 * @sb: Bitmap to check.
 *
 * Return: Number of bits that are set and not cleared.
 */
unsigned int sbitmap_weight(const struct sbitmap *sb);

/**
 * sbitmap_bitmap_show() - Write a hex dump of a &struct sbitmap to a &struct
 * seq_file.
 * @sb: Bitmap to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The output isn't guaranteed to be internally
 * consistent.
 */
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m);

/**
 * sbitmap_queue_init_node() - Initialize a &struct sbitmap_queue on a specific
 * memory node.
 * @sbq: Bitmap queue to initialize.
 * @depth: See sbitmap_init_node().
 * @shift: See sbitmap_init_node().
 * @round_robin: See sbitmap_get().
 * @flags: Allocation flags.
 * @node: Memory node to allocate on.
 *
 * Return: Zero on success or negative errno on failure.
 */
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node);

/**
 * sbitmap_queue_free() - Free memory used by a &struct sbitmap_queue.
 * @sbq: Bitmap queue to free.
 */
static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
{
	kfree(sbq->ws);
	sbitmap_free(&sbq->sb);
}

/**
 * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
 * @sbq: Bitmap queue to recalculate wake batch for.
 * @users: Number of shares.
 *
 * Like sbitmap_queue_update_wake_batch(), this calculates the wake batch
 * from the depth. This interface is for HCTX shared tags or queue shared tags.
 */
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					  unsigned int users);

/**
 * sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
 * @sbq: Bitmap queue to resize.
 * @depth: New number of bits to resize to.
 *
 * Like sbitmap_resize(), this doesn't reallocate anything. It has to do
 * some extra work on the &struct sbitmap_queue, so it's not safe to just
 * resize the underlying &struct sbitmap.
 */
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);

/**
 * __sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue with preemption already disabled.
 * @sbq: Bitmap queue to allocate from.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int __sbitmap_queue_get(struct sbitmap_queue *sbq);

/**
 * __sbitmap_queue_get_batch() - Try to allocate a batch of free bits
 * @sbq: Bitmap queue to allocate from.
 * @nr_tags: number of tags requested
 * @offset: offset to add to returned bits
 *
 * Return: Mask of allocated tags, 0 if none are found. Each tag allocated is
 * a bit in the mask returned, and the caller must add @offset to the value to
 * get the absolute tag value.
 */
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset);
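
/*
 * Illustrative sketch of consuming the returned mask (use_tag() is a
 * hypothetical stand-in for the caller's per-tag work):
 *
 *	unsigned int offset, bit;
 *	unsigned long mask = __sbitmap_queue_get_batch(sbq, 4, &offset);
 *
 *	for_each_set_bit(bit, &mask, BITS_PER_LONG)
 *		use_tag(offset + bit);
 */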

/**
 * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
 * sbitmap_queue, limiting the depth used from each word, with preemption
 * already disabled.
 * @sbq: Bitmap queue to allocate from.
 * @shallow_depth: The maximum number of bits to allocate from a single word.
 * See sbitmap_get_shallow().
 *
 * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after
 * initializing @sbq.
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth);

/**
 * sbitmap_queue_get() - Try to allocate a free bit from a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to allocate from.
 * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
 * sbitmap_queue_clear()).
 *
 * Return: Non-negative allocated bit number if successful, -1 otherwise.
 */
static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
				    unsigned int *cpu)
{
	int nr;

	*cpu = get_cpu();
	nr = __sbitmap_queue_get(sbq);
	put_cpu();
	return nr;
}
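
/*
 * Illustrative allocate/free pairing (sketch only): the CPU number
 * returned through @cpu must be handed back to sbitmap_queue_clear().
 *
 *	unsigned int cpu;
 *	int nr = sbitmap_queue_get(sbq, &cpu);
 *
 *	if (nr >= 0) {
 *		... use nr as a tag ...
 *		sbitmap_queue_clear(sbq, nr, cpu);
 *	}
 */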

/**
 * sbitmap_queue_min_shallow_depth() - Inform a &struct sbitmap_queue of the
 * minimum shallow depth that will be used.
 * @sbq: Bitmap queue in question.
 * @min_shallow_depth: The minimum shallow depth that will be passed to
 * sbitmap_queue_get_shallow() or __sbitmap_queue_get_shallow().
 *
 * sbitmap_queue_clear() batches wakeups as an optimization. The batch size
 * depends on the depth of the bitmap. Since the shallow allocation functions
 * effectively operate with a different depth, the shallow depth must be taken
 * into account when calculating the batch size. This function must be called
 * with the minimum shallow depth that will be used. Failure to do so can result
 * in missed wakeups.
 */
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth);
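
/*
 * For example (illustrative): a user that will ever pass a shallow depth
 * of 1 to sbitmap_queue_get_shallow() must announce it up front,
 *
 *	sbitmap_queue_min_shallow_depth(sbq, 1);
 *
 * so that the wake batch is sized for the smallest effective depth.
 */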

/**
 * sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @nr: Bit number to free.
 * @cpu: CPU the bit was allocated on.
 */
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu);

/**
 * sbitmap_queue_clear_batch() - Free a batch of allocated bits from a
 * &struct sbitmap_queue.
 * @sbq: Bitmap to free from.
 * @offset: offset for each tag in array
 * @tags: array of tags
 * @nr_tags: number of tags in array
 */
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags);

static inline int sbq_index_inc(int index)
{
	return (index + 1) & (SBQ_WAIT_QUEUES - 1);
}

static inline void sbq_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = sbq_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/**
 * sbq_wait_ptr() - Get the next wait queue to use for a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wait on.
 * @wait_index: A counter per "user" of @sbq.
 */
static inline struct sbq_wait_state *sbq_wait_ptr(struct sbitmap_queue *sbq,
						  atomic_t *wait_index)
{
	struct sbq_wait_state *ws;

	ws = &sbq->ws[atomic_read(wait_index)];
	sbq_index_atomic_inc(wait_index);
	return ws;
}

/**
 * sbitmap_queue_wake_all() - Wake up everything waiting on a &struct
 * sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 */
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq);

/**
 * sbitmap_queue_wake_up() - Wake up some of the waiters in one waitqueue
 * on a &struct sbitmap_queue.
 * @sbq: Bitmap queue to wake up.
 * @nr: Number of bits cleared.
 */
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr);

/**
 * sbitmap_queue_show() - Dump &struct sbitmap_queue information to a &struct
 * seq_file.
 * @sbq: Bitmap queue to show.
 * @m: struct seq_file to write to.
 *
 * This is intended for debugging. The format may change at any time.
 */
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m);

struct sbq_wait {
	struct sbitmap_queue *sbq;	/* if set, sbq_wait is accounted */
	struct wait_queue_entry wait;
};

#define DEFINE_SBQ_WAIT(name)						\
	struct sbq_wait name = {					\
		.sbq = NULL,						\
		.wait = {						\
			.private	= current,			\
			.func		= autoremove_wake_function,	\
			.entry		= LIST_HEAD_INIT((name).wait.entry), \
		}							\
	}

/*
 * Wrapper around prepare_to_wait_exclusive(), which maintains some extra
 * internal state.
 */
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state);

/*
 * Must be paired with sbitmap_prepare_to_wait().
 */
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait);
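
/*
 * Illustrative wait loop (a sketch modeled on how blk-mq waits for a free
 * tag; the surrounding variables, including wait_index, are assumptions):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = sbq_wait_ptr(sbq, &wait_index);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = __sbitmap_queue_get(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */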

/*
 * Wrapper around add_wait_queue(), which maintains some extra internal state.
 */
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait);

/*
 * Must be paired with sbitmap_add_wait_queue().
 */
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait);

#endif /* __LINUX_SCALE_BITMAP_H */