// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}

static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));

	return true;
}

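/*
 * Worked example (illustrative, not from the original source): with
 * word = 0b1111 and bits 1 and 3 freed through the deferred path,
 * cleared = 0b1010. xchg() grabs that mask and zeroes ->cleared, and
 * the andnot leaves word = 0b0101, making bits 1 and 3 allocatable
 * again with a single atomic op instead of one per freed bit.
 */
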
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

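/*
 * Illustrative walk (hypothetical values): with depth = 8, hint = 5 and
 * wrap = true, find_next_zero_bit() first scans bits 5..7; if those are
 * all set, hint resets to 0 once and the scan restarts from bit 0
 * before returning -1. If the zero bit found is stolen by a racing
 * test_and_set_bit_lock(), the search resumes at nr + 1.
 */
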
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
					alloc_hint, !sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min_t(unsigned int,
					      __map_depth(sb, index),
					      shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

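/*
 * Example output (illustrative): a 16-bit map with bits 0..5 set and
 * nothing deferred-cleared dumps as
 *
 *	00000000: 3f00
 *
 * i.e. a "%08x:" offset header every 16 bytes, a space every two
 * bytes, and two hex digits per byte.
 */
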
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}

	if (byte_bits) {
		emit_byte(m, offset, byte);
		seq_putc(m, '\n');
	}
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

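/*
 * Worked example (illustrative numbers): depth = 128 with shift = 6
 * (64 bits per word) and min_shallow_depth = 16 caps each word at
 * shallow_depth = 16, so the usable depth is (128 >> 6) * 16 +
 * min(128 & 63, 16) = 32. With SBQ_WAIT_QUEUES = 8 this gives
 * wake_batch = clamp(32 / 8, 1, SBQ_WAKE_BATCH) = 4.
 */
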
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}

void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;
	unsigned int min_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	min_batch = sbq->sb.depth >= (4 * SBQ_WAIT_QUEUES) ? 4 : 1;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			min_batch, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);

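/*
 * Worked example (illustrative numbers): sb.depth = 256 shared by
 * users = 4 gives a per-user depth of (256 + 3) / 4 = 64. Since
 * 256 >= 4 * SBQ_WAIT_QUEUES, min_batch = 4, and 64 / SBQ_WAIT_QUEUES = 8
 * falls inside [4, SBQ_WAKE_BATCH], so wake_batch becomes 8.
 */
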
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			val = READ_ONCE(map->word);
			do {
				if ((val & ~get_mask) != val)
					goto next;
			} while (!atomic_long_try_cmpxchg(ptr, &val,
							  get_mask | val));
			get_mask = (get_mask & ~val) >> nr;
			*offset = nr + (index << sb->shift);
			update_alloc_hint_after_get(sb, depth, hint,
						*offset + nr_tags - 1);
			return get_mask;
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

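/*
 * Worked example of the mask arithmetic above (illustrative values):
 * nr_tags = 3 free bits found at nr = 4 give
 * get_mask = 0b111 << 4 = 0x70. The cmpxchg loop only succeeds while
 * (val & ~get_mask) == val, i.e. all three bits are still zero, so the
 * caller receives the mask 0b111 with *offset = 4 plus the word's base
 * offset (index << sb->shift).
 */
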
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness, by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		/*
		 * It is sufficient to wake up at least one waiter to
		 * guarantee forward progress.
		 */
		if (waitqueue_active(&ws->wait) &&
		    wake_up_nr(&ws->wait, nr))
			break;
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

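/*
 * Illustrative trace (hypothetical numbers): with wake_batch = 4 and
 * completions arriving one at a time, the first three calls return
 * early because completion_cnt - wakeup_cnt < 4; the fourth advances
 * wakeup_cnt by 4 through the cmpxchg and issues a single
 * __sbitmap_queue_wake_up() for the whole batch, amortizing the cost
 * of scanning the wait queues.
 */
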
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

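/*
 * Worked example (hypothetical tags, shift = 6): clearing tags
 * {2, 5, 67} accumulates mask = BIT(2) | BIT(5) while both tags map to
 * word 0, flushes that mask with one atomic_long_andnot() when tag 67
 * switches to word 1, and finally clears BIT(3) (67 & 63) in word 1:
 * one atomic op per touched word rather than one per tag.
 */
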
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as the request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait=%s},\n",
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);