// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

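/*
 * Allocate the per-cpu allocation hints. Unless the map operates in
 * round-robin mode, seed each CPU's hint with a random offset so that
 * different CPUs start their searches in different words of the bitmap.
 */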
static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = prandom_u32() % depth;
	}
	return 0;
}

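/*
 * Read this CPU's allocation hint. If the hint is stale (at or beyond the
 * current depth, e.g. after a resize), replace it with a fresh random
 * offset within the map.
 */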
static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}

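/*
 * If the map was full, reset this CPU's hint; otherwise advance it past the
 * allocated bit, but only if the hint was actually used (or always, in
 * round-robin mode, so that allocations cycle through the map).
 */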
static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

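/*
 * Resize the map. Any deferred clears are flushed first so that the
 * per-word depths are recomputed against fully up-to-date words.
 */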
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

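/*
 * Find and atomically claim a free bit in one word. On contention the search
 * resumes after the contended bit; if @wrap is set, the search may restart
 * from bit 0 when the end of the word is reached.
 */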
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

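/*
 * Try to allocate from a single word, retrying whenever flushing that word's
 * deferred clears frees up more bits to scan.
 */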
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
					!sb->round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}

int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

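/*
 * Like __sbitmap_get(), but each word is only scanned up to @shallow_depth
 * bits, which bounds how many bits of any one word a caller can consume.
 */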
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}

int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

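/*
 * Count bits in either the set words or the deferred-clear masks. The
 * externally visible weight is set minus cleared, i.e. bits that are
 * allocated and not pending a deferred clear.
 */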
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

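/*
 * Emit one byte of the hex dump: a "%08x:" offset header every 16 bytes and
 * a space before each byte pair, matching the classic hexdump layout.
 */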
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

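/*
 * Dump the bitmap as hex bytes, masking out bits with a pending deferred
 * clear and repacking the variable-depth words into a contiguous byte
 * stream for emit_byte().
 */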
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
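	/*
	 * Worked example (a sketch, assuming SBQ_WAIT_QUEUES == 8 and
	 * SBQ_WAKE_BATCH == 8): with depth = 64, shift = 3 (bits_per_word
	 * = 8) and min_shallow_depth = 4, shallow_depth becomes 4, the
	 * usable depth becomes (64 >> 3) * 4 + min(64 & 7, 4) = 32, and
	 * wake_batch = clamp(32 / 8, 1, 8) = 4.
	 */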
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

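/*
 * Grab up to nr_tags consecutive free bits from one word in a single
 * cmpxchg. Returns a mask of the bits actually acquired (anchored at
 * *offset), or 0 if no batch could be taken.
 */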
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map->depth - 1)) - 1)
			continue;

		nr = find_first_zero_bit(&map->word, map->depth);
		if (nr + nr_tags <= map->depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			int map_tags = min_t(int, nr_tags, map->depth);
			unsigned long val, ret;

			get_mask = ((1UL << map_tags) - 1) << nr;
			do {
				val = READ_ONCE(map->word);
				ret = atomic_long_cmpxchg(ptr, val, get_mask | val);
			} while (ret != val);
			get_mask = (get_mask & ~ret) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + map_tags - 1);
				return get_mask;
			}
		}
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

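/*
 * Pick the next wait queue to wake: scan from wake_index for the first
 * queue with sleepers, remembering where we stopped so that wakeups are
 * spread across the SBQ_WAIT_QUEUES wait queues.
 */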
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

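/*
 * Update @cpu's allocation hint with a just-freed tag so that the next
 * allocation on that CPU can start near it. Skipped in round-robin mode.
 */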
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}

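/*
 * Clear a batch of tags, coalescing bits that live in the same word into a
 * single atomic andnot. The deferred-clear mask is bypassed on purpose so
 * that the freed bits become visible immediately.
 */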
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
			       int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be re-allocated.
	 *
	 * Orders READ/WRITE on the associated instance (such as a request
	 * in blk-mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

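/*
 * The helpers below keep sbq->ws_active in sync as sbq_wait entries are
 * added to and removed from the wait queues; ws_active lets sbq_wake_ptr()
 * skip scanning the queues entirely when nobody is waiting.
 */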
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);