// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}

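/*
 * Worked example of the deferred clear (illustrative, not from the
 * original source): with map->word == 0b1011 and map->cleared == 0b0010,
 * xchg() hands us mask == 0b0010 and zeroes ->cleared, and the andnot
 * leaves map->word == 0b1001, so bit 1 becomes allocatable again.
 */
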
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

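/*
 * Worked example of the shift heuristic in sbitmap_init_node()
 * (illustrative; assumes BITS_PER_LONG == 64): for depth == 64 and
 * shift < 0, shift starts at ilog2(64) == 6 and is decremented while
 * (4U << shift) > 64, ending at shift == 4. That gives
 * bits_per_word == 16 and map_nr == 4, so the 64 bits are spread over
 * four words (and likely several cachelines) instead of packed into one.
 */
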
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

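/*
 * Note that sbitmap_resize() never reallocates sb->map; it only flushes
 * deferred clears and recomputes the per-word depths. The caller is
 * expected to keep the new depth within the depth the sbitmap was
 * initialized with.
 */
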
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

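/*
 * Worked example of the search above (illustrative, not from the
 * original source): with *word == 0b00001111, depth == 8, hint == 2 and
 * wrap == true, find_next_zero_bit() skips the set bits 2 and 3 and
 * returns 4, test_and_set_bit_lock() claims bit 4, and the function
 * returns 4. Had bits 4..7 also been set, the search would restart once
 * from hint == 0 before giving up and returning -1.
 */
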
static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	struct sbitmap_word *map = &sb->map[index];
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, map->depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
						round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

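/*
 * How the hint is split (illustrative; SB_NR_TO_INDEX() and
 * SB_NR_TO_BIT() are the shift/mask helpers from
 * include/linux/sbitmap.h): with sb->shift == 4, i.e. 16 bits per word,
 * alloc_hint == 37 gives index == 37 >> 4 == 2 and bit == 37 & 15 == 5,
 * bit 5 of the third word. The returned tag is rebuilt above as
 * nr + (index << sb->shift).
 */
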
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(&sb->map[index]))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

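/*
 * Because shallow_depth caps the search within each word rather than
 * across the bitmap as a whole, the effective number of allocatable bits
 * is roughly map_nr * min(bits_per_word, shallow_depth) (an observation,
 * not a comment from the original source).
 */
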
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

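/*
 * Sample of what sbitmap_show() emits, e.g. through debugfs
 * (illustrative values only):
 *
 *	depth=128
 *	busy=17
 *	cleared=3
 *	bits_per_word=32
 *	map_nr=4
 */
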
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

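/*
 * emit_byte() produces a classic hex dump: a "%08x:" offset header every
 * 16 bytes and a space every two bytes, e.g. (illustrative output):
 *
 *	00000000: ff3f 0000 0000 0000 0000 0000 0000 0000
 */
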
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

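/*
 * Worked example of the calculation above (illustrative; assumes
 * SBQ_WAIT_QUEUES == 8 and SBQ_WAKE_BATCH == 8, their values in
 * include/linux/sbitmap.h): with depth == 128, shift == 4 and
 * min_shallow_depth == UINT_MAX, shallow_depth == 16, the limited depth
 * is (128 >> 4) * 16 + min(128 & 15, 16) == 128, and wake_batch ==
 * clamp(128 / 8, 1, 8) == 8. Lowering min_shallow_depth to 4 limits the
 * depth to 8 * 4 == 32 and the batch to clamp(32 / 8, 1, 8) == 4.
 */
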
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

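/*
 * Minimal caller sketch (illustrative, not from the original source;
 * try_tag_wait() is a hypothetical slow path): callers typically try the
 * fast path first and fall back to a wait queue on failure, roughly:
 *
 *	int nr = __sbitmap_queue_get(sbq);
 *
 *	if (nr < 0)
 *		nr = try_tag_wait(sbq);	// block on one of sbq->ws and retry
 */
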
int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

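/*
 * Note the ordering this implies: min_shallow_depth must be set before
 * any shallow allocation is attempted (the WARN_ON_ONCE() in
 * __sbitmap_queue_get_shallow() checks exactly this), since the wake
 * batch is sized against it.
 */
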
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers of this, the one that failed the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated again.
	 *
	 * This orders READ/WRITE on the associated instance (such as a
	 * blk-mq request) against this bit, to avoid racing with
	 * re-allocation; its pair is the memory barrier implied in
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);
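
/*
 * A sketch of how the wait helpers above compose (illustrative, loosely
 * modeled on blk-mq's tag allocator; pick_wait_state() and try_alloc()
 * are hypothetical stand-ins):
 *
 *	DEFINE_SBQ_WAIT(wait);
 *	struct sbq_wait_state *ws = pick_wait_state(sbq);
 *	int nr;
 *
 *	do {
 *		sbitmap_prepare_to_wait(sbq, ws, &wait, TASK_UNINTERRUPTIBLE);
 *		nr = try_alloc(sbq);
 *		if (nr >= 0)
 *			break;
 *		io_schedule();
 *		sbitmap_finish_wait(sbq, ws, &wait);
 *	} while (1);
 *	sbitmap_finish_wait(sbq, ws, &wait);
 */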