dm bio prison v1: intelligently size dm_bio_prison's prison_regions
[linux-block.git] drivers/md/dm-bio-prison-v1.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison-v1.h"
#include "dm-bio-prison-v2.h"

#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>

/*----------------------------------------------------------------*/

#define MIN_CELLS 1024

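/*
 * Cells are spread over a number of regions, each with its own spinlock
 * and rbtree, so detains/releases on different parts of the key space
 * don't all contend on one lock.  Each region is cacheline aligned to
 * avoid false sharing between neighbouring locks.
 */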
struct prison_region {
        spinlock_t lock;
        struct rb_root cell;
} ____cacheline_aligned_in_smp;

struct dm_bio_prison {
        mempool_t cell_pool;
        unsigned int num_locks;
        struct prison_region regions[];
};

static struct kmem_cache *_cell_cache;

/*----------------------------------------------------------------*/

/*
 * The cell mempool is sized for MIN_CELLS cells in use _concurrently_;
 * don't confuse that with the number of distinct keys.  The number of
 * lock regions comes from dm_num_hash_locks().
 */
struct dm_bio_prison *dm_bio_prison_create(void)
{
        int ret;
        unsigned int i, num_locks;
        struct dm_bio_prison *prison;

        num_locks = dm_num_hash_locks();
        prison = kzalloc(struct_size(prison, regions, num_locks), GFP_KERNEL);
        if (!prison)
                return NULL;
        prison->num_locks = num_locks;

        for (i = 0; i < prison->num_locks; i++) {
                spin_lock_init(&prison->regions[i].lock);
                prison->regions[i].cell = RB_ROOT;
        }

        ret = mempool_init_slab_pool(&prison->cell_pool, MIN_CELLS, _cell_cache);
        if (ret) {
                kfree(prison);
                return NULL;
        }

        return prison;
}
EXPORT_SYMBOL_GPL(dm_bio_prison_create);

void dm_bio_prison_destroy(struct dm_bio_prison *prison)
{
        mempool_exit(&prison->cell_pool);
        kfree(prison);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);

struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
{
        return mempool_alloc(&prison->cell_pool, gfp);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);

void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
                             struct dm_bio_prison_cell *cell)
{
        mempool_free(cell, &prison->cell_pool);
}
EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);

static void __setup_new_cell(struct dm_cell_key *key,
                             struct bio *holder,
                             struct dm_bio_prison_cell *cell)
{
        memcpy(&cell->key, key, sizeof(cell->key));
        cell->holder = holder;
        bio_list_init(&cell->bios);
}

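/*
 * Compare keys field by field: virtual, then dev, then block range.
 * Overlapping block ranges compare as equal, so a lookup for any part
 * of a range finds the cell that already covers it.
 */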
static int cmp_keys(struct dm_cell_key *lhs,
                    struct dm_cell_key *rhs)
{
        if (lhs->virtual < rhs->virtual)
                return -1;

        if (lhs->virtual > rhs->virtual)
                return 1;

        if (lhs->dev < rhs->dev)
                return -1;

        if (lhs->dev > rhs->dev)
                return 1;

        if (lhs->block_end <= rhs->block_begin)
                return -1;

        if (lhs->block_begin >= rhs->block_end)
                return 1;

        return 0;
}

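/*
 * Map a key to the region (and so the lock) protecting it.  Keys are
 * grouped by BIO_PRISON_MAX_RANGE sized chunks of the block space, so
 * an entire valid range always falls under a single lock.  Assumes
 * num_locks is a power of two.
 */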
static unsigned lock_nr(struct dm_cell_key *key, unsigned int num_locks)
{
        return (key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT) & (num_locks - 1);
}

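/*
 * A key's block range must lie within one BIO_PRISON_MAX_RANGE aligned
 * group; otherwise it could straddle two regions and hence two locks.
 */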
bool dm_cell_key_has_valid_range(struct dm_cell_key *key)
{
        if (WARN_ON_ONCE(key->block_end - key->block_begin > BIO_PRISON_MAX_RANGE))
                return false;
        if (WARN_ON_ONCE((key->block_begin >> BIO_PRISON_MAX_RANGE_SHIFT) !=
                         (key->block_end - 1) >> BIO_PRISON_MAX_RANGE_SHIFT))
                return false;

        return true;
}
EXPORT_SYMBOL(dm_cell_key_has_valid_range);

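/*
 * Walk the region's rbtree looking for a cell whose key overlaps @key.
 * If one exists, @inmate (if any) is added to its bio list and 1 is
 * returned with *cell_result pointing at the existing cell.  Otherwise
 * @cell_prealloc becomes a new cell with @inmate as holder, is inserted
 * into the tree, and 0 is returned.
 */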
static int __bio_detain(struct rb_root *root,
                        struct dm_cell_key *key,
                        struct bio *inmate,
                        struct dm_bio_prison_cell *cell_prealloc,
                        struct dm_bio_prison_cell **cell_result)
{
        int r;
        struct rb_node **new = &root->rb_node, *parent = NULL;

        while (*new) {
                struct dm_bio_prison_cell *cell =
                        rb_entry(*new, struct dm_bio_prison_cell, node);

                r = cmp_keys(key, &cell->key);

                parent = *new;
                if (r < 0)
                        new = &((*new)->rb_left);
                else if (r > 0)
                        new = &((*new)->rb_right);
                else {
                        if (inmate)
                                bio_list_add(&cell->bios, inmate);
                        *cell_result = cell;
                        return 1;
                }
        }

        __setup_new_cell(key, inmate, cell_prealloc);
        *cell_result = cell_prealloc;

        rb_link_node(&cell_prealloc->node, parent, new);
        rb_insert_color(&cell_prealloc->node, root);

        return 0;
}

static int bio_detain(struct dm_bio_prison *prison,
                      struct dm_cell_key *key,
                      struct bio *inmate,
                      struct dm_bio_prison_cell *cell_prealloc,
                      struct dm_bio_prison_cell **cell_result)
{
        int r;
        unsigned l = lock_nr(key, prison->num_locks);

        spin_lock_irq(&prison->regions[l].lock);
        r = __bio_detain(&prison->regions[l].cell, key, inmate, cell_prealloc, cell_result);
        spin_unlock_irq(&prison->regions[l].lock);

        return r;
}

int dm_bio_detain(struct dm_bio_prison *prison,
                  struct dm_cell_key *key,
                  struct bio *inmate,
                  struct dm_bio_prison_cell *cell_prealloc,
                  struct dm_bio_prison_cell **cell_result)
{
        return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_bio_detain);

int dm_get_cell(struct dm_bio_prison *prison,
                struct dm_cell_key *key,
                struct dm_bio_prison_cell *cell_prealloc,
                struct dm_bio_prison_cell **cell_result)
{
        return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
}
EXPORT_SYMBOL_GPL(dm_get_cell);
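
/*
 * Illustrative sketch of the detain/release cycle (not part of the
 * original file; the real callers live in targets such as dm-thin).
 * dev_id, block, bio and deferred_bios are hypothetical caller state:
 *
 *	struct dm_bio_prison_cell *prealloc, *cell;
 *	struct dm_cell_key key = {
 *		.virtual = 0,
 *		.dev = dev_id,
 *		.block_begin = block,
 *		.block_end = block + 1,
 *	};
 *
 *	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);
 *	if (dm_bio_detain(prison, &key, bio, prealloc, &cell)) {
 *		// cell already held: bio is now queued on it
 *		dm_bio_prison_free_cell(prison, prealloc);
 *	} else {
 *		// we are the holder: do the work, then hand back the bios
 *		dm_cell_release(prison, cell, &deferred_bios);
 *		dm_bio_prison_free_cell(prison, cell);
 *	}
 */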

/*
 * @inmates must have been initialised prior to this call
 */
static void __cell_release(struct rb_root *root,
                           struct dm_bio_prison_cell *cell,
                           struct bio_list *inmates)
{
        rb_erase(&cell->node, root);

        if (inmates) {
                if (cell->holder)
                        bio_list_add(inmates, cell->holder);
                bio_list_merge(inmates, &cell->bios);
        }
}

void dm_cell_release(struct dm_bio_prison *prison,
                     struct dm_bio_prison_cell *cell,
                     struct bio_list *bios)
{
        unsigned l = lock_nr(&cell->key, prison->num_locks);

        spin_lock_irq(&prison->regions[l].lock);
        __cell_release(&prison->regions[l].cell, cell, bios);
        spin_unlock_irq(&prison->regions[l].lock);
}
EXPORT_SYMBOL_GPL(dm_cell_release);

/*
 * Sometimes we don't want the holder, just the additional bios.
 */
static void __cell_release_no_holder(struct rb_root *root,
                                     struct dm_bio_prison_cell *cell,
                                     struct bio_list *inmates)
{
        rb_erase(&cell->node, root);
        bio_list_merge(inmates, &cell->bios);
}

void dm_cell_release_no_holder(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell,
                               struct bio_list *inmates)
{
        unsigned l = lock_nr(&cell->key, prison->num_locks);
        unsigned long flags;

        spin_lock_irqsave(&prison->regions[l].lock, flags);
        __cell_release_no_holder(&prison->regions[l].cell, cell, inmates);
        spin_unlock_irqrestore(&prison->regions[l].lock, flags);
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);

void dm_cell_error(struct dm_bio_prison *prison,
                   struct dm_bio_prison_cell *cell, blk_status_t error)
{
        struct bio_list bios;
        struct bio *bio;

        bio_list_init(&bios);
        dm_cell_release(prison, cell, &bios);

        while ((bio = bio_list_pop(&bios))) {
                bio->bi_status = error;
                bio_endio(bio);
        }
}
EXPORT_SYMBOL_GPL(dm_cell_error);

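/*
 * Call @visit_fn on the cell under the region lock, then remove it from
 * the tree.  Unlike dm_cell_release() no bios are handed back; the
 * visit function is expected to take care of them.
 */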
void dm_cell_visit_release(struct dm_bio_prison *prison,
                           void (*visit_fn)(void *, struct dm_bio_prison_cell *),
                           void *context,
                           struct dm_bio_prison_cell *cell)
{
        unsigned l = lock_nr(&cell->key, prison->num_locks);

        spin_lock_irq(&prison->regions[l].lock);
        visit_fn(context, cell);
        rb_erase(&cell->node, &prison->regions[l].cell);
        spin_unlock_irq(&prison->regions[l].lock);
}
EXPORT_SYMBOL_GPL(dm_cell_visit_release);

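/*
 * If no bios are waiting, the cell is removed from the tree and 1 is
 * returned; the cell is no longer part of the prison.  Otherwise the
 * next waiting bio is promoted to holder, the cell stays in the tree
 * and 0 is returned.
 */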
static int __promote_or_release(struct rb_root *root,
                                struct dm_bio_prison_cell *cell)
{
        if (bio_list_empty(&cell->bios)) {
                rb_erase(&cell->node, root);
                return 1;
        }

        cell->holder = bio_list_pop(&cell->bios);
        return 0;
}

int dm_cell_promote_or_release(struct dm_bio_prison *prison,
                               struct dm_bio_prison_cell *cell)
{
        int r;
        unsigned l = lock_nr(&cell->key, prison->num_locks);

        spin_lock_irq(&prison->regions[l].lock);
        r = __promote_or_release(&prison->regions[l].cell, cell);
        spin_unlock_irq(&prison->regions[l].lock);

        return r;
}
EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);

/*----------------------------------------------------------------*/

#define DEFERRED_SET_SIZE 64

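/*
 * The deferred set is a small ring of entries.  In-flight work takes a
 * reference on the current entry; deferred work items are attached to
 * the current entry and are only handed back to the caller once that
 * entry and all earlier ones have dropped to a zero count.
 */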
struct dm_deferred_entry {
        struct dm_deferred_set *ds;
        unsigned int count;
        struct list_head work_items;
};

struct dm_deferred_set {
        spinlock_t lock;
        unsigned int current_entry;
        unsigned int sweeper;
        struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};

struct dm_deferred_set *dm_deferred_set_create(void)
{
        int i;
        struct dm_deferred_set *ds;

        ds = kmalloc(sizeof(*ds), GFP_KERNEL);
        if (!ds)
                return NULL;

        spin_lock_init(&ds->lock);
        ds->current_entry = 0;
        ds->sweeper = 0;
        for (i = 0; i < DEFERRED_SET_SIZE; i++) {
                ds->entries[i].ds = ds;
                ds->entries[i].count = 0;
                INIT_LIST_HEAD(&ds->entries[i].work_items);
        }

        return ds;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_create);

void dm_deferred_set_destroy(struct dm_deferred_set *ds)
{
        kfree(ds);
}
EXPORT_SYMBOL_GPL(dm_deferred_set_destroy);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
{
        unsigned long flags;
        struct dm_deferred_entry *entry;

        spin_lock_irqsave(&ds->lock, flags);
        entry = ds->entries + ds->current_entry;
        entry->count++;
        spin_unlock_irqrestore(&ds->lock, flags);

        return entry;
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);

static unsigned int ds_next(unsigned int index)
{
        return (index + 1) % DEFERRED_SET_SIZE;
}

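/*
 * Advance the sweeper over any fully drained entries, splicing their
 * queued work items onto @head for the caller to process.
 */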
static void __sweep(struct dm_deferred_set *ds, struct list_head *head)
{
        while ((ds->sweeper != ds->current_entry) &&
               !ds->entries[ds->sweeper].count) {
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
                ds->sweeper = ds_next(ds->sweeper);
        }

        if ((ds->sweeper == ds->current_entry) && !ds->entries[ds->sweeper].count)
                list_splice_init(&ds->entries[ds->sweeper].work_items, head);
}

void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(&entry->ds->lock, flags);
        BUG_ON(!entry->count);
        --entry->count;
        __sweep(entry->ds, head);
        spin_unlock_irqrestore(&entry->ds->lock, flags);
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);

/*
 * Returns 1 if the work was deferred behind in-flight entries, or 0 if
 * nothing is pending and the job can be issued immediately.
 */
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
        int r = 1;
        unsigned int next_entry;

        spin_lock_irq(&ds->lock);
        if ((ds->sweeper == ds->current_entry) &&
            !ds->entries[ds->current_entry].count)
                r = 0;
        else {
                list_add(work, &ds->entries[ds->current_entry].work_items);
                next_entry = ds_next(ds->current_entry);
                if (!ds->entries[next_entry].count)
                        ds->current_entry = next_entry;
        }
        spin_unlock_irq(&ds->lock);

        return r;
}
EXPORT_SYMBOL_GPL(dm_deferred_set_add_work);
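
/*
 * Illustrative sketch of the deferred set in use (not part of the
 * original file; 'job' is a hypothetical caller structure):
 *
 *	entry = dm_deferred_entry_inc(ds);	// before issuing the io
 *	...
 *	dm_deferred_entry_dec(entry, &work);	// on completion
 *	// now run anything that was spliced onto 'work'
 *
 * Work that must wait for everything currently in flight is queued with
 * dm_deferred_set_add_work(ds, &job->list); a return of 0 means nothing
 * is pending and the job can run straight away.
 */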

/*----------------------------------------------------------------*/

static int __init dm_bio_prison_init_v1(void)
{
        _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
        if (!_cell_cache)
                return -ENOMEM;

        return 0;
}

static void dm_bio_prison_exit_v1(void)
{
        kmem_cache_destroy(_cell_cache);
        _cell_cache = NULL;
}

static int (*_inits[])(void) __initdata = {
        dm_bio_prison_init_v1,
        dm_bio_prison_init_v2,
};

static void (*_exits[])(void) = {
        dm_bio_prison_exit_v1,
        dm_bio_prison_exit_v2,
};

static int __init dm_bio_prison_init(void)
{
        const int count = ARRAY_SIZE(_inits);

        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_bio_prison_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}

/*
 * module hooks
 */
module_init(dm_bio_prison_init);
module_exit(dm_bio_prison_exit);

MODULE_DESCRIPTION(DM_NAME " bio prison");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");