// SPDX-License-Identifier: GPL-2.0-only
/*
 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
 * Shaohua Li <shli@fb.com>
 */
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/init.h>
#include "null_blk.h"

#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
#define SECTOR_MASK		(PAGE_SECTORS - 1)

#define FREE_BATCH		16

#define TICKS_PER_SEC		50ULL
#define TIMER_INTERVAL		(NSEC_PER_SEC / TICKS_PER_SEC)

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static DECLARE_FAULT_ATTR(null_timeout_attr);
static DECLARE_FAULT_ATTR(null_requeue_attr);
#endif

static inline u64 mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
}
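
/*
 * Example: with mbps = 100, mb_per_tick() returns
 * (1 << 20) / 50 * 100 = 2,097,100 bytes (integer division), so each
 * 20 ms TIMER_INTERVAL tick refills just under 2 MiB of bandwidth
 * budget, i.e. approximately 100 MiB per second.
 */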

/*
 * Status flags for nullb_device.
 *
 * CONFIGURED:	Device has been configured and turned on. Cannot reconfigure.
 * UP:		Device is currently on and visible in userspace.
 * THROTTLED:	Device is being throttled.
 * CACHE:	Device is using a write-back cache.
 */
enum nullb_device_flags {
	NULLB_DEV_FL_CONFIGURED	= 0,
	NULLB_DEV_FL_UP		= 1,
	NULLB_DEV_FL_THROTTLED	= 2,
	NULLB_DEV_FL_CACHE	= 3,
};

#define MAP_SZ		((PAGE_SIZE >> SECTOR_SHIFT) + 2)
/*
 * nullb_page is a page in memory for nullb devices.
 *
 * @page:	The page holding the data.
 * @bitmap:	The bitmap represents which sectors in the page hold data.
 *		Each bit represents one block size. For example, sector 8
 *		will use the 7th bit.
 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
 * cache page is being flushed to storage. FREE means the cache page is freed
 * and should be skipped when flushing to storage. Please see
 * null_make_cache_space.
 */
struct nullb_page {
	struct page *page;
	DECLARE_BITMAP(bitmap, MAP_SZ);
};
#define NULLB_PAGE_LOCK (MAP_SZ - 1)
#define NULLB_PAGE_FREE (MAP_SZ - 2)

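/*
 * Bitmap layout, assuming a 4 KiB page with 512-byte sectors
 * (MAP_SZ = 10):
 *
 *   bits 0..7	one bit per 512-byte sector of the page
 *   bit 8	NULLB_PAGE_FREE (MAP_SZ - 2)
 *   bit 9	NULLB_PAGE_LOCK (MAP_SZ - 1)
 *
 * null_page_empty() below only scans bits [0, MAP_SZ - 3], so the two
 * control bits never make a page look non-empty.
 */
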
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static DEFINE_IDA(nullb_indexes);
static struct blk_mq_tag_set tag_set;

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int g_no_sched;
module_param_named(no_sched, g_no_sched, int, 0444);
MODULE_PARM_DESC(no_sched, "No io scheduler");

static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static char g_timeout_str[80];
module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);

static char g_requeue_str[80];
module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
#endif

static int g_queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int g_gb = 250;
module_param_named(gb, g_gb, int, 0444);
MODULE_PARM_DESC(gb, "Size in GB");

static int g_bs = 512;
module_param_named(bs, g_bs, int, 0444);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 1;
module_param(nr_devices, int, 0444);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static bool g_blocking;
module_param_named(blocking, g_blocking, bool, 0444);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");

static bool shared_tags;
module_param(shared_tags, bool, 0444);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");

static int g_irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static unsigned long g_completion_nsec = 10000;
module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int g_hw_queue_depth = 64;
module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool g_use_per_node_hctx;
module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static bool g_zoned;
module_param_named(zoned, g_zoned, bool, S_IRUGO);
MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false");

static unsigned long g_zone_size = 256;
module_param_named(zone_size, g_zone_size, ulong, S_IRUGO);
MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two: Default: 256");

static unsigned int g_zone_nr_conv;
module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");

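/*
 * Typical module-load invocations (illustrative; every parameter used
 * here is defined above):
 *
 *   modprobe null_blk nr_devices=2 gb=4 bs=4096
 *   modprobe null_blk queue_mode=2 submit_queues=4 hw_queue_depth=128
 *   modprobe null_blk zoned=1 zone_size=128 zone_nr_conv=2
 */
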
static struct nullb_device *null_alloc_dev(void);
static void null_free_dev(struct nullb_device *dev);
static void null_del_dev(struct nullb *nullb);
static int null_add_dev(struct nullb_device *dev);
static void null_free_device_storage(struct nullb_device *dev, bool is_cache);

static inline struct nullb_device *to_nullb_device(struct config_item *item)
{
	return item ? container_of(item, struct nullb_device, item) : NULL;
}

static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
	char *page)
{
	return snprintf(page, PAGE_SIZE, "%lu\n", val);
}

static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n", val);
}

static ssize_t nullb_device_uint_attr_store(unsigned int *val,
	const char *page, size_t count)
{
	unsigned int tmp;
	int result;

	result = kstrtouint(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
	const char *page, size_t count)
{
	int result;
	unsigned long tmp;

	result = kstrtoul(page, 0, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
	size_t count)
{
	bool tmp;
	int result;

	result = kstrtobool(page, &tmp);
	if (result)
		return result;

	*val = tmp;
	return count;
}

/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
#define NULLB_DEVICE_ATTR(NAME, TYPE)					\
static ssize_t								\
nullb_device_##NAME##_show(struct config_item *item, char *page)	\
{									\
	return nullb_device_##TYPE##_attr_show(				\
				to_nullb_device(item)->NAME, page);	\
}									\
static ssize_t								\
nullb_device_##NAME##_store(struct config_item *item, const char *page, \
			    size_t count)				\
{									\
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
		return -EBUSY;						\
	return nullb_device_##TYPE##_attr_store(			\
			&to_nullb_device(item)->NAME, page, count);	\
}									\
CONFIGFS_ATTR(nullb_device_, NAME);

NULLB_DEVICE_ATTR(size, ulong);
NULLB_DEVICE_ATTR(completion_nsec, ulong);
NULLB_DEVICE_ATTR(submit_queues, uint);
NULLB_DEVICE_ATTR(home_node, uint);
NULLB_DEVICE_ATTR(queue_mode, uint);
NULLB_DEVICE_ATTR(blocksize, uint);
NULLB_DEVICE_ATTR(irqmode, uint);
NULLB_DEVICE_ATTR(hw_queue_depth, uint);
NULLB_DEVICE_ATTR(index, uint);
NULLB_DEVICE_ATTR(blocking, bool);
NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
NULLB_DEVICE_ATTR(memory_backed, bool);
NULLB_DEVICE_ATTR(discard, bool);
NULLB_DEVICE_ATTR(mbps, uint);
NULLB_DEVICE_ATTR(cache_size, ulong);
NULLB_DEVICE_ATTR(zoned, bool);
NULLB_DEVICE_ATTR(zone_size, ulong);
NULLB_DEVICE_ATTR(zone_nr_conv, uint);

static ssize_t nullb_device_power_show(struct config_item *item, char *page)
{
	return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
}

static ssize_t nullb_device_power_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *dev = to_nullb_device(item);
	bool newp = false;
	ssize_t ret;

	ret = nullb_device_bool_attr_store(&newp, page, count);
	if (ret < 0)
		return ret;

	if (!dev->power && newp) {
		if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
			return count;
		if (null_add_dev(dev)) {
			clear_bit(NULLB_DEV_FL_UP, &dev->flags);
			return -ENOMEM;
		}

		set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
		dev->power = newp;
	} else if (dev->power && !newp) {
		if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
			mutex_lock(&lock);
			dev->power = newp;
			null_del_dev(dev->nullb);
			mutex_unlock(&lock);
		}
		clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
	}

	return count;
}

CONFIGFS_ATTR(nullb_device_, power);

static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
{
	struct nullb_device *t_dev = to_nullb_device(item);

	return badblocks_show(&t_dev->badblocks, page, 0);
}

static ssize_t nullb_device_badblocks_store(struct config_item *item,
				     const char *page, size_t count)
{
	struct nullb_device *t_dev = to_nullb_device(item);
	char *orig, *buf, *tmp;
	u64 start, end;
	int ret;

	orig = kstrndup(page, count, GFP_KERNEL);
	if (!orig)
		return -ENOMEM;

	buf = strstrip(orig);

	ret = -EINVAL;
	if (buf[0] != '+' && buf[0] != '-')
		goto out;
	tmp = strchr(&buf[1], '-');
	if (!tmp)
		goto out;
	*tmp = '\0';
	ret = kstrtoull(buf + 1, 0, &start);
	if (ret)
		goto out;
	ret = kstrtoull(tmp + 1, 0, &end);
	if (ret)
		goto out;
	ret = -EINVAL;
	if (start > end)
		goto out;
	/* enable badblocks */
	cmpxchg(&t_dev->badblocks.shift, -1, 0);
	if (buf[0] == '+')
		ret = badblocks_set(&t_dev->badblocks, start,
			end - start + 1, 1);
	else
		ret = badblocks_clear(&t_dev->badblocks, start,
			end - start + 1);
	if (ret == 0)
		ret = count;
out:
	kfree(orig);
	return ret;
}
CONFIGFS_ATTR(nullb_device_, badblocks);

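/*
 * The accepted badblocks syntax is "+<start>-<end>" to mark a sector
 * range bad and "-<start>-<end>" to clear it again. For example, once
 * a <name> item exists under the configfs tree created below:
 *
 *   echo "+0-1023" > /sys/kernel/config/nullb/<name>/badblocks
 *   echo "-0-1023" > /sys/kernel/config/nullb/<name>/badblocks
 */
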
static struct configfs_attribute *nullb_device_attrs[] = {
	&nullb_device_attr_size,
	&nullb_device_attr_completion_nsec,
	&nullb_device_attr_submit_queues,
	&nullb_device_attr_home_node,
	&nullb_device_attr_queue_mode,
	&nullb_device_attr_blocksize,
	&nullb_device_attr_irqmode,
	&nullb_device_attr_hw_queue_depth,
	&nullb_device_attr_index,
	&nullb_device_attr_blocking,
	&nullb_device_attr_use_per_node_hctx,
	&nullb_device_attr_power,
	&nullb_device_attr_memory_backed,
	&nullb_device_attr_discard,
	&nullb_device_attr_mbps,
	&nullb_device_attr_cache_size,
	&nullb_device_attr_badblocks,
	&nullb_device_attr_zoned,
	&nullb_device_attr_zone_size,
	&nullb_device_attr_zone_nr_conv,
	NULL,
};

static void nullb_device_release(struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	null_free_device_storage(dev, false);
	null_free_dev(dev);
}

static struct configfs_item_operations nullb_device_ops = {
	.release	= nullb_device_release,
};

static const struct config_item_type nullb_device_type = {
	.ct_item_ops	= &nullb_device_ops,
	.ct_attrs	= nullb_device_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct
config_item *nullb_group_make_item(struct config_group *group, const char *name)
{
	struct nullb_device *dev;

	dev = null_alloc_dev();
	if (!dev)
		return ERR_PTR(-ENOMEM);

	config_item_init_type_name(&dev->item, name, &nullb_device_type);

	return &dev->item;
}

static void
nullb_group_drop_item(struct config_group *group, struct config_item *item)
{
	struct nullb_device *dev = to_nullb_device(item);

	if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
		mutex_lock(&lock);
		dev->power = false;
		null_del_dev(dev->nullb);
		mutex_unlock(&lock);
	}

	config_item_put(item);
}

static ssize_t memb_group_features_show(struct config_item *item, char *page)
{
	return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n");
}

CONFIGFS_ATTR_RO(memb_group_, features);

static struct configfs_attribute *nullb_group_attrs[] = {
	&memb_group_attr_features,
	NULL,
};

static struct configfs_group_operations nullb_group_ops = {
	.make_item	= nullb_group_make_item,
	.drop_item	= nullb_group_drop_item,
};

static const struct config_item_type nullb_group_type = {
	.ct_group_ops	= &nullb_group_ops,
	.ct_attrs	= nullb_group_attrs,
	.ct_owner	= THIS_MODULE,
};

static struct configfs_subsystem nullb_subsys = {
	.su_group = {
		.cg_item = {
			.ci_namebuf = "nullb",
			.ci_type = &nullb_group_type,
		},
	},
};

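/*
 * Illustrative configfs workflow for creating a device at runtime
 * (assumes configfs is mounted at /sys/kernel/config; the /dev node
 * name comes from the ida-assigned index, so it need not match the
 * config item name):
 *
 *   mkdir /sys/kernel/config/nullb/nullb0
 *   echo 4096 > /sys/kernel/config/nullb/nullb0/blocksize
 *   echo 1 > /sys/kernel/config/nullb/nullb0/memory_backed
 *   echo 1 > /sys/kernel/config/nullb/nullb0/power    # brings /dev/nullb* up
 *   echo 0 > /sys/kernel/config/nullb/nullb0/power    # tears it down
 *   rmdir /sys/kernel/config/nullb/nullb0
 */
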
static inline int null_cache_active(struct nullb *nullb)
{
	return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
}

static struct nullb_device *null_alloc_dev(void)
{
	struct nullb_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
	INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
	if (badblocks_init(&dev->badblocks, 0)) {
		kfree(dev);
		return NULL;
	}

	dev->size = g_gb * 1024;
	dev->completion_nsec = g_completion_nsec;
	dev->submit_queues = g_submit_queues;
	dev->home_node = g_home_node;
	dev->queue_mode = g_queue_mode;
	dev->blocksize = g_bs;
	dev->irqmode = g_irqmode;
	dev->hw_queue_depth = g_hw_queue_depth;
	dev->blocking = g_blocking;
	dev->use_per_node_hctx = g_use_per_node_hctx;
	dev->zoned = g_zoned;
	dev->zone_size = g_zone_size;
	dev->zone_nr_conv = g_zone_nr_conv;
	return dev;
}

static void null_free_dev(struct nullb_device *dev)
{
	if (!dev)
		return;

	null_zone_exit(dev);
	badblocks_exit(&dev->badblocks);
	kfree(dev);
}

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
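
/*
 * get_tag() is a lockless tag allocator: find_first_zero_bit() picks a
 * candidate and test_and_set_bit_lock() claims it atomically; if
 * another CPU won the race, the loop simply retries. -1U signals that
 * the queue is saturated, and alloc_cmd() below then sleeps on
 * nq->wait until put_tag() releases a bit and wakes the waiter.
 */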

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		if (nq->dev->irqmode == NULL_IRQ_TIMER) {
			hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL);
			cmd->timer.function = null_cmd_timer_expired;
		}
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}

static void end_cmd(struct nullb_cmd *cmd)
{
	int queue_mode = cmd->nq->dev->queue_mode;

	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, cmd->error);
		return;
	case NULL_Q_BIO:
		cmd->bio->bi_status = cmd->error;
		bio_endio(cmd->bio);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	end_cmd(container_of(timer, struct nullb_cmd, timer));

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	ktime_t kt = cmd->nq->dev->completion_nsec;

	hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}

static void null_complete_rq(struct request *rq)
{
	end_cmd(blk_mq_rq_to_pdu(rq));
}

static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
{
	struct nullb_page *t_page;

	t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
	if (!t_page)
		goto out;

	t_page->page = alloc_pages(gfp_flags, 0);
	if (!t_page->page)
		goto out_freepage;

	memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
	return t_page;
out_freepage:
	kfree(t_page);
out:
	return NULL;
}

static void null_free_page(struct nullb_page *t_page)
{
	__set_bit(NULLB_PAGE_FREE, t_page->bitmap);
	if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
		return;
	__free_page(t_page->page);
	kfree(t_page);
}

static bool null_page_empty(struct nullb_page *page)
{
	int size = MAP_SZ - 2;

	return find_first_bit(page->bitmap, size) == size;
}

static void null_free_sector(struct nullb *nullb, sector_t sector,
	bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page, *ret;
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	t_page = radix_tree_lookup(root, idx);
	if (t_page) {
		__clear_bit(sector_bit, t_page->bitmap);

		if (null_page_empty(t_page)) {
			ret = radix_tree_delete_item(root, idx, t_page);
			WARN_ON(ret != t_page);
			null_free_page(ret);
			if (is_cache)
				nullb->dev->curr_cache -= PAGE_SIZE;
		}
	}
}

static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
	struct nullb_page *t_page, bool is_cache)
{
	struct radix_tree_root *root;

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;

	if (radix_tree_insert(root, idx, t_page)) {
		null_free_page(t_page);
		t_page = radix_tree_lookup(root, idx);
		WARN_ON(!t_page || t_page->page->index != idx);
	} else if (is_cache)
		nullb->dev->curr_cache += PAGE_SIZE;

	return t_page;
}

static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
{
	unsigned long pos = 0;
	int nr_pages;
	struct nullb_page *ret, *t_pages[FREE_BATCH];
	struct radix_tree_root *root;

	root = is_cache ? &dev->cache : &dev->data;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(root,
				(void **)t_pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			pos = t_pages[i]->page->index;
			ret = radix_tree_delete_item(root, pos, t_pages[i]);
			WARN_ON(ret != t_pages[i]);
			null_free_page(ret);
		}

		pos++;
	} while (nr_pages == FREE_BATCH);

	if (is_cache)
		dev->curr_cache = 0;
}

static struct nullb_page *__null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool is_cache)
{
	unsigned int sector_bit;
	u64 idx;
	struct nullb_page *t_page;
	struct radix_tree_root *root;

	idx = sector >> PAGE_SECTORS_SHIFT;
	sector_bit = (sector & SECTOR_MASK);

	root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
	t_page = radix_tree_lookup(root, idx);
	WARN_ON(t_page && t_page->page->index != idx);

	if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
		return t_page;

	return NULL;
}

static struct nullb_page *null_lookup_page(struct nullb *nullb,
	sector_t sector, bool for_write, bool ignore_cache)
{
	struct nullb_page *page = NULL;

	if (!ignore_cache)
		page = __null_lookup_page(nullb, sector, for_write, true);
	if (page)
		return page;
	return __null_lookup_page(nullb, sector, for_write, false);
}

static struct nullb_page *null_insert_page(struct nullb *nullb,
					   sector_t sector, bool ignore_cache)
	__releases(&nullb->lock)
	__acquires(&nullb->lock)
{
	u64 idx;
	struct nullb_page *t_page;

	t_page = null_lookup_page(nullb, sector, true, ignore_cache);
	if (t_page)
		return t_page;

	spin_unlock_irq(&nullb->lock);

	t_page = null_alloc_page(GFP_NOIO);
	if (!t_page)
		goto out_lock;

	if (radix_tree_preload(GFP_NOIO))
		goto out_freepage;

	spin_lock_irq(&nullb->lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	t_page->page->index = idx;
	t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
	radix_tree_preload_end();

	return t_page;
out_freepage:
	null_free_page(t_page);
out_lock:
	spin_lock_irq(&nullb->lock);
	return null_lookup_page(nullb, sector, true, ignore_cache);
}

static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
{
	int i;
	unsigned int offset;
	u64 idx;
	struct nullb_page *t_page, *ret;
	void *dst, *src;

	idx = c_page->page->index;

	t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);

	__clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
	if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
		null_free_page(c_page);
		if (t_page && null_page_empty(t_page)) {
			ret = radix_tree_delete_item(&nullb->dev->data,
				idx, t_page);
			null_free_page(t_page);
		}
		return 0;
	}

	if (!t_page)
		return -ENOMEM;

	src = kmap_atomic(c_page->page);
	dst = kmap_atomic(t_page->page);

	for (i = 0; i < PAGE_SECTORS;
			i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
		if (test_bit(i, c_page->bitmap)) {
			offset = (i << SECTOR_SHIFT);
			memcpy(dst + offset, src + offset,
				nullb->dev->blocksize);
			__set_bit(i, t_page->bitmap);
		}
	}

	kunmap_atomic(dst);
	kunmap_atomic(src);

	ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
	null_free_page(ret);
	nullb->dev->curr_cache -= PAGE_SIZE;

	return 0;
}

static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
	int i, err, nr_pages;
	struct nullb_page *c_pages[FREE_BATCH];
	unsigned long flushed = 0, one_round;

again:
	if ((nullb->dev->cache_size * 1024 * 1024) >
	     nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
		return 0;

	nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
			(void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
	/*
	 * null_flush_cache_page() could drop the lock before using the
	 * c_pages. To avoid a race, we don't allow page free here.
	 */
	for (i = 0; i < nr_pages; i++) {
		nullb->cache_flush_pos = c_pages[i]->page->index;
		/*
		 * Skip any page that another thread is already flushing
		 * to disk.
		 */
		if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
			c_pages[i] = NULL;
		else
			__set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
	}

	one_round = 0;
	for (i = 0; i < nr_pages; i++) {
		if (c_pages[i] == NULL)
			continue;
		err = null_flush_cache_page(nullb, c_pages[i]);
		if (err)
			return err;
		one_round++;
	}
	flushed += one_round << PAGE_SHIFT;

	if (n > flushed) {
		if (nr_pages == 0)
			nullb->cache_flush_pos = 0;
		if (one_round == 0) {
			/* give other threads a chance */
			spin_unlock_irq(&nullb->lock);
			spin_lock_irq(&nullb->lock);
		}
		goto again;
	}
	return 0;
}
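
/*
 * Summary of the write-back cache path: dirty sectors first land in
 * pages tracked by dev->cache. Once the cache would exceed
 * cache_size MiB, null_make_cache_space() walks the cache tree in
 * FREE_BATCH chunks from cache_flush_pos, locks each victim page via
 * NULLB_PAGE_LOCK, and null_flush_cache_page() copies its dirty
 * sectors into the backing dev->data tree before dropping the cache
 * page. The LOCK/FREE bits let this run safely while the spinlock is
 * transiently released inside null_insert_page().
 */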

static int copy_to_nullb(struct nullb *nullb, struct page *source,
	unsigned int off, sector_t sector, size_t n, bool is_fua)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		if (null_cache_active(nullb) && !is_fua)
			null_make_cache_space(nullb, PAGE_SIZE);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_insert_page(nullb, sector,
			!null_cache_active(nullb) || is_fua);
		if (!t_page)
			return -ENOSPC;

		src = kmap_atomic(source);
		dst = kmap_atomic(t_page->page);
		memcpy(dst + offset, src + off + count, temp);
		kunmap_atomic(dst);
		kunmap_atomic(src);

		__set_bit(sector & SECTOR_MASK, t_page->bitmap);

		if (is_fua)
			null_free_sector(nullb, sector, true);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}
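
/*
 * Note the FUA semantics above: a FUA write bypasses the cache tree
 * (ignore_cache == true, so the data goes straight to dev->data) and
 * then drops any stale copy of the same sector from the cache, giving
 * the "force unit access" write-through guarantee.
 */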

static int copy_from_nullb(struct nullb *nullb, struct page *dest,
	unsigned int off, sector_t sector, size_t n)
{
	size_t temp, count = 0;
	unsigned int offset;
	struct nullb_page *t_page;
	void *dst, *src;

	while (count < n) {
		temp = min_t(size_t, nullb->dev->blocksize, n - count);

		offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
		t_page = null_lookup_page(nullb, sector, false,
			!null_cache_active(nullb));

		dst = kmap_atomic(dest);
		if (!t_page) {
			memset(dst + off + count, 0, temp);
			goto next;
		}
		src = kmap_atomic(t_page->page);
		memcpy(dst + off + count, src + offset, temp);
		kunmap_atomic(src);
next:
		kunmap_atomic(dst);

		count += temp;
		sector += temp >> SECTOR_SHIFT;
	}
	return 0;
}

static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
{
	size_t temp;

	spin_lock_irq(&nullb->lock);
	while (n > 0) {
		temp = min_t(size_t, n, nullb->dev->blocksize);
		null_free_sector(nullb, sector, false);
		if (null_cache_active(nullb))
			null_free_sector(nullb, sector, true);
		sector += temp >> SECTOR_SHIFT;
		n -= temp;
	}
	spin_unlock_irq(&nullb->lock);
}

static int null_handle_flush(struct nullb *nullb)
{
	int err;

	if (!null_cache_active(nullb))
		return 0;

	spin_lock_irq(&nullb->lock);
	while (true) {
		err = null_make_cache_space(nullb,
			nullb->dev->cache_size * 1024 * 1024);
		if (err || nullb->dev->curr_cache == 0)
			break;
	}

	WARN_ON(!radix_tree_empty(&nullb->dev->cache));
	spin_unlock_irq(&nullb->lock);
	return err;
}

static int null_transfer(struct nullb *nullb, struct page *page,
	unsigned int len, unsigned int off, bool is_write, sector_t sector,
	bool is_fua)
{
	int err = 0;

	if (!is_write) {
		err = copy_from_nullb(nullb, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
	}

	return err;
}

static int null_handle_rq(struct nullb_cmd *cmd)
{
	struct request *rq = cmd->rq;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct req_iterator iter;
	struct bio_vec bvec;

	sector = blk_rq_pos(rq);

	if (req_op(rq) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	rq_for_each_segment(bvec, rq, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(req_op(rq)), sector,
				     req_op(rq) & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);

	return 0;
}

static int null_handle_bio(struct nullb_cmd *cmd)
{
	struct bio *bio = cmd->bio;
	struct nullb *nullb = cmd->nq->dev->nullb;
	int err;
	unsigned int len;
	sector_t sector;
	struct bio_vec bvec;
	struct bvec_iter iter;

	sector = bio->bi_iter.bi_sector;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		null_handle_discard(nullb, sector,
			bio_sectors(bio) << SECTOR_SHIFT);
		return 0;
	}

	spin_lock_irq(&nullb->lock);
	bio_for_each_segment(bvec, bio, iter) {
		len = bvec.bv_len;
		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
				     op_is_write(bio_op(bio)), sector,
				     bio->bi_opf & REQ_FUA);
		if (err) {
			spin_unlock_irq(&nullb->lock);
			return err;
		}
		sector += len >> SECTOR_SHIFT;
	}
	spin_unlock_irq(&nullb->lock);
	return 0;
}

static void null_stop_queue(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_stop_hw_queues(q);
}

static void null_restart_queue_async(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;

	if (nullb->dev->queue_mode == NULL_Q_MQ)
		blk_mq_start_stopped_hw_queues(q, true);
}

static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
	struct nullb_device *dev = cmd->nq->dev;
	struct nullb *nullb = dev->nullb;
	int err = 0;

	if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
		struct request *rq = cmd->rq;

		if (!hrtimer_active(&nullb->bw_timer))
			hrtimer_restart(&nullb->bw_timer);

		if (atomic_long_sub_return(blk_rq_bytes(rq),
				&nullb->cur_bytes) < 0) {
			null_stop_queue(nullb);
			/* race with timer */
			if (atomic_long_read(&nullb->cur_bytes) > 0)
				null_restart_queue_async(nullb);
			/* requeue request */
			return BLK_STS_DEV_RESOURCE;
		}
	}

	if (nullb->dev->badblocks.shift != -1) {
		int bad_sectors;
		sector_t sector, size, first_bad;
		bool is_flush = true;

		if (dev->queue_mode == NULL_Q_BIO &&
				bio_op(cmd->bio) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = cmd->bio->bi_iter.bi_sector;
			size = bio_sectors(cmd->bio);
		}
		if (dev->queue_mode != NULL_Q_BIO &&
				req_op(cmd->rq) != REQ_OP_FLUSH) {
			is_flush = false;
			sector = blk_rq_pos(cmd->rq);
			size = blk_rq_sectors(cmd->rq);
		}
		if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
				size, &first_bad, &bad_sectors)) {
			cmd->error = BLK_STS_IOERR;
			goto out;
		}
	}

	if (dev->memory_backed) {
		if (dev->queue_mode == NULL_Q_BIO) {
			if (bio_op(cmd->bio) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_bio(cmd);
		} else {
			if (req_op(cmd->rq) == REQ_OP_FLUSH)
				err = null_handle_flush(nullb);
			else
				err = null_handle_rq(cmd);
		}
	}
	cmd->error = errno_to_blk_status(err);

	if (!cmd->error && dev->zoned) {
		sector_t sector;
		unsigned int nr_sectors;
		enum req_opf op;

		if (dev->queue_mode == NULL_Q_BIO) {
			op = bio_op(cmd->bio);
			sector = cmd->bio->bi_iter.bi_sector;
			nr_sectors = cmd->bio->bi_iter.bi_size >> 9;
		} else {
			op = req_op(cmd->rq);
			sector = blk_rq_pos(cmd->rq);
			nr_sectors = blk_rq_sectors(cmd->rq);
		}

		if (op == REQ_OP_WRITE)
			null_zone_write(cmd, sector, nr_sectors);
		else if (op == REQ_OP_ZONE_RESET)
			null_zone_reset(cmd, sector);
		else if (op == REQ_OP_ZONE_RESET_ALL)
			null_zone_reset(cmd, 0);
	}
out:
	/* Complete IO by inline, softirq or timer */
	switch (dev->irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (dev->queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
	return BLK_STS_OK;
}

static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
{
	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
	unsigned int mbps = nullb->dev->mbps;

	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
		return HRTIMER_NORESTART;

	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
	null_restart_queue_async(nullb);

	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

	return HRTIMER_RESTART;
}

static void nullb_setup_bwtimer(struct nullb *nullb)
{
	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);

	hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	nullb->bw_timer.function = nullb_bwtimer_fn;
	atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
	hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
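
/*
 * nullb_to_queue() maps the submitting CPU to a queue by dividing the
 * CPU id by ceil(nr_cpu_ids / nr_queues). Example: with 8 possible
 * CPUs and 3 queues, the divisor is (8 + 3 - 1) / 3 = 3, so CPUs 0-2
 * map to queue 0, CPUs 3-5 to queue 1, and CPUs 6-7 to queue 2.
 */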

static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
	return BLK_QC_T_NONE;
}

static bool should_timeout_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_timeout_str[0])
		return should_fail(&null_timeout_attr, 1);
#endif
	return false;
}

static bool should_requeue_request(struct request *rq)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (g_requeue_str[0])
		return should_fail(&null_requeue_attr, 1);
#endif
	return false;
}
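
/*
 * The timeout= and requeue= module parameter strings are handed to
 * setup_fault_attr() (see null_setup_fault() below), which expects the
 * standard fault-injection format "<interval>,<probability>,<space>,
 * <times>". For example, loading with timeout="1,100,0,-1" should make
 * every request eligible to time out.
 */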

static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
	pr_info("null: rq %p timed out\n", rq);
	blk_mq_complete_request(rq);
	return BLK_EH_DONE;
}

static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
	struct nullb_queue *nq = hctx->driver_data;

	might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);

	if (nq->dev->irqmode == NULL_IRQ_TIMER) {
		hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cmd->timer.function = null_cmd_timer_expired;
	}
	cmd->rq = bd->rq;
	cmd->nq = nq;

	blk_mq_start_request(bd->rq);

	if (should_requeue_request(bd->rq)) {
		/*
		 * Alternate between hitting the core BUSY path, and the
		 * driver driven requeue path
		 */
		nq->requeue_selection++;
		if (nq->requeue_selection & 1)
			return BLK_STS_RESOURCE;
		else {
			blk_mq_requeue_request(bd->rq, true);
			return BLK_STS_OK;
		}
	}
	if (should_timeout_request(bd->rq))
		return BLK_STS_OK;

	return null_handle_cmd(cmd);
}

static const struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.complete	= null_complete_rq,
	.timeout	= null_timeout_rq,
};

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static void null_del_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;

	ida_simple_remove(&nullb_indexes, nullb->index);

	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);

	if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
		hrtimer_cancel(&nullb->bw_timer);
		atomic_long_set(&nullb->cur_bytes, LONG_MAX);
		null_restart_queue_async(nullb);
	}

	blk_cleanup_queue(nullb->q);
	if (dev->queue_mode == NULL_Q_MQ &&
	    nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
	put_disk(nullb->disk);
	cleanup_queues(nullb);
	if (null_cache_active(nullb))
		null_free_device_storage(nullb->dev, true);
	kfree(nullb);
	dev->nullb = NULL;
}

static void null_config_discard(struct nullb *nullb)
{
	if (nullb->dev->discard == false)
		return;
	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner		= THIS_MODULE,
	.open		= null_open,
	.release	= null_release,
	.report_zones	= null_zone_report,
};

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
	nq->dev = nullb->dev;
}

static void null_init_queues(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct blk_mq_hw_ctx *hctx;
	struct nullb_queue *nq;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->nr_ctx || !hctx->tags)
			continue;
		nq = &nullb->queues[i];
		hctx->driver_data = nq;
		null_init_queue(nullb, nq);
		nullb->nr_queues++;
	}
}

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kcalloc(nullb->dev->submit_queues,
				sizeof(struct nullb_queue),
				GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->queue_depth = nullb->dev->hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < nullb->dev->submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

static int null_gendisk_register(struct nullb *nullb)
{
	struct gendisk *disk;
	sector_t size;

	disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
	if (!disk)
		return -ENOMEM;
	size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
	set_capacity(disk, size >> 9);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major		= null_major;
	disk->first_minor	= nullb->index;
	disk->fops		= &null_fops;
	disk->private_data	= nullb;
	disk->queue		= nullb->q;
	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);

	if (nullb->dev->zoned) {
		int ret = blk_revalidate_disk_zones(disk);

		if (ret != 0)
			return ret;
	}

	add_disk(disk);
	return 0;
}

static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
	set->ops = &null_mq_ops;
	set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
						g_submit_queues;
	set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
						g_hw_queue_depth;
	set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
	set->cmd_size	= sizeof(struct nullb_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	if (g_no_sched)
		set->flags |= BLK_MQ_F_NO_SCHED;
	set->driver_data = NULL;

	if ((nullb && nullb->dev->blocking) || g_blocking)
		set->flags |= BLK_MQ_F_BLOCKING;

	return blk_mq_alloc_tag_set(set);
}

static void null_validate_conf(struct nullb_device *dev)
{
	dev->blocksize = round_down(dev->blocksize, 512);
	dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);

	if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
		if (dev->submit_queues != nr_online_nodes)
			dev->submit_queues = nr_online_nodes;
	} else if (dev->submit_queues > nr_cpu_ids)
		dev->submit_queues = nr_cpu_ids;
	else if (dev->submit_queues == 0)
		dev->submit_queues = 1;

	dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
	dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);

	/* Do memory allocation, so set blocking */
	if (dev->memory_backed)
		dev->blocking = true;
	else /* cache is meaningless */
		dev->cache_size = 0;
	dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
						dev->cache_size);
	dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
	/* cannot stop a queue */
	if (dev->queue_mode == NULL_Q_BIO)
		dev->mbps = 0;
}

#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
static bool __null_setup_fault(struct fault_attr *attr, char *str)
{
	if (!str[0])
		return true;

	if (!setup_fault_attr(attr, str))
		return false;

	attr->verbose = 0;
	return true;
}
#endif

static bool null_setup_fault(void)
{
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
		return false;
	if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
		return false;
#endif
	return true;
}

static int null_add_dev(struct nullb_device *dev)
{
	struct nullb *nullb;
	int rv;

	null_validate_conf(dev);

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}
	nullb->dev = dev;
	dev->nullb = nullb;

	spin_lock_init(&nullb->lock);

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (dev->queue_mode == NULL_Q_MQ) {
		if (shared_tags) {
			nullb->tag_set = &tag_set;
			rv = 0;
		} else {
			nullb->tag_set = &nullb->__tag_set;
			rv = null_init_tag_set(nullb, nullb->tag_set);
		}

		if (rv)
			goto out_cleanup_queues;

		if (!null_setup_fault())
			goto out_cleanup_queues;

		nullb->tag_set->timeout = 5 * HZ;
		nullb->q = blk_mq_init_queue(nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
		null_init_queues(nullb);
	} else if (dev->queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	if (dev->mbps) {
		set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
		nullb_setup_bwtimer(nullb);
	}

	if (dev->cache_size > 0) {
		set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
		blk_queue_write_cache(nullb->q, true, true);
	}

	if (dev->zoned) {
		rv = null_zone_init(dev);
		if (rv)
			goto out_cleanup_blk_queue;

		blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects);
		nullb->q->limits.zoned = BLK_ZONED_HM;
		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
	}

	nullb->q->queuedata = nullb;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	mutex_lock(&lock);
	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
	dev->index = nullb->index;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, dev->blocksize);
	blk_queue_physical_block_size(nullb->q, dev->blocksize);

	null_config_discard(nullb);

	sprintf(nullb->disk_name, "nullb%d", nullb->index);

	rv = null_gendisk_register(nullb);
	if (rv)
		goto out_cleanup_zone;

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	mutex_unlock(&lock);

	return 0;
out_cleanup_zone:
	if (dev->zoned)
		null_zone_exit(dev);
out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
		blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	int ret = 0;
	unsigned int i;
	struct nullb *nullb;
	struct nullb_device *dev;

	if (g_bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
		g_bs = PAGE_SIZE;
	}

	if (!is_power_of_2(g_zone_size)) {
		pr_err("null_blk: zone_size must be power-of-two\n");
		return -EINVAL;
	}

	if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
		pr_err("null_blk: invalid home_node value\n");
		g_home_node = NUMA_NO_NODE;
	}

	if (g_queue_mode == NULL_Q_RQ) {
		pr_err("null_blk: legacy IO path no longer available\n");
		return -EINVAL;
	}
	if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
		if (g_submit_queues != nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is set to %u.\n",
							nr_online_nodes);
			g_submit_queues = nr_online_nodes;
		}
	} else if (g_submit_queues > nr_cpu_ids)
		g_submit_queues = nr_cpu_ids;
	else if (g_submit_queues <= 0)
		g_submit_queues = 1;

	if (g_queue_mode == NULL_Q_MQ && shared_tags) {
		ret = null_init_tag_set(NULL, &tag_set);
		if (ret)
			return ret;
	}

	config_group_init(&nullb_subsys.su_group);
	mutex_init(&nullb_subsys.su_mutex);

	ret = configfs_register_subsystem(&nullb_subsys);
	if (ret)
		goto err_tagset;

	mutex_init(&lock);

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0) {
		ret = null_major;
		goto err_conf;
	}

	for (i = 0; i < nr_devices; i++) {
		dev = null_alloc_dev();
		if (!dev) {
			ret = -ENOMEM;
			goto err_dev;
		}
		ret = null_add_dev(dev);
		if (ret) {
			null_free_dev(dev);
			goto err_dev;
		}
	}

	pr_info("null: module loaded\n");
	return 0;

err_dev:
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	unregister_blkdev(null_major, "nullb");
err_conf:
	configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
	return ret;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	configfs_unregister_subsystem(&nullb_subsys);

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		struct nullb_device *dev;

		nullb = list_entry(nullb_list.next, struct nullb, list);
		dev = nullb->dev;
		null_del_dev(nullb);
		null_free_dev(dev);
	}
	mutex_unlock(&lock);

	if (g_queue_mode == NULL_Q_MQ && shared_tags)
		blk_mq_free_tag_set(&tag_set);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");