1 // SPDX-License-Identifier: GPL-2.0-only
3 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
4 * Shaohua Li <shli@fb.com>
6 #include <linux/module.h>
8 #include <linux/moduleparam.h>
9 #include <linux/sched.h>
11 #include <linux/init.h>
15 #define pr_fmt(fmt) "null_blk: " fmt
19 #define TICKS_PER_SEC 50ULL
20 #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
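/*
 * Bandwidth throttling: a per-device hrtimer fires every TIMER_INTERVAL and
 * refills the budget of bytes (mb_per_tick()) that may be transferred before
 * the hardware queues are stopped until the next tick.
 */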
22 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
23 static DECLARE_FAULT_ATTR(null_timeout_attr);
24 static DECLARE_FAULT_ATTR(null_requeue_attr);
25 static DECLARE_FAULT_ATTR(null_init_hctx_attr);
28 static inline u64 mb_per_tick(int mbps)
30 return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
34 * Status flags for nullb_device.
36 * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
37 * UP: Device is currently on and visible in userspace.
38 * THROTTLED: Device is being throttled.
39 * CACHE: Device is using a write-back cache.
41 enum nullb_device_flags {
42 NULLB_DEV_FL_CONFIGURED = 0,
44 NULLB_DEV_FL_THROTTLED = 2,
45 NULLB_DEV_FL_CACHE = 3,
48 #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
50 * nullb_page is a page in memory for nullb devices.
52 * @page: The page holding the data.
53 * @bitmap: The bitmap represents which sector in the page has data.
54 * Each bit covers one block. For example, with 512-byte blocks the
55 * eighth sector in a page (offset 7) uses bit 7.
56 * The highest 2 bits of the bitmap are for special purposes. LOCK means the
57 * cache page is being flushed to storage. FREE means the cache page has been
58 * freed and should be skipped when flushing to storage. Please see
59 * null_make_cache_space().
63 DECLARE_BITMAP(bitmap, MAP_SZ);
65 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
66 #define NULLB_PAGE_FREE (MAP_SZ - 2)
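/*
 * NULLB_PAGE_LOCK and NULLB_PAGE_FREE are the two extra bits reserved at the
 * top of the per-page bitmap (the "+ 2" in MAP_SZ above).
 */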
68 static LIST_HEAD(nullb_list);
69 static struct mutex lock;
70 static int null_major;
71 static DEFINE_IDA(nullb_indexes);
72 static struct blk_mq_tag_set tag_set;
80 static bool g_virt_boundary = false;
81 module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
82 MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
84 static int g_no_sched;
85 module_param_named(no_sched, g_no_sched, int, 0444);
86 MODULE_PARM_DESC(no_sched, "No io scheduler");
88 static int g_submit_queues = 1;
89 module_param_named(submit_queues, g_submit_queues, int, 0444);
90 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
92 static int g_poll_queues = 1;
93 module_param_named(poll_queues, g_poll_queues, int, 0444);
94 MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
96 static int g_home_node = NUMA_NO_NODE;
97 module_param_named(home_node, g_home_node, int, 0444);
98 MODULE_PARM_DESC(home_node, "Home node for the device");
100 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
102 * For more details about fault injection, please refer to
103 * Documentation/fault-injection/fault-injection.rst.
105 static char g_timeout_str[80];
106 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
107 MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
109 static char g_requeue_str[80];
110 module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
111 MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
113 static char g_init_hctx_str[80];
114 module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
115 MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
118 static int g_queue_mode = NULL_Q_MQ;
120 static int null_param_store_val(const char *str, int *val, int min, int max)
124 ret = kstrtoint(str, 10, &new_val);
128 if (new_val < min || new_val > max)
135 static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
137 return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
140 static const struct kernel_param_ops null_queue_mode_param_ops = {
141 .set = null_set_queue_mode,
142 .get = param_get_int,
145 device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
146 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
148 static int g_gb = 250;
149 module_param_named(gb, g_gb, int, 0444);
150 MODULE_PARM_DESC(gb, "Size in GB");
152 static int g_bs = 512;
153 module_param_named(bs, g_bs, int, 0444);
154 MODULE_PARM_DESC(bs, "Block size (in bytes)");
156 static int g_max_sectors;
157 module_param_named(max_sectors, g_max_sectors, int, 0444);
158 MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
160 static unsigned int nr_devices = 1;
161 module_param(nr_devices, uint, 0444);
162 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
164 static bool g_blocking;
165 module_param_named(blocking, g_blocking, bool, 0444);
166 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
168 static bool shared_tags;
169 module_param(shared_tags, bool, 0444);
170 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
172 static bool g_shared_tag_bitmap;
173 module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
174 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
176 static int g_irqmode = NULL_IRQ_SOFTIRQ;
178 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
180 return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
184 static const struct kernel_param_ops null_irqmode_param_ops = {
185 .set = null_set_irqmode,
186 .get = param_get_int,
189 device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
190 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
192 static unsigned long g_completion_nsec = 10000;
193 module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
194 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
196 static int g_hw_queue_depth = 64;
197 module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
198 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
200 static bool g_use_per_node_hctx;
201 module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
202 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
204 static bool g_memory_backed;
205 module_param_named(memory_backed, g_memory_backed, bool, 0444);
206 MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
208 static bool g_discard;
209 module_param_named(discard, g_discard, bool, 0444);
210 MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
212 static unsigned long g_cache_size;
213 module_param_named(cache_size, g_cache_size, ulong, 0444);
214 MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
216 static unsigned int g_mbps;
217 module_param_named(mbps, g_mbps, uint, 0444);
218 MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
221 module_param_named(zoned, g_zoned, bool, 0444);
222 MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
224 static unsigned long g_zone_size = 256;
225 module_param_named(zone_size, g_zone_size, ulong, 0444);
226 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256");
228 static unsigned long g_zone_capacity;
229 module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
230 MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");
232 static unsigned int g_zone_nr_conv;
233 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
234 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
236 static unsigned int g_zone_max_open;
237 module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
238 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
240 static unsigned int g_zone_max_active;
241 module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
242 MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
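/*
 * Illustrative example: the module parameters above configure the devices
 * created at module load time, e.g.
 *
 *   modprobe null_blk nr_devices=2 gb=4 bs=4096 memory_backed=1
 *
 * creates two 4GB, 4096-byte block size, memory-backed /dev/nullb* disks.
 */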
244 static struct nullb_device *null_alloc_dev(void);
245 static void null_free_dev(struct nullb_device *dev);
246 static void null_del_dev(struct nullb *nullb);
247 static int null_add_dev(struct nullb_device *dev);
248 static struct nullb *null_find_dev_by_name(const char *name);
249 static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
251 static inline struct nullb_device *to_nullb_device(struct config_item *item)
253 return item ? container_of(item, struct nullb_device, item) : NULL;
256 static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
258 return snprintf(page, PAGE_SIZE, "%u\n", val);
261 static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
264 return snprintf(page, PAGE_SIZE, "%lu\n", val);
267 static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
269 return snprintf(page, PAGE_SIZE, "%u\n", val);
272 static ssize_t nullb_device_uint_attr_store(unsigned int *val,
273 const char *page, size_t count)
278 result = kstrtouint(page, 0, &tmp);
286 static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
287 const char *page, size_t count)
292 result = kstrtoul(page, 0, &tmp);
300 static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
306 result = kstrtobool(page, &tmp);
314 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
315 #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
317 nullb_device_##NAME##_show(struct config_item *item, char *page) \
319 return nullb_device_##TYPE##_attr_show( \
320 to_nullb_device(item)->NAME, page); \
323 nullb_device_##NAME##_store(struct config_item *item, const char *page, \
326 int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
327 struct nullb_device *dev = to_nullb_device(item); \
328 TYPE new_value = 0; \
331 ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
335 ret = apply_fn(dev, new_value); \
336 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
340 dev->NAME = new_value; \
343 CONFIGFS_ATTR(nullb_device_, NAME);
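/*
 * For example, NULLB_DEVICE_ATTR(size, ulong, NULL) below expands to
 * nullb_device_size_show()/nullb_device_size_store() plus the configfs
 * attribute nullb_device_attr_size. Attributes that pass a non-NULL APPLY
 * callback can still be updated after the device has been configured.
 */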
345 static int nullb_update_nr_hw_queues(struct nullb_device *dev,
346 unsigned int submit_queues,
347 unsigned int poll_queues)
350 struct blk_mq_tag_set *set;
351 int ret, nr_hw_queues;
357 * Make sure at least one submit queue exists.
363 * Make sure that null_init_hctx() does not access nullb->queues[] past
364 * the end of that array.
366 if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
370 * Keep previous and new queue numbers in nullb_device for reference in
371 * the callback function null_map_queues().
373 dev->prev_submit_queues = dev->submit_queues;
374 dev->prev_poll_queues = dev->poll_queues;
375 dev->submit_queues = submit_queues;
376 dev->poll_queues = poll_queues;
378 set = dev->nullb->tag_set;
379 nr_hw_queues = submit_queues + poll_queues;
380 blk_mq_update_nr_hw_queues(set, nr_hw_queues);
381 ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
384 /* on error, revert the queue numbers */
385 dev->submit_queues = dev->prev_submit_queues;
386 dev->poll_queues = dev->prev_poll_queues;
392 static int nullb_apply_submit_queues(struct nullb_device *dev,
393 unsigned int submit_queues)
395 return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
398 static int nullb_apply_poll_queues(struct nullb_device *dev,
399 unsigned int poll_queues)
401 return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
404 NULLB_DEVICE_ATTR(size, ulong, NULL);
405 NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
406 NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
407 NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
408 NULLB_DEVICE_ATTR(home_node, uint, NULL);
409 NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
410 NULLB_DEVICE_ATTR(blocksize, uint, NULL);
411 NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
412 NULLB_DEVICE_ATTR(irqmode, uint, NULL);
413 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
414 NULLB_DEVICE_ATTR(index, uint, NULL);
415 NULLB_DEVICE_ATTR(blocking, bool, NULL);
416 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
417 NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
418 NULLB_DEVICE_ATTR(discard, bool, NULL);
419 NULLB_DEVICE_ATTR(mbps, uint, NULL);
420 NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
421 NULLB_DEVICE_ATTR(zoned, bool, NULL);
422 NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
423 NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
424 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
425 NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
426 NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
427 NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
429 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
431 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
434 static ssize_t nullb_device_power_store(struct config_item *item,
435 const char *page, size_t count)
437 struct nullb_device *dev = to_nullb_device(item);
441 ret = nullb_device_bool_attr_store(&newp, page, count);
445 if (!dev->power && newp) {
446 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
448 ret = null_add_dev(dev);
450 clear_bit(NULLB_DEV_FL_UP, &dev->flags);
454 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
456 } else if (dev->power && !newp) {
457 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
460 null_del_dev(dev->nullb);
463 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
469 CONFIGFS_ATTR(nullb_device_, power);
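/*
 * Usage note: writing 1 to the per-device configfs "power" attribute calls
 * null_add_dev() and makes the disk visible; writing 0 tears it down again
 * via null_del_dev().
 */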
471 static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
473 struct nullb_device *t_dev = to_nullb_device(item);
475 return badblocks_show(&t_dev->badblocks, page, 0);
478 static ssize_t nullb_device_badblocks_store(struct config_item *item,
479 const char *page, size_t count)
481 struct nullb_device *t_dev = to_nullb_device(item);
482 char *orig, *buf, *tmp;
486 orig = kstrndup(page, count, GFP_KERNEL);
490 buf = strstrip(orig);
493 if (buf[0] != '+' && buf[0] != '-')
495 tmp = strchr(&buf[1], '-');
499 ret = kstrtoull(buf + 1, 0, &start);
502 ret = kstrtoull(tmp + 1, 0, &end);
508 /* enable badblocks */
509 cmpxchg(&t_dev->badblocks.shift, -1, 0);
511 ret = badblocks_set(&t_dev->badblocks, start,
514 ret = badblocks_clear(&t_dev->badblocks, start,
522 CONFIGFS_ATTR(nullb_device_, badblocks);
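/*
 * Usage note: the "badblocks" attribute takes a sector range prefixed with
 * '+' or '-', e.g. "+0-7" marks sectors 0-7 bad and "-0-7" clears them again.
 */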
524 static struct configfs_attribute *nullb_device_attrs[] = {
525 &nullb_device_attr_size,
526 &nullb_device_attr_completion_nsec,
527 &nullb_device_attr_submit_queues,
528 &nullb_device_attr_poll_queues,
529 &nullb_device_attr_home_node,
530 &nullb_device_attr_queue_mode,
531 &nullb_device_attr_blocksize,
532 &nullb_device_attr_max_sectors,
533 &nullb_device_attr_irqmode,
534 &nullb_device_attr_hw_queue_depth,
535 &nullb_device_attr_index,
536 &nullb_device_attr_blocking,
537 &nullb_device_attr_use_per_node_hctx,
538 &nullb_device_attr_power,
539 &nullb_device_attr_memory_backed,
540 &nullb_device_attr_discard,
541 &nullb_device_attr_mbps,
542 &nullb_device_attr_cache_size,
543 &nullb_device_attr_badblocks,
544 &nullb_device_attr_zoned,
545 &nullb_device_attr_zone_size,
546 &nullb_device_attr_zone_capacity,
547 &nullb_device_attr_zone_nr_conv,
548 &nullb_device_attr_zone_max_open,
549 &nullb_device_attr_zone_max_active,
550 &nullb_device_attr_virt_boundary,
554 static void nullb_device_release(struct config_item *item)
556 struct nullb_device *dev = to_nullb_device(item);
558 null_free_device_storage(dev, false);
562 static struct configfs_item_operations nullb_device_ops = {
563 .release = nullb_device_release,
566 static const struct config_item_type nullb_device_type = {
567 .ct_item_ops = &nullb_device_ops,
568 .ct_attrs = nullb_device_attrs,
569 .ct_owner = THIS_MODULE,
573 config_item *nullb_group_make_item(struct config_group *group, const char *name)
575 struct nullb_device *dev;
577 if (null_find_dev_by_name(name))
578 return ERR_PTR(-EEXIST);
580 dev = null_alloc_dev();
582 return ERR_PTR(-ENOMEM);
584 config_item_init_type_name(&dev->item, name, &nullb_device_type);
590 nullb_group_drop_item(struct config_group *group, struct config_item *item)
592 struct nullb_device *dev = to_nullb_device(item);
594 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
597 null_del_dev(dev->nullb);
601 config_item_put(item);
604 static ssize_t memb_group_features_show(struct config_item *item, char *page)
606 return snprintf(page, PAGE_SIZE,
607 "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active,blocksize,max_sectors,virt_boundary\n");
610 CONFIGFS_ATTR_RO(memb_group_, features);
612 static struct configfs_attribute *nullb_group_attrs[] = {
613 &memb_group_attr_features,
617 static struct configfs_group_operations nullb_group_ops = {
618 .make_item = nullb_group_make_item,
619 .drop_item = nullb_group_drop_item,
622 static const struct config_item_type nullb_group_type = {
623 .ct_group_ops = &nullb_group_ops,
624 .ct_attrs = nullb_group_attrs,
625 .ct_owner = THIS_MODULE,
628 static struct configfs_subsystem nullb_subsys = {
631 .ci_namebuf = "nullb",
632 .ci_type = &nullb_group_type,
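/*
 * With configfs mounted (typically at /sys/kernel/config), devices can be
 * created and destroyed at runtime, for example:
 *
 *   mkdir /sys/kernel/config/nullb/mydisk
 *   echo 1 > /sys/kernel/config/nullb/mydisk/power
 *   echo 0 > /sys/kernel/config/nullb/mydisk/power
 *   rmdir /sys/kernel/config/nullb/mydisk
 */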
637 static inline int null_cache_active(struct nullb *nullb)
639 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
642 static struct nullb_device *null_alloc_dev(void)
644 struct nullb_device *dev;
646 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
649 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
650 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
651 if (badblocks_init(&dev->badblocks, 0)) {
656 dev->size = g_gb * 1024;
657 dev->completion_nsec = g_completion_nsec;
658 dev->submit_queues = g_submit_queues;
659 dev->prev_submit_queues = g_submit_queues;
660 dev->poll_queues = g_poll_queues;
661 dev->prev_poll_queues = g_poll_queues;
662 dev->home_node = g_home_node;
663 dev->queue_mode = g_queue_mode;
664 dev->blocksize = g_bs;
665 dev->max_sectors = g_max_sectors;
666 dev->irqmode = g_irqmode;
667 dev->hw_queue_depth = g_hw_queue_depth;
668 dev->blocking = g_blocking;
669 dev->memory_backed = g_memory_backed;
670 dev->discard = g_discard;
671 dev->cache_size = g_cache_size;
673 dev->use_per_node_hctx = g_use_per_node_hctx;
674 dev->zoned = g_zoned;
675 dev->zone_size = g_zone_size;
676 dev->zone_capacity = g_zone_capacity;
677 dev->zone_nr_conv = g_zone_nr_conv;
678 dev->zone_max_open = g_zone_max_open;
679 dev->zone_max_active = g_zone_max_active;
680 dev->virt_boundary = g_virt_boundary;
684 static void null_free_dev(struct nullb_device *dev)
689 null_free_zoned_dev(dev);
690 badblocks_exit(&dev->badblocks);
694 static void put_tag(struct nullb_queue *nq, unsigned int tag)
696 clear_bit_unlock(tag, nq->tag_map);
698 if (waitqueue_active(&nq->wait))
702 static unsigned int get_tag(struct nullb_queue *nq)
707 tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
708 if (tag >= nq->queue_depth)
710 } while (test_and_set_bit_lock(tag, nq->tag_map));
715 static void free_cmd(struct nullb_cmd *cmd)
717 put_tag(cmd->nq, cmd->tag);
720 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer);
722 static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
724 struct nullb_cmd *cmd;
729 cmd = &nq->cmds[tag];
731 cmd->error = BLK_STS_OK;
733 if (nq->dev->irqmode == NULL_IRQ_TIMER) {
734 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
736 cmd->timer.function = null_cmd_timer_expired;
744 static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, struct bio *bio)
746 struct nullb_cmd *cmd;
751 * This avoids multiple return statements, multiple calls to
752 * __alloc_cmd() and a fast path call to prepare_to_wait().
754 cmd = __alloc_cmd(nq);
759 prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
761 finish_wait(&nq->wait, &wait);
765 static void end_cmd(struct nullb_cmd *cmd)
767 int queue_mode = cmd->nq->dev->queue_mode;
769 switch (queue_mode) {
771 blk_mq_end_request(cmd->rq, cmd->error);
774 cmd->bio->bi_status = cmd->error;
782 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
784 end_cmd(container_of(timer, struct nullb_cmd, timer));
786 return HRTIMER_NORESTART;
789 static void null_cmd_end_timer(struct nullb_cmd *cmd)
791 ktime_t kt = cmd->nq->dev->completion_nsec;
793 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
796 static void null_complete_rq(struct request *rq)
798 end_cmd(blk_mq_rq_to_pdu(rq));
801 static struct nullb_page *null_alloc_page(void)
803 struct nullb_page *t_page;
805 t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
809 t_page->page = alloc_pages(GFP_NOIO, 0);
815 memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
819 static void null_free_page(struct nullb_page *t_page)
821 __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
822 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
824 __free_page(t_page->page);
828 static bool null_page_empty(struct nullb_page *page)
830 int size = MAP_SZ - 2;
832 return find_first_bit(page->bitmap, size) == size;
835 static void null_free_sector(struct nullb *nullb, sector_t sector,
838 unsigned int sector_bit;
840 struct nullb_page *t_page, *ret;
841 struct radix_tree_root *root;
843 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
844 idx = sector >> PAGE_SECTORS_SHIFT;
845 sector_bit = (sector & SECTOR_MASK);
847 t_page = radix_tree_lookup(root, idx);
849 __clear_bit(sector_bit, t_page->bitmap);
851 if (null_page_empty(t_page)) {
852 ret = radix_tree_delete_item(root, idx, t_page);
853 WARN_ON(ret != t_page);
856 nullb->dev->curr_cache -= PAGE_SIZE;
861 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
862 struct nullb_page *t_page, bool is_cache)
864 struct radix_tree_root *root;
866 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
868 if (radix_tree_insert(root, idx, t_page)) {
869 null_free_page(t_page);
870 t_page = radix_tree_lookup(root, idx);
871 WARN_ON(!t_page || t_page->page->index != idx);
873 nullb->dev->curr_cache += PAGE_SIZE;
878 static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
880 unsigned long pos = 0;
882 struct nullb_page *ret, *t_pages[FREE_BATCH];
883 struct radix_tree_root *root;
885 root = is_cache ? &dev->cache : &dev->data;
890 nr_pages = radix_tree_gang_lookup(root,
891 (void **)t_pages, pos, FREE_BATCH);
893 for (i = 0; i < nr_pages; i++) {
894 pos = t_pages[i]->page->index;
895 ret = radix_tree_delete_item(root, pos, t_pages[i]);
896 WARN_ON(ret != t_pages[i]);
901 } while (nr_pages == FREE_BATCH);
907 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
908 sector_t sector, bool for_write, bool is_cache)
910 unsigned int sector_bit;
912 struct nullb_page *t_page;
913 struct radix_tree_root *root;
915 idx = sector >> PAGE_SECTORS_SHIFT;
916 sector_bit = (sector & SECTOR_MASK);
918 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
919 t_page = radix_tree_lookup(root, idx);
920 WARN_ON(t_page && t_page->page->index != idx);
922 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
928 static struct nullb_page *null_lookup_page(struct nullb *nullb,
929 sector_t sector, bool for_write, bool ignore_cache)
931 struct nullb_page *page = NULL;
934 page = __null_lookup_page(nullb, sector, for_write, true);
937 return __null_lookup_page(nullb, sector, for_write, false);
940 static struct nullb_page *null_insert_page(struct nullb *nullb,
941 sector_t sector, bool ignore_cache)
942 __releases(&nullb->lock)
943 __acquires(&nullb->lock)
946 struct nullb_page *t_page;
948 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
952 spin_unlock_irq(&nullb->lock);
954 t_page = null_alloc_page();
958 if (radix_tree_preload(GFP_NOIO))
961 spin_lock_irq(&nullb->lock);
962 idx = sector >> PAGE_SECTORS_SHIFT;
963 t_page->page->index = idx;
964 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
965 radix_tree_preload_end();
969 null_free_page(t_page);
971 spin_lock_irq(&nullb->lock);
972 return null_lookup_page(nullb, sector, true, ignore_cache);
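/*
 * Write one cache page back to the data radix tree: copy every block that is
 * marked dirty in the cache page's bitmap into the corresponding data page,
 * then drop the cache page (or just free it if it was marked FREE meanwhile).
 */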
975 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
980 struct nullb_page *t_page, *ret;
983 idx = c_page->page->index;
985 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
987 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
988 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
989 null_free_page(c_page);
990 if (t_page && null_page_empty(t_page)) {
991 ret = radix_tree_delete_item(&nullb->dev->data,
993 null_free_page(t_page);
1001 src = kmap_atomic(c_page->page);
1002 dst = kmap_atomic(t_page->page);
1004 for (i = 0; i < PAGE_SECTORS;
1005 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
1006 if (test_bit(i, c_page->bitmap)) {
1007 offset = (i << SECTOR_SHIFT);
1008 memcpy(dst + offset, src + offset,
1009 nullb->dev->blocksize);
1010 __set_bit(i, t_page->bitmap);
1017 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
1018 null_free_page(ret);
1019 nullb->dev->curr_cache -= PAGE_SIZE;
1024 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
1026 int i, err, nr_pages;
1027 struct nullb_page *c_pages[FREE_BATCH];
1028 unsigned long flushed = 0, one_round;
1031 if ((nullb->dev->cache_size * 1024 * 1024) >
1032 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
1035 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
1036 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
1038 * null_flush_cache_page() could unlock before using the c_pages. To
1039 * avoid the race, don't allow the pages to be freed.
1041 for (i = 0; i < nr_pages; i++) {
1042 nullb->cache_flush_pos = c_pages[i]->page->index;
1044 * We found the page which is being flushed to disk by other
1047 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
1050 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
1054 for (i = 0; i < nr_pages; i++) {
1055 if (c_pages[i] == NULL)
1057 err = null_flush_cache_page(nullb, c_pages[i]);
1062 flushed += one_round << PAGE_SHIFT;
1066 nullb->cache_flush_pos = 0;
1067 if (one_round == 0) {
1068 /* give other threads a chance */
1069 spin_unlock_irq(&nullb->lock);
1070 spin_lock_irq(&nullb->lock);
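/*
 * copy_to_nullb() and copy_from_nullb() move data between the caller's page
 * and the backing radix-tree pages one block at a time: writes allocate
 * backing pages on demand (making cache space first when the write-back cache
 * is active), while reads zero-fill any block that has never been written.
 */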
1077 static int copy_to_nullb(struct nullb *nullb, struct page *source,
1078 unsigned int off, sector_t sector, size_t n, bool is_fua)
1080 size_t temp, count = 0;
1081 unsigned int offset;
1082 struct nullb_page *t_page;
1086 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1088 if (null_cache_active(nullb) && !is_fua)
1089 null_make_cache_space(nullb, PAGE_SIZE);
1091 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1092 t_page = null_insert_page(nullb, sector,
1093 !null_cache_active(nullb) || is_fua);
1097 src = kmap_atomic(source);
1098 dst = kmap_atomic(t_page->page);
1099 memcpy(dst + offset, src + off + count, temp);
1103 __set_bit(sector & SECTOR_MASK, t_page->bitmap);
1106 null_free_sector(nullb, sector, true);
1109 sector += temp >> SECTOR_SHIFT;
1114 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
1115 unsigned int off, sector_t sector, size_t n)
1117 size_t temp, count = 0;
1118 unsigned int offset;
1119 struct nullb_page *t_page;
1123 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1125 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1126 t_page = null_lookup_page(nullb, sector, false,
1127 !null_cache_active(nullb));
1129 dst = kmap_atomic(dest);
1131 memset(dst + off + count, 0, temp);
1134 src = kmap_atomic(t_page->page);
1135 memcpy(dst + off + count, src + offset, temp);
1141 sector += temp >> SECTOR_SHIFT;
1146 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1147 unsigned int len, unsigned int off)
1151 dst = kmap_atomic(page);
1152 memset(dst + off, 0xFF, len);
1156 blk_status_t null_handle_discard(struct nullb_device *dev,
1157 sector_t sector, sector_t nr_sectors)
1159 struct nullb *nullb = dev->nullb;
1160 size_t n = nr_sectors << SECTOR_SHIFT;
1163 spin_lock_irq(&nullb->lock);
1165 temp = min_t(size_t, n, dev->blocksize);
1166 null_free_sector(nullb, sector, false);
1167 if (null_cache_active(nullb))
1168 null_free_sector(nullb, sector, true);
1169 sector += temp >> SECTOR_SHIFT;
1172 spin_unlock_irq(&nullb->lock);
1177 static int null_handle_flush(struct nullb *nullb)
1181 if (!null_cache_active(nullb))
1184 spin_lock_irq(&nullb->lock);
1186 err = null_make_cache_space(nullb,
1187 nullb->dev->cache_size * 1024 * 1024);
1188 if (err || nullb->dev->curr_cache == 0)
1192 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1193 spin_unlock_irq(&nullb->lock);
1197 static int null_transfer(struct nullb *nullb, struct page *page,
1198 unsigned int len, unsigned int off, bool is_write, sector_t sector,
1201 struct nullb_device *dev = nullb->dev;
1202 unsigned int valid_len = len;
1207 valid_len = null_zone_valid_read_len(nullb,
1211 err = copy_from_nullb(nullb, page, off,
1218 nullb_fill_pattern(nullb, page, len, off);
1219 flush_dcache_page(page);
1221 flush_dcache_page(page);
1222 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1228 static int null_handle_rq(struct nullb_cmd *cmd)
1230 struct request *rq = cmd->rq;
1231 struct nullb *nullb = cmd->nq->dev->nullb;
1234 sector_t sector = blk_rq_pos(rq);
1235 struct req_iterator iter;
1236 struct bio_vec bvec;
1238 spin_lock_irq(&nullb->lock);
1239 rq_for_each_segment(bvec, rq, iter) {
1241 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1242 op_is_write(req_op(rq)), sector,
1243 rq->cmd_flags & REQ_FUA);
1245 spin_unlock_irq(&nullb->lock);
1248 sector += len >> SECTOR_SHIFT;
1250 spin_unlock_irq(&nullb->lock);
1255 static int null_handle_bio(struct nullb_cmd *cmd)
1257 struct bio *bio = cmd->bio;
1258 struct nullb *nullb = cmd->nq->dev->nullb;
1261 sector_t sector = bio->bi_iter.bi_sector;
1262 struct bio_vec bvec;
1263 struct bvec_iter iter;
1265 spin_lock_irq(&nullb->lock);
1266 bio_for_each_segment(bvec, bio, iter) {
1268 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1269 op_is_write(bio_op(bio)), sector,
1270 bio->bi_opf & REQ_FUA);
1272 spin_unlock_irq(&nullb->lock);
1275 sector += len >> SECTOR_SHIFT;
1277 spin_unlock_irq(&nullb->lock);
1281 static void null_stop_queue(struct nullb *nullb)
1283 struct request_queue *q = nullb->q;
1285 if (nullb->dev->queue_mode == NULL_Q_MQ)
1286 blk_mq_stop_hw_queues(q);
1289 static void null_restart_queue_async(struct nullb *nullb)
1291 struct request_queue *q = nullb->q;
1293 if (nullb->dev->queue_mode == NULL_Q_MQ)
1294 blk_mq_start_stopped_hw_queues(q, true);
1297 static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
1299 struct nullb_device *dev = cmd->nq->dev;
1300 struct nullb *nullb = dev->nullb;
1301 blk_status_t sts = BLK_STS_OK;
1302 struct request *rq = cmd->rq;
1304 if (!hrtimer_active(&nullb->bw_timer))
1305 hrtimer_restart(&nullb->bw_timer);
1307 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1308 null_stop_queue(nullb);
1309 /* race with timer */
1310 if (atomic_long_read(&nullb->cur_bytes) > 0)
1311 null_restart_queue_async(nullb);
1312 /* requeue request */
1313 sts = BLK_STS_DEV_RESOURCE;
1318 static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
1320 sector_t nr_sectors)
1322 struct badblocks *bb = &cmd->nq->dev->badblocks;
1326 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1327 return BLK_STS_IOERR;
1332 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
1335 sector_t nr_sectors)
1337 struct nullb_device *dev = cmd->nq->dev;
1340 if (op == REQ_OP_DISCARD)
1341 return null_handle_discard(dev, sector, nr_sectors);
1343 if (dev->queue_mode == NULL_Q_BIO)
1344 err = null_handle_bio(cmd);
1346 err = null_handle_rq(cmd);
1348 return errno_to_blk_status(err);
1351 static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
1353 struct nullb_device *dev = cmd->nq->dev;
1356 if (dev->memory_backed)
1359 if (dev->queue_mode == NULL_Q_BIO && bio_op(cmd->bio) == REQ_OP_READ) {
1360 zero_fill_bio(cmd->bio);
1361 } else if (req_op(cmd->rq) == REQ_OP_READ) {
1362 __rq_for_each_bio(bio, cmd->rq)
1367 static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
1370 * Since root privileges are required to configure the null_blk
1371 * driver, it is fine that this driver does not initialize the
1372 * data buffers of read commands. Zero-initialize these buffers
1373 * anyway if KMSAN is enabled to prevent KMSAN from complaining
1374 * about null_blk not initializing read data buffers.
1376 if (IS_ENABLED(CONFIG_KMSAN))
1377 nullb_zero_read_cmd_buffer(cmd);
1379 /* Complete IO by inline, softirq or timer */
1380 switch (cmd->nq->dev->irqmode) {
1381 case NULL_IRQ_SOFTIRQ:
1382 switch (cmd->nq->dev->queue_mode) {
1384 if (likely(!blk_should_fake_timeout(cmd->rq->q)))
1385 blk_mq_complete_request(cmd->rq);
1389 * XXX: no proper submitting cpu information available.
1398 case NULL_IRQ_TIMER:
1399 null_cmd_end_timer(cmd);
1404 blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
1405 sector_t sector, unsigned int nr_sectors)
1407 struct nullb_device *dev = cmd->nq->dev;
1410 if (dev->badblocks.shift != -1) {
1411 ret = null_handle_badblocks(cmd, sector, nr_sectors);
1412 if (ret != BLK_STS_OK)
1416 if (dev->memory_backed)
1417 return null_handle_memory_backed(cmd, op, sector, nr_sectors);
1422 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
1423 sector_t nr_sectors, enum req_op op)
1425 struct nullb_device *dev = cmd->nq->dev;
1426 struct nullb *nullb = dev->nullb;
1429 if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
1430 sts = null_handle_throttled(cmd);
1431 if (sts != BLK_STS_OK)
1435 if (op == REQ_OP_FLUSH) {
1436 cmd->error = errno_to_blk_status(null_handle_flush(nullb));
1441 sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
1443 sts = null_process_cmd(cmd, op, sector, nr_sectors);
1445 /* Do not overwrite errors (e.g. timeout errors) */
1446 if (cmd->error == BLK_STS_OK)
1450 nullb_complete_cmd(cmd);
1454 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
1456 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1457 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1458 unsigned int mbps = nullb->dev->mbps;
1460 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1461 return HRTIMER_NORESTART;
1463 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1464 null_restart_queue_async(nullb);
1466 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1468 return HRTIMER_RESTART;
1471 static void nullb_setup_bwtimer(struct nullb *nullb)
1473 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1475 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1476 nullb->bw_timer.function = nullb_bwtimer_fn;
1477 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1478 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
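/*
 * For bio-based devices there is no blk-mq hctx mapping, so spread the
 * submitting CPUs evenly across the available nullb queues by CPU id.
 */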
1481 static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
1485 if (nullb->nr_queues != 1)
1486 index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);
1488 return &nullb->queues[index];
1491 static void null_submit_bio(struct bio *bio)
1493 sector_t sector = bio->bi_iter.bi_sector;
1494 sector_t nr_sectors = bio_sectors(bio);
1495 struct nullb *nullb = bio->bi_bdev->bd_disk->private_data;
1496 struct nullb_queue *nq = nullb_to_queue(nullb);
1498 null_handle_cmd(alloc_cmd(nq, bio), sector, nr_sectors, bio_op(bio));
1501 static bool should_timeout_request(struct request *rq)
1503 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1504 if (g_timeout_str[0])
1505 return should_fail(&null_timeout_attr, 1);
1510 static bool should_requeue_request(struct request *rq)
1512 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1513 if (g_requeue_str[0])
1514 return should_fail(&null_requeue_attr, 1);
1519 static int null_map_queues(struct blk_mq_tag_set *set)
1521 struct nullb *nullb = set->driver_data;
1523 unsigned int submit_queues = g_submit_queues;
1524 unsigned int poll_queues = g_poll_queues;
1527 struct nullb_device *dev = nullb->dev;
1530 * Check nr_hw_queues of the tag set to see whether the expected
1531 * number of hardware queues was prepared. If the block layer failed
1532 * to prepare them, use the previous numbers of submit queues and
1533 * poll queues to map the queues.
1535 if (set->nr_hw_queues ==
1536 dev->submit_queues + dev->poll_queues) {
1537 submit_queues = dev->submit_queues;
1538 poll_queues = dev->poll_queues;
1539 } else if (set->nr_hw_queues ==
1540 dev->prev_submit_queues + dev->prev_poll_queues) {
1541 submit_queues = dev->prev_submit_queues;
1542 poll_queues = dev->prev_poll_queues;
1544 pr_warn("tag set has unexpected nr_hw_queues: %d\n",
1550 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
1551 struct blk_mq_queue_map *map = &set->map[i];
1554 case HCTX_TYPE_DEFAULT:
1555 map->nr_queues = submit_queues;
1557 case HCTX_TYPE_READ:
1560 case HCTX_TYPE_POLL:
1561 map->nr_queues = poll_queues;
1564 map->queue_offset = qoff;
1565 qoff += map->nr_queues;
1566 blk_mq_map_queues(map);
1572 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1574 struct nullb_queue *nq = hctx->driver_data;
1578 spin_lock(&nq->poll_lock);
1579 list_splice_init(&nq->poll_list, &list);
1580 spin_unlock(&nq->poll_lock);
1582 while (!list_empty(&list)) {
1583 struct nullb_cmd *cmd;
1584 struct request *req;
1586 req = list_first_entry(&list, struct request, queuelist);
1587 list_del_init(&req->queuelist);
1588 cmd = blk_mq_rq_to_pdu(req);
1589 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
1590 blk_rq_sectors(req));
1591 if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
1592 blk_mq_end_request_batch))
1600 static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
1602 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1603 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1605 pr_info("rq %p timed out\n", rq);
1607 if (hctx->type == HCTX_TYPE_POLL) {
1608 struct nullb_queue *nq = hctx->driver_data;
1610 spin_lock(&nq->poll_lock);
1611 list_del_init(&rq->queuelist);
1612 spin_unlock(&nq->poll_lock);
1616 * If the device is marked as blocking (i.e. memory backed or zoned
1617 * device), the submission path may be blocked waiting for resources
1618 * and cause real timeouts. For these real timeouts, the submission
1619 * path will complete the request using blk_mq_complete_request().
1620 * Only fake timeouts need to execute blk_mq_complete_request() here.
1622 cmd->error = BLK_STS_TIMEOUT;
1623 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
1624 blk_mq_complete_request(rq);
1628 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
1629 const struct blk_mq_queue_data *bd)
1631 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1632 struct nullb_queue *nq = hctx->driver_data;
1633 sector_t nr_sectors = blk_rq_sectors(bd->rq);
1634 sector_t sector = blk_rq_pos(bd->rq);
1635 const bool is_poll = hctx->type == HCTX_TYPE_POLL;
1637 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1639 if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
1640 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1641 cmd->timer.function = null_cmd_timer_expired;
1644 cmd->error = BLK_STS_OK;
1646 cmd->fake_timeout = should_timeout_request(bd->rq);
1648 blk_mq_start_request(bd->rq);
1650 if (should_requeue_request(bd->rq)) {
1652 * Alternate between hitting the core BUSY path and the
1653 * driver-driven requeue path.
1655 nq->requeue_selection++;
1656 if (nq->requeue_selection & 1)
1657 return BLK_STS_RESOURCE;
1659 blk_mq_requeue_request(bd->rq, true);
1665 spin_lock(&nq->poll_lock);
1666 list_add_tail(&bd->rq->queuelist, &nq->poll_list);
1667 spin_unlock(&nq->poll_lock);
1670 if (cmd->fake_timeout)
1673 return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
1676 static void cleanup_queue(struct nullb_queue *nq)
1678 bitmap_free(nq->tag_map);
1682 static void cleanup_queues(struct nullb *nullb)
1686 for (i = 0; i < nullb->nr_queues; i++)
1687 cleanup_queue(&nullb->queues[i]);
1689 kfree(nullb->queues);
1692 static void null_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
1694 struct nullb_queue *nq = hctx->driver_data;
1695 struct nullb *nullb = nq->dev->nullb;
1700 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1702 init_waitqueue_head(&nq->wait);
1703 nq->queue_depth = nullb->queue_depth;
1704 nq->dev = nullb->dev;
1705 INIT_LIST_HEAD(&nq->poll_list);
1706 spin_lock_init(&nq->poll_lock);
1709 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1710 unsigned int hctx_idx)
1712 struct nullb *nullb = hctx->queue->queuedata;
1713 struct nullb_queue *nq;
1715 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1716 if (g_init_hctx_str[0] && should_fail(&null_init_hctx_attr, 1))
1720 nq = &nullb->queues[hctx_idx];
1721 hctx->driver_data = nq;
1722 null_init_queue(nullb, nq);
1728 static const struct blk_mq_ops null_mq_ops = {
1729 .queue_rq = null_queue_rq,
1730 .complete = null_complete_rq,
1731 .timeout = null_timeout_rq,
1733 .map_queues = null_map_queues,
1734 .init_hctx = null_init_hctx,
1735 .exit_hctx = null_exit_hctx,
1738 static void null_del_dev(struct nullb *nullb)
1740 struct nullb_device *dev;
1747 ida_simple_remove(&nullb_indexes, nullb->index);
1749 list_del_init(&nullb->list);
1751 del_gendisk(nullb->disk);
1753 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1754 hrtimer_cancel(&nullb->bw_timer);
1755 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1756 null_restart_queue_async(nullb);
1759 put_disk(nullb->disk);
1760 if (dev->queue_mode == NULL_Q_MQ &&
1761 nullb->tag_set == &nullb->__tag_set)
1762 blk_mq_free_tag_set(nullb->tag_set);
1763 cleanup_queues(nullb);
1764 if (null_cache_active(nullb))
1765 null_free_device_storage(nullb->dev, true);
1770 static void null_config_discard(struct nullb *nullb)
1772 if (!nullb->dev->discard)
1775 if (!nullb->dev->memory_backed) {
1776 nullb->dev->discard = false;
1777 pr_info("discard option is ignored without memory backing\n");
1781 if (nullb->dev->zoned) {
1782 nullb->dev->discard = false;
1783 pr_info("discard option is ignored in zoned mode\n");
1787 nullb->q->limits.discard_granularity = nullb->dev->blocksize;
1788 blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
1791 static const struct block_device_operations null_bio_ops = {
1792 .owner = THIS_MODULE,
1793 .submit_bio = null_submit_bio,
1794 .report_zones = null_report_zones,
1797 static const struct block_device_operations null_rq_ops = {
1798 .owner = THIS_MODULE,
1799 .report_zones = null_report_zones,
1802 static int setup_commands(struct nullb_queue *nq)
1804 struct nullb_cmd *cmd;
1807 nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL);
1811 nq->tag_map = bitmap_zalloc(nq->queue_depth, GFP_KERNEL);
1817 for (i = 0; i < nq->queue_depth; i++) {
1825 static int setup_queues(struct nullb *nullb)
1827 int nqueues = nr_cpu_ids;
1830 nqueues += g_poll_queues;
1832 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
1837 nullb->queue_depth = nullb->dev->hw_queue_depth;
1841 static int init_driver_queues(struct nullb *nullb)
1843 struct nullb_queue *nq;
1846 for (i = 0; i < nullb->dev->submit_queues; i++) {
1847 nq = &nullb->queues[i];
1849 null_init_queue(nullb, nq);
1851 ret = setup_commands(nq);
1859 static int null_gendisk_register(struct nullb *nullb)
1861 sector_t size = ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT;
1862 struct gendisk *disk = nullb->disk;
1864 set_capacity(disk, size);
1866 disk->major = null_major;
1867 disk->first_minor = nullb->index;
1869 if (queue_is_mq(nullb->q))
1870 disk->fops = &null_rq_ops;
1872 disk->fops = &null_bio_ops;
1873 disk->private_data = nullb;
1874 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1876 if (nullb->dev->zoned) {
1877 int ret = null_register_zoned_dev(nullb);
1883 return add_disk(disk);
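/*
 * A single tag set is either shared by all devices (the file-scope tag_set,
 * used when the shared_tags module parameter is set) or embedded in each
 * nullb (nullb->__tag_set); either way it is sized from the submit and poll
 * queue counts.
 */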
1886 static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
1890 set->ops = &null_mq_ops;
1891 set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
1893 poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
1895 set->nr_hw_queues += poll_queues;
1896 set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
1898 set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
1899 set->cmd_size = sizeof(struct nullb_cmd);
1900 set->flags = BLK_MQ_F_SHOULD_MERGE;
1902 set->flags |= BLK_MQ_F_NO_SCHED;
1903 if (g_shared_tag_bitmap)
1904 set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1905 set->driver_data = nullb;
1911 if ((nullb && nullb->dev->blocking) || g_blocking)
1912 set->flags |= BLK_MQ_F_BLOCKING;
1914 return blk_mq_alloc_tag_set(set);
1917 static int null_validate_conf(struct nullb_device *dev)
1919 dev->blocksize = round_down(dev->blocksize, 512);
1920 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
1922 if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
1923 if (dev->submit_queues != nr_online_nodes)
1924 dev->submit_queues = nr_online_nodes;
1925 } else if (dev->submit_queues > nr_cpu_ids)
1926 dev->submit_queues = nr_cpu_ids;
1927 else if (dev->submit_queues == 0)
1928 dev->submit_queues = 1;
1929 dev->prev_submit_queues = dev->submit_queues;
1931 if (dev->poll_queues > g_poll_queues)
1932 dev->poll_queues = g_poll_queues;
1933 dev->prev_poll_queues = dev->poll_queues;
1935 dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
1936 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
1938 /* Memory backing allocates memory in the I/O path, so mark the device as blocking */
1939 if (dev->memory_backed)
1940 dev->blocking = true;
1941 else /* cache is meaningless */
1942 dev->cache_size = 0;
1943 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1945 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
1946 /* A bio-based queue cannot be stopped, so bandwidth throttling is not supported */
1947 if (dev->queue_mode == NULL_Q_BIO)
1951 (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
1952 pr_err("zone_size must be power-of-two\n");
1959 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1960 static bool __null_setup_fault(struct fault_attr *attr, char *str)
1965 if (!setup_fault_attr(attr, str))
1973 static bool null_setup_fault(void)
1975 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1976 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
1978 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
1980 if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
1986 static int null_add_dev(struct nullb_device *dev)
1988 struct nullb *nullb;
1991 rv = null_validate_conf(dev);
1995 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
2003 spin_lock_init(&nullb->lock);
2005 rv = setup_queues(nullb);
2007 goto out_free_nullb;
2009 if (dev->queue_mode == NULL_Q_MQ) {
2011 nullb->tag_set = &tag_set;
2014 nullb->tag_set = &nullb->__tag_set;
2015 rv = null_init_tag_set(nullb, nullb->tag_set);
2019 goto out_cleanup_queues;
2021 if (!null_setup_fault())
2022 goto out_cleanup_tags;
2024 nullb->tag_set->timeout = 5 * HZ;
2025 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, nullb);
2026 if (IS_ERR(nullb->disk)) {
2027 rv = PTR_ERR(nullb->disk);
2028 goto out_cleanup_tags;
2030 nullb->q = nullb->disk->queue;
2031 } else if (dev->queue_mode == NULL_Q_BIO) {
2033 nullb->disk = blk_alloc_disk(nullb->dev->home_node);
2035 goto out_cleanup_queues;
2037 nullb->q = nullb->disk->queue;
2038 rv = init_driver_queues(nullb);
2040 goto out_cleanup_disk;
2044 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
2045 nullb_setup_bwtimer(nullb);
2048 if (dev->cache_size > 0) {
2049 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
2050 blk_queue_write_cache(nullb->q, true, true);
2054 rv = null_init_zoned_dev(dev, nullb->q);
2056 goto out_cleanup_disk;
2059 nullb->q->queuedata = nullb;
2060 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
2061 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
2064 nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
2065 dev->index = nullb->index;
2066 mutex_unlock(&lock);
2068 blk_queue_logical_block_size(nullb->q, dev->blocksize);
2069 blk_queue_physical_block_size(nullb->q, dev->blocksize);
2070 if (!dev->max_sectors)
2071 dev->max_sectors = queue_max_hw_sectors(nullb->q);
2072 dev->max_sectors = min_t(unsigned int, dev->max_sectors,
2073 BLK_DEF_MAX_SECTORS);
2074 blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
2076 if (dev->virt_boundary)
2077 blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
2079 null_config_discard(nullb);
2081 if (config_item_name(&dev->item)) {
2082 /* Use configfs dir name as the device name */
2083 snprintf(nullb->disk_name, sizeof(nullb->disk_name),
2084 "%s", config_item_name(&dev->item));
2086 sprintf(nullb->disk_name, "nullb%d", nullb->index);
2089 rv = null_gendisk_register(nullb);
2091 goto out_cleanup_zone;
2094 list_add_tail(&nullb->list, &nullb_list);
2095 mutex_unlock(&lock);
2097 pr_info("disk %s created\n", nullb->disk_name);
2101 null_free_zoned_dev(dev);
2103 put_disk(nullb->disk);
2105 if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
2106 blk_mq_free_tag_set(nullb->tag_set);
2108 cleanup_queues(nullb);
2116 static struct nullb *null_find_dev_by_name(const char *name)
2118 struct nullb *nullb = NULL, *nb;
2121 list_for_each_entry(nb, &nullb_list, list) {
2122 if (strcmp(nb->disk_name, name) == 0) {
2127 mutex_unlock(&lock);
2132 static int null_create_dev(void)
2134 struct nullb_device *dev;
2137 dev = null_alloc_dev();
2141 ret = null_add_dev(dev);
2150 static void null_destroy_dev(struct nullb *nullb)
2152 struct nullb_device *dev = nullb->dev;
2154 null_del_dev(nullb);
2158 static int __init null_init(void)
2162 struct nullb *nullb;
2164 if (g_bs > PAGE_SIZE) {
2165 pr_warn("invalid block size\n");
2166 pr_warn("defaults block size to %lu\n", PAGE_SIZE);
2170 if (g_max_sectors > BLK_DEF_MAX_SECTORS) {
2171 pr_warn("invalid max sectors\n");
2172 pr_warn("defaults max sectors to %u\n", BLK_DEF_MAX_SECTORS);
2173 g_max_sectors = BLK_DEF_MAX_SECTORS;
2176 if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
2177 pr_err("invalid home_node value\n");
2178 g_home_node = NUMA_NO_NODE;
2181 if (g_queue_mode == NULL_Q_RQ) {
2182 pr_err("legacy IO path is no longer available\n");
2186 if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
2187 if (g_submit_queues != nr_online_nodes) {
2188 pr_warn("submit_queues param is set to %u.\n",
2190 g_submit_queues = nr_online_nodes;
2192 } else if (g_submit_queues > nr_cpu_ids) {
2193 g_submit_queues = nr_cpu_ids;
2194 } else if (g_submit_queues <= 0) {
2195 g_submit_queues = 1;
2198 if (g_queue_mode == NULL_Q_MQ && shared_tags) {
2199 ret = null_init_tag_set(NULL, &tag_set);
2204 config_group_init(&nullb_subsys.su_group);
2205 mutex_init(&nullb_subsys.su_mutex);
2207 ret = configfs_register_subsystem(&nullb_subsys);
2213 null_major = register_blkdev(0, "nullb");
2214 if (null_major < 0) {
2219 for (i = 0; i < nr_devices; i++) {
2220 ret = null_create_dev();
2225 pr_info("module loaded\n");
2229 while (!list_empty(&nullb_list)) {
2230 nullb = list_entry(nullb_list.next, struct nullb, list);
2231 null_destroy_dev(nullb);
2233 unregister_blkdev(null_major, "nullb");
2235 configfs_unregister_subsystem(&nullb_subsys);
2237 if (g_queue_mode == NULL_Q_MQ && shared_tags)
2238 blk_mq_free_tag_set(&tag_set);
2242 static void __exit null_exit(void)
2244 struct nullb *nullb;
2246 configfs_unregister_subsystem(&nullb_subsys);
2248 unregister_blkdev(null_major, "nullb");
2251 while (!list_empty(&nullb_list)) {
2252 nullb = list_entry(nullb_list.next, struct nullb, list);
2253 null_destroy_dev(nullb);
2255 mutex_unlock(&lock);
2257 if (g_queue_mode == NULL_Q_MQ && shared_tags)
2258 blk_mq_free_tag_set(&tag_set);
2261 module_init(null_init);
2262 module_exit(null_exit);
2264 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
2265 MODULE_LICENSE("GPL");