1 // SPDX-License-Identifier: GPL-2.0-only
3 * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
4 * Shaohua Li <shli@fb.com>
6 #include <linux/module.h>
8 #include <linux/moduleparam.h>
9 #include <linux/sched.h>
11 #include <linux/init.h>
15 #define pr_fmt(fmt) "null_blk: " fmt
19 #define TICKS_PER_SEC 50ULL
20 #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
22 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
23 static DECLARE_FAULT_ATTR(null_timeout_attr);
24 static DECLARE_FAULT_ATTR(null_requeue_attr);
25 static DECLARE_FAULT_ATTR(null_init_hctx_attr);
28 static inline u64 mb_per_tick(int mbps)
30 return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
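/*
 * Illustrative numbers (not part of the original source): with TICKS_PER_SEC = 50
 * the bandwidth timer fires every TIMER_INTERVAL = 20 ms, and a device limited to
 * e.g. mbps = 100 gets mb_per_tick(100) = (1 << 20) / 50 * 100 bytes (about 2 MiB)
 * of budget per tick, i.e. roughly 100 MiB/s overall.
 */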
34 * Status flags for nullb_device.
36 * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
37 * UP: Device is currently on and visible in userspace.
38 * THROTTLED: Device is being throttled.
39 * CACHE: Device is using a write-back cache.
41 enum nullb_device_flags {
42 NULLB_DEV_FL_CONFIGURED = 0,
44 NULLB_DEV_FL_THROTTLED = 2,
45 NULLB_DEV_FL_CACHE = 3,
48 #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2)
50 * nullb_page is a page in memory for nullb devices.
52 * @page: The page holding the data.
53 * @bitmap: The bitmap represents which sector in the page has data.
54 * Each bit represents one block size. For example, sector 8
55 * will use the 7th bit
56 * The highest 2 bits of the bitmap are for special purposes. LOCK means the cache
57 * page is being flushed to storage. FREE means the cache page has been freed and
58 * should be skipped when flushing to storage. Please see
59 * null_make_cache_space().
63 DECLARE_BITMAP(bitmap, MAP_SZ);
65 #define NULLB_PAGE_LOCK (MAP_SZ - 1)
66 #define NULLB_PAGE_FREE (MAP_SZ - 2)
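/*
 * A sketch of the bitmap layout, assuming PAGE_SIZE = 4096 (so MAP_SZ = 10):
 * bits 0..7 track which 512-byte sectors of the page hold data, bit 8
 * (NULLB_PAGE_FREE) marks a cache page as freed, and bit 9 (NULLB_PAGE_LOCK)
 * marks a cache page that is currently being flushed to storage.
 */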
68 static LIST_HEAD(nullb_list);
69 static struct mutex lock;
70 static int null_major;
71 static DEFINE_IDA(nullb_indexes);
72 static struct blk_mq_tag_set tag_set;
80 static bool g_virt_boundary = false;
81 module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
82 MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
84 static int g_no_sched;
85 module_param_named(no_sched, g_no_sched, int, 0444);
86 MODULE_PARM_DESC(no_sched, "No I/O scheduler");
88 static int g_submit_queues = 1;
89 module_param_named(submit_queues, g_submit_queues, int, 0444);
90 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
92 static int g_poll_queues = 1;
93 module_param_named(poll_queues, g_poll_queues, int, 0444);
94 MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
96 static int g_home_node = NUMA_NO_NODE;
97 module_param_named(home_node, g_home_node, int, 0444);
98 MODULE_PARM_DESC(home_node, "Home node for the device");
100 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
102 * For more details about fault injection, please refer to
103 * Documentation/fault-injection/fault-injection.rst.
105 static char g_timeout_str[80];
106 module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444);
107 MODULE_PARM_DESC(timeout, "Fault injection. timeout=<interval>,<probability>,<space>,<times>");
109 static char g_requeue_str[80];
110 module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444);
111 MODULE_PARM_DESC(requeue, "Fault injection. requeue=<interval>,<probability>,<space>,<times>");
113 static char g_init_hctx_str[80];
114 module_param_string(init_hctx, g_init_hctx_str, sizeof(g_init_hctx_str), 0444);
115 MODULE_PARM_DESC(init_hctx, "Fault injection to fail hctx init. init_hctx=<interval>,<probability>,<space>,<times>");
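/*
 * Illustrative usage (values are examples, not defaults): loading the module
 * with timeout="1,100,0,-1" makes every request eligible for a fake timeout,
 * following the <interval>,<probability>,<space>,<times> format described in
 * Documentation/fault-injection/fault-injection.rst.
 */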
119 * Historic queue modes.
121 * These days nothing but NULL_Q_MQ is actually supported, but we keep the
122 * enum around for error reporting.
130 static int g_queue_mode = NULL_Q_MQ;
132 static int null_param_store_val(const char *str, int *val, int min, int max)
136 ret = kstrtoint(str, 10, &new_val);
140 if (new_val < min || new_val > max)
147 static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
149 return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
152 static const struct kernel_param_ops null_queue_mode_param_ops = {
153 .set = null_set_queue_mode,
154 .get = param_get_int,
157 device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444);
158 MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
160 static int g_gb = 250;
161 module_param_named(gb, g_gb, int, 0444);
162 MODULE_PARM_DESC(gb, "Size in GB");
164 static int g_bs = 512;
165 module_param_named(bs, g_bs, int, 0444);
166 MODULE_PARM_DESC(bs, "Block size (in bytes)");
168 static int g_max_sectors;
169 module_param_named(max_sectors, g_max_sectors, int, 0444);
170 MODULE_PARM_DESC(max_sectors, "Maximum size of a command (in 512B sectors)");
172 static unsigned int nr_devices = 1;
173 module_param(nr_devices, uint, 0444);
174 MODULE_PARM_DESC(nr_devices, "Number of devices to register");
176 static bool g_blocking;
177 module_param_named(blocking, g_blocking, bool, 0444);
178 MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
180 static bool g_shared_tags;
181 module_param_named(shared_tags, g_shared_tags, bool, 0444);
182 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
184 static bool g_shared_tag_bitmap;
185 module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
186 MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
188 static int g_irqmode = NULL_IRQ_SOFTIRQ;
190 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
192 return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
196 static const struct kernel_param_ops null_irqmode_param_ops = {
197 .set = null_set_irqmode,
198 .get = param_get_int,
201 device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444);
202 MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
204 static unsigned long g_completion_nsec = 10000;
205 module_param_named(completion_nsec, g_completion_nsec, ulong, 0444);
206 MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
208 static int g_hw_queue_depth = 64;
209 module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444);
210 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
212 static bool g_use_per_node_hctx;
213 module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444);
214 MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
216 static bool g_memory_backed;
217 module_param_named(memory_backed, g_memory_backed, bool, 0444);
218 MODULE_PARM_DESC(memory_backed, "Create a memory-backed block device. Default: false");
220 static bool g_discard;
221 module_param_named(discard, g_discard, bool, 0444);
222 MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
224 static unsigned long g_cache_size;
225 module_param_named(cache_size, g_cache_size, ulong, 0444);
226 MODULE_PARM_DESC(cache_size, "Cache size in MiB for memory-backed device. Default: 0 (none)");
228 static bool g_fua = true;
229 module_param_named(fua, g_fua, bool, 0444);
230 MODULE_PARM_DESC(fua, "Enable/disable FUA support when cache_size is used. Default: true");
232 static unsigned int g_mbps;
233 module_param_named(mbps, g_mbps, uint, 0444);
234 MODULE_PARM_DESC(mbps, "Limit maximum bandwidth (in MiB/s). Default: 0 (no limit)");
237 module_param_named(zoned, g_zoned, bool, 0444);
238 MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false");
240 static unsigned long g_zone_size = 256;
241 module_param_named(zone_size, g_zone_size, ulong, 0444);
242 MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be power-of-two. Default: 256");
244 static unsigned long g_zone_capacity;
245 module_param_named(zone_capacity, g_zone_capacity, ulong, 0444);
246 MODULE_PARM_DESC(zone_capacity, "Zone capacity in MB when block device is zoned. Can be less than or equal to zone size. Default: Zone size");
248 static unsigned int g_zone_nr_conv;
249 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
250 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
252 static unsigned int g_zone_max_open;
253 module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
254 MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
256 static unsigned int g_zone_max_active;
257 module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
258 MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
260 static int g_zone_append_max_sectors = INT_MAX;
261 module_param_named(zone_append_max_sectors, g_zone_append_max_sectors, int, 0444);
262 MODULE_PARM_DESC(zone_append_max_sectors,
263 "Maximum size of a zone append command (in 512B sectors). Specify 0 for zone append emulation");
265 static struct nullb_device *null_alloc_dev(void);
266 static void null_free_dev(struct nullb_device *dev);
267 static void null_del_dev(struct nullb *nullb);
268 static int null_add_dev(struct nullb_device *dev);
269 static struct nullb *null_find_dev_by_name(const char *name);
270 static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
272 static inline struct nullb_device *to_nullb_device(struct config_item *item)
274 return item ? container_of(to_config_group(item), struct nullb_device, group) : NULL;
277 static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
279 return snprintf(page, PAGE_SIZE, "%u\n", val);
282 static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
285 return snprintf(page, PAGE_SIZE, "%lu\n", val);
288 static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
290 return snprintf(page, PAGE_SIZE, "%u\n", val);
293 static ssize_t nullb_device_uint_attr_store(unsigned int *val,
294 const char *page, size_t count)
299 result = kstrtouint(page, 0, &tmp);
307 static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
308 const char *page, size_t count)
313 result = kstrtoul(page, 0, &tmp);
321 static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
327 result = kstrtobool(page, &tmp);
335 /* The following macro should only be used with TYPE = {uint, ulong, bool}. */
336 #define NULLB_DEVICE_ATTR(NAME, TYPE, APPLY) \
338 nullb_device_##NAME##_show(struct config_item *item, char *page) \
340 return nullb_device_##TYPE##_attr_show( \
341 to_nullb_device(item)->NAME, page); \
344 nullb_device_##NAME##_store(struct config_item *item, const char *page, \
347 int (*apply_fn)(struct nullb_device *dev, TYPE new_value) = APPLY;\
348 struct nullb_device *dev = to_nullb_device(item); \
349 TYPE new_value = 0; \
352 ret = nullb_device_##TYPE##_attr_store(&new_value, page, count);\
356 ret = apply_fn(dev, new_value); \
357 else if (test_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags)) \
361 dev->NAME = new_value; \
364 CONFIGFS_ATTR(nullb_device_, NAME);
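/*
 * For reference, NULLB_DEVICE_ATTR(mbps, uint, NULL) roughly expands to a
 * nullb_device_mbps_show()/nullb_device_mbps_store() pair wired up through
 * CONFIGFS_ATTR(), where the store path refuses updates once the device has
 * been configured (NULLB_DEV_FL_CONFIGURED) unless an apply function is given.
 */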
366 static int nullb_update_nr_hw_queues(struct nullb_device *dev,
367 unsigned int submit_queues,
368 unsigned int poll_queues)
371 struct blk_mq_tag_set *set;
372 int ret, nr_hw_queues;
378 * Make sure at least one submit queue exists.
384 * Make sure that null_init_hctx() does not access nullb->queues[] past
385 * the end of that array.
387 if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
391 * Keep the previous and new queue counts in nullb_device for reference in
392 * the callback function null_map_queues().
394 dev->prev_submit_queues = dev->submit_queues;
395 dev->prev_poll_queues = dev->poll_queues;
396 dev->submit_queues = submit_queues;
397 dev->poll_queues = poll_queues;
399 set = dev->nullb->tag_set;
400 nr_hw_queues = submit_queues + poll_queues;
401 blk_mq_update_nr_hw_queues(set, nr_hw_queues);
402 ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
405 /* on error, revert the queue numbers */
406 dev->submit_queues = dev->prev_submit_queues;
407 dev->poll_queues = dev->prev_poll_queues;
413 static int nullb_apply_submit_queues(struct nullb_device *dev,
414 unsigned int submit_queues)
416 return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
419 static int nullb_apply_poll_queues(struct nullb_device *dev,
420 unsigned int poll_queues)
422 return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
425 NULLB_DEVICE_ATTR(size, ulong, NULL);
426 NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
427 NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
428 NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
429 NULLB_DEVICE_ATTR(home_node, uint, NULL);
430 NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
431 NULLB_DEVICE_ATTR(blocksize, uint, NULL);
432 NULLB_DEVICE_ATTR(max_sectors, uint, NULL);
433 NULLB_DEVICE_ATTR(irqmode, uint, NULL);
434 NULLB_DEVICE_ATTR(hw_queue_depth, uint, NULL);
435 NULLB_DEVICE_ATTR(index, uint, NULL);
436 NULLB_DEVICE_ATTR(blocking, bool, NULL);
437 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
438 NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
439 NULLB_DEVICE_ATTR(discard, bool, NULL);
440 NULLB_DEVICE_ATTR(mbps, uint, NULL);
441 NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
442 NULLB_DEVICE_ATTR(zoned, bool, NULL);
443 NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
444 NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
445 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
446 NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
447 NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
448 NULLB_DEVICE_ATTR(zone_append_max_sectors, uint, NULL);
449 NULLB_DEVICE_ATTR(virt_boundary, bool, NULL);
450 NULLB_DEVICE_ATTR(no_sched, bool, NULL);
451 NULLB_DEVICE_ATTR(shared_tags, bool, NULL);
452 NULLB_DEVICE_ATTR(shared_tag_bitmap, bool, NULL);
453 NULLB_DEVICE_ATTR(fua, bool, NULL);
455 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
457 return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
460 static ssize_t nullb_device_power_store(struct config_item *item,
461 const char *page, size_t count)
463 struct nullb_device *dev = to_nullb_device(item);
467 ret = nullb_device_bool_attr_store(&newp, page, count);
471 if (!dev->power && newp) {
472 if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
474 ret = null_add_dev(dev);
476 clear_bit(NULLB_DEV_FL_UP, &dev->flags);
480 set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
482 } else if (dev->power && !newp) {
483 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
486 null_del_dev(dev->nullb);
489 clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
495 CONFIGFS_ATTR(nullb_device_, power);
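/*
 * Typical configfs usage (paths assume the standard configfs mount point and
 * are illustrative):
 *
 *   mkdir /sys/kernel/config/nullb/mydev
 *   echo 4096 > /sys/kernel/config/nullb/mydev/blocksize
 *   echo 1 > /sys/kernel/config/nullb/mydev/power   # creates /dev/mydev
 *   echo 0 > /sys/kernel/config/nullb/mydev/power   # tears it down again
 */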
497 static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
499 struct nullb_device *t_dev = to_nullb_device(item);
501 return badblocks_show(&t_dev->badblocks, page, 0);
504 static ssize_t nullb_device_badblocks_store(struct config_item *item,
505 const char *page, size_t count)
507 struct nullb_device *t_dev = to_nullb_device(item);
508 char *orig, *buf, *tmp;
512 orig = kstrndup(page, count, GFP_KERNEL);
516 buf = strstrip(orig);
519 if (buf[0] != '+' && buf[0] != '-')
521 tmp = strchr(&buf[1], '-');
525 ret = kstrtoull(buf + 1, 0, &start);
528 ret = kstrtoull(tmp + 1, 0, &end);
534 /* enable badblocks */
535 cmpxchg(&t_dev->badblocks.shift, -1, 0);
537 ret = badblocks_set(&t_dev->badblocks, start,
540 ret = badblocks_clear(&t_dev->badblocks, start,
548 CONFIGFS_ATTR(nullb_device_, badblocks);
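/*
 * Badblocks are managed with "+start-end" / "-start-end" strings, e.g.
 * (illustrative values):
 *
 *   echo "+0-7" > /sys/kernel/config/nullb/mydev/badblocks   # mark sectors 0..7 bad
 *   echo "-0-7" > /sys/kernel/config/nullb/mydev/badblocks   # clear them again
 */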
550 static ssize_t nullb_device_zone_readonly_store(struct config_item *item,
551 const char *page, size_t count)
553 struct nullb_device *dev = to_nullb_device(item);
555 return zone_cond_store(dev, page, count, BLK_ZONE_COND_READONLY);
557 CONFIGFS_ATTR_WO(nullb_device_, zone_readonly);
559 static ssize_t nullb_device_zone_offline_store(struct config_item *item,
560 const char *page, size_t count)
562 struct nullb_device *dev = to_nullb_device(item);
564 return zone_cond_store(dev, page, count, BLK_ZONE_COND_OFFLINE);
566 CONFIGFS_ATTR_WO(nullb_device_, zone_offline);
568 static struct configfs_attribute *nullb_device_attrs[] = {
569 &nullb_device_attr_size,
570 &nullb_device_attr_completion_nsec,
571 &nullb_device_attr_submit_queues,
572 &nullb_device_attr_poll_queues,
573 &nullb_device_attr_home_node,
574 &nullb_device_attr_queue_mode,
575 &nullb_device_attr_blocksize,
576 &nullb_device_attr_max_sectors,
577 &nullb_device_attr_irqmode,
578 &nullb_device_attr_hw_queue_depth,
579 &nullb_device_attr_index,
580 &nullb_device_attr_blocking,
581 &nullb_device_attr_use_per_node_hctx,
582 &nullb_device_attr_power,
583 &nullb_device_attr_memory_backed,
584 &nullb_device_attr_discard,
585 &nullb_device_attr_mbps,
586 &nullb_device_attr_cache_size,
587 &nullb_device_attr_badblocks,
588 &nullb_device_attr_zoned,
589 &nullb_device_attr_zone_size,
590 &nullb_device_attr_zone_capacity,
591 &nullb_device_attr_zone_nr_conv,
592 &nullb_device_attr_zone_max_open,
593 &nullb_device_attr_zone_max_active,
594 &nullb_device_attr_zone_append_max_sectors,
595 &nullb_device_attr_zone_readonly,
596 &nullb_device_attr_zone_offline,
597 &nullb_device_attr_virt_boundary,
598 &nullb_device_attr_no_sched,
599 &nullb_device_attr_shared_tags,
600 &nullb_device_attr_shared_tag_bitmap,
601 &nullb_device_attr_fua,
605 static void nullb_device_release(struct config_item *item)
607 struct nullb_device *dev = to_nullb_device(item);
609 null_free_device_storage(dev, false);
613 static struct configfs_item_operations nullb_device_ops = {
614 .release = nullb_device_release,
617 static const struct config_item_type nullb_device_type = {
618 .ct_item_ops = &nullb_device_ops,
619 .ct_attrs = nullb_device_attrs,
620 .ct_owner = THIS_MODULE,
623 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
625 static void nullb_add_fault_config(struct nullb_device *dev)
627 fault_config_init(&dev->timeout_config, "timeout_inject");
628 fault_config_init(&dev->requeue_config, "requeue_inject");
629 fault_config_init(&dev->init_hctx_fault_config, "init_hctx_fault_inject");
631 configfs_add_default_group(&dev->timeout_config.group, &dev->group);
632 configfs_add_default_group(&dev->requeue_config.group, &dev->group);
633 configfs_add_default_group(&dev->init_hctx_fault_config.group, &dev->group);
638 static void nullb_add_fault_config(struct nullb_device *dev)
645 config_group *nullb_group_make_group(struct config_group *group, const char *name)
647 struct nullb_device *dev;
649 if (null_find_dev_by_name(name))
650 return ERR_PTR(-EEXIST);
652 dev = null_alloc_dev();
654 return ERR_PTR(-ENOMEM);
656 config_group_init_type_name(&dev->group, name, &nullb_device_type);
657 nullb_add_fault_config(dev);
663 nullb_group_drop_item(struct config_group *group, struct config_item *item)
665 struct nullb_device *dev = to_nullb_device(item);
667 if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
670 null_del_dev(dev->nullb);
674 config_item_put(item);
677 static ssize_t memb_group_features_show(struct config_item *item, char *page)
679 return snprintf(page, PAGE_SIZE,
680 "badblocks,blocking,blocksize,cache_size,fua,"
681 "completion_nsec,discard,home_node,hw_queue_depth,"
682 "irqmode,max_sectors,mbps,memory_backed,no_sched,"
683 "poll_queues,power,queue_mode,shared_tag_bitmap,"
684 "shared_tags,size,submit_queues,use_per_node_hctx,"
685 "virt_boundary,zoned,zone_capacity,zone_max_active,"
686 "zone_max_open,zone_nr_conv,zone_offline,zone_readonly,"
687 "zone_size,zone_append_max_sectors\n");
690 CONFIGFS_ATTR_RO(memb_group_, features);
692 static struct configfs_attribute *nullb_group_attrs[] = {
693 &memb_group_attr_features,
697 static struct configfs_group_operations nullb_group_ops = {
698 .make_group = nullb_group_make_group,
699 .drop_item = nullb_group_drop_item,
702 static const struct config_item_type nullb_group_type = {
703 .ct_group_ops = &nullb_group_ops,
704 .ct_attrs = nullb_group_attrs,
705 .ct_owner = THIS_MODULE,
708 static struct configfs_subsystem nullb_subsys = {
711 .ci_namebuf = "nullb",
712 .ci_type = &nullb_group_type,
717 static inline int null_cache_active(struct nullb *nullb)
719 return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
722 static struct nullb_device *null_alloc_dev(void)
724 struct nullb_device *dev;
726 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
730 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
731 dev->timeout_config.attr = null_timeout_attr;
732 dev->requeue_config.attr = null_requeue_attr;
733 dev->init_hctx_fault_config.attr = null_init_hctx_attr;
736 INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
737 INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
738 if (badblocks_init(&dev->badblocks, 0)) {
743 dev->size = g_gb * 1024;
744 dev->completion_nsec = g_completion_nsec;
745 dev->submit_queues = g_submit_queues;
746 dev->prev_submit_queues = g_submit_queues;
747 dev->poll_queues = g_poll_queues;
748 dev->prev_poll_queues = g_poll_queues;
749 dev->home_node = g_home_node;
750 dev->queue_mode = g_queue_mode;
751 dev->blocksize = g_bs;
752 dev->max_sectors = g_max_sectors;
753 dev->irqmode = g_irqmode;
754 dev->hw_queue_depth = g_hw_queue_depth;
755 dev->blocking = g_blocking;
756 dev->memory_backed = g_memory_backed;
757 dev->discard = g_discard;
758 dev->cache_size = g_cache_size;
760 dev->use_per_node_hctx = g_use_per_node_hctx;
761 dev->zoned = g_zoned;
762 dev->zone_size = g_zone_size;
763 dev->zone_capacity = g_zone_capacity;
764 dev->zone_nr_conv = g_zone_nr_conv;
765 dev->zone_max_open = g_zone_max_open;
766 dev->zone_max_active = g_zone_max_active;
767 dev->zone_append_max_sectors = g_zone_append_max_sectors;
768 dev->virt_boundary = g_virt_boundary;
769 dev->no_sched = g_no_sched;
770 dev->shared_tags = g_shared_tags;
771 dev->shared_tag_bitmap = g_shared_tag_bitmap;
777 static void null_free_dev(struct nullb_device *dev)
782 null_free_zoned_dev(dev);
783 badblocks_exit(&dev->badblocks);
787 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
789 struct nullb_cmd *cmd = container_of(timer, struct nullb_cmd, timer);
791 blk_mq_end_request(blk_mq_rq_from_pdu(cmd), cmd->error);
792 return HRTIMER_NORESTART;
795 static void null_cmd_end_timer(struct nullb_cmd *cmd)
797 ktime_t kt = cmd->nq->dev->completion_nsec;
799 hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
802 static void null_complete_rq(struct request *rq)
804 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
806 blk_mq_end_request(rq, cmd->error);
809 static struct nullb_page *null_alloc_page(void)
811 struct nullb_page *t_page;
813 t_page = kmalloc(sizeof(struct nullb_page), GFP_NOIO);
817 t_page->page = alloc_pages(GFP_NOIO, 0);
823 memset(t_page->bitmap, 0, sizeof(t_page->bitmap));
827 static void null_free_page(struct nullb_page *t_page)
829 __set_bit(NULLB_PAGE_FREE, t_page->bitmap);
830 if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap))
832 __free_page(t_page->page);
836 static bool null_page_empty(struct nullb_page *page)
838 int size = MAP_SZ - 2;
840 return find_first_bit(page->bitmap, size) == size;
843 static void null_free_sector(struct nullb *nullb, sector_t sector,
846 unsigned int sector_bit;
848 struct nullb_page *t_page, *ret;
849 struct radix_tree_root *root;
851 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
852 idx = sector >> PAGE_SECTORS_SHIFT;
853 sector_bit = (sector & SECTOR_MASK);
855 t_page = radix_tree_lookup(root, idx);
857 __clear_bit(sector_bit, t_page->bitmap);
859 if (null_page_empty(t_page)) {
860 ret = radix_tree_delete_item(root, idx, t_page);
861 WARN_ON(ret != t_page);
864 nullb->dev->curr_cache -= PAGE_SIZE;
869 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
870 struct nullb_page *t_page, bool is_cache)
872 struct radix_tree_root *root;
874 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
876 if (radix_tree_insert(root, idx, t_page)) {
877 null_free_page(t_page);
878 t_page = radix_tree_lookup(root, idx);
879 WARN_ON(!t_page || t_page->page->index != idx);
881 nullb->dev->curr_cache += PAGE_SIZE;
886 static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
888 unsigned long pos = 0;
890 struct nullb_page *ret, *t_pages[FREE_BATCH];
891 struct radix_tree_root *root;
893 root = is_cache ? &dev->cache : &dev->data;
898 nr_pages = radix_tree_gang_lookup(root,
899 (void **)t_pages, pos, FREE_BATCH);
901 for (i = 0; i < nr_pages; i++) {
902 pos = t_pages[i]->page->index;
903 ret = radix_tree_delete_item(root, pos, t_pages[i]);
904 WARN_ON(ret != t_pages[i]);
909 } while (nr_pages == FREE_BATCH);
915 static struct nullb_page *__null_lookup_page(struct nullb *nullb,
916 sector_t sector, bool for_write, bool is_cache)
918 unsigned int sector_bit;
920 struct nullb_page *t_page;
921 struct radix_tree_root *root;
923 idx = sector >> PAGE_SECTORS_SHIFT;
924 sector_bit = (sector & SECTOR_MASK);
926 root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
927 t_page = radix_tree_lookup(root, idx);
928 WARN_ON(t_page && t_page->page->index != idx);
930 if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap)))
936 static struct nullb_page *null_lookup_page(struct nullb *nullb,
937 sector_t sector, bool for_write, bool ignore_cache)
939 struct nullb_page *page = NULL;
942 page = __null_lookup_page(nullb, sector, for_write, true);
945 return __null_lookup_page(nullb, sector, for_write, false);
948 static struct nullb_page *null_insert_page(struct nullb *nullb,
949 sector_t sector, bool ignore_cache)
950 __releases(&nullb->lock)
951 __acquires(&nullb->lock)
954 struct nullb_page *t_page;
956 t_page = null_lookup_page(nullb, sector, true, ignore_cache);
960 spin_unlock_irq(&nullb->lock);
962 t_page = null_alloc_page();
966 if (radix_tree_preload(GFP_NOIO))
969 spin_lock_irq(&nullb->lock);
970 idx = sector >> PAGE_SECTORS_SHIFT;
971 t_page->page->index = idx;
972 t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
973 radix_tree_preload_end();
977 null_free_page(t_page);
979 spin_lock_irq(&nullb->lock);
980 return null_lookup_page(nullb, sector, true, ignore_cache);
983 static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
988 struct nullb_page *t_page, *ret;
991 idx = c_page->page->index;
993 t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
995 __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap);
996 if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) {
997 null_free_page(c_page);
998 if (t_page && null_page_empty(t_page)) {
999 ret = radix_tree_delete_item(&nullb->dev->data,
1001 null_free_page(t_page);
1009 src = kmap_local_page(c_page->page);
1010 dst = kmap_local_page(t_page->page);
1012 for (i = 0; i < PAGE_SECTORS;
1013 i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
1014 if (test_bit(i, c_page->bitmap)) {
1015 offset = (i << SECTOR_SHIFT);
1016 memcpy(dst + offset, src + offset,
1017 nullb->dev->blocksize);
1018 __set_bit(i, t_page->bitmap);
1025 ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
1026 null_free_page(ret);
1027 nullb->dev->curr_cache -= PAGE_SIZE;
1032 static int null_make_cache_space(struct nullb *nullb, unsigned long n)
1034 int i, err, nr_pages;
1035 struct nullb_page *c_pages[FREE_BATCH];
1036 unsigned long flushed = 0, one_round;
1039 if ((nullb->dev->cache_size * 1024 * 1024) >
1040 nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
1043 nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
1044 (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
1046 * null_flush_cache_page() could unlock before using the c_pages. To
1047 * avoid a race, we don't allow the pages to be freed.
1049 for (i = 0; i < nr_pages; i++) {
1050 nullb->cache_flush_pos = c_pages[i]->page->index;
1052 * We found the page which is being flushed to disk by other
1055 if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap))
1058 __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap);
1062 for (i = 0; i < nr_pages; i++) {
1063 if (c_pages[i] == NULL)
1065 err = null_flush_cache_page(nullb, c_pages[i]);
1070 flushed += one_round << PAGE_SHIFT;
1074 nullb->cache_flush_pos = 0;
1075 if (one_round == 0) {
1076 /* give other threads a chance */
1077 spin_unlock_irq(&nullb->lock);
1078 spin_lock_irq(&nullb->lock);
1085 static int copy_to_nullb(struct nullb *nullb, struct page *source,
1086 unsigned int off, sector_t sector, size_t n, bool is_fua)
1088 size_t temp, count = 0;
1089 unsigned int offset;
1090 struct nullb_page *t_page;
1093 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1095 if (null_cache_active(nullb) && !is_fua)
1096 null_make_cache_space(nullb, PAGE_SIZE);
1098 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1099 t_page = null_insert_page(nullb, sector,
1100 !null_cache_active(nullb) || is_fua);
1104 memcpy_page(t_page->page, offset, source, off + count, temp);
1106 __set_bit(sector & SECTOR_MASK, t_page->bitmap);
1109 null_free_sector(nullb, sector, true);
1112 sector += temp >> SECTOR_SHIFT;
1117 static int copy_from_nullb(struct nullb *nullb, struct page *dest,
1118 unsigned int off, sector_t sector, size_t n)
1120 size_t temp, count = 0;
1121 unsigned int offset;
1122 struct nullb_page *t_page;
1125 temp = min_t(size_t, nullb->dev->blocksize, n - count);
1127 offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
1128 t_page = null_lookup_page(nullb, sector, false,
1129 !null_cache_active(nullb));
1132 memcpy_page(dest, off + count, t_page->page, offset,
1135 zero_user(dest, off + count, temp);
1138 sector += temp >> SECTOR_SHIFT;
1143 static void nullb_fill_pattern(struct nullb *nullb, struct page *page,
1144 unsigned int len, unsigned int off)
1146 memset_page(page, off, 0xff, len);
1149 blk_status_t null_handle_discard(struct nullb_device *dev,
1150 sector_t sector, sector_t nr_sectors)
1152 struct nullb *nullb = dev->nullb;
1153 size_t n = nr_sectors << SECTOR_SHIFT;
1156 spin_lock_irq(&nullb->lock);
1158 temp = min_t(size_t, n, dev->blocksize);
1159 null_free_sector(nullb, sector, false);
1160 if (null_cache_active(nullb))
1161 null_free_sector(nullb, sector, true);
1162 sector += temp >> SECTOR_SHIFT;
1165 spin_unlock_irq(&nullb->lock);
1170 static blk_status_t null_handle_flush(struct nullb *nullb)
1174 if (!null_cache_active(nullb))
1177 spin_lock_irq(&nullb->lock);
1179 err = null_make_cache_space(nullb,
1180 nullb->dev->cache_size * 1024 * 1024);
1181 if (err || nullb->dev->curr_cache == 0)
1185 WARN_ON(!radix_tree_empty(&nullb->dev->cache));
1186 spin_unlock_irq(&nullb->lock);
1187 return errno_to_blk_status(err);
1190 static int null_transfer(struct nullb *nullb, struct page *page,
1191 unsigned int len, unsigned int off, bool is_write, sector_t sector,
1194 struct nullb_device *dev = nullb->dev;
1195 unsigned int valid_len = len;
1200 valid_len = null_zone_valid_read_len(nullb,
1204 err = copy_from_nullb(nullb, page, off,
1211 nullb_fill_pattern(nullb, page, len, off);
1212 flush_dcache_page(page);
1214 flush_dcache_page(page);
1215 err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
1221 static int null_handle_rq(struct nullb_cmd *cmd)
1223 struct request *rq = blk_mq_rq_from_pdu(cmd);
1224 struct nullb *nullb = cmd->nq->dev->nullb;
1227 sector_t sector = blk_rq_pos(rq);
1228 struct req_iterator iter;
1229 struct bio_vec bvec;
1231 spin_lock_irq(&nullb->lock);
1232 rq_for_each_segment(bvec, rq, iter) {
1234 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
1235 op_is_write(req_op(rq)), sector,
1236 rq->cmd_flags & REQ_FUA);
1239 sector += len >> SECTOR_SHIFT;
1241 spin_unlock_irq(&nullb->lock);
1243 return errno_to_blk_status(err);
1246 static inline blk_status_t null_handle_throttled(struct nullb_cmd *cmd)
1248 struct nullb_device *dev = cmd->nq->dev;
1249 struct nullb *nullb = dev->nullb;
1250 blk_status_t sts = BLK_STS_OK;
1251 struct request *rq = blk_mq_rq_from_pdu(cmd);
1253 if (!hrtimer_active(&nullb->bw_timer))
1254 hrtimer_restart(&nullb->bw_timer);
1256 if (atomic_long_sub_return(blk_rq_bytes(rq), &nullb->cur_bytes) < 0) {
1257 blk_mq_stop_hw_queues(nullb->q);
1258 /* race with timer */
1259 if (atomic_long_read(&nullb->cur_bytes) > 0)
1260 blk_mq_start_stopped_hw_queues(nullb->q, true);
1261 /* requeue request */
1262 sts = BLK_STS_DEV_RESOURCE;
1267 static inline blk_status_t null_handle_badblocks(struct nullb_cmd *cmd,
1269 sector_t nr_sectors)
1271 struct badblocks *bb = &cmd->nq->dev->badblocks;
1275 if (badblocks_check(bb, sector, nr_sectors, &first_bad, &bad_sectors))
1276 return BLK_STS_IOERR;
1281 static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
1284 sector_t nr_sectors)
1286 struct nullb_device *dev = cmd->nq->dev;
1288 if (op == REQ_OP_DISCARD)
1289 return null_handle_discard(dev, sector, nr_sectors);
1291 return null_handle_rq(cmd);
1294 static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
1296 struct request *rq = blk_mq_rq_from_pdu(cmd);
1297 struct nullb_device *dev = cmd->nq->dev;
1300 if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) {
1301 __rq_for_each_bio(bio, rq)
1306 static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
1308 struct request *rq = blk_mq_rq_from_pdu(cmd);
1311 * Since root privileges are required to configure the null_blk
1312 * driver, it is fine that this driver does not initialize the
1313 * data buffers of read commands. Zero-initialize these buffers
1314 * anyway if KMSAN is enabled so that KMSAN does not complain
1315 * about null_blk leaving read data buffers uninitialized.
1317 if (IS_ENABLED(CONFIG_KMSAN))
1318 nullb_zero_read_cmd_buffer(cmd);
1320 /* Complete IO by inline, softirq or timer */
1321 switch (cmd->nq->dev->irqmode) {
1322 case NULL_IRQ_SOFTIRQ:
1323 blk_mq_complete_request(rq);
1326 blk_mq_end_request(rq, cmd->error);
1328 case NULL_IRQ_TIMER:
1329 null_cmd_end_timer(cmd);
1334 blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
1335 sector_t sector, unsigned int nr_sectors)
1337 struct nullb_device *dev = cmd->nq->dev;
1340 if (dev->badblocks.shift != -1) {
1341 ret = null_handle_badblocks(cmd, sector, nr_sectors);
1342 if (ret != BLK_STS_OK)
1346 if (dev->memory_backed)
1347 return null_handle_memory_backed(cmd, op, sector, nr_sectors);
1352 static void null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
1353 sector_t nr_sectors, enum req_op op)
1355 struct nullb_device *dev = cmd->nq->dev;
1356 struct nullb *nullb = dev->nullb;
1359 if (op == REQ_OP_FLUSH) {
1360 cmd->error = null_handle_flush(nullb);
1365 sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
1367 sts = null_process_cmd(cmd, op, sector, nr_sectors);
1369 /* Do not overwrite errors (e.g. timeout errors) */
1370 if (cmd->error == BLK_STS_OK)
1374 nullb_complete_cmd(cmd);
1377 static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
1379 struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
1380 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1381 unsigned int mbps = nullb->dev->mbps;
1383 if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
1384 return HRTIMER_NORESTART;
1386 atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
1387 blk_mq_start_stopped_hw_queues(nullb->q, true);
1389 hrtimer_forward_now(&nullb->bw_timer, timer_interval);
1391 return HRTIMER_RESTART;
1394 static void nullb_setup_bwtimer(struct nullb *nullb)
1396 ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
1398 hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1399 nullb->bw_timer.function = nullb_bwtimer_fn;
1400 atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
1401 hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
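/*
 * Summary of the throttling flow (descriptive only): null_handle_throttled()
 * charges each request against ->cur_bytes and stops the hardware queues once
 * the budget goes negative; nullb_bwtimer_fn() then refills the budget to
 * mb_per_tick(mbps) every TIMER_INTERVAL and restarts the stopped queues.
 */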
1404 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1406 static bool should_timeout_request(struct request *rq)
1408 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1409 struct nullb_device *dev = cmd->nq->dev;
1411 return should_fail(&dev->timeout_config.attr, 1);
1414 static bool should_requeue_request(struct request *rq)
1416 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1417 struct nullb_device *dev = cmd->nq->dev;
1419 return should_fail(&dev->requeue_config.attr, 1);
1422 static bool should_init_hctx_fail(struct nullb_device *dev)
1424 return should_fail(&dev->init_hctx_fault_config.attr, 1);
1429 static bool should_timeout_request(struct request *rq)
1434 static bool should_requeue_request(struct request *rq)
1439 static bool should_init_hctx_fail(struct nullb_device *dev)
1446 static void null_map_queues(struct blk_mq_tag_set *set)
1448 struct nullb *nullb = set->driver_data;
1450 unsigned int submit_queues = g_submit_queues;
1451 unsigned int poll_queues = g_poll_queues;
1454 struct nullb_device *dev = nullb->dev;
1457 * Check nr_hw_queues of the tag set to see whether the expected
1458 * number of hardware queues was prepared. If the block layer failed
1459 * to prepare them, use the previous numbers of submit queues and
1460 * poll queues to map queues.
1462 if (set->nr_hw_queues ==
1463 dev->submit_queues + dev->poll_queues) {
1464 submit_queues = dev->submit_queues;
1465 poll_queues = dev->poll_queues;
1466 } else if (set->nr_hw_queues ==
1467 dev->prev_submit_queues + dev->prev_poll_queues) {
1468 submit_queues = dev->prev_submit_queues;
1469 poll_queues = dev->prev_poll_queues;
1471 pr_warn("tag set has unexpected nr_hw_queues: %d\n",
1479 for (i = 0, qoff = 0; i < set->nr_maps; i++) {
1480 struct blk_mq_queue_map *map = &set->map[i];
1483 case HCTX_TYPE_DEFAULT:
1484 map->nr_queues = submit_queues;
1486 case HCTX_TYPE_READ:
1489 case HCTX_TYPE_POLL:
1490 map->nr_queues = poll_queues;
1493 map->queue_offset = qoff;
1494 qoff += map->nr_queues;
1495 blk_mq_map_queues(map);
1499 static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1501 struct nullb_queue *nq = hctx->driver_data;
1506 spin_lock(&nq->poll_lock);
1507 list_splice_init(&nq->poll_list, &list);
1508 list_for_each_entry(rq, &list, queuelist)
1509 blk_mq_set_request_complete(rq);
1510 spin_unlock(&nq->poll_lock);
1512 while (!list_empty(&list)) {
1513 struct nullb_cmd *cmd;
1514 struct request *req;
1516 req = list_first_entry(&list, struct request, queuelist);
1517 list_del_init(&req->queuelist);
1518 cmd = blk_mq_rq_to_pdu(req);
1519 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
1520 blk_rq_sectors(req));
1521 if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
1522 blk_mq_end_request_batch))
1523 blk_mq_end_request(req, cmd->error);
1530 static enum blk_eh_timer_return null_timeout_rq(struct request *rq)
1532 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
1533 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1535 if (hctx->type == HCTX_TYPE_POLL) {
1536 struct nullb_queue *nq = hctx->driver_data;
1538 spin_lock(&nq->poll_lock);
1539 /* The request may have completed meanwhile. */
1540 if (blk_mq_request_completed(rq)) {
1541 spin_unlock(&nq->poll_lock);
1544 list_del_init(&rq->queuelist);
1545 spin_unlock(&nq->poll_lock);
1548 pr_info("rq %p timed out\n", rq);
1551 * If the device is marked as blocking (i.e. memory backed or zoned
1552 * device), the submission path may be blocked waiting for resources
1553 * and cause real timeouts. For these real timeouts, the submission
1554 * path will complete the request using blk_mq_complete_request().
1555 * Only fake timeouts need to execute blk_mq_complete_request() here.
1557 cmd->error = BLK_STS_TIMEOUT;
1558 if (cmd->fake_timeout || hctx->type == HCTX_TYPE_POLL)
1559 blk_mq_complete_request(rq);
1563 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
1564 const struct blk_mq_queue_data *bd)
1566 struct request *rq = bd->rq;
1567 struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
1568 struct nullb_queue *nq = hctx->driver_data;
1569 sector_t nr_sectors = blk_rq_sectors(rq);
1570 sector_t sector = blk_rq_pos(rq);
1571 const bool is_poll = hctx->type == HCTX_TYPE_POLL;
1573 might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
1575 if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
1576 hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1577 cmd->timer.function = null_cmd_timer_expired;
1579 cmd->error = BLK_STS_OK;
1581 cmd->fake_timeout = should_timeout_request(rq) ||
1582 blk_should_fake_timeout(rq->q);
1584 if (should_requeue_request(rq)) {
1586 * Alternate between hitting the core BUSY path and the
1587 * driver-driven requeue path.
1589 nq->requeue_selection++;
1590 if (nq->requeue_selection & 1)
1591 return BLK_STS_RESOURCE;
1592 blk_mq_requeue_request(rq, true);
1596 if (test_bit(NULLB_DEV_FL_THROTTLED, &nq->dev->flags)) {
1597 blk_status_t sts = null_handle_throttled(cmd);
1599 if (sts != BLK_STS_OK)
1603 blk_mq_start_request(rq);
1606 spin_lock(&nq->poll_lock);
1607 list_add_tail(&rq->queuelist, &nq->poll_list);
1608 spin_unlock(&nq->poll_lock);
1611 if (cmd->fake_timeout)
1614 null_handle_cmd(cmd, sector, nr_sectors, req_op(rq));
1618 static void null_queue_rqs(struct request **rqlist)
1620 struct request *requeue_list = NULL;
1621 struct request **requeue_lastp = &requeue_list;
1622 struct blk_mq_queue_data bd = { };
1626 struct request *rq = rq_list_pop(rqlist);
1629 ret = null_queue_rq(rq->mq_hctx, &bd);
1630 if (ret != BLK_STS_OK)
1631 rq_list_add_tail(&requeue_lastp, rq);
1632 } while (!rq_list_empty(*rqlist));
1634 *rqlist = requeue_list;
1637 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
1639 nq->dev = nullb->dev;
1640 INIT_LIST_HEAD(&nq->poll_list);
1641 spin_lock_init(&nq->poll_lock);
1644 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
1645 unsigned int hctx_idx)
1647 struct nullb *nullb = hctx->queue->queuedata;
1648 struct nullb_queue *nq;
1650 if (should_init_hctx_fail(nullb->dev))
1653 nq = &nullb->queues[hctx_idx];
1654 hctx->driver_data = nq;
1655 null_init_queue(nullb, nq);
1660 static const struct blk_mq_ops null_mq_ops = {
1661 .queue_rq = null_queue_rq,
1662 .queue_rqs = null_queue_rqs,
1663 .complete = null_complete_rq,
1664 .timeout = null_timeout_rq,
1666 .map_queues = null_map_queues,
1667 .init_hctx = null_init_hctx,
1670 static void null_del_dev(struct nullb *nullb)
1672 struct nullb_device *dev;
1679 ida_free(&nullb_indexes, nullb->index);
1681 list_del_init(&nullb->list);
1683 del_gendisk(nullb->disk);
1685 if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
1686 hrtimer_cancel(&nullb->bw_timer);
1687 atomic_long_set(&nullb->cur_bytes, LONG_MAX);
1688 blk_mq_start_stopped_hw_queues(nullb->q, true);
1691 put_disk(nullb->disk);
1692 if (nullb->tag_set == &nullb->__tag_set)
1693 blk_mq_free_tag_set(nullb->tag_set);
1694 kfree(nullb->queues);
1695 if (null_cache_active(nullb))
1696 null_free_device_storage(nullb->dev, true);
1701 static void null_config_discard(struct nullb *nullb, struct queue_limits *lim)
1703 if (nullb->dev->discard == false)
1706 if (!nullb->dev->memory_backed) {
1707 nullb->dev->discard = false;
1708 pr_info("discard option is ignored without memory backing\n");
1712 if (nullb->dev->zoned) {
1713 nullb->dev->discard = false;
1714 pr_info("discard option is ignored in zoned mode\n");
1718 lim->max_hw_discard_sectors = UINT_MAX >> 9;
1721 static const struct block_device_operations null_ops = {
1722 .owner = THIS_MODULE,
1723 .report_zones = null_report_zones,
1726 static int setup_queues(struct nullb *nullb)
1728 int nqueues = nr_cpu_ids;
1731 nqueues += g_poll_queues;
1733 nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
1741 static int null_init_tag_set(struct blk_mq_tag_set *set, int poll_queues)
1743 set->ops = &null_mq_ops;
1744 set->cmd_size = sizeof(struct nullb_cmd);
1745 set->timeout = 5 * HZ;
1748 set->nr_hw_queues += poll_queues;
1751 return blk_mq_alloc_tag_set(set);
1754 static int null_init_global_tag_set(void)
1761 tag_set.nr_hw_queues = g_submit_queues;
1762 tag_set.queue_depth = g_hw_queue_depth;
1763 tag_set.numa_node = g_home_node;
1764 tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1766 tag_set.flags |= BLK_MQ_F_NO_SCHED;
1767 if (g_shared_tag_bitmap)
1768 tag_set.flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1770 tag_set.flags |= BLK_MQ_F_BLOCKING;
1772 error = null_init_tag_set(&tag_set, g_poll_queues);
1778 static int null_setup_tagset(struct nullb *nullb)
1780 if (nullb->dev->shared_tags) {
1781 nullb->tag_set = &tag_set;
1782 return null_init_global_tag_set();
1785 nullb->tag_set = &nullb->__tag_set;
1786 nullb->tag_set->driver_data = nullb;
1787 nullb->tag_set->nr_hw_queues = nullb->dev->submit_queues;
1788 nullb->tag_set->queue_depth = nullb->dev->hw_queue_depth;
1789 nullb->tag_set->numa_node = nullb->dev->home_node;
1790 nullb->tag_set->flags = BLK_MQ_F_SHOULD_MERGE;
1791 if (nullb->dev->no_sched)
1792 nullb->tag_set->flags |= BLK_MQ_F_NO_SCHED;
1793 if (nullb->dev->shared_tag_bitmap)
1794 nullb->tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
1795 if (nullb->dev->blocking)
1796 nullb->tag_set->flags |= BLK_MQ_F_BLOCKING;
1797 return null_init_tag_set(nullb->tag_set, nullb->dev->poll_queues);
1800 static int null_validate_conf(struct nullb_device *dev)
1802 if (dev->queue_mode == NULL_Q_RQ) {
1803 pr_err("legacy IO path is no longer available\n");
1806 if (dev->queue_mode == NULL_Q_BIO) {
1807 pr_err("BIO-based IO path is no longer available, using blk-mq instead.\n");
1808 dev->queue_mode = NULL_Q_MQ;
1811 dev->blocksize = round_down(dev->blocksize, 512);
1812 dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
1814 if (dev->use_per_node_hctx) {
1815 if (dev->submit_queues != nr_online_nodes)
1816 dev->submit_queues = nr_online_nodes;
1817 } else if (dev->submit_queues > nr_cpu_ids)
1818 dev->submit_queues = nr_cpu_ids;
1819 else if (dev->submit_queues == 0)
1820 dev->submit_queues = 1;
1821 dev->prev_submit_queues = dev->submit_queues;
1823 if (dev->poll_queues > g_poll_queues)
1824 dev->poll_queues = g_poll_queues;
1825 dev->prev_poll_queues = dev->poll_queues;
1826 dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
1828 /* Do memory allocation, so set blocking */
1829 if (dev->memory_backed)
1830 dev->blocking = true;
1831 else /* cache is meaningless */
1832 dev->cache_size = 0;
1833 dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
1835 dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
1838 (!dev->zone_size || !is_power_of_2(dev->zone_size))) {
1839 pr_err("zone_size must be power-of-two\n");
1846 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1847 static bool __null_setup_fault(struct fault_attr *attr, char *str)
1852 if (!setup_fault_attr(attr, str))
1860 static bool null_setup_fault(void)
1862 #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
1863 if (!__null_setup_fault(&null_timeout_attr, g_timeout_str))
1865 if (!__null_setup_fault(&null_requeue_attr, g_requeue_str))
1867 if (!__null_setup_fault(&null_init_hctx_attr, g_init_hctx_str))
1873 static int null_add_dev(struct nullb_device *dev)
1875 struct queue_limits lim = {
1876 .logical_block_size = dev->blocksize,
1877 .physical_block_size = dev->blocksize,
1878 .max_hw_sectors = dev->max_sectors,
1881 struct nullb *nullb;
1884 rv = null_validate_conf(dev);
1888 nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
1896 spin_lock_init(&nullb->lock);
1898 rv = setup_queues(nullb);
1900 goto out_free_nullb;
1902 rv = null_setup_tagset(nullb);
1904 goto out_cleanup_queues;
1906 if (dev->virt_boundary)
1907 lim.virt_boundary_mask = PAGE_SIZE - 1;
1908 null_config_discard(nullb, &lim);
1910 rv = null_init_zoned_dev(dev, &lim);
1912 goto out_cleanup_tags;
1915 nullb->disk = blk_mq_alloc_disk(nullb->tag_set, &lim, nullb);
1916 if (IS_ERR(nullb->disk)) {
1917 rv = PTR_ERR(nullb->disk);
1918 goto out_cleanup_zone;
1920 nullb->q = nullb->disk->queue;
1923 set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
1924 nullb_setup_bwtimer(nullb);
1927 if (dev->cache_size > 0) {
1928 set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
1929 blk_queue_write_cache(nullb->q, true, dev->fua);
1932 nullb->q->queuedata = nullb;
1933 blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
1936 rv = ida_alloc(&nullb_indexes, GFP_KERNEL);
1938 mutex_unlock(&lock);
1939 goto out_cleanup_disk;
1943 mutex_unlock(&lock);
1945 if (config_item_name(&dev->group.cg_item)) {
1946 /* Use configfs dir name as the device name */
1947 snprintf(nullb->disk_name, sizeof(nullb->disk_name),
1948 "%s", config_item_name(&dev->group.cg_item));
1950 sprintf(nullb->disk_name, "nullb%d", nullb->index);
1953 set_capacity(nullb->disk,
1954 ((sector_t)nullb->dev->size * SZ_1M) >> SECTOR_SHIFT);
1955 nullb->disk->major = null_major;
1956 nullb->disk->first_minor = nullb->index;
1957 nullb->disk->minors = 1;
1958 nullb->disk->fops = &null_ops;
1959 nullb->disk->private_data = nullb;
1960 strscpy_pad(nullb->disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
1962 if (nullb->dev->zoned) {
1963 rv = null_register_zoned_dev(nullb);
1968 rv = add_disk(nullb->disk);
1973 list_add_tail(&nullb->list, &nullb_list);
1974 mutex_unlock(&lock);
1976 pr_info("disk %s created\n", nullb->disk_name);
1981 ida_free(&nullb_indexes, nullb->index);
1983 null_free_zoned_dev(dev);
1985 put_disk(nullb->disk);
1987 if (nullb->tag_set == &nullb->__tag_set)
1988 blk_mq_free_tag_set(nullb->tag_set);
1990 kfree(nullb->queues);
1998 static struct nullb *null_find_dev_by_name(const char *name)
2000 struct nullb *nullb = NULL, *nb;
2003 list_for_each_entry(nb, &nullb_list, list) {
2004 if (strcmp(nb->disk_name, name) == 0) {
2009 mutex_unlock(&lock);
2014 static int null_create_dev(void)
2016 struct nullb_device *dev;
2019 dev = null_alloc_dev();
2023 ret = null_add_dev(dev);
2032 static void null_destroy_dev(struct nullb *nullb)
2034 struct nullb_device *dev = nullb->dev;
2036 null_del_dev(nullb);
2037 null_free_device_storage(dev, false);
2041 static int __init null_init(void)
2045 struct nullb *nullb;
2047 if (g_bs > PAGE_SIZE) {
2048 pr_warn("invalid block size\n");
2049 pr_warn("defaults block size to %lu\n", PAGE_SIZE);
2053 if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
2054 pr_err("invalid home_node value\n");
2055 g_home_node = NUMA_NO_NODE;
2058 if (!null_setup_fault())
2061 if (g_queue_mode == NULL_Q_RQ) {
2062 pr_err("legacy IO path is no longer available\n");
2066 if (g_use_per_node_hctx) {
2067 if (g_submit_queues != nr_online_nodes) {
2068 pr_warn("submit_queues param is set to %u.\n",
2070 g_submit_queues = nr_online_nodes;
2072 } else if (g_submit_queues > nr_cpu_ids) {
2073 g_submit_queues = nr_cpu_ids;
2074 } else if (g_submit_queues <= 0) {
2075 g_submit_queues = 1;
2078 config_group_init(&nullb_subsys.su_group);
2079 mutex_init(&nullb_subsys.su_mutex);
2081 ret = configfs_register_subsystem(&nullb_subsys);
2087 null_major = register_blkdev(0, "nullb");
2088 if (null_major < 0) {
2093 for (i = 0; i < nr_devices; i++) {
2094 ret = null_create_dev();
2099 pr_info("module loaded\n");
2103 while (!list_empty(&nullb_list)) {
2104 nullb = list_entry(nullb_list.next, struct nullb, list);
2105 null_destroy_dev(nullb);
2107 unregister_blkdev(null_major, "nullb");
2109 configfs_unregister_subsystem(&nullb_subsys);
2113 static void __exit null_exit(void)
2115 struct nullb *nullb;
2117 configfs_unregister_subsystem(&nullb_subsys);
2119 unregister_blkdev(null_major, "nullb");
2122 while (!list_empty(&nullb_list)) {
2123 nullb = list_entry(nullb_list.next, struct nullb, list);
2124 null_destroy_dev(nullb);
2126 mutex_unlock(&lock);
2129 blk_mq_free_tag_set(&tag_set);
2131 mutex_destroy(&lock);
2134 module_init(null_init);
2135 module_exit(null_exit);
2137 MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
2138 MODULE_LICENSE("GPL");