Commit | Line | Data |
---|---|---|
09c434b8 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
3bf2bd20 SL |
2 | /* |
3 | * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and | |
4 | * Shaohua Li <shli@fb.com> | |
5 | */ | |
f2298c04 | 6 | #include <linux/module.h> |
fc1bc354 | 7 | |
f2298c04 JA |
8 | #include <linux/moduleparam.h> |
9 | #include <linux/sched.h> | |
10 | #include <linux/fs.h> | |
f2298c04 | 11 | #include <linux/init.h> |
6dad38d3 | 12 | #include "null_blk.h" |
f2298c04 | 13 | |
5bcd0e0c SL |
14 | #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) |
15 | #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) | |
5bcd0e0c SL |
16 | #define SECTOR_MASK (PAGE_SECTORS - 1) |
17 | ||
18 | #define FREE_BATCH 16 | |
19 | ||
eff2c4f1 SL |
20 | #define TICKS_PER_SEC 50ULL |
21 | #define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC) | |
22 | ||
33f782c4 | 23 | #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION |
93b57046 | 24 | static DECLARE_FAULT_ATTR(null_timeout_attr); |
24941b90 | 25 | static DECLARE_FAULT_ATTR(null_requeue_attr); |
33f782c4 | 26 | #endif |
93b57046 | 27 | |
eff2c4f1 SL |
28 | static inline u64 mb_per_tick(int mbps) |
29 | { | |
30 | return (1 << 20) / TICKS_PER_SEC * ((u64) mbps); | |
31 | } | |
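A quick sanity check of the budget above may help: mb_per_tick() converts a MB/s limit into the number of bytes replenished per 20 ms timer tick. The snippet below is a stand-alone, user-space illustration that mirrors the definitions above; it is not driver code.

```c
/* Stand-alone illustration of the throttling budget; not driver code. */
#include <assert.h>
#include <stdint.h>

#define TICKS_PER_SEC 50ULL

static inline uint64_t mb_per_tick(int mbps)
{
	return (1 << 20) / TICKS_PER_SEC * ((uint64_t) mbps);
}

int main(void)
{
	/* 100 MB/s over 50 ticks/s: 1048576 / 50 = 20971 (integer division),
	 * so just under 2 MiB of budget is replenished every 20 ms tick. */
	assert(mb_per_tick(100) == 2097100ULL);
	return 0;
}
```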
f2298c04 | 32 | |
3bf2bd20 SL |
33 | /* |
34 | * Status flags for nullb_device. | |
35 | * | |
36 | * CONFIGURED: Device has been configured and turned on. Cannot reconfigure. | |
37 | * UP: Device is currently on and visible in userspace. | |
eff2c4f1 | 38 | * THROTTLED: Device is being throttled. |
deb78b41 | 39 | * CACHE: Device is using a write-back cache. |
3bf2bd20 SL |
40 | */ |
41 | enum nullb_device_flags { | |
42 | NULLB_DEV_FL_CONFIGURED = 0, | |
43 | NULLB_DEV_FL_UP = 1, | |
eff2c4f1 | 44 | NULLB_DEV_FL_THROTTLED = 2, |
deb78b41 | 45 | NULLB_DEV_FL_CACHE = 3, |
3bf2bd20 SL |
46 | }; |
47 | ||
66231ad3 | 48 | #define MAP_SZ ((PAGE_SIZE >> SECTOR_SHIFT) + 2) |
5bcd0e0c SL |
49 | /* |
50 | * nullb_page is a page in memory for nullb devices. | |
51 | * | |
52 | * @page: The page holding the data. | |
53 | * @bitmap: The bitmap represents which sector in the page has data. | |
54 | * Each bit represents one block size. For example, sector 8 | |
55 | * will use the 7th bit. | |
deb78b41 SL |
56 | * The highest 2 bits of the bitmap are for special purposes. LOCK means the cache
57 | * page is being flushed to storage. FREE means the cache page is freed and
58 | * should be skipped from flushing to storage. Please see
59 | * null_make_cache_space().
5bcd0e0c SL |
60 | */ |
61 | struct nullb_page { | |
62 | struct page *page; | |
66231ad3 | 63 | DECLARE_BITMAP(bitmap, MAP_SZ); |
5bcd0e0c | 64 | }; |
66231ad3 ML |
65 | #define NULLB_PAGE_LOCK (MAP_SZ - 1) |
66 | #define NULLB_PAGE_FREE (MAP_SZ - 2) | |
5bcd0e0c | 67 | |
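As a reading aid, here is how the bitmap constants above work out on a typical 4 KiB-page system; this is a sketch of the layout, not additional driver code.

```c
/*
 * With PAGE_SIZE = 4096 and SECTOR_SHIFT = 9:
 *
 *   MAP_SZ   = (4096 >> 9) + 2 = 10 bits
 *   bits 0-7 = data-present bits, one per 512-byte sector slot in the page
 *              (set at the start of each written block in copy_to_nullb())
 *   bit 8    = NULLB_PAGE_FREE - cache page was freed, skip it when flushing
 *   bit 9    = NULLB_PAGE_LOCK - cache page is currently being flushed
 */
```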
f2298c04 JA |
68 | static LIST_HEAD(nullb_list); |
69 | static struct mutex lock; | |
70 | static int null_major; | |
94bc02e3 | 71 | static DEFINE_IDA(nullb_indexes); |
82f402fe | 72 | static struct blk_mq_tag_set tag_set; |
f2298c04 | 73 | |
f2298c04 JA |
74 | enum { |
75 | NULL_IRQ_NONE = 0, | |
76 | NULL_IRQ_SOFTIRQ = 1, | |
77 | NULL_IRQ_TIMER = 2, | |
ce2c350b | 78 | }; |
f2298c04 | 79 | |
ce2c350b | 80 | enum { |
f2298c04 JA |
81 | NULL_Q_BIO = 0, |
82 | NULL_Q_RQ = 1, | |
83 | NULL_Q_MQ = 2, | |
84 | }; | |
85 | ||
b3cffc38 | 86 | static int g_no_sched; |
5657a819 | 87 | module_param_named(no_sched, g_no_sched, int, 0444); |
b3cffc38 | 88 | MODULE_PARM_DESC(no_sched, "No io scheduler"); |
89 | ||
2984c868 | 90 | static int g_submit_queues = 1; |
5657a819 | 91 | module_param_named(submit_queues, g_submit_queues, int, 0444); |
f2298c04 JA |
92 | MODULE_PARM_DESC(submit_queues, "Number of submission queues"); |
93 | ||
2984c868 | 94 | static int g_home_node = NUMA_NO_NODE; |
5657a819 | 95 | module_param_named(home_node, g_home_node, int, 0444); |
f2298c04 JA |
96 | MODULE_PARM_DESC(home_node, "Home node for the device"); |
97 | ||
33f782c4 | 98 | #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION |
93b57046 | 99 | static char g_timeout_str[80]; |
5657a819 | 100 | module_param_string(timeout, g_timeout_str, sizeof(g_timeout_str), 0444); |
24941b90 JA |
101 | |
102 | static char g_requeue_str[80]; | |
5657a819 | 103 | module_param_string(requeue, g_requeue_str, sizeof(g_requeue_str), 0444); |
33f782c4 | 104 | #endif |
93b57046 | 105 | |
2984c868 | 106 | static int g_queue_mode = NULL_Q_MQ; |
709c8667 MB |
107 | |
108 | static int null_param_store_val(const char *str, int *val, int min, int max) | |
109 | { | |
110 | int ret, new_val; | |
111 | ||
112 | ret = kstrtoint(str, 10, &new_val); | |
113 | if (ret) | |
114 | return -EINVAL; | |
115 | ||
116 | if (new_val < min || new_val > max) | |
117 | return -EINVAL; | |
118 | ||
119 | *val = new_val; | |
120 | return 0; | |
121 | } | |
122 | ||
123 | static int null_set_queue_mode(const char *str, const struct kernel_param *kp) | |
124 | { | |
2984c868 | 125 | return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ); |
709c8667 MB |
126 | } |
127 | ||
9c27847d | 128 | static const struct kernel_param_ops null_queue_mode_param_ops = { |
709c8667 MB |
129 | .set = null_set_queue_mode, |
130 | .get = param_get_int, | |
131 | }; | |
132 | ||
5657a819 | 133 | device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, 0444); |
54ae81cd | 134 | MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)"); |
f2298c04 | 135 | |
2984c868 | 136 | static int g_gb = 250; |
5657a819 | 137 | module_param_named(gb, g_gb, int, 0444); |
f2298c04 JA |
138 | MODULE_PARM_DESC(gb, "Size in GB"); |
139 | ||
2984c868 | 140 | static int g_bs = 512; |
5657a819 | 141 | module_param_named(bs, g_bs, int, 0444); |
f2298c04 JA |
142 | MODULE_PARM_DESC(bs, "Block size (in bytes)"); |
143 | ||
82f402fe | 144 | static int nr_devices = 1; |
5657a819 | 145 | module_param(nr_devices, int, 0444); |
f2298c04 JA |
146 | MODULE_PARM_DESC(nr_devices, "Number of devices to register"); |
147 | ||
2984c868 | 148 | static bool g_blocking; |
5657a819 | 149 | module_param_named(blocking, g_blocking, bool, 0444); |
db5bcf87 JA |
150 | MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device"); |
151 | ||
82f402fe | 152 | static bool shared_tags; |
5657a819 | 153 | module_param(shared_tags, bool, 0444); |
82f402fe JA |
154 | MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq"); |
155 | ||
2984c868 | 156 | static int g_irqmode = NULL_IRQ_SOFTIRQ; |
709c8667 MB |
157 | |
158 | static int null_set_irqmode(const char *str, const struct kernel_param *kp) | |
159 | { | |
2984c868 | 160 | return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE, |
709c8667 MB |
161 | NULL_IRQ_TIMER); |
162 | } | |
163 | ||
9c27847d | 164 | static const struct kernel_param_ops null_irqmode_param_ops = { |
709c8667 MB |
165 | .set = null_set_irqmode, |
166 | .get = param_get_int, | |
167 | }; | |
168 | ||
5657a819 | 169 | device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, 0444); |
f2298c04 JA |
170 | MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer"); |
171 | ||
2984c868 | 172 | static unsigned long g_completion_nsec = 10000; |
5657a819 | 173 | module_param_named(completion_nsec, g_completion_nsec, ulong, 0444); |
f2298c04 JA |
174 | MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns"); |
175 | ||
2984c868 | 176 | static int g_hw_queue_depth = 64; |
5657a819 | 177 | module_param_named(hw_queue_depth, g_hw_queue_depth, int, 0444); |
f2298c04 JA |
178 | MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64"); |
179 | ||
2984c868 | 180 | static bool g_use_per_node_hctx; |
5657a819 | 181 | module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444); |
20005244 | 182 | MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); |
f2298c04 | 183 | |
ca4b2a01 MB |
184 | static bool g_zoned; |
185 | module_param_named(zoned, g_zoned, bool, S_IRUGO); | |
186 | MODULE_PARM_DESC(zoned, "Make the device a host-managed zoned block device. Default: false"); | |
187 | ||
188 | static unsigned long g_zone_size = 256; | |
189 | module_param_named(zone_size, g_zone_size, ulong, S_IRUGO); | |
190 | MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. Must be a power of two. Default: 256"); | |
191 | ||
ea2c18e1 MS |
192 | static unsigned int g_zone_nr_conv; |
193 | module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444); | |
194 | MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0"); | |
195 | ||
3bf2bd20 SL |
196 | static struct nullb_device *null_alloc_dev(void); |
197 | static void null_free_dev(struct nullb_device *dev); | |
cedcafad SL |
198 | static void null_del_dev(struct nullb *nullb); |
199 | static int null_add_dev(struct nullb_device *dev); | |
deb78b41 | 200 | static void null_free_device_storage(struct nullb_device *dev, bool is_cache); |
3bf2bd20 SL |
201 | |
202 | static inline struct nullb_device *to_nullb_device(struct config_item *item) | |
203 | { | |
204 | return item ? container_of(item, struct nullb_device, item) : NULL; | |
205 | } | |
206 | ||
207 | static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page) | |
208 | { | |
209 | return snprintf(page, PAGE_SIZE, "%u\n", val); | |
210 | } | |
211 | ||
212 | static inline ssize_t nullb_device_ulong_attr_show(unsigned long val, | |
213 | char *page) | |
214 | { | |
215 | return snprintf(page, PAGE_SIZE, "%lu\n", val); | |
216 | } | |
217 | ||
218 | static inline ssize_t nullb_device_bool_attr_show(bool val, char *page) | |
219 | { | |
220 | return snprintf(page, PAGE_SIZE, "%u\n", val); | |
221 | } | |
222 | ||
223 | static ssize_t nullb_device_uint_attr_store(unsigned int *val, | |
224 | const char *page, size_t count) | |
225 | { | |
226 | unsigned int tmp; | |
227 | int result; | |
228 | ||
229 | result = kstrtouint(page, 0, &tmp); | |
230 | if (result) | |
231 | return result; | |
232 | ||
233 | *val = tmp; | |
234 | return count; | |
235 | } | |
236 | ||
237 | static ssize_t nullb_device_ulong_attr_store(unsigned long *val, | |
238 | const char *page, size_t count) | |
239 | { | |
240 | int result; | |
241 | unsigned long tmp; | |
242 | ||
243 | result = kstrtoul(page, 0, &tmp); | |
244 | if (result) | |
245 | return result; | |
246 | ||
247 | *val = tmp; | |
248 | return count; | |
249 | } | |
250 | ||
251 | static ssize_t nullb_device_bool_attr_store(bool *val, const char *page, | |
252 | size_t count) | |
253 | { | |
254 | bool tmp; | |
255 | int result; | |
256 | ||
257 | result = kstrtobool(page, &tmp); | |
258 | if (result) | |
259 | return result; | |
260 | ||
261 | *val = tmp; | |
262 | return count; | |
263 | } | |
264 | ||
265 | /* The following macro should only be used with TYPE = {uint, ulong, bool}. */ | |
266 | #define NULLB_DEVICE_ATTR(NAME, TYPE) \ | |
267 | static ssize_t \ | |
268 | nullb_device_##NAME##_show(struct config_item *item, char *page) \ | |
269 | { \ | |
270 | return nullb_device_##TYPE##_attr_show( \ | |
271 | to_nullb_device(item)->NAME, page); \ | |
272 | } \ | |
273 | static ssize_t \ | |
274 | nullb_device_##NAME##_store(struct config_item *item, const char *page, \ | |
275 | size_t count) \ | |
276 | { \ | |
277 | if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \ | |
278 | return -EBUSY; \ | |
279 | return nullb_device_##TYPE##_attr_store( \ | |
280 | &to_nullb_device(item)->NAME, page, count); \ | |
281 | } \ | |
282 | CONFIGFS_ATTR(nullb_device_, NAME); | |
283 | ||
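For readers unfamiliar with the token pasting here, the first invocation below, NULLB_DEVICE_ATTR(size, ulong), expands to roughly the following. This is a hand-expanded sketch shown for illustration only; the real definitions come from the macro above.

```c
static ssize_t nullb_device_size_show(struct config_item *item, char *page)
{
	return nullb_device_ulong_attr_show(to_nullb_device(item)->size, page);
}

static ssize_t nullb_device_size_store(struct config_item *item,
		const char *page, size_t count)
{
	/* attributes become read-only once the device is configured */
	if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags))
		return -EBUSY;
	return nullb_device_ulong_attr_store(&to_nullb_device(item)->size,
			page, count);
}
CONFIGFS_ATTR(nullb_device_, size);
```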
284 | NULLB_DEVICE_ATTR(size, ulong); | |
285 | NULLB_DEVICE_ATTR(completion_nsec, ulong); | |
286 | NULLB_DEVICE_ATTR(submit_queues, uint); | |
287 | NULLB_DEVICE_ATTR(home_node, uint); | |
288 | NULLB_DEVICE_ATTR(queue_mode, uint); | |
289 | NULLB_DEVICE_ATTR(blocksize, uint); | |
290 | NULLB_DEVICE_ATTR(irqmode, uint); | |
291 | NULLB_DEVICE_ATTR(hw_queue_depth, uint); | |
cedcafad | 292 | NULLB_DEVICE_ATTR(index, uint); |
3bf2bd20 SL |
293 | NULLB_DEVICE_ATTR(blocking, bool); |
294 | NULLB_DEVICE_ATTR(use_per_node_hctx, bool); | |
5bcd0e0c | 295 | NULLB_DEVICE_ATTR(memory_backed, bool); |
306eb6b4 | 296 | NULLB_DEVICE_ATTR(discard, bool); |
eff2c4f1 | 297 | NULLB_DEVICE_ATTR(mbps, uint); |
deb78b41 | 298 | NULLB_DEVICE_ATTR(cache_size, ulong); |
ca4b2a01 MB |
299 | NULLB_DEVICE_ATTR(zoned, bool); |
300 | NULLB_DEVICE_ATTR(zone_size, ulong); | |
ea2c18e1 | 301 | NULLB_DEVICE_ATTR(zone_nr_conv, uint); |
3bf2bd20 | 302 | |
cedcafad SL |
303 | static ssize_t nullb_device_power_show(struct config_item *item, char *page) |
304 | { | |
305 | return nullb_device_bool_attr_show(to_nullb_device(item)->power, page); | |
306 | } | |
307 | ||
308 | static ssize_t nullb_device_power_store(struct config_item *item, | |
309 | const char *page, size_t count) | |
310 | { | |
311 | struct nullb_device *dev = to_nullb_device(item); | |
312 | bool newp = false; | |
313 | ssize_t ret; | |
314 | ||
315 | ret = nullb_device_bool_attr_store(&newp, page, count); | |
316 | if (ret < 0) | |
317 | return ret; | |
318 | ||
319 | if (!dev->power && newp) { | |
320 | if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags)) | |
321 | return count; | |
322 | if (null_add_dev(dev)) { | |
323 | clear_bit(NULLB_DEV_FL_UP, &dev->flags); | |
324 | return -ENOMEM; | |
325 | } | |
326 | ||
327 | set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); | |
328 | dev->power = newp; | |
b3c30512 | 329 | } else if (dev->power && !newp) { |
cedcafad SL |
330 | mutex_lock(&lock); |
331 | dev->power = newp; | |
332 | null_del_dev(dev->nullb); | |
333 | mutex_unlock(&lock); | |
334 | clear_bit(NULLB_DEV_FL_UP, &dev->flags); | |
00a8cdb8 | 335 | clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); |
cedcafad SL |
336 | } |
337 | ||
338 | return count; | |
339 | } | |
340 | ||
341 | CONFIGFS_ATTR(nullb_device_, power); | |
342 | ||
2f54a613 SL |
343 | static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page) |
344 | { | |
345 | struct nullb_device *t_dev = to_nullb_device(item); | |
346 | ||
347 | return badblocks_show(&t_dev->badblocks, page, 0); | |
348 | } | |
349 | ||
350 | static ssize_t nullb_device_badblocks_store(struct config_item *item, | |
351 | const char *page, size_t count) | |
352 | { | |
353 | struct nullb_device *t_dev = to_nullb_device(item); | |
354 | char *orig, *buf, *tmp; | |
355 | u64 start, end; | |
356 | int ret; | |
357 | ||
358 | orig = kstrndup(page, count, GFP_KERNEL); | |
359 | if (!orig) | |
360 | return -ENOMEM; | |
361 | ||
362 | buf = strstrip(orig); | |
363 | ||
364 | ret = -EINVAL; | |
365 | if (buf[0] != '+' && buf[0] != '-') | |
366 | goto out; | |
367 | tmp = strchr(&buf[1], '-'); | |
368 | if (!tmp) | |
369 | goto out; | |
370 | *tmp = '\0'; | |
371 | ret = kstrtoull(buf + 1, 0, &start); | |
372 | if (ret) | |
373 | goto out; | |
374 | ret = kstrtoull(tmp + 1, 0, &end); | |
375 | if (ret) | |
376 | goto out; | |
377 | ret = -EINVAL; | |
378 | if (start > end) | |
379 | goto out; | |
380 | /* enable badblocks */ | |
381 | cmpxchg(&t_dev->badblocks.shift, -1, 0); | |
382 | if (buf[0] == '+') | |
383 | ret = badblocks_set(&t_dev->badblocks, start, | |
384 | end - start + 1, 1); | |
385 | else | |
386 | ret = badblocks_clear(&t_dev->badblocks, start, | |
387 | end - start + 1); | |
388 | if (ret == 0) | |
389 | ret = count; | |
390 | out: | |
391 | kfree(orig); | |
392 | return ret; | |
393 | } | |
394 | CONFIGFS_ATTR(nullb_device_, badblocks); | |
395 | ||
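A short note on the accepted input, derived from the parser above: writes to the badblocks attribute take a leading '+' or '-' followed by an inclusive sector range.

```c
/*
 * badblocks attribute syntax (see nullb_device_badblocks_store() above):
 *
 *   "+<first_sector>-<last_sector>"   mark the range as bad
 *   "-<first_sector>-<last_sector>"   clear the range again
 *
 * e.g. writing "+0-7" makes I/O touching the first eight sectors complete
 * with BLK_STS_IOERR (see the badblocks_check() call in null_handle_cmd()).
 */
```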
3bf2bd20 SL |
396 | static struct configfs_attribute *nullb_device_attrs[] = { |
397 | &nullb_device_attr_size, | |
398 | &nullb_device_attr_completion_nsec, | |
399 | &nullb_device_attr_submit_queues, | |
400 | &nullb_device_attr_home_node, | |
401 | &nullb_device_attr_queue_mode, | |
402 | &nullb_device_attr_blocksize, | |
403 | &nullb_device_attr_irqmode, | |
404 | &nullb_device_attr_hw_queue_depth, | |
cedcafad | 405 | &nullb_device_attr_index, |
3bf2bd20 SL |
406 | &nullb_device_attr_blocking, |
407 | &nullb_device_attr_use_per_node_hctx, | |
cedcafad | 408 | &nullb_device_attr_power, |
5bcd0e0c | 409 | &nullb_device_attr_memory_backed, |
306eb6b4 | 410 | &nullb_device_attr_discard, |
eff2c4f1 | 411 | &nullb_device_attr_mbps, |
deb78b41 | 412 | &nullb_device_attr_cache_size, |
2f54a613 | 413 | &nullb_device_attr_badblocks, |
ca4b2a01 MB |
414 | &nullb_device_attr_zoned, |
415 | &nullb_device_attr_zone_size, | |
ea2c18e1 | 416 | &nullb_device_attr_zone_nr_conv, |
3bf2bd20 SL |
417 | NULL, |
418 | }; | |
419 | ||
420 | static void nullb_device_release(struct config_item *item) | |
421 | { | |
5bcd0e0c SL |
422 | struct nullb_device *dev = to_nullb_device(item); |
423 | ||
deb78b41 | 424 | null_free_device_storage(dev, false); |
5bcd0e0c | 425 | null_free_dev(dev); |
3bf2bd20 SL |
426 | } |
427 | ||
428 | static struct configfs_item_operations nullb_device_ops = { | |
429 | .release = nullb_device_release, | |
430 | }; | |
431 | ||
e1919dff | 432 | static const struct config_item_type nullb_device_type = { |
3bf2bd20 SL |
433 | .ct_item_ops = &nullb_device_ops, |
434 | .ct_attrs = nullb_device_attrs, | |
435 | .ct_owner = THIS_MODULE, | |
436 | }; | |
437 | ||
438 | static struct | |
439 | config_item *nullb_group_make_item(struct config_group *group, const char *name) | |
440 | { | |
441 | struct nullb_device *dev; | |
442 | ||
443 | dev = null_alloc_dev(); | |
444 | if (!dev) | |
445 | return ERR_PTR(-ENOMEM); | |
446 | ||
447 | config_item_init_type_name(&dev->item, name, &nullb_device_type); | |
448 | ||
449 | return &dev->item; | |
450 | } | |
451 | ||
452 | static void | |
453 | nullb_group_drop_item(struct config_group *group, struct config_item *item) | |
454 | { | |
cedcafad SL |
455 | struct nullb_device *dev = to_nullb_device(item); |
456 | ||
457 | if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) { | |
458 | mutex_lock(&lock); | |
459 | dev->power = false; | |
460 | null_del_dev(dev->nullb); | |
461 | mutex_unlock(&lock); | |
462 | } | |
463 | ||
3bf2bd20 SL |
464 | config_item_put(item); |
465 | } | |
466 | ||
467 | static ssize_t memb_group_features_show(struct config_item *item, char *page) | |
468 | { | |
ca4b2a01 | 469 | return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n"); |
3bf2bd20 SL |
470 | } |
471 | ||
472 | CONFIGFS_ATTR_RO(memb_group_, features); | |
473 | ||
474 | static struct configfs_attribute *nullb_group_attrs[] = { | |
475 | &memb_group_attr_features, | |
476 | NULL, | |
477 | }; | |
478 | ||
479 | static struct configfs_group_operations nullb_group_ops = { | |
480 | .make_item = nullb_group_make_item, | |
481 | .drop_item = nullb_group_drop_item, | |
482 | }; | |
483 | ||
e1919dff | 484 | static const struct config_item_type nullb_group_type = { |
3bf2bd20 SL |
485 | .ct_group_ops = &nullb_group_ops, |
486 | .ct_attrs = nullb_group_attrs, | |
487 | .ct_owner = THIS_MODULE, | |
488 | }; | |
489 | ||
490 | static struct configfs_subsystem nullb_subsys = { | |
491 | .su_group = { | |
492 | .cg_item = { | |
493 | .ci_namebuf = "nullb", | |
494 | .ci_type = &nullb_group_type, | |
495 | }, | |
496 | }, | |
497 | }; | |
498 | ||
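To make the configfs plumbing above concrete, a minimal user-space sketch follows. It assumes configfs is mounted at the conventional /sys/kernel/config and that the module is loaded; error handling is omitted and the item name "test" is arbitrary.

```c
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

static void set_attr(const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/config/nullb/test/%s", attr);
	f = fopen(path, "w");
	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	/* creates a nullb_device via nullb_group_make_item() */
	mkdir("/sys/kernel/config/nullb/test", 0755);

	set_attr("memory_backed", "1");
	set_attr("size", "1024");	/* device size, in MB */
	set_attr("power", "1");		/* instantiates the disk (null_add_dev) */
	return 0;
}
```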
deb78b41 SL |
499 | static inline int null_cache_active(struct nullb *nullb) |
500 | { | |
501 | return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); | |
502 | } | |
503 | ||
2984c868 SL |
504 | static struct nullb_device *null_alloc_dev(void) |
505 | { | |
506 | struct nullb_device *dev; | |
507 | ||
508 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | |
509 | if (!dev) | |
510 | return NULL; | |
5bcd0e0c | 511 | INIT_RADIX_TREE(&dev->data, GFP_ATOMIC); |
deb78b41 | 512 | INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC); |
2f54a613 SL |
513 | if (badblocks_init(&dev->badblocks, 0)) { |
514 | kfree(dev); | |
515 | return NULL; | |
516 | } | |
517 | ||
2984c868 SL |
518 | dev->size = g_gb * 1024; |
519 | dev->completion_nsec = g_completion_nsec; | |
520 | dev->submit_queues = g_submit_queues; | |
521 | dev->home_node = g_home_node; | |
522 | dev->queue_mode = g_queue_mode; | |
523 | dev->blocksize = g_bs; | |
524 | dev->irqmode = g_irqmode; | |
525 | dev->hw_queue_depth = g_hw_queue_depth; | |
2984c868 SL |
526 | dev->blocking = g_blocking; |
527 | dev->use_per_node_hctx = g_use_per_node_hctx; | |
ca4b2a01 MB |
528 | dev->zoned = g_zoned; |
529 | dev->zone_size = g_zone_size; | |
ea2c18e1 | 530 | dev->zone_nr_conv = g_zone_nr_conv; |
2984c868 SL |
531 | return dev; |
532 | } | |
533 | ||
534 | static void null_free_dev(struct nullb_device *dev) | |
535 | { | |
1addb798 DD |
536 | if (!dev) |
537 | return; | |
538 | ||
ca4b2a01 | 539 | null_zone_exit(dev); |
1addb798 | 540 | badblocks_exit(&dev->badblocks); |
2984c868 SL |
541 | kfree(dev); |
542 | } | |
543 | ||
f2298c04 JA |
544 | static void put_tag(struct nullb_queue *nq, unsigned int tag) |
545 | { | |
546 | clear_bit_unlock(tag, nq->tag_map); | |
547 | ||
548 | if (waitqueue_active(&nq->wait)) | |
549 | wake_up(&nq->wait); | |
550 | } | |
551 | ||
552 | static unsigned int get_tag(struct nullb_queue *nq) | |
553 | { | |
554 | unsigned int tag; | |
555 | ||
556 | do { | |
557 | tag = find_first_zero_bit(nq->tag_map, nq->queue_depth); | |
558 | if (tag >= nq->queue_depth) | |
559 | return -1U; | |
560 | } while (test_and_set_bit_lock(tag, nq->tag_map)); | |
561 | ||
562 | return tag; | |
563 | } | |
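A brief note on the tag allocator above, added for clarity:

```c
/*
 * get_tag() is a lock-free bitmap allocator: find_first_zero_bit() is only
 * a hint, and if another CPU claims the same bit first,
 * test_and_set_bit_lock() fails and the loop retries.  -1U means "no tag
 * free"; alloc_cmd() then sleeps on nq->wait until put_tag() wakes it.
 */
```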
564 | ||
565 | static void free_cmd(struct nullb_cmd *cmd) | |
566 | { | |
567 | put_tag(cmd->nq, cmd->tag); | |
568 | } | |
569 | ||
3c395a96 PV |
570 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer); |
571 | ||
f2298c04 JA |
572 | static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq) |
573 | { | |
574 | struct nullb_cmd *cmd; | |
575 | unsigned int tag; | |
576 | ||
577 | tag = get_tag(nq); | |
578 | if (tag != -1U) { | |
579 | cmd = &nq->cmds[tag]; | |
580 | cmd->tag = tag; | |
581 | cmd->nq = nq; | |
2984c868 | 582 | if (nq->dev->irqmode == NULL_IRQ_TIMER) { |
3c395a96 PV |
583 | hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, |
584 | HRTIMER_MODE_REL); | |
585 | cmd->timer.function = null_cmd_timer_expired; | |
586 | } | |
f2298c04 JA |
587 | return cmd; |
588 | } | |
589 | ||
590 | return NULL; | |
591 | } | |
592 | ||
593 | static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait) | |
594 | { | |
595 | struct nullb_cmd *cmd; | |
596 | DEFINE_WAIT(wait); | |
597 | ||
598 | cmd = __alloc_cmd(nq); | |
599 | if (cmd || !can_wait) | |
600 | return cmd; | |
601 | ||
602 | do { | |
603 | prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE); | |
604 | cmd = __alloc_cmd(nq); | |
605 | if (cmd) | |
606 | break; | |
607 | ||
608 | io_schedule(); | |
609 | } while (1); | |
610 | ||
611 | finish_wait(&nq->wait, &wait); | |
612 | return cmd; | |
613 | } | |
614 | ||
615 | static void end_cmd(struct nullb_cmd *cmd) | |
616 | { | |
2984c868 | 617 | int queue_mode = cmd->nq->dev->queue_mode; |
cf8ecc5a | 618 | |
ce2c350b CH |
619 | switch (queue_mode) { |
620 | case NULL_Q_MQ: | |
5bcd0e0c | 621 | blk_mq_end_request(cmd->rq, cmd->error); |
ce2c350b | 622 | return; |
ce2c350b | 623 | case NULL_Q_BIO: |
5bcd0e0c | 624 | cmd->bio->bi_status = cmd->error; |
4246a0b6 | 625 | bio_endio(cmd->bio); |
48cc661e | 626 | break; |
ce2c350b | 627 | } |
f2298c04 | 628 | |
48cc661e | 629 | free_cmd(cmd); |
cf8ecc5a AA |
630 | } |
631 | ||
632 | static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) | |
633 | { | |
634 | end_cmd(container_of(timer, struct nullb_cmd, timer)); | |
f2298c04 JA |
635 | |
636 | return HRTIMER_NORESTART; | |
637 | } | |
638 | ||
639 | static void null_cmd_end_timer(struct nullb_cmd *cmd) | |
640 | { | |
2984c868 | 641 | ktime_t kt = cmd->nq->dev->completion_nsec; |
f2298c04 | 642 | |
3c395a96 | 643 | hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL); |
f2298c04 JA |
644 | } |
645 | ||
49f66136 | 646 | static void null_complete_rq(struct request *rq) |
f2298c04 | 647 | { |
49f66136 | 648 | end_cmd(blk_mq_rq_to_pdu(rq)); |
f2298c04 JA |
649 | } |
650 | ||
5bcd0e0c SL |
651 | static struct nullb_page *null_alloc_page(gfp_t gfp_flags) |
652 | { | |
653 | struct nullb_page *t_page; | |
654 | ||
655 | t_page = kmalloc(sizeof(struct nullb_page), gfp_flags); | |
656 | if (!t_page) | |
657 | goto out; | |
658 | ||
659 | t_page->page = alloc_pages(gfp_flags, 0); | |
660 | if (!t_page->page) | |
661 | goto out_freepage; | |
662 | ||
66231ad3 | 663 | memset(t_page->bitmap, 0, sizeof(t_page->bitmap)); |
5bcd0e0c SL |
664 | return t_page; |
665 | out_freepage: | |
666 | kfree(t_page); | |
667 | out: | |
668 | return NULL; | |
669 | } | |
670 | ||
671 | static void null_free_page(struct nullb_page *t_page) | |
672 | { | |
66231ad3 ML |
673 | __set_bit(NULLB_PAGE_FREE, t_page->bitmap); |
674 | if (test_bit(NULLB_PAGE_LOCK, t_page->bitmap)) | |
deb78b41 | 675 | return; |
5bcd0e0c SL |
676 | __free_page(t_page->page); |
677 | kfree(t_page); | |
678 | } | |
679 | ||
66231ad3 ML |
680 | static bool null_page_empty(struct nullb_page *page) |
681 | { | |
682 | int size = MAP_SZ - 2; | |
683 | ||
684 | return find_first_bit(page->bitmap, size) == size; | |
685 | } | |
686 | ||
deb78b41 SL |
687 | static void null_free_sector(struct nullb *nullb, sector_t sector, |
688 | bool is_cache) | |
5bcd0e0c SL |
689 | { |
690 | unsigned int sector_bit; | |
691 | u64 idx; | |
692 | struct nullb_page *t_page, *ret; | |
693 | struct radix_tree_root *root; | |
694 | ||
deb78b41 | 695 | root = is_cache ? &nullb->dev->cache : &nullb->dev->data; |
5bcd0e0c SL |
696 | idx = sector >> PAGE_SECTORS_SHIFT; |
697 | sector_bit = (sector & SECTOR_MASK); | |
698 | ||
699 | t_page = radix_tree_lookup(root, idx); | |
700 | if (t_page) { | |
66231ad3 | 701 | __clear_bit(sector_bit, t_page->bitmap); |
5bcd0e0c | 702 | |
66231ad3 | 703 | if (null_page_empty(t_page)) { |
5bcd0e0c SL |
704 | ret = radix_tree_delete_item(root, idx, t_page); |
705 | WARN_ON(ret != t_page); | |
706 | null_free_page(ret); | |
deb78b41 SL |
707 | if (is_cache) |
708 | nullb->dev->curr_cache -= PAGE_SIZE; | |
5bcd0e0c SL |
709 | } |
710 | } | |
711 | } | |
712 | ||
713 | static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx, | |
deb78b41 | 714 | struct nullb_page *t_page, bool is_cache) |
5bcd0e0c SL |
715 | { |
716 | struct radix_tree_root *root; | |
717 | ||
deb78b41 | 718 | root = is_cache ? &nullb->dev->cache : &nullb->dev->data; |
5bcd0e0c SL |
719 | |
720 | if (radix_tree_insert(root, idx, t_page)) { | |
721 | null_free_page(t_page); | |
722 | t_page = radix_tree_lookup(root, idx); | |
723 | WARN_ON(!t_page || t_page->page->index != idx); | |
deb78b41 SL |
724 | } else if (is_cache) |
725 | nullb->dev->curr_cache += PAGE_SIZE; | |
5bcd0e0c SL |
726 | |
727 | return t_page; | |
728 | } | |
729 | ||
deb78b41 | 730 | static void null_free_device_storage(struct nullb_device *dev, bool is_cache) |
5bcd0e0c SL |
731 | { |
732 | unsigned long pos = 0; | |
733 | int nr_pages; | |
734 | struct nullb_page *ret, *t_pages[FREE_BATCH]; | |
735 | struct radix_tree_root *root; | |
736 | ||
deb78b41 | 737 | root = is_cache ? &dev->cache : &dev->data; |
5bcd0e0c SL |
738 | |
739 | do { | |
740 | int i; | |
741 | ||
742 | nr_pages = radix_tree_gang_lookup(root, | |
743 | (void **)t_pages, pos, FREE_BATCH); | |
744 | ||
745 | for (i = 0; i < nr_pages; i++) { | |
746 | pos = t_pages[i]->page->index; | |
747 | ret = radix_tree_delete_item(root, pos, t_pages[i]); | |
748 | WARN_ON(ret != t_pages[i]); | |
749 | null_free_page(ret); | |
750 | } | |
751 | ||
752 | pos++; | |
753 | } while (nr_pages == FREE_BATCH); | |
deb78b41 SL |
754 | |
755 | if (is_cache) | |
756 | dev->curr_cache = 0; | |
5bcd0e0c SL |
757 | } |
758 | ||
deb78b41 SL |
759 | static struct nullb_page *__null_lookup_page(struct nullb *nullb, |
760 | sector_t sector, bool for_write, bool is_cache) | |
5bcd0e0c SL |
761 | { |
762 | unsigned int sector_bit; | |
763 | u64 idx; | |
764 | struct nullb_page *t_page; | |
deb78b41 | 765 | struct radix_tree_root *root; |
5bcd0e0c SL |
766 | |
767 | idx = sector >> PAGE_SECTORS_SHIFT; | |
768 | sector_bit = (sector & SECTOR_MASK); | |
769 | ||
deb78b41 SL |
770 | root = is_cache ? &nullb->dev->cache : &nullb->dev->data; |
771 | t_page = radix_tree_lookup(root, idx); | |
5bcd0e0c SL |
772 | WARN_ON(t_page && t_page->page->index != idx); |
773 | ||
66231ad3 | 774 | if (t_page && (for_write || test_bit(sector_bit, t_page->bitmap))) |
5bcd0e0c SL |
775 | return t_page; |
776 | ||
777 | return NULL; | |
778 | } | |
779 | ||
deb78b41 SL |
780 | static struct nullb_page *null_lookup_page(struct nullb *nullb, |
781 | sector_t sector, bool for_write, bool ignore_cache) | |
782 | { | |
783 | struct nullb_page *page = NULL; | |
784 | ||
785 | if (!ignore_cache) | |
786 | page = __null_lookup_page(nullb, sector, for_write, true); | |
787 | if (page) | |
788 | return page; | |
789 | return __null_lookup_page(nullb, sector, for_write, false); | |
790 | } | |
791 | ||
5bcd0e0c | 792 | static struct nullb_page *null_insert_page(struct nullb *nullb, |
61884de0 JA |
793 | sector_t sector, bool ignore_cache) |
794 | __releases(&nullb->lock) | |
795 | __acquires(&nullb->lock) | |
5bcd0e0c SL |
796 | { |
797 | u64 idx; | |
798 | struct nullb_page *t_page; | |
799 | ||
deb78b41 | 800 | t_page = null_lookup_page(nullb, sector, true, ignore_cache); |
5bcd0e0c SL |
801 | if (t_page) |
802 | return t_page; | |
803 | ||
804 | spin_unlock_irq(&nullb->lock); | |
805 | ||
806 | t_page = null_alloc_page(GFP_NOIO); | |
807 | if (!t_page) | |
808 | goto out_lock; | |
809 | ||
810 | if (radix_tree_preload(GFP_NOIO)) | |
811 | goto out_freepage; | |
812 | ||
813 | spin_lock_irq(&nullb->lock); | |
814 | idx = sector >> PAGE_SECTORS_SHIFT; | |
815 | t_page->page->index = idx; | |
deb78b41 | 816 | t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache); |
5bcd0e0c SL |
817 | radix_tree_preload_end(); |
818 | ||
819 | return t_page; | |
820 | out_freepage: | |
821 | null_free_page(t_page); | |
822 | out_lock: | |
823 | spin_lock_irq(&nullb->lock); | |
deb78b41 SL |
824 | return null_lookup_page(nullb, sector, true, ignore_cache); |
825 | } | |
826 | ||
827 | static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page) | |
828 | { | |
829 | int i; | |
830 | unsigned int offset; | |
831 | u64 idx; | |
832 | struct nullb_page *t_page, *ret; | |
833 | void *dst, *src; | |
834 | ||
835 | idx = c_page->page->index; | |
836 | ||
837 | t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true); | |
838 | ||
66231ad3 ML |
839 | __clear_bit(NULLB_PAGE_LOCK, c_page->bitmap); |
840 | if (test_bit(NULLB_PAGE_FREE, c_page->bitmap)) { | |
deb78b41 | 841 | null_free_page(c_page); |
66231ad3 | 842 | if (t_page && null_page_empty(t_page)) { |
deb78b41 SL |
843 | ret = radix_tree_delete_item(&nullb->dev->data, |
844 | idx, t_page); | |
845 | null_free_page(t_page); | |
846 | } | |
847 | return 0; | |
848 | } | |
849 | ||
850 | if (!t_page) | |
851 | return -ENOMEM; | |
852 | ||
853 | src = kmap_atomic(c_page->page); | |
854 | dst = kmap_atomic(t_page->page); | |
855 | ||
856 | for (i = 0; i < PAGE_SECTORS; | |
857 | i += (nullb->dev->blocksize >> SECTOR_SHIFT)) { | |
66231ad3 | 858 | if (test_bit(i, c_page->bitmap)) { |
deb78b41 SL |
859 | offset = (i << SECTOR_SHIFT); |
860 | memcpy(dst + offset, src + offset, | |
861 | nullb->dev->blocksize); | |
66231ad3 | 862 | __set_bit(i, t_page->bitmap); |
deb78b41 SL |
863 | } |
864 | } | |
865 | ||
866 | kunmap_atomic(dst); | |
867 | kunmap_atomic(src); | |
868 | ||
869 | ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page); | |
870 | null_free_page(ret); | |
871 | nullb->dev->curr_cache -= PAGE_SIZE; | |
872 | ||
873 | return 0; | |
874 | } | |
875 | ||
876 | static int null_make_cache_space(struct nullb *nullb, unsigned long n) | |
f2298c04 | 877 | { |
deb78b41 SL |
878 | int i, err, nr_pages; |
879 | struct nullb_page *c_pages[FREE_BATCH]; | |
880 | unsigned long flushed = 0, one_round; | |
881 | ||
882 | again: | |
883 | if ((nullb->dev->cache_size * 1024 * 1024) > | |
884 | nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0) | |
885 | return 0; | |
886 | ||
887 | nr_pages = radix_tree_gang_lookup(&nullb->dev->cache, | |
888 | (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH); | |
889 | /* | |
890 | * null_flush_cache_page() could unlock before using the c_pages. To
891 | * avoid a race, we don't allow the pages to be freed.
892 | */ | |
893 | for (i = 0; i < nr_pages; i++) { | |
894 | nullb->cache_flush_pos = c_pages[i]->page->index; | |
895 | /* | |
896 | * We found a page that is already being flushed to disk by
897 | * another thread; skip it.
898 | */ | |
66231ad3 | 899 | if (test_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap)) |
deb78b41 SL |
900 | c_pages[i] = NULL; |
901 | else | |
66231ad3 | 902 | __set_bit(NULLB_PAGE_LOCK, c_pages[i]->bitmap); |
deb78b41 SL |
903 | } |
904 | ||
905 | one_round = 0; | |
906 | for (i = 0; i < nr_pages; i++) { | |
907 | if (c_pages[i] == NULL) | |
908 | continue; | |
909 | err = null_flush_cache_page(nullb, c_pages[i]); | |
910 | if (err) | |
911 | return err; | |
912 | one_round++; | |
913 | } | |
914 | flushed += one_round << PAGE_SHIFT; | |
915 | ||
916 | if (n > flushed) { | |
917 | if (nr_pages == 0) | |
918 | nullb->cache_flush_pos = 0; | |
919 | if (one_round == 0) { | |
920 | /* give other threads a chance */ | |
921 | spin_unlock_irq(&nullb->lock); | |
922 | spin_lock_irq(&nullb->lock); | |
923 | } | |
924 | goto again; | |
925 | } | |
926 | return 0; | |
5bcd0e0c SL |
927 | } |
928 | ||
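The interplay between the LOCK/FREE bits and the two functions above is subtle, so a summary is added here as commentary:

```c
/*
 * Cache-flush protocol used by null_make_cache_space() and
 * null_flush_cache_page(): a cache page is marked NULLB_PAGE_LOCK before
 * nullb->lock is dropped, so a concurrent flusher skips it and
 * null_free_page() only sets NULLB_PAGE_FREE instead of freeing it.  When
 * the flusher sees NULLB_PAGE_FREE it discards the page rather than
 * copying it into the data radix tree.
 */
```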
929 | static int copy_to_nullb(struct nullb *nullb, struct page *source, | |
deb78b41 | 930 | unsigned int off, sector_t sector, size_t n, bool is_fua) |
5bcd0e0c SL |
931 | { |
932 | size_t temp, count = 0; | |
933 | unsigned int offset; | |
934 | struct nullb_page *t_page; | |
935 | void *dst, *src; | |
936 | ||
937 | while (count < n) { | |
938 | temp = min_t(size_t, nullb->dev->blocksize, n - count); | |
939 | ||
deb78b41 SL |
940 | if (null_cache_active(nullb) && !is_fua) |
941 | null_make_cache_space(nullb, PAGE_SIZE); | |
942 | ||
5bcd0e0c | 943 | offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; |
deb78b41 SL |
944 | t_page = null_insert_page(nullb, sector, |
945 | !null_cache_active(nullb) || is_fua); | |
5bcd0e0c SL |
946 | if (!t_page) |
947 | return -ENOSPC; | |
948 | ||
949 | src = kmap_atomic(source); | |
950 | dst = kmap_atomic(t_page->page); | |
951 | memcpy(dst + offset, src + off + count, temp); | |
952 | kunmap_atomic(dst); | |
953 | kunmap_atomic(src); | |
954 | ||
66231ad3 | 955 | __set_bit(sector & SECTOR_MASK, t_page->bitmap); |
5bcd0e0c | 956 | |
deb78b41 SL |
957 | if (is_fua) |
958 | null_free_sector(nullb, sector, true); | |
959 | ||
5bcd0e0c SL |
960 | count += temp; |
961 | sector += temp >> SECTOR_SHIFT; | |
962 | } | |
963 | return 0; | |
964 | } | |
965 | ||
966 | static int copy_from_nullb(struct nullb *nullb, struct page *dest, | |
967 | unsigned int off, sector_t sector, size_t n) | |
968 | { | |
969 | size_t temp, count = 0; | |
970 | unsigned int offset; | |
971 | struct nullb_page *t_page; | |
972 | void *dst, *src; | |
973 | ||
974 | while (count < n) { | |
975 | temp = min_t(size_t, nullb->dev->blocksize, n - count); | |
976 | ||
977 | offset = (sector & SECTOR_MASK) << SECTOR_SHIFT; | |
deb78b41 SL |
978 | t_page = null_lookup_page(nullb, sector, false, |
979 | !null_cache_active(nullb)); | |
5bcd0e0c SL |
980 | |
981 | dst = kmap_atomic(dest); | |
982 | if (!t_page) { | |
983 | memset(dst + off + count, 0, temp); | |
984 | goto next; | |
985 | } | |
986 | src = kmap_atomic(t_page->page); | |
987 | memcpy(dst + off + count, src + offset, temp); | |
988 | kunmap_atomic(src); | |
989 | next: | |
990 | kunmap_atomic(dst); | |
991 | ||
992 | count += temp; | |
993 | sector += temp >> SECTOR_SHIFT; | |
994 | } | |
995 | return 0; | |
996 | } | |
997 | ||
306eb6b4 SL |
998 | static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n) |
999 | { | |
1000 | size_t temp; | |
1001 | ||
1002 | spin_lock_irq(&nullb->lock); | |
1003 | while (n > 0) { | |
1004 | temp = min_t(size_t, n, nullb->dev->blocksize); | |
deb78b41 SL |
1005 | null_free_sector(nullb, sector, false); |
1006 | if (null_cache_active(nullb)) | |
1007 | null_free_sector(nullb, sector, true); | |
306eb6b4 SL |
1008 | sector += temp >> SECTOR_SHIFT; |
1009 | n -= temp; | |
1010 | } | |
1011 | spin_unlock_irq(&nullb->lock); | |
1012 | } | |
1013 | ||
deb78b41 SL |
1014 | static int null_handle_flush(struct nullb *nullb) |
1015 | { | |
1016 | int err; | |
1017 | ||
1018 | if (!null_cache_active(nullb)) | |
1019 | return 0; | |
1020 | ||
1021 | spin_lock_irq(&nullb->lock); | |
1022 | while (true) { | |
1023 | err = null_make_cache_space(nullb, | |
1024 | nullb->dev->cache_size * 1024 * 1024); | |
1025 | if (err || nullb->dev->curr_cache == 0) | |
1026 | break; | |
1027 | } | |
1028 | ||
1029 | WARN_ON(!radix_tree_empty(&nullb->dev->cache)); | |
1030 | spin_unlock_irq(&nullb->lock); | |
1031 | return err; | |
1032 | } | |
1033 | ||
5bcd0e0c | 1034 | static int null_transfer(struct nullb *nullb, struct page *page, |
deb78b41 SL |
1035 | unsigned int len, unsigned int off, bool is_write, sector_t sector, |
1036 | bool is_fua) | |
f2298c04 | 1037 | { |
5bcd0e0c SL |
1038 | int err = 0; |
1039 | ||
1040 | if (!is_write) { | |
1041 | err = copy_from_nullb(nullb, page, off, sector, len); | |
1042 | flush_dcache_page(page); | |
1043 | } else { | |
1044 | flush_dcache_page(page); | |
deb78b41 | 1045 | err = copy_to_nullb(nullb, page, off, sector, len, is_fua); |
5bcd0e0c SL |
1046 | } |
1047 | ||
1048 | return err; | |
1049 | } | |
1050 | ||
1051 | static int null_handle_rq(struct nullb_cmd *cmd) | |
1052 | { | |
1053 | struct request *rq = cmd->rq; | |
1054 | struct nullb *nullb = cmd->nq->dev->nullb; | |
1055 | int err; | |
1056 | unsigned int len; | |
1057 | sector_t sector; | |
1058 | struct req_iterator iter; | |
1059 | struct bio_vec bvec; | |
1060 | ||
1061 | sector = blk_rq_pos(rq); | |
1062 | ||
306eb6b4 SL |
1063 | if (req_op(rq) == REQ_OP_DISCARD) { |
1064 | null_handle_discard(nullb, sector, blk_rq_bytes(rq)); | |
1065 | return 0; | |
1066 | } | |
1067 | ||
5bcd0e0c SL |
1068 | spin_lock_irq(&nullb->lock); |
1069 | rq_for_each_segment(bvec, rq, iter) { | |
1070 | len = bvec.bv_len; | |
1071 | err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, | |
deb78b41 SL |
1072 | op_is_write(req_op(rq)), sector, |
1073 | req_op(rq) & REQ_FUA); | |
5bcd0e0c SL |
1074 | if (err) { |
1075 | spin_unlock_irq(&nullb->lock); | |
1076 | return err; | |
1077 | } | |
1078 | sector += len >> SECTOR_SHIFT; | |
1079 | } | |
1080 | spin_unlock_irq(&nullb->lock); | |
1081 | ||
1082 | return 0; | |
1083 | } | |
1084 | ||
1085 | static int null_handle_bio(struct nullb_cmd *cmd) | |
1086 | { | |
1087 | struct bio *bio = cmd->bio; | |
1088 | struct nullb *nullb = cmd->nq->dev->nullb; | |
1089 | int err; | |
1090 | unsigned int len; | |
1091 | sector_t sector; | |
1092 | struct bio_vec bvec; | |
1093 | struct bvec_iter iter; | |
1094 | ||
1095 | sector = bio->bi_iter.bi_sector; | |
1096 | ||
306eb6b4 SL |
1097 | if (bio_op(bio) == REQ_OP_DISCARD) { |
1098 | null_handle_discard(nullb, sector, | |
1099 | bio_sectors(bio) << SECTOR_SHIFT); | |
1100 | return 0; | |
1101 | } | |
1102 | ||
5bcd0e0c SL |
1103 | spin_lock_irq(&nullb->lock); |
1104 | bio_for_each_segment(bvec, bio, iter) { | |
1105 | len = bvec.bv_len; | |
1106 | err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset, | |
deb78b41 | 1107 | op_is_write(bio_op(bio)), sector, |
bf7c7a04 | 1108 | bio->bi_opf & REQ_FUA); |
5bcd0e0c SL |
1109 | if (err) { |
1110 | spin_unlock_irq(&nullb->lock); | |
1111 | return err; | |
1112 | } | |
1113 | sector += len >> SECTOR_SHIFT; | |
1114 | } | |
1115 | spin_unlock_irq(&nullb->lock); | |
1116 | return 0; | |
1117 | } | |
1118 | ||
eff2c4f1 SL |
1119 | static void null_stop_queue(struct nullb *nullb) |
1120 | { | |
1121 | struct request_queue *q = nullb->q; | |
1122 | ||
1123 | if (nullb->dev->queue_mode == NULL_Q_MQ) | |
1124 | blk_mq_stop_hw_queues(q); | |
eff2c4f1 SL |
1125 | } |
1126 | ||
1127 | static void null_restart_queue_async(struct nullb *nullb) | |
1128 | { | |
1129 | struct request_queue *q = nullb->q; | |
eff2c4f1 SL |
1130 | |
1131 | if (nullb->dev->queue_mode == NULL_Q_MQ) | |
1132 | blk_mq_start_stopped_hw_queues(q, true); | |
eff2c4f1 SL |
1133 | } |
1134 | ||
5bcd0e0c SL |
1135 | static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) |
1136 | { | |
1137 | struct nullb_device *dev = cmd->nq->dev; | |
eff2c4f1 | 1138 | struct nullb *nullb = dev->nullb; |
5bcd0e0c SL |
1139 | int err = 0; |
1140 | ||
eff2c4f1 SL |
1141 | if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { |
1142 | struct request *rq = cmd->rq; | |
1143 | ||
1144 | if (!hrtimer_active(&nullb->bw_timer)) | |
1145 | hrtimer_restart(&nullb->bw_timer); | |
1146 | ||
1147 | if (atomic_long_sub_return(blk_rq_bytes(rq), | |
1148 | &nullb->cur_bytes) < 0) { | |
1149 | null_stop_queue(nullb); | |
1150 | /* race with timer */ | |
1151 | if (atomic_long_read(&nullb->cur_bytes) > 0) | |
1152 | null_restart_queue_async(nullb); | |
e50b1e32 JA |
1153 | /* requeue request */ |
1154 | return BLK_STS_DEV_RESOURCE; | |
eff2c4f1 SL |
1155 | } |
1156 | } | |
1157 | ||
2f54a613 SL |
1158 | if (nullb->dev->badblocks.shift != -1) { |
1159 | int bad_sectors; | |
1160 | sector_t sector, size, first_bad; | |
1161 | bool is_flush = true; | |
1162 | ||
1163 | if (dev->queue_mode == NULL_Q_BIO && | |
1164 | bio_op(cmd->bio) != REQ_OP_FLUSH) { | |
1165 | is_flush = false; | |
1166 | sector = cmd->bio->bi_iter.bi_sector; | |
1167 | size = bio_sectors(cmd->bio); | |
1168 | } | |
1169 | if (dev->queue_mode != NULL_Q_BIO && | |
1170 | req_op(cmd->rq) != REQ_OP_FLUSH) { | |
1171 | is_flush = false; | |
1172 | sector = blk_rq_pos(cmd->rq); | |
1173 | size = blk_rq_sectors(cmd->rq); | |
1174 | } | |
1175 | if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector, | |
1176 | size, &first_bad, &bad_sectors)) { | |
1177 | cmd->error = BLK_STS_IOERR; | |
1178 | goto out; | |
1179 | } | |
1180 | } | |
1181 | ||
5bcd0e0c | 1182 | if (dev->memory_backed) { |
deb78b41 SL |
1183 | if (dev->queue_mode == NULL_Q_BIO) { |
1184 | if (bio_op(cmd->bio) == REQ_OP_FLUSH) | |
1185 | err = null_handle_flush(nullb); | |
1186 | else | |
1187 | err = null_handle_bio(cmd); | |
1188 | } else { | |
1189 | if (req_op(cmd->rq) == REQ_OP_FLUSH) | |
1190 | err = null_handle_flush(nullb); | |
1191 | else | |
1192 | err = null_handle_rq(cmd); | |
1193 | } | |
5bcd0e0c SL |
1194 | } |
1195 | cmd->error = errno_to_blk_status(err); | |
ca4b2a01 MB |
1196 | |
1197 | if (!cmd->error && dev->zoned) { | |
b228ba1c JA |
1198 | sector_t sector; |
1199 | unsigned int nr_sectors; | |
1200 | int op; | |
1201 | ||
1202 | if (dev->queue_mode == NULL_Q_BIO) { | |
1203 | op = bio_op(cmd->bio); | |
1204 | sector = cmd->bio->bi_iter.bi_sector; | |
1205 | nr_sectors = cmd->bio->bi_iter.bi_size >> 9; | |
1206 | } else { | |
1207 | op = req_op(cmd->rq); | |
1208 | sector = blk_rq_pos(cmd->rq); | |
1209 | nr_sectors = blk_rq_sectors(cmd->rq); | |
1210 | } | |
1211 | ||
1212 | if (op == REQ_OP_WRITE) | |
1213 | null_zone_write(cmd, sector, nr_sectors); | |
1214 | else if (op == REQ_OP_ZONE_RESET) | |
1215 | null_zone_reset(cmd, sector); | |
ca4b2a01 | 1216 | } |
2f54a613 | 1217 | out: |
f2298c04 | 1218 | /* Complete IO by inline, softirq or timer */ |
5bcd0e0c | 1219 | switch (dev->irqmode) { |
f2298c04 | 1220 | case NULL_IRQ_SOFTIRQ: |
5bcd0e0c | 1221 | switch (dev->queue_mode) { |
ce2c350b | 1222 | case NULL_Q_MQ: |
08e0029a | 1223 | blk_mq_complete_request(cmd->rq); |
ce2c350b | 1224 | break; |
ce2c350b CH |
1225 | case NULL_Q_BIO: |
1226 | /* | |
1227 | * XXX: no proper submitting cpu information available. | |
1228 | */ | |
1229 | end_cmd(cmd); | |
1230 | break; | |
1231 | } | |
1232 | break; | |
1233 | case NULL_IRQ_NONE: | |
f2298c04 | 1234 | end_cmd(cmd); |
f2298c04 JA |
1235 | break; |
1236 | case NULL_IRQ_TIMER: | |
1237 | null_cmd_end_timer(cmd); | |
1238 | break; | |
1239 | } | |
5bcd0e0c | 1240 | return BLK_STS_OK; |
f2298c04 JA |
1241 | } |
1242 | ||
eff2c4f1 SL |
1243 | static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer) |
1244 | { | |
1245 | struct nullb *nullb = container_of(timer, struct nullb, bw_timer); | |
1246 | ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); | |
1247 | unsigned int mbps = nullb->dev->mbps; | |
1248 | ||
1249 | if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps)) | |
1250 | return HRTIMER_NORESTART; | |
1251 | ||
1252 | atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps)); | |
1253 | null_restart_queue_async(nullb); | |
1254 | ||
1255 | hrtimer_forward_now(&nullb->bw_timer, timer_interval); | |
1256 | ||
1257 | return HRTIMER_RESTART; | |
1258 | } | |
1259 | ||
1260 | static void nullb_setup_bwtimer(struct nullb *nullb) | |
1261 | { | |
1262 | ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL); | |
1263 | ||
1264 | hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | |
1265 | nullb->bw_timer.function = nullb_bwtimer_fn; | |
1266 | atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps)); | |
1267 | hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL); | |
f2298c04 JA |
1268 | } |
1269 | ||
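Putting the pieces together, the bandwidth-throttling flow works as summarized below; this is added commentary only, not new driver behaviour.

```c
/*
 * null_handle_cmd() subtracts each request's byte count from
 * nullb->cur_bytes; once the budget goes negative the hardware queues are
 * stopped and the request is requeued with BLK_STS_DEV_RESOURCE.  Every
 * TIMER_INTERVAL (20 ms) nullb_bwtimer_fn() resets the budget to
 * mb_per_tick(mbps) and restarts any stopped queues.
 */
```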
1270 | static struct nullb_queue *nullb_to_queue(struct nullb *nullb) | |
1271 | { | |
1272 | int index = 0; | |
1273 | ||
1274 | if (nullb->nr_queues != 1) | |
1275 | index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues); | |
1276 | ||
1277 | return &nullb->queues[index]; | |
1278 | } | |
1279 | ||
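The integer arithmetic in nullb_to_queue() spreads CPUs evenly over the available queues; a worked example, for illustration only:

```c
/*
 * With nr_cpu_ids = 8 and nullb->nr_queues = 4 the divisor is
 * (8 + 4 - 1) / 4 = 2, so CPUs 0-1 map to queue 0, CPUs 2-3 to queue 1,
 * and so on.  With a single queue the index is always 0.
 */
```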
dece1635 | 1280 | static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) |
f2298c04 JA |
1281 | { |
1282 | struct nullb *nullb = q->queuedata; | |
1283 | struct nullb_queue *nq = nullb_to_queue(nullb); | |
1284 | struct nullb_cmd *cmd; | |
1285 | ||
1286 | cmd = alloc_cmd(nq, 1); | |
1287 | cmd->bio = bio; | |
1288 | ||
1289 | null_handle_cmd(cmd); | |
dece1635 | 1290 | return BLK_QC_T_NONE; |
f2298c04 JA |
1291 | } |
1292 | ||
93b57046 JA |
1293 | static bool should_timeout_request(struct request *rq) |
1294 | { | |
33f782c4 | 1295 | #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION |
93b57046 JA |
1296 | if (g_timeout_str[0]) |
1297 | return should_fail(&null_timeout_attr, 1); | |
33f782c4 | 1298 | #endif |
24941b90 JA |
1299 | return false; |
1300 | } | |
93b57046 | 1301 | |
24941b90 JA |
1302 | static bool should_requeue_request(struct request *rq) |
1303 | { | |
1304 | #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION | |
1305 | if (g_requeue_str[0]) | |
1306 | return should_fail(&null_requeue_attr, 1); | |
1307 | #endif | |
93b57046 JA |
1308 | return false; |
1309 | } | |
1310 | ||
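For completeness, the timeout and requeue strings checked above and parsed later by null_setup_fault() follow the kernel's generic fault-injection format; this is an informational note, not new driver behaviour.

```c
/*
 * g_timeout_str / g_requeue_str use the generic fault-injection syntax
 * "<interval>,<probability>,<space>,<times>", e.g. loading the module with
 * timeout="1,100,0,-1" should time out every request (requires
 * CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION=y).
 */
```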
5448aca4 JA |
1311 | static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res) |
1312 | { | |
1313 | pr_info("null: rq %p timed out\n", rq); | |
0df0bb08 CH |
1314 | blk_mq_complete_request(rq); |
1315 | return BLK_EH_DONE; | |
5448aca4 JA |
1316 | } |
1317 | ||
fc17b653 | 1318 | static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, |
74c45052 | 1319 | const struct blk_mq_queue_data *bd) |
f2298c04 | 1320 | { |
74c45052 | 1321 | struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); |
2984c868 | 1322 | struct nullb_queue *nq = hctx->driver_data; |
f2298c04 | 1323 | |
db5bcf87 JA |
1324 | might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); |
1325 | ||
2984c868 | 1326 | if (nq->dev->irqmode == NULL_IRQ_TIMER) { |
3c395a96 PV |
1327 | hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1328 | cmd->timer.function = null_cmd_timer_expired; | |
1329 | } | |
74c45052 | 1330 | cmd->rq = bd->rq; |
2984c868 | 1331 | cmd->nq = nq; |
f2298c04 | 1332 | |
74c45052 | 1333 | blk_mq_start_request(bd->rq); |
e2490073 | 1334 | |
24941b90 JA |
1335 | if (should_requeue_request(bd->rq)) { |
1336 | /* | |
1337 | * Alternate between hitting the core BUSY path and the
1338 | * driver-driven requeue path.
1339 | */ | |
1340 | nq->requeue_selection++; | |
1341 | if (nq->requeue_selection & 1) | |
1342 | return BLK_STS_RESOURCE; | |
1343 | else { | |
1344 | blk_mq_requeue_request(bd->rq, true); | |
1345 | return BLK_STS_OK; | |
1346 | } | |
1347 | } | |
1348 | if (should_timeout_request(bd->rq)) | |
1349 | return BLK_STS_OK; | |
93b57046 | 1350 | |
24941b90 | 1351 | return null_handle_cmd(cmd); |
f2298c04 JA |
1352 | } |
1353 | ||
f363b089 | 1354 | static const struct blk_mq_ops null_mq_ops = { |
f2298c04 | 1355 | .queue_rq = null_queue_rq, |
49f66136 | 1356 | .complete = null_complete_rq, |
5448aca4 | 1357 | .timeout = null_timeout_rq, |
f2298c04 JA |
1358 | }; |
1359 | ||
de65d2d2 MB |
1360 | static void cleanup_queue(struct nullb_queue *nq) |
1361 | { | |
1362 | kfree(nq->tag_map); | |
1363 | kfree(nq->cmds); | |
1364 | } | |
1365 | ||
1366 | static void cleanup_queues(struct nullb *nullb) | |
1367 | { | |
1368 | int i; | |
1369 | ||
1370 | for (i = 0; i < nullb->nr_queues; i++) | |
1371 | cleanup_queue(&nullb->queues[i]); | |
1372 | ||
1373 | kfree(nullb->queues); | |
1374 | } | |
1375 | ||
9ae2d0aa MB |
1376 | static void null_del_dev(struct nullb *nullb) |
1377 | { | |
2984c868 SL |
1378 | struct nullb_device *dev = nullb->dev; |
1379 | ||
94bc02e3 SL |
1380 | ida_simple_remove(&nullb_indexes, nullb->index); |
1381 | ||
9ae2d0aa MB |
1382 | list_del_init(&nullb->list); |
1383 | ||
74ede5af | 1384 | del_gendisk(nullb->disk); |
eff2c4f1 SL |
1385 | |
1386 | if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) { | |
1387 | hrtimer_cancel(&nullb->bw_timer); | |
1388 | atomic_long_set(&nullb->cur_bytes, LONG_MAX); | |
1389 | null_restart_queue_async(nullb); | |
1390 | } | |
1391 | ||
9ae2d0aa | 1392 | blk_cleanup_queue(nullb->q); |
2984c868 SL |
1393 | if (dev->queue_mode == NULL_Q_MQ && |
1394 | nullb->tag_set == &nullb->__tag_set) | |
82f402fe | 1395 | blk_mq_free_tag_set(nullb->tag_set); |
74ede5af | 1396 | put_disk(nullb->disk); |
9ae2d0aa | 1397 | cleanup_queues(nullb); |
deb78b41 SL |
1398 | if (null_cache_active(nullb)) |
1399 | null_free_device_storage(nullb->dev, true); | |
9ae2d0aa | 1400 | kfree(nullb); |
2984c868 | 1401 | dev->nullb = NULL; |
9ae2d0aa MB |
1402 | } |
1403 | ||
306eb6b4 SL |
1404 | static void null_config_discard(struct nullb *nullb) |
1405 | { | |
1406 | if (nullb->dev->discard == false) | |
1407 | return; | |
1408 | nullb->q->limits.discard_granularity = nullb->dev->blocksize; | |
1409 | nullb->q->limits.discard_alignment = nullb->dev->blocksize; | |
1410 | blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); | |
8b904b5b | 1411 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); |
9ae2d0aa MB |
1412 | } |
1413 | ||
f2298c04 JA |
1414 | static int null_open(struct block_device *bdev, fmode_t mode) |
1415 | { | |
1416 | return 0; | |
1417 | } | |
1418 | ||
1419 | static void null_release(struct gendisk *disk, fmode_t mode) | |
1420 | { | |
1421 | } | |
1422 | ||
1423 | static const struct block_device_operations null_fops = { | |
1424 | .owner = THIS_MODULE, | |
1425 | .open = null_open, | |
1426 | .release = null_release, | |
e76239a3 | 1427 | .report_zones = null_zone_report, |
f2298c04 JA |
1428 | }; |
1429 | ||
82f402fe JA |
1430 | static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq) |
1431 | { | |
1432 | BUG_ON(!nullb); | |
1433 | BUG_ON(!nq); | |
1434 | ||
1435 | init_waitqueue_head(&nq->wait); | |
1436 | nq->queue_depth = nullb->queue_depth; | |
2984c868 | 1437 | nq->dev = nullb->dev; |
82f402fe JA |
1438 | } |
1439 | ||
1440 | static void null_init_queues(struct nullb *nullb) | |
1441 | { | |
1442 | struct request_queue *q = nullb->q; | |
1443 | struct blk_mq_hw_ctx *hctx; | |
1444 | struct nullb_queue *nq; | |
1445 | int i; | |
1446 | ||
1447 | queue_for_each_hw_ctx(q, hctx, i) { | |
1448 | if (!hctx->nr_ctx || !hctx->tags) | |
1449 | continue; | |
1450 | nq = &nullb->queues[i]; | |
1451 | hctx->driver_data = nq; | |
1452 | null_init_queue(nullb, nq); | |
1453 | nullb->nr_queues++; | |
1454 | } | |
1455 | } | |
1456 | ||
f2298c04 JA |
1457 | static int setup_commands(struct nullb_queue *nq) |
1458 | { | |
1459 | struct nullb_cmd *cmd; | |
1460 | int i, tag_size; | |
1461 | ||
6396bb22 | 1462 | nq->cmds = kcalloc(nq->queue_depth, sizeof(*cmd), GFP_KERNEL); |
f2298c04 | 1463 | if (!nq->cmds) |
2d263a78 | 1464 | return -ENOMEM; |
f2298c04 JA |
1465 | |
1466 | tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG; | |
6396bb22 | 1467 | nq->tag_map = kcalloc(tag_size, sizeof(unsigned long), GFP_KERNEL); |
f2298c04 JA |
1468 | if (!nq->tag_map) { |
1469 | kfree(nq->cmds); | |
2d263a78 | 1470 | return -ENOMEM; |
f2298c04 JA |
1471 | } |
1472 | ||
1473 | for (i = 0; i < nq->queue_depth; i++) { | |
1474 | cmd = &nq->cmds[i]; | |
1475 | INIT_LIST_HEAD(&cmd->list); | |
1476 | cmd->ll_list.next = NULL; | |
1477 | cmd->tag = -1U; | |
1478 | } | |
1479 | ||
1480 | return 0; | |
1481 | } | |
1482 | ||
f2298c04 JA |
1483 | static int setup_queues(struct nullb *nullb) |
1484 | { | |
6396bb22 KC |
1485 | nullb->queues = kcalloc(nullb->dev->submit_queues, |
1486 | sizeof(struct nullb_queue), | |
1487 | GFP_KERNEL); | |
f2298c04 | 1488 | if (!nullb->queues) |
2d263a78 | 1489 | return -ENOMEM; |
f2298c04 JA |
1490 | |
1491 | nullb->nr_queues = 0; | |
2984c868 | 1492 | nullb->queue_depth = nullb->dev->hw_queue_depth; |
f2298c04 | 1493 | |
2d263a78 MB |
1494 | return 0; |
1495 | } | |
1496 | ||
1497 | static int init_driver_queues(struct nullb *nullb) | |
1498 | { | |
1499 | struct nullb_queue *nq; | |
1500 | int i, ret = 0; | |
f2298c04 | 1501 | |
2984c868 | 1502 | for (i = 0; i < nullb->dev->submit_queues; i++) { |
f2298c04 | 1503 | nq = &nullb->queues[i]; |
2d263a78 MB |
1504 | |
1505 | null_init_queue(nullb, nq); | |
1506 | ||
1507 | ret = setup_commands(nq); | |
1508 | if (ret) | |
31f9690e | 1509 | return ret; |
f2298c04 JA |
1510 | nullb->nr_queues++; |
1511 | } | |
2d263a78 | 1512 | return 0; |
f2298c04 JA |
1513 | } |
1514 | ||
9ae2d0aa | 1515 | static int null_gendisk_register(struct nullb *nullb) |
f2298c04 JA |
1516 | { |
1517 | struct gendisk *disk; | |
f2298c04 | 1518 | sector_t size; |
9ae2d0aa | 1519 | |
2984c868 | 1520 | disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node); |
9ae2d0aa MB |
1521 | if (!disk) |
1522 | return -ENOMEM; | |
2984c868 | 1523 | size = (sector_t)nullb->dev->size * 1024 * 1024ULL; |
9ae2d0aa MB |
1524 | set_capacity(disk, size >> 9); |
1525 | ||
1526 | disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; | |
1527 | disk->major = null_major; | |
1528 | disk->first_minor = nullb->index; | |
1529 | disk->fops = &null_fops; | |
1530 | disk->private_data = nullb; | |
1531 | disk->queue = nullb->q; | |
1532 | strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); | |
1533 | ||
bf505456 DLM |
1534 | if (nullb->dev->zoned) { |
1535 | int ret = blk_revalidate_disk_zones(disk); | |
1536 | ||
1537 | if (ret != 0) | |
1538 | return ret; | |
1539 | } | |
1540 | ||
9ae2d0aa MB |
1541 | add_disk(disk); |
1542 | return 0; | |
1543 | } | |
1544 | ||
2984c868 | 1545 | static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set) |
82f402fe JA |
1546 | { |
1547 | set->ops = &null_mq_ops; | |
2984c868 SL |
1548 | set->nr_hw_queues = nullb ? nullb->dev->submit_queues : |
1549 | g_submit_queues; | |
1550 | set->queue_depth = nullb ? nullb->dev->hw_queue_depth : | |
1551 | g_hw_queue_depth; | |
1552 | set->numa_node = nullb ? nullb->dev->home_node : g_home_node; | |
82f402fe JA |
1553 | set->cmd_size = sizeof(struct nullb_cmd); |
1554 | set->flags = BLK_MQ_F_SHOULD_MERGE; | |
b3cffc38 | 1555 | if (g_no_sched) |
1556 | set->flags |= BLK_MQ_F_NO_SCHED; | |
82f402fe JA |
1557 | set->driver_data = NULL; |
1558 | ||
0d06a42f | 1559 | if ((nullb && nullb->dev->blocking) || g_blocking) |
82f402fe JA |
1560 | set->flags |= BLK_MQ_F_BLOCKING; |
1561 | ||
1562 | return blk_mq_alloc_tag_set(set); | |
1563 | } | |
1564 | ||
cedcafad SL |
1565 | static void null_validate_conf(struct nullb_device *dev) |
1566 | { | |
1567 | dev->blocksize = round_down(dev->blocksize, 512); | |
1568 | dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096); | |
cedcafad SL |
1569 | |
1570 | if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) { | |
1571 | if (dev->submit_queues != nr_online_nodes) | |
1572 | dev->submit_queues = nr_online_nodes; | |
1573 | } else if (dev->submit_queues > nr_cpu_ids) | |
1574 | dev->submit_queues = nr_cpu_ids; | |
1575 | else if (dev->submit_queues == 0) | |
1576 | dev->submit_queues = 1; | |
1577 | ||
1578 | dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ); | |
1579 | dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER); | |
5bcd0e0c SL |
1580 | |
1581 | 	/* A memory-backed device allocates pages in the I/O path, so it must be blocking */
1582 | if (dev->memory_backed) | |
1583 | dev->blocking = true; | |
deb78b41 SL |
1584 | 	else /* the cache is meaningless without memory backing */
1585 | dev->cache_size = 0; | |
1586 | dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024, | |
1587 | dev->cache_size); | |
eff2c4f1 SL |
1588 | dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps); |
1589 | 	/* a bio-based queue cannot be stopped, so throttling is unsupported */
1590 | if (dev->queue_mode == NULL_Q_BIO) | |
1591 | dev->mbps = 0; | |
cedcafad SL |
1592 | } |
1593 | ||
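/*
 * Fault injection setup: __null_setup_fault() parses the timeout/requeue
 * module parameter strings with setup_fault_attr(). An empty string leaves
 * the corresponding fault attribute unused.
 */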
33f782c4 | 1594 | #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION |
24941b90 JA |
1595 | static bool __null_setup_fault(struct fault_attr *attr, char *str) |
1596 | { | |
1597 | if (!str[0]) | |
93b57046 JA |
1598 | return true; |
1599 | ||
24941b90 | 1600 | if (!setup_fault_attr(attr, str)) |
93b57046 JA |
1601 | return false; |
1602 | ||
24941b90 JA |
1603 | attr->verbose = 0; |
1604 | return true; | |
1605 | } | |
1606 | #endif | |
1607 | ||
1608 | static bool null_setup_fault(void) | |
1609 | { | |
1610 | #ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION | |
1611 | if (!__null_setup_fault(&null_timeout_attr, g_timeout_str)) | |
1612 | return false; | |
1613 | if (!__null_setup_fault(&null_requeue_attr, g_requeue_str)) | |
1614 | return false; | |
33f782c4 | 1615 | #endif |
93b57046 JA |
1616 | return true; |
1617 | } | |
1618 | ||
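/*
 * null_add_dev() - bring up one null_blk device: validate the configuration,
 * allocate the nullb, set up the submit queues and (for blk-mq) the tag set,
 * create the request queue, enable throttling, write-back cache and zoned
 * support as configured, then register the gendisk and add the device to
 * nullb_list. Errors unwind in reverse through the out_* labels.
 */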
2984c868 | 1619 | static int null_add_dev(struct nullb_device *dev) |
9ae2d0aa MB |
1620 | { |
1621 | struct nullb *nullb; | |
dc501dc0 | 1622 | int rv; |
f2298c04 | 1623 | |
cedcafad SL |
1624 | null_validate_conf(dev); |
1625 | ||
2984c868 | 1626 | nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node); |
dc501dc0 RE |
1627 | if (!nullb) { |
1628 | rv = -ENOMEM; | |
24d2f903 | 1629 | goto out; |
dc501dc0 | 1630 | } |
2984c868 SL |
1631 | nullb->dev = dev; |
1632 | dev->nullb = nullb; | |
f2298c04 JA |
1633 | |
1634 | spin_lock_init(&nullb->lock); | |
1635 | ||
dc501dc0 RE |
1636 | rv = setup_queues(nullb); |
1637 | if (rv) | |
24d2f903 | 1638 | goto out_free_nullb; |
f2298c04 | 1639 | |
2984c868 | 1640 | if (dev->queue_mode == NULL_Q_MQ) { |
82f402fe JA |
1641 | if (shared_tags) { |
1642 | nullb->tag_set = &tag_set; | |
1643 | rv = 0; | |
1644 | } else { | |
1645 | nullb->tag_set = &nullb->__tag_set; | |
2984c868 | 1646 | rv = null_init_tag_set(nullb, nullb->tag_set); |
82f402fe JA |
1647 | } |
1648 | ||
dc501dc0 | 1649 | if (rv) |
24d2f903 CH |
1650 | goto out_cleanup_queues; |
1651 | ||
93b57046 JA |
1652 | 		if (!null_setup_fault()) {
     | 			rv = -EINVAL;
1653 | 			goto out_cleanup_tags;
     | 		}
1654 | ||
5448aca4 | 1655 | nullb->tag_set->timeout = 5 * HZ; |
82f402fe | 1656 | nullb->q = blk_mq_init_queue(nullb->tag_set); |
35b489d3 | 1657 | if (IS_ERR(nullb->q)) { |
dc501dc0 | 1658 | rv = -ENOMEM; |
24d2f903 | 1659 | goto out_cleanup_tags; |
dc501dc0 | 1660 | } |
82f402fe | 1661 | null_init_queues(nullb); |
2984c868 | 1662 | } else if (dev->queue_mode == NULL_Q_BIO) { |
6d469642 | 1663 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); |
dc501dc0 RE |
1664 | if (!nullb->q) { |
1665 | rv = -ENOMEM; | |
24d2f903 | 1666 | goto out_cleanup_queues; |
dc501dc0 | 1667 | } |
f2298c04 | 1668 | blk_queue_make_request(nullb->q, null_queue_bio); |
31f9690e JK |
1669 | rv = init_driver_queues(nullb); |
1670 | if (rv) | |
1671 | goto out_cleanup_blk_queue; | |
f2298c04 JA |
1672 | } |
1673 | ||
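	/*
	 * Optional features: bandwidth throttling (mbps), write-back cache
	 * emulation and zoned block device mode, each driven by the device
	 * configuration.
	 */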
eff2c4f1 SL |
1674 | if (dev->mbps) { |
1675 | set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags); | |
1676 | nullb_setup_bwtimer(nullb); | |
1677 | } | |
1678 | ||
deb78b41 SL |
1679 | if (dev->cache_size > 0) { |
1680 | set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags); | |
1681 | blk_queue_write_cache(nullb->q, true, true); | |
deb78b41 SL |
1682 | } |
1683 | ||
ca4b2a01 MB |
1684 | if (dev->zoned) { |
1685 | rv = null_zone_init(dev); | |
1686 | if (rv) | |
1687 | goto out_cleanup_blk_queue; | |
1688 | ||
1689 | blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects); | |
1690 | nullb->q->limits.zoned = BLK_ZONED_HM; | |
1691 | } | |
1692 | ||
f2298c04 | 1693 | nullb->q->queuedata = nullb; |
8b904b5b BVA |
1694 | blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); |
1695 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); | |
f2298c04 | 1696 | |
f2298c04 | 1697 | mutex_lock(&lock); |
94bc02e3 | 1698 | nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); |
cedcafad | 1699 | dev->index = nullb->index; |
f2298c04 JA |
1700 | mutex_unlock(&lock); |
1701 | ||
2984c868 SL |
1702 | blk_queue_logical_block_size(nullb->q, dev->blocksize); |
1703 | blk_queue_physical_block_size(nullb->q, dev->blocksize); | |
f2298c04 | 1704 | |
306eb6b4 | 1705 | null_config_discard(nullb); |
f2298c04 | 1706 | |
b2b7e001 MB |
1707 | sprintf(nullb->disk_name, "nullb%d", nullb->index); |
1708 | ||
74ede5af | 1709 | rv = null_gendisk_register(nullb); |
9ae2d0aa | 1710 | if (rv) |
ca4b2a01 | 1711 | goto out_cleanup_zone; |
a514379b MB |
1712 | |
1713 | mutex_lock(&lock); | |
1714 | list_add_tail(&nullb->list, &nullb_list); | |
1715 | mutex_unlock(&lock); | |
3681c85d | 1716 | |
f2298c04 | 1717 | return 0; |
ca4b2a01 MB |
1718 | out_cleanup_zone: |
1719 | if (dev->zoned) | |
1720 | null_zone_exit(dev); | |
24d2f903 CH |
1721 | out_cleanup_blk_queue: |
1722 | blk_cleanup_queue(nullb->q); | |
1723 | out_cleanup_tags: | |
2984c868 | 1724 | if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set) |
82f402fe | 1725 | blk_mq_free_tag_set(nullb->tag_set); |
24d2f903 CH |
1726 | out_cleanup_queues: |
1727 | cleanup_queues(nullb); | |
1728 | out_free_nullb: | |
1729 | kfree(nullb); | |
1730 | out: | |
dc501dc0 | 1731 | return rv; |
f2298c04 JA |
1732 | } |
1733 | ||
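/*
 * null_init() - module entry point: sanity-check the module parameters,
 * optionally allocate the shared tag set, register the configfs subsystem
 * and the "nullb" block major, then create nr_devices devices. A typical
 * (assumed) invocation: modprobe null_blk nr_devices=2 queue_mode=2 bs=4096.
 */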
1734 | static int __init null_init(void) | |
1735 | { | |
af096e22 | 1736 | int ret = 0; |
f2298c04 | 1737 | unsigned int i; |
af096e22 | 1738 | struct nullb *nullb; |
2984c868 | 1739 | struct nullb_device *dev; |
f2298c04 | 1740 | |
2984c868 | 1741 | if (g_bs > PAGE_SIZE) { |
9967d8ac R |
1742 | pr_warn("null_blk: invalid block size\n"); |
1743 | 		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE); | |
2984c868 | 1744 | g_bs = PAGE_SIZE; |
9967d8ac | 1745 | } |
f2298c04 | 1746 | |
ca4b2a01 MB |
1747 | if (!is_power_of_2(g_zone_size)) { |
1748 | pr_err("null_blk: zone_size must be power-of-two\n"); | |
1749 | return -EINVAL; | |
1750 | } | |
1751 | ||
7ff684a6 JP |
1752 | if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) { |
1753 | pr_err("null_blk: invalid home_node value\n"); | |
1754 | g_home_node = NUMA_NO_NODE; | |
1755 | } | |
1756 | ||
e50b1e32 JA |
1757 | if (g_queue_mode == NULL_Q_RQ) { |
1758 | pr_err("null_blk: legacy IO path no longer available\n"); | |
1759 | return -EINVAL; | |
1760 | } | |
2984c868 SL |
1761 | if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { |
1762 | if (g_submit_queues != nr_online_nodes) { | |
558ab300 | 1763 | pr_warn("null_blk: submit_queues param is set to %u.\n", |
d15ee6b1 | 1764 | nr_online_nodes); |
2984c868 | 1765 | g_submit_queues = nr_online_nodes; |
fc1bc354 | 1766 | } |
2984c868 SL |
1767 | } else if (g_submit_queues > nr_cpu_ids) |
1768 | g_submit_queues = nr_cpu_ids; | |
1769 | else if (g_submit_queues <= 0) | |
1770 | g_submit_queues = 1; | |
f2298c04 | 1771 | |
2984c868 SL |
1772 | if (g_queue_mode == NULL_Q_MQ && shared_tags) { |
1773 | ret = null_init_tag_set(NULL, &tag_set); | |
db2d153d MG |
1774 | if (ret) |
1775 | return ret; | |
1776 | } | |
1777 | ||
3bf2bd20 SL |
1778 | config_group_init(&nullb_subsys.su_group); |
1779 | mutex_init(&nullb_subsys.su_mutex); | |
1780 | ||
1781 | ret = configfs_register_subsystem(&nullb_subsys); | |
1782 | if (ret) | |
1783 | goto err_tagset; | |
1784 | ||
f2298c04 JA |
1785 | mutex_init(&lock); |
1786 | ||
f2298c04 | 1787 | null_major = register_blkdev(0, "nullb"); |
db2d153d MG |
1788 | if (null_major < 0) { |
1789 | ret = null_major; | |
3bf2bd20 | 1790 | goto err_conf; |
db2d153d | 1791 | } |
f2298c04 JA |
1792 | |
1793 | for (i = 0; i < nr_devices; i++) { | |
2984c868 | 1794 | dev = null_alloc_dev(); |
30c516d7 WY |
1795 | if (!dev) { |
1796 | ret = -ENOMEM; | |
2984c868 | 1797 | goto err_dev; |
30c516d7 | 1798 | } |
2984c868 SL |
1799 | ret = null_add_dev(dev); |
1800 | if (ret) { | |
1801 | null_free_dev(dev); | |
af096e22 | 1802 | goto err_dev; |
2984c868 | 1803 | } |
f2298c04 JA |
1804 | } |
1805 | ||
1806 | 	pr_info("null_blk: module loaded\n"); | |
1807 | return 0; | |
af096e22 MH |
1808 | |
1809 | err_dev: | |
1810 | while (!list_empty(&nullb_list)) { | |
1811 | nullb = list_entry(nullb_list.next, struct nullb, list); | |
2984c868 | 1812 | dev = nullb->dev; |
af096e22 | 1813 | null_del_dev(nullb); |
2984c868 | 1814 | null_free_dev(dev); |
af096e22 | 1815 | } |
af096e22 | 1816 | unregister_blkdev(null_major, "nullb"); |
3bf2bd20 SL |
1817 | err_conf: |
1818 | configfs_unregister_subsystem(&nullb_subsys); | |
db2d153d | 1819 | err_tagset: |
2984c868 | 1820 | if (g_queue_mode == NULL_Q_MQ && shared_tags) |
db2d153d | 1821 | blk_mq_free_tag_set(&tag_set); |
af096e22 | 1822 | return ret; |
f2298c04 JA |
1823 | } |
1824 | ||
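/*
 * null_exit() - module teardown: unregister the configfs subsystem and the
 * "nullb" block major, delete and free every device still on nullb_list,
 * then release the shared tag set if one was allocated.
 */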
1825 | static void __exit null_exit(void) | |
1826 | { | |
1827 | struct nullb *nullb; | |
1828 | ||
3bf2bd20 SL |
1829 | configfs_unregister_subsystem(&nullb_subsys); |
1830 | ||
f2298c04 JA |
1831 | unregister_blkdev(null_major, "nullb"); |
1832 | ||
1833 | mutex_lock(&lock); | |
1834 | while (!list_empty(&nullb_list)) { | |
2984c868 SL |
1835 | struct nullb_device *dev; |
1836 | ||
f2298c04 | 1837 | nullb = list_entry(nullb_list.next, struct nullb, list); |
2984c868 | 1838 | dev = nullb->dev; |
f2298c04 | 1839 | null_del_dev(nullb); |
2984c868 | 1840 | null_free_dev(dev); |
f2298c04 JA |
1841 | } |
1842 | mutex_unlock(&lock); | |
6bb9535b | 1843 | |
2984c868 | 1844 | if (g_queue_mode == NULL_Q_MQ && shared_tags) |
82f402fe | 1845 | blk_mq_free_tag_set(&tag_set); |
f2298c04 JA |
1846 | } |
1847 | ||
1848 | module_init(null_init); | |
1849 | module_exit(null_exit); | |
1850 | ||
231b3db1 | 1851 | MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>"); |
f2298c04 | 1852 | MODULE_LICENSE("GPL"); |