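/*
 * null_blk: a block driver that completes I/O without transferring any
 * data, used to exercise and benchmark the block layer. The module
 * parameters below select the submission interface (bio, legacy request,
 * or blk-mq) and the completion mode (inline, softirq, or hrtimer).
 *
 * Hypothetical example load, assuming the module is built as null_blk.ko;
 * the parameter names come from the module_param() declarations below:
 *
 *	modprobe null_blk queue_mode=2 submit_queues=4 hw_queue_depth=64 \
 *		 nr_devices=1 gb=10 bs=4096 irqmode=1
 */
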
#include <linux/module.h>

#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>

struct nullb_cmd {
	struct list_head list;
	struct llist_node ll_list;
	struct call_single_data csd;
	struct request *rq;
	struct bio *bio;
	unsigned int tag;
	struct nullb_queue *nq;
};

struct nullb_queue {
	unsigned long *tag_map;
	wait_queue_head_t wait;
	unsigned int queue_depth;

	struct nullb_cmd *cmds;
};

struct nullb {
	struct list_head list;
	unsigned int index;
	struct request_queue *q;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct hrtimer timer;
	unsigned int queue_depth;
	spinlock_t lock;

	struct nullb_queue *queues;
	unsigned int nr_queues;
};

static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
static int nullb_indexes;

struct completion_queue {
	struct llist_head list;
	struct hrtimer timer;
};

/*
 * These are per-cpu for now, they will need to be configured by the
 * complete_queues parameter and appropriately mapped.
 */
static DEFINE_PER_CPU(struct completion_queue, completion_queues);

enum {
	NULL_IRQ_NONE		= 0,
	NULL_IRQ_SOFTIRQ	= 1,
	NULL_IRQ_TIMER		= 2,
};

enum {
	NULL_Q_BIO		= 0,
	NULL_Q_RQ		= 1,
	NULL_Q_MQ		= 2,
};

static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");

static int home_node = NUMA_NO_NODE;
module_param(home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");

static int queue_mode = NULL_Q_MQ;

static int null_param_store_val(const char *str, int *val, int min, int max)
{
	int ret, new_val;

	ret = kstrtoint(str, 10, &new_val);
	if (ret)
		return -EINVAL;

	if (new_val < min || new_val > max)
		return -EINVAL;

	*val = new_val;
	return 0;
}

static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}

static const struct kernel_param_ops null_queue_mode_param_ops = {
	.set	= null_set_queue_mode,
	.get	= param_get_int,
};

device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");

static int gb = 250;
module_param(gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");

static int bs = 512;
module_param(bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");

static int nr_devices = 2;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");

static int irqmode = NULL_IRQ_SOFTIRQ;

static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
	return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
					NULL_IRQ_TIMER);
}

static const struct kernel_param_ops null_irqmode_param_ops = {
	.set	= null_set_irqmode,
	.get	= param_get_int,
};

device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");

static int completion_nsec = 10000;
module_param(completion_nsec, int, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");

static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");

static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");

static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
	clear_bit_unlock(tag, nq->tag_map);

	if (waitqueue_active(&nq->wait))
		wake_up(&nq->wait);
}

static unsigned int get_tag(struct nullb_queue *nq)
{
	unsigned int tag;

	do {
		tag = find_first_zero_bit(nq->tag_map, nq->queue_depth);
		if (tag >= nq->queue_depth)
			return -1U;
	} while (test_and_set_bit_lock(tag, nq->tag_map));

	return tag;
}
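
/*
 * Note on the two helpers above: in the bio and rq modes, tags index into
 * the per-queue nullb_cmd array and are tracked in a plain bitmap.
 * get_tag() is lockless: find_first_zero_bit() picks a candidate and
 * test_and_set_bit_lock() claims it, retrying if another CPU raced to the
 * same bit; put_tag() releases the bit and wakes any waiter sleeping in
 * alloc_cmd(). (In blk-mq mode, tagging is handled by the block layer.)
 */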

static void free_cmd(struct nullb_cmd *cmd)
{
	put_tag(cmd->nq, cmd->tag);
}

static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	unsigned int tag;

	tag = get_tag(nq);
	if (tag != -1U) {
		cmd = &nq->cmds[tag];
		cmd->tag = tag;
		cmd->nq = nq;
		return cmd;
	}

	return NULL;
}

static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
{
	struct nullb_cmd *cmd;
	DEFINE_WAIT(wait);

	cmd = __alloc_cmd(nq);
	if (cmd || !can_wait)
		return cmd;

	do {
		prepare_to_wait(&nq->wait, &wait, TASK_UNINTERRUPTIBLE);
		cmd = __alloc_cmd(nq);
		if (cmd)
			break;

		io_schedule();
	} while (1);

	finish_wait(&nq->wait, &wait);
	return cmd;
}
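
/*
 * alloc_cmd() with can_wait=1 (the bio path) sleeps in TASK_UNINTERRUPTIBLE
 * until put_tag() frees a command; with can_wait=0 (the rq prep path) it
 * returns NULL so the caller can defer the request instead of blocking.
 */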

static void end_cmd(struct nullb_cmd *cmd)
{
	switch (queue_mode) {
	case NULL_Q_MQ:
		blk_mq_end_request(cmd->rq, 0);
		return;
	case NULL_Q_RQ:
		INIT_LIST_HEAD(&cmd->rq->queuelist);
		blk_end_request_all(cmd->rq, 0);
		break;
	case NULL_Q_BIO:
		bio_endio(cmd->bio, 0);
		break;
	}

	free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
{
	struct completion_queue *cq;
	struct llist_node *entry;
	struct nullb_cmd *cmd;

	cq = &per_cpu(completion_queues, smp_processor_id());

	while ((entry = llist_del_all(&cq->list)) != NULL) {
		entry = llist_reverse_order(entry);
		do {
			struct request_queue *q = NULL;

			cmd = container_of(entry, struct nullb_cmd, ll_list);
			entry = entry->next;
			/*
			 * Grab the queue before completing the command:
			 * end_cmd() may free the request, so cmd->rq must
			 * not be dereferenced afterwards.
			 */
			if (cmd->rq)
				q = cmd->rq->q;
			end_cmd(cmd);

			if (q && !q->mq_ops && blk_queue_stopped(q)) {
				spin_lock(q->queue_lock);
				if (blk_queue_stopped(q))
					blk_start_queue(q);
				spin_unlock(q->queue_lock);
			}
		} while (entry);
	}

	return HRTIMER_NORESTART;
}

static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
	struct completion_queue *cq = &per_cpu(completion_queues, get_cpu());

	cmd->ll_list.next = NULL;
	if (llist_add(&cmd->ll_list, &cq->list)) {
		ktime_t kt = ktime_set(0, completion_nsec);

		hrtimer_start(&cq->timer, kt, HRTIMER_MODE_REL_PINNED);
	}

	put_cpu();
}
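
/*
 * Timer-mode completion: commands are batched on a per-cpu lockless list
 * and drained in null_cmd_timer_expired(). llist_add() returns true only
 * when the list was previously empty, so the hrtimer is armed once per
 * batch rather than once per command; completion_nsec controls the delay.
 */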

static void null_softirq_done_fn(struct request *rq)
{
	if (queue_mode == NULL_Q_MQ)
		end_cmd(blk_mq_rq_to_pdu(rq));
	else
		end_cmd(rq->special);
}

static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
	/* Complete IO by inline, softirq or timer */
	switch (irqmode) {
	case NULL_IRQ_SOFTIRQ:
		switch (queue_mode) {
		case NULL_Q_MQ:
			blk_mq_complete_request(cmd->rq);
			break;
		case NULL_Q_RQ:
			blk_complete_request(cmd->rq);
			break;
		case NULL_Q_BIO:
			/*
			 * XXX: no proper submitting cpu information available.
			 */
			end_cmd(cmd);
			break;
		}
		break;
	case NULL_IRQ_NONE:
		end_cmd(cmd);
		break;
	case NULL_IRQ_TIMER:
		null_cmd_end_timer(cmd);
		break;
	}
}
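
/*
 * null_handle_cmd() is the single completion dispatcher: irqmode picks
 * inline (NONE), deferred-to-softirq (SOFTIRQ), or hrtimer-delayed (TIMER)
 * completion, and in the softirq case the call used depends on whether the
 * command came from the blk-mq, legacy request, or bio path.
 */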

static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
{
	int index = 0;

	if (nullb->nr_queues != 1)
		index = raw_smp_processor_id() / ((nr_cpu_ids + nullb->nr_queues - 1) / nullb->nr_queues);

	return &nullb->queues[index];
}
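
/*
 * For the bio and rq paths, nullb_to_queue() spreads submitting CPUs over
 * the available queues: CPU ids are divided into nr_queues contiguous
 * ranges of roughly nr_cpu_ids / nr_queues CPUs each.
 */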

static void null_queue_bio(struct request_queue *q, struct bio *bio)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 1);
	cmd->bio = bio;

	null_handle_cmd(cmd);
}

static int null_rq_prep_fn(struct request_queue *q, struct request *req)
{
	struct nullb *nullb = q->queuedata;
	struct nullb_queue *nq = nullb_to_queue(nullb);
	struct nullb_cmd *cmd;

	cmd = alloc_cmd(nq, 0);
	if (cmd) {
		cmd->rq = req;
		req->special = cmd;
		return BLKPREP_OK;
	}
	blk_stop_queue(q);

	return BLKPREP_DEFER;
}

static void null_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		struct nullb_cmd *cmd = rq->special;

		spin_unlock_irq(q->queue_lock);
		null_handle_cmd(cmd);
		spin_lock_irq(q->queue_lock);
	}
}

static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	cmd->rq = bd->rq;
	cmd->nq = hctx->driver_data;

	blk_mq_start_request(bd->rq);

	null_handle_cmd(cmd);
	return BLK_MQ_RQ_QUEUE_OK;
}
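
/*
 * blk-mq path: the nullb_cmd lives in the per-request PDU reserved via
 * tag_set.cmd_size, so no driver-side tag or command allocation is needed
 * here; the request is started and then handed to null_handle_cmd() for
 * completion.
 */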

static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
	BUG_ON(!nullb);
	BUG_ON(!nq);

	init_waitqueue_head(&nq->wait);
	nq->queue_depth = nullb->queue_depth;
}

static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int index)
{
	struct nullb *nullb = data;
	struct nullb_queue *nq = &nullb->queues[index];

	hctx->driver_data = nq;
	null_init_queue(nullb, nq);
	nullb->nr_queues++;

	return 0;
}

static struct blk_mq_ops null_mq_ops = {
	.queue_rq	= null_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_hctx	= null_init_hctx,
	.complete	= null_softirq_done_fn,
};

static void null_del_dev(struct nullb *nullb)
{
	list_del_init(&nullb->list);

	del_gendisk(nullb->disk);
	blk_cleanup_queue(nullb->q);
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
	put_disk(nullb->disk);
	kfree(nullb);
}

static int null_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void null_release(struct gendisk *disk, fmode_t mode)
{
}

static const struct block_device_operations null_fops = {
	.owner		= THIS_MODULE,
	.open		= null_open,
	.release	= null_release,
};

static int setup_commands(struct nullb_queue *nq)
{
	struct nullb_cmd *cmd;
	int i, tag_size;

	nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
	if (!nq->cmds)
		return -ENOMEM;

	tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
	nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
	if (!nq->tag_map) {
		kfree(nq->cmds);
		return -ENOMEM;
	}

	for (i = 0; i < nq->queue_depth; i++) {
		cmd = &nq->cmds[i];
		INIT_LIST_HEAD(&cmd->list);
		cmd->ll_list.next = NULL;
		cmd->tag = -1U;
	}

	return 0;
}
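
/*
 * setup_commands() preallocates queue_depth nullb_cmd structs per queue and
 * a tag bitmap rounded up to a whole number of longs, so get_tag() and
 * put_tag() can operate on it with the word-sized bit helpers.
 */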

static void cleanup_queue(struct nullb_queue *nq)
{
	kfree(nq->tag_map);
	kfree(nq->cmds);
}

static void cleanup_queues(struct nullb *nullb)
{
	int i;

	for (i = 0; i < nullb->nr_queues; i++)
		cleanup_queue(&nullb->queues[i]);

	kfree(nullb->queues);
}

static int setup_queues(struct nullb *nullb)
{
	nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
								GFP_KERNEL);
	if (!nullb->queues)
		return -ENOMEM;

	nullb->nr_queues = 0;
	nullb->queue_depth = hw_queue_depth;

	return 0;
}

static int init_driver_queues(struct nullb *nullb)
{
	struct nullb_queue *nq;
	int i, ret = 0;

	for (i = 0; i < submit_queues; i++) {
		nq = &nullb->queues[i];

		null_init_queue(nullb, nq);

		ret = setup_commands(nq);
		if (ret)
			return ret;
		nullb->nr_queues++;
	}
	return 0;
}

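/*
 * null_add_dev() builds one nullb instance: it sets up the chosen queue
 * mode (blk-mq tag set and queue, bio-based queue, or legacy request
 * queue), marks the queue non-rotational, and registers a gendisk named
 * nullb<N> whose capacity comes from the gb and bs parameters.
 */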
static int null_add_dev(void)
{
	struct gendisk *disk;
	struct nullb *nullb;
	sector_t size;
	int rv;

	nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
	if (!nullb) {
		rv = -ENOMEM;
		goto out;
	}

	spin_lock_init(&nullb->lock);

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
		submit_queues = nr_online_nodes;

	rv = setup_queues(nullb);
	if (rv)
		goto out_free_nullb;

	if (queue_mode == NULL_Q_MQ) {
		nullb->tag_set.ops = &null_mq_ops;
		nullb->tag_set.nr_hw_queues = submit_queues;
		nullb->tag_set.queue_depth = hw_queue_depth;
		nullb->tag_set.numa_node = home_node;
		nullb->tag_set.cmd_size = sizeof(struct nullb_cmd);
		nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		nullb->tag_set.driver_data = nullb;

		rv = blk_mq_alloc_tag_set(&nullb->tag_set);
		if (rv)
			goto out_cleanup_queues;

		nullb->q = blk_mq_init_queue(&nullb->tag_set);
		if (IS_ERR(nullb->q)) {
			rv = -ENOMEM;
			goto out_cleanup_tags;
		}
	} else if (queue_mode == NULL_Q_BIO) {
		nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_make_request(nullb->q, null_queue_bio);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	} else {
		nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
		if (!nullb->q) {
			rv = -ENOMEM;
			goto out_cleanup_queues;
		}
		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
		rv = init_driver_queues(nullb);
		if (rv)
			goto out_cleanup_blk_queue;
	}

	nullb->q->queuedata = nullb;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);

	disk = nullb->disk = alloc_disk_node(1, home_node);
	if (!disk) {
		rv = -ENOMEM;
		goto out_cleanup_blk_queue;
	}

	mutex_lock(&lock);
	list_add_tail(&nullb->list, &nullb_list);
	nullb->index = nullb_indexes++;
	mutex_unlock(&lock);

	blk_queue_logical_block_size(nullb->q, bs);
	blk_queue_physical_block_size(nullb->q, bs);

	size = gb * 1024 * 1024 * 1024ULL;
	sector_div(size, bs);
	set_capacity(disk, size);

	disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
	disk->major = null_major;
	disk->first_minor = nullb->index;
	disk->fops = &null_fops;
	disk->private_data = nullb;
	disk->queue = nullb->q;
	sprintf(disk->disk_name, "nullb%d", nullb->index);
	add_disk(disk);
	return 0;

out_cleanup_blk_queue:
	blk_cleanup_queue(nullb->q);
out_cleanup_tags:
	if (queue_mode == NULL_Q_MQ)
		blk_mq_free_tag_set(&nullb->tag_set);
out_cleanup_queues:
	cleanup_queues(nullb);
out_free_nullb:
	kfree(nullb);
out:
	return rv;
}

static int __init null_init(void)
{
	unsigned int i;

	if (bs > PAGE_SIZE) {
		pr_warn("null_blk: invalid block size\n");
		pr_warn("null_blk: defaulting block size to %lu\n", PAGE_SIZE);
		bs = PAGE_SIZE;
	}

	if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
		if (submit_queues < nr_online_nodes) {
			pr_warn("null_blk: submit_queues param is being set to %u\n",
							nr_online_nodes);
			submit_queues = nr_online_nodes;
		}
	} else if (submit_queues > nr_cpu_ids)
		submit_queues = nr_cpu_ids;
	else if (!submit_queues)
		submit_queues = 1;

	mutex_init(&lock);

	/* Initialize a separate list for each CPU for issuing softirqs */
	for_each_possible_cpu(i) {
		struct completion_queue *cq = &per_cpu(completion_queues, i);

		init_llist_head(&cq->list);

		if (irqmode != NULL_IRQ_TIMER)
			continue;

		hrtimer_init(&cq->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		cq->timer.function = null_cmd_timer_expired;
	}

	null_major = register_blkdev(0, "nullb");
	if (null_major < 0)
		return null_major;

	for (i = 0; i < nr_devices; i++) {
		if (null_add_dev()) {
			unregister_blkdev(null_major, "nullb");
			return -EINVAL;
		}
	}

	pr_info("null: module loaded\n");
	return 0;
}

static void __exit null_exit(void)
{
	struct nullb *nullb;

	unregister_blkdev(null_major, "nullb");

	mutex_lock(&lock);
	while (!list_empty(&nullb_list)) {
		nullb = list_entry(nullb_list.next, struct nullb, list);
		null_del_dev(nullb);
	}
	mutex_unlock(&lock);
}

module_init(null_init);
module_exit(null_exit);

MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
MODULE_LICENSE("GPL");