// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

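/*
 * Per-command context, carried in the blk-mq PDU of each request.  It holds
 * both the host-side command and completion buffers and the target-side
 * nvmet_req, since the loop transport passes commands between the two ends
 * of the fabric within the same kernel image.  first_sgl[] is the inline
 * scatterlist storage reserved through the tag sets' cmd_size below.
 */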
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

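/*
 * Host-side controller state, embedding the generic nvme_ctrl.  queues[0]
 * is the admin queue; the I/O queues follow, so the array is allocated with
 * nr_io_queues + 1 entries when the controller is created.
 */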
struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);
	nvme_complete_rq(req);
}

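/*
 * Map a loop queue back to the tag set it was created from: queue index 0
 * belongs to the admin tag set, all other queues to the I/O tag set, whose
 * hardware queues are shifted down by one.
 */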
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

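/*
 * Completion side: the target calls back here (via nvme_loop_ops) once a
 * command has been executed.  Except for AENs, which are special-cased
 * below, the completion is matched to its struct request by command_id and
 * finished on the host side.
 */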
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvme_loop_queue_idx(queue),
				     cqe->command_id))) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"got bad command_id %#x on queue %d\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
			nvme_loop_complete_rq(rq);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	iod->req.execute(&iod->req);
}

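/*
 * Submission side.  After nvmet_req_init() the actual execution is deferred
 * to nvmet_wq, the nvmet-private workqueue used instead of the system
 * workqueue, so target code never runs directly from the submitter's blk-mq
 * context.
 */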
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, NVME_INLINE_SG_CNT)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	queue_work(nvmet_wq, &iod->work);
	return BLK_STS_OK;
}

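/*
 * AENs are not backed by a struct request, so the command is built by hand
 * in the pre-allocated async_event_iod, using the reserved command_id
 * NVME_AQ_BLK_MQ_DEPTH that the completion path above recognizes via
 * nvme_is_aen_req().
 */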
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	queue_work(nvmet_wq, &iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &ctrl->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static struct lock_class_key loop_hctx_fq_lock_key;

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	/*
	 * flush_end_io() can be called recursively for us, so use our own
	 * lock class key to avoid a possible lockdep recursive-locking
	 * report.  This also lets us drop the dynamically allocated lock
	 * class for each flush queue, which could otherwise cause a
	 * horrible boot delay.
	 */
	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

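/*
 * Bring up the admin queue end to end: tag set, fabrics and admin request
 * queues, the fabrics Connect command, controller enable, and finally
 * nvme_init_ctrl_finish().  The error labels unwind in reverse order of
 * setup.
 */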
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}
	/* reset stopped state for the fresh admin queue */
	clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	nvme_start_admin_queue(&ctrl->ctrl);

	error = nvme_init_ctrl_finish(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

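/*
 * Teardown mirrors bring-up: quiesce the I/O queues, cancel whatever is
 * still in flight and wait for it to finish, then do the same for the
 * admin queue after an orderly controller shutdown.
 */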
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	nvme_stop_admin_queue(&ctrl->ctrl);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

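/*
 * Controller reset: tear everything down, then rebuild the admin and I/O
 * queues while walking the CONNECTING -> LIVE state machine.  A failure at
 * any step removes the controller instead of retrying.
 */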
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		if (ctrl->ctrl.state != NVME_CTRL_DELETING &&
		    ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO)
			/* state change failure for non-deleted ctrl? */
			WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

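/*
 * I/O queue bring-up for a new controller: initialize the target-side
 * submission queues, allocate the I/O tag set and connect_q, then issue a
 * fabrics Connect on every I/O queue.
 */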
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
	ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
	if (ret)
		goto out_free_tagset;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

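/*
 * Entry point from the fabrics layer for a new "loop" controller, typically
 * reached through nvme-cli (illustrative invocation, the subsystem NQN is a
 * placeholder):
 *
 *	nvme connect -t loop -n testnqn
 *
 * The target port is resolved first, then the admin queue and any I/O
 * queues are brought up before the controller goes LIVE and is started.
 */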
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret) {
		kfree(ctrl);
		goto out;
	}

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
		WARN_ON_ONCE(1);

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE))
		WARN_ON_ONCE(1);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out:
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port.  This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

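/*
 * The loop driver registers with both sides of the stack: with nvmet as a
 * target transport (nvme_loop_ops) and with the host fabrics code as a
 * transport driver (nvme_loop_transport).  If the second registration
 * fails, the first is unwound so the module loads all-or-nothing.
 */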
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */