// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS	256

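/*
 * Per-command context. Host and target live in the same kernel, so one
 * allocation carries both the host-side command/completion pair and the
 * target-side nvmet_req; commands cross the "fabric" without a copy.
 */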
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, true);
	nvme_complete_rq(req);
}

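/*
 * Look up the blk-mq tag set a queue belongs to: index 0 is the admin
 * tag set, everything else maps into the I/O tag set (shifted by one).
 */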
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

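/*
 * Target-side completion path, invoked by the nvmet core when a command
 * finishes executing. Completions are translated straight back into
 * host-side request completions (or async events).
 */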
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

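/*
 * Host-side submission path. Rather than posting the command on a wire,
 * initialize the embedded nvmet_req and defer to a workqueue, where
 * nvme_loop_execute_work() runs it through nvmet_req_execute(). The
 * request's scatterlist is handed to the target directly.
 */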
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl))
			return BLK_STS_RESOURCE;

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

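/*
 * AER commands carry no struct request; the controller keeps a single
 * dedicated iod and tags it with a command id just past the admin tag
 * space (NVME_AQ_BLK_MQ_DEPTH) so nvme_loop_queue_response() can tell
 * it apart from tagged commands.
 */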
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

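/*
 * Admin queue teardown mirrors setup: mark the queue dead, destroy the
 * target-side SQ, then release the host-side queue and tag set.
 */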
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

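/*
 * Send a Fabrics Connect on each initialized I/O queue, marking each
 * queue live only afterwards so nvmf_check_ready() gates submissions.
 */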
static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

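/*
 * Admin queue bring-up: target-side SQ first, then the host-side tag
 * set and request queue, Fabrics Connect, controller enable, and
 * identify, unwinding in reverse order on any failure.
 */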
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_tagset;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (error) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_cleanup_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

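/*
 * Cancel outstanding I/O and destroy the I/O queues, shut the
 * controller down if it is still live, and tear the admin queue down
 * last since the shutdown itself travels over it.
 */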
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		nvme_loop_destroy_io_queues(ctrl);
	}

	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

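/*
 * Reset worker: tear everything down, move to CONNECTING, then rebuild
 * the admin and I/O queues. A failed reset removes the controller
 * instead of retrying.
 */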
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

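/*
 * Build the I/O side: init the target SQs, allocate the tag set and
 * connect_q, then Connect each queue. The depth comes straight from
 * the user-supplied queue_size option.
 */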
static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

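/*
 * Pick the nvmet port for a new controller: honor an explicitly given
 * traddr, otherwise fall back to the first registered loop port.
 */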
static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	nvme_get_ctrl(&ctrl->ctrl);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response	= nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */