nvmet: introduce new max queue size configuration entry
[linux-2.6-block.git] / drivers/nvme/target/core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
#include <linux/scatterlist.h>

#include <generated/utsrelease.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include "nvmet.h"

struct kmem_cache *nvmet_bvec_cache;
struct workqueue_struct *buffered_io_wq;
struct workqueue_struct *zbd_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

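/*
 * Map a Linux errno returned by a backend into an NVMe status code,
 * recording in req->error_loc which command field to blame in the
 * error-log entry.
 */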
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
{
	switch (errno) {
	case 0:
		return NVME_SC_SUCCESS;
	case -ENOSPC:
		req->error_loc = offsetof(struct nvme_rw_command, length);
		return NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
	case -EREMOTEIO:
		req->error_loc = offsetof(struct nvme_rw_command, slba);
		return NVME_SC_LBA_RANGE | NVME_SC_DNR;
	case -EOPNOTSUPP:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
		case nvme_cmd_dsm:
		case nvme_cmd_write_zeroes:
			return NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
		default:
			return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
		}
		break;
	case -ENODATA:
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
		return NVME_SC_ACCESS_DENIED;
	case -EIO:
		fallthrough;
	default:
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	}
}

u16 nvmet_report_invalid_opcode(struct nvmet_req *req)
{
	pr_debug("unhandled cmd %d on qid %d\n", req->cmd->common.opcode,
		 req->sq->qid);

	req->error_loc = offsetof(struct nvme_common_command, opcode);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	}
	return 0;
}

static u32 nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *cur;
	unsigned long idx;
	u32 nsid = 0;

	xa_for_each(&subsys->namespaces, idx, cur)
		nsid = cur->nsid;

	return nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds) {
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

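/*
 * Pair queued AEN payloads with outstanding AER commands. ctrl->lock is
 * dropped around nvmet_req_complete() (here and in the failall variant
 * above) so the transport's queue_response handler never runs under the
 * controller lock.
 */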
static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	mutex_lock(&ctrl->lock);
	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
		aen = list_first_entry(&ctrl->async_events,
				       struct nvmet_async_event, entry);
		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
		nvmet_req_complete(req, 0);
		mutex_lock(&ctrl->lock);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_async_event *aen, *tmp;

	mutex_lock(&ctrl->lock);
	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
		list_del(&aen->entry);
		kfree(aen);
	}
	mutex_unlock(&ctrl->lock);
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);

	nvmet_async_events_process(ctrl);
}

void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	queue_work(nvmet_wq, &ctrl->async_event_work);
}

static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&subsys->lock);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->port == port)
			ctrl->ops->delete_ctrl(ctrl);
	}
	mutex_unlock(&subsys->lock);
}

int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

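	/*
	 * nvmet_config_sem is dropped around request_module() below because
	 * the transport module's init path calls nvmet_register_transport(),
	 * which takes the same semaphore for writing.
	 */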
	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	/*
	 * If the user requested PI support and the transport isn't PI
	 * capable, don't enable the port.
	 */
	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
		pr_err("T10-PI is not supported by transport type %d\n",
		       port->disc_addr.trtype);
		ret = -EINVAL;
		goto out_put;
	}

	ret = ops->add_port(port);
	if (ret)
		goto out_put;

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	/*
	 * If the transport didn't set max_queue_size at all, fall back to
	 * the target's default; otherwise clamp the requested value to the
	 * target limits.
	 */
	if (port->max_queue_size < 0)
		port->max_queue_size = NVMET_MAX_QUEUE_SIZE;
	else
		port->max_queue_size = clamp_t(int, port->max_queue_size,
					       NVMET_MIN_QUEUE_SIZE,
					       NVMET_MAX_QUEUE_SIZE);
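	/*
	 * The requested value normally comes from the port's configfs
	 * attribute added together with this limit (the attribute name is
	 * assumed here), e.g.:
	 *   echo 512 > /sys/kernel/config/nvmet/ports/1/param_max_queue_size
	 */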

	port->enabled = true;
	port->tr_ops = ops;
	return 0;

out_put:
	module_put(ops->owner);
	return ret;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;
	port->tr_ops = NULL;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

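/*
 * Traffic based keep-alive (TBKAS): any command received since the last
 * timer expiry sets ctrl->reset_tbkas (see nvmet_req_init() and
 * nvmet_sq_destroy()), in which case the timer is simply re-armed rather
 * than treating the silence as a dead host.
 */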
static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);
	bool reset_tbkas = ctrl->reset_tbkas;

	ctrl->reset_tbkas = false;
	if (reset_tbkas) {
		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
			ctrl->cntlid);
		queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	queue_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

u16 nvmet_req_find_ns(struct nvmet_req *req)
{
	u32 nsid = le32_to_cpu(req->cmd->common.nsid);

	req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
	if (unlikely(!req->ns)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	}

	percpu_ref_get(&req->ns->ref);
	return NVME_SC_SUCCESS;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
{
	int ret;
	struct pci_dev *p2p_dev;

	if (!ns->use_p2pmem)
		return 0;

	if (!ns->bdev) {
		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
		return -EINVAL;
	}

	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
		       ns->device_path);
		return -EINVAL;
	}

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
		if (ret < 0)
			return -EINVAL;
	} else {
		/*
		 * Right now we just check that there is p2pmem available so
		 * we can report an error to the user right away if there
		 * is not. We'll find the actual device to use once we set up
		 * the controller, when the port's device is available.
		 */

		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available for %s\n",
			       ns->device_path);
			return -EINVAL;
		}

		pci_dev_put(p2p_dev);
	}

	return 0;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
				    struct nvmet_ns *ns)
{
	struct device *clients[2];
	struct pci_dev *p2p_dev;
	int ret;

	if (!ctrl->p2p_client || !ns->use_p2pmem)
		return;

	if (ns->p2p_dev) {
		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
		if (ret < 0)
			return;

		p2p_dev = pci_dev_get(ns->p2p_dev);
	} else {
		clients[0] = ctrl->p2p_client;
		clients[1] = nvmet_ns_dev(ns);

		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
		if (!p2p_dev) {
			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
			       dev_name(ctrl->p2p_client), ns->device_path);
			return;
		}
	}

	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
	if (ret < 0)
		pci_dev_put(p2p_dev);

	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
		ns->nsid);
}

bool nvmet_ns_revalidate(struct nvmet_ns *ns)
{
	loff_t oldsize = ns->size;

	if (ns->bdev)
		nvmet_bdev_ns_revalidate(ns);
	else
		nvmet_file_ns_revalidate(ns);

	return oldsize != ns->size;
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;
	int ret;

	mutex_lock(&subsys->lock);
	ret = 0;

	if (nvmet_is_passthru_subsys(subsys)) {
		pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
		goto out_unlock;
	}

	if (ns->enabled)
		goto out_unlock;

	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = nvmet_p2pmem_ns_enable(ns);
	if (ret)
		goto out_dev_disable;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
	if (ret)
		goto out_restore_subsys_maxnsid;

	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;

out_restore_subsys_maxnsid:
	subsys->max_nsid = nvmet_max_nsid(subsys);
	percpu_ref_exit(&ns->ref);
out_dev_put:
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
out_dev_disable:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	xa_erase(&ns->subsys->namespaces, ns->nsid);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));

	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock(). Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);

	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;
	ns->csi = NVME_CSI_NVM;

	return ns;
}

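/*
 * Advance the SQ head pointer lock-free: concurrent completions race on
 * sq->sqhd via try_cmpxchg(), each retrying with a reloaded old value
 * until its (old + 1) % size store wins; the low 16 bits are then
 * reported back to the host in the CQE for SQ flow control.
 */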
static void nvmet_update_sq_head(struct nvmet_req *req)
{
	if (req->sq->size) {
		u32 old_sqhd, new_sqhd;

		old_sqhd = READ_ONCE(req->sq->sqhd);
		do {
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (!try_cmpxchg(&req->sq->sqhd, &old_sqhd, new_sqhd));
	}
	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}

static void nvmet_set_error(struct nvmet_req *req, u16 status)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_error_slot *new_error_slot;
	unsigned long flags;

	req->cqe->status = cpu_to_le16(status << 1);

	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
		return;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	new_error_slot =
		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];

	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
	new_error_slot->status_field = cpu_to_le16(status << 1);
	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
	new_error_slot->lba = cpu_to_le64(req->error_slba);
	new_error_slot->nsid = req->cmd->common.nsid;
	spin_unlock_irqrestore(&ctrl->error_lock, flags);

	/* set the More bit: extra error information is in the error log */
	req->cqe->status |= cpu_to_le16(1 << 14);
}

static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_ns *ns = req->ns;

	if (!req->sq->sqhd_disabled)
		nvmet_update_sq_head(req);
	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
	req->cqe->command_id = req->cmd->common.command_id;

	if (unlikely(status))
		nvmet_set_error(req, status);

	trace_nvmet_req_complete(req);

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	struct nvmet_sq *sq = req->sq;

	__nvmet_req_complete(req, status);
	percpu_ref_put(&sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	struct nvmet_ctrl *ctrl = sq->ctrl;

	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
		nvmet_async_events_failall(ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);
	nvmet_auth_sq_free(sq);

	if (ctrl) {
		/*
		 * The teardown flow may take some time, and the host may not
		 * send us keep-alive during this period, hence reset the
		 * traffic based keep-alive timer so we don't trigger a
		 * controller teardown as a result of a keep-alive expiration.
		 */
		ctrl->reset_tbkas = true;
		sq->ctrl->sqs[sq->qid] = NULL;
		nvmet_ctrl_put(ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);
	nvmet_auth_sq_init(sq);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	if (nvme_is_fabrics(cmd))
		return nvmet_parse_fabrics_io_cmd(req);

	if (unlikely(!nvmet_check_auth_status(req)))
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;

	ret = nvmet_check_ctrl_status(req);
	if (unlikely(ret))
		return ret;

	if (nvmet_is_passthru_req(req))
		return nvmet_parse_passthru_io_cmd(req);

	ret = nvmet_req_find_ns(req);
	if (unlikely(ret))
		return ret;

	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret)) {
		req->error_loc = offsetof(struct nvme_common_command, nsid);
		return ret;
	}

	switch (req->ns->csi) {
	case NVME_CSI_NVM:
		if (req->ns->file)
			return nvmet_file_parse_io_cmd(req);
		return nvmet_bdev_parse_io_cmd(req);
	case NVME_CSI_ZNS:
		if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
			return nvmet_bdev_zns_parse_io_cmd(req);
		return NVME_SC_INVALID_IO_CMD_SET;
	default:
		return NVME_SC_INVALID_IO_CMD_SET;
	}
}

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
	req->transfer_len = 0;
	req->metadata_len = 0;
	req->cqe->status = 0;
	req->cqe->sq_head = 0;
	req->ns = NULL;
	req->error_loc = NVMET_NO_ERROR_LOC;
	req->error_slba = 0;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		req->error_loc = offsetof(struct nvme_common_command, flags);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	trace_nvmet_req_init(req, req->cmd);

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (sq->ctrl)
		sq->ctrl->reset_tbkas = true;

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
{
	if (unlikely(len != req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);

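/*
 * Unlike nvmet_check_transfer_len(), which requires an exact match, this
 * variant only rejects payloads larger than what the host advertised; it
 * is used by handlers such as DSM whose commands may consume less than
 * the full transfer.
 */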
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
{
	if (unlikely(data_len > req->transfer_len)) {
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
		return false;
	}

	return true;
}

static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
{
	return req->transfer_len - req->metadata_len;
}

static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
		struct nvmet_req *req)
{
	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
			nvmet_data_transfer_len(req));
	if (!req->sg)
		goto out_err;

	if (req->metadata_len) {
		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
				&req->metadata_sg_cnt, req->metadata_len);
		if (!req->metadata_sg)
			goto out_free_sg;
	}

	req->p2p_dev = p2p_dev;

	return 0;
out_free_sg:
	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
out_err:
	return -ENOMEM;
}

static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
	    !req->sq->ctrl || !req->sq->qid || !req->ns)
		return NULL;
	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
}

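/*
 * Allocate the data (and, if metadata_len is set, metadata) SGL for a
 * request. P2P memory previously mapped for this controller/namespace
 * pair is preferred; if P2P allocation fails or does not apply, we fall
 * back to regular host memory.
 */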
int nvmet_req_alloc_sgls(struct nvmet_req *req)
{
	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);

	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
		return 0;

	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
			    &req->sg_cnt);
	if (unlikely(!req->sg))
		goto out;

	if (req->metadata_len) {
		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
					     &req->metadata_sg_cnt);
		if (unlikely(!req->metadata_sg))
			goto out_free;
	}

	return 0;
out_free:
	sgl_free(req->sg);
out:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);

void nvmet_req_free_sgls(struct nvmet_req *req)
{
	if (req->p2p_dev) {
		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
		if (req->metadata_sg)
			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
		req->p2p_dev = NULL;
	} else {
		sgl_free(req->sg);
		if (req->metadata_sg)
			sgl_free(req->metadata_sg);
	}

	req->sg = NULL;
	req->metadata_sg = NULL;
	req->sg_cnt = 0;
	req->metadata_sg_cnt = 0;
}
EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);

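/*
 * Helpers to pick apart the CC (Controller Configuration) register.
 * Per the NVMe Base specification the fields sit at: EN bit 0, CSS
 * bits 6:4, MPS bits 10:7, AMS bits 13:11, SHN bits 15:14, IOSQES
 * bits 19:16 and IOCQES bits 23:20; the NVME_CC_*_SHIFT constants from
 * <linux/nvme.h> encode these positions.
 */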
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

static inline bool nvmet_css_supported(u8 cc_css)
{
	switch (cc_css << NVME_CC_CSS_SHIFT) {
	case NVME_CC_CSS_NVM:
	case NVME_CC_CSS_CSI:
		return true;
	default:
		return false;
	}
}

static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (!nvmet_is_disc_subsys(ctrl->subsys) &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    !nvmet_css_supported(nvmet_cc_css(ctrl->cc))) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and clean
	 * up in case a host died before it enabled the controller. Hence,
	 * simply reset the keep alive timer when the controller is enabled.
	 */
	if (ctrl->kato)
		mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* Controller supports one or more I/O Command Sets */
	ctrl->cap |= (1ULL << 43);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	if (ctrl->ops->get_max_queue_size)
		ctrl->cap |= min_t(u16, ctrl->ops->get_max_queue_size(ctrl),
				   ctrl->port->max_queue_size) - 1;
	else
		ctrl->cap |= ctrl->port->max_queue_size - 1;
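	/*
	 * CAP.MQES is a 0's based value, hence the "- 1" above; transports
	 * that implement get_max_queue_size() (e.g. RDMA) may report a
	 * limit lower than the port's configured max_queue_size.
	 */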

	if (nvmet_is_passthru_subsys(ctrl->subsys))
		nvmet_passthrough_override_cap(ctrl);
}

struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = NULL;
	struct nvmet_subsys *subsys;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			/* ctrl found */
			goto found;
		}
	}

	ctrl = NULL; /* ctrl not found */
	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);

found:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
out:
	return ctrl;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       req->cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!nvmet_check_auth_status(req))) {
		pr_warn("qid %d not authenticated\n", req->sq->qid);
		return NVME_SC_AUTH_REQUIRED | NVME_SC_DNR;
	}
	return 0;
}

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
{
	struct nvmet_host_link *p;

	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->allow_any_host)
		return true;

	if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
		struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	unsigned long idx;

	if (!req->p2p_client)
		return;

	ctrl->p2p_client = get_device(req->p2p_client);

	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
}

/*
 * Note: ctrl->subsys->lock should be held when calling this function
 */
static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
		pci_dev_put(radix_tree_deref_slot(slot));

	put_device(ctrl->p2p_client);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out;
	}

	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		req->error_loc = offsetof(struct nvme_common_command, dptr);
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	ctrl->port = req->port;
	ctrl->ops = req->ops;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	/* By default, clear the IDs for passthru loop targets */
	if (ctrl->port->disc_addr.trtype == NVMF_TRTYPE_LOOP)
		subsys->clear_ids = 1;
#endif

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	ctrl->pi_support = ctrl->port->pi_enable && ctrl->subsys->pi_support;
	nvmet_init_cap(ctrl);
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_changed_ns_list;

	ret = ida_alloc_range(&cntlid_ida,
			     subsys->cntlid_min, subsys->cntlid_max,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	/*
	 * Discovery controllers may use some arbitrary high value
	 * in order to clean up stale discovery sessions
	 */
	if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
		kato = NVMET_DISC_KATO_MS;

	/* keep-alive timeout in seconds */
	ctrl->kato = DIV_ROUND_UP(kato, 1000);

	ctrl->err_counter = 0;
	spin_lock_init(&ctrl->error_lock);

	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	nvmet_setup_p2p_ns_map(ctrl, req);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_free_sqs:
	kfree(ctrl->sqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	nvmet_release_p2p_ns_map(ctrl);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	nvmet_destroy_auth(ctrl);

	ida_free(&cntlid_ida, ctrl->cntlid);

	nvmet_async_events_free(ctrl);
	kfree(ctrl->sqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		queue_work(nvmet_wq, &ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;
	char serial[NVMET_SN_MAX_SIZE / 2];
	int ret;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return ERR_PTR(-ENOMEM);

	subsys->ver = NVMET_DEFAULT_VS;
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&serial, sizeof(serial));
	bin2hex(subsys->serial, &serial, sizeof(serial));

	subsys->model_number = kstrdup(NVMET_DEFAULT_CTRL_MODEL, GFP_KERNEL);
	if (!subsys->model_number) {
		ret = -ENOMEM;
		goto free_subsys;
	}

	subsys->ieee_oui = 0;

	subsys->firmware_rev = kstrndup(UTS_RELEASE, NVMET_FR_MAX_SIZE, GFP_KERNEL);
	if (!subsys->firmware_rev) {
		ret = -ENOMEM;
		goto free_mn;
	}

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
	case NVME_NQN_CURR:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		ret = -EINVAL;
		goto free_fr;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		ret = -ENOMEM;
		goto free_fr;
	}
	subsys->cntlid_min = NVME_CNTLID_MIN;
	subsys->cntlid_max = NVME_CNTLID_MAX;
	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	xa_init(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;

free_fr:
	kfree(subsys->firmware_rev);
free_mn:
	kfree(subsys->model_number);
free_subsys:
	kfree(subsys);
	return ERR_PTR(ret);
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));

	xa_destroy(&subsys->namespaces);
	nvmet_passthru_subsys_free(subsys);

	kfree(subsys->subsysnqn);
	kfree(subsys->model_number);
	kfree(subsys->firmware_rev);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error = -ENOMEM;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	nvmet_bvec_cache = kmem_cache_create("nvmet-bvec",
			NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!nvmet_bvec_cache)
		return -ENOMEM;

	zbd_wq = alloc_workqueue("nvmet-zbd-wq", WQ_MEM_RECLAIM, 0);
	if (!zbd_wq)
		goto out_destroy_bvec_cache;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq)
		goto out_free_zbd_work_queue;

	nvmet_wq = alloc_workqueue("nvmet-wq", WQ_MEM_RECLAIM, 0);
	if (!nvmet_wq)
		goto out_free_buffered_work_queue;

	error = nvmet_init_discovery();
	if (error)
		goto out_free_nvmet_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_nvmet_work_queue:
	destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
	destroy_workqueue(buffered_io_wq);
out_free_zbd_work_queue:
	destroy_workqueue(zbd_wq);
out_destroy_bvec_cache:
	kmem_cache_destroy(nvmet_bvec_cache);
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(nvmet_wq);
	destroy_workqueue(buffered_io_wq);
	destroy_workqueue(zbd_wq);
	kmem_cache_destroy(nvmet_bvec_cache);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_DESCRIPTION("NVMe target core framework");
MODULE_LICENSE("GPL v2");