/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures write lock should be obtained,
 * while when reading (populating discovery log page or checking host-subsystem
 * link) read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

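/*
 * The subsystem namespaces list is kept sorted by nsid (see nvmet_ns_enable),
 * so the highest in-use nsid is simply the last entry.
 */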
static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

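/*
 * Pack an AEN into completion result dword 0: event type in bits 02:00,
 * event information in bits 15:08 and the log page identifier in bits 23:16.
 */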
static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
		return true;
	return test_and_set_bit(aen, &ctrl->aen_masked);
}

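/*
 * Record a namespace ID in the controller's Changed Namespace List log.
 * Duplicate IDs are ignored; once more than NVME_MAX_CHANGED_NAMESPACES
 * entries would be needed, the list is collapsed to the single entry
 * 0xffffffff and nr_changed_ns is set to U32_MAX to mark the overflow.
 */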
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

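/*
 * Enable a port: if the requested transport is not registered yet, drop
 * nvmet_config_sem around request_module() so the transport module can load
 * and register itself, then retry the lookup before calling ->add_port().
 */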
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the
	 * implementation of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespaces from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as a RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;
	uuid_gen(&ns->uuid);

	return ns;
}

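/*
 * Complete a request: advance the submission queue head (lock-free, using a
 * cmpxchg loop so concurrent completions on the same queue stay consistent),
 * fill in the CQE fields, drop the namespace reference and hand the response
 * back to the transport.
 */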
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	u32 old_sqhd, new_sqhd;
	u16 sqhd;

	if (status)
		nvmet_set_status(req, status);

	if (req->sq->size) {
		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	sqhd = req->sq->sqhd & 0x0000FFFF;
	req->rsp->sq_head = cpu_to_le16(sqhd);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	if (req->ns)
		nvmet_put_namespace(req->ns);
	req->ops->queue_response(req);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

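/*
 * Tear down a submission queue.  The percpu ref is killed with a confirm
 * callback: confirm_done fires once the ref has switched to atomic mode (no
 * new requests can start), and free_done fires when the last in-flight
 * request drops its reference, so both are waited on before the exit.
 */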
a07b4970
CH
502void nvmet_sq_destroy(struct nvmet_sq *sq)
503{
504 /*
505 * If this is the admin queue, complete all AERs so that our
506 * queue doesn't have outstanding requests on it.
507 */
508 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
509 nvmet_async_events_free(sq->ctrl);
427242ce
SG
510 percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
511 wait_for_completion(&sq->confirm_done);
a07b4970
CH
512 wait_for_completion(&sq->free_done);
513 percpu_ref_exit(&sq->ref);
514
515 if (sq->ctrl) {
516 nvmet_ctrl_put(sq->ctrl);
517 sq->ctrl = NULL; /* allows reusing the queue later */
518 }
519}
520EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
521
522static void nvmet_sq_free(struct percpu_ref *ref)
523{
524 struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
525
526 complete(&sq->free_done);
527}
528
529int nvmet_sq_init(struct nvmet_sq *sq)
530{
531 int ret;
532
533 ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
534 if (ret) {
535 pr_err("percpu_ref init failed!\n");
536 return ret;
537 }
538 init_completion(&sq->free_done);
427242ce 539 init_completion(&sq->confirm_done);
a07b4970
CH
540
541 return 0;
542}
543EXPORT_SYMBOL_GPL(nvmet_sq_init);
544
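/*
 * I/O commands are only parsed once the controller is enabled and ready;
 * the request is then routed to the file or block-device backend depending
 * on how the namespace was set up.
 */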
static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}

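/*
 * Common request initialization for all transports.  Commands are dispatched
 * in order of precedence: before a controller is associated with the queue
 * only connect is valid, non-zero qids carry I/O commands, then fabrics,
 * discovery and finally regular admin commands.
 */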
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->ns = NULL;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any Non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (status)
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

void nvmet_req_execute(struct nvmet_req *req)
{
	if (unlikely(req->data_len != req->transfer_len))
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
	else
		req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);

static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

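/*
 * Enable the controller: validate the CC fields we support (fixed I/O queue
 * entry sizes, MPS and AMS of zero, NVM command set only).  Invalid settings
 * raise CSTS.CFS, otherwise CSTS.RDY is reported to the host.
 */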
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	    nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES ||
	    nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

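/*
 * Advertised CAP register: bit 37 selects the NVM command set, the CC.EN
 * timeout field (in 500 msec units) is set to 15, and the maximum queue
 * entries field is the queue size minus one, as the field is zero-based.
 */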
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

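/*
 * Look up an existing controller by cntlid for an I/O queue connect: the
 * subsystem must resolve, the connecting host NQN must match the one that
 * created the controller, and a reference is taken before returning it.
 */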
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
			cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

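/*
 * Allocate a new controller for an admin queue connect: verify the host is
 * allowed to access the subsystem, set up the AEN state, the changed
 * namespace log buffer and the per-queue arrays, allocate a cntlid and start
 * the keep-alive timer (discovery controllers use a fixed NVMET_DISC_KATO
 * and reject an explicit KATO value).
 */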
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_remove_ida;
		}

		/*
		 * Discovery controllers use some arbitrary high value in order
		 * to cleanup stale discovery sessions
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_remove_ida:
	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

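/*
 * Resolve a subsystem NQN for a connect request on a given port.  The
 * well-known discovery NQN maps to the global discovery subsystem; any other
 * NQN must have been explicitly linked to the port for the lookup to succeed.
 */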
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	error = nvmet_init_discovery();
	if (error)
		goto out;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");