1 // SPDX-License-Identifier: GPL-2.0
/*
 * Configfs interface for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
#ifdef CONFIG_NVME_TARGET_AUTH
#include <linux/nvme-auth.h>
#endif
#include <linux/nvme-keyring.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>

#include "nvmet.h"
24 static const struct config_item_type nvmet_host_type;
25 static const struct config_item_type nvmet_subsys_type;
27 static LIST_HEAD(nvmet_ports_list);
28 struct list_head *nvmet_ports = &nvmet_ports_list;
30 struct nvmet_type_name_map {
35 static struct nvmet_type_name_map nvmet_transport[] = {
36 { NVMF_TRTYPE_RDMA, "rdma" },
37 { NVMF_TRTYPE_FC, "fc" },
38 { NVMF_TRTYPE_TCP, "tcp" },
39 { NVMF_TRTYPE_LOOP, "loop" },
42 static const struct nvmet_type_name_map nvmet_addr_family[] = {
43 { NVMF_ADDR_FAMILY_PCI, "pcie" },
44 { NVMF_ADDR_FAMILY_IP4, "ipv4" },
45 { NVMF_ADDR_FAMILY_IP6, "ipv6" },
46 { NVMF_ADDR_FAMILY_IB, "ib" },
47 { NVMF_ADDR_FAMILY_FC, "fc" },
48 { NVMF_ADDR_FAMILY_LOOP, "loop" },
51 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
54 pr_err("Disable port '%u' before changing attribute in %s\n",
55 le16_to_cpu(p->disc_addr.portid), caller);
/*
 * nvmet_port Generic ConfigFS definitions.
 * Used in any place in the ConfigFS tree that refers to an address.
 */
63 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
65 u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
68 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
69 if (nvmet_addr_family[i].type == adrfam)
70 return snprintf(page, PAGE_SIZE, "%s\n",
71 nvmet_addr_family[i].name);
74 return snprintf(page, PAGE_SIZE, "\n");
77 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
78 const char *page, size_t count)
80 struct nvmet_port *port = to_nvmet_port(item);
83 if (nvmet_is_port_enabled(port, __func__))
86 for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
87 if (sysfs_streq(page, nvmet_addr_family[i].name))
91 pr_err("Invalid value '%s' for adrfam\n", page);
95 port->disc_addr.adrfam = nvmet_addr_family[i].type;
99 CONFIGFS_ATTR(nvmet_, addr_adrfam);
101 static ssize_t nvmet_addr_portid_show(struct config_item *item,
104 __le16 portid = to_nvmet_port(item)->disc_addr.portid;
106 return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
109 static ssize_t nvmet_addr_portid_store(struct config_item *item,
110 const char *page, size_t count)
112 struct nvmet_port *port = to_nvmet_port(item);
115 if (kstrtou16(page, 0, &portid)) {
116 pr_err("Invalid value '%s' for portid\n", page);
120 if (nvmet_is_port_enabled(port, __func__))
123 port->disc_addr.portid = cpu_to_le16(portid);
127 CONFIGFS_ATTR(nvmet_, addr_portid);
129 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
132 struct nvmet_port *port = to_nvmet_port(item);
134 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
137 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
138 const char *page, size_t count)
140 struct nvmet_port *port = to_nvmet_port(item);
142 if (count > NVMF_TRADDR_SIZE) {
143 pr_err("Invalid value '%s' for traddr\n", page);
147 if (nvmet_is_port_enabled(port, __func__))
150 if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
155 CONFIGFS_ATTR(nvmet_, addr_traddr);
157 static const struct nvmet_type_name_map nvmet_addr_treq[] = {
158 { NVMF_TREQ_NOT_SPECIFIED, "not specified" },
159 { NVMF_TREQ_REQUIRED, "required" },
160 { NVMF_TREQ_NOT_REQUIRED, "not required" },
163 static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
165 return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
168 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
170 u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
173 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
174 if (treq == nvmet_addr_treq[i].type)
175 return snprintf(page, PAGE_SIZE, "%s\n",
176 nvmet_addr_treq[i].name);
179 return snprintf(page, PAGE_SIZE, "\n");
182 static ssize_t nvmet_addr_treq_store(struct config_item *item,
183 const char *page, size_t count)
185 struct nvmet_port *port = to_nvmet_port(item);
186 u8 treq = nvmet_port_disc_addr_treq_mask(port);
189 if (nvmet_is_port_enabled(port, __func__))
192 for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
193 if (sysfs_streq(page, nvmet_addr_treq[i].name))
197 pr_err("Invalid value '%s' for treq\n", page);
201 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
202 port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
203 switch (nvmet_addr_treq[i].type) {
204 case NVMF_TREQ_NOT_SPECIFIED:
205 pr_debug("treq '%s' not allowed for TLS1.3\n",
206 nvmet_addr_treq[i].name);
208 case NVMF_TREQ_NOT_REQUIRED:
209 pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
215 treq |= nvmet_addr_treq[i].type;
216 port->disc_addr.treq = treq;
220 CONFIGFS_ATTR(nvmet_, addr_treq);
222 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
225 struct nvmet_port *port = to_nvmet_port(item);
227 return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
230 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
231 const char *page, size_t count)
233 struct nvmet_port *port = to_nvmet_port(item);
235 if (count > NVMF_TRSVCID_SIZE) {
236 pr_err("Invalid value '%s' for trsvcid\n", page);
239 if (nvmet_is_port_enabled(port, __func__))
242 if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
247 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
249 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
252 struct nvmet_port *port = to_nvmet_port(item);
254 return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
257 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
258 const char *page, size_t count)
260 struct nvmet_port *port = to_nvmet_port(item);
263 if (nvmet_is_port_enabled(port, __func__))
265 ret = kstrtoint(page, 0, &port->inline_data_size);
267 pr_err("Invalid value '%s' for inline_data_size\n", page);
273 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
275 #ifdef CONFIG_BLK_DEV_INTEGRITY
276 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
279 struct nvmet_port *port = to_nvmet_port(item);
281 return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
284 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
285 const char *page, size_t count)
287 struct nvmet_port *port = to_nvmet_port(item);
290 if (kstrtobool(page, &val))
293 if (nvmet_is_port_enabled(port, __func__))
296 port->pi_enable = val;
300 CONFIGFS_ATTR(nvmet_, param_pi_enable);
303 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
306 struct nvmet_port *port = to_nvmet_port(item);
309 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
310 if (port->disc_addr.trtype == nvmet_transport[i].type)
311 return snprintf(page, PAGE_SIZE,
312 "%s\n", nvmet_transport[i].name);
315 return sprintf(page, "\n");
318 static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
320 port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
321 port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
322 port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
325 static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
327 port->disc_addr.tsas.tcp.sectype = sectype;
330 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
331 const char *page, size_t count)
333 struct nvmet_port *port = to_nvmet_port(item);
336 if (nvmet_is_port_enabled(port, __func__))
339 for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
340 if (sysfs_streq(page, nvmet_transport[i].name))
344 pr_err("Invalid value '%s' for trtype\n", page);
348 memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
349 port->disc_addr.trtype = nvmet_transport[i].type;
350 if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
351 nvmet_port_init_tsas_rdma(port);
352 else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
353 nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
357 CONFIGFS_ATTR(nvmet_, addr_trtype);
359 static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
360 { NVMF_TCP_SECTYPE_NONE, "none" },
361 { NVMF_TCP_SECTYPE_TLS13, "tls1.3" },
364 static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
365 { NVMF_RDMA_QPTYPE_CONNECTED, "connected" },
366 { NVMF_RDMA_QPTYPE_DATAGRAM, "datagram" },
369 static ssize_t nvmet_addr_tsas_show(struct config_item *item,
372 struct nvmet_port *port = to_nvmet_port(item);
375 if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
376 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
377 if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
378 return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
380 } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
381 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
382 if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
383 return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
386 return sprintf(page, "reserved\n");
389 static ssize_t nvmet_addr_tsas_store(struct config_item *item,
390 const char *page, size_t count)
392 struct nvmet_port *port = to_nvmet_port(item);
393 u8 treq = nvmet_port_disc_addr_treq_mask(port);
397 if (nvmet_is_port_enabled(port, __func__))
400 if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
403 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
404 if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
405 sectype = nvmet_addr_tsas_tcp[i].type;
410 pr_err("Invalid value '%s' for tsas\n", page);
414 if (sectype == NVMF_TCP_SECTYPE_TLS13) {
415 if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
416 pr_err("TLS is not supported\n");
419 if (!port->keyring) {
420 pr_err("TLS keyring not configured\n");
425 nvmet_port_init_tsas_tcp(port, sectype);
427 * If TLS is enabled TREQ should be set to 'required' per default
429 if (sectype == NVMF_TCP_SECTYPE_TLS13) {
430 u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);
432 if (sc == NVMF_TREQ_NOT_SPECIFIED)
433 treq |= NVMF_TREQ_REQUIRED;
437 treq |= NVMF_TREQ_NOT_SPECIFIED;
439 port->disc_addr.treq = treq;
443 CONFIGFS_ATTR(nvmet_, addr_tsas);
/*
 * Namespace structures & file operation functions below
 */
448 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
450 return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
453 static ssize_t nvmet_ns_device_path_store(struct config_item *item,
454 const char *page, size_t count)
456 struct nvmet_ns *ns = to_nvmet_ns(item);
457 struct nvmet_subsys *subsys = ns->subsys;
461 mutex_lock(&subsys->lock);
467 len = strcspn(page, "\n");
471 kfree(ns->device_path);
473 ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
474 if (!ns->device_path)
477 mutex_unlock(&subsys->lock);
481 mutex_unlock(&subsys->lock);
485 CONFIGFS_ATTR(nvmet_ns_, device_path);
487 #ifdef CONFIG_PCI_P2PDMA
488 static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
490 struct nvmet_ns *ns = to_nvmet_ns(item);
492 return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
495 static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
496 const char *page, size_t count)
498 struct nvmet_ns *ns = to_nvmet_ns(item);
499 struct pci_dev *p2p_dev = NULL;
504 mutex_lock(&ns->subsys->lock);
510 error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
516 ns->use_p2pmem = use_p2pmem;
517 pci_dev_put(ns->p2p_dev);
518 ns->p2p_dev = p2p_dev;
521 mutex_unlock(&ns->subsys->lock);
526 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
527 #endif /* CONFIG_PCI_P2PDMA */
529 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
531 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
534 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
535 const char *page, size_t count)
537 struct nvmet_ns *ns = to_nvmet_ns(item);
538 struct nvmet_subsys *subsys = ns->subsys;
541 mutex_lock(&subsys->lock);
547 if (uuid_parse(page, &ns->uuid))
551 mutex_unlock(&subsys->lock);
552 return ret ? ret : count;
555 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
557 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
559 return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
562 static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
563 const char *page, size_t count)
565 struct nvmet_ns *ns = to_nvmet_ns(item);
566 struct nvmet_subsys *subsys = ns->subsys;
568 const char *p = page;
572 mutex_lock(&subsys->lock);
578 for (i = 0; i < 16; i++) {
579 if (p + 2 > page + count) {
583 if (!isxdigit(p[0]) || !isxdigit(p[1])) {
588 nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
591 if (*p == '-' || *p == ':')
595 memcpy(&ns->nguid, nguid, sizeof(nguid));
597 mutex_unlock(&subsys->lock);
598 return ret ? ret : count;
601 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
603 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
605 return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
608 static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
609 const char *page, size_t count)
611 struct nvmet_ns *ns = to_nvmet_ns(item);
612 u32 oldgrpid, newgrpid;
615 ret = kstrtou32(page, 0, &newgrpid);
619 if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
622 down_write(&nvmet_ana_sem);
623 oldgrpid = ns->anagrpid;
624 nvmet_ana_group_enabled[newgrpid]++;
625 ns->anagrpid = newgrpid;
626 nvmet_ana_group_enabled[oldgrpid]--;
628 up_write(&nvmet_ana_sem);
630 nvmet_send_ana_event(ns->subsys, NULL);
634 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
636 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
638 return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
641 static ssize_t nvmet_ns_enable_store(struct config_item *item,
642 const char *page, size_t count)
644 struct nvmet_ns *ns = to_nvmet_ns(item);
648 if (kstrtobool(page, &enable))
652 ret = nvmet_ns_enable(ns);
654 nvmet_ns_disable(ns);
656 return ret ? ret : count;
659 CONFIGFS_ATTR(nvmet_ns_, enable);
661 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
663 return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
666 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
667 const char *page, size_t count)
669 struct nvmet_ns *ns = to_nvmet_ns(item);
672 if (kstrtobool(page, &val))
675 mutex_lock(&ns->subsys->lock);
677 pr_err("disable ns before setting buffered_io value.\n");
678 mutex_unlock(&ns->subsys->lock);
682 ns->buffered_io = val;
683 mutex_unlock(&ns->subsys->lock);
687 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
689 static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
690 const char *page, size_t count)
692 struct nvmet_ns *ns = to_nvmet_ns(item);
695 if (kstrtobool(page, &val))
701 mutex_lock(&ns->subsys->lock);
703 pr_err("enable ns before revalidate.\n");
704 mutex_unlock(&ns->subsys->lock);
707 if (nvmet_ns_revalidate(ns))
708 nvmet_ns_changed(ns->subsys, ns->nsid);
709 mutex_unlock(&ns->subsys->lock);
713 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
715 static struct configfs_attribute *nvmet_ns_attrs[] = {
716 &nvmet_ns_attr_device_path,
717 &nvmet_ns_attr_device_nguid,
718 &nvmet_ns_attr_device_uuid,
719 &nvmet_ns_attr_ana_grpid,
720 &nvmet_ns_attr_enable,
721 &nvmet_ns_attr_buffered_io,
722 &nvmet_ns_attr_revalidate_size,
723 #ifdef CONFIG_PCI_P2PDMA
724 &nvmet_ns_attr_p2pmem,
/* configfs release callback: free the namespace object. */
static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}
736 static struct configfs_item_operations nvmet_ns_item_ops = {
737 .release = nvmet_ns_release,
740 static const struct config_item_type nvmet_ns_type = {
741 .ct_item_ops = &nvmet_ns_item_ops,
742 .ct_attrs = nvmet_ns_attrs,
743 .ct_owner = THIS_MODULE,
746 static struct config_group *nvmet_ns_make(struct config_group *group,
749 struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
754 ret = kstrtou32(name, 0, &nsid);
759 if (nsid == 0 || nsid == NVME_NSID_ALL) {
760 pr_err("invalid nsid %#x", nsid);
765 ns = nvmet_ns_alloc(subsys, nsid);
768 config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
770 pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
777 static struct configfs_group_operations nvmet_namespaces_group_ops = {
778 .make_group = nvmet_ns_make,
781 static const struct config_item_type nvmet_namespaces_type = {
782 .ct_group_ops = &nvmet_namespaces_group_ops,
783 .ct_owner = THIS_MODULE,
786 #ifdef CONFIG_NVME_TARGET_PASSTHRU
788 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
791 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
793 return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
796 static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
797 const char *page, size_t count)
799 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
803 mutex_lock(&subsys->lock);
806 if (subsys->passthru_ctrl)
810 len = strcspn(page, "\n");
814 kfree(subsys->passthru_ctrl_path);
816 subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
817 if (!subsys->passthru_ctrl_path)
820 mutex_unlock(&subsys->lock);
824 mutex_unlock(&subsys->lock);
827 CONFIGFS_ATTR(nvmet_passthru_, device_path);
829 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
832 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
834 return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
837 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
838 const char *page, size_t count)
840 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
844 if (kstrtobool(page, &enable))
848 ret = nvmet_passthru_ctrl_enable(subsys);
850 nvmet_passthru_ctrl_disable(subsys);
852 return ret ? ret : count;
854 CONFIGFS_ATTR(nvmet_passthru_, enable);
856 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
859 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
862 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
863 const char *page, size_t count)
865 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
866 unsigned int timeout;
868 if (kstrtouint(page, 0, &timeout))
870 subsys->admin_timeout = timeout;
873 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
875 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
878 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
881 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
882 const char *page, size_t count)
884 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
885 unsigned int timeout;
887 if (kstrtouint(page, 0, &timeout))
889 subsys->io_timeout = timeout;
892 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
894 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
897 return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
900 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
901 const char *page, size_t count)
903 struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
904 unsigned int clear_ids;
906 if (kstrtouint(page, 0, &clear_ids))
908 subsys->clear_ids = clear_ids;
911 CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
913 static struct configfs_attribute *nvmet_passthru_attrs[] = {
914 &nvmet_passthru_attr_device_path,
915 &nvmet_passthru_attr_enable,
916 &nvmet_passthru_attr_admin_timeout,
917 &nvmet_passthru_attr_io_timeout,
918 &nvmet_passthru_attr_clear_ids,
922 static const struct config_item_type nvmet_passthru_type = {
923 .ct_attrs = nvmet_passthru_attrs,
924 .ct_owner = THIS_MODULE,
927 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
929 config_group_init_type_name(&subsys->passthru_group,
930 "passthru", &nvmet_passthru_type);
931 configfs_add_default_group(&subsys->passthru_group,
935 #else /* CONFIG_NVME_TARGET_PASSTHRU */
937 static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
941 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
943 static int nvmet_port_subsys_allow_link(struct config_item *parent,
944 struct config_item *target)
946 struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
947 struct nvmet_subsys *subsys;
948 struct nvmet_subsys_link *link, *p;
951 if (target->ci_type != &nvmet_subsys_type) {
952 pr_err("can only link subsystems into the subsystems dir.!\n");
955 subsys = to_subsys(target);
956 link = kmalloc(sizeof(*link), GFP_KERNEL);
959 link->subsys = subsys;
961 down_write(&nvmet_config_sem);
963 list_for_each_entry(p, &port->subsystems, entry) {
964 if (p->subsys == subsys)
968 if (list_empty(&port->subsystems)) {
969 ret = nvmet_enable_port(port);
974 list_add_tail(&link->entry, &port->subsystems);
975 nvmet_port_disc_changed(port, subsys);
977 up_write(&nvmet_config_sem);
981 up_write(&nvmet_config_sem);
986 static void nvmet_port_subsys_drop_link(struct config_item *parent,
987 struct config_item *target)
989 struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
990 struct nvmet_subsys *subsys = to_subsys(target);
991 struct nvmet_subsys_link *p;
993 down_write(&nvmet_config_sem);
994 list_for_each_entry(p, &port->subsystems, entry) {
995 if (p->subsys == subsys)
998 up_write(&nvmet_config_sem);
1002 list_del(&p->entry);
1003 nvmet_port_del_ctrls(port, subsys);
1004 nvmet_port_disc_changed(port, subsys);
1006 if (list_empty(&port->subsystems))
1007 nvmet_disable_port(port);
1008 up_write(&nvmet_config_sem);
1012 static struct configfs_item_operations nvmet_port_subsys_item_ops = {
1013 .allow_link = nvmet_port_subsys_allow_link,
1014 .drop_link = nvmet_port_subsys_drop_link,
1017 static const struct config_item_type nvmet_port_subsys_type = {
1018 .ct_item_ops = &nvmet_port_subsys_item_ops,
1019 .ct_owner = THIS_MODULE,
1022 static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
1023 struct config_item *target)
1025 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
1026 struct nvmet_host *host;
1027 struct nvmet_host_link *link, *p;
1030 if (target->ci_type != &nvmet_host_type) {
1031 pr_err("can only link hosts into the allowed_hosts directory!\n");
1035 host = to_host(target);
1036 link = kmalloc(sizeof(*link), GFP_KERNEL);
1041 down_write(&nvmet_config_sem);
1043 if (subsys->allow_any_host) {
1044 pr_err("can't add hosts when allow_any_host is set!\n");
1049 list_for_each_entry(p, &subsys->hosts, entry) {
1050 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
1053 list_add_tail(&link->entry, &subsys->hosts);
1054 nvmet_subsys_disc_changed(subsys, host);
1056 up_write(&nvmet_config_sem);
1059 up_write(&nvmet_config_sem);
1064 static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
1065 struct config_item *target)
1067 struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
1068 struct nvmet_host *host = to_host(target);
1069 struct nvmet_host_link *p;
1071 down_write(&nvmet_config_sem);
1072 list_for_each_entry(p, &subsys->hosts, entry) {
1073 if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
1076 up_write(&nvmet_config_sem);
1080 list_del(&p->entry);
1081 nvmet_subsys_disc_changed(subsys, host);
1083 up_write(&nvmet_config_sem);
1087 static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
1088 .allow_link = nvmet_allowed_hosts_allow_link,
1089 .drop_link = nvmet_allowed_hosts_drop_link,
1092 static const struct config_item_type nvmet_allowed_hosts_type = {
1093 .ct_item_ops = &nvmet_allowed_hosts_item_ops,
1094 .ct_owner = THIS_MODULE,
1097 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
1100 return snprintf(page, PAGE_SIZE, "%d\n",
1101 to_subsys(item)->allow_any_host);
1104 static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
1105 const char *page, size_t count)
1107 struct nvmet_subsys *subsys = to_subsys(item);
1108 bool allow_any_host;
1111 if (kstrtobool(page, &allow_any_host))
1114 down_write(&nvmet_config_sem);
1115 if (allow_any_host && !list_empty(&subsys->hosts)) {
1116 pr_err("Can't set allow_any_host when explicit hosts are set!\n");
1121 if (subsys->allow_any_host != allow_any_host) {
1122 subsys->allow_any_host = allow_any_host;
1123 nvmet_subsys_disc_changed(subsys, NULL);
1127 up_write(&nvmet_config_sem);
1128 return ret ? ret : count;
1131 CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1133 static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
1136 struct nvmet_subsys *subsys = to_subsys(item);
1138 if (NVME_TERTIARY(subsys->ver))
1139 return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
1140 NVME_MAJOR(subsys->ver),
1141 NVME_MINOR(subsys->ver),
1142 NVME_TERTIARY(subsys->ver));
1144 return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
1145 NVME_MAJOR(subsys->ver),
1146 NVME_MINOR(subsys->ver));
1150 nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
1151 const char *page, size_t count)
1153 int major, minor, tertiary = 0;
1156 if (subsys->subsys_discovered) {
1157 if (NVME_TERTIARY(subsys->ver))
1158 pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
1159 NVME_MAJOR(subsys->ver),
1160 NVME_MINOR(subsys->ver),
1161 NVME_TERTIARY(subsys->ver));
1163 pr_err("Can't set version number. %llu.%llu is already assigned\n",
1164 NVME_MAJOR(subsys->ver),
1165 NVME_MINOR(subsys->ver));
1169 /* passthru subsystems use the underlying controller's version */
1170 if (nvmet_is_passthru_subsys(subsys))
1173 ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
1174 if (ret != 2 && ret != 3)
1177 subsys->ver = NVME_VS(major, minor, tertiary);
1182 static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
1183 const char *page, size_t count)
1185 struct nvmet_subsys *subsys = to_subsys(item);
1188 down_write(&nvmet_config_sem);
1189 mutex_lock(&subsys->lock);
1190 ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
1191 mutex_unlock(&subsys->lock);
1192 up_write(&nvmet_config_sem);
1196 CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1198 /* See Section 1.5 of NVMe 1.4 */
static bool nvmet_is_ascii(const char c)
{
	/* Printable ASCII only, as required for SN/MN/FR fields. */
	return c >= 0x20 && c <= 0x7e;
}
1204 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1207 struct nvmet_subsys *subsys = to_subsys(item);
1209 return snprintf(page, PAGE_SIZE, "%.*s\n",
1210 NVMET_SN_MAX_SIZE, subsys->serial);
1214 nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
1215 const char *page, size_t count)
1217 int pos, len = strcspn(page, "\n");
1219 if (subsys->subsys_discovered) {
1220 pr_err("Can't set serial number. %s is already assigned\n",
1225 if (!len || len > NVMET_SN_MAX_SIZE) {
1226 pr_err("Serial Number can not be empty or exceed %d Bytes\n",
1231 for (pos = 0; pos < len; pos++) {
1232 if (!nvmet_is_ascii(page[pos])) {
1233 pr_err("Serial Number must contain only ASCII strings\n");
1238 memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');
1243 static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
1244 const char *page, size_t count)
1246 struct nvmet_subsys *subsys = to_subsys(item);
1249 down_write(&nvmet_config_sem);
1250 mutex_lock(&subsys->lock);
1251 ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
1252 mutex_unlock(&subsys->lock);
1253 up_write(&nvmet_config_sem);
1257 CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1259 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1262 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1265 static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
1266 const char *page, size_t cnt)
1270 if (sscanf(page, "%hu\n", &cntlid_min) != 1)
1273 if (cntlid_min == 0)
1276 down_write(&nvmet_config_sem);
1277 if (cntlid_min >= to_subsys(item)->cntlid_max)
1279 to_subsys(item)->cntlid_min = cntlid_min;
1280 up_write(&nvmet_config_sem);
1284 up_write(&nvmet_config_sem);
1287 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1289 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1292 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1295 static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
1296 const char *page, size_t cnt)
1300 if (sscanf(page, "%hu\n", &cntlid_max) != 1)
1303 if (cntlid_max == 0)
1306 down_write(&nvmet_config_sem);
1307 if (cntlid_max <= to_subsys(item)->cntlid_min)
1309 to_subsys(item)->cntlid_max = cntlid_max;
1310 up_write(&nvmet_config_sem);
1314 up_write(&nvmet_config_sem);
1317 CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1319 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1322 struct nvmet_subsys *subsys = to_subsys(item);
1324 return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1327 static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
1328 const char *page, size_t count)
1333 if (subsys->subsys_discovered) {
1334 pr_err("Can't set model number. %s is already assigned\n",
1335 subsys->model_number);
1339 len = strcspn(page, "\n");
1343 if (len > NVMET_MN_MAX_SIZE) {
1344 pr_err("Model number size can not exceed %d Bytes\n",
1349 for (pos = 0; pos < len; pos++) {
1350 if (!nvmet_is_ascii(page[pos]))
1354 val = kmemdup_nul(page, len, GFP_KERNEL);
1357 kfree(subsys->model_number);
1358 subsys->model_number = val;
1362 static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
1363 const char *page, size_t count)
1365 struct nvmet_subsys *subsys = to_subsys(item);
1368 down_write(&nvmet_config_sem);
1369 mutex_lock(&subsys->lock);
1370 ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
1371 mutex_unlock(&subsys->lock);
1372 up_write(&nvmet_config_sem);
1376 CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1378 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1381 struct nvmet_subsys *subsys = to_subsys(item);
1383 return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1386 static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
1387 const char *page, size_t count)
1392 if (subsys->subsys_discovered) {
1393 pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
1398 ret = kstrtou32(page, 0, &val);
1402 if (val >= 0x1000000)
1405 subsys->ieee_oui = val;
1410 static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
1411 const char *page, size_t count)
1413 struct nvmet_subsys *subsys = to_subsys(item);
1416 down_write(&nvmet_config_sem);
1417 mutex_lock(&subsys->lock);
1418 ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
1419 mutex_unlock(&subsys->lock);
1420 up_write(&nvmet_config_sem);
1424 CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1426 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1429 struct nvmet_subsys *subsys = to_subsys(item);
1431 return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1434 static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
1435 const char *page, size_t count)
1440 if (subsys->subsys_discovered) {
1441 pr_err("Can't set firmware revision. %s is already assigned\n",
1442 subsys->firmware_rev);
1446 len = strcspn(page, "\n");
1450 if (len > NVMET_FR_MAX_SIZE) {
1451 pr_err("Firmware revision size can not exceed %d Bytes\n",
1456 for (pos = 0; pos < len; pos++) {
1457 if (!nvmet_is_ascii(page[pos]))
1461 val = kmemdup_nul(page, len, GFP_KERNEL);
1465 kfree(subsys->firmware_rev);
1467 subsys->firmware_rev = val;
1472 static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
1473 const char *page, size_t count)
1475 struct nvmet_subsys *subsys = to_subsys(item);
1478 down_write(&nvmet_config_sem);
1479 mutex_lock(&subsys->lock);
1480 ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
1481 mutex_unlock(&subsys->lock);
1482 up_write(&nvmet_config_sem);
1486 CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
#ifdef CONFIG_BLK_DEV_INTEGRITY
/* configfs show handler: report whether protection information is enabled. */
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
						char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

/* configfs store handler: enable/disable protection information support. */
static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
						 const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (kstrtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
1510 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
1513 return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
1516 static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
1517 const char *page, size_t cnt)
1519 struct nvmet_subsys *subsys = to_subsys(item);
1520 struct nvmet_ctrl *ctrl;
1523 if (sscanf(page, "%hu\n", &qid_max) != 1)
1526 if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
1529 down_write(&nvmet_config_sem);
1530 subsys->max_qid = qid_max;
1532 /* Force reconnect */
1533 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1534 ctrl->ops->delete_ctrl(ctrl);
1535 up_write(&nvmet_config_sem);
1539 CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1541 static struct configfs_attribute *nvmet_subsys_attrs[] = {
1542 &nvmet_subsys_attr_attr_allow_any_host,
1543 &nvmet_subsys_attr_attr_version,
1544 &nvmet_subsys_attr_attr_serial,
1545 &nvmet_subsys_attr_attr_cntlid_min,
1546 &nvmet_subsys_attr_attr_cntlid_max,
1547 &nvmet_subsys_attr_attr_model,
1548 &nvmet_subsys_attr_attr_qid_max,
1549 &nvmet_subsys_attr_attr_ieee_oui,
1550 &nvmet_subsys_attr_attr_firmware,
1551 #ifdef CONFIG_BLK_DEV_INTEGRITY
1552 &nvmet_subsys_attr_attr_pi_enable,
1558 * Subsystem structures & folder operation functions below
/* configfs release: tear down all controllers and drop the subsys reference. */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}
1568 static struct configfs_item_operations nvmet_subsys_item_ops = {
1569 .release = nvmet_subsys_release,
1572 static const struct config_item_type nvmet_subsys_type = {
1573 .ct_item_ops = &nvmet_subsys_item_ops,
1574 .ct_attrs = nvmet_subsys_attrs,
1575 .ct_owner = THIS_MODULE,
1578 static struct config_group *nvmet_subsys_make(struct config_group *group,
1581 struct nvmet_subsys *subsys;
1583 if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
1584 pr_err("can't create discovery subsystem through configfs\n");
1585 return ERR_PTR(-EINVAL);
1588 subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
1590 return ERR_CAST(subsys);
1592 config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
1594 config_group_init_type_name(&subsys->namespaces_group,
1595 "namespaces", &nvmet_namespaces_type);
1596 configfs_add_default_group(&subsys->namespaces_group, &subsys->group);
1598 config_group_init_type_name(&subsys->allowed_hosts_group,
1599 "allowed_hosts", &nvmet_allowed_hosts_type);
1600 configfs_add_default_group(&subsys->allowed_hosts_group,
1603 nvmet_add_passthru_group(subsys);
1605 return &subsys->group;
1608 static struct configfs_group_operations nvmet_subsystems_group_ops = {
1609 .make_group = nvmet_subsys_make,
1612 static const struct config_item_type nvmet_subsystems_type = {
1613 .ct_group_ops = &nvmet_subsystems_group_ops,
1614 .ct_owner = THIS_MODULE,
1617 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1620 return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1623 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1624 const char *page, size_t count)
1626 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1627 struct nvmet_port *port = to_nvmet_port(item);
1630 if (kstrtobool(page, &enable))
1634 nvmet_referral_enable(parent, port);
1636 nvmet_referral_disable(parent, port);
1640 pr_err("Invalid value '%s' for enable\n", page);
1644 CONFIGFS_ATTR(nvmet_referral_, enable);
1647 * Discovery Service subsystem definitions
1649 static struct configfs_attribute *nvmet_referral_attrs[] = {
1650 &nvmet_attr_addr_adrfam,
1651 &nvmet_attr_addr_portid,
1652 &nvmet_attr_addr_treq,
1653 &nvmet_attr_addr_traddr,
1654 &nvmet_attr_addr_trsvcid,
1655 &nvmet_attr_addr_trtype,
1656 &nvmet_referral_attr_enable,
1660 static void nvmet_referral_notify(struct config_group *group,
1661 struct config_item *item)
1663 struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1664 struct nvmet_port *port = to_nvmet_port(item);
1666 nvmet_referral_disable(parent, port);
/* configfs release: free the referral port allocated in nvmet_referral_make(). */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}
1676 static struct configfs_item_operations nvmet_referral_item_ops = {
1677 .release = nvmet_referral_release,
1680 static const struct config_item_type nvmet_referral_type = {
1681 .ct_owner = THIS_MODULE,
1682 .ct_attrs = nvmet_referral_attrs,
1683 .ct_item_ops = &nvmet_referral_item_ops,
1686 static struct config_group *nvmet_referral_make(
1687 struct config_group *group, const char *name)
1689 struct nvmet_port *port;
1691 port = kzalloc(sizeof(*port), GFP_KERNEL);
1693 return ERR_PTR(-ENOMEM);
1695 INIT_LIST_HEAD(&port->entry);
1696 config_group_init_type_name(&port->group, name, &nvmet_referral_type);
1698 return &port->group;
1701 static struct configfs_group_operations nvmet_referral_group_ops = {
1702 .make_group = nvmet_referral_make,
1703 .disconnect_notify = nvmet_referral_notify,
1706 static const struct config_item_type nvmet_referrals_type = {
1707 .ct_owner = THIS_MODULE,
1708 .ct_group_ops = &nvmet_referral_group_ops,
1711 static struct nvmet_type_name_map nvmet_ana_state[] = {
1712 { NVME_ANA_OPTIMIZED, "optimized" },
1713 { NVME_ANA_NONOPTIMIZED, "non-optimized" },
1714 { NVME_ANA_INACCESSIBLE, "inaccessible" },
1715 { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" },
1716 { NVME_ANA_CHANGE, "change" },
1719 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1722 struct nvmet_ana_group *grp = to_ana_group(item);
1723 enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1726 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1727 if (state == nvmet_ana_state[i].type)
1728 return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1731 return sprintf(page, "\n");
1734 static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
1735 const char *page, size_t count)
1737 struct nvmet_ana_group *grp = to_ana_group(item);
1738 enum nvme_ana_state *ana_state = grp->port->ana_state;
1741 for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1742 if (sysfs_streq(page, nvmet_ana_state[i].name))
1746 pr_err("Invalid value '%s' for ana_state\n", page);
1750 down_write(&nvmet_ana_sem);
1751 ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
1753 up_write(&nvmet_ana_sem);
1754 nvmet_port_send_ana_event(grp->port);
1758 CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1760 static struct configfs_attribute *nvmet_ana_group_attrs[] = {
1761 &nvmet_ana_group_attr_ana_state,
1765 static void nvmet_ana_group_release(struct config_item *item)
1767 struct nvmet_ana_group *grp = to_ana_group(item);
1769 if (grp == &grp->port->ana_default_group)
1772 down_write(&nvmet_ana_sem);
1773 grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
1774 nvmet_ana_group_enabled[grp->grpid]--;
1775 up_write(&nvmet_ana_sem);
1777 nvmet_port_send_ana_event(grp->port);
1781 static struct configfs_item_operations nvmet_ana_group_item_ops = {
1782 .release = nvmet_ana_group_release,
1785 static const struct config_item_type nvmet_ana_group_type = {
1786 .ct_item_ops = &nvmet_ana_group_item_ops,
1787 .ct_attrs = nvmet_ana_group_attrs,
1788 .ct_owner = THIS_MODULE,
1791 static struct config_group *nvmet_ana_groups_make_group(
1792 struct config_group *group, const char *name)
1794 struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
1795 struct nvmet_ana_group *grp;
1799 ret = kstrtou32(name, 0, &grpid);
1804 if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
1808 grp = kzalloc(sizeof(*grp), GFP_KERNEL);
1814 down_write(&nvmet_ana_sem);
1815 nvmet_ana_group_enabled[grpid]++;
1816 up_write(&nvmet_ana_sem);
1818 nvmet_port_send_ana_event(grp->port);
1820 config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
1823 return ERR_PTR(ret);
1826 static struct configfs_group_operations nvmet_ana_groups_group_ops = {
1827 .make_group = nvmet_ana_groups_make_group,
1830 static const struct config_item_type nvmet_ana_groups_type = {
1831 .ct_group_ops = &nvmet_ana_groups_group_ops,
1832 .ct_owner = THIS_MODULE,
1836 * Ports definitions.
1838 static void nvmet_port_release(struct config_item *item)
1840 struct nvmet_port *port = to_nvmet_port(item);
1842 /* Let inflight controllers teardown complete */
1843 flush_workqueue(nvmet_wq);
1844 list_del(&port->global_entry);
1846 key_put(port->keyring);
1847 kfree(port->ana_state);
1851 static struct configfs_attribute *nvmet_port_attrs[] = {
1852 &nvmet_attr_addr_adrfam,
1853 &nvmet_attr_addr_treq,
1854 &nvmet_attr_addr_traddr,
1855 &nvmet_attr_addr_trsvcid,
1856 &nvmet_attr_addr_trtype,
1857 &nvmet_attr_addr_tsas,
1858 &nvmet_attr_param_inline_data_size,
1859 #ifdef CONFIG_BLK_DEV_INTEGRITY
1860 &nvmet_attr_param_pi_enable,
1865 static struct configfs_item_operations nvmet_port_item_ops = {
1866 .release = nvmet_port_release,
1869 static const struct config_item_type nvmet_port_type = {
1870 .ct_attrs = nvmet_port_attrs,
1871 .ct_item_ops = &nvmet_port_item_ops,
1872 .ct_owner = THIS_MODULE,
1875 static struct config_group *nvmet_ports_make(struct config_group *group,
1878 struct nvmet_port *port;
1882 if (kstrtou16(name, 0, &portid))
1883 return ERR_PTR(-EINVAL);
1885 port = kzalloc(sizeof(*port), GFP_KERNEL);
1887 return ERR_PTR(-ENOMEM);
1889 port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1890 sizeof(*port->ana_state), GFP_KERNEL);
1891 if (!port->ana_state) {
1893 return ERR_PTR(-ENOMEM);
1896 if (IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS) && nvme_keyring_id()) {
1897 port->keyring = key_lookup(nvme_keyring_id());
1898 if (IS_ERR(port->keyring)) {
1899 pr_warn("NVMe keyring not available, disabling TLS\n");
1900 port->keyring = NULL;
1904 for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1905 if (i == NVMET_DEFAULT_ANA_GRPID)
1906 port->ana_state[1] = NVME_ANA_OPTIMIZED;
1908 port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1911 list_add(&port->global_entry, &nvmet_ports_list);
1913 INIT_LIST_HEAD(&port->entry);
1914 INIT_LIST_HEAD(&port->subsystems);
1915 INIT_LIST_HEAD(&port->referrals);
1916 port->inline_data_size = -1; /* < 0 == let the transport choose */
1918 port->disc_addr.portid = cpu_to_le16(portid);
1919 port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1920 port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1921 config_group_init_type_name(&port->group, name, &nvmet_port_type);
1923 config_group_init_type_name(&port->subsys_group,
1924 "subsystems", &nvmet_port_subsys_type);
1925 configfs_add_default_group(&port->subsys_group, &port->group);
1927 config_group_init_type_name(&port->referrals_group,
1928 "referrals", &nvmet_referrals_type);
1929 configfs_add_default_group(&port->referrals_group, &port->group);
1931 config_group_init_type_name(&port->ana_groups_group,
1932 "ana_groups", &nvmet_ana_groups_type);
1933 configfs_add_default_group(&port->ana_groups_group, &port->group);
1935 port->ana_default_group.port = port;
1936 port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1937 config_group_init_type_name(&port->ana_default_group.group,
1938 __stringify(NVMET_DEFAULT_ANA_GRPID),
1939 &nvmet_ana_group_type);
1940 configfs_add_default_group(&port->ana_default_group.group,
1941 &port->ana_groups_group);
1943 return &port->group;
1946 static struct configfs_group_operations nvmet_ports_group_ops = {
1947 .make_group = nvmet_ports_make,
1950 static const struct config_item_type nvmet_ports_type = {
1951 .ct_group_ops = &nvmet_ports_group_ops,
1952 .ct_owner = THIS_MODULE,
1955 static struct config_group nvmet_subsystems_group;
1956 static struct config_group nvmet_ports_group;
1958 #ifdef CONFIG_NVME_TARGET_AUTH
1959 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1962 u8 *dhchap_secret = to_host(item)->dhchap_secret;
1965 return sprintf(page, "\n");
1966 return sprintf(page, "%s\n", dhchap_secret);
1969 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
1970 const char *page, size_t count)
1972 struct nvmet_host *host = to_host(item);
1975 ret = nvmet_auth_set_key(host, page, false);
1977 * Re-authentication is a soft state, so keep the
1978 * current authentication valid until the host
1979 * requests re-authentication.
1981 return ret < 0 ? ret : count;
1984 CONFIGFS_ATTR(nvmet_host_, dhchap_key);
1986 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
1989 u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
1992 return sprintf(page, "\n");
1993 return sprintf(page, "%s\n", dhchap_secret);
1996 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
1997 const char *page, size_t count)
1999 struct nvmet_host *host = to_host(item);
2002 ret = nvmet_auth_set_key(host, page, true);
2004 * Re-authentication is a soft state, so keep the
2005 * current authentication valid until the host
2006 * requests re-authentication.
2008 return ret < 0 ? ret : count;
2011 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
2013 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
2016 struct nvmet_host *host = to_host(item);
2017 const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
2019 return sprintf(page, "%s\n", hash_name ? hash_name : "none");
2022 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
2023 const char *page, size_t count)
2025 struct nvmet_host *host = to_host(item);
2028 hmac_id = nvme_auth_hmac_id(page);
2029 if (hmac_id == NVME_AUTH_HASH_INVALID)
2031 if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
2033 host->dhchap_hash_id = hmac_id;
2037 CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
2039 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
2042 struct nvmet_host *host = to_host(item);
2043 const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
2045 return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
2048 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
2049 const char *page, size_t count)
2051 struct nvmet_host *host = to_host(item);
2054 dhgroup_id = nvme_auth_dhgroup_id(page);
2055 if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
2057 if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
2058 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
2060 if (!crypto_has_kpp(kpp, 0, 0))
2063 host->dhchap_dhgroup_id = dhgroup_id;
2067 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
2069 static struct configfs_attribute *nvmet_host_attrs[] = {
2070 &nvmet_host_attr_dhchap_key,
2071 &nvmet_host_attr_dhchap_ctrl_key,
2072 &nvmet_host_attr_dhchap_hash,
2073 &nvmet_host_attr_dhchap_dhgroup,
2076 #endif /* CONFIG_NVME_TARGET_AUTH */
/* configfs release: free the host's auth secrets and the host itself. */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}
2089 static struct configfs_item_operations nvmet_host_item_ops = {
2090 .release = nvmet_host_release,
2093 static const struct config_item_type nvmet_host_type = {
2094 .ct_item_ops = &nvmet_host_item_ops,
2095 #ifdef CONFIG_NVME_TARGET_AUTH
2096 .ct_attrs = nvmet_host_attrs,
2098 .ct_owner = THIS_MODULE,
2101 static struct config_group *nvmet_hosts_make_group(struct config_group *group,
2104 struct nvmet_host *host;
2106 host = kzalloc(sizeof(*host), GFP_KERNEL);
2108 return ERR_PTR(-ENOMEM);
2110 #ifdef CONFIG_NVME_TARGET_AUTH
2111 /* Default to SHA256 */
2112 host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
2115 config_group_init_type_name(&host->group, name, &nvmet_host_type);
2117 return &host->group;
2120 static struct configfs_group_operations nvmet_hosts_group_ops = {
2121 .make_group = nvmet_hosts_make_group,
2124 static const struct config_item_type nvmet_hosts_type = {
2125 .ct_group_ops = &nvmet_hosts_group_ops,
2126 .ct_owner = THIS_MODULE,
2129 static struct config_group nvmet_hosts_group;
2131 static const struct config_item_type nvmet_root_type = {
2132 .ct_owner = THIS_MODULE,
2135 static struct configfs_subsystem nvmet_configfs_subsystem = {
2138 .ci_namebuf = "nvmet",
2139 .ci_type = &nvmet_root_type,
2144 int __init nvmet_init_configfs(void)
2148 config_group_init(&nvmet_configfs_subsystem.su_group);
2149 mutex_init(&nvmet_configfs_subsystem.su_mutex);
2151 config_group_init_type_name(&nvmet_subsystems_group,
2152 "subsystems", &nvmet_subsystems_type);
2153 configfs_add_default_group(&nvmet_subsystems_group,
2154 &nvmet_configfs_subsystem.su_group);
2156 config_group_init_type_name(&nvmet_ports_group,
2157 "ports", &nvmet_ports_type);
2158 configfs_add_default_group(&nvmet_ports_group,
2159 &nvmet_configfs_subsystem.su_group);
2161 config_group_init_type_name(&nvmet_hosts_group,
2162 "hosts", &nvmet_hosts_type);
2163 configfs_add_default_group(&nvmet_hosts_group,
2164 &nvmet_configfs_subsystem.su_group);
2166 ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
2168 pr_err("configfs_register_subsystem: %d\n", ret);
2175 void __exit nvmet_exit_configfs(void)
2177 configfs_unregister_subsystem(&nvmet_configfs_subsystem);