perf/x86/intel: Correct incorrect 'or' operation for PMU capabilities
[linux-block.git] / drivers / nvme / target / configfs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Configfs interface for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/kstrtox.h>
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/stat.h>
12 #include <linux/ctype.h>
13 #include <linux/pci.h>
14 #include <linux/pci-p2pdma.h>
15 #ifdef CONFIG_NVME_TARGET_AUTH
16 #include <linux/nvme-auth.h>
17 #endif
18 #include <linux/nvme-keyring.h>
19 #include <crypto/hash.h>
20 #include <crypto/kpp.h>
21
22 #include "nvmet.h"
23
/* configfs item types defined later in this file; needed for link checks. */
static const struct config_item_type nvmet_host_type;
static const struct config_item_type nvmet_subsys_type;

/* Global list of all configured ports, exported through nvmet_ports. */
static LIST_HEAD(nvmet_ports_list);
struct list_head *nvmet_ports = &nvmet_ports_list;

/* Maps a protocol-defined numeric type to its configfs string name. */
struct nvmet_type_name_map {
	u8		type;
	const char	*name;
};
34
35 static struct nvmet_type_name_map nvmet_transport[] = {
36         { NVMF_TRTYPE_RDMA,     "rdma" },
37         { NVMF_TRTYPE_FC,       "fc" },
38         { NVMF_TRTYPE_TCP,      "tcp" },
39         { NVMF_TRTYPE_LOOP,     "loop" },
40 };
41
/*
 * Address families per the discovery log ADRFAM field.  Entry 0 ("pcie")
 * is deliberately skipped by the adrfam show/store loops below, which
 * start at index 1: PCIe is not a valid fabrics address family.
 */
static const struct nvmet_type_name_map nvmet_addr_family[] = {
	{ NVMF_ADDR_FAMILY_PCI,		"pcie" },
	{ NVMF_ADDR_FAMILY_IP4,		"ipv4" },
	{ NVMF_ADDR_FAMILY_IP6,		"ipv6" },
	{ NVMF_ADDR_FAMILY_IB,		"ib" },
	{ NVMF_ADDR_FAMILY_FC,		"fc" },
	{ NVMF_ADDR_FAMILY_LOOP,	"loop" },
};
50
51 static bool nvmet_is_port_enabled(struct nvmet_port *p, const char *caller)
52 {
53         if (p->enabled)
54                 pr_err("Disable port '%u' before changing attribute in %s\n",
55                        le16_to_cpu(p->disc_addr.portid), caller);
56         return p->enabled;
57 }
58
59 /*
60  * nvmet_port Generic ConfigFS definitions.
61  * Used in any place in the ConfigFS tree that refers to an address.
62  */
63 static ssize_t nvmet_addr_adrfam_show(struct config_item *item, char *page)
64 {
65         u8 adrfam = to_nvmet_port(item)->disc_addr.adrfam;
66         int i;
67
68         for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
69                 if (nvmet_addr_family[i].type == adrfam)
70                         return snprintf(page, PAGE_SIZE, "%s\n",
71                                         nvmet_addr_family[i].name);
72         }
73
74         return snprintf(page, PAGE_SIZE, "\n");
75 }
76
77 static ssize_t nvmet_addr_adrfam_store(struct config_item *item,
78                 const char *page, size_t count)
79 {
80         struct nvmet_port *port = to_nvmet_port(item);
81         int i;
82
83         if (nvmet_is_port_enabled(port, __func__))
84                 return -EACCES;
85
86         for (i = 1; i < ARRAY_SIZE(nvmet_addr_family); i++) {
87                 if (sysfs_streq(page, nvmet_addr_family[i].name))
88                         goto found;
89         }
90
91         pr_err("Invalid value '%s' for adrfam\n", page);
92         return -EINVAL;
93
94 found:
95         port->disc_addr.adrfam = nvmet_addr_family[i].type;
96         return count;
97 }
98
99 CONFIGFS_ATTR(nvmet_, addr_adrfam);
100
101 static ssize_t nvmet_addr_portid_show(struct config_item *item,
102                 char *page)
103 {
104         __le16 portid = to_nvmet_port(item)->disc_addr.portid;
105
106         return snprintf(page, PAGE_SIZE, "%d\n", le16_to_cpu(portid));
107 }
108
109 static ssize_t nvmet_addr_portid_store(struct config_item *item,
110                 const char *page, size_t count)
111 {
112         struct nvmet_port *port = to_nvmet_port(item);
113         u16 portid = 0;
114
115         if (kstrtou16(page, 0, &portid)) {
116                 pr_err("Invalid value '%s' for portid\n", page);
117                 return -EINVAL;
118         }
119
120         if (nvmet_is_port_enabled(port, __func__))
121                 return -EACCES;
122
123         port->disc_addr.portid = cpu_to_le16(portid);
124         return count;
125 }
126
127 CONFIGFS_ATTR(nvmet_, addr_portid);
128
129 static ssize_t nvmet_addr_traddr_show(struct config_item *item,
130                 char *page)
131 {
132         struct nvmet_port *port = to_nvmet_port(item);
133
134         return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.traddr);
135 }
136
137 static ssize_t nvmet_addr_traddr_store(struct config_item *item,
138                 const char *page, size_t count)
139 {
140         struct nvmet_port *port = to_nvmet_port(item);
141
142         if (count > NVMF_TRADDR_SIZE) {
143                 pr_err("Invalid value '%s' for traddr\n", page);
144                 return -EINVAL;
145         }
146
147         if (nvmet_is_port_enabled(port, __func__))
148                 return -EACCES;
149
150         if (sscanf(page, "%s\n", port->disc_addr.traddr) != 1)
151                 return -EINVAL;
152         return count;
153 }
154
155 CONFIGFS_ATTR(nvmet_, addr_traddr);
156
/* Names for the secure-channel TREQ values handled by addr_treq below. */
static const struct nvmet_type_name_map nvmet_addr_treq[] = {
	{ NVMF_TREQ_NOT_SPECIFIED,	"not specified" },
	{ NVMF_TREQ_REQUIRED,		"required" },
	{ NVMF_TREQ_NOT_REQUIRED,	"not required" },
};
162
/*
 * Return the port's TREQ byte with the secure-channel field cleared,
 * i.e. only the flag bits unrelated to the secure-channel requirement.
 */
static inline u8 nvmet_port_disc_addr_treq_mask(struct nvmet_port *port)
{
	return (port->disc_addr.treq & ~NVME_TREQ_SECURE_CHANNEL_MASK);
}
167
168 static ssize_t nvmet_addr_treq_show(struct config_item *item, char *page)
169 {
170         u8 treq = nvmet_port_disc_addr_treq_secure_channel(to_nvmet_port(item));
171         int i;
172
173         for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
174                 if (treq == nvmet_addr_treq[i].type)
175                         return snprintf(page, PAGE_SIZE, "%s\n",
176                                         nvmet_addr_treq[i].name);
177         }
178
179         return snprintf(page, PAGE_SIZE, "\n");
180 }
181
/*
 * Set the port's secure-channel TREQ value by name.
 *
 * Only the secure-channel field is replaced: the remaining TREQ flag
 * bits are captured via nvmet_port_disc_addr_treq_mask() up front and
 * OR-ed back together with the new value at the end.  For TCP ports
 * already configured for TLS1.3, "not specified" is rejected and
 * "not required" only warns, since a secure channel is expected there.
 */
static ssize_t nvmet_addr_treq_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = nvmet_port_disc_addr_treq_mask(port);	/* non-SC flag bits */
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_treq); i++) {
		if (sysfs_streq(page, nvmet_addr_treq[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for treq\n", page);
	return -EINVAL;

found:
	/* Extra validation when the port is a TCP port using TLS1.3. */
	if (port->disc_addr.trtype == NVMF_TRTYPE_TCP &&
	    port->disc_addr.tsas.tcp.sectype == NVMF_TCP_SECTYPE_TLS13) {
		switch (nvmet_addr_treq[i].type) {
		case NVMF_TREQ_NOT_SPECIFIED:
			pr_debug("treq '%s' not allowed for TLS1.3\n",
				 nvmet_addr_treq[i].name);
			return -EINVAL;
		case NVMF_TREQ_NOT_REQUIRED:
			pr_warn("Allow non-TLS connections while TLS1.3 is enabled\n");
			break;
		default:
			break;
		}
	}
	/* Recombine preserved flag bits with the new secure-channel value. */
	treq |= nvmet_addr_treq[i].type;
	port->disc_addr.treq = treq;
	return count;
}
219
220 CONFIGFS_ATTR(nvmet_, addr_treq);
221
222 static ssize_t nvmet_addr_trsvcid_show(struct config_item *item,
223                 char *page)
224 {
225         struct nvmet_port *port = to_nvmet_port(item);
226
227         return snprintf(page, PAGE_SIZE, "%s\n", port->disc_addr.trsvcid);
228 }
229
230 static ssize_t nvmet_addr_trsvcid_store(struct config_item *item,
231                 const char *page, size_t count)
232 {
233         struct nvmet_port *port = to_nvmet_port(item);
234
235         if (count > NVMF_TRSVCID_SIZE) {
236                 pr_err("Invalid value '%s' for trsvcid\n", page);
237                 return -EINVAL;
238         }
239         if (nvmet_is_port_enabled(port, __func__))
240                 return -EACCES;
241
242         if (sscanf(page, "%s\n", port->disc_addr.trsvcid) != 1)
243                 return -EINVAL;
244         return count;
245 }
246
247 CONFIGFS_ATTR(nvmet_, addr_trsvcid);
248
249 static ssize_t nvmet_param_inline_data_size_show(struct config_item *item,
250                 char *page)
251 {
252         struct nvmet_port *port = to_nvmet_port(item);
253
254         return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size);
255 }
256
257 static ssize_t nvmet_param_inline_data_size_store(struct config_item *item,
258                 const char *page, size_t count)
259 {
260         struct nvmet_port *port = to_nvmet_port(item);
261         int ret;
262
263         if (nvmet_is_port_enabled(port, __func__))
264                 return -EACCES;
265         ret = kstrtoint(page, 0, &port->inline_data_size);
266         if (ret) {
267                 pr_err("Invalid value '%s' for inline_data_size\n", page);
268                 return -EINVAL;
269         }
270         return count;
271 }
272
273 CONFIGFS_ATTR(nvmet_, param_inline_data_size);
274
275 #ifdef CONFIG_BLK_DEV_INTEGRITY
276 static ssize_t nvmet_param_pi_enable_show(struct config_item *item,
277                 char *page)
278 {
279         struct nvmet_port *port = to_nvmet_port(item);
280
281         return snprintf(page, PAGE_SIZE, "%d\n", port->pi_enable);
282 }
283
284 static ssize_t nvmet_param_pi_enable_store(struct config_item *item,
285                 const char *page, size_t count)
286 {
287         struct nvmet_port *port = to_nvmet_port(item);
288         bool val;
289
290         if (kstrtobool(page, &val))
291                 return -EINVAL;
292
293         if (nvmet_is_port_enabled(port, __func__))
294                 return -EACCES;
295
296         port->pi_enable = val;
297         return count;
298 }
299
300 CONFIGFS_ATTR(nvmet_, param_pi_enable);
301 #endif
302
303 static ssize_t nvmet_addr_trtype_show(struct config_item *item,
304                 char *page)
305 {
306         struct nvmet_port *port = to_nvmet_port(item);
307         int i;
308
309         for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
310                 if (port->disc_addr.trtype == nvmet_transport[i].type)
311                         return snprintf(page, PAGE_SIZE,
312                                         "%s\n", nvmet_transport[i].name);
313         }
314
315         return sprintf(page, "\n");
316 }
317
/* Initialize the RDMA transport-specific address subtype to defaults. */
static void nvmet_port_init_tsas_rdma(struct nvmet_port *port)
{
	port->disc_addr.tsas.rdma.qptype = NVMF_RDMA_QPTYPE_CONNECTED;
	port->disc_addr.tsas.rdma.prtype = NVMF_RDMA_PRTYPE_NOT_SPECIFIED;
	port->disc_addr.tsas.rdma.cms = NVMF_RDMA_CMS_RDMA_CM;
}
324
/* Set the TCP transport-specific security type (NVMF_TCP_SECTYPE_*). */
static void nvmet_port_init_tsas_tcp(struct nvmet_port *port, int sectype)
{
	port->disc_addr.tsas.tcp.sectype = sectype;
}
329
330 static ssize_t nvmet_addr_trtype_store(struct config_item *item,
331                 const char *page, size_t count)
332 {
333         struct nvmet_port *port = to_nvmet_port(item);
334         int i;
335
336         if (nvmet_is_port_enabled(port, __func__))
337                 return -EACCES;
338
339         for (i = 0; i < ARRAY_SIZE(nvmet_transport); i++) {
340                 if (sysfs_streq(page, nvmet_transport[i].name))
341                         goto found;
342         }
343
344         pr_err("Invalid value '%s' for trtype\n", page);
345         return -EINVAL;
346
347 found:
348         memset(&port->disc_addr.tsas, 0, NVMF_TSAS_SIZE);
349         port->disc_addr.trtype = nvmet_transport[i].type;
350         if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA)
351                 nvmet_port_init_tsas_rdma(port);
352         else if (port->disc_addr.trtype == NVMF_TRTYPE_TCP)
353                 nvmet_port_init_tsas_tcp(port, NVMF_TCP_SECTYPE_NONE);
354         return count;
355 }
356
357 CONFIGFS_ATTR(nvmet_, addr_trtype);
358
/* TCP security types selectable via the "addr_tsas" attribute. */
static const struct nvmet_type_name_map nvmet_addr_tsas_tcp[] = {
	{ NVMF_TCP_SECTYPE_NONE,	"none" },
	{ NVMF_TCP_SECTYPE_TLS13,	"tls1.3" },
};

/* RDMA queue-pair types, used only for display in tsas_show(). */
static const struct nvmet_type_name_map nvmet_addr_tsas_rdma[] = {
	{ NVMF_RDMA_QPTYPE_CONNECTED,	"connected" },
	{ NVMF_RDMA_QPTYPE_DATAGRAM,	"datagram"  },
};
368
369 static ssize_t nvmet_addr_tsas_show(struct config_item *item,
370                 char *page)
371 {
372         struct nvmet_port *port = to_nvmet_port(item);
373         int i;
374
375         if (port->disc_addr.trtype == NVMF_TRTYPE_TCP) {
376                 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
377                         if (port->disc_addr.tsas.tcp.sectype == nvmet_addr_tsas_tcp[i].type)
378                                 return sprintf(page, "%s\n", nvmet_addr_tsas_tcp[i].name);
379                 }
380         } else if (port->disc_addr.trtype == NVMF_TRTYPE_RDMA) {
381                 for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_rdma); i++) {
382                         if (port->disc_addr.tsas.rdma.qptype == nvmet_addr_tsas_rdma[i].type)
383                                 return sprintf(page, "%s\n", nvmet_addr_tsas_rdma[i].name);
384                 }
385         }
386         return sprintf(page, "reserved\n");
387 }
388
/*
 * Set the TCP security type ("none" or "tls1.3") on a disabled TCP port.
 *
 * Selecting TLS1.3 requires CONFIG_NVME_TARGET_TCP_TLS and a configured
 * keyring.  The TREQ secure-channel field is updated to match: TLS1.3
 * defaults it to "required" unless a secure-channel value was already
 * set, while "none" leaves it as "not specified".  Non-TCP transports
 * are rejected with -EINVAL.
 */
static ssize_t nvmet_addr_tsas_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_port *port = to_nvmet_port(item);
	u8 treq = nvmet_port_disc_addr_treq_mask(port);	/* non-SC flag bits */
	u8 sectype;
	int i;

	if (nvmet_is_port_enabled(port, __func__))
		return -EACCES;

	if (port->disc_addr.trtype != NVMF_TRTYPE_TCP)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(nvmet_addr_tsas_tcp); i++) {
		if (sysfs_streq(page, nvmet_addr_tsas_tcp[i].name)) {
			sectype = nvmet_addr_tsas_tcp[i].type;
			goto found;
		}
	}

	pr_err("Invalid value '%s' for tsas\n", page);
	return -EINVAL;

found:
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS)) {
			pr_err("TLS is not supported\n");
			return -EINVAL;
		}
		if (!port->keyring) {
			pr_err("TLS keyring not configured\n");
			return -EINVAL;
		}
	}

	nvmet_port_init_tsas_tcp(port, sectype);
	/*
	 * If TLS is enabled TREQ should be set to 'required' per default
	 */
	if (sectype == NVMF_TCP_SECTYPE_TLS13) {
		u8 sc = nvmet_port_disc_addr_treq_secure_channel(port);

		if (sc == NVMF_TREQ_NOT_SPECIFIED)
			treq |= NVMF_TREQ_REQUIRED;
		else
			treq |= sc;	/* keep an explicit prior choice */
	} else {
		treq |= NVMF_TREQ_NOT_SPECIFIED;
	}
	port->disc_addr.treq = treq;
	return count;
}
442
443 CONFIGFS_ATTR(nvmet_, addr_tsas);
444
445 /*
446  * Namespace structures & file operation functions below
447  */
448 static ssize_t nvmet_ns_device_path_show(struct config_item *item, char *page)
449 {
450         return sprintf(page, "%s\n", to_nvmet_ns(item)->device_path);
451 }
452
/*
 * Set the namespace's backing device path.  Rejected with -EBUSY while
 * the namespace is enabled.  Anything from the first newline onward is
 * dropped (strcspn + kmemdup_nul), so echo'ed input works as expected.
 * The whole operation runs under the subsystem lock.
 */
static ssize_t nvmet_ns_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EBUSY;
	if (ns->enabled)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* length up to (excluding) newline */
	if (!len)
		goto out_unlock;

	kfree(ns->device_path);
	ret = -ENOMEM;
	/* On allocation failure device_path is left NULL, never dangling. */
	ns->device_path = kmemdup_nul(page, len, GFP_KERNEL);
	if (!ns->device_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);
	return count;

out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
484
485 CONFIGFS_ATTR(nvmet_ns_, device_path);
486
487 #ifdef CONFIG_PCI_P2PDMA
/* Show the namespace's P2P memory setting via the p2pdma helper. */
static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
}
494
/*
 * Configure P2P memory use for a disabled namespace.  The input is
 * parsed by pci_p2pdma_enable_store(), which may also resolve a specific
 * P2P-capable PCI device; the previous device reference, if any, is
 * dropped before the new one is stored.  Runs under the subsystem lock.
 */
static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct pci_dev *p2p_dev = NULL;
	bool use_p2pmem;
	int ret = count;	/* success path returns the full count */
	int error;

	mutex_lock(&ns->subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
	if (error) {
		ret = error;
		goto out_unlock;
	}

	ns->use_p2pmem = use_p2pmem;
	pci_dev_put(ns->p2p_dev);	/* drop ref on previously set device */
	ns->p2p_dev = p2p_dev;

out_unlock:
	mutex_unlock(&ns->subsys->lock);

	return ret;
}
525
526 CONFIGFS_ATTR(nvmet_ns_, p2pmem);
527 #endif /* CONFIG_PCI_P2PDMA */
528
529 static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
530 {
531         return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
532 }
533
534 static ssize_t nvmet_ns_device_uuid_store(struct config_item *item,
535                                           const char *page, size_t count)
536 {
537         struct nvmet_ns *ns = to_nvmet_ns(item);
538         struct nvmet_subsys *subsys = ns->subsys;
539         int ret = 0;
540
541         mutex_lock(&subsys->lock);
542         if (ns->enabled) {
543                 ret = -EBUSY;
544                 goto out_unlock;
545         }
546
547         if (uuid_parse(page, &ns->uuid))
548                 ret = -EINVAL;
549
550 out_unlock:
551         mutex_unlock(&subsys->lock);
552         return ret ? ret : count;
553 }
554
555 CONFIGFS_ATTR(nvmet_ns_, device_uuid);
556
557 static ssize_t nvmet_ns_device_nguid_show(struct config_item *item, char *page)
558 {
559         return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->nguid);
560 }
561
/*
 * Set the 16-byte namespace NGUID from hex text.  Each byte is two hex
 * digits; a '-' or ':' separator is allowed after any byte.  Parsing
 * into a local buffer first ensures ns->nguid is only updated when the
 * entire input is valid.  Rejected with -EBUSY while enabled.
 */
static ssize_t nvmet_ns_device_nguid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	struct nvmet_subsys *subsys = ns->subsys;
	u8 nguid[16];
	const char *p = page;
	int i;
	int ret = 0;

	mutex_lock(&subsys->lock);
	if (ns->enabled) {
		ret = -EBUSY;
		goto out_unlock;
	}

	for (i = 0; i < 16; i++) {
		/* Need at least two characters left for this byte. */
		if (p + 2 > page + count) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (!isxdigit(p[0]) || !isxdigit(p[1])) {
			ret = -EINVAL;
			goto out_unlock;
		}

		nguid[i] = (hex_to_bin(p[0]) << 4) | hex_to_bin(p[1]);
		p += 2;

		/* Optional byte separator. */
		if (*p == '-' || *p == ':')
			p++;
	}

	memcpy(&ns->nguid, nguid, sizeof(nguid));
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret ? ret : count;
}
600
601 CONFIGFS_ATTR(nvmet_ns_, device_nguid);
602
603 static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page)
604 {
605         return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid);
606 }
607
/*
 * Move the namespace to another ANA group (1..NVMET_MAX_ANAGRPS).
 * The per-group usage counters and the change count are updated under
 * nvmet_ana_sem; the new group's counter is bumped before the old one
 * is dropped.  An ANA change event is sent to hosts afterwards.
 */
static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	u32 oldgrpid, newgrpid;
	int ret;

	ret = kstrtou32(page, 0, &newgrpid);
	if (ret)
		return ret;

	if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS)
		return -EINVAL;

	down_write(&nvmet_ana_sem);
	oldgrpid = ns->anagrpid;
	nvmet_ana_group_enabled[newgrpid]++;
	ns->anagrpid = newgrpid;
	nvmet_ana_group_enabled[oldgrpid]--;
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);

	nvmet_send_ana_event(ns->subsys, NULL);
	return count;
}
633
634 CONFIGFS_ATTR(nvmet_ns_, ana_grpid);
635
636 static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page)
637 {
638         return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled);
639 }
640
641 static ssize_t nvmet_ns_enable_store(struct config_item *item,
642                 const char *page, size_t count)
643 {
644         struct nvmet_ns *ns = to_nvmet_ns(item);
645         bool enable;
646         int ret = 0;
647
648         if (kstrtobool(page, &enable))
649                 return -EINVAL;
650
651         if (enable)
652                 ret = nvmet_ns_enable(ns);
653         else
654                 nvmet_ns_disable(ns);
655
656         return ret ? ret : count;
657 }
658
659 CONFIGFS_ATTR(nvmet_ns_, enable);
660
661 static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page)
662 {
663         return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io);
664 }
665
666 static ssize_t nvmet_ns_buffered_io_store(struct config_item *item,
667                 const char *page, size_t count)
668 {
669         struct nvmet_ns *ns = to_nvmet_ns(item);
670         bool val;
671
672         if (kstrtobool(page, &val))
673                 return -EINVAL;
674
675         mutex_lock(&ns->subsys->lock);
676         if (ns->enabled) {
677                 pr_err("disable ns before setting buffered_io value.\n");
678                 mutex_unlock(&ns->subsys->lock);
679                 return -EINVAL;
680         }
681
682         ns->buffered_io = val;
683         mutex_unlock(&ns->subsys->lock);
684         return count;
685 }
686
687 CONFIGFS_ATTR(nvmet_ns_, buffered_io);
688
/*
 * Write-only trigger: writing a true value re-reads the backing device
 * size of an enabled namespace and, if it changed, notifies hosts via
 * a namespace-changed event.  False values are rejected.
 */
static ssize_t nvmet_ns_revalidate_size_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);
	bool val;

	if (kstrtobool(page, &val))
		return -EINVAL;

	if (!val)
		return -EINVAL;

	mutex_lock(&ns->subsys->lock);
	if (!ns->enabled) {
		pr_err("enable ns before revalidate.\n");
		mutex_unlock(&ns->subsys->lock);
		return -EINVAL;
	}
	/* Only notify hosts when the size actually changed. */
	if (nvmet_ns_revalidate(ns))
		nvmet_ns_changed(ns->subsys, ns->nsid);
	mutex_unlock(&ns->subsys->lock);
	return count;
}
712
713 CONFIGFS_ATTR_WO(nvmet_ns_, revalidate_size);
714
/* configfs attributes exposed under each namespace directory. */
static struct configfs_attribute *nvmet_ns_attrs[] = {
	&nvmet_ns_attr_device_path,
	&nvmet_ns_attr_device_nguid,
	&nvmet_ns_attr_device_uuid,
	&nvmet_ns_attr_ana_grpid,
	&nvmet_ns_attr_enable,
	&nvmet_ns_attr_buffered_io,
	&nvmet_ns_attr_revalidate_size,
#ifdef CONFIG_PCI_P2PDMA
	&nvmet_ns_attr_p2pmem,
#endif
	NULL,
};
728
/* configfs ->release: free the namespace when its item goes away. */
static void nvmet_ns_release(struct config_item *item)
{
	struct nvmet_ns *ns = to_nvmet_ns(item);

	nvmet_ns_free(ns);
}
735
/* Item operations for a namespace configfs item. */
static struct configfs_item_operations nvmet_ns_item_ops = {
	.release		= nvmet_ns_release,
};

/* Item type for a namespace directory. */
static const struct config_item_type nvmet_ns_type = {
	.ct_item_ops		= &nvmet_ns_item_ops,
	.ct_attrs		= nvmet_ns_attrs,
	.ct_owner		= THIS_MODULE,
};
745
746 static struct config_group *nvmet_ns_make(struct config_group *group,
747                 const char *name)
748 {
749         struct nvmet_subsys *subsys = namespaces_to_subsys(&group->cg_item);
750         struct nvmet_ns *ns;
751         int ret;
752         u32 nsid;
753
754         ret = kstrtou32(name, 0, &nsid);
755         if (ret)
756                 goto out;
757
758         ret = -EINVAL;
759         if (nsid == 0 || nsid == NVME_NSID_ALL) {
760                 pr_err("invalid nsid %#x", nsid);
761                 goto out;
762         }
763
764         ret = -ENOMEM;
765         ns = nvmet_ns_alloc(subsys, nsid);
766         if (!ns)
767                 goto out;
768         config_group_init_type_name(&ns->group, name, &nvmet_ns_type);
769
770         pr_info("adding nsid %d to subsystem %s\n", nsid, subsys->subsysnqn);
771
772         return &ns->group;
773 out:
774         return ERR_PTR(ret);
775 }
776
/* Group operations for the "namespaces" directory (mkdir creates a ns). */
static struct configfs_group_operations nvmet_namespaces_group_ops = {
	.make_group		= nvmet_ns_make,
};

/* Item type for the "namespaces" directory. */
static const struct config_item_type nvmet_namespaces_type = {
	.ct_group_ops		= &nvmet_namespaces_group_ops,
	.ct_owner		= THIS_MODULE,
};
785
786 #ifdef CONFIG_NVME_TARGET_PASSTHRU
787
788 static ssize_t nvmet_passthru_device_path_show(struct config_item *item,
789                 char *page)
790 {
791         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
792
793         return snprintf(page, PAGE_SIZE, "%s\n", subsys->passthru_ctrl_path);
794 }
795
/*
 * Set the passthru controller device path.  Rejected with -EBUSY while
 * passthru is enabled.  Everything from the first newline onward is
 * dropped (strcspn + kstrndup).  Runs under the subsystem lock.
 */
static ssize_t nvmet_passthru_device_path_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
	size_t len;
	int ret;

	mutex_lock(&subsys->lock);

	ret = -EBUSY;
	if (subsys->passthru_ctrl)
		goto out_unlock;

	ret = -EINVAL;
	len = strcspn(page, "\n");	/* length up to (excluding) newline */
	if (!len)
		goto out_unlock;

	kfree(subsys->passthru_ctrl_path);
	ret = -ENOMEM;
	/* On failure the path is left NULL, never dangling. */
	subsys->passthru_ctrl_path = kstrndup(page, len, GFP_KERNEL);
	if (!subsys->passthru_ctrl_path)
		goto out_unlock;

	mutex_unlock(&subsys->lock);

	return count;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
}
827 CONFIGFS_ATTR(nvmet_passthru_, device_path);
828
829 static ssize_t nvmet_passthru_enable_show(struct config_item *item,
830                 char *page)
831 {
832         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
833
834         return sprintf(page, "%d\n", subsys->passthru_ctrl ? 1 : 0);
835 }
836
837 static ssize_t nvmet_passthru_enable_store(struct config_item *item,
838                 const char *page, size_t count)
839 {
840         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
841         bool enable;
842         int ret = 0;
843
844         if (kstrtobool(page, &enable))
845                 return -EINVAL;
846
847         if (enable)
848                 ret = nvmet_passthru_ctrl_enable(subsys);
849         else
850                 nvmet_passthru_ctrl_disable(subsys);
851
852         return ret ? ret : count;
853 }
854 CONFIGFS_ATTR(nvmet_passthru_, enable);
855
856 static ssize_t nvmet_passthru_admin_timeout_show(struct config_item *item,
857                 char *page)
858 {
859         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->admin_timeout);
860 }
861
862 static ssize_t nvmet_passthru_admin_timeout_store(struct config_item *item,
863                 const char *page, size_t count)
864 {
865         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
866         unsigned int timeout;
867
868         if (kstrtouint(page, 0, &timeout))
869                 return -EINVAL;
870         subsys->admin_timeout = timeout;
871         return count;
872 }
873 CONFIGFS_ATTR(nvmet_passthru_, admin_timeout);
874
875 static ssize_t nvmet_passthru_io_timeout_show(struct config_item *item,
876                 char *page)
877 {
878         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->io_timeout);
879 }
880
881 static ssize_t nvmet_passthru_io_timeout_store(struct config_item *item,
882                 const char *page, size_t count)
883 {
884         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
885         unsigned int timeout;
886
887         if (kstrtouint(page, 0, &timeout))
888                 return -EINVAL;
889         subsys->io_timeout = timeout;
890         return count;
891 }
892 CONFIGFS_ATTR(nvmet_passthru_, io_timeout);
893
894 static ssize_t nvmet_passthru_clear_ids_show(struct config_item *item,
895                 char *page)
896 {
897         return sprintf(page, "%u\n", to_subsys(item->ci_parent)->clear_ids);
898 }
899
900 static ssize_t nvmet_passthru_clear_ids_store(struct config_item *item,
901                 const char *page, size_t count)
902 {
903         struct nvmet_subsys *subsys = to_subsys(item->ci_parent);
904         unsigned int clear_ids;
905
906         if (kstrtouint(page, 0, &clear_ids))
907                 return -EINVAL;
908         subsys->clear_ids = clear_ids;
909         return count;
910 }
911 CONFIGFS_ATTR(nvmet_passthru_, clear_ids);
912
/* configfs attributes exposed under each subsystem's passthru directory. */
static struct configfs_attribute *nvmet_passthru_attrs[] = {
	&nvmet_passthru_attr_device_path,
	&nvmet_passthru_attr_enable,
	&nvmet_passthru_attr_admin_timeout,
	&nvmet_passthru_attr_io_timeout,
	&nvmet_passthru_attr_clear_ids,
	NULL,
};

/* Item type for the "passthru" directory. */
static const struct config_item_type nvmet_passthru_type = {
	.ct_attrs		= nvmet_passthru_attrs,
	.ct_owner		= THIS_MODULE,
};
926
/* Add the default "passthru" group below a new subsystem's group. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
	config_group_init_type_name(&subsys->passthru_group,
				    "passthru", &nvmet_passthru_type);
	configfs_add_default_group(&subsys->passthru_group,
				   &subsys->group);
}
934
935 #else /* CONFIG_NVME_TARGET_PASSTHRU */
936
/* No-op when passthru support is compiled out. */
static void nvmet_add_passthru_group(struct nvmet_subsys *subsys)
{
}
940
941 #endif /* CONFIG_NVME_TARGET_PASSTHRU */
942
/*
 * configfs ->allow_link: bind a subsystem to this port by symlinking it
 * into the port's "subsystems" directory.  Duplicate links yield
 * -EEXIST; the first successfully linked subsystem also enables the
 * port.  The port's subsystem list is serialized by nvmet_config_sem.
 */
static int nvmet_port_subsys_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys;
	struct nvmet_subsys_link *link, *p;
	int ret;

	/* Only subsystem items may be linked here. */
	if (target->ci_type != &nvmet_subsys_type) {
		pr_err("can only link subsystems into the subsystems dir.!\n");
		return -EINVAL;
	}
	subsys = to_subsys(target);
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->subsys = subsys;

	down_write(&nvmet_config_sem);
	ret = -EEXIST;
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto out_free_link;
	}

	/* First subsystem on this port: bring the port up. */
	if (list_empty(&port->subsystems)) {
		ret = nvmet_enable_port(port);
		if (ret)
			goto out_free_link;
	}

	list_add_tail(&link->entry, &port->subsystems);
	nvmet_port_disc_changed(port, subsys);

	up_write(&nvmet_config_sem);
	return 0;

out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
985
/*
 * configfs ->drop_link for a port's "subsystems" directory: stop exporting
 * @target on this port, tearing down its controllers and disabling the
 * port when the last subsystem is removed.
 */
static void nvmet_port_subsys_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_port *port = to_nvmet_port(parent->ci_parent);
	struct nvmet_subsys *subsys = to_subsys(target);
	struct nvmet_subsys_link *p;

	down_write(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (p->subsys == subsys)
			goto found;
	}
	/* Not linked: nothing to do. */
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	/* Kill controllers that came in through this port/subsys pair. */
	nvmet_port_del_ctrls(port, subsys);
	nvmet_port_disc_changed(port, subsys);

	if (list_empty(&port->subsystems))
		nvmet_disable_port(port);
	up_write(&nvmet_config_sem);
	kfree(p);
}
1011
/* Item operations for the per-port "subsystems" symlink directory. */
static struct configfs_item_operations nvmet_port_subsys_item_ops = {
	.allow_link		= nvmet_port_subsys_allow_link,
	.drop_link		= nvmet_port_subsys_drop_link,
};

static const struct config_item_type nvmet_port_subsys_type = {
	.ct_item_ops		= &nvmet_port_subsys_item_ops,
	.ct_owner		= THIS_MODULE,
};
1021
/*
 * configfs ->allow_link for a subsystem's "allowed_hosts" directory: grant
 * @target (a host NQN item) access to this subsystem.  Mutually exclusive
 * with the allow_any_host attribute.
 */
static int nvmet_allowed_hosts_allow_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host;
	struct nvmet_host_link *link, *p;
	int ret;

	/* Only host items may be symlinked here. */
	if (target->ci_type != &nvmet_host_type) {
		pr_err("can only link hosts into the allowed_hosts directory!\n");
		return -EINVAL;
	}

	host = to_host(target);
	/* Allocate before taking the config lock. */
	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;
	link->host = host;

	down_write(&nvmet_config_sem);
	ret = -EINVAL;
	/* An explicit host list contradicts allow_any_host. */
	if (subsys->allow_any_host) {
		pr_err("can't add hosts when allow_any_host is set!\n");
		goto out_free_link;
	}

	ret = -EEXIST;
	/* Reject duplicate host NQNs. */
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto out_free_link;
	}
	list_add_tail(&link->entry, &subsys->hosts);
	/* Notify discovery controllers of the ACL change. */
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	return 0;
out_free_link:
	up_write(&nvmet_config_sem);
	kfree(link);
	return ret;
}
1063
/*
 * configfs ->drop_link for "allowed_hosts": revoke @target's access to
 * this subsystem and notify discovery controllers.
 */
static void nvmet_allowed_hosts_drop_link(struct config_item *parent,
		struct config_item *target)
{
	struct nvmet_subsys *subsys = to_subsys(parent->ci_parent);
	struct nvmet_host *host = to_host(target);
	struct nvmet_host_link *p;

	down_write(&nvmet_config_sem);
	/* Hosts are matched by NQN string, not by pointer. */
	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), nvmet_host_name(host)))
			goto found;
	}
	up_write(&nvmet_config_sem);
	return;

found:
	list_del(&p->entry);
	nvmet_subsys_disc_changed(subsys, host);

	up_write(&nvmet_config_sem);
	kfree(p);
}
1086
/* Item operations for the per-subsystem "allowed_hosts" directory. */
static struct configfs_item_operations nvmet_allowed_hosts_item_ops = {
	.allow_link		= nvmet_allowed_hosts_allow_link,
	.drop_link		= nvmet_allowed_hosts_drop_link,
};

static const struct config_item_type nvmet_allowed_hosts_type = {
	.ct_item_ops		= &nvmet_allowed_hosts_item_ops,
	.ct_owner		= THIS_MODULE,
};
1096
1097 static ssize_t nvmet_subsys_attr_allow_any_host_show(struct config_item *item,
1098                 char *page)
1099 {
1100         return snprintf(page, PAGE_SIZE, "%d\n",
1101                 to_subsys(item)->allow_any_host);
1102 }
1103
/*
 * Store handler for allow_any_host.  Accepts any kstrtobool spelling;
 * refuses to enable it while explicit allowed_hosts links exist.
 */
static ssize_t nvmet_subsys_attr_allow_any_host_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool allow_any_host;
	int ret = 0;

	if (kstrtobool(page, &allow_any_host))
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (allow_any_host && !list_empty(&subsys->hosts)) {
		pr_err("Can't set allow_any_host when explicit hosts are set!\n");
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Only generate a discovery change event on an actual transition. */
	if (subsys->allow_any_host != allow_any_host) {
		subsys->allow_any_host = allow_any_host;
		nvmet_subsys_disc_changed(subsys, NULL);
	}

out_unlock:
	up_write(&nvmet_config_sem);
	return ret ? ret : count;
}

CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host);
1132
/*
 * Show the subsystem's NVMe version as "major.minor" or
 * "major.minor.tertiary" when a tertiary component is present.
 */
static ssize_t nvmet_subsys_attr_version_show(struct config_item *item,
					      char *page)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	if (NVME_TERTIARY(subsys->ver))
		return snprintf(page, PAGE_SIZE, "%llu.%llu.%llu\n",
				NVME_MAJOR(subsys->ver),
				NVME_MINOR(subsys->ver),
				NVME_TERTIARY(subsys->ver));

	return snprintf(page, PAGE_SIZE, "%llu.%llu\n",
			NVME_MAJOR(subsys->ver),
			NVME_MINOR(subsys->ver));
}
1148
/*
 * Parse and set the subsystem version.  Caller must hold nvmet_config_sem
 * (write) and subsys->lock.  Rejects changes once the subsystem has been
 * discovered by a host and for passthru subsystems.
 */
static ssize_t
nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int major, minor, tertiary = 0;
	int ret;

	if (subsys->subsys_discovered) {
		if (NVME_TERTIARY(subsys->ver))
			pr_err("Can't set version number. %llu.%llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver),
			       NVME_TERTIARY(subsys->ver));
		else
			pr_err("Can't set version number. %llu.%llu is already assigned\n",
			       NVME_MAJOR(subsys->ver),
			       NVME_MINOR(subsys->ver));
		return -EINVAL;
	}

	/* passthru subsystems use the underlying controller's version */
	if (nvmet_is_passthru_subsys(subsys))
		return -EINVAL;

	/*
	 * Accepts "X.Y" or "X.Y.Z".  NOTE(review): %d also accepts signed
	 * values; negative components are not rejected here — confirm
	 * whether that is intended.
	 */
	ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
	if (ret != 2 && ret != 3)
		return -EINVAL;

	subsys->ver = NVME_VS(major, minor, tertiary);

	return count;
}
1181
/*
 * Store wrapper for attr_version: takes nvmet_config_sem then
 * subsys->lock around the locked helper (same order as the other
 * subsystem attribute stores below).
 */
static ssize_t nvmet_subsys_attr_version_store(struct config_item *item,
					       const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_version_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_version);
1197
1198 /* See Section 1.5 of NVMe 1.4 */
/*
 * Return true for printable ASCII characters (space through '~'),
 * the only characters allowed in NVMe string fields.
 * See Section 1.5 of NVMe 1.4.
 */
static bool nvmet_is_ascii(const char c)
{
	return c > 0x1f && c < 0x7f;
}
1203
1204 static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
1205                                              char *page)
1206 {
1207         struct nvmet_subsys *subsys = to_subsys(item);
1208
1209         return snprintf(page, PAGE_SIZE, "%.*s\n",
1210                         NVMET_SN_MAX_SIZE, subsys->serial);
1211 }
1212
/*
 * Validate and set the serial number.  Caller must hold nvmet_config_sem
 * (write) and subsys->lock.  Input is the text up to the first newline;
 * it must be 1..NVMET_SN_MAX_SIZE printable-ASCII characters and is
 * stored space-padded to the full field width.
 */
static ssize_t
nvmet_subsys_attr_serial_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos, len = strcspn(page, "\n");

	/* Immutable once a host has seen the subsystem. */
	if (subsys->subsys_discovered) {
		pr_err("Can't set serial number. %s is already assigned\n",
		       subsys->serial);
		return -EINVAL;
	}

	if (!len || len > NVMET_SN_MAX_SIZE) {
		pr_err("Serial Number can not be empty or exceed %d Bytes\n",
		       NVMET_SN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos])) {
			pr_err("Serial Number must contain only ASCII strings\n");
			return -EINVAL;
		}
	}

	/* Space padding per the NVMe spec's ASCII string field format. */
	memcpy_and_pad(subsys->serial, NVMET_SN_MAX_SIZE, page, len, ' ');

	return count;
}
1242
/*
 * Store wrapper for attr_serial: takes nvmet_config_sem then
 * subsys->lock around the locked helper.
 */
static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_serial_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_serial);
1258
1259 static ssize_t nvmet_subsys_attr_cntlid_min_show(struct config_item *item,
1260                                                  char *page)
1261 {
1262         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_min);
1263 }
1264
/*
 * Set the minimum controller ID.  Must be non-zero and strictly below
 * the current cntlid_max; checked under nvmet_config_sem.
 */
static ssize_t nvmet_subsys_attr_cntlid_min_store(struct config_item *item,
						  const char *page, size_t cnt)
{
	u16 cntlid_min;

	if (sscanf(page, "%hu\n", &cntlid_min) != 1)
		return -EINVAL;

	/* cntlid 0 is reserved. */
	if (cntlid_min == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_min >= to_subsys(item)->cntlid_max)
		goto out_unlock;
	to_subsys(item)->cntlid_min = cntlid_min;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_min);
1288
1289 static ssize_t nvmet_subsys_attr_cntlid_max_show(struct config_item *item,
1290                                                  char *page)
1291 {
1292         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->cntlid_max);
1293 }
1294
/*
 * Set the maximum controller ID.  Must be non-zero and strictly above
 * the current cntlid_min; checked under nvmet_config_sem.
 */
static ssize_t nvmet_subsys_attr_cntlid_max_store(struct config_item *item,
						  const char *page, size_t cnt)
{
	u16 cntlid_max;

	if (sscanf(page, "%hu\n", &cntlid_max) != 1)
		return -EINVAL;

	/* cntlid 0 is reserved. */
	if (cntlid_max == 0)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	if (cntlid_max <= to_subsys(item)->cntlid_min)
		goto out_unlock;
	to_subsys(item)->cntlid_max = cntlid_max;
	up_write(&nvmet_config_sem);
	return cnt;

out_unlock:
	up_write(&nvmet_config_sem);
	return -EINVAL;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_cntlid_max);
1318
1319 static ssize_t nvmet_subsys_attr_model_show(struct config_item *item,
1320                                             char *page)
1321 {
1322         struct nvmet_subsys *subsys = to_subsys(item);
1323
1324         return snprintf(page, PAGE_SIZE, "%s\n", subsys->model_number);
1325 }
1326
/*
 * Validate and replace the model number.  Caller must hold
 * nvmet_config_sem (write) and subsys->lock.  Input is the text up to
 * the first newline: 1..NVMET_MN_MAX_SIZE printable-ASCII characters.
 */
static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	/* Immutable once a host has seen the subsystem. */
	if (subsys->subsys_discovered) {
		pr_err("Can't set model number. %s is already assigned\n",
		       subsys->model_number);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_MN_MAX_SIZE) {
		pr_err("Model number size can not exceed %d Bytes\n",
		       NVMET_MN_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	/* Allocate the new string first so failure leaves the old value. */
	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;
	kfree(subsys->model_number);
	subsys->model_number = val;
	return count;
}
1361
/*
 * Store wrapper for attr_model: takes nvmet_config_sem then
 * subsys->lock around the locked helper.
 */
static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_model_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
1377
1378 static ssize_t nvmet_subsys_attr_ieee_oui_show(struct config_item *item,
1379                                             char *page)
1380 {
1381         struct nvmet_subsys *subsys = to_subsys(item);
1382
1383         return sysfs_emit(page, "0x%06x\n", subsys->ieee_oui);
1384 }
1385
/*
 * Parse and set the IEEE OUI.  Caller must hold nvmet_config_sem (write)
 * and subsys->lock.  Accepts any kstrtou32 base-0 spelling; the value
 * must fit in 24 bits.
 */
static ssize_t nvmet_subsys_attr_ieee_oui_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	uint32_t val = 0;
	int ret;

	/* Immutable once a host has seen the subsystem. */
	if (subsys->subsys_discovered) {
		pr_err("Can't set IEEE OUI. 0x%06x is already assigned\n",
		      subsys->ieee_oui);
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	/* An OUI is 24 bits wide. */
	if (val >= 0x1000000)
		return -EINVAL;

	subsys->ieee_oui = val;

	return count;
}
1409
/*
 * Store wrapper for attr_ieee_oui: takes nvmet_config_sem then
 * subsys->lock around the locked helper.
 */
static ssize_t nvmet_subsys_attr_ieee_oui_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_ieee_oui_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_ieee_oui);
1425
1426 static ssize_t nvmet_subsys_attr_firmware_show(struct config_item *item,
1427                                             char *page)
1428 {
1429         struct nvmet_subsys *subsys = to_subsys(item);
1430
1431         return sysfs_emit(page, "%s\n", subsys->firmware_rev);
1432 }
1433
/*
 * Validate and replace the firmware revision.  Caller must hold
 * nvmet_config_sem (write) and subsys->lock.  Input is the text up to
 * the first newline: 1..NVMET_FR_MAX_SIZE printable-ASCII characters.
 */
static ssize_t nvmet_subsys_attr_firmware_store_locked(struct nvmet_subsys *subsys,
		const char *page, size_t count)
{
	int pos = 0, len;
	char *val;

	/* Immutable once a host has seen the subsystem. */
	if (subsys->subsys_discovered) {
		pr_err("Can't set firmware revision. %s is already assigned\n",
		       subsys->firmware_rev);
		return -EINVAL;
	}

	len = strcspn(page, "\n");
	if (!len)
		return -EINVAL;

	if (len > NVMET_FR_MAX_SIZE) {
		pr_err("Firmware revision size can not exceed %d Bytes\n",
		       NVMET_FR_MAX_SIZE);
		return -EINVAL;
	}

	for (pos = 0; pos < len; pos++) {
		if (!nvmet_is_ascii(page[pos]))
			return -EINVAL;
	}

	/* Allocate the new string first so failure leaves the old value. */
	val = kmemdup_nul(page, len, GFP_KERNEL);
	if (!val)
		return -ENOMEM;

	kfree(subsys->firmware_rev);

	subsys->firmware_rev = val;

	return count;
}
1471
/*
 * Store wrapper for attr_firmware: takes nvmet_config_sem then
 * subsys->lock around the locked helper.
 */
static ssize_t nvmet_subsys_attr_firmware_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	ssize_t ret;

	down_write(&nvmet_config_sem);
	mutex_lock(&subsys->lock);
	ret = nvmet_subsys_attr_firmware_store_locked(subsys, page, count);
	mutex_unlock(&subsys->lock);
	up_write(&nvmet_config_sem);

	return ret;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_firmware);
1487
#ifdef CONFIG_BLK_DEV_INTEGRITY
/* Show whether end-to-end protection information support is enabled. */
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
						char *page)
{
	return snprintf(page, PAGE_SIZE, "%d\n", to_subsys(item)->pi_support);
}

/*
 * Enable/disable protection information support.  Accepts any
 * kstrtobool spelling.
 */
static ssize_t nvmet_subsys_attr_pi_enable_store(struct config_item *item,
						 const char *page, size_t count)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	bool pi_enable;

	if (kstrtobool(page, &pi_enable))
		return -EINVAL;

	subsys->pi_support = pi_enable;
	return count;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_pi_enable);
#endif
1509
1510 static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item,
1511                                               char *page)
1512 {
1513         return snprintf(page, PAGE_SIZE, "%u\n", to_subsys(item)->max_qid);
1514 }
1515
/*
 * Set the maximum I/O queue count (1..NVMET_NR_QUEUES).  Existing
 * controllers are deleted so hosts reconnect and renegotiate queue
 * counts with the new limit.
 */
static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item,
					       const char *page, size_t cnt)
{
	struct nvmet_subsys *subsys = to_subsys(item);
	struct nvmet_ctrl *ctrl;
	u16 qid_max;

	if (sscanf(page, "%hu\n", &qid_max) != 1)
		return -EINVAL;

	if (qid_max < 1 || qid_max > NVMET_NR_QUEUES)
		return -EINVAL;

	down_write(&nvmet_config_sem);
	subsys->max_qid = qid_max;

	/* Force reconnect */
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	up_write(&nvmet_config_sem);

	return cnt;
}
CONFIGFS_ATTR(nvmet_subsys_, attr_qid_max);
1540
/* Attributes exposed in every subsystem's configfs directory. */
static struct configfs_attribute *nvmet_subsys_attrs[] = {
	&nvmet_subsys_attr_attr_allow_any_host,
	&nvmet_subsys_attr_attr_version,
	&nvmet_subsys_attr_attr_serial,
	&nvmet_subsys_attr_attr_cntlid_min,
	&nvmet_subsys_attr_attr_cntlid_max,
	&nvmet_subsys_attr_attr_model,
	&nvmet_subsys_attr_attr_qid_max,
	&nvmet_subsys_attr_attr_ieee_oui,
	&nvmet_subsys_attr_attr_firmware,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_subsys_attr_attr_pi_enable,
#endif
	NULL,
};
1556
/*
 * Subsystem structures & folder operation functions below
 */

/* configfs ->release for a subsystem: tear down controllers, drop ref. */
static void nvmet_subsys_release(struct config_item *item)
{
	struct nvmet_subsys *subsys = to_subsys(item);

	nvmet_subsys_del_ctrls(subsys);
	nvmet_subsys_put(subsys);
}

static struct configfs_item_operations nvmet_subsys_item_ops = {
	.release		= nvmet_subsys_release,
};

static const struct config_item_type nvmet_subsys_type = {
	.ct_item_ops		= &nvmet_subsys_item_ops,
	.ct_attrs		= nvmet_subsys_attrs,
	.ct_owner		= THIS_MODULE,
};
1577
/*
 * configfs ->make_group for the "subsystems" directory: create a new
 * NVM subsystem named by its NQN, with default "namespaces",
 * "allowed_hosts" and (if configured) "passthru" child groups.
 */
static struct config_group *nvmet_subsys_make(struct config_group *group,
		const char *name)
{
	struct nvmet_subsys *subsys;

	/* The well-known discovery NQN is managed internally. */
	if (sysfs_streq(name, NVME_DISC_SUBSYS_NAME)) {
		pr_err("can't create discovery subsystem through configfs\n");
		return ERR_PTR(-EINVAL);
	}

	subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
	if (IS_ERR(subsys))
		return ERR_CAST(subsys);

	config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);

	config_group_init_type_name(&subsys->namespaces_group,
			"namespaces", &nvmet_namespaces_type);
	configfs_add_default_group(&subsys->namespaces_group, &subsys->group);

	config_group_init_type_name(&subsys->allowed_hosts_group,
			"allowed_hosts", &nvmet_allowed_hosts_type);
	configfs_add_default_group(&subsys->allowed_hosts_group,
			&subsys->group);

	/* No-op unless CONFIG_NVME_TARGET_PASSTHRU is enabled. */
	nvmet_add_passthru_group(subsys);

	return &subsys->group;
}
1607
/* Group operations for the top-level "subsystems" directory. */
static struct configfs_group_operations nvmet_subsystems_group_ops = {
	.make_group		= nvmet_subsys_make,
};

static const struct config_item_type nvmet_subsystems_type = {
	.ct_group_ops		= &nvmet_subsystems_group_ops,
	.ct_owner		= THIS_MODULE,
};
1616
1617 static ssize_t nvmet_referral_enable_show(struct config_item *item,
1618                 char *page)
1619 {
1620         return snprintf(page, PAGE_SIZE, "%d\n", to_nvmet_port(item)->enabled);
1621 }
1622
1623 static ssize_t nvmet_referral_enable_store(struct config_item *item,
1624                 const char *page, size_t count)
1625 {
1626         struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
1627         struct nvmet_port *port = to_nvmet_port(item);
1628         bool enable;
1629
1630         if (kstrtobool(page, &enable))
1631                 goto inval;
1632
1633         if (enable)
1634                 nvmet_referral_enable(parent, port);
1635         else
1636                 nvmet_referral_disable(parent, port);
1637
1638         return count;
1639 inval:
1640         pr_err("Invalid value '%s' for enable\n", page);
1641         return -EINVAL;
1642 }
1643
1644 CONFIGFS_ATTR(nvmet_referral_, enable);
1645
/*
 * Discovery Service subsystem definitions
 */

/* Attributes exposed on each referral entry (address plus enable). */
static struct configfs_attribute *nvmet_referral_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_portid,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_referral_attr_enable,
	NULL,
};
1659
/*
 * configfs ->disconnect_notify for referrals: disable the referral on
 * its parent port before the item is released.
 */
static void nvmet_referral_notify(struct config_group *group,
		struct config_item *item)
{
	struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
	struct nvmet_port *port = to_nvmet_port(item);

	nvmet_referral_disable(parent, port);
}
1668
/* configfs ->release for a referral: free the port object. */
static void nvmet_referral_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	kfree(port);
}

static struct configfs_item_operations nvmet_referral_item_ops = {
	.release	= nvmet_referral_release,
};

static const struct config_item_type nvmet_referral_type = {
	.ct_owner	= THIS_MODULE,
	.ct_attrs	= nvmet_referral_attrs,
	.ct_item_ops	= &nvmet_referral_item_ops,
};
1685
/*
 * configfs ->make_group for a port's "referrals" directory: allocate a
 * new (initially disabled) referral entry.
 */
static struct config_group *nvmet_referral_make(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&port->entry);
	config_group_init_type_name(&port->group, name, &nvmet_referral_type);

	return &port->group;
}
1700
/* Group operations for the per-port "referrals" directory. */
static struct configfs_group_operations nvmet_referral_group_ops = {
	.make_group		= nvmet_referral_make,
	.disconnect_notify	= nvmet_referral_notify,
};

static const struct config_item_type nvmet_referrals_type = {
	.ct_owner	= THIS_MODULE,
	.ct_group_ops	= &nvmet_referral_group_ops,
};
1710
/* Mapping between ANA state values and their configfs string names. */
static struct nvmet_type_name_map nvmet_ana_state[] = {
	{ NVME_ANA_OPTIMIZED,		"optimized" },
	{ NVME_ANA_NONOPTIMIZED,	"non-optimized" },
	{ NVME_ANA_INACCESSIBLE,	"inaccessible" },
	{ NVME_ANA_PERSISTENT_LOSS,	"persistent-loss" },
	{ NVME_ANA_CHANGE,		"change" },
};
1718
1719 static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item,
1720                 char *page)
1721 {
1722         struct nvmet_ana_group *grp = to_ana_group(item);
1723         enum nvme_ana_state state = grp->port->ana_state[grp->grpid];
1724         int i;
1725
1726         for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
1727                 if (state == nvmet_ana_state[i].type)
1728                         return sprintf(page, "%s\n", nvmet_ana_state[i].name);
1729         }
1730
1731         return sprintf(page, "\n");
1732 }
1733
/*
 * Set the group's ANA state by name, bump the change count under
 * nvmet_ana_sem, and emit an ANA change AEN on the port.
 */
static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item,
		const char *page, size_t count)
{
	struct nvmet_ana_group *grp = to_ana_group(item);
	enum nvme_ana_state *ana_state = grp->port->ana_state;
	int i;

	for (i = 0; i < ARRAY_SIZE(nvmet_ana_state); i++) {
		if (sysfs_streq(page, nvmet_ana_state[i].name))
			goto found;
	}

	pr_err("Invalid value '%s' for ana_state\n", page);
	return -EINVAL;

found:
	down_write(&nvmet_ana_sem);
	ana_state[grp->grpid] = (enum nvme_ana_state) nvmet_ana_state[i].type;
	/* Hosts use the change count to detect stale ANA log pages. */
	nvmet_ana_chgcnt++;
	up_write(&nvmet_ana_sem);
	/* AEN is sent after dropping the lock. */
	nvmet_port_send_ana_event(grp->port);
	return count;
}

CONFIGFS_ATTR(nvmet_ana_group_, ana_state);
1759
/* Attributes exposed in each ANA group's configfs directory. */
static struct configfs_attribute *nvmet_ana_group_attrs[] = {
	&nvmet_ana_group_attr_ana_state,
	NULL,
};
1764
/*
 * configfs ->release for an ANA group: mark the group inaccessible,
 * drop its enabled count, notify hosts, and free it.  The port's
 * built-in default group is embedded in the port and never freed here.
 */
static void nvmet_ana_group_release(struct config_item *item)
{
	struct nvmet_ana_group *grp = to_ana_group(item);

	if (grp == &grp->port->ana_default_group)
		return;

	down_write(&nvmet_ana_sem);
	grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE;
	nvmet_ana_group_enabled[grp->grpid]--;
	up_write(&nvmet_ana_sem);

	nvmet_port_send_ana_event(grp->port);
	kfree(grp);
}
1780
/* Item type for individual ANA group directories. */
static struct configfs_item_operations nvmet_ana_group_item_ops = {
	.release		= nvmet_ana_group_release,
};

static const struct config_item_type nvmet_ana_group_type = {
	.ct_item_ops		= &nvmet_ana_group_item_ops,
	.ct_attrs		= nvmet_ana_group_attrs,
	.ct_owner		= THIS_MODULE,
};
1790
/*
 * configfs ->make_group for a port's "ana_groups" directory.  The
 * directory name is the group ID: 2..NVMET_MAX_ANAGRPS (grpid 1 is the
 * built-in default group and cannot be created here).
 */
static struct config_group *nvmet_ana_groups_make_group(
		struct config_group *group, const char *name)
{
	struct nvmet_port *port = ana_groups_to_port(&group->cg_item);
	struct nvmet_ana_group *grp;
	u32 grpid;
	int ret;

	ret = kstrtou32(name, 0, &grpid);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS)
		goto out;

	ret = -ENOMEM;
	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
	if (!grp)
		goto out;
	grp->port = port;
	grp->grpid = grpid;

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[grpid]++;
	up_write(&nvmet_ana_sem);

	/* Tell hosts the ANA group inventory changed. */
	nvmet_port_send_ana_event(grp->port);

	config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type);
	return &grp->group;
out:
	return ERR_PTR(ret);
}
1825
/* Group operations for the per-port "ana_groups" directory. */
static struct configfs_group_operations nvmet_ana_groups_group_ops = {
	.make_group		= nvmet_ana_groups_make_group,
};

static const struct config_item_type nvmet_ana_groups_type = {
	.ct_group_ops		= &nvmet_ana_groups_group_ops,
	.ct_owner		= THIS_MODULE,
};
1834
/*
 * Ports definitions.
 */

/*
 * configfs ->release for a port: wait for in-flight controller teardown
 * on nvmet_wq, then unlink and free the port and its ANA state array.
 */
static void nvmet_port_release(struct config_item *item)
{
	struct nvmet_port *port = to_nvmet_port(item);

	/* Let inflight controllers teardown complete */
	flush_workqueue(nvmet_wq);
	list_del(&port->global_entry);

	/* key_put() handles a NULL keyring. */
	key_put(port->keyring);
	kfree(port->ana_state);
	kfree(port);
}
1850
/* Attributes exposed in each port's configfs directory. */
static struct configfs_attribute *nvmet_port_attrs[] = {
	&nvmet_attr_addr_adrfam,
	&nvmet_attr_addr_treq,
	&nvmet_attr_addr_traddr,
	&nvmet_attr_addr_trsvcid,
	&nvmet_attr_addr_trtype,
	&nvmet_attr_addr_tsas,
	&nvmet_attr_param_inline_data_size,
#ifdef CONFIG_BLK_DEV_INTEGRITY
	&nvmet_attr_param_pi_enable,
#endif
	NULL,
};
1864
/* Item type for port directories. */
static struct configfs_item_operations nvmet_port_item_ops = {
	.release		= nvmet_port_release,
};

static const struct config_item_type nvmet_port_type = {
	.ct_attrs		= nvmet_port_attrs,
	.ct_item_ops		= &nvmet_port_item_ops,
	.ct_owner		= THIS_MODULE,
};
1874
1875 static struct config_group *nvmet_ports_make(struct config_group *group,
1876                 const char *name)
1877 {
1878         struct nvmet_port *port;
1879         u16 portid;
1880         u32 i;
1881
1882         if (kstrtou16(name, 0, &portid))
1883                 return ERR_PTR(-EINVAL);
1884
1885         port = kzalloc(sizeof(*port), GFP_KERNEL);
1886         if (!port)
1887                 return ERR_PTR(-ENOMEM);
1888
1889         port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1,
1890                         sizeof(*port->ana_state), GFP_KERNEL);
1891         if (!port->ana_state) {
1892                 kfree(port);
1893                 return ERR_PTR(-ENOMEM);
1894         }
1895
1896         if (nvme_keyring_id()) {
1897                 port->keyring = key_lookup(nvme_keyring_id());
1898                 if (IS_ERR(port->keyring)) {
1899                         pr_warn("NVMe keyring not available, disabling TLS\n");
1900                         port->keyring = NULL;
1901                 }
1902         }
1903
1904         for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) {
1905                 if (i == NVMET_DEFAULT_ANA_GRPID)
1906                         port->ana_state[1] = NVME_ANA_OPTIMIZED;
1907                 else
1908                         port->ana_state[i] = NVME_ANA_INACCESSIBLE;
1909         }
1910
1911         list_add(&port->global_entry, &nvmet_ports_list);
1912
1913         INIT_LIST_HEAD(&port->entry);
1914         INIT_LIST_HEAD(&port->subsystems);
1915         INIT_LIST_HEAD(&port->referrals);
1916         port->inline_data_size = -1;    /* < 0 == let the transport choose */
1917
1918         port->disc_addr.portid = cpu_to_le16(portid);
1919         port->disc_addr.adrfam = NVMF_ADDR_FAMILY_MAX;
1920         port->disc_addr.treq = NVMF_TREQ_DISABLE_SQFLOW;
1921         config_group_init_type_name(&port->group, name, &nvmet_port_type);
1922
1923         config_group_init_type_name(&port->subsys_group,
1924                         "subsystems", &nvmet_port_subsys_type);
1925         configfs_add_default_group(&port->subsys_group, &port->group);
1926
1927         config_group_init_type_name(&port->referrals_group,
1928                         "referrals", &nvmet_referrals_type);
1929         configfs_add_default_group(&port->referrals_group, &port->group);
1930
1931         config_group_init_type_name(&port->ana_groups_group,
1932                         "ana_groups", &nvmet_ana_groups_type);
1933         configfs_add_default_group(&port->ana_groups_group, &port->group);
1934
1935         port->ana_default_group.port = port;
1936         port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID;
1937         config_group_init_type_name(&port->ana_default_group.group,
1938                         __stringify(NVMET_DEFAULT_ANA_GRPID),
1939                         &nvmet_ana_group_type);
1940         configfs_add_default_group(&port->ana_default_group.group,
1941                         &port->ana_groups_group);
1942
1943         return &port->group;
1944 }
1945
static struct configfs_group_operations nvmet_ports_group_ops = {
	.make_group		= nvmet_ports_make,
};

/* Item type for the top-level "ports" directory. */
static const struct config_item_type nvmet_ports_type = {
	.ct_group_ops		= &nvmet_ports_group_ops,
	.ct_owner		= THIS_MODULE,
};

/* Default groups hung off the configfs root; initialized in init below. */
static struct config_group nvmet_subsystems_group;
static struct config_group nvmet_ports_group;
1957
1958 #ifdef CONFIG_NVME_TARGET_AUTH
1959 static ssize_t nvmet_host_dhchap_key_show(struct config_item *item,
1960                 char *page)
1961 {
1962         u8 *dhchap_secret = to_host(item)->dhchap_secret;
1963
1964         if (!dhchap_secret)
1965                 return sprintf(page, "\n");
1966         return sprintf(page, "%s\n", dhchap_secret);
1967 }
1968
1969 static ssize_t nvmet_host_dhchap_key_store(struct config_item *item,
1970                 const char *page, size_t count)
1971 {
1972         struct nvmet_host *host = to_host(item);
1973         int ret;
1974
1975         ret = nvmet_auth_set_key(host, page, false);
1976         /*
1977          * Re-authentication is a soft state, so keep the
1978          * current authentication valid until the host
1979          * requests re-authentication.
1980          */
1981         return ret < 0 ? ret : count;
1982 }
1983
1984 CONFIGFS_ATTR(nvmet_host_, dhchap_key);
1985
1986 static ssize_t nvmet_host_dhchap_ctrl_key_show(struct config_item *item,
1987                 char *page)
1988 {
1989         u8 *dhchap_secret = to_host(item)->dhchap_ctrl_secret;
1990
1991         if (!dhchap_secret)
1992                 return sprintf(page, "\n");
1993         return sprintf(page, "%s\n", dhchap_secret);
1994 }
1995
1996 static ssize_t nvmet_host_dhchap_ctrl_key_store(struct config_item *item,
1997                 const char *page, size_t count)
1998 {
1999         struct nvmet_host *host = to_host(item);
2000         int ret;
2001
2002         ret = nvmet_auth_set_key(host, page, true);
2003         /*
2004          * Re-authentication is a soft state, so keep the
2005          * current authentication valid until the host
2006          * requests re-authentication.
2007          */
2008         return ret < 0 ? ret : count;
2009 }
2010
2011 CONFIGFS_ATTR(nvmet_host_, dhchap_ctrl_key);
2012
2013 static ssize_t nvmet_host_dhchap_hash_show(struct config_item *item,
2014                 char *page)
2015 {
2016         struct nvmet_host *host = to_host(item);
2017         const char *hash_name = nvme_auth_hmac_name(host->dhchap_hash_id);
2018
2019         return sprintf(page, "%s\n", hash_name ? hash_name : "none");
2020 }
2021
2022 static ssize_t nvmet_host_dhchap_hash_store(struct config_item *item,
2023                 const char *page, size_t count)
2024 {
2025         struct nvmet_host *host = to_host(item);
2026         u8 hmac_id;
2027
2028         hmac_id = nvme_auth_hmac_id(page);
2029         if (hmac_id == NVME_AUTH_HASH_INVALID)
2030                 return -EINVAL;
2031         if (!crypto_has_shash(nvme_auth_hmac_name(hmac_id), 0, 0))
2032                 return -ENOTSUPP;
2033         host->dhchap_hash_id = hmac_id;
2034         return count;
2035 }
2036
2037 CONFIGFS_ATTR(nvmet_host_, dhchap_hash);
2038
2039 static ssize_t nvmet_host_dhchap_dhgroup_show(struct config_item *item,
2040                 char *page)
2041 {
2042         struct nvmet_host *host = to_host(item);
2043         const char *dhgroup = nvme_auth_dhgroup_name(host->dhchap_dhgroup_id);
2044
2045         return sprintf(page, "%s\n", dhgroup ? dhgroup : "none");
2046 }
2047
2048 static ssize_t nvmet_host_dhchap_dhgroup_store(struct config_item *item,
2049                 const char *page, size_t count)
2050 {
2051         struct nvmet_host *host = to_host(item);
2052         int dhgroup_id;
2053
2054         dhgroup_id = nvme_auth_dhgroup_id(page);
2055         if (dhgroup_id == NVME_AUTH_DHGROUP_INVALID)
2056                 return -EINVAL;
2057         if (dhgroup_id != NVME_AUTH_DHGROUP_NULL) {
2058                 const char *kpp = nvme_auth_dhgroup_kpp(dhgroup_id);
2059
2060                 if (!crypto_has_kpp(kpp, 0, 0))
2061                         return -EINVAL;
2062         }
2063         host->dhchap_dhgroup_id = dhgroup_id;
2064         return count;
2065 }
2066
2067 CONFIGFS_ATTR(nvmet_host_, dhchap_dhgroup);
2068
/* Per-host authentication attributes (built only with NVME_TARGET_AUTH). */
static struct configfs_attribute *nvmet_host_attrs[] = {
	&nvmet_host_attr_dhchap_key,
	&nvmet_host_attr_dhchap_ctrl_key,
	&nvmet_host_attr_dhchap_hash,
	&nvmet_host_attr_dhchap_dhgroup,
	NULL,
};
2076 #endif /* CONFIG_NVME_TARGET_AUTH */
2077
/* configfs ->release for a host directory: free auth secrets and the host. */
static void nvmet_host_release(struct config_item *item)
{
	struct nvmet_host *host = to_host(item);

#ifdef CONFIG_NVME_TARGET_AUTH
	kfree(host->dhchap_secret);
	kfree(host->dhchap_ctrl_secret);
#endif
	kfree(host);
}
2088
static struct configfs_item_operations nvmet_host_item_ops = {
	.release		= nvmet_host_release,
};

/* Item type for a single host directory under "hosts". */
static const struct config_item_type nvmet_host_type = {
	.ct_item_ops		= &nvmet_host_item_ops,
#ifdef CONFIG_NVME_TARGET_AUTH
	.ct_attrs		= nvmet_host_attrs,
#endif
	.ct_owner		= THIS_MODULE,
};
2100
2101 static struct config_group *nvmet_hosts_make_group(struct config_group *group,
2102                 const char *name)
2103 {
2104         struct nvmet_host *host;
2105
2106         host = kzalloc(sizeof(*host), GFP_KERNEL);
2107         if (!host)
2108                 return ERR_PTR(-ENOMEM);
2109
2110 #ifdef CONFIG_NVME_TARGET_AUTH
2111         /* Default to SHA256 */
2112         host->dhchap_hash_id = NVME_AUTH_HASH_SHA256;
2113 #endif
2114
2115         config_group_init_type_name(&host->group, name, &nvmet_host_type);
2116
2117         return &host->group;
2118 }
2119
static struct configfs_group_operations nvmet_hosts_group_ops = {
	.make_group		= nvmet_hosts_make_group,
};

/* Item type for the top-level "hosts" directory. */
static const struct config_item_type nvmet_hosts_type = {
	.ct_group_ops		= &nvmet_hosts_group_ops,
	.ct_owner		= THIS_MODULE,
};

static struct config_group nvmet_hosts_group;

/* Root item type: no attributes or group ops of its own. */
static const struct config_item_type nvmet_root_type = {
	.ct_owner		= THIS_MODULE,
};

/* The "nvmet" configfs subsystem, registered in nvmet_init_configfs(). */
static struct configfs_subsystem nvmet_configfs_subsystem = {
	.su_group = {
		.cg_item = {
			.ci_namebuf	= "nvmet",
			.ci_type	= &nvmet_root_type,
		},
	},
};
2143
2144 int __init nvmet_init_configfs(void)
2145 {
2146         int ret;
2147
2148         config_group_init(&nvmet_configfs_subsystem.su_group);
2149         mutex_init(&nvmet_configfs_subsystem.su_mutex);
2150
2151         config_group_init_type_name(&nvmet_subsystems_group,
2152                         "subsystems", &nvmet_subsystems_type);
2153         configfs_add_default_group(&nvmet_subsystems_group,
2154                         &nvmet_configfs_subsystem.su_group);
2155
2156         config_group_init_type_name(&nvmet_ports_group,
2157                         "ports", &nvmet_ports_type);
2158         configfs_add_default_group(&nvmet_ports_group,
2159                         &nvmet_configfs_subsystem.su_group);
2160
2161         config_group_init_type_name(&nvmet_hosts_group,
2162                         "hosts", &nvmet_hosts_type);
2163         configfs_add_default_group(&nvmet_hosts_group,
2164                         &nvmet_configfs_subsystem.su_group);
2165
2166         ret = configfs_register_subsystem(&nvmet_configfs_subsystem);
2167         if (ret) {
2168                 pr_err("configfs_register_subsystem: %d\n", ret);
2169                 return ret;
2170         }
2171
2172         return 0;
2173 }
2174
/* Tear down the configfs interface; inverse of nvmet_init_configfs(). */
void __exit nvmet_exit_configfs(void)
{
	configfs_unregister_subsystem(&nvmet_configfs_subsystem);
}