/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H
#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>
#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20
#define NVMET_FR_MAX_SIZE		8
/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)
/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16 bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of connect().
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
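
/*
 * Illustrative use (a sketch of the connect-handling pattern, not a
 * verbatim quote): when a Connect command carries an invalid field, the
 * reported error location is the field's offset within the connect data:
 *
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	nvmet_req_complete(req, NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR);
 */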
struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	u32			blksize_shift;

	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;

	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}
static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}
struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct delayed_work	auth_expired_work;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};
struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
};
static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}
/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
};
static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}
static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}
struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];

#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	struct crypto_kpp	*dh_tfm;
#endif
};
struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;

	struct list_head	ctrls;

	struct list_head	hosts;

	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};
static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}
static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}
struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}
struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC		8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
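
/*
 * Worked example (illustrative): with 4 KiB pages this caps inline data
 * at 8 * 4096 = 32768 bytes; PAGE_SIZE, and therefore the limit, is
 * architecture dependent.
 */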
struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
		struct {
			struct bio	inline_bio;
			struct work_struct work;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio	inline_bio;
			struct work_struct zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
};
#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}
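
/*
 * Example use (sketch): Get Features handlers report their value through
 * the result field, e.g. the KATO handler effectively does
 *
 *	nvmet_set_result(req, req->sq->ctrl->kato * 1000);
 *
 * converting the controller's keep-alive timeout from seconds back into
 * the milliseconds the host expects.
 */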
/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}
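
/*
 * Example (sketch): a DMA-capable transport would map the request's
 * scatterlist with this direction, along the lines of:
 *
 *	dma_map_sg(dev, req->sg, req->sg_cnt, nvmet_data_dir(req));
 */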
struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}
static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
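
/*
 * Typical use (a sketch of the event-source pattern): check the mask
 * first so that only one AEN per bit is outstanding until the host reads
 * the matching log page without RAE set:
 *
 *	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		return;
 *	nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 *			      NVME_AER_NOTICE_NS_CHANGED, NVME_LOG_CHANGED_NS);
 */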
void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);
void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024
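/* 1024 nsids * sizeof(__le32) = 4096 bytes, i.e. exactly one 4 KiB page. */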
/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1
#define NVMET_DISC_KATO_MS		120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);
bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req);
void nvmet_execute_identify_ns_zns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}
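
/*
 * Worked example (illustrative): rw.length is 0's based, so a host I/O
 * of 8 logical blocks carries length == 7; on a 512-byte formatted
 * namespace (blksize_shift == 9) nvmet_rw_data_len() returns
 * (7 + 1) << 9 == 4096 bytes.
 */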
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
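
/*
 * Examples: to0based(1) == 0 and to0based(65536) == 65535; inputs are
 * clamped, so 0 also maps to 0 and anything above 65536 maps to 65535.
 */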
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
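
/*
 * Example (illustrative): on a 4096-byte formatted namespace
 * (blksize_shift == 12) each LBA spans 8 of the kernel's 512-byte
 * sectors, so nvmet_lba_to_sect(ns, cpu_to_le64(1)) == 8.
 */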
static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}
static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}
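
/*
 * Allocation counterpart (a sketch under the bio_init() signature of
 * recent kernels; the real call sites live in the I/O backends):
 *
 *	if (nvmet_use_inline_bvec(req)) {
 *		bio = &req->b.inline_bio;
 *		bio_init(bio, req->ns->bdev, req->inline_bvec,
 *			 ARRAY_SIZE(req->inline_bvec), opf);
 *	} else {
 *		bio = bio_alloc(req->ns->bdev, bio_max_segs(req->sg_cnt),
 *				opf, GFP_KERNEL);
 *	}
 */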
#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else /* CONFIG_NVME_TARGET_AUTH */
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif /* CONFIG_NVME_TARGET_AUTH */

#endif /* _NVMET_H */