1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe over Fabrics TCP target.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/key.h>
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
17 #include <net/tls_prot.h>
18 #include <net/handshake.h>
19 #include <linux/inet.h>
20 #include <linux/llist.h>
21 #include <crypto/hash.h>
22 #include <trace/events/sock.h>
26 #define NVMET_TCP_DEF_INLINE_DATA_SIZE (4 * PAGE_SIZE)
27 #define NVMET_TCP_MAXH2CDATA 0x400000 /* 4M arbitrary limit */
28 #define NVMET_TCP_BACKLOG 128
30 static int param_store_val(const char *str, int *val, int min, int max)
34 ret = kstrtoint(str, 10, &new_val);
38 if (new_val < min || new_val > max)
45 static int set_params(const char *str, const struct kernel_param *kp)
47 return param_store_val(str, kp->arg, 0, INT_MAX);
50 static const struct kernel_param_ops set_param_ops = {
55 /* Define the socket priority to use for connections where it is desirable
56 * that the NIC consider performing optimized packet processing or filtering.
57 * A non-zero value is sufficient to indicate general consideration of any
58 * possible optimization. Making it a module param allows for alternative
59 * values that may be unique for some NIC implementations.
61 static int so_priority;
62 device_param_cb(so_priority, &set_param_ops, &so_priority, 0644);
63 MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority: Default 0");
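/*
 * Usage sketch (assuming the module is built as nvmet-tcp and exposed under
 * /sys/module/nvmet_tcp): the priority can be given at load time or changed
 * at runtime thanks to the 0644 permissions, e.g.
 *
 *   modprobe nvmet-tcp so_priority=6
 *   echo 6 > /sys/module/nvmet_tcp/parameters/so_priority
 */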
65 /* Define a time period (in usecs) over which io_work() shall sample an activated
66 * queue before determining it to be idle. This optional module behavior
67 * can enable NIC solutions that support socket optimized packet processing
68 * using advanced interrupt moderation techniques.
70 static int idle_poll_period_usecs;
71 device_param_cb(idle_poll_period_usecs, &set_param_ops,
72 &idle_poll_period_usecs, 0644);
73 MODULE_PARM_DESC(idle_poll_period_usecs,
74 "nvmet tcp io_work poll till idle time period in usecs: Default 0");
76 #ifdef CONFIG_NVME_TARGET_TCP_TLS
78 * TLS handshake timeout
80 static int tls_handshake_timeout = 10;
81 module_param(tls_handshake_timeout, int, 0644);
82 MODULE_PARM_DESC(tls_handshake_timeout,
83 "nvme TLS handshake timeout in seconds (default 10)");
86 #define NVMET_TCP_RECV_BUDGET 8
87 #define NVMET_TCP_SEND_BUDGET 8
88 #define NVMET_TCP_IO_WORK_BUDGET 64
90 enum nvmet_tcp_send_state {
91 NVMET_TCP_SEND_DATA_PDU,
95 NVMET_TCP_SEND_RESPONSE
98 enum nvmet_tcp_recv_state {
101 NVMET_TCP_RECV_DDGST,
106 NVMET_TCP_F_INIT_FAILED = (1 << 0),
109 struct nvmet_tcp_cmd {
110 struct nvmet_tcp_queue *queue;
111 struct nvmet_req req;
113 struct nvme_tcp_cmd_pdu *cmd_pdu;
114 struct nvme_tcp_rsp_pdu *rsp_pdu;
115 struct nvme_tcp_data_pdu *data_pdu;
116 struct nvme_tcp_r2t_pdu *r2t_pdu;
124 char recv_cbuf[CMSG_LEN(sizeof(char))];
125 struct msghdr recv_msg;
129 struct list_head entry;
130 struct llist_node lentry;
134 struct scatterlist *cur_sg;
135 enum nvmet_tcp_send_state state;
141 enum nvmet_tcp_queue_state {
142 NVMET_TCP_Q_CONNECTING,
143 NVMET_TCP_Q_TLS_HANDSHAKE,
145 NVMET_TCP_Q_DISCONNECTING,
149 struct nvmet_tcp_queue {
151 struct nvmet_tcp_port *port;
152 struct work_struct io_work;
153 struct nvmet_cq nvme_cq;
154 struct nvmet_sq nvme_sq;
158 struct nvmet_tcp_cmd *cmds;
159 unsigned int nr_cmds;
160 struct list_head free_list;
161 struct llist_head resp_list;
162 struct list_head resp_send_list;
164 struct nvmet_tcp_cmd *snd_cmd;
169 enum nvmet_tcp_recv_state rcv_state;
170 struct nvmet_tcp_cmd *cmd;
171 union nvme_tcp_pdu pdu;
176 struct ahash_request *snd_hash;
177 struct ahash_request *rcv_hash;
180 key_serial_t tls_pskid;
181 struct delayed_work tls_handshake_tmo_work;
183 unsigned long poll_end;
185 spinlock_t state_lock;
186 enum nvmet_tcp_queue_state state;
188 struct sockaddr_storage sockaddr;
189 struct sockaddr_storage sockaddr_peer;
190 struct work_struct release_work;
193 struct list_head queue_list;
195 struct nvmet_tcp_cmd connect;
197 struct page_frag_cache pf_cache;
199 void (*data_ready)(struct sock *);
200 void (*state_change)(struct sock *);
201 void (*write_space)(struct sock *);
204 struct nvmet_tcp_port {
206 struct work_struct accept_work;
207 struct nvmet_port *nport;
208 struct sockaddr_storage addr;
209 void (*data_ready)(struct sock *);
212 static DEFINE_IDA(nvmet_tcp_queue_ida);
213 static LIST_HEAD(nvmet_tcp_queue_list);
214 static DEFINE_MUTEX(nvmet_tcp_queue_mutex);
216 static struct workqueue_struct *nvmet_tcp_wq;
217 static const struct nvmet_fabrics_ops nvmet_tcp_ops;
218 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
219 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);
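/*
 * The transfer tag (ttag) carried in R2T PDUs is simply the command's index
 * in queue->cmds; nvmet_tcp_handle_h2c_data_pdu() uses it to look the
 * command back up.  The initial connect command lives outside that array,
 * so 0xffff is used while queue->nr_cmds is still zero.
 */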
221 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
222 struct nvmet_tcp_cmd *cmd)
224 if (unlikely(!queue->nr_cmds)) {
225 /* We didn't allocate cmds yet, send 0xffff */
229 return cmd - queue->cmds;
232 static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
234 return nvme_is_write(cmd->req.cmd) &&
235 cmd->rbytes_done < cmd->req.transfer_len;
238 static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
240 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
243 static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
245 return !nvme_is_write(cmd->req.cmd) &&
246 cmd->req.transfer_len > 0 &&
247 !cmd->req.cqe->status;
250 static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
252 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
256 static inline struct nvmet_tcp_cmd *
257 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
259 struct nvmet_tcp_cmd *cmd;
261 cmd = list_first_entry_or_null(&queue->free_list,
262 struct nvmet_tcp_cmd, entry);
265 list_del_init(&cmd->entry);
267 cmd->rbytes_done = cmd->wbytes_done = 0;
275 static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
277 if (unlikely(cmd == &cmd->queue->connect))
280 list_add_tail(&cmd->entry, &cmd->queue->free_list);
283 static inline int queue_cpu(struct nvmet_tcp_queue *queue)
285 return queue->sock->sk->sk_incoming_cpu;
288 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
290 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
293 static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
295 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
298 static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
299 void *pdu, size_t len)
301 struct scatterlist sg;
303 sg_init_one(&sg, pdu, len);
304 ahash_request_set_crypt(hash, &sg, pdu + len, len);
305 crypto_ahash_digest(hash);
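/*
 * For reference, the "crc32c" transform used here produces the standard
 * CRC32C (Castagnoli) digest that NVMe/TCP mandates for HDGST/DDGST.  A
 * minimal, unoptimized sketch of the same calculation (the hypothetical
 * helper below is purely illustrative; the driver always goes through the
 * crypto API):
 */
static u32 __maybe_unused nvmet_tcp_crc32c_sketch(const u8 *data, size_t len)
{
	u32 crc = ~0U;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82F63B78 & (0U - (crc & 1)));
	}
	return ~crc;
}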
308 static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
309 void *pdu, size_t len)
311 struct nvme_tcp_hdr *hdr = pdu;
315 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
316 pr_err("queue %d: header digest enabled but no header digest\n",
321 recv_digest = *(__le32 *)(pdu + hdr->hlen);
322 nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
323 exp_digest = *(__le32 *)(pdu + hdr->hlen);
324 if (recv_digest != exp_digest) {
325 pr_err("queue %d: header digest error: recv %#x expected %#x\n",
326 queue->idx, le32_to_cpu(recv_digest),
327 le32_to_cpu(exp_digest));
334 static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
336 struct nvme_tcp_hdr *hdr = pdu;
337 u8 digest_len = nvmet_tcp_hdgst_len(queue);
340 len = le32_to_cpu(hdr->plen) - hdr->hlen -
341 (hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);
343 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
344 pr_err("queue %d: data digest flag is cleared\n", queue->idx);
351 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
354 sgl_free(cmd->req.sg);
359 static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
361 struct bio_vec *iov = cmd->iov;
362 struct scatterlist *sg;
363 u32 length, offset, sg_offset;
366 length = cmd->pdu_len;
367 nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
368 offset = cmd->rbytes_done;
369 cmd->sg_idx = offset / PAGE_SIZE;
370 sg_offset = offset % PAGE_SIZE;
371 sg = &cmd->req.sg[cmd->sg_idx];
374 u32 iov_len = min_t(u32, length, sg->length - sg_offset);
376 bvec_set_page(iov, sg_page(sg), iov_len,
377 sg->offset + sg_offset);
385 iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
386 nr_pages, cmd->pdu_len);
389 static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
391 queue->rcv_state = NVMET_TCP_RECV_ERR;
392 if (queue->nvme_sq.ctrl)
393 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
395 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
398 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
400 queue->rcv_state = NVMET_TCP_RECV_ERR;
401 if (status == -EPIPE || status == -ECONNRESET)
402 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
404 nvmet_tcp_fatal_error(queue);
407 static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
409 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
410 u32 len = le32_to_cpu(sgl->length);
415 if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
416 NVME_SGL_FMT_OFFSET)) {
417 if (!nvme_is_write(cmd->req.cmd))
418 return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
420 if (len > cmd->req.port->inline_data_size)
421 return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
424 cmd->req.transfer_len += len;
426 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
428 return NVME_SC_INTERNAL;
429 cmd->cur_sg = cmd->req.sg;
431 if (nvmet_tcp_has_data_in(cmd)) {
432 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
433 sizeof(*cmd->iov), GFP_KERNEL);
440 nvmet_tcp_free_cmd_buffers(cmd);
441 return NVME_SC_INTERNAL;
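/*
 * Note on the mapping above: an offset (in-capsule) data descriptor is only
 * accepted for writes and must fit within the port's inline_data_size; in
 * all accepted cases a scatterlist covering the transfer is allocated, and
 * for host-to-controller data a bio_vec array (cmd->iov) is prepared for
 * the receive path.
 */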
444 static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
445 struct nvmet_tcp_cmd *cmd)
447 ahash_request_set_crypt(hash, cmd->req.sg,
448 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
449 crypto_ahash_digest(hash);
452 static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
454 struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
455 struct nvmet_tcp_queue *queue = cmd->queue;
456 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
457 u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);
460 cmd->state = NVMET_TCP_SEND_DATA_PDU;
462 pdu->hdr.type = nvme_tcp_c2h_data;
463 pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
464 NVME_TCP_F_DATA_SUCCESS : 0);
465 pdu->hdr.hlen = sizeof(*pdu);
466 pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
468 cpu_to_le32(pdu->hdr.hlen + hdgst +
469 cmd->req.transfer_len + ddgst);
470 pdu->command_id = cmd->req.cqe->command_id;
471 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
472 pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
474 if (queue->data_digest) {
475 pdu->hdr.flags |= NVME_TCP_F_DDGST;
476 nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
479 if (cmd->queue->hdr_digest) {
480 pdu->hdr.flags |= NVME_TCP_F_HDGST;
481 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
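/*
 * Worked example for the plen calculation above (sizes assume the current
 * nvme_tcp_data_pdu layout): a 4 KiB C2HData PDU with both digests enabled
 * carries hdr.hlen (24) + HDGST (4) + 4096 data bytes + DDGST (4), i.e.
 * plen = 4128, with pdo pointing at byte 28 where the payload starts.
 */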
485 static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
487 struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
488 struct nvmet_tcp_queue *queue = cmd->queue;
489 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
492 cmd->state = NVMET_TCP_SEND_R2T;
494 pdu->hdr.type = nvme_tcp_r2t;
496 pdu->hdr.hlen = sizeof(*pdu);
498 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
500 pdu->command_id = cmd->req.cmd->common.command_id;
501 pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
502 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
503 pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
504 if (cmd->queue->hdr_digest) {
505 pdu->hdr.flags |= NVME_TCP_F_HDGST;
506 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
510 static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
512 struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
513 struct nvmet_tcp_queue *queue = cmd->queue;
514 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
517 cmd->state = NVMET_TCP_SEND_RESPONSE;
519 pdu->hdr.type = nvme_tcp_rsp;
521 pdu->hdr.hlen = sizeof(*pdu);
523 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
524 if (cmd->queue->hdr_digest) {
525 pdu->hdr.flags |= NVME_TCP_F_HDGST;
526 nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
530 static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
532 struct llist_node *node;
533 struct nvmet_tcp_cmd *cmd;
535 for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
536 cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
537 list_add(&cmd->entry, &queue->resp_send_list);
538 queue->send_list_len++;
542 static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
544 queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
545 struct nvmet_tcp_cmd, entry);
546 if (!queue->snd_cmd) {
547 nvmet_tcp_process_resp_list(queue);
549 list_first_entry_or_null(&queue->resp_send_list,
550 struct nvmet_tcp_cmd, entry);
551 if (unlikely(!queue->snd_cmd))
555 list_del_init(&queue->snd_cmd->entry);
556 queue->send_list_len--;
558 if (nvmet_tcp_need_data_out(queue->snd_cmd))
559 nvmet_setup_c2h_data_pdu(queue->snd_cmd);
560 else if (nvmet_tcp_need_data_in(queue->snd_cmd))
561 nvmet_setup_r2t_pdu(queue->snd_cmd);
563 nvmet_setup_response_pdu(queue->snd_cmd);
565 return queue->snd_cmd;
568 static void nvmet_tcp_queue_response(struct nvmet_req *req)
570 struct nvmet_tcp_cmd *cmd =
571 container_of(req, struct nvmet_tcp_cmd, req);
572 struct nvmet_tcp_queue *queue = cmd->queue;
573 struct nvme_sgl_desc *sgl;
576 if (unlikely(cmd == queue->cmd)) {
577 sgl = &cmd->req.cmd->common.dptr.sgl;
578 len = le32_to_cpu(sgl->length);
581 * Wait for inline data before processing the response.
582 * Avoid using helpers; this might happen before
583 * nvmet_req_init() has completed.
585 if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
586 len && len <= cmd->req.port->inline_data_size &&
587 nvme_is_write(cmd->req.cmd))
591 llist_add(&cmd->lentry, &queue->resp_list);
592 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
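/*
 * Completions are staged on the lockless resp_list (llist) so they can be
 * queued from any context without taking a lock; io_work, which runs on the
 * socket's incoming CPU, later drains them into resp_send_list via
 * nvmet_tcp_process_resp_list() and transmits them from there.
 */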
595 static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
597 if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
598 nvmet_tcp_queue_response(&cmd->req);
600 cmd->req.execute(&cmd->req);
603 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
605 struct msghdr msg = {
606 .msg_flags = MSG_DONTWAIT | MSG_MORE | MSG_SPLICE_PAGES,
609 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
610 int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
613 bvec_set_virt(&bvec, (void *)cmd->data_pdu + cmd->offset, left);
614 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
615 ret = sock_sendmsg(cmd->queue->sock, &msg);
625 cmd->state = NVMET_TCP_SEND_DATA;
630 static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
632 struct nvmet_tcp_queue *queue = cmd->queue;
635 while (cmd->cur_sg) {
636 struct msghdr msg = {
637 .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
639 struct page *page = sg_page(cmd->cur_sg);
641 u32 left = cmd->cur_sg->length - cmd->offset;
643 if ((!last_in_batch && cmd->queue->send_list_len) ||
644 cmd->wbytes_done + left < cmd->req.transfer_len ||
645 queue->data_digest || !queue->nvme_sq.sqhd_disabled)
646 msg.msg_flags |= MSG_MORE;
648 bvec_set_page(&bvec, page, left, cmd->offset);
649 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
650 ret = sock_sendmsg(cmd->queue->sock, &msg);
655 cmd->wbytes_done += ret;
658 if (cmd->offset == cmd->cur_sg->length) {
659 cmd->cur_sg = sg_next(cmd->cur_sg);
664 if (queue->data_digest) {
665 cmd->state = NVMET_TCP_SEND_DDGST;
668 if (queue->nvme_sq.sqhd_disabled) {
669 cmd->queue->snd_cmd = NULL;
670 nvmet_tcp_put_cmd(cmd);
672 nvmet_setup_response_pdu(cmd);
676 if (queue->nvme_sq.sqhd_disabled)
677 nvmet_tcp_free_cmd_buffers(cmd);
683 static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
686 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
688 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
689 int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
692 if (!last_in_batch && cmd->queue->send_list_len)
693 msg.msg_flags |= MSG_MORE;
695 msg.msg_flags |= MSG_EOR;
697 bvec_set_virt(&bvec, (void *)cmd->rsp_pdu + cmd->offset, left);
698 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
699 ret = sock_sendmsg(cmd->queue->sock, &msg);
708 nvmet_tcp_free_cmd_buffers(cmd);
709 cmd->queue->snd_cmd = NULL;
710 nvmet_tcp_put_cmd(cmd);
714 static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
716 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES, };
718 u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
719 int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
722 if (!last_in_batch && cmd->queue->send_list_len)
723 msg.msg_flags |= MSG_MORE;
725 msg.msg_flags |= MSG_EOR;
727 bvec_set_virt(&bvec, (void *)cmd->r2t_pdu + cmd->offset, left);
728 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, left);
729 ret = sock_sendmsg(cmd->queue->sock, &msg);
738 cmd->queue->snd_cmd = NULL;
742 static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
744 struct nvmet_tcp_queue *queue = cmd->queue;
745 int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
746 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
748 .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
753 if (!last_in_batch && cmd->queue->send_list_len)
754 msg.msg_flags |= MSG_MORE;
756 msg.msg_flags |= MSG_EOR;
758 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
759 if (unlikely(ret <= 0))
768 if (queue->nvme_sq.sqhd_disabled) {
769 cmd->queue->snd_cmd = NULL;
770 nvmet_tcp_put_cmd(cmd);
772 nvmet_setup_response_pdu(cmd);
777 static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
780 struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
783 if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
784 cmd = nvmet_tcp_fetch_cmd(queue);
789 if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
790 ret = nvmet_try_send_data_pdu(cmd);
795 if (cmd->state == NVMET_TCP_SEND_DATA) {
796 ret = nvmet_try_send_data(cmd, last_in_batch);
801 if (cmd->state == NVMET_TCP_SEND_DDGST) {
802 ret = nvmet_try_send_ddgst(cmd, last_in_batch);
807 if (cmd->state == NVMET_TCP_SEND_R2T) {
808 ret = nvmet_try_send_r2t(cmd, last_in_batch);
813 if (cmd->state == NVMET_TCP_SEND_RESPONSE)
814 ret = nvmet_try_send_response(cmd, last_in_batch);
826 static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
827 int budget, int *sends)
831 for (i = 0; i < budget; i++) {
832 ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
833 if (unlikely(ret < 0)) {
834 nvmet_tcp_socket_error(queue, ret);
836 } else if (ret == 0) {
845 static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
848 queue->left = sizeof(struct nvme_tcp_hdr);
850 queue->rcv_state = NVMET_TCP_RECV_PDU;
853 static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
855 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
857 ahash_request_free(queue->rcv_hash);
858 ahash_request_free(queue->snd_hash);
859 crypto_free_ahash(tfm);
862 static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
864 struct crypto_ahash *tfm;
866 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
870 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
871 if (!queue->snd_hash)
873 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
875 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
876 if (!queue->rcv_hash)
878 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
882 ahash_request_free(queue->snd_hash);
884 crypto_free_ahash(tfm);
889 static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
891 struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
892 struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
893 struct msghdr msg = {};
897 if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
898 pr_err("bad nvme-tcp pdu length (%d)\n",
899 le32_to_cpu(icreq->hdr.plen));
900 nvmet_tcp_fatal_error(queue);
903 if (icreq->pfv != NVME_TCP_PFV_1_0) {
904 pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
908 if (icreq->hpda != 0) {
909 pr_err("queue %d: unsupported hpda %d\n", queue->idx,
914 queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
915 queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
916 if (queue->hdr_digest || queue->data_digest) {
917 ret = nvmet_tcp_alloc_crypto(queue);
922 memset(icresp, 0, sizeof(*icresp));
923 icresp->hdr.type = nvme_tcp_icresp;
924 icresp->hdr.hlen = sizeof(*icresp);
926 icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
927 icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
928 icresp->maxdata = cpu_to_le32(NVMET_TCP_MAXH2CDATA);
930 if (queue->hdr_digest)
931 icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
932 if (queue->data_digest)
933 icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
935 iov.iov_base = icresp;
936 iov.iov_len = sizeof(*icresp);
937 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
939 queue->state = NVMET_TCP_Q_FAILED;
940 return ret; /* queue removal will cleanup */
943 queue->state = NVMET_TCP_Q_LIVE;
944 nvmet_prepare_receive_pdu(queue);
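/*
 * For reference, a conforming ICReq as the host side would build it (a
 * sketch assuming the usual nvme_tcp_icreq_pdu layout) looks roughly like:
 *
 *	struct nvme_tcp_icreq_pdu icreq = {
 *		.hdr.type	= nvme_tcp_icreq,
 *		.hdr.hlen	= sizeof(icreq),
 *		.hdr.pdo	= 0,
 *		.hdr.plen	= cpu_to_le32(sizeof(icreq)),
 *		.pfv		= cpu_to_le16(NVME_TCP_PFV_1_0),
 *		.hpda		= 0,
 *		.digest		= NVME_TCP_HDR_DIGEST_ENABLE,
 *		.maxr2t		= 0,
 *	};
 *
 * which is exactly what the length, pfv and hpda checks above validate
 * before the ICResp is sent back.
 */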
948 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
949 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
951 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
955 * This command has not been processed yet, hence we are trying to
956 * figure out if there is still pending data left to receive. If
957 * there isn't, we can simply prepare for the next pdu and bail out,
958 * otherwise we will need to prepare a buffer and receive the
959 * stale data before continuing forward.
961 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
962 data_len > cmd->req.port->inline_data_size) {
963 nvmet_prepare_receive_pdu(queue);
967 ret = nvmet_tcp_map_data(cmd);
969 pr_err("queue %d: failed to map data\n", queue->idx);
970 nvmet_tcp_fatal_error(queue);
974 queue->rcv_state = NVMET_TCP_RECV_DATA;
975 nvmet_tcp_build_pdu_iovec(cmd);
976 cmd->flags |= NVMET_TCP_F_INIT_FAILED;
979 static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
981 struct nvme_tcp_data_pdu *data = &queue->pdu.data;
982 struct nvmet_tcp_cmd *cmd;
983 unsigned int exp_data_len;
985 if (likely(queue->nr_cmds)) {
986 if (unlikely(data->ttag >= queue->nr_cmds)) {
987 pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
988 queue->idx, data->ttag, queue->nr_cmds);
991 cmd = &queue->cmds[data->ttag];
993 cmd = &queue->connect;
996 if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
997 pr_err("ttag %u unexpected data offset %u (expected %u)\n",
998 data->ttag, le32_to_cpu(data->data_offset),
1003 exp_data_len = le32_to_cpu(data->hdr.plen) -
1004 nvmet_tcp_hdgst_len(queue) -
1005 nvmet_tcp_ddgst_len(queue) -
1008 cmd->pdu_len = le32_to_cpu(data->data_length);
1009 if (unlikely(cmd->pdu_len != exp_data_len ||
1010 cmd->pdu_len == 0 ||
1011 cmd->pdu_len > NVMET_TCP_MAXH2CDATA)) {
1012 pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
1016 nvmet_tcp_build_pdu_iovec(cmd);
1018 queue->rcv_state = NVMET_TCP_RECV_DATA;
1023 /* FIXME: use proper transport errors */
1024 nvmet_tcp_fatal_error(queue);
1028 static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
1030 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1031 struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
1032 struct nvmet_req *req;
1035 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1036 if (hdr->type != nvme_tcp_icreq) {
1037 pr_err("unexpected pdu type (%d) before icreq\n",
1039 nvmet_tcp_fatal_error(queue);
1042 return nvmet_tcp_handle_icreq(queue);
1045 if (unlikely(hdr->type == nvme_tcp_icreq)) {
1046 pr_err("queue %d: received icreq pdu in state %d\n",
1047 queue->idx, queue->state);
1048 nvmet_tcp_fatal_error(queue);
1052 if (hdr->type == nvme_tcp_h2c_data) {
1053 ret = nvmet_tcp_handle_h2c_data_pdu(queue);
1059 queue->cmd = nvmet_tcp_get_cmd(queue);
1060 if (unlikely(!queue->cmd)) {
1061 /* This should never happen */
1062 pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
1063 queue->idx, queue->nr_cmds, queue->send_list_len,
1064 nvme_cmd->common.opcode);
1065 nvmet_tcp_fatal_error(queue);
1069 req = &queue->cmd->req;
1070 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1072 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1073 &queue->nvme_sq, &nvmet_tcp_ops))) {
1074 pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
1075 req->cmd, req->cmd->common.command_id,
1076 req->cmd->common.opcode,
1077 le32_to_cpu(req->cmd->common.dptr.sgl.length));
1079 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1083 ret = nvmet_tcp_map_data(queue->cmd);
1084 if (unlikely(ret)) {
1085 pr_err("queue %d: failed to map data\n", queue->idx);
1086 if (nvmet_tcp_has_inline_data(queue->cmd))
1087 nvmet_tcp_fatal_error(queue);
1089 nvmet_req_complete(req, ret);
1094 if (nvmet_tcp_need_data_in(queue->cmd)) {
1095 if (nvmet_tcp_has_inline_data(queue->cmd)) {
1096 queue->rcv_state = NVMET_TCP_RECV_DATA;
1097 nvmet_tcp_build_pdu_iovec(queue->cmd);
1101 nvmet_tcp_queue_response(&queue->cmd->req);
1105 queue->cmd->req.execute(&queue->cmd->req);
1107 nvmet_prepare_receive_pdu(queue);
1111 static const u8 nvme_tcp_pdu_sizes[] = {
1112 [nvme_tcp_icreq] = sizeof(struct nvme_tcp_icreq_pdu),
1113 [nvme_tcp_cmd] = sizeof(struct nvme_tcp_cmd_pdu),
1114 [nvme_tcp_h2c_data] = sizeof(struct nvme_tcp_data_pdu),
1117 static inline u8 nvmet_tcp_pdu_size(u8 type)
1121 return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
1122 nvme_tcp_pdu_sizes[idx]) ?
1123 nvme_tcp_pdu_sizes[idx] : 0;
1126 static inline bool nvmet_tcp_pdu_valid(u8 type)
1129 case nvme_tcp_icreq:
1131 case nvme_tcp_h2c_data:
1139 static int nvmet_tcp_tls_record_ok(struct nvmet_tcp_queue *queue,
1140 struct msghdr *msg, char *cbuf)
1142 struct cmsghdr *cmsg = (struct cmsghdr *)cbuf;
1143 u8 ctype, level, description;
1146 ctype = tls_get_record_type(queue->sock->sk, cmsg);
1150 case TLS_RECORD_TYPE_DATA:
1152 case TLS_RECORD_TYPE_ALERT:
1153 tls_alert_recv(queue->sock->sk, msg, &level, &description);
1154 if (level == TLS_ALERT_LEVEL_FATAL) {
1155 pr_err("queue %d: TLS Alert desc %u\n",
1156 queue->idx, description);
1159 pr_warn("queue %d: TLS Alert desc %u\n",
1160 queue->idx, description);
1165 /* discard this record type */
1166 pr_err("queue %d: TLS record %d unhandled\n",
1174 static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
1176 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1179 char cbuf[CMSG_LEN(sizeof(char))] = {};
1180 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1183 iov.iov_base = (void *)&queue->pdu + queue->offset;
1184 iov.iov_len = queue->left;
1185 if (queue->tls_pskid) {
1186 msg.msg_control = cbuf;
1187 msg.msg_controllen = sizeof(cbuf);
1189 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1190 iov.iov_len, msg.msg_flags);
1191 if (unlikely(len < 0))
1193 if (queue->tls_pskid) {
1194 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1199 queue->offset += len;
1204 if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
1205 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1207 if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
1208 pr_err("unexpected pdu type %d\n", hdr->type);
1209 nvmet_tcp_fatal_error(queue);
1213 if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
1214 pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
1218 queue->left = hdr->hlen - queue->offset + hdgst;
1222 if (queue->hdr_digest &&
1223 nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
1224 nvmet_tcp_fatal_error(queue); /* fatal */
1228 if (queue->data_digest &&
1229 nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
1230 nvmet_tcp_fatal_error(queue); /* fatal */
1234 return nvmet_tcp_done_recv_pdu(queue);
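/*
 * Receive state machine overview: a fully parsed and verified PDU moves the
 * queue from NVMET_TCP_RECV_PDU to NVMET_TCP_RECV_DATA when a payload is
 * expected, on to NVMET_TCP_RECV_DDGST when data digest is enabled, and then
 * back to NVMET_TCP_RECV_PDU for the next PDU.  NVMET_TCP_RECV_ERR stops all
 * further reception (see nvmet_tcp_try_recv_one()).
 */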
1237 static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
1239 struct nvmet_tcp_queue *queue = cmd->queue;
1241 nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
1243 queue->left = NVME_TCP_DIGEST_LENGTH;
1244 queue->rcv_state = NVMET_TCP_RECV_DDGST;
1247 static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
1249 struct nvmet_tcp_cmd *cmd = queue->cmd;
1252 while (msg_data_left(&cmd->recv_msg)) {
1253 len = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
1254 cmd->recv_msg.msg_flags);
1257 if (queue->tls_pskid) {
1258 ret = nvmet_tcp_tls_record_ok(cmd->queue,
1259 &cmd->recv_msg, cmd->recv_cbuf);
1264 cmd->pdu_recv += len;
1265 cmd->rbytes_done += len;
1268 if (queue->data_digest) {
1269 nvmet_tcp_prep_recv_ddgst(cmd);
1273 if (cmd->rbytes_done == cmd->req.transfer_len)
1274 nvmet_tcp_execute_request(cmd);
1276 nvmet_prepare_receive_pdu(queue);
1280 static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
1282 struct nvmet_tcp_cmd *cmd = queue->cmd;
1284 char cbuf[CMSG_LEN(sizeof(char))] = {};
1285 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1287 .iov_base = (void *)&cmd->recv_ddgst + queue->offset,
1288 .iov_len = queue->left
1291 if (queue->tls_pskid) {
1292 msg.msg_control = cbuf;
1293 msg.msg_controllen = sizeof(cbuf);
1295 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1296 iov.iov_len, msg.msg_flags);
1297 if (unlikely(len < 0))
1299 if (queue->tls_pskid) {
1300 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1305 queue->offset += len;
1310 if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
1311 pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
1312 queue->idx, cmd->req.cmd->common.command_id,
1313 queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
1314 le32_to_cpu(cmd->exp_ddgst));
1315 nvmet_req_uninit(&cmd->req);
1316 nvmet_tcp_free_cmd_buffers(cmd);
1317 nvmet_tcp_fatal_error(queue);
1322 if (cmd->rbytes_done == cmd->req.transfer_len)
1323 nvmet_tcp_execute_request(cmd);
1327 nvmet_prepare_receive_pdu(queue);
1331 static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
1335 if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
1338 if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
1339 result = nvmet_tcp_try_recv_pdu(queue);
1344 if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
1345 result = nvmet_tcp_try_recv_data(queue);
1350 if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
1351 result = nvmet_tcp_try_recv_ddgst(queue);
1358 if (result == -EAGAIN)
1365 static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
1366 int budget, int *recvs)
1370 for (i = 0; i < budget; i++) {
1371 ret = nvmet_tcp_try_recv_one(queue);
1372 if (unlikely(ret < 0)) {
1373 nvmet_tcp_socket_error(queue, ret);
1375 } else if (ret == 0) {
1384 static void nvmet_tcp_release_queue(struct kref *kref)
1386 struct nvmet_tcp_queue *queue =
1387 container_of(kref, struct nvmet_tcp_queue, kref);
1389 WARN_ON(queue->state != NVMET_TCP_Q_DISCONNECTING);
1390 queue_work(nvmet_wq, &queue->release_work);
1393 static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
1395 spin_lock_bh(&queue->state_lock);
1396 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1397 /* Socket closed during handshake */
1398 tls_handshake_cancel(queue->sock->sk);
1400 if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
1401 queue->state = NVMET_TCP_Q_DISCONNECTING;
1402 kref_put(&queue->kref, nvmet_tcp_release_queue);
1404 spin_unlock_bh(&queue->state_lock);
1407 static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
1409 queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
1412 static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
1415 if (!idle_poll_period_usecs)
1419 nvmet_tcp_arm_queue_deadline(queue);
1421 return !time_after(jiffies, queue->poll_end);
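/*
 * Worked example for the deadline above: with idle_poll_period_usecs = 1000
 * and CONFIG_HZ=1000, each pass of io_work() that made progress re-arms
 * poll_end to jiffies + 1, so the worker keeps re-queueing itself until a
 * full tick elapses with no send/recv activity; only then does the queue go
 * idle and wait for the next data_ready/write_space callback.
 */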
1424 static void nvmet_tcp_io_work(struct work_struct *w)
1426 struct nvmet_tcp_queue *queue =
1427 container_of(w, struct nvmet_tcp_queue, io_work);
1434 ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
1440 ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
1446 } while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);
1449 * Requeue the worker if idle deadline period is in progress or any
1450 * ops activity was recorded during the do-while loop above.
1452 if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
1453 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1456 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
1457 struct nvmet_tcp_cmd *c)
1459 u8 hdgst = nvmet_tcp_hdgst_len(queue);
1462 c->req.port = queue->port->nport;
1464 c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
1465 sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1468 c->req.cmd = &c->cmd_pdu->cmd;
1470 c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
1471 sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1474 c->req.cqe = &c->rsp_pdu->cqe;
1476 c->data_pdu = page_frag_alloc(&queue->pf_cache,
1477 sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1481 c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
1482 sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
1486 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1487 c->recv_msg.msg_control = c->recv_cbuf;
1488 c->recv_msg.msg_controllen = sizeof(c->recv_cbuf);
1490 c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1492 list_add_tail(&c->entry, &queue->free_list);
1496 page_frag_free(c->data_pdu);
1498 page_frag_free(c->rsp_pdu);
1500 page_frag_free(c->cmd_pdu);
1504 static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
1506 page_frag_free(c->r2t_pdu);
1507 page_frag_free(c->data_pdu);
1508 page_frag_free(c->rsp_pdu);
1509 page_frag_free(c->cmd_pdu);
1512 static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
1514 struct nvmet_tcp_cmd *cmds;
1515 int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;
1517 cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
1521 for (i = 0; i < nr_cmds; i++) {
1522 ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
1532 nvmet_tcp_free_cmd(cmds + i);
1538 static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
1540 struct nvmet_tcp_cmd *cmds = queue->cmds;
1543 for (i = 0; i < queue->nr_cmds; i++)
1544 nvmet_tcp_free_cmd(cmds + i);
1546 nvmet_tcp_free_cmd(&queue->connect);
1550 static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
1552 struct socket *sock = queue->sock;
1554 write_lock_bh(&sock->sk->sk_callback_lock);
1555 sock->sk->sk_data_ready = queue->data_ready;
1556 sock->sk->sk_state_change = queue->state_change;
1557 sock->sk->sk_write_space = queue->write_space;
1558 sock->sk->sk_user_data = NULL;
1559 write_unlock_bh(&sock->sk->sk_callback_lock);
1562 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
1564 struct nvmet_tcp_cmd *cmd = queue->cmds;
1567 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1568 if (nvmet_tcp_need_data_in(cmd))
1569 nvmet_req_uninit(&cmd->req);
1572 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
1573 /* failed in connect */
1574 nvmet_req_uninit(&queue->connect.req);
1578 static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
1580 struct nvmet_tcp_cmd *cmd = queue->cmds;
1583 for (i = 0; i < queue->nr_cmds; i++, cmd++) {
1584 if (nvmet_tcp_need_data_in(cmd))
1585 nvmet_tcp_free_cmd_buffers(cmd);
1588 if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
1589 nvmet_tcp_free_cmd_buffers(&queue->connect);
1592 static void nvmet_tcp_release_queue_work(struct work_struct *w)
1595 struct nvmet_tcp_queue *queue =
1596 container_of(w, struct nvmet_tcp_queue, release_work);
1598 mutex_lock(&nvmet_tcp_queue_mutex);
1599 list_del_init(&queue->queue_list);
1600 mutex_unlock(&nvmet_tcp_queue_mutex);
1602 nvmet_tcp_restore_socket_callbacks(queue);
1603 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1604 cancel_work_sync(&queue->io_work);
1605 /* stop accepting incoming data */
1606 queue->rcv_state = NVMET_TCP_RECV_ERR;
1608 nvmet_tcp_uninit_data_in_cmds(queue);
1609 nvmet_sq_destroy(&queue->nvme_sq);
1610 cancel_work_sync(&queue->io_work);
1611 nvmet_tcp_free_cmd_data_in_buffers(queue);
1612 /* ->sock will be released by fput() */
1613 fput(queue->sock->file);
1614 nvmet_tcp_free_cmds(queue);
1615 if (queue->hdr_digest || queue->data_digest)
1616 nvmet_tcp_free_crypto(queue);
1617 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1618 page = virt_to_head_page(queue->pf_cache.va);
1619 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1623 static void nvmet_tcp_data_ready(struct sock *sk)
1625 struct nvmet_tcp_queue *queue;
1627 trace_sk_data_ready(sk);
1629 read_lock_bh(&sk->sk_callback_lock);
1630 queue = sk->sk_user_data;
1631 if (likely(queue)) {
1632 if (queue->data_ready)
1633 queue->data_ready(sk);
1634 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)
1635 queue_work_on(queue_cpu(queue), nvmet_tcp_wq,
1638 read_unlock_bh(&sk->sk_callback_lock);
1641 static void nvmet_tcp_write_space(struct sock *sk)
1643 struct nvmet_tcp_queue *queue;
1645 read_lock_bh(&sk->sk_callback_lock);
1646 queue = sk->sk_user_data;
1647 if (unlikely(!queue))
1650 if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
1651 queue->write_space(sk);
1655 if (sk_stream_is_writeable(sk)) {
1656 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1657 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1660 read_unlock_bh(&sk->sk_callback_lock);
1663 static void nvmet_tcp_state_change(struct sock *sk)
1665 struct nvmet_tcp_queue *queue;
1667 read_lock_bh(&sk->sk_callback_lock);
1668 queue = sk->sk_user_data;
1672 switch (sk->sk_state) {
1677 case TCP_CLOSE_WAIT:
1680 nvmet_tcp_schedule_release_queue(queue);
1683 pr_warn("queue %d unhandled state %d\n",
1684 queue->idx, sk->sk_state);
1687 read_unlock_bh(&sk->sk_callback_lock);
1690 static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
1692 struct socket *sock = queue->sock;
1693 struct inet_sock *inet = inet_sk(sock->sk);
1696 ret = kernel_getsockname(sock,
1697 (struct sockaddr *)&queue->sockaddr);
1701 ret = kernel_getpeername(sock,
1702 (struct sockaddr *)&queue->sockaddr_peer);
1707 * Cleanup whatever is sitting in the TCP transmit queue on socket
1708 * close. This is done to prevent stale data from being sent should
1709 * the network connection be restored before TCP times out.
1711 sock_no_linger(sock->sk);
1713 if (so_priority > 0)
1714 sock_set_priority(sock->sk, so_priority);
1716 /* Set socket type of service */
1717 if (inet->rcv_tos > 0)
1718 ip_sock_set_tos(sock->sk, inet->rcv_tos);
1721 write_lock_bh(&sock->sk->sk_callback_lock);
1722 if (sock->sk->sk_state != TCP_ESTABLISHED) {
1724 * If the socket is already closing, don't even start
1729 sock->sk->sk_user_data = queue;
1730 queue->data_ready = sock->sk->sk_data_ready;
1731 sock->sk->sk_data_ready = nvmet_tcp_data_ready;
1732 queue->state_change = sock->sk->sk_state_change;
1733 sock->sk->sk_state_change = nvmet_tcp_state_change;
1734 queue->write_space = sock->sk->sk_write_space;
1735 sock->sk->sk_write_space = nvmet_tcp_write_space;
1736 if (idle_poll_period_usecs)
1737 nvmet_tcp_arm_queue_deadline(queue);
1738 queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
1740 write_unlock_bh(&sock->sk->sk_callback_lock);
1745 #ifdef CONFIG_NVME_TARGET_TCP_TLS
1746 static int nvmet_tcp_try_peek_pdu(struct nvmet_tcp_queue *queue)
1748 struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
1751 .iov_base = (u8 *)&queue->pdu + queue->offset,
1752 .iov_len = sizeof(struct nvme_tcp_hdr),
1754 char cbuf[CMSG_LEN(sizeof(char))] = {};
1755 struct msghdr msg = {
1756 .msg_control = cbuf,
1757 .msg_controllen = sizeof(cbuf),
1758 .msg_flags = MSG_PEEK,
1761 if (nvmet_port_secure_channel_required(queue->port->nport))
1764 len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1765 iov.iov_len, msg.msg_flags);
1766 if (unlikely(len < 0)) {
1767 pr_debug("queue %d: peek error %d\n",
1772 ret = nvmet_tcp_tls_record_ok(queue, &msg, cbuf);
1776 if (len < sizeof(struct nvme_tcp_hdr)) {
1777 pr_debug("queue %d: short read, %d bytes missing\n",
1778 queue->idx, (int)iov.iov_len - len);
1781 pr_debug("queue %d: hdr type %d hlen %d plen %d size %d\n",
1782 queue->idx, hdr->type, hdr->hlen, hdr->plen,
1783 (int)sizeof(struct nvme_tcp_icreq_pdu));
1784 if (hdr->type == nvme_tcp_icreq &&
1785 hdr->hlen == sizeof(struct nvme_tcp_icreq_pdu) &&
1786 hdr->plen == cpu_to_le32(sizeof(struct nvme_tcp_icreq_pdu))) {
1787 pr_debug("queue %d: icreq detected\n",
1794 static void nvmet_tcp_tls_handshake_done(void *data, int status,
1795 key_serial_t peerid)
1797 struct nvmet_tcp_queue *queue = data;
1799 pr_debug("queue %d: TLS handshake done, key %x, status %d\n",
1800 queue->idx, peerid, status);
1801 spin_lock_bh(&queue->state_lock);
1802 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1803 spin_unlock_bh(&queue->state_lock);
1807 queue->tls_pskid = peerid;
1808 queue->state = NVMET_TCP_Q_CONNECTING;
1810 queue->state = NVMET_TCP_Q_FAILED;
1811 spin_unlock_bh(&queue->state_lock);
1813 cancel_delayed_work_sync(&queue->tls_handshake_tmo_work);
1815 nvmet_tcp_schedule_release_queue(queue);
1817 nvmet_tcp_set_queue_sock(queue);
1818 kref_put(&queue->kref, nvmet_tcp_release_queue);
1821 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w)
1823 struct nvmet_tcp_queue *queue = container_of(to_delayed_work(w),
1824 struct nvmet_tcp_queue, tls_handshake_tmo_work);
1826 pr_warn("queue %d: TLS handshake timeout\n", queue->idx);
1828 * If tls_handshake_cancel() fails, we've lost the race with
1829 * nvmet_tcp_tls_handshake_done() */
1830 if (!tls_handshake_cancel(queue->sock->sk))
1832 spin_lock_bh(&queue->state_lock);
1833 if (WARN_ON(queue->state != NVMET_TCP_Q_TLS_HANDSHAKE)) {
1834 spin_unlock_bh(&queue->state_lock);
1837 queue->state = NVMET_TCP_Q_FAILED;
1838 spin_unlock_bh(&queue->state_lock);
1839 nvmet_tcp_schedule_release_queue(queue);
1840 kref_put(&queue->kref, nvmet_tcp_release_queue);
1843 static int nvmet_tcp_tls_handshake(struct nvmet_tcp_queue *queue)
1845 int ret = -EOPNOTSUPP;
1846 struct tls_handshake_args args;
1848 if (queue->state != NVMET_TCP_Q_TLS_HANDSHAKE) {
1849 pr_warn("cannot start TLS in state %d\n", queue->state);
1853 kref_get(&queue->kref);
1854 pr_debug("queue %d: TLS ServerHello\n", queue->idx);
1855 memset(&args, 0, sizeof(args));
1856 args.ta_sock = queue->sock;
1857 args.ta_done = nvmet_tcp_tls_handshake_done;
1858 args.ta_data = queue;
1859 args.ta_keyring = key_serial(queue->port->nport->keyring);
1860 args.ta_timeout_ms = tls_handshake_timeout * 1000;
1862 ret = tls_server_hello_psk(&args, GFP_KERNEL);
1864 kref_put(&queue->kref, nvmet_tcp_release_queue);
1865 pr_err("failed to start TLS, err=%d\n", ret);
1867 queue_delayed_work(nvmet_wq, &queue->tls_handshake_tmo_work,
1868 tls_handshake_timeout * HZ);
1873 static void nvmet_tcp_tls_handshake_timeout(struct work_struct *w) {}
1876 static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
1877 struct socket *newsock)
1879 struct nvmet_tcp_queue *queue;
1880 struct file *sock_file = NULL;
1883 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1889 INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
1890 INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
1891 kref_init(&queue->kref);
1892 queue->sock = newsock;
1895 spin_lock_init(&queue->state_lock);
1896 if (queue->port->nport->disc_addr.tsas.tcp.sectype ==
1897 NVMF_TCP_SECTYPE_TLS13)
1898 queue->state = NVMET_TCP_Q_TLS_HANDSHAKE;
1900 queue->state = NVMET_TCP_Q_CONNECTING;
1901 INIT_LIST_HEAD(&queue->free_list);
1902 init_llist_head(&queue->resp_list);
1903 INIT_LIST_HEAD(&queue->resp_send_list);
1905 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
1906 if (IS_ERR(sock_file)) {
1907 ret = PTR_ERR(sock_file);
1908 goto out_free_queue;
1911 queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
1912 if (queue->idx < 0) {
1917 ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
1919 goto out_ida_remove;
1921 ret = nvmet_sq_init(&queue->nvme_sq);
1923 goto out_free_connect;
1925 nvmet_prepare_receive_pdu(queue);
1927 mutex_lock(&nvmet_tcp_queue_mutex);
1928 list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
1929 mutex_unlock(&nvmet_tcp_queue_mutex);
1931 INIT_DELAYED_WORK(&queue->tls_handshake_tmo_work,
1932 nvmet_tcp_tls_handshake_timeout);
1933 #ifdef CONFIG_NVME_TARGET_TCP_TLS
1934 if (queue->state == NVMET_TCP_Q_TLS_HANDSHAKE) {
1935 struct sock *sk = queue->sock->sk;
1937 /* Restore the default callbacks before starting upcall */
1938 read_lock_bh(&sk->sk_callback_lock);
1939 sk->sk_user_data = NULL;
1940 sk->sk_data_ready = port->data_ready;
1941 read_unlock_bh(&sk->sk_callback_lock);
1942 if (!nvmet_tcp_try_peek_pdu(queue)) {
1943 if (!nvmet_tcp_tls_handshake(queue))
1945 /* TLS handshake failed, terminate the connection */
1946 goto out_destroy_sq;
1948 /* Not a TLS connection, continue with normal processing */
1949 queue->state = NVMET_TCP_Q_CONNECTING;
1953 ret = nvmet_tcp_set_queue_sock(queue);
1955 goto out_destroy_sq;
1959 mutex_lock(&nvmet_tcp_queue_mutex);
1960 list_del_init(&queue->queue_list);
1961 mutex_unlock(&nvmet_tcp_queue_mutex);
1962 nvmet_sq_destroy(&queue->nvme_sq);
1964 nvmet_tcp_free_cmd(&queue->connect);
1966 ida_free(&nvmet_tcp_queue_ida, queue->idx);
1968 fput(queue->sock->file);
1972 pr_err("failed to allocate queue, error %d\n", ret);
1974 sock_release(newsock);
1977 static void nvmet_tcp_accept_work(struct work_struct *w)
1979 struct nvmet_tcp_port *port =
1980 container_of(w, struct nvmet_tcp_port, accept_work);
1981 struct socket *newsock;
1985 ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
1988 pr_warn("failed to accept err=%d\n", ret);
1991 nvmet_tcp_alloc_queue(port, newsock);
1995 static void nvmet_tcp_listen_data_ready(struct sock *sk)
1997 struct nvmet_tcp_port *port;
1999 trace_sk_data_ready(sk);
2001 read_lock_bh(&sk->sk_callback_lock);
2002 port = sk->sk_user_data;
2006 if (sk->sk_state == TCP_LISTEN)
2007 queue_work(nvmet_wq, &port->accept_work);
2009 read_unlock_bh(&sk->sk_callback_lock);
2012 static int nvmet_tcp_add_port(struct nvmet_port *nport)
2014 struct nvmet_tcp_port *port;
2015 __kernel_sa_family_t af;
2018 port = kzalloc(sizeof(*port), GFP_KERNEL);
2022 switch (nport->disc_addr.adrfam) {
2023 case NVMF_ADDR_FAMILY_IP4:
2026 case NVMF_ADDR_FAMILY_IP6:
2030 pr_err("address family %d not supported\n",
2031 nport->disc_addr.adrfam);
2036 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
2037 nport->disc_addr.trsvcid, &port->addr);
2039 pr_err("malformed ip/port passed: %s:%s\n",
2040 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
2044 port->nport = nport;
2045 INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
2046 if (port->nport->inline_data_size < 0)
2047 port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
2049 ret = sock_create(port->addr.ss_family, SOCK_STREAM,
2050 IPPROTO_TCP, &port->sock);
2052 pr_err("failed to create a socket\n");
2056 port->sock->sk->sk_user_data = port;
2057 port->data_ready = port->sock->sk->sk_data_ready;
2058 port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
2059 sock_set_reuseaddr(port->sock->sk);
2060 tcp_sock_set_nodelay(port->sock->sk);
2061 if (so_priority > 0)
2062 sock_set_priority(port->sock->sk, so_priority);
2064 ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
2065 sizeof(port->addr));
2067 pr_err("failed to bind port socket %d\n", ret);
2071 ret = kernel_listen(port->sock, NVMET_TCP_BACKLOG);
2073 pr_err("failed to listen %d on port sock\n", ret);
2078 pr_info("enabling port %d (%pISpc)\n",
2079 le16_to_cpu(nport->disc_addr.portid), &port->addr);
2084 sock_release(port->sock);
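/*
 * For context, ports are created through the nvmet configfs interface; a
 * typical setup that ends up in nvmet_tcp_add_port() above looks like
 * (paths per the standard nvmet configfs layout):
 *
 *   mkdir /sys/kernel/config/nvmet/ports/1
 *   echo tcp     > /sys/kernel/config/nvmet/ports/1/addr_trtype
 *   echo ipv4    > /sys/kernel/config/nvmet/ports/1/addr_adrfam
 *   echo 1.2.3.4 > /sys/kernel/config/nvmet/ports/1/addr_traddr
 *   echo 4420    > /sys/kernel/config/nvmet/ports/1/addr_trsvcid
 */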
2090 static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
2092 struct nvmet_tcp_queue *queue;
2094 mutex_lock(&nvmet_tcp_queue_mutex);
2095 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2096 if (queue->port == port)
2097 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2098 mutex_unlock(&nvmet_tcp_queue_mutex);
2101 static void nvmet_tcp_remove_port(struct nvmet_port *nport)
2103 struct nvmet_tcp_port *port = nport->priv;
2105 write_lock_bh(&port->sock->sk->sk_callback_lock);
2106 port->sock->sk->sk_data_ready = port->data_ready;
2107 port->sock->sk->sk_user_data = NULL;
2108 write_unlock_bh(&port->sock->sk->sk_callback_lock);
2109 cancel_work_sync(&port->accept_work);
2111 * Destroy the remaining queues, which do not belong to any
2114 nvmet_tcp_destroy_port_queues(port);
2116 sock_release(port->sock);
2120 static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
2122 struct nvmet_tcp_queue *queue;
2124 mutex_lock(&nvmet_tcp_queue_mutex);
2125 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2126 if (queue->nvme_sq.ctrl == ctrl)
2127 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2128 mutex_unlock(&nvmet_tcp_queue_mutex);
2131 static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
2133 struct nvmet_tcp_queue *queue =
2134 container_of(sq, struct nvmet_tcp_queue, nvme_sq);
2137 struct nvmet_tcp_queue *q;
2140 /* Check for pending controller teardown */
2141 mutex_lock(&nvmet_tcp_queue_mutex);
2142 list_for_each_entry(q, &nvmet_tcp_queue_list, queue_list) {
2143 if (q->nvme_sq.ctrl == sq->ctrl &&
2144 q->state == NVMET_TCP_Q_DISCONNECTING)
2147 mutex_unlock(&nvmet_tcp_queue_mutex);
2148 if (pending > NVMET_TCP_BACKLOG)
2149 return NVME_SC_CONNECT_CTRL_BUSY;
2152 queue->nr_cmds = sq->size * 2;
2153 if (nvmet_tcp_alloc_cmds(queue))
2154 return NVME_SC_INTERNAL;
2158 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
2159 struct nvmet_port *nport, char *traddr)
2161 struct nvmet_tcp_port *port = nport->priv;
2163 if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
2164 struct nvmet_tcp_cmd *cmd =
2165 container_of(req, struct nvmet_tcp_cmd, req);
2166 struct nvmet_tcp_queue *queue = cmd->queue;
2168 sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
2170 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
2174 static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
2175 .owner = THIS_MODULE,
2176 .type = NVMF_TRTYPE_TCP,
2178 .add_port = nvmet_tcp_add_port,
2179 .remove_port = nvmet_tcp_remove_port,
2180 .queue_response = nvmet_tcp_queue_response,
2181 .delete_ctrl = nvmet_tcp_delete_ctrl,
2182 .install_queue = nvmet_tcp_install_queue,
2183 .disc_traddr = nvmet_tcp_disc_port_addr,
2186 static int __init nvmet_tcp_init(void)
2190 nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
2191 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2195 ret = nvmet_register_transport(&nvmet_tcp_ops);
2201 destroy_workqueue(nvmet_tcp_wq);
2205 static void __exit nvmet_tcp_exit(void)
2207 struct nvmet_tcp_queue *queue;
2209 nvmet_unregister_transport(&nvmet_tcp_ops);
2211 flush_workqueue(nvmet_wq);
2212 mutex_lock(&nvmet_tcp_queue_mutex);
2213 list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
2214 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
2215 mutex_unlock(&nvmet_tcp_queue_mutex);
2216 flush_workqueue(nvmet_wq);
2218 destroy_workqueue(nvmet_tcp_wq);
2219 ida_destroy(&nvmet_tcp_queue_ida);
2222 module_init(nvmet_tcp_init);
2223 module_exit(nvmet_tcp_exit);
2225 MODULE_DESCRIPTION("NVMe target TCP transport driver");
2226 MODULE_LICENSE("GPL v2");
2227 MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */