// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics TCP target.
 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/nvme-tcp.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>

#include "nvmet.h"

#define NVMET_TCP_DEF_INLINE_DATA_SIZE	(4 * PAGE_SIZE)

/* Define the socket priority to use for connections where it is desirable
 * that the NIC consider performing optimized packet processing or filtering.
 * A non-zero value is sufficient to indicate general consideration of any
 * possible optimization. Making it a module param allows for alternative
 * values that may be unique for some NIC implementations.
 */
static int so_priority;
module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvmet tcp socket optimize priority");

/* Define a time period (in usecs) that io_work() shall sample an activated
 * queue before determining it to be idle. This optional module behavior
 * can enable NIC solutions that support socket optimized packet processing
 * using advanced interrupt moderation techniques.
 */
static int idle_poll_period_usecs;
module_param(idle_poll_period_usecs, int, 0644);
MODULE_PARM_DESC(idle_poll_period_usecs,
		"nvmet tcp io_work poll till idle time period in usecs");

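/*
 * Budgets for the io_work loop below: how many receive and send operations
 * are attempted per pass, and the overall per-invocation work cap.
 */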
#define NVMET_TCP_RECV_BUDGET		8
#define NVMET_TCP_SEND_BUDGET		8
#define NVMET_TCP_IO_WORK_BUDGET	64

enum nvmet_tcp_send_state {
	NVMET_TCP_SEND_DATA_PDU,
	NVMET_TCP_SEND_DATA,
	NVMET_TCP_SEND_R2T,
	NVMET_TCP_SEND_DDGST,
	NVMET_TCP_SEND_RESPONSE
};

enum nvmet_tcp_recv_state {
	NVMET_TCP_RECV_PDU,
	NVMET_TCP_RECV_DATA,
	NVMET_TCP_RECV_DDGST,
	NVMET_TCP_RECV_ERR,
};

enum {
	NVMET_TCP_F_INIT_FAILED = (1 << 0),
};

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue		*queue;
	struct nvmet_req		req;

	struct nvme_tcp_cmd_pdu		*cmd_pdu;
	struct nvme_tcp_rsp_pdu		*rsp_pdu;
	struct nvme_tcp_data_pdu	*data_pdu;
	struct nvme_tcp_r2t_pdu		*r2t_pdu;

	u32				rbytes_done;
	u32				wbytes_done;

	u32				pdu_len;
	u32				pdu_recv;
	int				sg_idx;
	struct msghdr			recv_msg;
	struct bio_vec			*iov;
	u32				flags;

	struct list_head		entry;
	struct llist_node		lentry;
	u32				offset;
	struct scatterlist		*cur_sg;
	enum nvmet_tcp_send_state	state;

	__le32				exp_ddgst;
	__le32				recv_ddgst;
};

enum nvmet_tcp_queue_state {
	NVMET_TCP_Q_CONNECTING,
	NVMET_TCP_Q_LIVE,
	NVMET_TCP_Q_DISCONNECTING,
};

struct nvmet_tcp_queue {
	struct socket		*sock;
	struct nvmet_tcp_port	*port;
	struct work_struct	io_work;
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;

	/* send state */
	struct nvmet_tcp_cmd	*cmds;
	unsigned int		nr_cmds;
	struct list_head	free_list;
	struct llist_head	resp_list;
	struct list_head	resp_send_list;
	int			send_list_len;
	struct nvmet_tcp_cmd	*snd_cmd;

	/* recv state */
	int			offset;
	int			left;
	enum nvmet_tcp_recv_state rcv_state;
	struct nvmet_tcp_cmd	*cmd;
	union nvme_tcp_pdu	pdu;

	/* digest state */
	bool			hdr_digest;
	bool			data_digest;
	struct ahash_request	*snd_hash;
	struct ahash_request	*rcv_hash;

	unsigned long		poll_end;

	spinlock_t		state_lock;
	enum nvmet_tcp_queue_state state;

	struct sockaddr_storage	sockaddr;
	struct sockaddr_storage	sockaddr_peer;
	struct work_struct	release_work;

	int			idx;
	struct list_head	queue_list;

	struct nvmet_tcp_cmd	connect;

	struct page_frag_cache	pf_cache;

	void (*data_ready)(struct sock *);
	void (*state_change)(struct sock *);
	void (*write_space)(struct sock *);
};

struct nvmet_tcp_port {
	struct socket		*sock;
	struct work_struct	accept_work;
	struct nvmet_port	*nport;
	struct sockaddr_storage addr;
	void (*data_ready)(struct sock *);
};

static DEFINE_IDA(nvmet_tcp_queue_ida);
static LIST_HEAD(nvmet_tcp_queue_list);
static DEFINE_MUTEX(nvmet_tcp_queue_mutex);

static struct workqueue_struct *nvmet_tcp_wq;
static const struct nvmet_fabrics_ops nvmet_tcp_ops;
static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c);
static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd);

static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(!queue->nr_cmds)) {
		/* We didn't allocate cmds yet, send 0xffff */
		return USHRT_MAX;
	}

	return cmd - queue->cmds;
}

static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;
}

static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}

static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;
}

static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
		!cmd->rbytes_done;
}

static inline struct nvmet_tcp_cmd *
nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd;

	cmd = list_first_entry_or_null(&queue->free_list,
				struct nvmet_tcp_cmd, entry);
	if (!cmd)
		return NULL;
	list_del_init(&cmd->entry);

	cmd->rbytes_done = cmd->wbytes_done = 0;
	cmd->pdu_len = 0;
	cmd->pdu_recv = 0;
	cmd->iov = NULL;

	return cmd;
}

static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd == &cmd->queue->connect))
		return;

	list_add_tail(&cmd->entry, &cmd->queue->free_list);
}

static inline int queue_cpu(struct nvmet_tcp_queue *queue)
{
	return queue->sock->sk->sk_incoming_cpu;
}

static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline u8 nvmet_tcp_ddgst_len(struct nvmet_tcp_queue *queue)
{
	return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
}

static inline void nvmet_tcp_hdgst(struct ahash_request *hash,
		void *pdu, size_t len)
{
	struct scatterlist sg;

	sg_init_one(&sg, pdu, len);
	ahash_request_set_crypt(hash, &sg, pdu + len, len);
	crypto_ahash_digest(hash);
}

static int nvmet_tcp_verify_hdgst(struct nvmet_tcp_queue *queue,
	void *pdu, size_t len)
{
	struct nvme_tcp_hdr *hdr = pdu;
	__le32 recv_digest;
	__le32 exp_digest;

	if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
		pr_err("queue %d: header digest enabled but no header digest\n",
			queue->idx);
		return -EPROTO;
	}

	recv_digest = *(__le32 *)(pdu + hdr->hlen);
	nvmet_tcp_hdgst(queue->rcv_hash, pdu, len);
	exp_digest = *(__le32 *)(pdu + hdr->hlen);
	if (recv_digest != exp_digest) {
		pr_err("queue %d: header digest error: recv %#x expected %#x\n",
			queue->idx, le32_to_cpu(recv_digest),
			le32_to_cpu(exp_digest));
		return -EPROTO;
	}

	return 0;
}

static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
{
	struct nvme_tcp_hdr *hdr = pdu;
	u8 digest_len = nvmet_tcp_hdgst_len(queue);
	u32 len;

	len = le32_to_cpu(hdr->plen) - hdr->hlen -
		(hdr->flags & NVME_TCP_F_HDGST ? digest_len : 0);

	if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
		pr_err("queue %d: data digest flag is cleared\n", queue->idx);
		return -EPROTO;
	}

	return 0;
}

static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);
	cmd->iov = NULL;
	cmd->req.sg = NULL;
}

static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
	struct bio_vec *iov = cmd->iov;
	struct scatterlist *sg;
	u32 length, offset, sg_offset;
	int nr_pages;

	length = cmd->pdu_len;
	nr_pages = DIV_ROUND_UP(length, PAGE_SIZE);
	offset = cmd->rbytes_done;
	cmd->sg_idx = offset / PAGE_SIZE;
	sg_offset = offset % PAGE_SIZE;
	sg = &cmd->req.sg[cmd->sg_idx];

	while (length) {
		u32 iov_len = min_t(u32, length, sg->length - sg_offset);

		iov->bv_page = sg_page(sg);
		iov->bv_len = sg->length;
		iov->bv_offset = sg->offset + sg_offset;

		length -= iov_len;
		sg = sg_next(sg);
		iov++;
		sg_offset = 0;
	}

	iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
		nr_pages, cmd->pdu_len);
}

static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
{
	queue->rcv_state = NVMET_TCP_RECV_ERR;
	if (queue->nvme_sq.ctrl)
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	else
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
}

static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
	if (status == -EPIPE || status == -ECONNRESET)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	else
		nvmet_tcp_fatal_error(queue);
}

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {
		if (!nvme_is_write(cmd->req.cmd))
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;

		if (len > cmd->req.port->inline_data_size)
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
	if (!cmd->req.sg)
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov)
			goto err;
	}

	return 0;
err:
	nvmet_tcp_free_cmd_buffers(cmd);
	return NVME_SC_INTERNAL;
}

static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len);
	crypto_ahash_digest(hash);
}

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA_PDU;

	pdu->hdr.type = nvme_tcp_c2h_data;
	pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
						NVME_TCP_F_DATA_SUCCESS : 0);
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst +
				cmd->req.transfer_len + ddgst);
	pdu->command_id = cmd->req.cqe->command_id;
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
	pdu->data_offset = cpu_to_le32(cmd->wbytes_done);

	if (queue->data_digest) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		nvmet_tcp_calc_ddgst(queue->snd_hash, cmd);
	}

	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_R2T;

	pdu->hdr.type = nvme_tcp_r2t;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);

	pdu->command_id = cmd->req.cmd->common.command_id;
	pdu->ttag = nvmet_tcp_cmd_tag(cmd->queue, cmd);
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
	pdu->r2t_offset = cpu_to_le32(cmd->rbytes_done);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_setup_response_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_rsp_pdu *pdu = cmd->rsp_pdu;
	struct nvmet_tcp_queue *queue = cmd->queue;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_RESPONSE;

	pdu->hdr.type = nvme_tcp_rsp;
	pdu->hdr.flags = 0;
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = 0;
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
	if (cmd->queue->hdr_digest) {
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
		nvmet_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
	}
}

static void nvmet_tcp_process_resp_list(struct nvmet_tcp_queue *queue)
{
	struct llist_node *node;
	struct nvmet_tcp_cmd *cmd;

	for (node = llist_del_all(&queue->resp_list); node; node = node->next) {
		cmd = llist_entry(node, struct nvmet_tcp_cmd, lentry);
		list_add(&cmd->entry, &queue->resp_send_list);
		queue->send_list_len++;
	}
}

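/*
 * Pick the next completed command to transmit: refill the ordered send list
 * from the lockless resp_list if it is empty, then set up the PDU matching
 * the command's direction (C2H data, R2T or plain response).
 */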
static struct nvmet_tcp_cmd *nvmet_tcp_fetch_cmd(struct nvmet_tcp_queue *queue)
{
	queue->snd_cmd = list_first_entry_or_null(&queue->resp_send_list,
				struct nvmet_tcp_cmd, entry);
	if (!queue->snd_cmd) {
		nvmet_tcp_process_resp_list(queue);
		queue->snd_cmd =
			list_first_entry_or_null(&queue->resp_send_list,
					struct nvmet_tcp_cmd, entry);
		if (unlikely(!queue->snd_cmd))
			return NULL;
	}

	list_del_init(&queue->snd_cmd->entry);
	queue->send_list_len--;

	if (nvmet_tcp_need_data_out(queue->snd_cmd))
		nvmet_setup_c2h_data_pdu(queue->snd_cmd);
	else if (nvmet_tcp_need_data_in(queue->snd_cmd))
		nvmet_setup_r2t_pdu(queue->snd_cmd);
	else
		nvmet_setup_response_pdu(queue->snd_cmd);

	return queue->snd_cmd;
}

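/*
 * Completion callback invoked by the nvmet core. A command that still
 * expects inline write data is left alone until that data arrives; all
 * other commands are added to resp_list and io_work is kicked to send them.
 */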
static void nvmet_tcp_queue_response(struct nvmet_req *req)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	size_t len;

	if (unlikely(cmd == queue->cmd)) {
		sgl = &cmd->req.cmd->common.dptr.sgl;
		len = le32_to_cpu(sgl->length);

		/*
		 * Wait for inline data before processing the response.
		 * Avoid using helpers, this might happen before
		 * nvmet_req_init is completed.
		 */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size &&
		    nvme_is_write(cmd->req.cmd))
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);
	queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
		nvmet_tcp_queue_response(&cmd->req);
	else
		cmd->req.execute(&cmd->req);
}

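/*
 * The nvmet_try_send_*() helpers below each transmit one piece of a command
 * (data PDU header, payload pages, data digest, R2T or response capsule).
 * They return 1 when that piece is fully sent, and a value <= 0 when the
 * socket cannot take more data or an error occurred.
 */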
static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->data_pdu) - cmd->offset + hdgst;
	int ret;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->data_pdu),
			offset_in_page(cmd->data_pdu) + cmd->offset,
			left, MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
	if (ret <= 0)
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->offset = 0;
	cmd->state = NVMET_TCP_SEND_DATA;

	return 1;
}

static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int ret;

	while (cmd->cur_sg) {
		struct page *page = sg_page(cmd->cur_sg);
		u32 left = cmd->cur_sg->length - cmd->offset;
		int flags = MSG_DONTWAIT;

		if ((!last_in_batch && cmd->queue->send_list_len) ||
		    cmd->wbytes_done + left < cmd->req.transfer_len ||
		    queue->data_digest || !queue->nvme_sq.sqhd_disabled)
			flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;

		ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
					left, flags);
		if (ret <= 0)
			return ret;

		cmd->offset += ret;
		cmd->wbytes_done += ret;

		/* Done with sg? */
		if (cmd->offset == cmd->cur_sg->length) {
			cmd->cur_sg = sg_next(cmd->cur_sg);
			cmd->offset = 0;
		}
	}

	if (queue->data_digest) {
		cmd->state = NVMET_TCP_SEND_DDGST;
		cmd->offset = 0;
	} else {
		if (queue->nvme_sq.sqhd_disabled) {
			cmd->queue->snd_cmd = NULL;
			nvmet_tcp_put_cmd(cmd);
		} else {
			nvmet_setup_response_pdu(cmd);
		}
	}

	if (queue->nvme_sq.sqhd_disabled)
		nvmet_tcp_free_cmd_buffers(cmd);

	return 1;
}

static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
		bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->rsp_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->rsp_pdu),
		offset_in_page(cmd->rsp_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	nvmet_tcp_free_cmd_buffers(cmd);
	cmd->queue->snd_cmd = NULL;
	nvmet_tcp_put_cmd(cmd);
	return 1;
}

static int nvmet_try_send_r2t(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
	int left = sizeof(*cmd->r2t_pdu) - cmd->offset + hdgst;
	int flags = MSG_DONTWAIT;
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
	else
		flags |= MSG_EOR;

	ret = kernel_sendpage(cmd->queue->sock, virt_to_page(cmd->r2t_pdu),
		offset_in_page(cmd->r2t_pdu) + cmd->offset, left, flags);
	if (ret <= 0)
		return ret;
	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	cmd->queue->snd_cmd = NULL;
	return 1;
}

static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
{
	struct nvmet_tcp_queue *queue = cmd->queue;
	int left = NVME_TCP_DIGEST_LENGTH - cmd->offset;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
		.iov_len = left
	};
	int ret;

	if (!last_in_batch && cmd->queue->send_list_len)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;

	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (unlikely(ret <= 0))
		return ret;

	cmd->offset += ret;
	left -= ret;

	if (left)
		return -EAGAIN;

	if (queue->nvme_sq.sqhd_disabled) {
		cmd->queue->snd_cmd = NULL;
		nvmet_tcp_put_cmd(cmd);
	} else {
		nvmet_setup_response_pdu(cmd);
	}
	return 1;
}

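/*
 * Drive the currently selected command through its send state machine,
 * fetching the next command from the response list when nothing is in
 * flight.
 */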
static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
		bool last_in_batch)
{
	struct nvmet_tcp_cmd *cmd = queue->snd_cmd;
	int ret = 0;

	if (!cmd || queue->state == NVMET_TCP_Q_DISCONNECTING) {
		cmd = nvmet_tcp_fetch_cmd(queue);
		if (unlikely(!cmd))
			return 0;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA_PDU) {
		ret = nvmet_try_send_data_pdu(cmd);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DATA) {
		ret = nvmet_try_send_data(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_DDGST) {
		ret = nvmet_try_send_ddgst(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_R2T) {
		ret = nvmet_try_send_r2t(cmd, last_in_batch);
		if (ret <= 0)
			goto done_send;
	}

	if (cmd->state == NVMET_TCP_SEND_RESPONSE)
		ret = nvmet_try_send_response(cmd, last_in_batch);

done_send:
	if (ret < 0) {
		if (ret == -EAGAIN)
			return 0;
		return ret;
	}

	return 1;
}

static int nvmet_tcp_try_send(struct nvmet_tcp_queue *queue,
		int budget, int *sends)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_send_one(queue, i == budget - 1);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*sends)++;
	}
done:
	return ret;
}

static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
	queue->offset = 0;
	queue->left = sizeof(struct nvme_tcp_hdr);
	queue->cmd = NULL;
	queue->rcv_state = NVMET_TCP_RECV_PDU;
}

static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);

	ahash_request_free(queue->rcv_hash);
	ahash_request_free(queue->snd_hash);
	crypto_free_ahash(tfm);
}

static int nvmet_tcp_alloc_crypto(struct nvmet_tcp_queue *queue)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->snd_hash)
		goto free_tfm;
	ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);

	queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!queue->rcv_hash)
		goto free_snd_hash;
	ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);

	return 0;
free_snd_hash:
	ahash_request_free(queue->snd_hash);
free_tfm:
	crypto_free_ahash(tfm);
	return -ENOMEM;
}

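/*
 * Handle the initial ICReq PDU: validate its length, PFV and HPDA, set up
 * crc32c contexts if header/data digests were requested, and answer with an
 * ICResp before moving the queue to the LIVE state.
 */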
static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_icreq_pdu *icreq = &queue->pdu.icreq;
	struct nvme_tcp_icresp_pdu *icresp = &queue->pdu.icresp;
	struct msghdr msg = {};
	struct kvec iov;
	int ret;

	if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
		pr_err("bad nvme-tcp pdu length (%d)\n",
			le32_to_cpu(icreq->hdr.plen));
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (icreq->pfv != NVME_TCP_PFV_1_0) {
		pr_err("queue %d: bad pfv %d\n", queue->idx, icreq->pfv);
		return -EPROTO;
	}

	if (icreq->hpda != 0) {
		pr_err("queue %d: unsupported hpda %d\n", queue->idx,
			icreq->hpda);
		return -EPROTO;
	}

	queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
	queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
	if (queue->hdr_digest || queue->data_digest) {
		ret = nvmet_tcp_alloc_crypto(queue);
		if (ret)
			return ret;
	}

	memset(icresp, 0, sizeof(*icresp));
	icresp->hdr.type = nvme_tcp_icresp;
	icresp->hdr.hlen = sizeof(*icresp);
	icresp->hdr.pdo = 0;
	icresp->hdr.plen = cpu_to_le32(icresp->hdr.hlen);
	icresp->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icresp->maxdata = cpu_to_le32(0x400000); /* 4M arbitrary limit */
	icresp->cpda = 0;
	if (queue->hdr_digest)
		icresp->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (queue->data_digest)
		icresp->digest |= NVME_TCP_DATA_DIGEST_ENABLE;

	iov.iov_base = icresp;
	iov.iov_len = sizeof(*icresp);
	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
	if (ret < 0)
		goto free_crypto;

	queue->state = NVMET_TCP_Q_LIVE;
	nvmet_prepare_receive_pdu(queue);
	return 0;
free_crypto:
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	return ret;
}

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
	int ret;

	/*
	 * This command has not been processed yet, hence we are trying to
	 * figure out if there is still pending data left to receive. If
	 * we don't, we can simply prepare for the next pdu and bail out,
	 * otherwise we will need to prepare a buffer and receive the
	 * stale data before continuing forward.
	 */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||
	    data_len > cmd->req.port->inline_data_size) {
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	ret = nvmet_tcp_map_data(cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		nvmet_tcp_fatal_error(queue);
		return;
	}

	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_build_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}

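/*
 * An H2CData PDU arrived: look up the command by transfer tag, check that
 * the data offset matches what has been received so far, and switch the
 * queue to data reception for that command.
 */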
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_data_pdu *data = &queue->pdu.data;
	struct nvmet_tcp_cmd *cmd;

	if (likely(queue->nr_cmds)) {
		if (unlikely(data->ttag >= queue->nr_cmds)) {
			pr_err("queue %d: received out of bound ttag %u, nr_cmds %u\n",
				queue->idx, data->ttag, queue->nr_cmds);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		cmd = &queue->cmds[data->ttag];
	} else {
		cmd = &queue->connect;
	}

	if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
		pr_err("ttag %u unexpected data offset %u (expected %u)\n",
			data->ttag, le32_to_cpu(data->data_offset),
			cmd->rbytes_done);
		/* FIXME: use path and transport errors */
		nvmet_req_complete(&cmd->req,
			NVME_SC_INVALID_FIELD | NVME_SC_DNR);
		return -EPROTO;
	}

	cmd->pdu_len = le32_to_cpu(data->data_length);
	cmd->pdu_recv = 0;
	nvmet_tcp_build_pdu_iovec(cmd);
	queue->cmd = cmd;
	queue->rcv_state = NVMET_TCP_RECV_DATA;

	return 0;
}

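/*
 * A complete PDU header has been received and verified. Dispatch it: ICReq
 * while connecting, H2CData to its owning command, otherwise initialize a
 * new request and either receive inline data, queue an R2T, or execute the
 * command immediately.
 */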
static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd;
	struct nvmet_req *req;
	int ret;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		if (hdr->type != nvme_tcp_icreq) {
			pr_err("unexpected pdu type (%d) before icreq\n",
				hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EPROTO;
		}
		return nvmet_tcp_handle_icreq(queue);
	}

	if (unlikely(hdr->type == nvme_tcp_icreq)) {
		pr_err("queue %d: received icreq pdu in state %d\n",
			queue->idx, queue->state);
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (hdr->type == nvme_tcp_h2c_data) {
		ret = nvmet_tcp_handle_h2c_data_pdu(queue);
		if (unlikely(ret))
			return ret;
		return 0;
	}

	queue->cmd = nvmet_tcp_get_cmd(queue);
	if (unlikely(!queue->cmd)) {
		/* This should never happen */
		pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
			queue->idx, queue->nr_cmds, queue->send_list_len,
			nvme_cmd->common.opcode);
		nvmet_tcp_fatal_error(queue);
		return -ENOMEM;
	}

	req = &queue->cmd->req;
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));

	if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_tcp_ops))) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,
			req->cmd->common.opcode,
			le32_to_cpu(req->cmd->common.dptr.sgl.length));

		nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (unlikely(ret)) {
		pr_err("queue %d: failed to map data\n", queue->idx);
		if (nvmet_tcp_has_inline_data(queue->cmd))
			nvmet_tcp_fatal_error(queue);
		else
			nvmet_req_complete(req, ret);
		ret = -EAGAIN;
		goto out;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_build_pdu_iovec(queue->cmd);
			return 0;
		}
		/* send back R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);
		goto out;
	}

	queue->cmd->req.execute(&queue->cmd->req);
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static const u8 nvme_tcp_pdu_sizes[] = {
	[nvme_tcp_icreq]	= sizeof(struct nvme_tcp_icreq_pdu),
	[nvme_tcp_cmd]		= sizeof(struct nvme_tcp_cmd_pdu),
	[nvme_tcp_h2c_data]	= sizeof(struct nvme_tcp_data_pdu),
};

static inline u8 nvmet_tcp_pdu_size(u8 type)
{
	size_t idx = type;

	return (idx < ARRAY_SIZE(nvme_tcp_pdu_sizes) &&
		nvme_tcp_pdu_sizes[idx]) ?
			nvme_tcp_pdu_sizes[idx] : 0;
}

static inline bool nvmet_tcp_pdu_valid(u8 type)
{
	switch (type) {
	case nvme_tcp_icreq:
	case nvme_tcp_cmd:
	case nvme_tcp_h2c_data:
		/* fallthru */
		return true;
	}

	return false;
}

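/*
 * Receive path: read the common header first, then the remainder of the
 * PDU, verify the header digest and hand the PDU to
 * nvmet_tcp_done_recv_pdu().
 */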
static int nvmet_tcp_try_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_tcp_hdr *hdr = &queue->pdu.cmd.hdr;
	int len;
	struct kvec iov;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

recv:
	iov.iov_base = (void *)&queue->pdu + queue->offset;
	iov.iov_len = queue->left;
	len = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(len < 0))
		return len;

	queue->offset += len;
	queue->left -= len;
	if (queue->left)
		return -EAGAIN;

	if (queue->offset == sizeof(struct nvme_tcp_hdr)) {
		u8 hdgst = nvmet_tcp_hdgst_len(queue);

		if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
			pr_err("unexpected pdu type %d\n", hdr->type);
			nvmet_tcp_fatal_error(queue);
			return -EIO;
		}

		if (unlikely(hdr->hlen != nvmet_tcp_pdu_size(hdr->type))) {
			pr_err("pdu %d bad hlen %d\n", hdr->type, hdr->hlen);
			return -EIO;
		}

		queue->left = hdr->hlen - queue->offset + hdgst;
		goto recv;
	}

	if (queue->hdr_digest &&
	    nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	if (queue->data_digest &&
	    nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
		nvmet_tcp_fatal_error(queue); /* fatal */
		return -EPROTO;
	}

	return nvmet_tcp_done_recv_pdu(queue);
}

static void nvmet_tcp_prep_recv_ddgst(struct nvmet_tcp_cmd *cmd)
{
	struct nvmet_tcp_queue *queue = cmd->queue;

	nvmet_tcp_calc_ddgst(queue->rcv_hash, cmd);
	queue->offset = 0;
	queue->left = NVME_TCP_DIGEST_LENGTH;
	queue->rcv_state = NVMET_TCP_RECV_DDGST;
}

static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;

	while (msg_data_left(&cmd->recv_msg)) {
		ret = sock_recvmsg(cmd->queue->sock, &cmd->recv_msg,
			cmd->recv_msg.msg_flags);
		if (ret <= 0)
			return ret;

		cmd->pdu_recv += ret;
		cmd->rbytes_done += ret;
	}

	if (queue->data_digest) {
		nvmet_tcp_prep_recv_ddgst(cmd);
		return 0;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	nvmet_prepare_receive_pdu(queue);
	return 0;
}

static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmd;
	int ret;
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
	struct kvec iov = {
		.iov_base = (void *)&cmd->recv_ddgst + queue->offset,
		.iov_len = queue->left
	};

	ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
			iov.iov_len, msg.msg_flags);
	if (unlikely(ret < 0))
		return ret;

	queue->offset += ret;
	queue->left -= ret;
	if (queue->left)
		return -EAGAIN;

	if (queue->data_digest && cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d pdu (%d) data digest error: recv %#x expected %#x\n",
			queue->idx, cmd->req.cmd->common.command_id,
			queue->pdu.cmd.hdr.type, le32_to_cpu(cmd->recv_ddgst),
			le32_to_cpu(cmd->exp_ddgst));
		nvmet_req_uninit(&cmd->req);
		nvmet_tcp_free_cmd_buffers(cmd);
		nvmet_tcp_fatal_error(queue);
		ret = -EPROTO;
		goto out;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)
		nvmet_tcp_execute_request(cmd);

	ret = 0;
out:
	nvmet_prepare_receive_pdu(queue);
	return ret;
}

static int nvmet_tcp_try_recv_one(struct nvmet_tcp_queue *queue)
{
	int result = 0;

	if (unlikely(queue->rcv_state == NVMET_TCP_RECV_ERR))
		return 0;

	if (queue->rcv_state == NVMET_TCP_RECV_PDU) {
		result = nvmet_tcp_try_recv_pdu(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DATA) {
		result = nvmet_tcp_try_recv_data(queue);
		if (result != 0)
			goto done_recv;
	}

	if (queue->rcv_state == NVMET_TCP_RECV_DDGST) {
		result = nvmet_tcp_try_recv_ddgst(queue);
		if (result != 0)
			goto done_recv;
	}

done_recv:
	if (result < 0) {
		if (result == -EAGAIN)
			return 0;
		return result;
	}
	return 1;
}

static int nvmet_tcp_try_recv(struct nvmet_tcp_queue *queue,
		int budget, int *recvs)
{
	int i, ret = 0;

	for (i = 0; i < budget; i++) {
		ret = nvmet_tcp_try_recv_one(queue);
		if (unlikely(ret < 0)) {
			nvmet_tcp_socket_error(queue, ret);
			goto done;
		} else if (ret == 0) {
			break;
		}
		(*recvs)++;
	}
done:
	return ret;
}

static void nvmet_tcp_schedule_release_queue(struct nvmet_tcp_queue *queue)
{
	spin_lock(&queue->state_lock);
	if (queue->state != NVMET_TCP_Q_DISCONNECTING) {
		queue->state = NVMET_TCP_Q_DISCONNECTING;
		queue_work(nvmet_wq, &queue->release_work);
	}
	spin_unlock(&queue->state_lock);
}

static inline void nvmet_tcp_arm_queue_deadline(struct nvmet_tcp_queue *queue)
{
	queue->poll_end = jiffies + usecs_to_jiffies(idle_poll_period_usecs);
}

static bool nvmet_tcp_check_queue_deadline(struct nvmet_tcp_queue *queue,
		int ops)
{
	if (!idle_poll_period_usecs)
		return false;

	if (ops)
		nvmet_tcp_arm_queue_deadline(queue);

	return !time_after(jiffies, queue->poll_end);
}

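/*
 * Main per-queue worker: alternate receive and send passes until neither
 * makes progress or the overall budget is exhausted, then requeue itself if
 * there was activity or the idle-poll deadline has not expired.
 */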
static void nvmet_tcp_io_work(struct work_struct *w)
{
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, io_work);
	bool pending;
	int ret, ops = 0;

	do {
		pending = false;

		ret = nvmet_tcp_try_recv(queue, NVMET_TCP_RECV_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

		ret = nvmet_tcp_try_send(queue, NVMET_TCP_SEND_BUDGET, &ops);
		if (ret > 0)
			pending = true;
		else if (ret < 0)
			return;

	} while (pending && ops < NVMET_TCP_IO_WORK_BUDGET);

	/*
	 * Requeue the worker if idle deadline period is in progress or any
	 * ops activity was recorded during the do-while loop above.
	 */
	if (nvmet_tcp_check_queue_deadline(queue, ops) || pending)
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
}

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);

	c->queue = queue;
	c->req.port = queue->port->nport;

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		goto out_free_cmd;
	c->req.cqe = &c->rsp_pdu->cqe;

	c->data_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->data_pdu)
		goto out_free_rsp;

	c->r2t_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->r2t_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->r2t_pdu)
		goto out_free_data;

	c->recv_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;

	list_add_tail(&c->entry, &queue->free_list);

	return 0;
out_free_data:
	page_frag_free(c->data_pdu);
out_free_rsp:
	page_frag_free(c->rsp_pdu);
out_free_cmd:
	page_frag_free(c->cmd_pdu);
	return -ENOMEM;
}

static void nvmet_tcp_free_cmd(struct nvmet_tcp_cmd *c)
{
	page_frag_free(c->r2t_pdu);
	page_frag_free(c->data_pdu);
	page_frag_free(c->rsp_pdu);
	page_frag_free(c->cmd_pdu);
}

static int nvmet_tcp_alloc_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds;
	int i, ret = -EINVAL, nr_cmds = queue->nr_cmds;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_tcp_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_tcp_alloc_cmd(queue, cmds + i);
		if (ret)
			goto out_free;
	}

	queue->cmds = cmds;

	return 0;
out_free:
	while (--i >= 0)
		nvmet_tcp_free_cmd(cmds + i);
	kfree(cmds);
out:
	return ret;
}

static void nvmet_tcp_free_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmds = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++)
		nvmet_tcp_free_cmd(cmds + i);

	nvmet_tcp_free_cmd(&queue->connect);
	kfree(cmds);
}

static void nvmet_tcp_restore_socket_callbacks(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;

	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_data_ready = queue->data_ready;
	sock->sk->sk_state_change = queue->state_change;
	sock->sk->sk_write_space = queue->write_space;
	sock->sk->sk_user_data = NULL;
	write_unlock_bh(&sock->sk->sk_callback_lock);
}

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect)) {
		/* failed in connect */
		nvmet_req_uninit(&queue->connect.req);
	}
}

static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_tcp_free_cmd_buffers(cmd);
	}

	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
		nvmet_tcp_free_cmd_buffers(&queue->connect);
}

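/*
 * Queue teardown, run from the release work: restore the socket callbacks,
 * stop io_work, fail commands still waiting for data, destroy the
 * submission queue and release all per-queue resources.
 */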
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
	struct page *page;
	struct nvmet_tcp_queue *queue =
		container_of(w, struct nvmet_tcp_queue, release_work);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	nvmet_tcp_restore_socket_callbacks(queue);
	cancel_work_sync(&queue->io_work);
	/* stop accepting incoming data */
	queue->rcv_state = NVMET_TCP_RECV_ERR;

	nvmet_tcp_uninit_data_in_cmds(queue);
	nvmet_sq_destroy(&queue->nvme_sq);
	cancel_work_sync(&queue->io_work);
	nvmet_tcp_free_cmd_data_in_buffers(queue);
	sock_release(queue->sock);
	nvmet_tcp_free_cmds(queue);
	if (queue->hdr_digest || queue->data_digest)
		nvmet_tcp_free_crypto(queue);
	ida_free(&nvmet_tcp_queue_ida, queue->idx);

	page = virt_to_head_page(queue->pf_cache.va);
	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
	kfree(queue);
}

static void nvmet_tcp_data_ready(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (likely(queue))
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_write_space(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (unlikely(!queue))
		goto out;

	if (unlikely(queue->state == NVMET_TCP_Q_CONNECTING)) {
		queue->write_space(sk);
		goto out;
	}

	if (sk_stream_is_writeable(sk)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void nvmet_tcp_state_change(struct sock *sk)
{
	struct nvmet_tcp_queue *queue;

	read_lock_bh(&sk->sk_callback_lock);
	queue = sk->sk_user_data;
	if (!queue)
		goto done;

	switch (sk->sk_state) {
	case TCP_FIN_WAIT2:
	case TCP_LAST_ACK:
		break;
	case TCP_FIN_WAIT1:
	case TCP_CLOSE_WAIT:
	case TCP_CLOSE:
		/* FALLTHRU */
		nvmet_tcp_schedule_release_queue(queue);
		break;
	default:
		pr_warn("queue %d unhandled state %d\n",
			queue->idx, sk->sk_state);
	}
done:
	read_unlock_bh(&sk->sk_callback_lock);
}

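/*
 * Finish setting up a freshly accepted socket: record local and peer
 * addresses, apply socket options and install the nvmet-tcp callbacks,
 * bailing out if the peer has already started closing the connection.
 */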
static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
{
	struct socket *sock = queue->sock;
	struct inet_sock *inet = inet_sk(sock->sk);
	int ret;

	ret = kernel_getsockname(sock,
		(struct sockaddr *)&queue->sockaddr);
	if (ret < 0)
		return ret;

	ret = kernel_getpeername(sock,
		(struct sockaddr *)&queue->sockaddr_peer);
	if (ret < 0)
		return ret;

	/*
	 * Cleanup whatever is sitting in the TCP transmit queue on socket
	 * close. This is done to prevent stale data from being sent should
	 * the network connection be restored before TCP times out.
	 */
	sock_no_linger(sock->sk);

	if (so_priority > 0)
		sock_set_priority(sock->sk, so_priority);

	/* Set socket type of service */
	if (inet->rcv_tos > 0)
		ip_sock_set_tos(sock->sk, inet->rcv_tos);

	ret = 0;
	write_lock_bh(&sock->sk->sk_callback_lock);
	if (sock->sk->sk_state != TCP_ESTABLISHED) {
		/*
		 * If the socket is already closing, don't even start
		 * consuming it
		 */
		ret = -ENOTCONN;
	} else {
		sock->sk->sk_user_data = queue;
		queue->data_ready = sock->sk->sk_data_ready;
		sock->sk->sk_data_ready = nvmet_tcp_data_ready;
		queue->state_change = sock->sk->sk_state_change;
		sock->sk->sk_state_change = nvmet_tcp_state_change;
		queue->write_space = sock->sk->sk_write_space;
		sock->sk->sk_write_space = nvmet_tcp_write_space;
		if (idle_poll_period_usecs)
			nvmet_tcp_arm_queue_deadline(queue);
		queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
	}
	write_unlock_bh(&sock->sk->sk_callback_lock);

	return ret;
}

static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
		struct socket *newsock)
{
	struct nvmet_tcp_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return -ENOMEM;

	INIT_WORK(&queue->release_work, nvmet_tcp_release_queue_work);
	INIT_WORK(&queue->io_work, nvmet_tcp_io_work);
	queue->sock = newsock;
	queue->port = port;
	queue->nr_cmds = 0;
	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_TCP_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->free_list);
	init_llist_head(&queue->resp_list);
	INIT_LIST_HEAD(&queue->resp_send_list);

	queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = queue->idx;
		goto out_free_queue;
	}

	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
	if (ret)
		goto out_ida_remove;

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_free_connect;

	nvmet_prepare_receive_pdu(queue);

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_tcp_queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);

	ret = nvmet_tcp_set_queue_sock(queue);
	if (ret)
		goto out_destroy_sq;

	return 0;
out_destroy_sq:
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_connect:
	nvmet_tcp_free_cmd(&queue->connect);
out_ida_remove:
	ida_free(&nvmet_tcp_queue_ida, queue->idx);
out_free_queue:
	kfree(queue);
	return ret;
}

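/*
 * Accept work for a listening port: drain all pending connections and
 * allocate a queue for each accepted socket.
 */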
static void nvmet_tcp_accept_work(struct work_struct *w)
{
	struct nvmet_tcp_port *port =
		container_of(w, struct nvmet_tcp_port, accept_work);
	struct socket *newsock;
	int ret;

	while (true) {
		ret = kernel_accept(port->sock, &newsock, O_NONBLOCK);
		if (ret < 0) {
			if (ret != -EAGAIN)
				pr_warn("failed to accept err=%d\n", ret);
			return;
		}
		ret = nvmet_tcp_alloc_queue(port, newsock);
		if (ret) {
			pr_err("failed to allocate queue\n");
			sock_release(newsock);
		}
	}
}

static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
	struct nvmet_tcp_port *port;

	read_lock_bh(&sk->sk_callback_lock);
	port = sk->sk_user_data;
	if (!port)
		goto out;

	if (sk->sk_state == TCP_LISTEN)
		queue_work(nvmet_wq, &port->accept_work);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static int nvmet_tcp_add_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port;
	__kernel_sa_family_t af;
	int ret;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	switch (nport->disc_addr.adrfam) {
	case NVMF_ADDR_FAMILY_IP4:
		af = AF_INET;
		break;
	case NVMF_ADDR_FAMILY_IP6:
		af = AF_INET6;
		break;
	default:
		pr_err("address family %d not supported\n",
			nport->disc_addr.adrfam);
		ret = -EINVAL;
		goto err_port;
	}

	ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
			nport->disc_addr.trsvcid, &port->addr);
	if (ret) {
		pr_err("malformed ip/port passed: %s:%s\n",
			nport->disc_addr.traddr, nport->disc_addr.trsvcid);
		goto err_port;
	}

	port->nport = nport;
	INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
	if (port->nport->inline_data_size < 0)
		port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;

	ret = sock_create(port->addr.ss_family, SOCK_STREAM,
				IPPROTO_TCP, &port->sock);
	if (ret) {
		pr_err("failed to create a socket\n");
		goto err_port;
	}

	port->sock->sk->sk_user_data = port;
	port->data_ready = port->sock->sk->sk_data_ready;
	port->sock->sk->sk_data_ready = nvmet_tcp_listen_data_ready;
	sock_set_reuseaddr(port->sock->sk);
	tcp_sock_set_nodelay(port->sock->sk);
	if (so_priority > 0)
		sock_set_priority(port->sock->sk, so_priority);

	ret = kernel_bind(port->sock, (struct sockaddr *)&port->addr,
			sizeof(port->addr));
	if (ret) {
		pr_err("failed to bind port socket %d\n", ret);
		goto err_sock;
	}

	ret = kernel_listen(port->sock, 128);
	if (ret) {
		pr_err("failed to listen %d on port sock\n", ret);
		goto err_sock;
	}

	nport->priv = port;
	pr_info("enabling port %d (%pISpc)\n",
		le16_to_cpu(nport->disc_addr.portid), &port->addr);

	return 0;

err_sock:
	sock_release(port->sock);
err_port:
	kfree(port);
	return ret;
}

static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->port == port)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
	struct nvmet_tcp_port *port = nport->priv;

	write_lock_bh(&port->sock->sk->sk_callback_lock);
	port->sock->sk->sk_data_ready = port->data_ready;
	port->sock->sk->sk_user_data = NULL;
	write_unlock_bh(&port->sock->sk->sk_callback_lock);
	cancel_work_sync(&port->accept_work);
	/*
	 * Destroy the remaining queues, which do not belong to any
	 * controller yet.
	 */
	nvmet_tcp_destroy_port_queues(port);

	sock_release(port->sock);
	kfree(port);
}

static void nvmet_tcp_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_tcp_queue *queue;

	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		if (queue->nvme_sq.ctrl == ctrl)
			kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
}

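/*
 * Called once the queue depth is known (admin or I/O connect): size the
 * per-queue command array to twice the negotiated queue size and allocate
 * it.
 */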
static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
{
	struct nvmet_tcp_queue *queue =
		container_of(sq, struct nvmet_tcp_queue, nvme_sq);

	if (sq->qid == 0) {
		/* Let inflight controller teardown complete */
		flush_workqueue(nvmet_wq);
	}

	queue->nr_cmds = sq->size * 2;
	if (nvmet_tcp_alloc_cmds(queue))
		return NVME_SC_INTERNAL;
	return 0;
}

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_port *port = nport->priv;

	if (inet_addr_is_any((struct sockaddr *)&port->addr)) {
		struct nvmet_tcp_cmd *cmd =
			container_of(req, struct nvmet_tcp_cmd, req);
		struct nvmet_tcp_queue *queue = cmd->queue;

		sprintf(traddr, "%pISc", (struct sockaddr *)&queue->sockaddr);
	} else {
		memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
	}
}

static const struct nvmet_fabrics_ops nvmet_tcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_TCP,
	.msdbd			= 1,
	.add_port		= nvmet_tcp_add_port,
	.remove_port		= nvmet_tcp_remove_port,
	.queue_response		= nvmet_tcp_queue_response,
	.delete_ctrl		= nvmet_tcp_delete_ctrl,
	.install_queue		= nvmet_tcp_install_queue,
	.disc_traddr		= nvmet_tcp_disc_port_addr,
};

static int __init nvmet_tcp_init(void)
{
	int ret;

	nvmet_tcp_wq = alloc_workqueue("nvmet_tcp_wq",
				WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvmet_tcp_wq)
		return -ENOMEM;

	ret = nvmet_register_transport(&nvmet_tcp_ops);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvmet_tcp_wq);
	return ret;
}

static void __exit nvmet_tcp_exit(void)
{
	struct nvmet_tcp_queue *queue;

	nvmet_unregister_transport(&nvmet_tcp_ops);

	flush_workqueue(nvmet_wq);
	mutex_lock(&nvmet_tcp_queue_mutex);
	list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
	mutex_unlock(&nvmet_tcp_queue_mutex);
	flush_workqueue(nvmet_wq);

	destroy_workqueue(nvmet_tcp_wq);
}

module_init(nvmet_tcp_init);
module_exit(nvmet_tcp_exit);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-3"); /* 3 == NVMF_TRTYPE_TCP */