1 // SPDX-License-Identifier: GPL-2.0
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/err.h>
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
15 #include <crypto/hash.h>
16 #include <net/busy_poll.h>
17 #include <trace/events/sock.h>
22 struct nvme_tcp_queue;
24 /* Define the socket priority to use for connections where it is desirable
25  * that the NIC consider performing optimized packet processing or filtering.
26  * A non-zero value is sufficient to indicate general consideration of any
27  * possible optimization. Making it a module param allows for alternative
28  * values that may be unique for some NIC implementations.
30 static int so_priority;
31 module_param(so_priority, int, 0644);
32 MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
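/* Example (hypothetical value): the priority can be set at load time with
 *   modprobe nvme-tcp so_priority=6
 * or changed at runtime via /sys/module/nvme_tcp/parameters/so_priority.
 */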
34 #ifdef CONFIG_DEBUG_LOCK_ALLOC
35 /* lockdep can detect a circular dependency of the form
36 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
37 * because dependencies are tracked for both nvme-tcp and user contexts. Using
38 * a separate class prevents lockdep from conflating nvme-tcp socket use with
39 * user-space socket API use.
41 static struct lock_class_key nvme_tcp_sk_key[2];
42 static struct lock_class_key nvme_tcp_slock_key[2];
44 static void nvme_tcp_reclassify_socket(struct socket *sock)
46 struct sock *sk = sock->sk;
48 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
51 switch (sk->sk_family) {
53 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
54 &nvme_tcp_slock_key[0],
55 "sk_lock-AF_INET-NVME",
59 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
60 &nvme_tcp_slock_key[1],
61 "sk_lock-AF_INET6-NVME",
69 static void nvme_tcp_reclassify_socket(struct socket *sock) { }
72 enum nvme_tcp_send_state {
73 NVME_TCP_SEND_CMD_PDU = 0,
74 NVME_TCP_SEND_H2C_PDU,
79 struct nvme_tcp_request {
80 struct nvme_request req;
82 struct nvme_tcp_queue *queue;
90 struct list_head entry;
91 struct llist_node lentry;
100 enum nvme_tcp_send_state state;
103 enum nvme_tcp_queue_flags {
104 NVME_TCP_Q_ALLOCATED = 0,
106 NVME_TCP_Q_POLLING = 2,
109 enum nvme_tcp_recv_state {
110 NVME_TCP_RECV_PDU = 0,
115 struct nvme_tcp_ctrl;
116 struct nvme_tcp_queue {
118 struct work_struct io_work;
121 struct mutex queue_lock;
122 struct mutex send_mutex;
123 struct llist_head req_list;
124 struct list_head send_list;
130 size_t data_remaining;
131 size_t ddgst_remaining;
135 struct nvme_tcp_request *request;
138 size_t cmnd_capsule_len;
139 struct nvme_tcp_ctrl *ctrl;
145 struct ahash_request *rcv_hash;
146 struct ahash_request *snd_hash;
150 struct page_frag_cache pf_cache;
152 void (*state_change)(struct sock *);
153 void (*data_ready)(struct sock *);
154 void (*write_space)(struct sock *);
157 struct nvme_tcp_ctrl {
158 /* read only in the hot path */
159 struct nvme_tcp_queue *queues;
160 struct blk_mq_tag_set tag_set;
162 /* other member variables */
163 struct list_head list;
164 struct blk_mq_tag_set admin_tag_set;
165 struct sockaddr_storage addr;
166 struct sockaddr_storage src_addr;
167 struct nvme_ctrl ctrl;
169 struct work_struct err_work;
170 struct delayed_work connect_work;
171 struct nvme_tcp_request async_req;
172 u32 io_queues[HCTX_MAX_TYPES];
175 static LIST_HEAD(nvme_tcp_ctrl_list);
176 static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
177 static struct workqueue_struct *nvme_tcp_wq;
178 static const struct blk_mq_ops nvme_tcp_mq_ops;
179 static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
180 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
182 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
184 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
187 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
189 return queue - queue->ctrl->queues;
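/* Queue 0 is the admin queue and uses the admin tag set; I/O queue N
 * (N >= 1) maps to entry N - 1 of the I/O tag set.
 */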
192 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
194 u32 queue_idx = nvme_tcp_queue_id(queue);
197 return queue->ctrl->admin_tag_set.tags[queue_idx];
198 return queue->ctrl->tag_set.tags[queue_idx - 1];
201 static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
203 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
206 static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
208 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
211 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
216 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
218 /* use the pdu space in the back for the data pdu */
219 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
220 sizeof(struct nvme_tcp_data_pdu);
223 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
225 if (nvme_is_fabrics(req->req.cmd))
226 return NVME_TCP_ADMIN_CCSZ;
227 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
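/* Write payloads that fit within the in-capsule data size are sent inline
 * right after the command PDU; larger writes are transferred in H2CData
 * PDUs once the controller sends an R2T.
 */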
230 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
232 return req == &req->queue->ctrl->async_req;
235 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
239 if (unlikely(nvme_tcp_async_req(req)))
240 return false; /* async events don't have a request */
242 rq = blk_mq_rq_from_pdu(req);
244 return rq_data_dir(rq) == WRITE && req->data_len &&
245 req->data_len <= nvme_tcp_inline_data_size(req);
248 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
250 return req->iter.bvec->bv_page;
253 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
255 return req->iter.bvec->bv_offset + req->iter.iov_offset;
258 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
260 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
261 req->pdu_len - req->pdu_sent);
264 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
266 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
267 req->pdu_len - req->pdu_sent : 0;
270 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
273 return nvme_tcp_pdu_data_left(req) <= len;
276 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
279 struct request *rq = blk_mq_rq_from_pdu(req);
285 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
286 vec = &rq->special_vec;
288 size = blk_rq_payload_bytes(rq);
291 struct bio *bio = req->curr_bio;
295 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
297 bio_for_each_bvec(bv, bio, bi) {
300 size = bio->bi_iter.bi_size;
301 offset = bio->bi_iter.bi_bvec_done;
304 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
305 req->iter.iov_offset = offset;
308 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
311 req->data_sent += len;
312 req->pdu_sent += len;
313 iov_iter_advance(&req->iter, len);
314 if (!iov_iter_count(&req->iter) &&
315 req->data_sent < req->data_len) {
316 req->curr_bio = req->curr_bio->bi_next;
317 nvme_tcp_init_iter(req, ITER_SOURCE);
321 static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
325 /* drain the send queue as much as we can... */
327 ret = nvme_tcp_try_send(queue);
331 static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
333 return !list_empty(&queue->send_list) ||
334 !llist_empty(&queue->req_list);
337 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
338 bool sync, bool last)
340 struct nvme_tcp_queue *queue = req->queue;
343 empty = llist_add(&req->lentry, &queue->req_list) &&
344 list_empty(&queue->send_list) && !queue->request;
347 * If we are the first on the send_list, try to send directly;
348 * otherwise queue io_work. Also, only do that if we are on the
349 * same cpu, so we don't introduce contention.
351 if (queue->io_cpu == raw_smp_processor_id() &&
352 sync && empty && mutex_trylock(&queue->send_mutex)) {
353 nvme_tcp_send_all(queue);
354 mutex_unlock(&queue->send_mutex);
357 if (last && nvme_tcp_queue_more(queue))
358 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
361 static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
363 struct nvme_tcp_request *req;
364 struct llist_node *node;
366 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
367 req = llist_entry(node, struct nvme_tcp_request, lentry);
368 list_add(&req->entry, &queue->send_list);
372 static inline struct nvme_tcp_request *
373 nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
375 struct nvme_tcp_request *req;
377 req = list_first_entry_or_null(&queue->send_list,
378 struct nvme_tcp_request, entry);
380 nvme_tcp_process_req_list(queue);
381 req = list_first_entry_or_null(&queue->send_list,
382 struct nvme_tcp_request, entry);
387 list_del(&req->entry);
391 static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
394 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
395 crypto_ahash_final(hash);
398 static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
399 struct page *page, off_t off, size_t len)
401 struct scatterlist sg;
403 sg_init_table(&sg, 1);
404 sg_set_page(&sg, page, len, off);
405 ahash_request_set_crypt(hash, &sg, NULL, len);
406 crypto_ahash_update(hash);
409 static inline void nvme_tcp_hdgst(struct ahash_request *hash,
410 void *pdu, size_t len)
412 struct scatterlist sg;
414 sg_init_one(&sg, pdu, len);
415 ahash_request_set_crypt(hash, &sg, pdu + len, len);
416 crypto_ahash_digest(hash);
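/* Header digest verification: the received CRC32C follows the PDU header
 * at hdr->hlen; save it, recompute the digest over the header in place,
 * and compare the two values.
 */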
419 static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
420 void *pdu, size_t pdu_len)
422 struct nvme_tcp_hdr *hdr = pdu;
426 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
427 dev_err(queue->ctrl->ctrl.device,
428 "queue %d: header digest flag is cleared\n",
429 nvme_tcp_queue_id(queue));
433 recv_digest = *(__le32 *)(pdu + hdr->hlen);
434 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
435 exp_digest = *(__le32 *)(pdu + hdr->hlen);
436 if (recv_digest != exp_digest) {
437 dev_err(queue->ctrl->ctrl.device,
438 "header digest error: recv %#x expected %#x\n",
439 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
446 static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
448 struct nvme_tcp_hdr *hdr = pdu;
449 u8 digest_len = nvme_tcp_hdgst_len(queue);
452 len = le32_to_cpu(hdr->plen) - hdr->hlen -
453 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
455 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
456 dev_err(queue->ctrl->ctrl.device,
457 "queue %d: data digest flag is cleared\n",
458 nvme_tcp_queue_id(queue));
461 crypto_ahash_init(queue->rcv_hash);
466 static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
467 struct request *rq, unsigned int hctx_idx)
469 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
471 page_frag_free(req->pdu);
474 static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
475 struct request *rq, unsigned int hctx_idx,
476 unsigned int numa_node)
478 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
479 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
480 struct nvme_tcp_cmd_pdu *pdu;
481 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
482 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
483 u8 hdgst = nvme_tcp_hdgst_len(queue);
485 req->pdu = page_frag_alloc(&queue->pf_cache,
486 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
487 GFP_KERNEL | __GFP_ZERO);
493 nvme_req(rq)->ctrl = &ctrl->ctrl;
494 nvme_req(rq)->cmd = &pdu->cmd;
499 static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
500 unsigned int hctx_idx)
502 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
503 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
505 hctx->driver_data = queue;
509 static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
510 unsigned int hctx_idx)
512 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
513 struct nvme_tcp_queue *queue = &ctrl->queues[0];
515 hctx->driver_data = queue;
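/* Derive the receive state from the per-queue remaining counters: a
 * pending PDU header takes precedence, then a pending data digest,
 * otherwise payload data.
 */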
519 static enum nvme_tcp_recv_state
520 nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
522 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
523 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
527 static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
529 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
530 nvme_tcp_hdgst_len(queue);
531 queue->pdu_offset = 0;
532 queue->data_remaining = -1;
533 queue->ddgst_remaining = 0;
536 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
538 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
541 dev_warn(ctrl->device, "starting error recovery\n");
542 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
545 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
546 struct nvme_completion *cqe)
548 struct nvme_tcp_request *req;
551 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
553 dev_err(queue->ctrl->ctrl.device,
554 "got bad cqe.command_id %#x on queue %d\n",
555 cqe->command_id, nvme_tcp_queue_id(queue));
556 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
560 req = blk_mq_rq_to_pdu(rq);
561 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
562 req->status = cqe->status;
564 if (!nvme_try_complete_req(rq, req->status, cqe->result))
565 nvme_complete_rq(rq);
571 static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
572 struct nvme_tcp_data_pdu *pdu)
576 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
578 dev_err(queue->ctrl->ctrl.device,
579 "got bad c2hdata.command_id %#x on queue %d\n",
580 pdu->command_id, nvme_tcp_queue_id(queue));
584 if (!blk_rq_payload_bytes(rq)) {
585 dev_err(queue->ctrl->ctrl.device,
586 "queue %d tag %#x unexpected data\n",
587 nvme_tcp_queue_id(queue), rq->tag);
591 queue->data_remaining = le32_to_cpu(pdu->data_length);
593 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
594 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
595 dev_err(queue->ctrl->ctrl.device,
596 "queue %d tag %#x SUCCESS set but not last PDU\n",
597 nvme_tcp_queue_id(queue), rq->tag);
598 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
605 static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
606 struct nvme_tcp_rsp_pdu *pdu)
608 struct nvme_completion *cqe = &pdu->cqe;
612 * AEN requests are special as they don't time out and can
613 * survive any kind of queue freeze and often don't respond to
614 * aborts. We don't even bother to allocate a struct request
615 * for them but rather special case them here.
617 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
619 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
622 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
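/* Build the next H2CData PDU for a write: the payload size is capped by
 * the MAXH2CDATA value the controller advertised in its ICResp, and the
 * DATA_LAST flag is only set once no H2C data is left for this request.
 */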
627 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
629 struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
630 struct nvme_tcp_queue *queue = req->queue;
631 struct request *rq = blk_mq_rq_from_pdu(req);
632 u32 h2cdata_sent = req->pdu_len;
633 u8 hdgst = nvme_tcp_hdgst_len(queue);
634 u8 ddgst = nvme_tcp_ddgst_len(queue);
636 req->state = NVME_TCP_SEND_H2C_PDU;
638 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
640 req->h2cdata_left -= req->pdu_len;
641 req->h2cdata_offset += h2cdata_sent;
643 memset(data, 0, sizeof(*data));
644 data->hdr.type = nvme_tcp_h2c_data;
645 if (!req->h2cdata_left)
646 data->hdr.flags = NVME_TCP_F_DATA_LAST;
647 if (queue->hdr_digest)
648 data->hdr.flags |= NVME_TCP_F_HDGST;
649 if (queue->data_digest)
650 data->hdr.flags |= NVME_TCP_F_DDGST;
651 data->hdr.hlen = sizeof(*data);
652 data->hdr.pdo = data->hdr.hlen + hdgst;
654 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
655 data->ttag = req->ttag;
656 data->command_id = nvme_cid(rq);
657 data->data_offset = cpu_to_le32(req->h2cdata_offset);
658 data->data_length = cpu_to_le32(req->pdu_len);
661 static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
662 struct nvme_tcp_r2t_pdu *pdu)
664 struct nvme_tcp_request *req;
666 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
667 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
669 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
671 dev_err(queue->ctrl->ctrl.device,
672 "got bad r2t.command_id %#x on queue %d\n",
673 pdu->command_id, nvme_tcp_queue_id(queue));
676 req = blk_mq_rq_to_pdu(rq);
678 if (unlikely(!r2t_length)) {
679 dev_err(queue->ctrl->ctrl.device,
680 "req %d r2t len is %u, probably a bug...\n",
681 rq->tag, r2t_length);
685 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
686 dev_err(queue->ctrl->ctrl.device,
687 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
688 rq->tag, r2t_length, req->data_len, req->data_sent);
692 if (unlikely(r2t_offset < req->data_sent)) {
693 dev_err(queue->ctrl->ctrl.device,
694 "req %d unexpected r2t offset %u (expected %zu)\n",
695 rq->tag, r2t_offset, req->data_sent);
700 req->h2cdata_left = r2t_length;
701 req->h2cdata_offset = r2t_offset;
702 req->ttag = pdu->ttag;
704 nvme_tcp_setup_h2c_data_pdu(req);
705 nvme_tcp_queue_request(req, false, true);
710 static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
711 unsigned int *offset, size_t *len)
713 struct nvme_tcp_hdr *hdr;
714 char *pdu = queue->pdu;
715 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
718 ret = skb_copy_bits(skb, *offset,
719 &pdu[queue->pdu_offset], rcv_len);
723 queue->pdu_remaining -= rcv_len;
724 queue->pdu_offset += rcv_len;
727 if (queue->pdu_remaining)
731 if (queue->hdr_digest) {
732 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
738 if (queue->data_digest) {
739 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
745 case nvme_tcp_c2h_data:
746 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
748 nvme_tcp_init_recv_ctx(queue);
749 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
751 nvme_tcp_init_recv_ctx(queue);
752 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
754 dev_err(queue->ctrl->ctrl.device,
755 "unsupported pdu type (%d)\n", hdr->type);
760 static inline void nvme_tcp_end_request(struct request *rq, u16 status)
762 union nvme_result res = {};
764 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
765 nvme_complete_rq(rq);
768 static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
769 unsigned int *offset, size_t *len)
771 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
773 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
774 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
779 recv_len = min_t(size_t, *len, queue->data_remaining);
783 if (!iov_iter_count(&req->iter)) {
784 req->curr_bio = req->curr_bio->bi_next;
787 * If we don't have any bios it means that the controller
788 * sent more data than we requested, hence error.
790 if (!req->curr_bio) {
791 dev_err(queue->ctrl->ctrl.device,
792 "queue %d no space in request %#x",
793 nvme_tcp_queue_id(queue), rq->tag);
794 nvme_tcp_init_recv_ctx(queue);
797 nvme_tcp_init_iter(req, ITER_DEST);
800 /* we can read only from what is left in this bio */
801 recv_len = min_t(size_t, recv_len,
802 iov_iter_count(&req->iter));
804 if (queue->data_digest)
805 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
806 &req->iter, recv_len, queue->rcv_hash);
808 ret = skb_copy_datagram_iter(skb, *offset,
809 &req->iter, recv_len);
811 dev_err(queue->ctrl->ctrl.device,
812 "queue %d failed to copy request %#x data",
813 nvme_tcp_queue_id(queue), rq->tag);
819 queue->data_remaining -= recv_len;
822 if (!queue->data_remaining) {
823 if (queue->data_digest) {
824 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
825 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
827 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
828 nvme_tcp_end_request(rq,
829 le16_to_cpu(req->status));
832 nvme_tcp_init_recv_ctx(queue);
839 static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
840 struct sk_buff *skb, unsigned int *offset, size_t *len)
842 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
843 char *ddgst = (char *)&queue->recv_ddgst;
844 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
845 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
848 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
852 queue->ddgst_remaining -= recv_len;
855 if (queue->ddgst_remaining)
858 if (queue->recv_ddgst != queue->exp_ddgst) {
859 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
861 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
863 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
865 dev_err(queue->ctrl->ctrl.device,
866 "data digest error: recv %#x expected %#x\n",
867 le32_to_cpu(queue->recv_ddgst),
868 le32_to_cpu(queue->exp_ddgst));
871 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
872 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
874 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
876 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
880 nvme_tcp_init_recv_ctx(queue);
884 static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
885 unsigned int offset, size_t len)
887 struct nvme_tcp_queue *queue = desc->arg.data;
888 size_t consumed = len;
891 if (unlikely(!queue->rd_enabled))
895 switch (nvme_tcp_recv_state(queue)) {
896 case NVME_TCP_RECV_PDU:
897 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
899 case NVME_TCP_RECV_DATA:
900 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
902 case NVME_TCP_RECV_DDGST:
903 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
909 dev_err(queue->ctrl->ctrl.device,
910 "receive failed: %d\n", result);
911 queue->rd_enabled = false;
912 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
920 static void nvme_tcp_data_ready(struct sock *sk)
922 struct nvme_tcp_queue *queue;
924 trace_sk_data_ready(sk);
926 read_lock_bh(&sk->sk_callback_lock);
927 queue = sk->sk_user_data;
928 if (likely(queue && queue->rd_enabled) &&
929 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
930 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
931 read_unlock_bh(&sk->sk_callback_lock);
934 static void nvme_tcp_write_space(struct sock *sk)
936 struct nvme_tcp_queue *queue;
938 read_lock_bh(&sk->sk_callback_lock);
939 queue = sk->sk_user_data;
940 if (likely(queue && sk_stream_is_writeable(sk))) {
941 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
942 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
944 read_unlock_bh(&sk->sk_callback_lock);
947 static void nvme_tcp_state_change(struct sock *sk)
949 struct nvme_tcp_queue *queue;
951 read_lock_bh(&sk->sk_callback_lock);
952 queue = sk->sk_user_data;
956 switch (sk->sk_state) {
962 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
965 dev_info(queue->ctrl->ctrl.device,
966 "queue %d socket state %d\n",
967 nvme_tcp_queue_id(queue), sk->sk_state);
970 queue->state_change(sk);
972 read_unlock_bh(&sk->sk_callback_lock);
975 static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
977 queue->request = NULL;
980 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
982 if (nvme_tcp_async_req(req)) {
983 union nvme_result res = {};
985 nvme_complete_async_event(&req->queue->ctrl->ctrl,
986 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
988 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
989 NVME_SC_HOST_PATH_ERROR);
993 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
995 struct nvme_tcp_queue *queue = req->queue;
996 int req_data_len = req->data_len;
997 u32 h2cdata_left = req->h2cdata_left;
1000 struct page *page = nvme_tcp_req_cur_page(req);
1001 size_t offset = nvme_tcp_req_cur_offset(req);
1002 size_t len = nvme_tcp_req_cur_length(req);
1003 bool last = nvme_tcp_pdu_last_send(req, len);
1004 int req_data_sent = req->data_sent;
1005 int ret, flags = MSG_DONTWAIT;
1007 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
1010 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
1012 if (sendpage_ok(page)) {
1013 ret = kernel_sendpage(queue->sock, page, offset, len,
1016 ret = sock_no_sendpage(queue->sock, page, offset, len,
1022 if (queue->data_digest)
1023 nvme_tcp_ddgst_update(queue->snd_hash, page,
1027 * update the request iterator except for the last payload send
1028 * in the request where we don't want to modify it as we may
1029 * compete with the RX path completing the request.
1031 if (req_data_sent + ret < req_data_len)
1032 nvme_tcp_advance_req(req, ret);
1034 /* fully successful last send in current PDU */
1035 if (last && ret == len) {
1036 if (queue->data_digest) {
1037 nvme_tcp_ddgst_final(queue->snd_hash,
1039 req->state = NVME_TCP_SEND_DDGST;
1043 nvme_tcp_setup_h2c_data_pdu(req);
1045 nvme_tcp_done_send_req(queue);
1053 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1055 struct nvme_tcp_queue *queue = req->queue;
1056 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1057 bool inline_data = nvme_tcp_has_inline_data(req);
1058 u8 hdgst = nvme_tcp_hdgst_len(queue);
1059 int len = sizeof(*pdu) + hdgst - req->offset;
1060 int flags = MSG_DONTWAIT;
1063 if (inline_data || nvme_tcp_queue_more(queue))
1064 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
1068 if (queue->hdr_digest && !req->offset)
1069 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1071 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1072 offset_in_page(pdu) + req->offset, len, flags);
1073 if (unlikely(ret <= 0))
1079 req->state = NVME_TCP_SEND_DATA;
1080 if (queue->data_digest)
1081 crypto_ahash_init(queue->snd_hash);
1083 nvme_tcp_done_send_req(queue);
1092 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1094 struct nvme_tcp_queue *queue = req->queue;
1095 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1096 u8 hdgst = nvme_tcp_hdgst_len(queue);
1097 int len = sizeof(*pdu) - req->offset + hdgst;
1100 if (queue->hdr_digest && !req->offset)
1101 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1103 if (!req->h2cdata_left)
1104 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1105 offset_in_page(pdu) + req->offset, len,
1106 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
1108 ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
1109 offset_in_page(pdu) + req->offset, len,
1110 MSG_DONTWAIT | MSG_MORE);
1111 if (unlikely(ret <= 0))
1116 req->state = NVME_TCP_SEND_DATA;
1117 if (queue->data_digest)
1118 crypto_ahash_init(queue->snd_hash);
1126 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1128 struct nvme_tcp_queue *queue = req->queue;
1129 size_t offset = req->offset;
1130 u32 h2cdata_left = req->h2cdata_left;
1132 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1134 .iov_base = (u8 *)&req->ddgst + req->offset,
1135 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1138 if (nvme_tcp_queue_more(queue))
1139 msg.msg_flags |= MSG_MORE;
1141 msg.msg_flags |= MSG_EOR;
1143 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1144 if (unlikely(ret <= 0))
1147 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1149 nvme_tcp_setup_h2c_data_pdu(req);
1151 nvme_tcp_done_send_req(queue);
1159 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1161 struct nvme_tcp_request *req;
1162 unsigned int noreclaim_flag;
1165 if (!queue->request) {
1166 queue->request = nvme_tcp_fetch_request(queue);
1167 if (!queue->request)
1170 req = queue->request;
1172 noreclaim_flag = memalloc_noreclaim_save();
1173 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1174 ret = nvme_tcp_try_send_cmd_pdu(req);
1177 if (!nvme_tcp_has_inline_data(req))
1181 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1182 ret = nvme_tcp_try_send_data_pdu(req);
1187 if (req->state == NVME_TCP_SEND_DATA) {
1188 ret = nvme_tcp_try_send_data(req);
1193 if (req->state == NVME_TCP_SEND_DDGST)
1194 ret = nvme_tcp_try_send_ddgst(req);
1196 if (ret == -EAGAIN) {
1198 } else if (ret < 0) {
1199 dev_err(queue->ctrl->ctrl.device,
1200 "failed to send request %d\n", ret);
1201 nvme_tcp_fail_request(queue->request);
1202 nvme_tcp_done_send_req(queue);
1205 memalloc_noreclaim_restore(noreclaim_flag);
1209 static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1211 struct socket *sock = queue->sock;
1212 struct sock *sk = sock->sk;
1213 read_descriptor_t rd_desc;
1216 rd_desc.arg.data = queue;
1220 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
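/* io_work alternates between sending (under send_mutex) and receiving
 * for roughly one millisecond, then requeues itself if work is still
 * pending so a single queue cannot monopolize the workqueue.
 */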
1225 static void nvme_tcp_io_work(struct work_struct *w)
1227 struct nvme_tcp_queue *queue =
1228 container_of(w, struct nvme_tcp_queue, io_work);
1229 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1232 bool pending = false;
1235 if (mutex_trylock(&queue->send_mutex)) {
1236 result = nvme_tcp_try_send(queue);
1237 mutex_unlock(&queue->send_mutex);
1240 else if (unlikely(result < 0))
1244 result = nvme_tcp_try_recv(queue);
1247 else if (unlikely(result < 0))
1250 if (!pending || !queue->rd_enabled)
1253 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1255 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1258 static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1260 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1262 ahash_request_free(queue->rcv_hash);
1263 ahash_request_free(queue->snd_hash);
1264 crypto_free_ahash(tfm);
1267 static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1269 struct crypto_ahash *tfm;
1271 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1273 return PTR_ERR(tfm);
1275 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1276 if (!queue->snd_hash)
1278 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1280 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1281 if (!queue->rcv_hash)
1283 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1287 ahash_request_free(queue->snd_hash);
1289 crypto_free_ahash(tfm);
1293 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1295 struct nvme_tcp_request *async = &ctrl->async_req;
1297 page_frag_free(async->pdu);
1300 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1302 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1303 struct nvme_tcp_request *async = &ctrl->async_req;
1304 u8 hdgst = nvme_tcp_hdgst_len(queue);
1306 async->pdu = page_frag_alloc(&queue->pf_cache,
1307 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1308 GFP_KERNEL | __GFP_ZERO);
1312 async->queue = &ctrl->queues[0];
1316 static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1319 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1320 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1321 unsigned int noreclaim_flag;
1323 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1326 if (queue->hdr_digest || queue->data_digest)
1327 nvme_tcp_free_crypto(queue);
1329 if (queue->pf_cache.va) {
1330 page = virt_to_head_page(queue->pf_cache.va);
1331 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1332 queue->pf_cache.va = NULL;
1335 noreclaim_flag = memalloc_noreclaim_save();
1336 sock_release(queue->sock);
1337 memalloc_noreclaim_restore(noreclaim_flag);
1340 mutex_destroy(&queue->send_mutex);
1341 mutex_destroy(&queue->queue_lock);
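/* NVMe/TCP connection initialization: send an ICReq PDU and validate the
 * controller's ICResp (PDU format version, header/data digest settings,
 * CPDA and MAXH2CDATA) before the queue is used for fabrics commands.
 */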
1344 static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1346 struct nvme_tcp_icreq_pdu *icreq;
1347 struct nvme_tcp_icresp_pdu *icresp;
1348 struct msghdr msg = {};
1350 bool ctrl_hdgst, ctrl_ddgst;
1354 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1358 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1364 icreq->hdr.type = nvme_tcp_icreq;
1365 icreq->hdr.hlen = sizeof(*icreq);
1367 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1368 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1369 icreq->maxr2t = 0; /* single inflight r2t supported */
1370 icreq->hpda = 0; /* no alignment constraint */
1371 if (queue->hdr_digest)
1372 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1373 if (queue->data_digest)
1374 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1376 iov.iov_base = icreq;
1377 iov.iov_len = sizeof(*icreq);
1378 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1382 memset(&msg, 0, sizeof(msg));
1383 iov.iov_base = icresp;
1384 iov.iov_len = sizeof(*icresp);
1385 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1386 iov.iov_len, msg.msg_flags);
1391 if (icresp->hdr.type != nvme_tcp_icresp) {
1392 pr_err("queue %d: bad type returned %d\n",
1393 nvme_tcp_queue_id(queue), icresp->hdr.type);
1397 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1398 pr_err("queue %d: bad pdu length returned %d\n",
1399 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1403 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1404 pr_err("queue %d: bad pfv returned %d\n",
1405 nvme_tcp_queue_id(queue), icresp->pfv);
1409 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1410 if ((queue->data_digest && !ctrl_ddgst) ||
1411 (!queue->data_digest && ctrl_ddgst)) {
1412 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1413 nvme_tcp_queue_id(queue),
1414 queue->data_digest ? "enabled" : "disabled",
1415 ctrl_ddgst ? "enabled" : "disabled");
1419 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1420 if ((queue->hdr_digest && !ctrl_hdgst) ||
1421 (!queue->hdr_digest && ctrl_hdgst)) {
1422 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1423 nvme_tcp_queue_id(queue),
1424 queue->hdr_digest ? "enabled" : "disabled",
1425 ctrl_hdgst ? "enabled" : "disabled");
1429 if (icresp->cpda != 0) {
1430 pr_err("queue %d: unsupported cpda returned %d\n",
1431 nvme_tcp_queue_id(queue), icresp->cpda);
1435 maxh2cdata = le32_to_cpu(icresp->maxdata);
1436 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1437 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1438 nvme_tcp_queue_id(queue), maxh2cdata);
1441 queue->maxh2cdata = maxh2cdata;
1451 static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1453 return nvme_tcp_queue_id(queue) == 0;
1456 static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1458 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1459 int qid = nvme_tcp_queue_id(queue);
1461 return !nvme_tcp_admin_queue(queue) &&
1462 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1465 static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1467 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1468 int qid = nvme_tcp_queue_id(queue);
1470 return !nvme_tcp_admin_queue(queue) &&
1471 !nvme_tcp_default_queue(queue) &&
1472 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1473 ctrl->io_queues[HCTX_TYPE_READ];
1476 static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1478 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1479 int qid = nvme_tcp_queue_id(queue);
1481 return !nvme_tcp_admin_queue(queue) &&
1482 !nvme_tcp_default_queue(queue) &&
1483 !nvme_tcp_read_queue(queue) &&
1484 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1485 ctrl->io_queues[HCTX_TYPE_READ] +
1486 ctrl->io_queues[HCTX_TYPE_POLL];
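/* Spread io_work across online CPUs: the queue's position within its
 * class (default, read or poll) selects the CPU, wrapping around the
 * online CPU mask.
 */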
1489 static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1491 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1492 int qid = nvme_tcp_queue_id(queue);
1495 if (nvme_tcp_default_queue(queue))
1497 else if (nvme_tcp_read_queue(queue))
1498 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1499 else if (nvme_tcp_poll_queue(queue))
1500 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1501 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1502 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1505 static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
1507 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1508 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1509 int ret, rcv_pdu_size;
1511 mutex_init(&queue->queue_lock);
1513 init_llist_head(&queue->req_list);
1514 INIT_LIST_HEAD(&queue->send_list);
1515 mutex_init(&queue->send_mutex);
1516 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1519 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1521 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1522 NVME_TCP_ADMIN_CCSZ;
1524 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1525 IPPROTO_TCP, &queue->sock);
1527 dev_err(nctrl->device,
1528 "failed to create socket: %d\n", ret);
1529 goto err_destroy_mutex;
1532 nvme_tcp_reclassify_socket(queue->sock);
1534 /* Single syn retry */
1535 tcp_sock_set_syncnt(queue->sock->sk, 1);
1537 /* Set TCP no delay */
1538 tcp_sock_set_nodelay(queue->sock->sk);
1541 * Cleanup whatever is sitting in the TCP transmit queue on socket
1542 * close. This is done to prevent stale data from being sent should
1543 * the network connection be restored before TCP times out.
1545 sock_no_linger(queue->sock->sk);
1547 if (so_priority > 0)
1548 sock_set_priority(queue->sock->sk, so_priority);
1550 /* Set socket type of service */
1551 if (nctrl->opts->tos >= 0)
1552 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1554 /* Set a 10 second timeout for icresp recvmsg */
1555 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1557 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1558 queue->sock->sk->sk_use_task_frag = false;
1559 nvme_tcp_set_queue_io_cpu(queue);
1560 queue->request = NULL;
1561 queue->data_remaining = 0;
1562 queue->ddgst_remaining = 0;
1563 queue->pdu_remaining = 0;
1564 queue->pdu_offset = 0;
1565 sk_set_memalloc(queue->sock->sk);
1567 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1568 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1569 sizeof(ctrl->src_addr));
1571 dev_err(nctrl->device,
1572 "failed to bind queue %d socket %d\n",
1578 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1579 char *iface = nctrl->opts->host_iface;
1580 sockptr_t optval = KERNEL_SOCKPTR(iface);
1582 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1583 optval, strlen(iface));
1585 dev_err(nctrl->device,
1586 "failed to bind to interface %s queue %d err %d\n",
1592 queue->hdr_digest = nctrl->opts->hdr_digest;
1593 queue->data_digest = nctrl->opts->data_digest;
1594 if (queue->hdr_digest || queue->data_digest) {
1595 ret = nvme_tcp_alloc_crypto(queue);
1597 dev_err(nctrl->device,
1598 "failed to allocate queue %d crypto\n", qid);
1603 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1604 nvme_tcp_hdgst_len(queue);
1605 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1611 dev_dbg(nctrl->device, "connecting queue %d\n",
1612 nvme_tcp_queue_id(queue));
1614 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1615 sizeof(ctrl->addr), 0);
1617 dev_err(nctrl->device,
1618 "failed to connect socket: %d\n", ret);
1622 ret = nvme_tcp_init_connection(queue);
1624 goto err_init_connect;
1626 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1631 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1635 if (queue->hdr_digest || queue->data_digest)
1636 nvme_tcp_free_crypto(queue);
1638 sock_release(queue->sock);
1641 mutex_destroy(&queue->send_mutex);
1642 mutex_destroy(&queue->queue_lock);
1646 static void nvme_tcp_restore_sock_ops(struct nvme_tcp_queue *queue)
1648 struct socket *sock = queue->sock;
1650 write_lock_bh(&sock->sk->sk_callback_lock);
1651 sock->sk->sk_user_data = NULL;
1652 sock->sk->sk_data_ready = queue->data_ready;
1653 sock->sk->sk_state_change = queue->state_change;
1654 sock->sk->sk_write_space = queue->write_space;
1655 write_unlock_bh(&sock->sk->sk_callback_lock);
1658 static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1660 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1661 nvme_tcp_restore_sock_ops(queue);
1662 cancel_work_sync(&queue->io_work);
1665 static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1667 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1668 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1670 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1673 mutex_lock(&queue->queue_lock);
1674 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1675 __nvme_tcp_stop_queue(queue);
1676 mutex_unlock(&queue->queue_lock);
1679 static void nvme_tcp_setup_sock_ops(struct nvme_tcp_queue *queue)
1681 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1682 queue->sock->sk->sk_user_data = queue;
1683 queue->state_change = queue->sock->sk->sk_state_change;
1684 queue->data_ready = queue->sock->sk->sk_data_ready;
1685 queue->write_space = queue->sock->sk->sk_write_space;
1686 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1687 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1688 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1689 #ifdef CONFIG_NET_RX_BUSY_POLL
1690 queue->sock->sk->sk_ll_usec = 1;
1692 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1695 static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1697 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1698 struct nvme_tcp_queue *queue = &ctrl->queues[idx];
1701 queue->rd_enabled = true;
1702 nvme_tcp_init_recv_ctx(queue);
1703 nvme_tcp_setup_sock_ops(queue);
1706 ret = nvmf_connect_io_queue(nctrl, idx);
1708 ret = nvmf_connect_admin_queue(nctrl);
1711 set_bit(NVME_TCP_Q_LIVE, &queue->flags);
1713 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1714 __nvme_tcp_stop_queue(queue);
1715 dev_err(nctrl->device,
1716 "failed to connect queue: %d ret=%d\n", idx, ret);
1721 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1723 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1724 cancel_work_sync(&ctrl->async_event_work);
1725 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1726 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1729 nvme_tcp_free_queue(ctrl, 0);
1732 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1736 for (i = 1; i < ctrl->queue_count; i++)
1737 nvme_tcp_free_queue(ctrl, i);
1740 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1744 for (i = 1; i < ctrl->queue_count; i++)
1745 nvme_tcp_stop_queue(ctrl, i);
1748 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
1749 int first, int last)
1753 for (i = first; i < last; i++) {
1754 ret = nvme_tcp_start_queue(ctrl, i);
1756 goto out_stop_queues;
1762 for (i--; i >= first; i--)
1763 nvme_tcp_stop_queue(ctrl, i);
1767 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1771 ret = nvme_tcp_alloc_queue(ctrl, 0);
1775 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1777 goto out_free_queue;
1782 nvme_tcp_free_queue(ctrl, 0);
1786 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1790 for (i = 1; i < ctrl->queue_count; i++) {
1791 ret = nvme_tcp_alloc_queue(ctrl, i);
1793 goto out_free_queues;
1799 for (i--; i >= 1; i--)
1800 nvme_tcp_free_queue(ctrl, i);
1805 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1807 unsigned int nr_io_queues;
1809 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1810 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1811 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1813 return nr_io_queues;
1816 static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1817 unsigned int nr_io_queues)
1819 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1820 struct nvmf_ctrl_options *opts = nctrl->opts;
1822 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1824 * separate read/write queues
1825 * hand out dedicated default queues only after we have
1826 * sufficient read queues.
1828 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1829 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1830 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1831 min(opts->nr_write_queues, nr_io_queues);
1832 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1835 * shared read/write queues
1836 * either no write queues were requested, or we don't have
1837 * sufficient queue count to have dedicated default queues.
1839 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1840 min(opts->nr_io_queues, nr_io_queues);
1841 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1844 if (opts->nr_poll_queues && nr_io_queues) {
1845 /* map dedicated poll queues only if we have queues left */
1846 ctrl->io_queues[HCTX_TYPE_POLL] =
1847 min(opts->nr_poll_queues, nr_io_queues);
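/* Worked example (hypothetical option values, assuming the controller
 * grants every requested queue): with nr_io_queues=4, nr_write_queues=2
 * and nr_poll_queues=1 on an 8-CPU host, nvme_tcp_nr_io_queues() asks
 * for 4 + 2 + 1 = 7 queues, and the split above yields 4 read queues,
 * 2 default (write) queues and 1 poll queue.
 */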
1851 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1853 unsigned int nr_io_queues;
1856 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1857 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1861 if (nr_io_queues == 0) {
1862 dev_err(ctrl->device,
1863 "unable to set any I/O queues\n");
1867 ctrl->queue_count = nr_io_queues + 1;
1868 dev_info(ctrl->device,
1869 "creating %d I/O queues.\n", nr_io_queues);
1871 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1873 return __nvme_tcp_alloc_io_queues(ctrl);
1876 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1878 nvme_tcp_stop_io_queues(ctrl);
1880 nvme_remove_io_tag_set(ctrl);
1881 nvme_tcp_free_io_queues(ctrl);
1884 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1888 ret = nvme_tcp_alloc_io_queues(ctrl);
1893 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
1895 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2,
1896 sizeof(struct nvme_tcp_request));
1898 goto out_free_io_queues;
1902 * Only start IO queues for which we have allocated the tagset
1903 * and limited it to the available queues. On reconnects, the
1904 * queue number might have changed.
1906 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
1907 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
1909 goto out_cleanup_connect_q;
1912 nvme_unquiesce_io_queues(ctrl);
1913 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1915 * If we timed out waiting for freeze we are likely to
1916 * be stuck. Fail the controller initialization just
1920 goto out_wait_freeze_timed_out;
1922 blk_mq_update_nr_hw_queues(ctrl->tagset,
1923 ctrl->queue_count - 1);
1924 nvme_unfreeze(ctrl);
1928 * If the number of queues has increased (reconnect case)
1929 * start all new queues now.
1931 ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
1932 ctrl->tagset->nr_hw_queues + 1);
1934 goto out_wait_freeze_timed_out;
1938 out_wait_freeze_timed_out:
1939 nvme_quiesce_io_queues(ctrl);
1940 nvme_sync_io_queues(ctrl);
1941 nvme_tcp_stop_io_queues(ctrl);
1942 out_cleanup_connect_q:
1943 nvme_cancel_tagset(ctrl);
1945 nvme_remove_io_tag_set(ctrl);
1947 nvme_tcp_free_io_queues(ctrl);
1951 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1953 nvme_tcp_stop_queue(ctrl, 0);
1955 nvme_remove_admin_tag_set(ctrl);
1956 nvme_tcp_free_admin_queue(ctrl);
1959 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1963 error = nvme_tcp_alloc_admin_queue(ctrl);
1968 error = nvme_alloc_admin_tag_set(ctrl,
1969 &to_tcp_ctrl(ctrl)->admin_tag_set,
1970 &nvme_tcp_admin_mq_ops,
1971 sizeof(struct nvme_tcp_request));
1973 goto out_free_queue;
1976 error = nvme_tcp_start_queue(ctrl, 0);
1978 goto out_cleanup_tagset;
1980 error = nvme_enable_ctrl(ctrl);
1982 goto out_stop_queue;
1984 nvme_unquiesce_admin_queue(ctrl);
1986 error = nvme_init_ctrl_finish(ctrl, false);
1988 goto out_quiesce_queue;
1993 nvme_quiesce_admin_queue(ctrl);
1994 blk_sync_queue(ctrl->admin_q);
1996 nvme_tcp_stop_queue(ctrl, 0);
1997 nvme_cancel_admin_tagset(ctrl);
2000 nvme_remove_admin_tag_set(ctrl);
2002 nvme_tcp_free_admin_queue(ctrl);
2006 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
2009 nvme_quiesce_admin_queue(ctrl);
2010 blk_sync_queue(ctrl->admin_q);
2011 nvme_tcp_stop_queue(ctrl, 0);
2012 nvme_cancel_admin_tagset(ctrl);
2014 nvme_unquiesce_admin_queue(ctrl);
2015 nvme_tcp_destroy_admin_queue(ctrl, remove);
2018 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
2021 if (ctrl->queue_count <= 1)
2023 nvme_quiesce_admin_queue(ctrl);
2024 nvme_start_freeze(ctrl);
2025 nvme_quiesce_io_queues(ctrl);
2026 nvme_sync_io_queues(ctrl);
2027 nvme_tcp_stop_io_queues(ctrl);
2028 nvme_cancel_tagset(ctrl);
2030 nvme_unquiesce_io_queues(ctrl);
2031 nvme_tcp_destroy_io_queues(ctrl, remove);
2034 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2036 /* If we are resetting/deleting then do nothing */
2037 if (ctrl->state != NVME_CTRL_CONNECTING) {
2038 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2039 ctrl->state == NVME_CTRL_LIVE);
2043 if (nvmf_should_reconnect(ctrl)) {
2044 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2045 ctrl->opts->reconnect_delay);
2046 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2047 ctrl->opts->reconnect_delay * HZ);
2049 dev_info(ctrl->device, "Removing controller...\n");
2050 nvme_delete_ctrl(ctrl);
2054 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2056 struct nvmf_ctrl_options *opts = ctrl->opts;
2059 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2065 dev_err(ctrl->device, "icdoff is not supported!\n");
2069 if (!nvme_ctrl_sgl_supported(ctrl)) {
2071 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2075 if (opts->queue_size > ctrl->sqsize + 1)
2076 dev_warn(ctrl->device,
2077 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2078 opts->queue_size, ctrl->sqsize + 1);
2080 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2081 dev_warn(ctrl->device,
2082 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2083 ctrl->sqsize + 1, ctrl->maxcmd);
2084 ctrl->sqsize = ctrl->maxcmd - 1;
2087 if (ctrl->queue_count > 1) {
2088 ret = nvme_tcp_configure_io_queues(ctrl, new);
2093 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
2095 * state change failure is ok if we started ctrl delete,
2096 * unless we are in the middle of creating a new controller,
2097 * in order to avoid races with the teardown flow.
2099 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2100 ctrl->state != NVME_CTRL_DELETING_NOIO);
2106 nvme_start_ctrl(ctrl);
2110 if (ctrl->queue_count > 1) {
2111 nvme_quiesce_io_queues(ctrl);
2112 nvme_sync_io_queues(ctrl);
2113 nvme_tcp_stop_io_queues(ctrl);
2114 nvme_cancel_tagset(ctrl);
2115 nvme_tcp_destroy_io_queues(ctrl, new);
2118 nvme_quiesce_admin_queue(ctrl);
2119 blk_sync_queue(ctrl->admin_q);
2120 nvme_tcp_stop_queue(ctrl, 0);
2121 nvme_cancel_admin_tagset(ctrl);
2122 nvme_tcp_destroy_admin_queue(ctrl, new);
2126 static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2128 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2129 struct nvme_tcp_ctrl, connect_work);
2130 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2132 ++ctrl->nr_reconnects;
2134 if (nvme_tcp_setup_ctrl(ctrl, false))
2137 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n",
2138 ctrl->nr_reconnects);
2140 ctrl->nr_reconnects = 0;
2145 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2146 ctrl->nr_reconnects);
2147 nvme_tcp_reconnect_or_remove(ctrl);
2150 static void nvme_tcp_error_recovery_work(struct work_struct *work)
2152 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2153 struct nvme_tcp_ctrl, err_work);
2154 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2156 nvme_stop_keep_alive(ctrl);
2157 flush_work(&ctrl->async_event_work);
2158 nvme_tcp_teardown_io_queues(ctrl, false);
2159 /* unquiesce to fail fast pending requests */
2160 nvme_unquiesce_io_queues(ctrl);
2161 nvme_tcp_teardown_admin_queue(ctrl, false);
2162 nvme_unquiesce_admin_queue(ctrl);
2163 nvme_auth_stop(ctrl);
2165 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2166 /* state change failure is ok if we started ctrl delete */
2167 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2168 ctrl->state != NVME_CTRL_DELETING_NOIO);
2172 nvme_tcp_reconnect_or_remove(ctrl);
2175 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2177 nvme_tcp_teardown_io_queues(ctrl, shutdown);
2178 nvme_quiesce_admin_queue(ctrl);
2179 nvme_disable_ctrl(ctrl, shutdown);
2180 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2183 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2185 nvme_tcp_teardown_ctrl(ctrl, true);
2188 static void nvme_reset_ctrl_work(struct work_struct *work)
2190 struct nvme_ctrl *ctrl =
2191 container_of(work, struct nvme_ctrl, reset_work);
2193 nvme_stop_ctrl(ctrl);
2194 nvme_tcp_teardown_ctrl(ctrl, false);
2196 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
2197 /* state change failure is ok if we started ctrl delete */
2198 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2199 ctrl->state != NVME_CTRL_DELETING_NOIO);
2203 if (nvme_tcp_setup_ctrl(ctrl, false))
2209 ++ctrl->nr_reconnects;
2210 nvme_tcp_reconnect_or_remove(ctrl);
2213 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2215 flush_work(&to_tcp_ctrl(ctrl)->err_work);
2216 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2219 static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2221 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2223 if (list_empty(&ctrl->list))
2226 mutex_lock(&nvme_tcp_ctrl_mutex);
2227 list_del(&ctrl->list);
2228 mutex_unlock(&nvme_tcp_ctrl_mutex);
2230 nvmf_free_options(nctrl->opts);
2232 kfree(ctrl->queues);
2236 static void nvme_tcp_set_sg_null(struct nvme_command *c)
2238 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2242 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2243 NVME_SGL_FMT_TRANSPORT_A;
2246 static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2247 struct nvme_command *c, u32 data_len)
2249 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2251 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2252 sg->length = cpu_to_le32(data_len);
2253 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2256 static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2259 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2262 sg->length = cpu_to_le32(data_len);
2263 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2264 NVME_SGL_FMT_TRANSPORT_A;
2267 static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2269 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2270 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2271 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2272 struct nvme_command *cmd = &pdu->cmd;
2273 u8 hdgst = nvme_tcp_hdgst_len(queue);
2275 memset(pdu, 0, sizeof(*pdu));
2276 pdu->hdr.type = nvme_tcp_cmd;
2277 if (queue->hdr_digest)
2278 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2279 pdu->hdr.hlen = sizeof(*pdu);
2280 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2282 cmd->common.opcode = nvme_admin_async_event;
2283 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2284 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2285 nvme_tcp_set_sg_null(cmd);
2287 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2288 ctrl->async_req.offset = 0;
2289 ctrl->async_req.curr_bio = NULL;
2290 ctrl->async_req.data_len = 0;
2292 nvme_tcp_queue_request(&ctrl->async_req, true, true);
2295 static void nvme_tcp_complete_timed_out(struct request *rq)
2297 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2298 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2300 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2301 nvmf_complete_timed_out_request(rq);
2304 static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
2306 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2307 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2308 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2309 u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
2310 int qid = nvme_tcp_queue_id(req->queue);
2312 dev_warn(ctrl->device,
2313 "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
2314 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
2315 opc, nvme_opcode_str(qid, opc, fctype));
2317 if (ctrl->state != NVME_CTRL_LIVE) {
2319 * If we are resetting, connecting or deleting we should
2320 * complete immediately because we may block the controller
2321 * teardown or setup sequence:
2322 * - ctrl disable/shutdown fabrics requests
2323 * - connect requests
2324 * - initialization admin requests
2325 * - I/O requests that entered after unquiescing and
2326 * the controller stopped responding
2328 * All other requests should be cancelled by the error
2329 * recovery work, so it's fine that we fail it here.
2331 nvme_tcp_complete_timed_out(rq);
2336 * LIVE state should trigger the normal error recovery which will
2337 * handle completing this request.
2339 nvme_tcp_error_recovery(ctrl);
2340 return BLK_EH_RESET_TIMER;
2343 static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2346 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2347 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2348 struct nvme_command *c = &pdu->cmd;
2350 c->common.flags |= NVME_CMD_SGL_METABUF;
2352 if (!blk_rq_nr_phys_segments(rq))
2353 nvme_tcp_set_sg_null(c);
2354 else if (rq_data_dir(rq) == WRITE &&
2355 req->data_len <= nvme_tcp_inline_data_size(req))
2356 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2358 nvme_tcp_set_sg_host_data(c, req->data_len);
static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
		struct request *rq)
{
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
	struct nvme_tcp_queue *queue = req->queue;
	u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq);
	if (ret)
		return ret;

	req->state = NVME_TCP_SEND_CMD_PDU;
	req->status = cpu_to_le16(NVME_SC_SUCCESS);
	req->offset = 0;
	req->data_sent = 0;
	req->pdu_len = 0;
	req->pdu_sent = 0;
	req->h2cdata_left = 0;
	req->data_len = blk_rq_nr_phys_segments(rq) ?
				blk_rq_payload_bytes(rq) : 0;
	req->curr_bio = rq->bio;
	if (req->curr_bio && req->data_len)
		nvme_tcp_init_iter(req, rq_data_dir(rq));

	if (rq_data_dir(rq) == WRITE &&
	    req->data_len <= nvme_tcp_inline_data_size(req))
		req->pdu_len = req->data_len;

	pdu->hdr.type = nvme_tcp_cmd;
	pdu->hdr.flags = 0;
	if (queue->hdr_digest)
		pdu->hdr.flags |= NVME_TCP_F_HDGST;
	if (queue->data_digest && req->pdu_len) {
		pdu->hdr.flags |= NVME_TCP_F_DDGST;
		ddgst = nvme_tcp_ddgst_len(queue);
	}
	pdu->hdr.hlen = sizeof(*pdu);
	pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
	pdu->hdr.plen =
		cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);

	ret = nvme_tcp_map_data(queue, rq);
	if (unlikely(ret)) {
		nvme_cleanup_cmd(rq);
		dev_err(queue->ctrl->ctrl.device,
			"Failed to map data (%d)\n", ret);
		return ret;
	}

	return 0;
}

static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;

	if (!llist_empty(&queue->req_list))
		queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
}

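/*
 * ->queue_rq() entry point: fail fast if the queue cannot accept the
 * request, otherwise build the command PDU, mark the request started and
 * hand it to the send path.
 */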
static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct request *rq = bd->rq;
	struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
	bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
	if (unlikely(ret))
		return ret;

	nvme_start_request(rq);

	nvme_tcp_queue_request(req, true, bd->last);

	return BLK_STS_OK;
}

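/*
 * Map blk-mq hardware contexts onto the controller's I/O queues: default
 * and read queues are either split or shared depending on whether
 * dedicated write queues were requested, and poll queues (if configured)
 * are mapped after them.
 */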
static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

	if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
		/* separate read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_READ];
		set->map[HCTX_TYPE_READ].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
	} else {
		/* shared read/write queues */
		set->map[HCTX_TYPE_DEFAULT].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
		set->map[HCTX_TYPE_READ].nr_queues =
			ctrl->io_queues[HCTX_TYPE_DEFAULT];
		set->map[HCTX_TYPE_READ].queue_offset = 0;
	}
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);

	if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
		/* map dedicated poll queues only if we have queues left */
		set->map[HCTX_TYPE_POLL].nr_queues =
				ctrl->io_queues[HCTX_TYPE_POLL];
		set->map[HCTX_TYPE_POLL].queue_offset =
			ctrl->io_queues[HCTX_TYPE_DEFAULT] +
			ctrl->io_queues[HCTX_TYPE_READ];
		blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
	}

	dev_info(ctrl->ctrl.device,
		"mapped %d/%d/%d default/read/poll queues.\n",
		ctrl->io_queues[HCTX_TYPE_DEFAULT],
		ctrl->io_queues[HCTX_TYPE_READ],
		ctrl->io_queues[HCTX_TYPE_POLL]);
}

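/*
 * ->poll() entry point: busy-poll the socket when possible, reap any
 * received completions and report how many CQEs were processed.
 */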
static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct nvme_tcp_queue *queue = hctx->driver_data;
	struct sock *sk = queue->sock->sk;

	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		return 0;

	set_bit(NVME_TCP_Q_POLLING, &queue->flags);
	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
		sk_busy_loop(sk, true);
	nvme_tcp_try_recv(queue);
	clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
	return queue->nr_cqe;
}

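/*
 * ->get_address() handler: report the fabrics address options and, while
 * the admin queue is live, append the local source address of its socket.
 */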
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
{
	struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
	struct sockaddr_storage src_addr;
	int ret, len;

	len = nvmf_get_address(ctrl, buf, size);

	mutex_lock(&queue->queue_lock);
	if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
		goto done;
	ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
	if (ret > 0) {
		if (len > 0)
			len--; /* strip trailing newline */
		len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
				(len) ? "," : "", &src_addr);
	}
done:
	mutex_unlock(&queue->queue_lock);
	return len;
}

static const struct blk_mq_ops nvme_tcp_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.commit_rqs	= nvme_tcp_commit_rqs,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_hctx,
	.timeout	= nvme_tcp_timeout,
	.map_queues	= nvme_tcp_map_queues,
	.poll		= nvme_tcp_poll,
};

static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
	.queue_rq	= nvme_tcp_queue_rq,
	.complete	= nvme_complete_rq,
	.init_request	= nvme_tcp_init_request,
	.exit_request	= nvme_tcp_exit_request,
	.init_hctx	= nvme_tcp_init_admin_hctx,
	.timeout	= nvme_tcp_timeout,
};

static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
	.name			= "tcp",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS | NVME_F_BLOCKING,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_tcp_free_ctrl,
	.submit_async_event	= nvme_tcp_submit_async_event,
	.delete_ctrl		= nvme_tcp_delete_ctrl,
	.get_address		= nvme_tcp_get_address,
	.stop_ctrl		= nvme_tcp_stop_ctrl,
};

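/*
 * Check whether a controller matching these connect options already
 * exists, so duplicate connections can be rejected unless explicitly
 * allowed.
 */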
static bool nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	bool found = false;

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
		found = nvmf_ip_options_match(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	return found;
}

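/*
 * Allocate and set up a new TCP controller for the given connect options:
 * validate the target/source addresses, allocate the queue array, register
 * the controller with the core and perform the initial connect.
 */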
static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_tcp_ctrl *ctrl;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ctrl->list);
	ctrl->ctrl.opts = opts;
	ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
				opts->nr_poll_queues + 1;
	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	INIT_DELAYED_WORK(&ctrl->connect_work,
			nvme_tcp_reconnect_ctrl_work);
	INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
	INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);

	if (!(opts->mask & NVMF_OPT_TRSVCID)) {
		opts->trsvcid =
			kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
		if (!opts->trsvcid) {
			ret = -ENOMEM;
			goto out_free_ctrl;
		}
		opts->mask |= NVMF_OPT_TRSVCID;
	}

	ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->traddr, opts->trsvcid, &ctrl->addr);
	if (ret) {
		pr_err("malformed address passed: %s:%s\n",
			opts->traddr, opts->trsvcid);
		goto out_free_ctrl;
	}

	if (opts->mask & NVMF_OPT_HOST_TRADDR) {
		ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
			opts->host_traddr, NULL, &ctrl->src_addr);
		if (ret) {
			pr_err("malformed src address passed: %s\n",
			       opts->host_traddr);
			goto out_free_ctrl;
		}
	}

	if (opts->mask & NVMF_OPT_HOST_IFACE) {
		if (!__dev_get_by_name(&init_net, opts->host_iface)) {
			pr_err("invalid interface passed: %s\n",
			       opts->host_iface);
			ret = -ENODEV;
			goto out_free_ctrl;
		}
	}

	if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
		ret = -EALREADY;
		goto out_free_ctrl;
	}

	ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
				GFP_KERNEL);
	if (!ctrl->queues) {
		ret = -ENOMEM;
		goto out_free_ctrl;
	}

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
	if (ret)
		goto out_kfree_queues;

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		WARN_ON_ONCE(1);
		ret = -EINTR;
		goto out_uninit_ctrl;
	}

	ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
	if (ret)
		goto out_uninit_ctrl;

	dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
		nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
	mutex_unlock(&nvme_tcp_ctrl_mutex);

	return &ctrl->ctrl;

out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
out_kfree_queues:
	kfree(ctrl->queues);
out_free_ctrl:
	kfree(ctrl);
	return ERR_PTR(ret);
}

static struct nvmf_transport_ops nvme_tcp_transport = {
	.name		= "tcp",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR,
	.allowed_opts	= NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
			  NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
			  NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
			  NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
			  NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
	.create_ctrl	= nvme_tcp_create_ctrl,
};

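/*
 * Module init: sanity-check the on-wire PDU structure sizes, allocate the
 * driver workqueue and register the "tcp" fabrics transport.
 */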
static int __init nvme_tcp_init_module(void)
{
	BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
	BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);

	nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
			WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!nvme_tcp_wq)
		return -ENOMEM;

	nvmf_register_transport(&nvme_tcp_transport);
	return 0;
}

static void __exit nvme_tcp_cleanup_module(void)
{
	struct nvme_tcp_ctrl *ctrl;

	nvmf_unregister_transport(&nvme_tcp_transport);

	mutex_lock(&nvme_tcp_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_tcp_ctrl_mutex);
	flush_workqueue(nvme_delete_wq);

	destroy_workqueue(nvme_tcp_wq);
}

module_init(nvme_tcp_init_module);
module_exit(nvme_tcp_cleanup_module);

MODULE_LICENSE("GPL v2");