nvme-tcp: replace sg_init_marker() with sg_init_table()
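As the subject line says, this change swaps the scatterlist initializer used for data-digest updates; the call site is nvme_tcp_ddgst_update() at line 390 of the listing below. A minimal before/after sketch (illustrative only, not a verbatim diff hunk from the commit):

	struct scatterlist sg;

	/* before: sg_init_marker(&sg, 1);  -- only sets the end marker */
	sg_init_table(&sg, 1);   /* also zeroes the single entry before use */
	sg_set_page(&sg, page, len, off);
	ahash_request_set_crypt(hash, &sg, NULL, len);
	crypto_ahash_update(hash);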
drivers/nvme/host/tcp.c (linux-block.git)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * NVMe over Fabrics TCP host.
4 * Copyright (c) 2018 Lightbits Labs. All rights reserved.
5 */
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7#include <linux/module.h>
8#include <linux/init.h>
9#include <linux/slab.h>
10#include <linux/err.h>
11#include <linux/nvme-tcp.h>
12#include <net/sock.h>
13#include <net/tcp.h>
14#include <linux/blk-mq.h>
15#include <crypto/hash.h>
16#include <net/busy_poll.h>
17
18#include "nvme.h"
19#include "fabrics.h"
20
21struct nvme_tcp_queue;
22
23/* Define the socket priority to use for connections where it is desirable
24 * that the NIC consider performing optimized packet processing or filtering.
25 * A non-zero value being sufficient to indicate general consideration of any
26 * possible optimization. Making it a module param allows for alternative
27 * values that may be unique for some NIC implementations.
28 */
29static int so_priority;
30module_param(so_priority, int, 0644);
31MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
32
33#ifdef CONFIG_DEBUG_LOCK_ALLOC
34/* lockdep can detect a circular dependency of the form
35 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
36 * because dependencies are tracked for both nvme-tcp and user contexts. Using
37 * a separate class prevents lockdep from conflating nvme-tcp socket use with
38 * user-space socket API use.
39 */
40static struct lock_class_key nvme_tcp_sk_key[2];
41static struct lock_class_key nvme_tcp_slock_key[2];
42
43static void nvme_tcp_reclassify_socket(struct socket *sock)
44{
45 struct sock *sk = sock->sk;
46
47 if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
48 return;
49
50 switch (sk->sk_family) {
51 case AF_INET:
52 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME",
53 &nvme_tcp_slock_key[0],
54 "sk_lock-AF_INET-NVME",
55 &nvme_tcp_sk_key[0]);
56 break;
57 case AF_INET6:
58 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME",
59 &nvme_tcp_slock_key[1],
60 "sk_lock-AF_INET6-NVME",
61 &nvme_tcp_sk_key[1]);
62 break;
63 default:
64 WARN_ON_ONCE(1);
65 }
66}
67#else
68static void nvme_tcp_reclassify_socket(struct socket *sock) { }
69#endif
70
71enum nvme_tcp_send_state {
72 NVME_TCP_SEND_CMD_PDU = 0,
73 NVME_TCP_SEND_H2C_PDU,
74 NVME_TCP_SEND_DATA,
75 NVME_TCP_SEND_DDGST,
76};
77
78struct nvme_tcp_request {
79 struct nvme_request req;
80 void *pdu;
81 struct nvme_tcp_queue *queue;
82 u32 data_len;
83 u32 pdu_len;
84 u32 pdu_sent;
85 u32 h2cdata_left;
86 u32 h2cdata_offset;
87 u16 ttag;
88 __le16 status;
89 struct list_head entry;
90 struct llist_node lentry;
91 __le32 ddgst;
92
93 struct bio *curr_bio;
94 struct iov_iter iter;
95
96 /* send state */
97 size_t offset;
98 size_t data_sent;
99 enum nvme_tcp_send_state state;
100};
101
102enum nvme_tcp_queue_flags {
103 NVME_TCP_Q_ALLOCATED = 0,
104 NVME_TCP_Q_LIVE = 1,
105 NVME_TCP_Q_POLLING = 2,
106};
107
108enum nvme_tcp_recv_state {
109 NVME_TCP_RECV_PDU = 0,
110 NVME_TCP_RECV_DATA,
111 NVME_TCP_RECV_DDGST,
112};
113
114struct nvme_tcp_ctrl;
115struct nvme_tcp_queue {
116 struct socket *sock;
117 struct work_struct io_work;
118 int io_cpu;
119
120 struct mutex queue_lock;
121 struct mutex send_mutex;
122 struct llist_head req_list;
123 struct list_head send_list;
124
125 /* recv state */
126 void *pdu;
127 int pdu_remaining;
128 int pdu_offset;
129 size_t data_remaining;
130 size_t ddgst_remaining;
131 unsigned int nr_cqe;
132
133 /* send state */
134 struct nvme_tcp_request *request;
135
136 u32 maxh2cdata;
137 size_t cmnd_capsule_len;
138 struct nvme_tcp_ctrl *ctrl;
139 unsigned long flags;
140 bool rd_enabled;
141
142 bool hdr_digest;
143 bool data_digest;
144 struct ahash_request *rcv_hash;
145 struct ahash_request *snd_hash;
146 __le32 exp_ddgst;
147 __le32 recv_ddgst;
148
149 struct page_frag_cache pf_cache;
150
151 void (*state_change)(struct sock *);
152 void (*data_ready)(struct sock *);
153 void (*write_space)(struct sock *);
154};
155
156struct nvme_tcp_ctrl {
157 /* read only in the hot path */
158 struct nvme_tcp_queue *queues;
159 struct blk_mq_tag_set tag_set;
160
161 /* other member variables */
162 struct list_head list;
163 struct blk_mq_tag_set admin_tag_set;
164 struct sockaddr_storage addr;
165 struct sockaddr_storage src_addr;
166 struct nvme_ctrl ctrl;
167
168 struct work_struct err_work;
169 struct delayed_work connect_work;
170 struct nvme_tcp_request async_req;
171 u32 io_queues[HCTX_MAX_TYPES];
172};
173
174static LIST_HEAD(nvme_tcp_ctrl_list);
175static DEFINE_MUTEX(nvme_tcp_ctrl_mutex);
176static struct workqueue_struct *nvme_tcp_wq;
177static const struct blk_mq_ops nvme_tcp_mq_ops;
178static const struct blk_mq_ops nvme_tcp_admin_mq_ops;
179static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
180
181static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
182{
183 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
184}
185
186static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
187{
188 return queue - queue->ctrl->queues;
189}
190
191static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue)
192{
193 u32 queue_idx = nvme_tcp_queue_id(queue);
194
195 if (queue_idx == 0)
196 return queue->ctrl->admin_tag_set.tags[queue_idx];
197 return queue->ctrl->tag_set.tags[queue_idx - 1];
198}
199
200static inline u8 nvme_tcp_hdgst_len(struct nvme_tcp_queue *queue)
201{
202 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
203}
204
205static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue)
206{
207 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0;
208}
209
210static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
211{
212 if (nvme_is_fabrics(req->req.cmd))
213 return NVME_TCP_ADMIN_CCSZ;
214 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
215}
216
217static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
218{
219 return req == &req->queue->ctrl->async_req;
220}
221
222static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
223{
224 struct request *rq;
225
226 if (unlikely(nvme_tcp_async_req(req)))
227 return false; /* async events don't have a request */
228
229 rq = blk_mq_rq_from_pdu(req);
230
231 return rq_data_dir(rq) == WRITE && req->data_len &&
232 req->data_len <= nvme_tcp_inline_data_size(req);
233}
234
235static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
236{
237 return req->iter.bvec->bv_page;
238}
239
240static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
241{
242 return req->iter.bvec->bv_offset + req->iter.iov_offset;
243}
244
245static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
246{
247 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
248 req->pdu_len - req->pdu_sent);
249}
250
251static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
252{
253 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
254 req->pdu_len - req->pdu_sent : 0;
255}
256
257static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
258 int len)
259{
260 return nvme_tcp_pdu_data_left(req) <= len;
261}
262
263static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
264 unsigned int dir)
265{
266 struct request *rq = blk_mq_rq_from_pdu(req);
267 struct bio_vec *vec;
268 unsigned int size;
269 int nr_bvec;
270 size_t offset;
271
272 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
273 vec = &rq->special_vec;
274 nr_bvec = 1;
275 size = blk_rq_payload_bytes(rq);
276 offset = 0;
277 } else {
278 struct bio *bio = req->curr_bio;
279 struct bvec_iter bi;
280 struct bio_vec bv;
281
282 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
283 nr_bvec = 0;
284 bio_for_each_bvec(bv, bio, bi) {
285 nr_bvec++;
286 }
287 size = bio->bi_iter.bi_size;
288 offset = bio->bi_iter.bi_bvec_done;
289 }
290
291 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
292 req->iter.iov_offset = offset;
293}
294
295static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
296 int len)
297{
298 req->data_sent += len;
299 req->pdu_sent += len;
300 iov_iter_advance(&req->iter, len);
301 if (!iov_iter_count(&req->iter) &&
302 req->data_sent < req->data_len) {
303 req->curr_bio = req->curr_bio->bi_next;
304 nvme_tcp_init_iter(req, WRITE);
305 }
306}
307
308static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
309{
310 int ret;
311
312 /* drain the send queue as much as we can... */
313 do {
314 ret = nvme_tcp_try_send(queue);
315 } while (ret > 0);
316}
317
318static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
319{
320 return !list_empty(&queue->send_list) ||
321 !llist_empty(&queue->req_list);
322}
323
324static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
325 bool sync, bool last)
326{
327 struct nvme_tcp_queue *queue = req->queue;
328 bool empty;
329
330 empty = llist_add(&req->lentry, &queue->req_list) &&
331 list_empty(&queue->send_list) && !queue->request;
332
333 /*
334 * If we're first on the send_list and can send directly, do so;
335 * otherwise queue io_work. Only attempt the direct send when we
336 * are on the same cpu, so we don't introduce contention.
337 */
338 if (queue->io_cpu == raw_smp_processor_id() &&
339 sync && empty && mutex_trylock(&queue->send_mutex)) {
340 nvme_tcp_send_all(queue);
341 mutex_unlock(&queue->send_mutex);
342 }
343
344 if (last && nvme_tcp_queue_more(queue))
345 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
346}
347
348static void nvme_tcp_process_req_list(struct nvme_tcp_queue *queue)
349{
350 struct nvme_tcp_request *req;
351 struct llist_node *node;
352
353 for (node = llist_del_all(&queue->req_list); node; node = node->next) {
354 req = llist_entry(node, struct nvme_tcp_request, lentry);
355 list_add(&req->entry, &queue->send_list);
356 }
357}
358
359static inline struct nvme_tcp_request *
360nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
361{
362 struct nvme_tcp_request *req;
363
364 req = list_first_entry_or_null(&queue->send_list,
365 struct nvme_tcp_request, entry);
366 if (!req) {
367 nvme_tcp_process_req_list(queue);
368 req = list_first_entry_or_null(&queue->send_list,
369 struct nvme_tcp_request, entry);
370 if (unlikely(!req))
371 return NULL;
372 }
373
374 list_del(&req->entry);
375 return req;
376}
377
378static inline void nvme_tcp_ddgst_final(struct ahash_request *hash,
379 __le32 *dgst)
380{
381 ahash_request_set_crypt(hash, NULL, (u8 *)dgst, 0);
382 crypto_ahash_final(hash);
383}
384
385static inline void nvme_tcp_ddgst_update(struct ahash_request *hash,
386 struct page *page, off_t off, size_t len)
387{
388 struct scatterlist sg;
389
390 sg_init_table(&sg, 1);
391 sg_set_page(&sg, page, len, off);
392 ahash_request_set_crypt(hash, &sg, NULL, len);
393 crypto_ahash_update(hash);
394}
395
396static inline void nvme_tcp_hdgst(struct ahash_request *hash,
397 void *pdu, size_t len)
398{
399 struct scatterlist sg;
400
401 sg_init_one(&sg, pdu, len);
402 ahash_request_set_crypt(hash, &sg, pdu + len, len);
403 crypto_ahash_digest(hash);
404}
405
406static int nvme_tcp_verify_hdgst(struct nvme_tcp_queue *queue,
407 void *pdu, size_t pdu_len)
408{
409 struct nvme_tcp_hdr *hdr = pdu;
410 __le32 recv_digest;
411 __le32 exp_digest;
412
413 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) {
414 dev_err(queue->ctrl->ctrl.device,
415 "queue %d: header digest flag is cleared\n",
416 nvme_tcp_queue_id(queue));
417 return -EPROTO;
418 }
419
420 recv_digest = *(__le32 *)(pdu + hdr->hlen);
421 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len);
422 exp_digest = *(__le32 *)(pdu + hdr->hlen);
423 if (recv_digest != exp_digest) {
424 dev_err(queue->ctrl->ctrl.device,
425 "header digest error: recv %#x expected %#x\n",
426 le32_to_cpu(recv_digest), le32_to_cpu(exp_digest));
427 return -EIO;
428 }
429
430 return 0;
431}
432
433static int nvme_tcp_check_ddgst(struct nvme_tcp_queue *queue, void *pdu)
434{
435 struct nvme_tcp_hdr *hdr = pdu;
436 u8 digest_len = nvme_tcp_hdgst_len(queue);
437 u32 len;
438
439 len = le32_to_cpu(hdr->plen) - hdr->hlen -
440 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0);
441
442 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) {
443 dev_err(queue->ctrl->ctrl.device,
444 "queue %d: data digest flag is cleared\n",
445 nvme_tcp_queue_id(queue));
446 return -EPROTO;
447 }
448 crypto_ahash_init(queue->rcv_hash);
449
450 return 0;
451}
452
453static void nvme_tcp_exit_request(struct blk_mq_tag_set *set,
454 struct request *rq, unsigned int hctx_idx)
455{
456 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
457
458 page_frag_free(req->pdu);
459}
460
461static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
462 struct request *rq, unsigned int hctx_idx,
463 unsigned int numa_node)
464{
465 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
466 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
467 struct nvme_tcp_cmd_pdu *pdu;
468 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
469 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx];
470 u8 hdgst = nvme_tcp_hdgst_len(queue);
471
472 req->pdu = page_frag_alloc(&queue->pf_cache,
473 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
474 GFP_KERNEL | __GFP_ZERO);
475 if (!req->pdu)
476 return -ENOMEM;
477
478 pdu = req->pdu;
479 req->queue = queue;
480 nvme_req(rq)->ctrl = &ctrl->ctrl;
481 nvme_req(rq)->cmd = &pdu->cmd;
482
483 return 0;
484}
485
486static int nvme_tcp_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
487 unsigned int hctx_idx)
488{
489 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
490 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1];
491
492 hctx->driver_data = queue;
493 return 0;
494}
495
496static int nvme_tcp_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
497 unsigned int hctx_idx)
498{
499 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data);
500 struct nvme_tcp_queue *queue = &ctrl->queues[0];
501
502 hctx->driver_data = queue;
503 return 0;
504}
505
506static enum nvme_tcp_recv_state
507nvme_tcp_recv_state(struct nvme_tcp_queue *queue)
508{
509 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU :
510 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST :
511 NVME_TCP_RECV_DATA;
512}
513
514static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
515{
516 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) +
517 nvme_tcp_hdgst_len(queue);
518 queue->pdu_offset = 0;
519 queue->data_remaining = -1;
520 queue->ddgst_remaining = 0;
521}
522
523static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
524{
525 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
526 return;
527
528 dev_warn(ctrl->device, "starting error recovery\n");
529 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
530}
531
532static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
533 struct nvme_completion *cqe)
534{
535 struct nvme_tcp_request *req;
536 struct request *rq;
537
538 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id);
539 if (!rq) {
540 dev_err(queue->ctrl->ctrl.device,
541 "got bad cqe.command_id %#x on queue %d\n",
542 cqe->command_id, nvme_tcp_queue_id(queue));
543 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
544 return -EINVAL;
545 }
546
547 req = blk_mq_rq_to_pdu(rq);
548 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
549 req->status = cqe->status;
550
551 if (!nvme_try_complete_req(rq, req->status, cqe->result))
552 nvme_complete_rq(rq);
553 queue->nr_cqe++;
554
555 return 0;
556}
557
558static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
559 struct nvme_tcp_data_pdu *pdu)
560{
561 struct request *rq;
562
563 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
564 if (!rq) {
565 dev_err(queue->ctrl->ctrl.device,
566 "got bad c2hdata.command_id %#x on queue %d\n",
567 pdu->command_id, nvme_tcp_queue_id(queue));
568 return -ENOENT;
569 }
570
571 if (!blk_rq_payload_bytes(rq)) {
572 dev_err(queue->ctrl->ctrl.device,
573 "queue %d tag %#x unexpected data\n",
574 nvme_tcp_queue_id(queue), rq->tag);
575 return -EIO;
576 }
577
578 queue->data_remaining = le32_to_cpu(pdu->data_length);
579
580 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
581 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
582 dev_err(queue->ctrl->ctrl.device,
583 "queue %d tag %#x SUCCESS set but not last PDU\n",
584 nvme_tcp_queue_id(queue), rq->tag);
585 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
586 return -EPROTO;
587 }
588
589 return 0;
590}
591
592static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
593 struct nvme_tcp_rsp_pdu *pdu)
594{
595 struct nvme_completion *cqe = &pdu->cqe;
596 int ret = 0;
597
598 /*
599 * AEN requests are special as they don't time out and can
600 * survive any kind of queue freeze and often don't respond to
601 * aborts. We don't even bother to allocate a struct request
602 * for them but rather special case them here.
603 */
604 if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
605 cqe->command_id)))
606 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
607 &cqe->result);
608 else
609 ret = nvme_tcp_process_nvme_cqe(queue, cqe);
610
611 return ret;
612}
613
614static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
615{
616 struct nvme_tcp_data_pdu *data = req->pdu;
617 struct nvme_tcp_queue *queue = req->queue;
618 struct request *rq = blk_mq_rq_from_pdu(req);
619 u32 h2cdata_sent = req->pdu_len;
620 u8 hdgst = nvme_tcp_hdgst_len(queue);
621 u8 ddgst = nvme_tcp_ddgst_len(queue);
622
623 req->state = NVME_TCP_SEND_H2C_PDU;
624 req->offset = 0;
625 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
626 req->pdu_sent = 0;
627 req->h2cdata_left -= req->pdu_len;
628 req->h2cdata_offset += h2cdata_sent;
629
630 memset(data, 0, sizeof(*data));
631 data->hdr.type = nvme_tcp_h2c_data;
632 if (!req->h2cdata_left)
633 data->hdr.flags = NVME_TCP_F_DATA_LAST;
634 if (queue->hdr_digest)
635 data->hdr.flags |= NVME_TCP_F_HDGST;
636 if (queue->data_digest)
637 data->hdr.flags |= NVME_TCP_F_DDGST;
638 data->hdr.hlen = sizeof(*data);
639 data->hdr.pdo = data->hdr.hlen + hdgst;
640 data->hdr.plen =
641 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
642 data->ttag = req->ttag;
643 data->command_id = nvme_cid(rq);
644 data->data_offset = cpu_to_le32(req->h2cdata_offset);
645 data->data_length = cpu_to_le32(req->pdu_len);
646}
647
648static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
649 struct nvme_tcp_r2t_pdu *pdu)
650{
651 struct nvme_tcp_request *req;
652 struct request *rq;
653 u32 r2t_length = le32_to_cpu(pdu->r2t_length);
654 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset);
655
656 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id);
657 if (!rq) {
658 dev_err(queue->ctrl->ctrl.device,
659 "got bad r2t.command_id %#x on queue %d\n",
660 pdu->command_id, nvme_tcp_queue_id(queue));
661 return -ENOENT;
662 }
663 req = blk_mq_rq_to_pdu(rq);
664
665 if (unlikely(!r2t_length)) {
666 dev_err(queue->ctrl->ctrl.device,
667 "req %d r2t len is %u, probably a bug...\n",
668 rq->tag, r2t_length);
669 return -EPROTO;
670 }
671
672 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
673 dev_err(queue->ctrl->ctrl.device,
674 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
675 rq->tag, r2t_length, req->data_len, req->data_sent);
676 return -EPROTO;
677 }
678
679 if (unlikely(r2t_offset < req->data_sent)) {
680 dev_err(queue->ctrl->ctrl.device,
681 "req %d unexpected r2t offset %u (expected %zu)\n",
682 rq->tag, r2t_offset, req->data_sent);
683 return -EPROTO;
684 }
685
686 req->pdu_len = 0;
687 req->h2cdata_left = r2t_length;
688 req->h2cdata_offset = r2t_offset;
689 req->ttag = pdu->ttag;
690
691 nvme_tcp_setup_h2c_data_pdu(req);
692 nvme_tcp_queue_request(req, false, true);
693
694 return 0;
695}
696
697static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
698 unsigned int *offset, size_t *len)
699{
700 struct nvme_tcp_hdr *hdr;
701 char *pdu = queue->pdu;
702 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining);
703 int ret;
704
705 ret = skb_copy_bits(skb, *offset,
706 &pdu[queue->pdu_offset], rcv_len);
707 if (unlikely(ret))
708 return ret;
709
710 queue->pdu_remaining -= rcv_len;
711 queue->pdu_offset += rcv_len;
712 *offset += rcv_len;
713 *len -= rcv_len;
714 if (queue->pdu_remaining)
715 return 0;
716
717 hdr = queue->pdu;
718 if (queue->hdr_digest) {
719 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
720 if (unlikely(ret))
721 return ret;
722 }
723
724
725 if (queue->data_digest) {
726 ret = nvme_tcp_check_ddgst(queue, queue->pdu);
727 if (unlikely(ret))
728 return ret;
729 }
730
731 switch (hdr->type) {
732 case nvme_tcp_c2h_data:
733 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu);
734 case nvme_tcp_rsp:
735 nvme_tcp_init_recv_ctx(queue);
736 return nvme_tcp_handle_comp(queue, (void *)queue->pdu);
737 case nvme_tcp_r2t:
738 nvme_tcp_init_recv_ctx(queue);
739 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
740 default:
741 dev_err(queue->ctrl->ctrl.device,
742 "unsupported pdu type (%d)\n", hdr->type);
743 return -EINVAL;
744 }
745}
746
747static inline void nvme_tcp_end_request(struct request *rq, u16 status)
748{
749 union nvme_result res = {};
750
751 if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
752 nvme_complete_rq(rq);
753}
754
755static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
756 unsigned int *offset, size_t *len)
757{
758 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
759 struct request *rq =
760 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id);
761 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
762
763 while (true) {
764 int recv_len, ret;
765
766 recv_len = min_t(size_t, *len, queue->data_remaining);
767 if (!recv_len)
768 break;
769
770 if (!iov_iter_count(&req->iter)) {
771 req->curr_bio = req->curr_bio->bi_next;
772
773 /*
774 * If we don't have any bios it means that the controller
775 * sent more data than we requested, hence error
776 */
777 if (!req->curr_bio) {
778 dev_err(queue->ctrl->ctrl.device,
779 "queue %d no space in request %#x",
780 nvme_tcp_queue_id(queue), rq->tag);
781 nvme_tcp_init_recv_ctx(queue);
782 return -EIO;
783 }
784 nvme_tcp_init_iter(req, READ);
785 }
786
787 /* we can read only from what is left in this bio */
788 recv_len = min_t(size_t, recv_len,
789 iov_iter_count(&req->iter));
790
791 if (queue->data_digest)
792 ret = skb_copy_and_hash_datagram_iter(skb, *offset,
793 &req->iter, recv_len, queue->rcv_hash);
794 else
795 ret = skb_copy_datagram_iter(skb, *offset,
796 &req->iter, recv_len);
797 if (ret) {
798 dev_err(queue->ctrl->ctrl.device,
799 "queue %d failed to copy request %#x data",
800 nvme_tcp_queue_id(queue), rq->tag);
801 return ret;
802 }
803
804 *len -= recv_len;
805 *offset += recv_len;
806 queue->data_remaining -= recv_len;
807 }
808
809 if (!queue->data_remaining) {
810 if (queue->data_digest) {
811 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
812 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
813 } else {
814 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
815 nvme_tcp_end_request(rq,
816 le16_to_cpu(req->status));
817 queue->nr_cqe++;
818 }
819 nvme_tcp_init_recv_ctx(queue);
820 }
821 }
822
823 return 0;
824}
825
826static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
827 struct sk_buff *skb, unsigned int *offset, size_t *len)
828{
829 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
830 char *ddgst = (char *)&queue->recv_ddgst;
831 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
832 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
833 int ret;
834
835 ret = skb_copy_bits(skb, *offset, &ddgst[off], recv_len);
836 if (unlikely(ret))
837 return ret;
838
839 queue->ddgst_remaining -= recv_len;
840 *offset += recv_len;
841 *len -= recv_len;
842 if (queue->ddgst_remaining)
843 return 0;
844
845 if (queue->recv_ddgst != queue->exp_ddgst) {
846 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
847 pdu->command_id);
848 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
849
850 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
851
852 dev_err(queue->ctrl->ctrl.device,
853 "data digest error: recv %#x expected %#x\n",
854 le32_to_cpu(queue->recv_ddgst),
855 le32_to_cpu(queue->exp_ddgst));
856 }
857
858 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
859 struct request *rq = nvme_cid_to_rq(nvme_tcp_tagset(queue),
860 pdu->command_id);
861 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
862
863 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
864 queue->nr_cqe++;
865 }
866
867 nvme_tcp_init_recv_ctx(queue);
868 return 0;
869}
870
871static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
872 unsigned int offset, size_t len)
873{
874 struct nvme_tcp_queue *queue = desc->arg.data;
875 size_t consumed = len;
876 int result;
877
878 while (len) {
879 switch (nvme_tcp_recv_state(queue)) {
880 case NVME_TCP_RECV_PDU:
881 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len);
882 break;
883 case NVME_TCP_RECV_DATA:
884 result = nvme_tcp_recv_data(queue, skb, &offset, &len);
885 break;
886 case NVME_TCP_RECV_DDGST:
887 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len);
888 break;
889 default:
890 result = -EFAULT;
891 }
892 if (result) {
893 dev_err(queue->ctrl->ctrl.device,
894 "receive failed: %d\n", result);
895 queue->rd_enabled = false;
896 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
897 return result;
898 }
899 }
900
901 return consumed;
902}
903
904static void nvme_tcp_data_ready(struct sock *sk)
905{
906 struct nvme_tcp_queue *queue;
907
908 read_lock_bh(&sk->sk_callback_lock);
909 queue = sk->sk_user_data;
910 if (likely(queue && queue->rd_enabled) &&
911 !test_bit(NVME_TCP_Q_POLLING, &queue->flags))
912 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
913 read_unlock_bh(&sk->sk_callback_lock);
914}
915
916static void nvme_tcp_write_space(struct sock *sk)
917{
918 struct nvme_tcp_queue *queue;
919
920 read_lock_bh(&sk->sk_callback_lock);
921 queue = sk->sk_user_data;
922 if (likely(queue && sk_stream_is_writeable(sk))) {
923 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
924 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
925 }
926 read_unlock_bh(&sk->sk_callback_lock);
927}
928
929static void nvme_tcp_state_change(struct sock *sk)
930{
931 struct nvme_tcp_queue *queue;
932
933 read_lock_bh(&sk->sk_callback_lock);
934 queue = sk->sk_user_data;
935 if (!queue)
936 goto done;
937
938 switch (sk->sk_state) {
939 case TCP_CLOSE:
940 case TCP_CLOSE_WAIT:
941 case TCP_LAST_ACK:
942 case TCP_FIN_WAIT1:
943 case TCP_FIN_WAIT2:
944 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
945 break;
946 default:
947 dev_info(queue->ctrl->ctrl.device,
948 "queue %d socket state %d\n",
949 nvme_tcp_queue_id(queue), sk->sk_state);
950 }
951
952 queue->state_change(sk);
953done:
954 read_unlock_bh(&sk->sk_callback_lock);
955}
956
957static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
958{
959 queue->request = NULL;
960}
961
962static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
963{
964 if (nvme_tcp_async_req(req)) {
965 union nvme_result res = {};
966
967 nvme_complete_async_event(&req->queue->ctrl->ctrl,
968 cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
969 } else {
970 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
971 NVME_SC_HOST_PATH_ERROR);
972 }
973}
974
975static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
976{
977 struct nvme_tcp_queue *queue = req->queue;
978 int req_data_len = req->data_len;
979 u32 h2cdata_left = req->h2cdata_left;
980
981 while (true) {
982 struct page *page = nvme_tcp_req_cur_page(req);
983 size_t offset = nvme_tcp_req_cur_offset(req);
984 size_t len = nvme_tcp_req_cur_length(req);
985 bool last = nvme_tcp_pdu_last_send(req, len);
986 int req_data_sent = req->data_sent;
987 int ret, flags = MSG_DONTWAIT;
988
989 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
990 flags |= MSG_EOR;
991 else
992 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
993
994 if (sendpage_ok(page)) {
995 ret = kernel_sendpage(queue->sock, page, offset, len,
996 flags);
997 } else {
998 ret = sock_no_sendpage(queue->sock, page, offset, len,
999 flags);
1000 }
1001 if (ret <= 0)
1002 return ret;
1003
1004 if (queue->data_digest)
1005 nvme_tcp_ddgst_update(queue->snd_hash, page,
1006 offset, ret);
1007
1008 /*
1009 * update the request iterator except for the last payload send
1010 * in the request where we don't want to modify it as we may
1011 * compete with the RX path completing the request.
1012 */
1013 if (req_data_sent + ret < req_data_len)
1014 nvme_tcp_advance_req(req, ret);
1015
1016 /* fully successful last send in current PDU */
1017 if (last && ret == len) {
1018 if (queue->data_digest) {
1019 nvme_tcp_ddgst_final(queue->snd_hash,
1020 &req->ddgst);
1021 req->state = NVME_TCP_SEND_DDGST;
1022 req->offset = 0;
1023 } else {
1024 if (h2cdata_left)
1025 nvme_tcp_setup_h2c_data_pdu(req);
1026 else
1027 nvme_tcp_done_send_req(queue);
1028 }
1029 return 1;
1030 }
1031 }
1032 return -EAGAIN;
1033}
1034
1035static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1036{
1037 struct nvme_tcp_queue *queue = req->queue;
1038 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
1039 bool inline_data = nvme_tcp_has_inline_data(req);
1040 u8 hdgst = nvme_tcp_hdgst_len(queue);
1041 int len = sizeof(*pdu) + hdgst - req->offset;
1042 int flags = MSG_DONTWAIT;
1043 int ret;
1044
1045 if (inline_data || nvme_tcp_queue_more(queue))
1046 flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST;
1047 else
1048 flags |= MSG_EOR;
1049
1050 if (queue->hdr_digest && !req->offset)
1051 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1052
1053 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1054 offset_in_page(pdu) + req->offset, len, flags);
1055 if (unlikely(ret <= 0))
1056 return ret;
1057
1058 len -= ret;
1059 if (!len) {
1060 if (inline_data) {
1061 req->state = NVME_TCP_SEND_DATA;
1062 if (queue->data_digest)
1063 crypto_ahash_init(queue->snd_hash);
1064 } else {
1065 nvme_tcp_done_send_req(queue);
1066 }
1067 return 1;
1068 }
1069 req->offset += ret;
1070
1071 return -EAGAIN;
1072}
1073
1074static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1075{
1076 struct nvme_tcp_queue *queue = req->queue;
1077 struct nvme_tcp_data_pdu *pdu = req->pdu;
1078 u8 hdgst = nvme_tcp_hdgst_len(queue);
1079 int len = sizeof(*pdu) - req->offset + hdgst;
1080 int ret;
1081
1082 if (queue->hdr_digest && !req->offset)
1083 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu));
1084
1085 if (!req->h2cdata_left)
1086 ret = kernel_sendpage(queue->sock, virt_to_page(pdu),
1087 offset_in_page(pdu) + req->offset, len,
1088 MSG_DONTWAIT | MSG_MORE | MSG_SENDPAGE_NOTLAST);
1089 else
1090 ret = sock_no_sendpage(queue->sock, virt_to_page(pdu),
1091 offset_in_page(pdu) + req->offset, len,
1092 MSG_DONTWAIT | MSG_MORE);
1093 if (unlikely(ret <= 0))
1094 return ret;
1095
1096 len -= ret;
1097 if (!len) {
1098 req->state = NVME_TCP_SEND_DATA;
1099 if (queue->data_digest)
1100 crypto_ahash_init(queue->snd_hash);
1101 return 1;
1102 }
1103 req->offset += ret;
1104
1105 return -EAGAIN;
1106}
1107
1108static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1109{
1110 struct nvme_tcp_queue *queue = req->queue;
1111 size_t offset = req->offset;
1112 u32 h2cdata_left = req->h2cdata_left;
1113 int ret;
1114 struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
1115 struct kvec iov = {
1116 .iov_base = (u8 *)&req->ddgst + req->offset,
1117 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1118 };
1119
1120 if (nvme_tcp_queue_more(queue))
1121 msg.msg_flags |= MSG_MORE;
1122 else
1123 msg.msg_flags |= MSG_EOR;
1124
1125 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1126 if (unlikely(ret <= 0))
1127 return ret;
1128
1129 if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
1130 if (h2cdata_left)
1131 nvme_tcp_setup_h2c_data_pdu(req);
1132 else
1133 nvme_tcp_done_send_req(queue);
1134 return 1;
1135 }
1136
1137 req->offset += ret;
1138 return -EAGAIN;
1139}
1140
1141static int nvme_tcp_try_send(struct nvme_tcp_queue *queue)
1142{
1143 struct nvme_tcp_request *req;
1144 int ret = 1;
1145
1146 if (!queue->request) {
1147 queue->request = nvme_tcp_fetch_request(queue);
1148 if (!queue->request)
1149 return 0;
1150 }
1151 req = queue->request;
1152
1153 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1154 ret = nvme_tcp_try_send_cmd_pdu(req);
1155 if (ret <= 0)
1156 goto done;
1157 if (!nvme_tcp_has_inline_data(req))
1158 return ret;
1159 }
1160
1161 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1162 ret = nvme_tcp_try_send_data_pdu(req);
1163 if (ret <= 0)
1164 goto done;
1165 }
1166
1167 if (req->state == NVME_TCP_SEND_DATA) {
1168 ret = nvme_tcp_try_send_data(req);
1169 if (ret <= 0)
1170 goto done;
1171 }
1172
1173 if (req->state == NVME_TCP_SEND_DDGST)
1174 ret = nvme_tcp_try_send_ddgst(req);
1175done:
1176 if (ret == -EAGAIN) {
1177 ret = 0;
1178 } else if (ret < 0) {
1179 dev_err(queue->ctrl->ctrl.device,
1180 "failed to send request %d\n", ret);
1181 nvme_tcp_fail_request(queue->request);
1182 nvme_tcp_done_send_req(queue);
1183 }
1184 return ret;
1185}
1186
1187static int nvme_tcp_try_recv(struct nvme_tcp_queue *queue)
1188{
1189 struct socket *sock = queue->sock;
1190 struct sock *sk = sock->sk;
1191 read_descriptor_t rd_desc;
1192 int consumed;
1193
1194 rd_desc.arg.data = queue;
1195 rd_desc.count = 1;
1196 lock_sock(sk);
1197 queue->nr_cqe = 0;
1198 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb);
1199 release_sock(sk);
1200 return consumed;
1201}
1202
1203static void nvme_tcp_io_work(struct work_struct *w)
1204{
1205 struct nvme_tcp_queue *queue =
1206 container_of(w, struct nvme_tcp_queue, io_work);
1207 unsigned long deadline = jiffies + msecs_to_jiffies(1);
1208
1209 do {
1210 bool pending = false;
1211 int result;
1212
1213 if (mutex_trylock(&queue->send_mutex)) {
1214 result = nvme_tcp_try_send(queue);
1215 mutex_unlock(&queue->send_mutex);
1216 if (result > 0)
1217 pending = true;
1218 else if (unlikely(result < 0))
1219 break;
1220 }
1221
1222 result = nvme_tcp_try_recv(queue);
1223 if (result > 0)
1224 pending = true;
1225 else if (unlikely(result < 0))
1226 return;
1227
1228 if (!pending || !queue->rd_enabled)
1229 return;
1230
1231 } while (!time_after(jiffies, deadline)); /* quota is exhausted */
1232
1233 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
1234}
1235
1236static void nvme_tcp_free_crypto(struct nvme_tcp_queue *queue)
1237{
1238 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash);
1239
1240 ahash_request_free(queue->rcv_hash);
1241 ahash_request_free(queue->snd_hash);
1242 crypto_free_ahash(tfm);
1243}
1244
1245static int nvme_tcp_alloc_crypto(struct nvme_tcp_queue *queue)
1246{
1247 struct crypto_ahash *tfm;
1248
1249 tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
1250 if (IS_ERR(tfm))
1251 return PTR_ERR(tfm);
1252
1253 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1254 if (!queue->snd_hash)
1255 goto free_tfm;
1256 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL);
1257
1258 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL);
1259 if (!queue->rcv_hash)
1260 goto free_snd_hash;
1261 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL);
1262
1263 return 0;
1264free_snd_hash:
1265 ahash_request_free(queue->snd_hash);
1266free_tfm:
1267 crypto_free_ahash(tfm);
1268 return -ENOMEM;
1269}
1270
1271static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl)
1272{
1273 struct nvme_tcp_request *async = &ctrl->async_req;
1274
1275 page_frag_free(async->pdu);
1276}
1277
1278static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl)
1279{
1280 struct nvme_tcp_queue *queue = &ctrl->queues[0];
1281 struct nvme_tcp_request *async = &ctrl->async_req;
1282 u8 hdgst = nvme_tcp_hdgst_len(queue);
1283
1284 async->pdu = page_frag_alloc(&queue->pf_cache,
1285 sizeof(struct nvme_tcp_cmd_pdu) + hdgst,
1286 GFP_KERNEL | __GFP_ZERO);
1287 if (!async->pdu)
1288 return -ENOMEM;
1289
1290 async->queue = &ctrl->queues[0];
1291 return 0;
1292}
1293
1294static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid)
1295{
1296 struct page *page;
1297 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1298 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1299
1300 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1301 return;
1302
1303 if (queue->hdr_digest || queue->data_digest)
1304 nvme_tcp_free_crypto(queue);
1305
1306 if (queue->pf_cache.va) {
1307 page = virt_to_head_page(queue->pf_cache.va);
1308 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
1309 queue->pf_cache.va = NULL;
1310 }
1311 sock_release(queue->sock);
1312 kfree(queue->pdu);
1313 mutex_destroy(&queue->send_mutex);
1314 mutex_destroy(&queue->queue_lock);
1315}
1316
1317static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
1318{
1319 struct nvme_tcp_icreq_pdu *icreq;
1320 struct nvme_tcp_icresp_pdu *icresp;
1321 struct msghdr msg = {};
1322 struct kvec iov;
1323 bool ctrl_hdgst, ctrl_ddgst;
1324 u32 maxh2cdata;
1325 int ret;
1326
1327 icreq = kzalloc(sizeof(*icreq), GFP_KERNEL);
1328 if (!icreq)
1329 return -ENOMEM;
1330
1331 icresp = kzalloc(sizeof(*icresp), GFP_KERNEL);
1332 if (!icresp) {
1333 ret = -ENOMEM;
1334 goto free_icreq;
1335 }
1336
1337 icreq->hdr.type = nvme_tcp_icreq;
1338 icreq->hdr.hlen = sizeof(*icreq);
1339 icreq->hdr.pdo = 0;
1340 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen);
1341 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
1342 icreq->maxr2t = 0; /* single inflight r2t supported */
1343 icreq->hpda = 0; /* no alignment constraint */
1344 if (queue->hdr_digest)
1345 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
1346 if (queue->data_digest)
1347 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
1348
1349 iov.iov_base = icreq;
1350 iov.iov_len = sizeof(*icreq);
1351 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
1352 if (ret < 0)
1353 goto free_icresp;
1354
1355 memset(&msg, 0, sizeof(msg));
1356 iov.iov_base = icresp;
1357 iov.iov_len = sizeof(*icresp);
1358 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
1359 iov.iov_len, msg.msg_flags);
1360 if (ret < 0)
1361 goto free_icresp;
1362
1363 ret = -EINVAL;
1364 if (icresp->hdr.type != nvme_tcp_icresp) {
1365 pr_err("queue %d: bad type returned %d\n",
1366 nvme_tcp_queue_id(queue), icresp->hdr.type);
1367 goto free_icresp;
1368 }
1369
1370 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) {
1371 pr_err("queue %d: bad pdu length returned %d\n",
1372 nvme_tcp_queue_id(queue), icresp->hdr.plen);
1373 goto free_icresp;
1374 }
1375
1376 if (icresp->pfv != NVME_TCP_PFV_1_0) {
1377 pr_err("queue %d: bad pfv returned %d\n",
1378 nvme_tcp_queue_id(queue), icresp->pfv);
1379 goto free_icresp;
1380 }
1381
1382 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE);
1383 if ((queue->data_digest && !ctrl_ddgst) ||
1384 (!queue->data_digest && ctrl_ddgst)) {
1385 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n",
1386 nvme_tcp_queue_id(queue),
1387 queue->data_digest ? "enabled" : "disabled",
1388 ctrl_ddgst ? "enabled" : "disabled");
1389 goto free_icresp;
1390 }
1391
1392 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE);
1393 if ((queue->hdr_digest && !ctrl_hdgst) ||
1394 (!queue->hdr_digest && ctrl_hdgst)) {
1395 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n",
1396 nvme_tcp_queue_id(queue),
1397 queue->hdr_digest ? "enabled" : "disabled",
1398 ctrl_hdgst ? "enabled" : "disabled");
1399 goto free_icresp;
1400 }
1401
1402 if (icresp->cpda != 0) {
1403 pr_err("queue %d: unsupported cpda returned %d\n",
1404 nvme_tcp_queue_id(queue), icresp->cpda);
1405 goto free_icresp;
1406 }
1407
1408 maxh2cdata = le32_to_cpu(icresp->maxdata);
1409 if ((maxh2cdata % 4) || (maxh2cdata < NVME_TCP_MIN_MAXH2CDATA)) {
1410 pr_err("queue %d: invalid maxh2cdata returned %u\n",
1411 nvme_tcp_queue_id(queue), maxh2cdata);
1412 goto free_icresp;
1413 }
1414 queue->maxh2cdata = maxh2cdata;
1415
1416 ret = 0;
1417free_icresp:
1418 kfree(icresp);
1419free_icreq:
1420 kfree(icreq);
1421 return ret;
1422}
1423
1424static bool nvme_tcp_admin_queue(struct nvme_tcp_queue *queue)
1425{
1426 return nvme_tcp_queue_id(queue) == 0;
1427}
1428
1429static bool nvme_tcp_default_queue(struct nvme_tcp_queue *queue)
1430{
1431 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1432 int qid = nvme_tcp_queue_id(queue);
1433
1434 return !nvme_tcp_admin_queue(queue) &&
1435 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT];
1436}
1437
1438static bool nvme_tcp_read_queue(struct nvme_tcp_queue *queue)
1439{
1440 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1441 int qid = nvme_tcp_queue_id(queue);
1442
1443 return !nvme_tcp_admin_queue(queue) &&
1444 !nvme_tcp_default_queue(queue) &&
1445 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1446 ctrl->io_queues[HCTX_TYPE_READ];
1447}
1448
1449static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
1450{
1451 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1452 int qid = nvme_tcp_queue_id(queue);
1453
1454 return !nvme_tcp_admin_queue(queue) &&
1455 !nvme_tcp_default_queue(queue) &&
1456 !nvme_tcp_read_queue(queue) &&
1457 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] +
1458 ctrl->io_queues[HCTX_TYPE_READ] +
1459 ctrl->io_queues[HCTX_TYPE_POLL];
1460}
1461
1462static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
1463{
1464 struct nvme_tcp_ctrl *ctrl = queue->ctrl;
1465 int qid = nvme_tcp_queue_id(queue);
1466 int n = 0;
1467
1468 if (nvme_tcp_default_queue(queue))
1469 n = qid - 1;
1470 else if (nvme_tcp_read_queue(queue))
1471 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1;
1472 else if (nvme_tcp_poll_queue(queue))
1473 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
1474 ctrl->io_queues[HCTX_TYPE_READ] - 1;
1475 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
1476}
1477
1478static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, int qid)
1479{
1480 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1481 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1482 int ret, rcv_pdu_size;
1483
1484 mutex_init(&queue->queue_lock);
1485 queue->ctrl = ctrl;
1486 init_llist_head(&queue->req_list);
1487 INIT_LIST_HEAD(&queue->send_list);
1488 mutex_init(&queue->send_mutex);
1489 INIT_WORK(&queue->io_work, nvme_tcp_io_work);
1490
1491 if (qid > 0)
1492 queue->cmnd_capsule_len = nctrl->ioccsz * 16;
1493 else
1494 queue->cmnd_capsule_len = sizeof(struct nvme_command) +
1495 NVME_TCP_ADMIN_CCSZ;
1496
1497 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM,
1498 IPPROTO_TCP, &queue->sock);
1499 if (ret) {
1500 dev_err(nctrl->device,
1501 "failed to create socket: %d\n", ret);
1502 goto err_destroy_mutex;
1503 }
1504
1505 nvme_tcp_reclassify_socket(queue->sock);
1506
1507 /* Single syn retry */
1508 tcp_sock_set_syncnt(queue->sock->sk, 1);
1509
1510 /* Set TCP no delay */
1511 tcp_sock_set_nodelay(queue->sock->sk);
1512
1513 /*
1514 * Cleanup whatever is sitting in the TCP transmit queue on socket
1515 * close. This is done to prevent stale data from being sent should
1516 * the network connection be restored before TCP times out.
1517 */
1518 sock_no_linger(queue->sock->sk);
1519
1520 if (so_priority > 0)
1521 sock_set_priority(queue->sock->sk, so_priority);
1522
1523 /* Set socket type of service */
1524 if (nctrl->opts->tos >= 0)
1525 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos);
1526
1527 /* Set 10 seconds timeout for icresp recvmsg */
1528 queue->sock->sk->sk_rcvtimeo = 10 * HZ;
1529
1530 queue->sock->sk->sk_allocation = GFP_ATOMIC;
1531 nvme_tcp_set_queue_io_cpu(queue);
1532 queue->request = NULL;
1533 queue->data_remaining = 0;
1534 queue->ddgst_remaining = 0;
1535 queue->pdu_remaining = 0;
1536 queue->pdu_offset = 0;
1537 sk_set_memalloc(queue->sock->sk);
1538
1539 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) {
1540 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr,
1541 sizeof(ctrl->src_addr));
1542 if (ret) {
1543 dev_err(nctrl->device,
1544 "failed to bind queue %d socket %d\n",
1545 qid, ret);
1546 goto err_sock;
1547 }
1548 }
1549
1550 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) {
1551 char *iface = nctrl->opts->host_iface;
1552 sockptr_t optval = KERNEL_SOCKPTR(iface);
1553
1554 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE,
1555 optval, strlen(iface));
1556 if (ret) {
1557 dev_err(nctrl->device,
1558 "failed to bind to interface %s queue %d err %d\n",
1559 iface, qid, ret);
1560 goto err_sock;
1561 }
1562 }
1563
1564 queue->hdr_digest = nctrl->opts->hdr_digest;
1565 queue->data_digest = nctrl->opts->data_digest;
1566 if (queue->hdr_digest || queue->data_digest) {
1567 ret = nvme_tcp_alloc_crypto(queue);
1568 if (ret) {
1569 dev_err(nctrl->device,
1570 "failed to allocate queue %d crypto\n", qid);
1571 goto err_sock;
1572 }
1573 }
1574
1575 rcv_pdu_size = sizeof(struct nvme_tcp_rsp_pdu) +
1576 nvme_tcp_hdgst_len(queue);
1577 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL);
1578 if (!queue->pdu) {
1579 ret = -ENOMEM;
1580 goto err_crypto;
1581 }
1582
1583 dev_dbg(nctrl->device, "connecting queue %d\n",
1584 nvme_tcp_queue_id(queue));
1585
1586 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr,
1587 sizeof(ctrl->addr), 0);
1588 if (ret) {
1589 dev_err(nctrl->device,
1590 "failed to connect socket: %d\n", ret);
1591 goto err_rcv_pdu;
1592 }
1593
1594 ret = nvme_tcp_init_connection(queue);
1595 if (ret)
1596 goto err_init_connect;
1597
1598 queue->rd_enabled = true;
1599 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags);
1600 nvme_tcp_init_recv_ctx(queue);
1601
1602 write_lock_bh(&queue->sock->sk->sk_callback_lock);
1603 queue->sock->sk->sk_user_data = queue;
1604 queue->state_change = queue->sock->sk->sk_state_change;
1605 queue->data_ready = queue->sock->sk->sk_data_ready;
1606 queue->write_space = queue->sock->sk->sk_write_space;
1607 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
1608 queue->sock->sk->sk_state_change = nvme_tcp_state_change;
1609 queue->sock->sk->sk_write_space = nvme_tcp_write_space;
1610#ifdef CONFIG_NET_RX_BUSY_POLL
1611 queue->sock->sk->sk_ll_usec = 1;
1612#endif
1613 write_unlock_bh(&queue->sock->sk->sk_callback_lock);
1614
1615 return 0;
1616
1617err_init_connect:
1618 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1619err_rcv_pdu:
1620 kfree(queue->pdu);
1621err_crypto:
1622 if (queue->hdr_digest || queue->data_digest)
1623 nvme_tcp_free_crypto(queue);
1624err_sock:
1625 sock_release(queue->sock);
1626 queue->sock = NULL;
1627err_destroy_mutex:
1628 mutex_destroy(&queue->send_mutex);
1629 mutex_destroy(&queue->queue_lock);
1630 return ret;
1631}
1632
1633static void nvme_tcp_restore_sock_calls(struct nvme_tcp_queue *queue)
1634{
1635 struct socket *sock = queue->sock;
1636
1637 write_lock_bh(&sock->sk->sk_callback_lock);
1638 sock->sk->sk_user_data = NULL;
1639 sock->sk->sk_data_ready = queue->data_ready;
1640 sock->sk->sk_state_change = queue->state_change;
1641 sock->sk->sk_write_space = queue->write_space;
1642 write_unlock_bh(&sock->sk->sk_callback_lock);
1643}
1644
1645static void __nvme_tcp_stop_queue(struct nvme_tcp_queue *queue)
1646{
1647 kernel_sock_shutdown(queue->sock, SHUT_RDWR);
1648 nvme_tcp_restore_sock_calls(queue);
1649 cancel_work_sync(&queue->io_work);
1650}
1651
1652static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid)
1653{
1654 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1655 struct nvme_tcp_queue *queue = &ctrl->queues[qid];
1656
1657 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags))
1658 return;
1659
1660 mutex_lock(&queue->queue_lock);
1661 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags))
1662 __nvme_tcp_stop_queue(queue);
1663 mutex_unlock(&queue->queue_lock);
1664}
1665
1666static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx)
1667{
1668 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1669 int ret;
1670
1671 if (idx)
1672 ret = nvmf_connect_io_queue(nctrl, idx);
1673 else
1674 ret = nvmf_connect_admin_queue(nctrl);
1675
1676 if (!ret) {
1677 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags);
1678 } else {
1679 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags))
1680 __nvme_tcp_stop_queue(&ctrl->queues[idx]);
1681 dev_err(nctrl->device,
1682 "failed to connect queue: %d ret=%d\n", idx, ret);
1683 }
1684 return ret;
1685}
1686
1687static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
1688{
1689 if (to_tcp_ctrl(ctrl)->async_req.pdu) {
1690 cancel_work_sync(&ctrl->async_event_work);
1691 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
1692 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
1693 }
1694
1695 nvme_tcp_free_queue(ctrl, 0);
1696}
1697
1698static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl)
1699{
1700 int i;
1701
1702 for (i = 1; i < ctrl->queue_count; i++)
1703 nvme_tcp_free_queue(ctrl, i);
1704}
1705
1706static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl)
1707{
1708 int i;
1709
1710 for (i = 1; i < ctrl->queue_count; i++)
1711 nvme_tcp_stop_queue(ctrl, i);
1712}
1713
1714static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl,
1715 int first, int last)
1716{
1717 int i, ret;
1718
1719 for (i = first; i < last; i++) {
1720 ret = nvme_tcp_start_queue(ctrl, i);
1721 if (ret)
1722 goto out_stop_queues;
1723 }
1724
1725 return 0;
1726
1727out_stop_queues:
1728 for (i--; i >= first; i--)
1729 nvme_tcp_stop_queue(ctrl, i);
1730 return ret;
1731}
1732
1733static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl)
1734{
1735 int ret;
1736
1737 ret = nvme_tcp_alloc_queue(ctrl, 0);
1738 if (ret)
1739 return ret;
1740
1741 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl));
1742 if (ret)
1743 goto out_free_queue;
1744
1745 return 0;
1746
1747out_free_queue:
1748 nvme_tcp_free_queue(ctrl, 0);
1749 return ret;
1750}
1751
1752static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1753{
1754 int i, ret;
1755
1756 for (i = 1; i < ctrl->queue_count; i++) {
1757 ret = nvme_tcp_alloc_queue(ctrl, i);
1758 if (ret)
1759 goto out_free_queues;
1760 }
1761
1762 return 0;
1763
1764out_free_queues:
1765 for (i--; i >= 1; i--)
1766 nvme_tcp_free_queue(ctrl, i);
1767
1768 return ret;
1769}
1770
1771static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
1772{
1773 unsigned int nr_io_queues;
1774
1775 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus());
1776 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus());
1777 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus());
1778
1779 return nr_io_queues;
1780}
1781
1782static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
1783 unsigned int nr_io_queues)
1784{
1785 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
1786 struct nvmf_ctrl_options *opts = nctrl->opts;
1787
1788 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
1789 /*
1790 * separate read/write queues
1791 * hand out dedicated default queues only after we have
1792 * sufficient read queues.
1793 */
1794 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
1795 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
1796 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1797 min(opts->nr_write_queues, nr_io_queues);
1798 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1799 } else {
1800 /*
1801 * shared read/write queues
1802 * either no write queues were requested, or we don't have
1803 * sufficient queue count to have dedicated default queues.
1804 */
1805 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
1806 min(opts->nr_io_queues, nr_io_queues);
1807 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
1808 }
1809
1810 if (opts->nr_poll_queues && nr_io_queues) {
1811 /* map dedicated poll queues only if we have queues left */
1812 ctrl->io_queues[HCTX_TYPE_POLL] =
1813 min(opts->nr_poll_queues, nr_io_queues);
1814 }
1815}
1816
1817static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
1818{
1819 unsigned int nr_io_queues;
1820 int ret;
1821
1822 nr_io_queues = nvme_tcp_nr_io_queues(ctrl);
1823 ret = nvme_set_queue_count(ctrl, &nr_io_queues);
1824 if (ret)
1825 return ret;
1826
1827 if (nr_io_queues == 0) {
1828 dev_err(ctrl->device,
1829 "unable to set any I/O queues\n");
1830 return -ENOMEM;
1831 }
1832
1833 ctrl->queue_count = nr_io_queues + 1;
1834 dev_info(ctrl->device,
1835 "creating %d I/O queues.\n", nr_io_queues);
1836
1837 nvme_tcp_set_io_queues(ctrl, nr_io_queues);
1838
1839 return __nvme_tcp_alloc_io_queues(ctrl);
1840}
1841
1842static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove)
1843{
1844 nvme_tcp_stop_io_queues(ctrl);
1845 if (remove)
1846 nvme_remove_io_tag_set(ctrl);
1847 nvme_tcp_free_io_queues(ctrl);
1848}
1849
1850static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new)
1851{
1852 int ret, nr_queues;
1853
1854 ret = nvme_tcp_alloc_io_queues(ctrl);
1855 if (ret)
1856 return ret;
1857
1858 if (new) {
de777825
CH
1859 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set,
1860 &nvme_tcp_mq_ops,
1861 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING,
1862 sizeof(struct nvme_tcp_request));
2f7a7e5d 1863 if (ret)
3f2304f8 1864 goto out_free_io_queues;
3f2304f8
SG
1865 }
1866
09035f86
DW
1867 /*
1868 * Only start IO queues for which we have allocated the tagset
1869 * and limited it to the available queues. On reconnects, the
1870 * queue number might have changed.
1871 */
1872 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count);
1873 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues);
3f2304f8
SG
1874 if (ret)
1875 goto out_cleanup_connect_q;
1876
2875b0ae
SG
1877 if (!new) {
1878 nvme_start_queues(ctrl);
e5c01f4f
SG
1879 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) {
1880 /*
1881 * If we timed out waiting for freeze we are likely to
1882 * be stuck. Fail the controller initialization just
1883 * to be safe.
1884 */
1885 ret = -ENODEV;
1886 goto out_wait_freeze_timed_out;
1887 }
2875b0ae
SG
1888 blk_mq_update_nr_hw_queues(ctrl->tagset,
1889 ctrl->queue_count - 1);
1890 nvme_unfreeze(ctrl);
1891 }
1892
09035f86
DW
1893 /*
1894 * If the number of queues has increased (reconnect case)
1895 * start all new queues now.
1896 */
1897 ret = nvme_tcp_start_io_queues(ctrl, nr_queues,
1898 ctrl->tagset->nr_hw_queues + 1);
1899 if (ret)
1900 goto out_wait_freeze_timed_out;
1901
3f2304f8
SG
1902 return 0;
1903
e5c01f4f
SG
1904out_wait_freeze_timed_out:
1905 nvme_stop_queues(ctrl);
70a99574 1906 nvme_sync_io_queues(ctrl);
e5c01f4f 1907 nvme_tcp_stop_io_queues(ctrl);
3f2304f8 1908out_cleanup_connect_q:
70a99574 1909 nvme_cancel_tagset(ctrl);
e85037a2 1910 if (new)
de777825 1911 nvme_remove_io_tag_set(ctrl);
3f2304f8
SG
1912out_free_io_queues:
1913 nvme_tcp_free_io_queues(ctrl);
1914 return ret;
1915}
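/*
 * [Editor's note: worked example of the reconnect clamping above, not
 * part of the upstream source.]
 * Suppose the first connect created 8 I/O queues, so the existing tagset
 * has nr_hw_queues = 8:
 *
 * - Reconnect where the controller now grants only 4 I/O queues:
 *   queue_count = 5, nr_queues = min(8 + 1, 5) = 5, so queues 1..4 are
 *   started, blk_mq_update_nr_hw_queues() shrinks the tagset to 4, and
 *   the second nvme_tcp_start_io_queues() call is a no-op.
 *
 * - Reconnect where the controller now grants 12 I/O queues:
 *   queue_count = 13, nr_queues = min(8 + 1, 13) = 9, so queues 1..8 are
 *   started first, the tagset is grown to 12 hardware queues, and the
 *   second call then starts queues 9..12.
 */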
1916
1917static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove)
1918{
1919 nvme_tcp_stop_queue(ctrl, 0);
de777825
CH
1920 if (remove)
1921 nvme_remove_admin_tag_set(ctrl);
3f2304f8
SG
1922 nvme_tcp_free_admin_queue(ctrl);
1923}
1924
1925static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
1926{
1927 int error;
1928
1929 error = nvme_tcp_alloc_admin_queue(ctrl);
1930 if (error)
1931 return error;
1932
1933 if (new) {
de777825
CH
1934 error = nvme_alloc_admin_tag_set(ctrl,
1935 &to_tcp_ctrl(ctrl)->admin_tag_set,
1936 &nvme_tcp_admin_mq_ops, BLK_MQ_F_BLOCKING,
1937 sizeof(struct nvme_tcp_request));
2f7a7e5d 1938 if (error)
3f2304f8 1939 goto out_free_queue;
3f2304f8
SG
1940 }
1941
1942 error = nvme_tcp_start_queue(ctrl, 0);
1943 if (error)
de777825 1944 goto out_cleanup_tagset;
3f2304f8 1945
c0f2f45b 1946 error = nvme_enable_ctrl(ctrl);
3f2304f8
SG
1947 if (error)
1948 goto out_stop_queue;
1949
6ca1d902 1950 nvme_start_admin_queue(ctrl);
e7832cb4 1951
f21c4769 1952 error = nvme_init_ctrl_finish(ctrl);
3f2304f8 1953 if (error)
70a99574 1954 goto out_quiesce_queue;
3f2304f8
SG
1955
1956 return 0;
1957
70a99574 1958out_quiesce_queue:
6ca1d902 1959 nvme_stop_admin_queue(ctrl);
70a99574 1960 blk_sync_queue(ctrl->admin_q);
3f2304f8
SG
1961out_stop_queue:
1962 nvme_tcp_stop_queue(ctrl, 0);
70a99574 1963 nvme_cancel_admin_tagset(ctrl);
de777825 1964out_cleanup_tagset:
e7832cb4 1965 if (new)
de777825 1966 nvme_remove_admin_tag_set(ctrl);
3f2304f8
SG
1967out_free_queue:
1968 nvme_tcp_free_admin_queue(ctrl);
1969 return error;
1970}
1971
1972static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
1973 bool remove)
1974{
6ca1d902 1975 nvme_stop_admin_queue(ctrl);
d6f66210 1976 blk_sync_queue(ctrl->admin_q);
3f2304f8 1977 nvme_tcp_stop_queue(ctrl, 0);
563c8158 1978 nvme_cancel_admin_tagset(ctrl);
e7832cb4 1979 if (remove)
6ca1d902 1980 nvme_start_admin_queue(ctrl);
3f2304f8
SG
1981 nvme_tcp_destroy_admin_queue(ctrl, remove);
1982}
1983
1984static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
1985 bool remove)
1986{
1987 if (ctrl->queue_count <= 1)
d6f66210 1988 return;
6ca1d902 1989 nvme_stop_admin_queue(ctrl);
2875b0ae 1990 nvme_start_freeze(ctrl);
3f2304f8 1991 nvme_stop_queues(ctrl);
d6f66210 1992 nvme_sync_io_queues(ctrl);
3f2304f8 1993 nvme_tcp_stop_io_queues(ctrl);
563c8158 1994 nvme_cancel_tagset(ctrl);
3f2304f8
SG
1995 if (remove)
1996 nvme_start_queues(ctrl);
1997 nvme_tcp_destroy_io_queues(ctrl, remove);
1998}
1999
2000static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl)
2001{
2002 /* If we are resetting/deleting then do nothing */
2003 if (ctrl->state != NVME_CTRL_CONNECTING) {
2004 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW ||
2005 ctrl->state == NVME_CTRL_LIVE);
2006 return;
2007 }
2008
2009 if (nvmf_should_reconnect(ctrl)) {
2010 dev_info(ctrl->device, "Reconnecting in %d seconds...\n",
2011 ctrl->opts->reconnect_delay);
2012 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work,
2013 ctrl->opts->reconnect_delay * HZ);
2014 } else {
2015 dev_info(ctrl->device, "Removing controller...\n");
2016 nvme_delete_ctrl(ctrl);
2017 }
2018}
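/*
 * [Editor's note: illustrative numbers, not part of the upstream source.]
 * nvmf_should_reconnect() compares ctrl->nr_reconnects against
 * opts->max_reconnects, which the fabrics layer derives from
 * ctrl_loss_tmo / reconnect_delay. Assuming the fabrics defaults of a
 * 10 second reconnect_delay and a 600 second ctrl_loss_tmo, the host
 * retries roughly 60 times (about ten minutes) before deleting the
 * controller; a negative ctrl_loss_tmo retries indefinitely.
 */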
2019
2020static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new)
2021{
2022 struct nvmf_ctrl_options *opts = ctrl->opts;
312910f4 2023 int ret;
3f2304f8
SG
2024
2025 ret = nvme_tcp_configure_admin_queue(ctrl, new);
2026 if (ret)
2027 return ret;
2028
2029 if (ctrl->icdoff) {
522af60c 2030 ret = -EOPNOTSUPP;
3f2304f8
SG
2031 dev_err(ctrl->device, "icdoff is not supported!\n");
2032 goto destroy_admin;
2033 }
2034
3b54064f 2035 if (!nvme_ctrl_sgl_supported(ctrl)) {
522af60c 2036 ret = -EOPNOTSUPP;
73ffcefc
MG
2037 dev_err(ctrl->device, "Mandatory sgls are not supported!\n");
2038 goto destroy_admin;
2039 }
2040
3f2304f8
SG
2041 if (opts->queue_size > ctrl->sqsize + 1)
2042 dev_warn(ctrl->device,
2043 "queue_size %zu > ctrl sqsize %u, clamping down\n",
2044 opts->queue_size, ctrl->sqsize + 1);
2045
2046 if (ctrl->sqsize + 1 > ctrl->maxcmd) {
2047 dev_warn(ctrl->device,
2048 "sqsize %u > ctrl maxcmd %u, clamping down\n",
2049 ctrl->sqsize + 1, ctrl->maxcmd);
2050 ctrl->sqsize = ctrl->maxcmd - 1;
2051 }
2052
2053 if (ctrl->queue_count > 1) {
2054 ret = nvme_tcp_configure_io_queues(ctrl, new);
2055 if (ret)
2056 goto destroy_admin;
2057 }
2058
2059 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) {
bea54ef5 2060 /*
ecca390e 2061 * state change failure is ok if we started ctrl delete,
bea54ef5
IR
2062 * unless we are in the middle of creating a new controller, to
2063 * avoid races with teardown flow.
2064 */
ecca390e
SG
2065 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2066 ctrl->state != NVME_CTRL_DELETING_NOIO);
bea54ef5 2067 WARN_ON_ONCE(new);
3f2304f8
SG
2068 ret = -EINVAL;
2069 goto destroy_io;
2070 }
2071
2072 nvme_start_ctrl(ctrl);
2073 return 0;
2074
2075destroy_io:
70a99574
CL
2076 if (ctrl->queue_count > 1) {
2077 nvme_stop_queues(ctrl);
2078 nvme_sync_io_queues(ctrl);
2079 nvme_tcp_stop_io_queues(ctrl);
2080 nvme_cancel_tagset(ctrl);
3f2304f8 2081 nvme_tcp_destroy_io_queues(ctrl, new);
70a99574 2082 }
3f2304f8 2083destroy_admin:
6ca1d902 2084 nvme_stop_admin_queue(ctrl);
70a99574 2085 blk_sync_queue(ctrl->admin_q);
3f2304f8 2086 nvme_tcp_stop_queue(ctrl, 0);
70a99574 2087 nvme_cancel_admin_tagset(ctrl);
3f2304f8
SG
2088 nvme_tcp_destroy_admin_queue(ctrl, new);
2089 return ret;
2090}
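/*
 * [Editor's note: worked example of the depth clamping above, not part of
 * the upstream source.]
 * Suppose the user connected with queue_size=128 while the controller
 * reports sqsize+1 = 64 and MAXCMD = 32:
 *
 * - the first warning fires (128 > 64); the effective queue depth is
 *   bounded by the controller's sqsize, not the requested queue_size;
 * - the second warning fires (64 > 32) and sqsize is lowered to
 *   maxcmd - 1 = 31, so at most 32 commands are outstanding per queue.
 */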
2091
2092static void nvme_tcp_reconnect_ctrl_work(struct work_struct *work)
2093{
2094 struct nvme_tcp_ctrl *tcp_ctrl = container_of(to_delayed_work(work),
2095 struct nvme_tcp_ctrl, connect_work);
2096 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2097
2098 ++ctrl->nr_reconnects;
2099
2100 if (nvme_tcp_setup_ctrl(ctrl, false))
2101 goto requeue;
2102
56a77d26 2103 dev_info(ctrl->device, "Successfully reconnected (attempt %d)\n",
3f2304f8
SG
2104 ctrl->nr_reconnects);
2105
2106 ctrl->nr_reconnects = 0;
2107
2108 return;
2109
2110requeue:
2111 dev_info(ctrl->device, "Failed reconnect attempt %d\n",
2112 ctrl->nr_reconnects);
2113 nvme_tcp_reconnect_or_remove(ctrl);
2114}
2115
2116static void nvme_tcp_error_recovery_work(struct work_struct *work)
2117{
2118 struct nvme_tcp_ctrl *tcp_ctrl = container_of(work,
2119 struct nvme_tcp_ctrl, err_work);
2120 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
2121
f50fff73 2122 nvme_auth_stop(ctrl);
3f2304f8 2123 nvme_stop_keep_alive(ctrl);
ff9fc7eb 2124 flush_work(&ctrl->async_event_work);
3f2304f8
SG
2125 nvme_tcp_teardown_io_queues(ctrl, false);
2126 /* unquiesce to fail fast pending requests */
2127 nvme_start_queues(ctrl);
2128 nvme_tcp_teardown_admin_queue(ctrl, false);
6ca1d902 2129 nvme_start_admin_queue(ctrl);
3f2304f8
SG
2130
2131 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
ecca390e
SG
2132 /* state change failure is ok if we started ctrl delete */
2133 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2134 ctrl->state != NVME_CTRL_DELETING_NOIO);
3f2304f8
SG
2135 return;
2136 }
2137
2138 nvme_tcp_reconnect_or_remove(ctrl);
2139}
2140
2141static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
2142{
2143 nvme_tcp_teardown_io_queues(ctrl, shutdown);
6ca1d902 2144 nvme_stop_admin_queue(ctrl);
3f2304f8
SG
2145 if (shutdown)
2146 nvme_shutdown_ctrl(ctrl);
2147 else
b5b05048 2148 nvme_disable_ctrl(ctrl);
3f2304f8
SG
2149 nvme_tcp_teardown_admin_queue(ctrl, shutdown);
2150}
2151
2152static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl)
2153{
2154 nvme_tcp_teardown_ctrl(ctrl, true);
2155}
2156
2157static void nvme_reset_ctrl_work(struct work_struct *work)
2158{
2159 struct nvme_ctrl *ctrl =
2160 container_of(work, struct nvme_ctrl, reset_work);
2161
2162 nvme_stop_ctrl(ctrl);
2163 nvme_tcp_teardown_ctrl(ctrl, false);
2164
2165 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
ecca390e
SG
2166 /* state change failure is ok if we started ctrl delete */
2167 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING &&
2168 ctrl->state != NVME_CTRL_DELETING_NOIO);
3f2304f8
SG
2169 return;
2170 }
2171
2172 if (nvme_tcp_setup_ctrl(ctrl, false))
2173 goto out_fail;
2174
2175 return;
2176
2177out_fail:
2178 ++ctrl->nr_reconnects;
2179 nvme_tcp_reconnect_or_remove(ctrl);
2180}
2181
f7f70f4a
RL
2182static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl)
2183{
c4abd875 2184 flush_work(&to_tcp_ctrl(ctrl)->err_work);
f7f70f4a
RL
2185 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
2186}
2187
3f2304f8
SG
2188static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl)
2189{
2190 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
2191
2192 if (list_empty(&ctrl->list))
2193 goto free_ctrl;
2194
2195 mutex_lock(&nvme_tcp_ctrl_mutex);
2196 list_del(&ctrl->list);
2197 mutex_unlock(&nvme_tcp_ctrl_mutex);
2198
2199 nvmf_free_options(nctrl->opts);
2200free_ctrl:
2201 kfree(ctrl->queues);
2202 kfree(ctrl);
2203}
2204
2205static void nvme_tcp_set_sg_null(struct nvme_command *c)
2206{
2207 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2208
2209 sg->addr = 0;
2210 sg->length = 0;
2211 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2212 NVME_SGL_FMT_TRANSPORT_A;
2213}
2214
2215static void nvme_tcp_set_sg_inline(struct nvme_tcp_queue *queue,
2216 struct nvme_command *c, u32 data_len)
2217{
2218 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2219
2220 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
2221 sg->length = cpu_to_le32(data_len);
2222 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
2223}
2224
2225static void nvme_tcp_set_sg_host_data(struct nvme_command *c,
2226 u32 data_len)
2227{
2228 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
2229
2230 sg->addr = 0;
2231 sg->length = cpu_to_le32(data_len);
2232 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2233 NVME_SGL_FMT_TRANSPORT_A;
2234}
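/*
 * [Editor's note: summary of the three SGL helpers above, not part of the
 * upstream source.]
 * Each helper fills the single transport SGL descriptor in the command's
 * dptr:
 *
 * - nvme_tcp_set_sg_null:      addr 0, length 0 (no data transfer)
 * - nvme_tcp_set_sg_inline:    addr = icdoff, offset-type data SGL; the
 *                              payload is carried in-capsule right after
 *                              the command PDU
 * - nvme_tcp_set_sg_host_data: addr 0, transport-specific SGL; the payload
 *                              moves in separate C2H/H2C data PDUs
 *
 * For example, a hypothetical 2 KiB write that fits the inline data size
 * would be described as { addr = 0, length = 2048,
 * type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET }.
 */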
2235
2236static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
2237{
2238 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg);
2239 struct nvme_tcp_queue *queue = &ctrl->queues[0];
2240 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu;
2241 struct nvme_command *cmd = &pdu->cmd;
2242 u8 hdgst = nvme_tcp_hdgst_len(queue);
2243
2244 memset(pdu, 0, sizeof(*pdu));
2245 pdu->hdr.type = nvme_tcp_cmd;
2246 if (queue->hdr_digest)
2247 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2248 pdu->hdr.hlen = sizeof(*pdu);
2249 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst);
2250
2251 cmd->common.opcode = nvme_admin_async_event;
2252 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
2253 cmd->common.flags |= NVME_CMD_SGL_METABUF;
2254 nvme_tcp_set_sg_null(cmd);
2255
2256 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU;
2257 ctrl->async_req.offset = 0;
2258 ctrl->async_req.curr_bio = NULL;
2259 ctrl->async_req.data_len = 0;
2260
86f0348a 2261 nvme_tcp_queue_request(&ctrl->async_req, true, true);
3f2304f8
SG
2262}
2263
236187c4
SG
2264static void nvme_tcp_complete_timed_out(struct request *rq)
2265{
2266 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2267 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2268
236187c4 2269 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
93ba75c9 2270 nvmf_complete_timed_out_request(rq);
236187c4
SG
2271}
2272
9bdb4833 2273static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
3f2304f8
SG
2274{
2275 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
236187c4 2276 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
3f2304f8
SG
2277 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2278
236187c4 2279 dev_warn(ctrl->device,
3f2304f8 2280 "queue %d: timeout request %#x type %d\n",
39d57757 2281 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
3f2304f8 2282
236187c4 2283 if (ctrl->state != NVME_CTRL_LIVE) {
39d57757 2284 /*
236187c4
SG
2285 * If we are resetting, connecting or deleting we should
2286 * complete immediately because we may block the controller
2287 * teardown or setup sequence:
2288 * - ctrl disable/shutdown fabrics requests
2289 * - connect requests
2290 * - initialization admin requests
2291 * - I/O requests that entered after unquiescing and
2292 * the controller stopped responding
2293 *
2294 * All other requests should be cancelled by the error
2295 * recovery work, so it's fine that we fail it here.
39d57757 2296 */
236187c4 2297 nvme_tcp_complete_timed_out(rq);
3f2304f8
SG
2298 return BLK_EH_DONE;
2299 }
2300
236187c4
SG
2301 /*
2302 * LIVE state should trigger the normal error recovery which will
2303 * handle completing this request.
2304 */
2305 nvme_tcp_error_recovery(ctrl);
3f2304f8
SG
2306 return BLK_EH_RESET_TIMER;
2307}
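/*
 * [Editor's note: clarification, not part of the upstream source.]
 * The two return values have different block-layer semantics: BLK_EH_DONE
 * tells blk-mq the driver has disposed of the request (here it was
 * force-completed via nvme_tcp_complete_timed_out()), while
 * BLK_EH_RESET_TIMER re-arms the request timer and leaves completion to
 * the error-recovery work scheduled by nvme_tcp_error_recovery().
 */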
2308
2309static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
2310 struct request *rq)
2311{
2312 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2313 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2314 struct nvme_command *c = &pdu->cmd;
2315
2316 c->common.flags |= NVME_CMD_SGL_METABUF;
2317
25e5cb78
SG
2318 if (!blk_rq_nr_phys_segments(rq))
2319 nvme_tcp_set_sg_null(c);
2320 else if (rq_data_dir(rq) == WRITE &&
53ee9e29 2321 req->data_len <= nvme_tcp_inline_data_size(req))
3f2304f8
SG
2322 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2323 else
2324 nvme_tcp_set_sg_host_data(c, req->data_len);
2325
2326 return 0;
2327}
2328
2329static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
2330 struct request *rq)
2331{
2332 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2333 struct nvme_tcp_cmd_pdu *pdu = req->pdu;
2334 struct nvme_tcp_queue *queue = req->queue;
2335 u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0;
2336 blk_status_t ret;
2337
f4b9e6c9 2338 ret = nvme_setup_cmd(ns, rq);
3f2304f8
SG
2339 if (ret)
2340 return ret;
2341
2342 req->state = NVME_TCP_SEND_CMD_PDU;
1ba2e507 2343 req->status = cpu_to_le16(NVME_SC_SUCCESS);
3f2304f8
SG
2344 req->offset = 0;
2345 req->data_sent = 0;
2346 req->pdu_len = 0;
2347 req->pdu_sent = 0;
c2700d28 2348 req->h2cdata_left = 0;
25e5cb78
SG
2349 req->data_len = blk_rq_nr_phys_segments(rq) ?
2350 blk_rq_payload_bytes(rq) : 0;
3f2304f8 2351 req->curr_bio = rq->bio;
e11e5116 2352 if (req->curr_bio && req->data_len)
cb9b870f 2353 nvme_tcp_init_iter(req, rq_data_dir(rq));
3f2304f8
SG
2354
2355 if (rq_data_dir(rq) == WRITE &&
53ee9e29 2356 req->data_len <= nvme_tcp_inline_data_size(req))
3f2304f8 2357 req->pdu_len = req->data_len;
3f2304f8
SG
2358
2359 pdu->hdr.type = nvme_tcp_cmd;
2360 pdu->hdr.flags = 0;
2361 if (queue->hdr_digest)
2362 pdu->hdr.flags |= NVME_TCP_F_HDGST;
2363 if (queue->data_digest && req->pdu_len) {
2364 pdu->hdr.flags |= NVME_TCP_F_DDGST;
2365 ddgst = nvme_tcp_ddgst_len(queue);
2366 }
2367 pdu->hdr.hlen = sizeof(*pdu);
2368 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2369 pdu->hdr.plen =
2370 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
2371
2372 ret = nvme_tcp_map_data(queue, rq);
2373 if (unlikely(ret)) {
28a4cac4 2374 nvme_cleanup_cmd(rq);
3f2304f8
SG
2375 dev_err(queue->ctrl->ctrl.device,
2376 "Failed to map data (%d)\n", ret);
2377 return ret;
2378 }
2379
2380 return 0;
2381}
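/*
 * [Editor's note: worked example of the PDU header arithmetic above, not
 * part of the upstream source; sizes assume the standard wire format.]
 * For a hypothetical 4 KiB write that fits the inline data size, with both
 * header and data digests enabled (each digest is a 4-byte CRC32C):
 *
 *   hlen = sizeof(struct nvme_tcp_cmd_pdu)  = 8 + 64 = 72
 *   pdo  = hlen + hdgst                     = 72 + 4 = 76
 *   plen = hlen + hdgst + pdu_len + ddgst   = 72 + 4 + 4096 + 4 = 4176
 *
 * For a read, or a write larger than the inline data size, pdu_len stays
 * zero, so pdo is 0 and plen covers only the header and its digest.
 */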
2382
86f0348a
SG
2383static void nvme_tcp_commit_rqs(struct blk_mq_hw_ctx *hctx)
2384{
2385 struct nvme_tcp_queue *queue = hctx->driver_data;
2386
2387 if (!llist_empty(&queue->req_list))
2388 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
2389}
2390
3f2304f8
SG
2391static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
2392 const struct blk_mq_queue_data *bd)
2393{
2394 struct nvme_ns *ns = hctx->queue->queuedata;
2395 struct nvme_tcp_queue *queue = hctx->driver_data;
2396 struct request *rq = bd->rq;
2397 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2398 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags);
2399 blk_status_t ret;
2400
a9715744
TC
2401 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
2402 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq);
3f2304f8
SG
2403
2404 ret = nvme_tcp_setup_cmd_pdu(ns, rq);
2405 if (unlikely(ret))
2406 return ret;
2407
2408 blk_mq_start_request(rq);
2409
86f0348a 2410 nvme_tcp_queue_request(req, true, bd->last);
3f2304f8
SG
2411
2412 return BLK_STS_OK;
2413}
2414
a4e1d0b7 2415static void nvme_tcp_map_queues(struct blk_mq_tag_set *set)
873946f4 2416{
06427ca0 2417 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data);
64861993 2418 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
873946f4 2419
64861993 2420 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
873946f4
SG
2421 /* separate read/write queues */
2422 set->map[HCTX_TYPE_DEFAULT].nr_queues =
64861993
SG
2423 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2424 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2425 set->map[HCTX_TYPE_READ].nr_queues =
2426 ctrl->io_queues[HCTX_TYPE_READ];
873946f4 2427 set->map[HCTX_TYPE_READ].queue_offset =
64861993 2428 ctrl->io_queues[HCTX_TYPE_DEFAULT];
873946f4 2429 } else {
64861993 2430 /* shared read/write queues */
873946f4 2431 set->map[HCTX_TYPE_DEFAULT].nr_queues =
64861993
SG
2432 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2433 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2434 set->map[HCTX_TYPE_READ].nr_queues =
2435 ctrl->io_queues[HCTX_TYPE_DEFAULT];
873946f4
SG
2436 set->map[HCTX_TYPE_READ].queue_offset = 0;
2437 }
2438 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
2439 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
64861993 2440
1a9460ce
SG
2441 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2442 /* map dedicated poll queues only if we have queues left */
2443 set->map[HCTX_TYPE_POLL].nr_queues =
2444 ctrl->io_queues[HCTX_TYPE_POLL];
2445 set->map[HCTX_TYPE_POLL].queue_offset =
2446 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2447 ctrl->io_queues[HCTX_TYPE_READ];
2448 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2449 }
2450
64861993 2451 dev_info(ctrl->ctrl.device,
1a9460ce 2452 "mapped %d/%d/%d default/read/poll queues.\n",
64861993 2453 ctrl->io_queues[HCTX_TYPE_DEFAULT],
1a9460ce
SG
2454 ctrl->io_queues[HCTX_TYPE_READ],
2455 ctrl->io_queues[HCTX_TYPE_POLL]);
873946f4
SG
2456}
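/*
 * [Editor's note: worked example of the queue mapping above, not part of
 * the upstream source.]
 * With the hypothetical split used earlier (DEFAULT=8, READ=8, POLL=2) the
 * blk-mq hardware-context maps become:
 *
 *   HCTX_TYPE_DEFAULT: 8 queues, queue_offset 0   -> nvme queues 1..8
 *   HCTX_TYPE_READ:    8 queues, queue_offset 8   -> nvme queues 9..16
 *   HCTX_TYPE_POLL:    2 queues, queue_offset 16  -> nvme queues 17..18
 *
 * (The +1 shift exists because ctrl->queues[0] is the admin queue.) In the
 * shared case, DEFAULT and READ both start at offset 0 and cover the same
 * queues.
 */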
2457
5a72e899 2458static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
1a9460ce
SG
2459{
2460 struct nvme_tcp_queue *queue = hctx->driver_data;
2461 struct sock *sk = queue->sock->sk;
2462
f86e5bf8
SG
2463 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
2464 return 0;
2465
72e5d757 2466 set_bit(NVME_TCP_Q_POLLING, &queue->flags);
3f926af3 2467 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
1a9460ce
SG
2468 sk_busy_loop(sk, true);
2469 nvme_tcp_try_recv(queue);
72e5d757 2470 clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
1a9460ce
SG
2471 return queue->nr_cqe;
2472}
2473
02c57a82
MB
2474static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
2475{
2476 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0];
2477 struct sockaddr_storage src_addr;
2478 int ret, len;
2479
2480 len = nvmf_get_address(ctrl, buf, size);
2481
2482 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr);
2483 if (ret > 0) {
2484 if (len > 0)
2485 len--; /* strip trailing newline */
2486 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n",
2487 (len) ? "," : "", &src_addr);
2488 }
2489
2490 return len;
2491}
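/*
 * [Editor's note: illustrative output, not part of the upstream source.]
 * The resulting sysfs "address" attribute concatenates the generic fabrics
 * fields with the locally bound source address, e.g. with hypothetical
 * addresses:
 *
 *   traddr=192.168.1.20,trsvcid=4420,src_addr=192.168.1.10
 */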
2492
6acbd961 2493static const struct blk_mq_ops nvme_tcp_mq_ops = {
3f2304f8 2494 .queue_rq = nvme_tcp_queue_rq,
86f0348a 2495 .commit_rqs = nvme_tcp_commit_rqs,
3f2304f8
SG
2496 .complete = nvme_complete_rq,
2497 .init_request = nvme_tcp_init_request,
2498 .exit_request = nvme_tcp_exit_request,
2499 .init_hctx = nvme_tcp_init_hctx,
2500 .timeout = nvme_tcp_timeout,
873946f4 2501 .map_queues = nvme_tcp_map_queues,
1a9460ce 2502 .poll = nvme_tcp_poll,
3f2304f8
SG
2503};
2504
6acbd961 2505static const struct blk_mq_ops nvme_tcp_admin_mq_ops = {
3f2304f8
SG
2506 .queue_rq = nvme_tcp_queue_rq,
2507 .complete = nvme_complete_rq,
2508 .init_request = nvme_tcp_init_request,
2509 .exit_request = nvme_tcp_exit_request,
2510 .init_hctx = nvme_tcp_init_admin_hctx,
2511 .timeout = nvme_tcp_timeout,
2512};
2513
2514static const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = {
2515 .name = "tcp",
2516 .module = THIS_MODULE,
2517 .flags = NVME_F_FABRICS,
2518 .reg_read32 = nvmf_reg_read32,
2519 .reg_read64 = nvmf_reg_read64,
2520 .reg_write32 = nvmf_reg_write32,
2521 .free_ctrl = nvme_tcp_free_ctrl,
2522 .submit_async_event = nvme_tcp_submit_async_event,
2523 .delete_ctrl = nvme_tcp_delete_ctrl,
02c57a82 2524 .get_address = nvme_tcp_get_address,
f7f70f4a 2525 .stop_ctrl = nvme_tcp_stop_ctrl,
3f2304f8
SG
2526};
2527
2528static bool
2529nvme_tcp_existing_controller(struct nvmf_ctrl_options *opts)
2530{
2531 struct nvme_tcp_ctrl *ctrl;
2532 bool found = false;
2533
2534 mutex_lock(&nvme_tcp_ctrl_mutex);
2535 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) {
2536 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
2537 if (found)
2538 break;
2539 }
2540 mutex_unlock(&nvme_tcp_ctrl_mutex);
2541
2542 return found;
2543}
2544
2545static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
2546 struct nvmf_ctrl_options *opts)
2547{
2548 struct nvme_tcp_ctrl *ctrl;
2549 int ret;
2550
2551 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2552 if (!ctrl)
2553 return ERR_PTR(-ENOMEM);
2554
2555 INIT_LIST_HEAD(&ctrl->list);
2556 ctrl->ctrl.opts = opts;
1a9460ce
SG
2557 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2558 opts->nr_poll_queues + 1;
3f2304f8
SG
2559 ctrl->ctrl.sqsize = opts->queue_size - 1;
2560 ctrl->ctrl.kato = opts->kato;
2561
2562 INIT_DELAYED_WORK(&ctrl->connect_work,
2563 nvme_tcp_reconnect_ctrl_work);
2564 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work);
2565 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work);
2566
2567 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2568 opts->trsvcid =
2569 kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL);
2570 if (!opts->trsvcid) {
2571 ret = -ENOMEM;
2572 goto out_free_ctrl;
2573 }
2574 opts->mask |= NVMF_OPT_TRSVCID;
2575 }
2576
2577 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2578 opts->traddr, opts->trsvcid, &ctrl->addr);
2579 if (ret) {
2580 pr_err("malformed address passed: %s:%s\n",
2581 opts->traddr, opts->trsvcid);
2582 goto out_free_ctrl;
2583 }
2584
2585 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
2586 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2587 opts->host_traddr, NULL, &ctrl->src_addr);
2588 if (ret) {
2589 pr_err("malformed src address passed: %s\n",
2590 opts->host_traddr);
2591 goto out_free_ctrl;
2592 }
2593 }
2594
3ede8f72 2595 if (opts->mask & NVMF_OPT_HOST_IFACE) {
8b43ced6 2596 if (!__dev_get_by_name(&init_net, opts->host_iface)) {
3ede8f72
MB
2597 pr_err("invalid interface passed: %s\n",
2598 opts->host_iface);
2599 ret = -ENODEV;
2600 goto out_free_ctrl;
2601 }
2602 }
2603
3f2304f8
SG
2604 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) {
2605 ret = -EALREADY;
2606 goto out_free_ctrl;
2607 }
2608
873946f4 2609 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
3f2304f8
SG
2610 GFP_KERNEL);
2611 if (!ctrl->queues) {
2612 ret = -ENOMEM;
2613 goto out_free_ctrl;
2614 }
2615
2616 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0);
2617 if (ret)
2618 goto out_kfree_queues;
2619
2620 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
2621 WARN_ON_ONCE(1);
2622 ret = -EINTR;
2623 goto out_uninit_ctrl;
2624 }
2625
2626 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true);
2627 if (ret)
2628 goto out_uninit_ctrl;
2629
2630 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
e5ea42fa 2631 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
3f2304f8 2632
3f2304f8
SG
2633 mutex_lock(&nvme_tcp_ctrl_mutex);
2634 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
2635 mutex_unlock(&nvme_tcp_ctrl_mutex);
2636
2637 return &ctrl->ctrl;
2638
2639out_uninit_ctrl:
2640 nvme_uninit_ctrl(&ctrl->ctrl);
2641 nvme_put_ctrl(&ctrl->ctrl);
2642 if (ret > 0)
2643 ret = -EIO;
2644 return ERR_PTR(ret);
2645out_kfree_queues:
2646 kfree(ctrl->queues);
2647out_free_ctrl:
2648 kfree(ctrl);
2649 return ERR_PTR(ret);
2650}
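/*
 * [Editor's note: usage illustration, not part of the upstream source.]
 * This path is reached from the fabrics layer when a controller is created
 * through /dev/nvme-fabrics, typically via nvme-cli, e.g. with a
 * hypothetical target:
 *
 *   nvme connect -t tcp -a 192.168.1.20 -s 4420 \
 *        -n nqn.2014-08.org.nvmexpress:example-subsys
 *
 * When no trsvcid is supplied, the code above defaults it to
 * NVME_TCP_DISC_PORT (the NVMe/TCP discovery service port).
 */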
2651
2652static struct nvmf_transport_ops nvme_tcp_transport = {
2653 .name = "tcp",
2654 .module = THIS_MODULE,
2655 .required_opts = NVMF_OPT_TRADDR,
2656 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
2657 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
873946f4 2658 NVMF_OPT_HDR_DIGEST | NVMF_OPT_DATA_DIGEST |
bb13985d 2659 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
3ede8f72 2660 NVMF_OPT_TOS | NVMF_OPT_HOST_IFACE,
3f2304f8
SG
2661 .create_ctrl = nvme_tcp_create_ctrl,
2662};
2663
2664static int __init nvme_tcp_init_module(void)
2665{
2666 nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
2667 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
2668 if (!nvme_tcp_wq)
2669 return -ENOMEM;
2670
2671 nvmf_register_transport(&nvme_tcp_transport);
2672 return 0;
2673}
2674
2675static void __exit nvme_tcp_cleanup_module(void)
2676{
2677 struct nvme_tcp_ctrl *ctrl;
2678
2679 nvmf_unregister_transport(&nvme_tcp_transport);
2680
2681 mutex_lock(&nvme_tcp_ctrl_mutex);
2682 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list)
2683 nvme_delete_ctrl(&ctrl->ctrl);
2684 mutex_unlock(&nvme_tcp_ctrl_mutex);
2685 flush_workqueue(nvme_delete_wq);
2686
2687 destroy_workqueue(nvme_tcp_wq);
2688}
2689
2690module_init(nvme_tcp_init_module);
2691module_exit(nvme_tcp_cleanup_module);
2692
2693MODULE_LICENSE("GPL v2");