nvme: rename and document nvme_end_request
drivers/nvme/host/rdma.c

// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA host code.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/nvme.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/nvme-rdma.h>

#include "nvme.h"
#include "fabrics.h"

#define NVME_RDMA_CONNECT_TIMEOUT_MS	3000		/* 3 seconds */

#define NVME_RDMA_MAX_SEGMENTS		256

#define NVME_RDMA_MAX_INLINE_SEGMENTS	4

#define NVME_RDMA_DATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_SG_CNT)
#define NVME_RDMA_METADATA_SGL_SIZE \
	(sizeof(struct scatterlist) * NVME_INLINE_METADATA_SG_CNT)

struct nvme_rdma_device {
	struct ib_device	*dev;
	struct ib_pd		*pd;
	struct kref		ref;
	struct list_head	entry;
	unsigned int		num_inline_segments;
};

struct nvme_rdma_qe {
	struct ib_cqe		cqe;
	void			*data;
	u64			dma;
};

struct nvme_rdma_sgl {
	int			nents;
	struct sg_table		sg_table;
};

struct nvme_rdma_queue;
struct nvme_rdma_request {
	struct nvme_request	req;
	struct ib_mr		*mr;
	struct nvme_rdma_qe	sqe;
	union nvme_result	result;
	__le16			status;
	refcount_t		ref;
	struct ib_sge		sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
	u32			num_sge;
	struct ib_reg_wr	reg_wr;
	struct ib_cqe		reg_cqe;
	struct nvme_rdma_queue	*queue;
	struct nvme_rdma_sgl	data_sgl;
	struct nvme_rdma_sgl	*metadata_sgl;
	bool			use_sig_mr;
};

enum nvme_rdma_queue_flags {
	NVME_RDMA_Q_ALLOCATED		= 0,
	NVME_RDMA_Q_LIVE		= 1,
	NVME_RDMA_Q_TR_READY		= 2,
};

struct nvme_rdma_queue {
	struct nvme_rdma_qe	*rsp_ring;
	int			queue_size;
	size_t			cmnd_capsule_len;
	struct nvme_rdma_ctrl	*ctrl;
	struct nvme_rdma_device	*device;
	struct ib_cq		*ib_cq;
	struct ib_qp		*qp;

	unsigned long		flags;
	struct rdma_cm_id	*cm_id;
	int			cm_error;
	struct completion	cm_done;
	bool			pi_support;
	int			cq_size;
};

struct nvme_rdma_ctrl {
	/* read only in the hot path */
	struct nvme_rdma_queue	*queues;

	/* other member variables */
	struct blk_mq_tag_set	tag_set;
	struct work_struct	err_work;

	struct nvme_rdma_qe	async_event_sqe;

	struct delayed_work	reconnect_work;

	struct list_head	list;

	struct blk_mq_tag_set	admin_tag_set;
	struct nvme_rdma_device	*device;

	u32			max_fr_pages;

	struct sockaddr_storage	addr;
	struct sockaddr_storage	src_addr;

	struct nvme_ctrl	ctrl;
	bool			use_inline_data;
	u32			io_queues[HCTX_MAX_TYPES];
};

static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
}

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static LIST_HEAD(nvme_rdma_ctrl_list);
static DEFINE_MUTEX(nvme_rdma_ctrl_mutex);

/*
 * Disabling this option makes small I/O go faster, but is fundamentally
 * unsafe. With it turned off we will have to register a global rkey that
 * allows read and write access to all physical memory.
 */
static bool register_always = true;
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
	 "Use memory registration even for contiguous memory regions");

static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event);
static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvme_rdma_complete_rq(struct request *rq);

static const struct blk_mq_ops nvme_rdma_mq_ops;
static const struct blk_mq_ops nvme_rdma_admin_mq_ops;

static inline int nvme_rdma_queue_idx(struct nvme_rdma_queue *queue)
{
	return queue - queue->ctrl->queues;
}
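
/*
 * Poll queues are laid out after the default and read queues, so any queue
 * whose index is larger than io_queues[HCTX_TYPE_DEFAULT] +
 * io_queues[HCTX_TYPE_READ] services polled I/O.
 */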
static bool nvme_rdma_poll_queue(struct nvme_rdma_queue *queue)
{
	return nvme_rdma_queue_idx(queue) >
		queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
		queue->ctrl->io_queues[HCTX_TYPE_READ];
}
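
/*
 * In-capsule (inline) data occupies whatever space the command capsule has
 * left after the submission queue entry itself.
 */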
static inline size_t nvme_rdma_inline_data_size(struct nvme_rdma_queue *queue)
{
	return queue->cmnd_capsule_len - sizeof(struct nvme_command);
}
174
175static void nvme_rdma_free_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
176 size_t capsule_size, enum dma_data_direction dir)
177{
178 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir);
179 kfree(qe->data);
180}
181
182static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
183 size_t capsule_size, enum dma_data_direction dir)
184{
185 qe->data = kzalloc(capsule_size, GFP_KERNEL);
186 if (!qe->data)
187 return -ENOMEM;
188
189 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
190 if (ib_dma_mapping_error(ibdev, qe->dma)) {
191 kfree(qe->data);
6344d02d 192 qe->data = NULL;
71102307
CH
193 return -ENOMEM;
194 }
195
196 return 0;
197}
198
199static void nvme_rdma_free_ring(struct ib_device *ibdev,
200 struct nvme_rdma_qe *ring, size_t ib_queue_size,
201 size_t capsule_size, enum dma_data_direction dir)
202{
203 int i;
204
205 for (i = 0; i < ib_queue_size; i++)
206 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir);
207 kfree(ring);
208}
209
210static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
211 size_t ib_queue_size, size_t capsule_size,
212 enum dma_data_direction dir)
213{
214 struct nvme_rdma_qe *ring;
215 int i;
216
217 ring = kcalloc(ib_queue_size, sizeof(struct nvme_rdma_qe), GFP_KERNEL);
218 if (!ring)
219 return NULL;
220
	/*
	 * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
	 * lifetime. It's safe, since any change in the underlying RDMA device
	 * will issue error recovery and queue re-creation.
	 */
71102307
CH
226 for (i = 0; i < ib_queue_size; i++) {
227 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
228 goto out_free_ring;
229 }
230
231 return ring;
232
233out_free_ring:
234 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir);
235 return NULL;
236}
237
238static void nvme_rdma_qp_event(struct ib_event *event, void *context)
239{
27a4beef
MG
240 pr_debug("QP event %s (%d)\n",
241 ib_event_msg(event->event), event->event);
242
71102307
CH
243}
244
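/*
 * Wait for the RDMA CM to finish address/route resolution or connection
 * establishment; returns -ETIMEDOUT if nothing arrives within the connect
 * timeout, otherwise whatever error the CM event handler recorded in
 * queue->cm_error (0 on success).
 */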
245static int nvme_rdma_wait_for_cm(struct nvme_rdma_queue *queue)
246{
35da77d5
BVA
247 int ret;
248
249 ret = wait_for_completion_interruptible_timeout(&queue->cm_done,
71102307 250 msecs_to_jiffies(NVME_RDMA_CONNECT_TIMEOUT_MS) + 1);
35da77d5
BVA
251 if (ret < 0)
252 return ret;
253 if (ret == 0)
254 return -ETIMEDOUT;
255 WARN_ON_ONCE(queue->cm_error > 0);
71102307
CH
256 return queue->cm_error;
257}
258
259static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
260{
261 struct nvme_rdma_device *dev = queue->device;
262 struct ib_qp_init_attr init_attr;
263 int ret;
264
265 memset(&init_attr, 0, sizeof(init_attr));
266 init_attr.event_handler = nvme_rdma_qp_event;
267 /* +1 for drain */
268 init_attr.cap.max_send_wr = factor * queue->queue_size + 1;
269 /* +1 for drain */
270 init_attr.cap.max_recv_wr = queue->queue_size + 1;
271 init_attr.cap.max_recv_sge = 1;
64a741c1 272 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
71102307
CH
273 init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
274 init_attr.qp_type = IB_QPT_RC;
275 init_attr.send_cq = queue->ib_cq;
276 init_attr.recv_cq = queue->ib_cq;
5ec5d3bd
MG
277 if (queue->pi_support)
278 init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
287f329e 279 init_attr.qp_context = queue;
71102307
CH
280
281 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);
282
283 queue->qp = queue->cm_id->qp;
284 return ret;
285}
286
385475ee
CH
287static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
288 struct request *rq, unsigned int hctx_idx)
71102307
CH
289{
290 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307 291
62f99b62 292 kfree(req->sqe.data);
71102307
CH
293}
294
385475ee
CH
295static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
296 struct request *rq, unsigned int hctx_idx,
297 unsigned int numa_node)
71102307 298{
385475ee 299 struct nvme_rdma_ctrl *ctrl = set->driver_data;
71102307 300 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
385475ee 301 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
71102307 302 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
71102307 303
59e29ce6 304 nvme_req(rq)->ctrl = &ctrl->ctrl;
62f99b62
MG
305 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
306 if (!req->sqe.data)
307 return -ENOMEM;
71102307 308
5ec5d3bd
MG
309 /* metadata nvme_rdma_sgl struct is located after command's data SGL */
310 if (queue->pi_support)
311 req->metadata_sgl = (void *)nvme_req(rq) +
312 sizeof(struct nvme_rdma_request) +
313 NVME_RDMA_DATA_SGL_SIZE;
314
71102307
CH
315 req->queue = queue;
316
317 return 0;
71102307
CH
318}
319
71102307
CH
320static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
321 unsigned int hctx_idx)
322{
323 struct nvme_rdma_ctrl *ctrl = data;
324 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1];
325
d858e5f0 326 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
71102307
CH
327
328 hctx->driver_data = queue;
329 return 0;
330}
331
332static int nvme_rdma_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
333 unsigned int hctx_idx)
334{
335 struct nvme_rdma_ctrl *ctrl = data;
336 struct nvme_rdma_queue *queue = &ctrl->queues[0];
337
338 BUG_ON(hctx_idx != 0);
339
340 hctx->driver_data = queue;
341 return 0;
342}
343
344static void nvme_rdma_free_dev(struct kref *ref)
345{
346 struct nvme_rdma_device *ndev =
347 container_of(ref, struct nvme_rdma_device, ref);
348
349 mutex_lock(&device_list_mutex);
350 list_del(&ndev->entry);
351 mutex_unlock(&device_list_mutex);
352
71102307 353 ib_dealloc_pd(ndev->pd);
71102307
CH
354 kfree(ndev);
355}
356
357static void nvme_rdma_dev_put(struct nvme_rdma_device *dev)
358{
359 kref_put(&dev->ref, nvme_rdma_free_dev);
360}
361
362static int nvme_rdma_dev_get(struct nvme_rdma_device *dev)
363{
364 return kref_get_unless_zero(&dev->ref);
365}
366
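/*
 * Look up the per-ib_device state for the device backing this cm_id (matched
 * by node GUID) and take a reference on it, allocating a new entry with its
 * own PD on first use. Returns NULL on failure.
 */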
367static struct nvme_rdma_device *
368nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)
369{
370 struct nvme_rdma_device *ndev;
371
372 mutex_lock(&device_list_mutex);
373 list_for_each_entry(ndev, &device_list, entry) {
374 if (ndev->dev->node_guid == cm_id->device->node_guid &&
375 nvme_rdma_dev_get(ndev))
376 goto out_unlock;
377 }
378
379 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
380 if (!ndev)
381 goto out_err;
382
383 ndev->dev = cm_id->device;
384 kref_init(&ndev->ref);
385
11975e01
CH
386 ndev->pd = ib_alloc_pd(ndev->dev,
387 register_always ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
71102307
CH
388 if (IS_ERR(ndev->pd))
389 goto out_free_dev;
390
71102307
CH
391 if (!(ndev->dev->attrs.device_cap_flags &
392 IB_DEVICE_MEM_MGT_EXTENSIONS)) {
393 dev_err(&ndev->dev->dev,
394 "Memory registrations not supported.\n");
11975e01 395 goto out_free_pd;
71102307
CH
396 }
397
64a741c1 398 ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS,
0a3173a5 399 ndev->dev->attrs.max_send_sge - 1);
71102307
CH
400 list_add(&ndev->entry, &device_list);
401out_unlock:
402 mutex_unlock(&device_list_mutex);
403 return ndev;
404
71102307
CH
405out_free_pd:
406 ib_dealloc_pd(ndev->pd);
407out_free_dev:
408 kfree(ndev);
409out_err:
410 mutex_unlock(&device_list_mutex);
411 return NULL;
412}
413
287f329e
YF
414static void nvme_rdma_free_cq(struct nvme_rdma_queue *queue)
415{
416 if (nvme_rdma_poll_queue(queue))
417 ib_free_cq(queue->ib_cq);
418 else
419 ib_cq_pool_put(queue->ib_cq, queue->cq_size);
420}
421
71102307
CH
422static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
423{
eb1bd249
MG
424 struct nvme_rdma_device *dev;
425 struct ib_device *ibdev;
426
427 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
428 return;
429
430 dev = queue->device;
431 ibdev = dev->dev;
71102307 432
5ec5d3bd
MG
433 if (queue->pi_support)
434 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs);
f41725bb
IR
435 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
436
	/*
	 * The cm_id object might have been destroyed during RDMA connection
	 * establishment error flow in order to avoid getting other cma events;
	 * thus the destruction of the QP shouldn't use the rdma_cm API.
	 */
442 ib_destroy_qp(queue->qp);
287f329e 443 nvme_rdma_free_cq(queue);
71102307
CH
444
445 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
446 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
447
448 nvme_rdma_dev_put(dev);
449}
450
5ec5d3bd 451static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev, bool pi_support)
f41725bb 452{
5ec5d3bd
MG
453 u32 max_page_list_len;
454
455 if (pi_support)
456 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len;
457 else
458 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len;
459
460 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1);
f41725bb
IR
461}
462
287f329e
YF
463static int nvme_rdma_create_cq(struct ib_device *ibdev,
464 struct nvme_rdma_queue *queue)
465{
466 int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
467 enum ib_poll_context poll_ctx;
468
	/*
	 * Spread I/O queues' completion vectors according to their queue index.
	 * The admin queue can always go on completion vector 0.
	 */
473 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
474
475 /* Polling queues need direct cq polling context */
476 if (nvme_rdma_poll_queue(queue)) {
477 poll_ctx = IB_POLL_DIRECT;
478 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
479 comp_vector, poll_ctx);
480 } else {
481 poll_ctx = IB_POLL_SOFTIRQ;
482 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
483 comp_vector, poll_ctx);
484 }
485
486 if (IS_ERR(queue->ib_cq)) {
487 ret = PTR_ERR(queue->ib_cq);
488 return ret;
489 }
490
491 return 0;
492}
493
ca6e95bb 494static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
71102307 495{
ca6e95bb 496 struct ib_device *ibdev;
71102307
CH
497 const int send_wr_factor = 3; /* MR, SEND, INV */
498 const int cq_factor = send_wr_factor + 1; /* + RECV */
ff13c1b8 499 int ret, pages_per_mr;
71102307 500
ca6e95bb
SG
501 queue->device = nvme_rdma_find_get_device(queue->cm_id);
502 if (!queue->device) {
503 dev_err(queue->cm_id->device->dev.parent,
504 "no client data found!\n");
505 return -ECONNREFUSED;
506 }
507 ibdev = queue->device->dev;
71102307 508
71102307 509 /* +1 for ib_stop_cq */
287f329e
YF
510 queue->cq_size = cq_factor * queue->queue_size + 1;
511
512 ret = nvme_rdma_create_cq(ibdev, queue);
513 if (ret)
ca6e95bb 514 goto out_put_dev;
71102307
CH
515
516 ret = nvme_rdma_create_qp(queue, send_wr_factor);
517 if (ret)
518 goto out_destroy_ib_cq;
519
520 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,
521 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
522 if (!queue->rsp_ring) {
523 ret = -ENOMEM;
524 goto out_destroy_qp;
525 }
526
	/*
	 * Currently we don't use SG_GAPS MRs, so if the first entry is
	 * misaligned we'll end up using two entries for a single data page;
	 * hence one additional entry is required.
	 */
5ec5d3bd 532 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1;
f41725bb
IR
533 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
534 queue->queue_size,
535 IB_MR_TYPE_MEM_REG,
ff13c1b8 536 pages_per_mr, 0);
f41725bb
IR
537 if (ret) {
538 dev_err(queue->ctrl->ctrl.device,
539 "failed to initialize MR pool sized %d for QID %d\n",
287f329e 540 queue->queue_size, nvme_rdma_queue_idx(queue));
f41725bb
IR
541 goto out_destroy_ring;
542 }
543
5ec5d3bd
MG
544 if (queue->pi_support) {
545 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs,
546 queue->queue_size, IB_MR_TYPE_INTEGRITY,
547 pages_per_mr, pages_per_mr);
548 if (ret) {
549 dev_err(queue->ctrl->ctrl.device,
550 "failed to initialize PI MR pool sized %d for QID %d\n",
287f329e 551 queue->queue_size, nvme_rdma_queue_idx(queue));
5ec5d3bd
MG
552 goto out_destroy_mr_pool;
553 }
554 }
555
eb1bd249
MG
556 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
557
71102307
CH
558 return 0;
559
5ec5d3bd
MG
560out_destroy_mr_pool:
561 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
f41725bb
IR
562out_destroy_ring:
563 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
564 sizeof(struct nvme_completion), DMA_FROM_DEVICE);
71102307 565out_destroy_qp:
1f61def9 566 rdma_destroy_qp(queue->cm_id);
71102307 567out_destroy_ib_cq:
287f329e 568 nvme_rdma_free_cq(queue);
ca6e95bb
SG
569out_put_dev:
570 nvme_rdma_dev_put(queue->device);
71102307
CH
571 return ret;
572}
573
41e8cfa1 574static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl,
71102307
CH
575 int idx, size_t queue_size)
576{
577 struct nvme_rdma_queue *queue;
8f4e8dac 578 struct sockaddr *src_addr = NULL;
71102307
CH
579 int ret;
580
581 queue = &ctrl->queues[idx];
582 queue->ctrl = ctrl;
5ec5d3bd
MG
583 if (idx && ctrl->ctrl.max_integrity_segments)
584 queue->pi_support = true;
585 else
586 queue->pi_support = false;
71102307
CH
587 init_completion(&queue->cm_done);
588
589 if (idx > 0)
590 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
591 else
592 queue->cmnd_capsule_len = sizeof(struct nvme_command);
593
594 queue->queue_size = queue_size;
595
596 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
597 RDMA_PS_TCP, IB_QPT_RC);
598 if (IS_ERR(queue->cm_id)) {
599 dev_info(ctrl->ctrl.device,
600 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id));
601 return PTR_ERR(queue->cm_id);
602 }
603
8f4e8dac 604 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR)
0928f9b4 605 src_addr = (struct sockaddr *)&ctrl->src_addr;
8f4e8dac 606
0928f9b4
SG
607 queue->cm_error = -ETIMEDOUT;
608 ret = rdma_resolve_addr(queue->cm_id, src_addr,
609 (struct sockaddr *)&ctrl->addr,
71102307
CH
610 NVME_RDMA_CONNECT_TIMEOUT_MS);
611 if (ret) {
612 dev_info(ctrl->ctrl.device,
613 "rdma_resolve_addr failed (%d).\n", ret);
614 goto out_destroy_cm_id;
615 }
616
617 ret = nvme_rdma_wait_for_cm(queue);
618 if (ret) {
619 dev_info(ctrl->ctrl.device,
d8bfceeb 620 "rdma connection establishment failed (%d)\n", ret);
71102307
CH
621 goto out_destroy_cm_id;
622 }
623
5013e98b 624 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags);
71102307
CH
625
626 return 0;
627
628out_destroy_cm_id:
629 rdma_destroy_id(queue->cm_id);
eb1bd249 630 nvme_rdma_destroy_queue_ib(queue);
71102307
CH
631 return ret;
632}
633
d94211b8
SG
634static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
635{
636 rdma_disconnect(queue->cm_id);
637 ib_drain_qp(queue->qp);
638}
639
71102307
CH
640static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
641{
a57bd541
SG
642 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
643 return;
d94211b8 644 __nvme_rdma_stop_queue(queue);
71102307
CH
645}
646
647static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
648{
5013e98b 649 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
a57bd541
SG
650 return;
651
71102307
CH
652 nvme_rdma_destroy_queue_ib(queue);
653 rdma_destroy_id(queue->cm_id);
654}
655
a57bd541 656static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307 657{
a57bd541
SG
658 int i;
659
660 for (i = 1; i < ctrl->ctrl.queue_count; i++)
661 nvme_rdma_free_queue(&ctrl->queues[i]);
71102307
CH
662}
663
a57bd541 664static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307
CH
665{
666 int i;
667
d858e5f0 668 for (i = 1; i < ctrl->ctrl.queue_count; i++)
a57bd541 669 nvme_rdma_stop_queue(&ctrl->queues[i]);
71102307
CH
670}
671
68e16fcf
SG
672static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
673{
ff8519f9
SG
674 struct nvme_rdma_queue *queue = &ctrl->queues[idx];
675 bool poll = nvme_rdma_poll_queue(queue);
68e16fcf
SG
676 int ret;
677
678 if (idx)
ff8519f9 679 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll);
68e16fcf
SG
680 else
681 ret = nvmf_connect_admin_queue(&ctrl->ctrl);
682
d94211b8 683 if (!ret) {
ff8519f9 684 set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
d94211b8 685 } else {
67b483dd
SG
686 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
687 __nvme_rdma_stop_queue(queue);
68e16fcf
SG
688 dev_info(ctrl->ctrl.device,
689 "failed to connect queue: %d ret=%d\n", idx, ret);
d94211b8 690 }
68e16fcf
SG
691 return ret;
692}
693
694static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307
CH
695{
696 int i, ret = 0;
697
d858e5f0 698 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
68e16fcf
SG
699 ret = nvme_rdma_start_queue(ctrl, i);
700 if (ret)
a57bd541 701 goto out_stop_queues;
71102307
CH
702 }
703
c8dbc37c
SW
704 return 0;
705
a57bd541 706out_stop_queues:
68e16fcf
SG
707 for (i--; i >= 1; i--)
708 nvme_rdma_stop_queue(&ctrl->queues[i]);
71102307
CH
709 return ret;
710}
711
41e8cfa1 712static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
71102307 713{
c248c643 714 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
0b36658c 715 struct ib_device *ibdev = ctrl->device->dev;
5651cd3c
SG
716 unsigned int nr_io_queues, nr_default_queues;
717 unsigned int nr_read_queues, nr_poll_queues;
71102307
CH
718 int i, ret;
719
5651cd3c
SG
720 nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
721 min(opts->nr_io_queues, num_online_cpus()));
722 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
723 min(opts->nr_write_queues, num_online_cpus()));
724 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
725 nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
b65bb777 726
c248c643
SG
727 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
728 if (ret)
729 return ret;
730
d858e5f0
SG
731 ctrl->ctrl.queue_count = nr_io_queues + 1;
732 if (ctrl->ctrl.queue_count < 2)
c248c643
SG
733 return 0;
734
735 dev_info(ctrl->ctrl.device,
736 "creating %d I/O queues.\n", nr_io_queues);
737
5651cd3c
SG
738 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
739 /*
740 * separate read/write queues
741 * hand out dedicated default queues only after we have
742 * sufficient read queues.
743 */
744 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
745 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
746 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
747 min(nr_default_queues, nr_io_queues);
748 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
749 } else {
750 /*
751 * shared read/write queues
752 * either no write queues were requested, or we don't have
753 * sufficient queue count to have dedicated default queues.
754 */
755 ctrl->io_queues[HCTX_TYPE_DEFAULT] =
756 min(nr_read_queues, nr_io_queues);
757 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
758 }
759
760 if (opts->nr_poll_queues && nr_io_queues) {
761 /* map dedicated poll queues only if we have queues left */
762 ctrl->io_queues[HCTX_TYPE_POLL] =
763 min(nr_poll_queues, nr_io_queues);
764 }
765
d858e5f0 766 for (i = 1; i < ctrl->ctrl.queue_count; i++) {
41e8cfa1
SG
767 ret = nvme_rdma_alloc_queue(ctrl, i,
768 ctrl->ctrl.sqsize + 1);
769 if (ret)
71102307 770 goto out_free_queues;
71102307
CH
771 }
772
773 return 0;
774
775out_free_queues:
f361e5a0 776 for (i--; i >= 1; i--)
a57bd541 777 nvme_rdma_free_queue(&ctrl->queues[i]);
71102307
CH
778
779 return ret;
780}
781
b28a308e
SG
782static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
783 bool admin)
784{
785 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
786 struct blk_mq_tag_set *set;
787 int ret;
788
789 if (admin) {
790 set = &ctrl->admin_tag_set;
791 memset(set, 0, sizeof(*set));
792 set->ops = &nvme_rdma_admin_mq_ops;
38dabe21 793 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
b28a308e 794 set->reserved_tags = 2; /* connect + keep-alive */
103e515e 795 set->numa_node = nctrl->numa_node;
b28a308e 796 set->cmd_size = sizeof(struct nvme_rdma_request) +
5ec5d3bd 797 NVME_RDMA_DATA_SGL_SIZE;
b28a308e
SG
798 set->driver_data = ctrl;
799 set->nr_hw_queues = 1;
800 set->timeout = ADMIN_TIMEOUT;
94f29d4f 801 set->flags = BLK_MQ_F_NO_SCHED;
b28a308e
SG
802 } else {
803 set = &ctrl->tag_set;
804 memset(set, 0, sizeof(*set));
805 set->ops = &nvme_rdma_mq_ops;
5e77d61c 806 set->queue_depth = nctrl->sqsize + 1;
b28a308e 807 set->reserved_tags = 1; /* fabric connect */
103e515e 808 set->numa_node = nctrl->numa_node;
b28a308e
SG
809 set->flags = BLK_MQ_F_SHOULD_MERGE;
810 set->cmd_size = sizeof(struct nvme_rdma_request) +
5ec5d3bd
MG
811 NVME_RDMA_DATA_SGL_SIZE;
812 if (nctrl->max_integrity_segments)
813 set->cmd_size += sizeof(struct nvme_rdma_sgl) +
814 NVME_RDMA_METADATA_SGL_SIZE;
b28a308e
SG
815 set->driver_data = ctrl;
816 set->nr_hw_queues = nctrl->queue_count - 1;
817 set->timeout = NVME_IO_TIMEOUT;
ff8519f9 818 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2;
b28a308e
SG
819 }
820
821 ret = blk_mq_alloc_tag_set(set);
822 if (ret)
87fd1253 823 return ERR_PTR(ret);
b28a308e
SG
824
825 return set;
b28a308e
SG
826}
827
3f02fffb
SG
828static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
829 bool remove)
71102307 830{
3f02fffb
SG
831 if (remove) {
832 blk_cleanup_queue(ctrl->ctrl.admin_q);
e7832cb4 833 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
87fd1253 834 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
3f02fffb 835 }
682630f0
SG
836 if (ctrl->async_event_sqe.data) {
837 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
838 sizeof(struct nvme_command), DMA_TO_DEVICE);
839 ctrl->async_event_sqe.data = NULL;
840 }
a57bd541 841 nvme_rdma_free_queue(&ctrl->queues[0]);
71102307
CH
842}
843
3f02fffb
SG
844static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
845 bool new)
90af3512 846{
5ec5d3bd 847 bool pi_capable = false;
90af3512
SG
848 int error;
849
41e8cfa1 850 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH);
90af3512
SG
851 if (error)
852 return error;
853
854 ctrl->device = ctrl->queues[0].device;
103e515e 855 ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device);
90af3512 856
5ec5d3bd
MG
857 /* T10-PI support */
858 if (ctrl->device->dev->attrs.device_cap_flags &
859 IB_DEVICE_INTEGRITY_HANDOVER)
860 pi_capable = true;
861
862 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev,
863 pi_capable);
90af3512 864
	/*
	 * Bind the async event SQE DMA mapping to the admin queue lifetime.
	 * It's safe, since any change in the underlying RDMA device will issue
	 * error recovery and queue re-creation.
	 */
94e42213
SG
870 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
871 sizeof(struct nvme_command), DMA_TO_DEVICE);
872 if (error)
873 goto out_free_queue;
874
3f02fffb
SG
875 if (new) {
876 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
f04b9cc8
SG
877 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
878 error = PTR_ERR(ctrl->ctrl.admin_tagset);
94e42213 879 goto out_free_async_qe;
f04b9cc8 880 }
90af3512 881
e7832cb4
SG
882 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
883 if (IS_ERR(ctrl->ctrl.fabrics_q)) {
884 error = PTR_ERR(ctrl->ctrl.fabrics_q);
885 goto out_free_tagset;
886 }
887
3f02fffb
SG
888 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
889 if (IS_ERR(ctrl->ctrl.admin_q)) {
890 error = PTR_ERR(ctrl->ctrl.admin_q);
e7832cb4 891 goto out_cleanup_fabrics_q;
3f02fffb 892 }
90af3512
SG
893 }
894
68e16fcf 895 error = nvme_rdma_start_queue(ctrl, 0);
90af3512
SG
896 if (error)
897 goto out_cleanup_queue;
898
c0f2f45b 899 error = nvme_enable_ctrl(&ctrl->ctrl);
90af3512 900 if (error)
2e050f00 901 goto out_stop_queue;
90af3512 902
ff13c1b8
MG
903 ctrl->ctrl.max_segments = ctrl->max_fr_pages;
904 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
5ec5d3bd
MG
905 if (pi_capable)
906 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages;
907 else
908 ctrl->ctrl.max_integrity_segments = 0;
90af3512 909
e7832cb4
SG
910 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
911
90af3512
SG
912 error = nvme_init_identify(&ctrl->ctrl);
913 if (error)
2e050f00 914 goto out_stop_queue;
90af3512 915
90af3512
SG
916 return 0;
917
2e050f00
JW
918out_stop_queue:
919 nvme_rdma_stop_queue(&ctrl->queues[0]);
90af3512 920out_cleanup_queue:
3f02fffb
SG
921 if (new)
922 blk_cleanup_queue(ctrl->ctrl.admin_q);
e7832cb4
SG
923out_cleanup_fabrics_q:
924 if (new)
925 blk_cleanup_queue(ctrl->ctrl.fabrics_q);
90af3512 926out_free_tagset:
3f02fffb 927 if (new)
87fd1253 928 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
94e42213 929out_free_async_qe:
9134ae2a
PS
930 if (ctrl->async_event_sqe.data) {
931 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
932 sizeof(struct nvme_command), DMA_TO_DEVICE);
933 ctrl->async_event_sqe.data = NULL;
934 }
90af3512
SG
935out_free_queue:
936 nvme_rdma_free_queue(&ctrl->queues[0]);
937 return error;
938}
939
a57bd541
SG
940static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
941 bool remove)
942{
a57bd541
SG
943 if (remove) {
944 blk_cleanup_queue(ctrl->ctrl.connect_q);
87fd1253 945 blk_mq_free_tag_set(ctrl->ctrl.tagset);
a57bd541
SG
946 }
947 nvme_rdma_free_io_queues(ctrl);
948}
949
950static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new)
951{
952 int ret;
953
41e8cfa1 954 ret = nvme_rdma_alloc_io_queues(ctrl);
a57bd541
SG
955 if (ret)
956 return ret;
957
958 if (new) {
959 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false);
f04b9cc8
SG
960 if (IS_ERR(ctrl->ctrl.tagset)) {
961 ret = PTR_ERR(ctrl->ctrl.tagset);
a57bd541 962 goto out_free_io_queues;
f04b9cc8 963 }
a57bd541
SG
964
965 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
966 if (IS_ERR(ctrl->ctrl.connect_q)) {
967 ret = PTR_ERR(ctrl->ctrl.connect_q);
968 goto out_free_tag_set;
969 }
a57bd541
SG
970 }
971
68e16fcf 972 ret = nvme_rdma_start_io_queues(ctrl);
a57bd541
SG
973 if (ret)
974 goto out_cleanup_connect_q;
975
9f98772b
SG
976 if (!new) {
977 nvme_start_queues(&ctrl->ctrl);
978 nvme_wait_freeze(&ctrl->ctrl);
979 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
980 ctrl->ctrl.queue_count - 1);
981 nvme_unfreeze(&ctrl->ctrl);
982 }
983
a57bd541
SG
984 return 0;
985
986out_cleanup_connect_q:
987 if (new)
988 blk_cleanup_queue(ctrl->ctrl.connect_q);
989out_free_tag_set:
990 if (new)
87fd1253 991 blk_mq_free_tag_set(ctrl->ctrl.tagset);
a57bd541
SG
992out_free_io_queues:
993 nvme_rdma_free_io_queues(ctrl);
994 return ret;
71102307
CH
995}
996
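/*
 * Quiesce and stop the admin queue, then fail any commands still outstanding
 * in the admin tag set before freeing the queue resources.
 */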
997static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
998 bool remove)
999{
1000 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
1001 nvme_rdma_stop_queue(&ctrl->queues[0]);
622b8b68 1002 if (ctrl->ctrl.admin_tagset) {
1007709d
SG
1003 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
1004 nvme_cancel_request, &ctrl->ctrl);
622b8b68
ML
1005 blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset);
1006 }
e7832cb4
SG
1007 if (remove)
1008 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
75862c72
SG
1009 nvme_rdma_destroy_admin_queue(ctrl, remove);
1010}
1011
1012static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
1013 bool remove)
1014{
1015 if (ctrl->ctrl.queue_count > 1) {
9f98772b 1016 nvme_start_freeze(&ctrl->ctrl);
75862c72
SG
1017 nvme_stop_queues(&ctrl->ctrl);
1018 nvme_rdma_stop_io_queues(ctrl);
622b8b68 1019 if (ctrl->ctrl.tagset) {
1007709d
SG
1020 blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
1021 nvme_cancel_request, &ctrl->ctrl);
622b8b68
ML
1022 blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset);
1023 }
75862c72
SG
1024 if (remove)
1025 nvme_start_queues(&ctrl->ctrl);
1026 nvme_rdma_destroy_io_queues(ctrl, remove);
1027 }
1028}
1029
71102307
CH
1030static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
1031{
1032 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
1033
1034 if (list_empty(&ctrl->list))
1035 goto free_ctrl;
1036
1037 mutex_lock(&nvme_rdma_ctrl_mutex);
1038 list_del(&ctrl->list);
1039 mutex_unlock(&nvme_rdma_ctrl_mutex);
1040
71102307
CH
1041 nvmf_free_options(nctrl->opts);
1042free_ctrl:
3d064101 1043 kfree(ctrl->queues);
71102307
CH
1044 kfree(ctrl);
1045}
1046
fd8563ce
SG
1047static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
1048{
1049 /* If we are resetting/deleting then do nothing */
ad6a0a52 1050 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
fd8563ce
SG
1051 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
1052 ctrl->ctrl.state == NVME_CTRL_LIVE);
1053 return;
1054 }
1055
1056 if (nvmf_should_reconnect(&ctrl->ctrl)) {
1057 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n",
1058 ctrl->ctrl.opts->reconnect_delay);
9a6327d2 1059 queue_delayed_work(nvme_wq, &ctrl->reconnect_work,
fd8563ce
SG
1060 ctrl->ctrl.opts->reconnect_delay * HZ);
1061 } else {
12fa1304 1062 nvme_delete_ctrl(&ctrl->ctrl);
fd8563ce
SG
1063 }
1064}
1065
c66e2998 1066static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
71102307 1067{
c66e2998 1068 int ret = -EINVAL;
71102307 1069 bool changed;
71102307 1070
c66e2998 1071 ret = nvme_rdma_configure_admin_queue(ctrl, new);
71102307 1072 if (ret)
c66e2998
SG
1073 return ret;
1074
1075 if (ctrl->ctrl.icdoff) {
1076 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
1077 goto destroy_admin;
1078 }
1079
1080 if (!(ctrl->ctrl.sgls & (1 << 2))) {
1081 dev_err(ctrl->ctrl.device,
1082 "Mandatory keyed sgls are not supported!\n");
1083 goto destroy_admin;
1084 }
1085
1086 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) {
1087 dev_warn(ctrl->ctrl.device,
1088 "queue_size %zu > ctrl sqsize %u, clamping down\n",
1089 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
1090 }
1091
1092 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
1093 dev_warn(ctrl->ctrl.device,
1094 "sqsize %u > ctrl maxcmd %u, clamping down\n",
1095 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
1096 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
1097 }
71102307 1098
64a741c1
SW
1099 if (ctrl->ctrl.sgls & (1 << 20))
1100 ctrl->use_inline_data = true;
71102307 1101
d858e5f0 1102 if (ctrl->ctrl.queue_count > 1) {
c66e2998 1103 ret = nvme_rdma_configure_io_queues(ctrl, new);
71102307 1104 if (ret)
5e1fe61d 1105 goto destroy_admin;
71102307
CH
1106 }
1107
1108 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
0a960afd 1109 if (!changed) {
		/*
		 * State change failure is OK if we started controller delete,
		 * unless we are in the middle of creating a new controller,
		 * in which case it would race with the teardown flow.
		 */
ecca390e
SG
1115 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
1116 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
96135862 1117 WARN_ON_ONCE(new);
c66e2998
SG
1118 ret = -EINVAL;
1119 goto destroy_io;
0a960afd
SG
1120 }
1121
d09f2b45 1122 nvme_start_ctrl(&ctrl->ctrl);
c66e2998
SG
1123 return 0;
1124
1125destroy_io:
1126 if (ctrl->ctrl.queue_count > 1)
1127 nvme_rdma_destroy_io_queues(ctrl, new);
1128destroy_admin:
1129 nvme_rdma_stop_queue(&ctrl->queues[0]);
1130 nvme_rdma_destroy_admin_queue(ctrl, new);
1131 return ret;
1132}
1133
1134static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
1135{
1136 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work),
1137 struct nvme_rdma_ctrl, reconnect_work);
1138
1139 ++ctrl->ctrl.nr_reconnects;
1140
1141 if (nvme_rdma_setup_ctrl(ctrl, false))
1142 goto requeue;
71102307 1143
5e1fe61d
SG
1144 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n",
1145 ctrl->ctrl.nr_reconnects);
1146
1147 ctrl->ctrl.nr_reconnects = 0;
71102307
CH
1148
1149 return;
1150
71102307 1151requeue:
fd8563ce 1152 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
fdf9dfa8 1153 ctrl->ctrl.nr_reconnects);
fd8563ce 1154 nvme_rdma_reconnect_or_remove(ctrl);
71102307
CH
1155}
1156
1157static void nvme_rdma_error_recovery_work(struct work_struct *work)
1158{
1159 struct nvme_rdma_ctrl *ctrl = container_of(work,
1160 struct nvme_rdma_ctrl, err_work);
1161
e4d753d7 1162 nvme_stop_keep_alive(&ctrl->ctrl);
75862c72 1163 nvme_rdma_teardown_io_queues(ctrl, false);
e818a5b4 1164 nvme_start_queues(&ctrl->ctrl);
75862c72 1165 nvme_rdma_teardown_admin_queue(ctrl, false);
e7832cb4 1166 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
e818a5b4 1167
ad6a0a52 1168 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
ecca390e
SG
1169 /* state change failure is ok if we started ctrl delete */
1170 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
1171 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
d5bf4b7f
SG
1172 return;
1173 }
1174
fd8563ce 1175 nvme_rdma_reconnect_or_remove(ctrl);
71102307
CH
1176}
1177
1178static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
1179{
d5bf4b7f 1180 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
71102307
CH
1181 return;
1182
97b2512a 1183 queue_work(nvme_reset_wq, &ctrl->err_work);
71102307
CH
1184}
1185
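/*
 * Drop one of the two references taken for the send and receive completions;
 * once both have fired, hand the request back to the core, calling
 * nvme_rdma_complete_rq() directly if the block layer did not complete it.
 */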
1186static void nvme_rdma_end_request(struct nvme_rdma_request *req)
1187{
1188 struct request *rq = blk_mq_rq_from_pdu(req);
1189
1190 if (!refcount_dec_and_test(&req->ref))
1191 return;
2eb81a33 1192 if (!nvme_try_complete_req(rq, req->status, req->result))
ff029451 1193 nvme_rdma_complete_rq(rq);
8446546c
CH
1194}
1195
71102307
CH
1196static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
1197 const char *op)
1198{
287f329e 1199 struct nvme_rdma_queue *queue = wc->qp->qp_context;
71102307
CH
1200 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1201
1202 if (ctrl->ctrl.state == NVME_CTRL_LIVE)
1203 dev_info(ctrl->ctrl.device,
1204 "%s for CQE 0x%p failed with status %s (%d)\n",
1205 op, wc->wr_cqe,
1206 ib_wc_status_msg(wc->status), wc->status);
1207 nvme_rdma_error_recovery(ctrl);
1208}
1209
1210static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc)
1211{
1212 if (unlikely(wc->status != IB_WC_SUCCESS))
1213 nvme_rdma_wr_error(cq, wc, "MEMREG");
1214}
1215
1216static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
1217{
2f122e4f
SG
1218 struct nvme_rdma_request *req =
1219 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
2f122e4f 1220
8446546c 1221 if (unlikely(wc->status != IB_WC_SUCCESS))
71102307 1222 nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
8446546c
CH
1223 else
1224 nvme_rdma_end_request(req);
71102307
CH
1225}
1226
1227static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
1228 struct nvme_rdma_request *req)
1229{
71102307
CH
1230 struct ib_send_wr wr = {
1231 .opcode = IB_WR_LOCAL_INV,
1232 .next = NULL,
1233 .num_sge = 0,
2f122e4f 1234 .send_flags = IB_SEND_SIGNALED,
71102307
CH
1235 .ex.invalidate_rkey = req->mr->rkey,
1236 };
1237
1238 req->reg_cqe.done = nvme_rdma_inv_rkey_done;
1239 wr.wr_cqe = &req->reg_cqe;
1240
45e3cc1a 1241 return ib_post_send(queue->qp, &wr, NULL);
71102307
CH
1242}
1243
1244static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
1245 struct request *rq)
1246{
1247 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
71102307
CH
1248 struct nvme_rdma_device *dev = queue->device;
1249 struct ib_device *ibdev = dev->dev;
5ec5d3bd 1250 struct list_head *pool = &queue->qp->rdma_mrs;
71102307 1251
34e08191 1252 if (!blk_rq_nr_phys_segments(rq))
71102307
CH
1253 return;
1254
5ec5d3bd
MG
1255 if (blk_integrity_rq(rq)) {
1256 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1257 req->metadata_sgl->nents, rq_dma_dir(rq));
1258 sg_free_table_chained(&req->metadata_sgl->sg_table,
1259 NVME_INLINE_METADATA_SG_CNT);
1260 }
1261
1262 if (req->use_sig_mr)
1263 pool = &queue->qp->sig_mrs;
1264
f41725bb 1265 if (req->mr) {
5ec5d3bd 1266 ib_mr_pool_put(queue->qp, pool, req->mr);
f41725bb
IR
1267 req->mr = NULL;
1268 }
1269
324d9e78
IR
1270 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1271 rq_dma_dir(rq));
1272 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
71102307
CH
1273}
1274
1275static int nvme_rdma_set_sg_null(struct nvme_command *c)
1276{
1277 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1278
1279 sg->addr = 0;
1280 put_unaligned_le24(0, sg->length);
1281 put_unaligned_le32(0, sg->key);
1282 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1283 return 0;
1284}
1285
1286static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue,
64a741c1
SW
1287 struct nvme_rdma_request *req, struct nvme_command *c,
1288 int count)
71102307
CH
1289{
1290 struct nvme_sgl_desc *sg = &c->common.dptr.sgl;
324d9e78 1291 struct scatterlist *sgl = req->data_sgl.sg_table.sgl;
64a741c1
SW
1292 struct ib_sge *sge = &req->sge[1];
1293 u32 len = 0;
1294 int i;
71102307 1295
64a741c1
SW
1296 for (i = 0; i < count; i++, sgl++, sge++) {
1297 sge->addr = sg_dma_address(sgl);
1298 sge->length = sg_dma_len(sgl);
1299 sge->lkey = queue->device->pd->local_dma_lkey;
1300 len += sge->length;
1301 }
71102307
CH
1302
1303 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff);
64a741c1 1304 sg->length = cpu_to_le32(len);
71102307
CH
1305 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET;
1306
64a741c1 1307 req->num_sge += count;
71102307
CH
1308 return 0;
1309}
1310
1311static int nvme_rdma_map_sg_single(struct nvme_rdma_queue *queue,
1312 struct nvme_rdma_request *req, struct nvme_command *c)
1313{
1314 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1315
324d9e78
IR
1316 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl));
1317 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length);
11975e01 1318 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key);
71102307
CH
1319 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1320 return 0;
1321}
1322
1323static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
1324 struct nvme_rdma_request *req, struct nvme_command *c,
1325 int count)
1326{
1327 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1328 int nr;
1329
f41725bb
IR
1330 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
1331 if (WARN_ON_ONCE(!req->mr))
1332 return -EAGAIN;
1333
b925a2dc
MG
1334 /*
1335 * Align the MR to a 4K page size to match the ctrl page size and
1336 * the block virtual boundary.
1337 */
324d9e78
IR
1338 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL,
1339 SZ_4K);
a7b7c7a1 1340 if (unlikely(nr < count)) {
f41725bb
IR
1341 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
1342 req->mr = NULL;
71102307
CH
1343 if (nr < 0)
1344 return nr;
1345 return -EINVAL;
1346 }
1347
1348 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1349
1350 req->reg_cqe.done = nvme_rdma_memreg_done;
1351 memset(&req->reg_wr, 0, sizeof(req->reg_wr));
1352 req->reg_wr.wr.opcode = IB_WR_REG_MR;
1353 req->reg_wr.wr.wr_cqe = &req->reg_cqe;
1354 req->reg_wr.wr.num_sge = 0;
1355 req->reg_wr.mr = req->mr;
1356 req->reg_wr.key = req->mr->rkey;
1357 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE |
1358 IB_ACCESS_REMOTE_READ |
1359 IB_ACCESS_REMOTE_WRITE;
1360
71102307
CH
1361 sg->addr = cpu_to_le64(req->mr->iova);
1362 put_unaligned_le24(req->mr->length, sg->length);
1363 put_unaligned_le32(req->mr->rkey, sg->key);
1364 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) |
1365 NVME_SGL_FMT_INVALIDATE;
1366
1367 return 0;
1368}
1369
5ec5d3bd
MG
1370static void nvme_rdma_set_sig_domain(struct blk_integrity *bi,
1371 struct nvme_command *cmd, struct ib_sig_domain *domain,
1372 u16 control, u8 pi_type)
1373{
1374 domain->sig_type = IB_SIG_TYPE_T10_DIF;
1375 domain->sig.dif.bg_type = IB_T10DIF_CRC;
1376 domain->sig.dif.pi_interval = 1 << bi->interval_exp;
1377 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
1378 if (control & NVME_RW_PRINFO_PRCHK_REF)
1379 domain->sig.dif.ref_remap = true;
1380
1381 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
1382 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
1383 domain->sig.dif.app_escape = true;
1384 if (pi_type == NVME_NS_DPS_PI_TYPE3)
1385 domain->sig.dif.ref_escape = true;
1386}
1387
1388static void nvme_rdma_set_sig_attrs(struct blk_integrity *bi,
1389 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs,
1390 u8 pi_type)
1391{
1392 u16 control = le16_to_cpu(cmd->rw.control);
1393
1394 memset(sig_attrs, 0, sizeof(*sig_attrs));
1395 if (control & NVME_RW_PRINFO_PRACT) {
1396 /* for WRITE_INSERT/READ_STRIP no memory domain */
1397 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
1398 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1399 pi_type);
1400 /* Clear the PRACT bit since HCA will generate/verify the PI */
1401 control &= ~NVME_RW_PRINFO_PRACT;
1402 cmd->rw.control = cpu_to_le16(control);
1403 } else {
1404 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */
1405 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
1406 pi_type);
1407 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
1408 pi_type);
1409 }
1410}
1411
1412static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
1413{
1414 *mask = 0;
1415 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
1416 *mask |= IB_SIG_CHECK_REFTAG;
1417 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
1418 *mask |= IB_SIG_CHECK_GUARD;
1419}
1420
1421static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc)
1422{
1423 if (unlikely(wc->status != IB_WC_SUCCESS))
1424 nvme_rdma_wr_error(cq, wc, "SIG");
1425}
1426
1427static int nvme_rdma_map_sg_pi(struct nvme_rdma_queue *queue,
1428 struct nvme_rdma_request *req, struct nvme_command *c,
1429 int count, int pi_count)
1430{
1431 struct nvme_rdma_sgl *sgl = &req->data_sgl;
1432 struct ib_reg_wr *wr = &req->reg_wr;
1433 struct request *rq = blk_mq_rq_from_pdu(req);
1434 struct nvme_ns *ns = rq->q->queuedata;
1435 struct bio *bio = rq->bio;
1436 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
1437 int nr;
1438
1439 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs);
1440 if (WARN_ON_ONCE(!req->mr))
1441 return -EAGAIN;
1442
1443 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL,
1444 req->metadata_sgl->sg_table.sgl, pi_count, NULL,
1445 SZ_4K);
1446 if (unlikely(nr))
1447 goto mr_put;
1448
1449 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c,
1450 req->mr->sig_attrs, ns->pi_type);
1451 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask);
1452
1453 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
1454
1455 req->reg_cqe.done = nvme_rdma_sig_done;
1456 memset(wr, 0, sizeof(*wr));
1457 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
1458 wr->wr.wr_cqe = &req->reg_cqe;
1459 wr->wr.num_sge = 0;
1460 wr->wr.send_flags = 0;
1461 wr->mr = req->mr;
1462 wr->key = req->mr->rkey;
1463 wr->access = IB_ACCESS_LOCAL_WRITE |
1464 IB_ACCESS_REMOTE_READ |
1465 IB_ACCESS_REMOTE_WRITE;
1466
1467 sg->addr = cpu_to_le64(req->mr->iova);
1468 put_unaligned_le24(req->mr->length, sg->length);
1469 put_unaligned_le32(req->mr->rkey, sg->key);
1470 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4;
1471
1472 return 0;
1473
1474mr_put:
1475 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr);
1476 req->mr = NULL;
1477 if (nr < 0)
1478 return nr;
1479 return -EINVAL;
1480}
1481
71102307 1482static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
b131c61d 1483 struct request *rq, struct nvme_command *c)
71102307
CH
1484{
1485 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1486 struct nvme_rdma_device *dev = queue->device;
1487 struct ib_device *ibdev = dev->dev;
5ec5d3bd 1488 int pi_count = 0;
f9d03f96 1489 int count, ret;
71102307
CH
1490
1491 req->num_sge = 1;
4af7f7ff 1492 refcount_set(&req->ref, 2); /* send and recv completions */
71102307
CH
1493
1494 c->common.flags |= NVME_CMD_SGL_METABUF;
1495
34e08191 1496 if (!blk_rq_nr_phys_segments(rq))
71102307
CH
1497 return nvme_rdma_set_sg_null(c);
1498
324d9e78
IR
1499 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1);
1500 ret = sg_alloc_table_chained(&req->data_sgl.sg_table,
1501 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl,
38e18002 1502 NVME_INLINE_SG_CNT);
71102307
CH
1503 if (ret)
1504 return -ENOMEM;
1505
324d9e78
IR
1506 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq,
1507 req->data_sgl.sg_table.sgl);
71102307 1508
324d9e78
IR
1509 count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl,
1510 req->data_sgl.nents, rq_dma_dir(rq));
71102307 1511 if (unlikely(count <= 0)) {
94423a8f
MG
1512 ret = -EIO;
1513 goto out_free_table;
71102307
CH
1514 }
1515
5ec5d3bd
MG
1516 if (blk_integrity_rq(rq)) {
1517 req->metadata_sgl->sg_table.sgl =
1518 (struct scatterlist *)(req->metadata_sgl + 1);
1519 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table,
1520 blk_rq_count_integrity_sg(rq->q, rq->bio),
1521 req->metadata_sgl->sg_table.sgl,
1522 NVME_INLINE_METADATA_SG_CNT);
1523 if (unlikely(ret)) {
1524 ret = -ENOMEM;
1525 goto out_unmap_sg;
1526 }
1527
1528 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q,
1529 rq->bio, req->metadata_sgl->sg_table.sgl);
1530 pi_count = ib_dma_map_sg(ibdev,
1531 req->metadata_sgl->sg_table.sgl,
1532 req->metadata_sgl->nents,
1533 rq_dma_dir(rq));
1534 if (unlikely(pi_count <= 0)) {
1535 ret = -EIO;
1536 goto out_free_pi_table;
1537 }
1538 }
1539
1540 if (req->use_sig_mr) {
1541 ret = nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);
1542 goto out;
1543 }
1544
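	/*
	 * Map small writes as in-capsule (inline) data on I/O queues when the
	 * controller supports it; use a single SGE with the unsafe global rkey
	 * when registration is disabled, and otherwise register the buffer
	 * through a fast-registration MR.
	 */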
64a741c1 1545 if (count <= dev->num_inline_segments) {
b131c61d 1546 if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
64a741c1 1547 queue->ctrl->use_inline_data &&
b131c61d 1548 blk_rq_payload_bytes(rq) <=
94423a8f 1549 nvme_rdma_inline_data_size(queue)) {
64a741c1 1550 ret = nvme_rdma_map_sg_inline(queue, req, c, count);
94423a8f
MG
1551 goto out;
1552 }
71102307 1553
64a741c1 1554 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
94423a8f
MG
1555 ret = nvme_rdma_map_sg_single(queue, req, c);
1556 goto out;
1557 }
71102307
CH
1558 }
1559
94423a8f
MG
1560 ret = nvme_rdma_map_sg_fr(queue, req, c, count);
1561out:
1562 if (unlikely(ret))
5ec5d3bd 1563 goto out_unmap_pi_sg;
94423a8f
MG
1564
1565 return 0;
1566
5ec5d3bd
MG
1567out_unmap_pi_sg:
1568 if (blk_integrity_rq(rq))
1569 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl,
1570 req->metadata_sgl->nents, rq_dma_dir(rq));
1571out_free_pi_table:
1572 if (blk_integrity_rq(rq))
1573 sg_free_table_chained(&req->metadata_sgl->sg_table,
1574 NVME_INLINE_METADATA_SG_CNT);
94423a8f 1575out_unmap_sg:
324d9e78
IR
1576 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents,
1577 rq_dma_dir(rq));
94423a8f 1578out_free_table:
324d9e78 1579 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT);
94423a8f 1580 return ret;
71102307
CH
1581}
1582
1583static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
1584{
4af7f7ff
SG
1585 struct nvme_rdma_qe *qe =
1586 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
1587 struct nvme_rdma_request *req =
1588 container_of(qe, struct nvme_rdma_request, sqe);
4af7f7ff 1589
8446546c 1590 if (unlikely(wc->status != IB_WC_SUCCESS))
71102307 1591 nvme_rdma_wr_error(cq, wc, "SEND");
8446546c
CH
1592 else
1593 nvme_rdma_end_request(req);
71102307
CH
1594}
1595
1596static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
1597 struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
b4b591c8 1598 struct ib_send_wr *first)
71102307 1599{
45e3cc1a 1600 struct ib_send_wr wr;
71102307
CH
1601 int ret;
1602
1603 sge->addr = qe->dma;
a62315b8 1604 sge->length = sizeof(struct nvme_command);
71102307
CH
1605 sge->lkey = queue->device->pd->local_dma_lkey;
1606
71102307
CH
1607 wr.next = NULL;
1608 wr.wr_cqe = &qe->cqe;
1609 wr.sg_list = sge;
1610 wr.num_sge = num_sge;
1611 wr.opcode = IB_WR_SEND;
b4b591c8 1612 wr.send_flags = IB_SEND_SIGNALED;
71102307
CH
1613
1614 if (first)
1615 first->next = &wr;
1616 else
1617 first = &wr;
1618
45e3cc1a 1619 ret = ib_post_send(queue->qp, first, NULL);
a7b7c7a1 1620 if (unlikely(ret)) {
71102307
CH
1621 dev_err(queue->ctrl->ctrl.device,
1622 "%s failed with error code %d\n", __func__, ret);
1623 }
1624 return ret;
1625}
1626
1627static int nvme_rdma_post_recv(struct nvme_rdma_queue *queue,
1628 struct nvme_rdma_qe *qe)
1629{
45e3cc1a 1630 struct ib_recv_wr wr;
71102307
CH
1631 struct ib_sge list;
1632 int ret;
1633
1634 list.addr = qe->dma;
1635 list.length = sizeof(struct nvme_completion);
1636 list.lkey = queue->device->pd->local_dma_lkey;
1637
1638 qe->cqe.done = nvme_rdma_recv_done;
1639
1640 wr.next = NULL;
1641 wr.wr_cqe = &qe->cqe;
1642 wr.sg_list = &list;
1643 wr.num_sge = 1;
1644
45e3cc1a 1645 ret = ib_post_recv(queue->qp, &wr, NULL);
a7b7c7a1 1646 if (unlikely(ret)) {
71102307
CH
1647 dev_err(queue->ctrl->ctrl.device,
1648 "%s failed with error code %d\n", __func__, ret);
1649 }
1650 return ret;
1651}
1652
1653static struct blk_mq_tags *nvme_rdma_tagset(struct nvme_rdma_queue *queue)
1654{
1655 u32 queue_idx = nvme_rdma_queue_idx(queue);
1656
1657 if (queue_idx == 0)
1658 return queue->ctrl->admin_tag_set.tags[queue_idx];
1659 return queue->ctrl->tag_set.tags[queue_idx - 1];
1660}
1661
b4b591c8
SG
1662static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
1663{
1664 if (unlikely(wc->status != IB_WC_SUCCESS))
1665 nvme_rdma_wr_error(cq, wc, "ASYNC");
1666}
1667
ad22c355 1668static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
71102307
CH
1669{
1670 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
1671 struct nvme_rdma_queue *queue = &ctrl->queues[0];
1672 struct ib_device *dev = queue->device->dev;
1673 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
1674 struct nvme_command *cmd = sqe->data;
1675 struct ib_sge sge;
1676 int ret;
1677
71102307
CH
1678 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
1679
1680 memset(cmd, 0, sizeof(*cmd));
1681 cmd->common.opcode = nvme_admin_async_event;
38dabe21 1682 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH;
71102307
CH
1683 cmd->common.flags |= NVME_CMD_SGL_METABUF;
1684 nvme_rdma_set_sg_null(cmd);
1685
b4b591c8
SG
1686 sqe->cqe.done = nvme_rdma_async_done;
1687
71102307
CH
1688 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
1689 DMA_TO_DEVICE);
1690
b4b591c8 1691 ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
71102307
CH
1692 WARN_ON_ONCE(ret);
1693}
1694
1052b8ac
JA
1695static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
1696 struct nvme_completion *cqe, struct ib_wc *wc)
71102307 1697{
71102307
CH
1698 struct request *rq;
1699 struct nvme_rdma_request *req;
71102307 1700
71102307
CH
1701 rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
1702 if (!rq) {
1703 dev_err(queue->ctrl->ctrl.device,
1704 "tag 0x%x on QP %#x not found\n",
1705 cqe->command_id, queue->qp->qp_num);
1706 nvme_rdma_error_recovery(queue->ctrl);
1052b8ac 1707 return;
71102307
CH
1708 }
1709 req = blk_mq_rq_to_pdu(rq);
1710
4af7f7ff
SG
1711 req->status = cqe->status;
1712 req->result = cqe->result;
71102307 1713
3ef0279b
SG
1714 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
1715 if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
1716 dev_err(queue->ctrl->ctrl.device,
1717 "Bogus remote invalidation for rkey %#x\n",
1718 req->mr->rkey);
1719 nvme_rdma_error_recovery(queue->ctrl);
1720 }
f41725bb 1721 } else if (req->mr) {
1052b8ac
JA
1722 int ret;
1723
2f122e4f
SG
1724 ret = nvme_rdma_inv_rkey(queue, req);
1725 if (unlikely(ret < 0)) {
1726 dev_err(queue->ctrl->ctrl.device,
1727 "Queueing INV WR for rkey %#x failed (%d)\n",
1728 req->mr->rkey, ret);
1729 nvme_rdma_error_recovery(queue->ctrl);
1730 }
1731 /* the local invalidation completion will end the request */
7a804c34 1732 return;
2f122e4f 1733 }
7a804c34
CH
1734
1735 nvme_rdma_end_request(req);
71102307
CH
1736}
1737
1052b8ac 1738static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
71102307
CH
1739{
1740 struct nvme_rdma_qe *qe =
1741 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
287f329e 1742 struct nvme_rdma_queue *queue = wc->qp->qp_context;
71102307
CH
1743 struct ib_device *ibdev = queue->device->dev;
1744 struct nvme_completion *cqe = qe->data;
1745 const size_t len = sizeof(struct nvme_completion);
71102307
CH
1746
1747 if (unlikely(wc->status != IB_WC_SUCCESS)) {
1748 nvme_rdma_wr_error(cq, wc, "RECV");
1052b8ac 1749 return;
71102307
CH
1750 }
1751
1752 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1753 /*
1754 * AEN requests are special as they don't time out and can
1755 * survive any kind of queue freeze and often don't respond to
1756 * aborts. We don't even bother to allocate a struct request
1757 * for them but rather special case them here.
1758 */
58a8df67
IR
1759 if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
1760 cqe->command_id)))
7bf58533
CH
1761 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
1762 &cqe->result);
71102307 1763 else
1052b8ac 1764 nvme_rdma_process_nvme_rsp(queue, cqe, wc);
71102307
CH
1765 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
1766
1767 nvme_rdma_post_recv(queue, qe);
71102307
CH
1768}
1769
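/*
 * Once the RDMA connection is established, post a receive buffer for
 * every slot in the response ring; on failure tear down the queue's IB
 * resources.
 */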
1770static int nvme_rdma_conn_established(struct nvme_rdma_queue *queue)
1771{
1772 int ret, i;
1773
1774 for (i = 0; i < queue->queue_size; i++) {
1775 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]);
1776 if (ret)
1777 goto out_destroy_queue_ib;
1778 }
1779
1780 return 0;
1781
1782out_destroy_queue_ib:
1783 nvme_rdma_destroy_queue_ib(queue);
1784 return ret;
1785}
1786
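/*
 * Decode a connection reject event, logging both the RDMA CM reject
 * reason and, when present, the NVMe-oF status carried in the private
 * reject data.
 */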
1787static int nvme_rdma_conn_rejected(struct nvme_rdma_queue *queue,
1788 struct rdma_cm_event *ev)
1789{
7f03953c
SW
1790 struct rdma_cm_id *cm_id = queue->cm_id;
1791 int status = ev->status;
1792 const char *rej_msg;
1793 const struct nvme_rdma_cm_rej *rej_data;
1794 u8 rej_data_len;
1795
1796 rej_msg = rdma_reject_msg(cm_id, status);
1797 rej_data = rdma_consumer_reject_data(cm_id, ev, &rej_data_len);
1798
1799 if (rej_data && rej_data_len >= sizeof(u16)) {
1800 u16 sts = le16_to_cpu(rej_data->sts);
71102307
CH
1801
1802 dev_err(queue->ctrl->ctrl.device,
7f03953c
SW
1803 "Connect rejected: status %d (%s) nvme status %d (%s).\n",
1804 status, rej_msg, sts, nvme_rdma_cm_msg(sts));
71102307
CH
1805 } else {
1806 dev_err(queue->ctrl->ctrl.device,
7f03953c 1807 "Connect rejected: status %d (%s).\n", status, rej_msg);
71102307
CH
1808 }
1809
1810 return -ECONNRESET;
1811}
1812
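/*
 * Address resolution completed: create the queue's IB resources, apply
 * the requested type of service (if one was set) and kick off route
 * resolution.
 */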
1813static int nvme_rdma_addr_resolved(struct nvme_rdma_queue *queue)
1814{
e63440d6 1815 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl;
71102307
CH
1816 int ret;
1817
ca6e95bb
SG
1818 ret = nvme_rdma_create_queue_ib(queue);
1819 if (ret)
1820 return ret;
71102307 1821
e63440d6
IR
1822 if (ctrl->opts->tos >= 0)
1823 rdma_set_service_type(queue->cm_id, ctrl->opts->tos);
71102307
CH
1824 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS);
1825 if (ret) {
e63440d6 1826 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n",
71102307
CH
1827 ret);
1828 goto out_destroy_queue;
1829 }
1830
1831 return 0;
1832
1833out_destroy_queue:
1834 nvme_rdma_destroy_queue_ib(queue);
71102307
CH
1835 return ret;
1836}
1837
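/*
 * Route resolution completed: build the RDMA connection parameters and
 * the NVMe-oF CM request (queue id and host receive/send queue sizes),
 * then issue rdma_connect().
 */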
1838static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
1839{
1840 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
1841 struct rdma_conn_param param = { };
0b857b44 1842 struct nvme_rdma_cm_req priv = { };
71102307
CH
1843 int ret;
1844
1845 param.qp_num = queue->qp->qp_num;
1846 param.flow_control = 1;
1847
1848 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom;
2ac17c28
SG
1849 /* maximum retry count */
1850 param.retry_count = 7;
71102307
CH
1851 param.rnr_retry_count = 7;
1852 param.private_data = &priv;
1853 param.private_data_len = sizeof(priv);
1854
1855 priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
1856 priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
f994d9dc
JF
1857 /*
1858 * set the admin queue depth to the minimum size
1859 * specified by the Fabrics standard.
1860 */
1861 if (priv.qid == 0) {
7aa1f427
SG
1862 priv.hrqsize = cpu_to_le16(NVME_AQ_DEPTH);
1863 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1);
f994d9dc 1864 } else {
c5af8654
JF
1865 /*
1866 * current interpretation of the fabrics spec
1867 * is at minimum you make hrqsize sqsize+1, or a
1868 * 1's based representation of sqsize.
1869 */
f994d9dc 1870 priv.hrqsize = cpu_to_le16(queue->queue_size);
c5af8654 1871 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
f994d9dc 1872 }
71102307
CH
1873
1874 ret = rdma_connect(queue->cm_id, &param);
1875 if (ret) {
1876 dev_err(ctrl->ctrl.device,
1877 "rdma_connect failed (%d).\n", ret);
1878 goto out_destroy_queue_ib;
1879 }
1880
1881 return 0;
1882
1883out_destroy_queue_ib:
1884 nvme_rdma_destroy_queue_ib(queue);
1885 return ret;
1886}
1887
71102307
CH
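/*
 * RDMA CM event handler: drive connection establishment state by state,
 * translate fatal CM events into error recovery, and signal cm_done so
 * the waiter in queue setup can observe the outcome.
 */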
1888static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
1889 struct rdma_cm_event *ev)
1890{
1891 struct nvme_rdma_queue *queue = cm_id->context;
1892 int cm_error = 0;
1893
1894 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n",
1895 rdma_event_msg(ev->event), ev->event,
1896 ev->status, cm_id);
1897
1898 switch (ev->event) {
1899 case RDMA_CM_EVENT_ADDR_RESOLVED:
1900 cm_error = nvme_rdma_addr_resolved(queue);
1901 break;
1902 case RDMA_CM_EVENT_ROUTE_RESOLVED:
1903 cm_error = nvme_rdma_route_resolved(queue);
1904 break;
1905 case RDMA_CM_EVENT_ESTABLISHED:
1906 queue->cm_error = nvme_rdma_conn_established(queue);
1907 /* complete cm_done regardless of success/failure */
1908 complete(&queue->cm_done);
1909 return 0;
1910 case RDMA_CM_EVENT_REJECTED:
abf87d5e 1911 nvme_rdma_destroy_queue_ib(queue);
71102307
CH
1912 cm_error = nvme_rdma_conn_rejected(queue, ev);
1913 break;
71102307
CH
1914 case RDMA_CM_EVENT_ROUTE_ERROR:
1915 case RDMA_CM_EVENT_CONNECT_ERROR:
1916 case RDMA_CM_EVENT_UNREACHABLE:
abf87d5e 1917 nvme_rdma_destroy_queue_ib(queue);
249090f9 1918 /* fall through */
abf87d5e 1919 case RDMA_CM_EVENT_ADDR_ERROR:
71102307
CH
1920 dev_dbg(queue->ctrl->ctrl.device,
1921 "CM error event %d\n", ev->event);
1922 cm_error = -ECONNRESET;
1923 break;
1924 case RDMA_CM_EVENT_DISCONNECTED:
1925 case RDMA_CM_EVENT_ADDR_CHANGE:
1926 case RDMA_CM_EVENT_TIMEWAIT_EXIT:
1927 dev_dbg(queue->ctrl->ctrl.device,
1928 "disconnect received - connection closed\n");
1929 nvme_rdma_error_recovery(queue->ctrl);
1930 break;
1931 case RDMA_CM_EVENT_DEVICE_REMOVAL:
e87a911f
SW
1932 /* device removal is handled via the ib_client API */
1933 break;
71102307
CH
1934 default:
1935 dev_err(queue->ctrl->ctrl.device,
1936 "Unexpected RDMA CM event (%d)\n", ev->event);
1937 nvme_rdma_error_recovery(queue->ctrl);
1938 break;
1939 }
1940
1941 if (cm_error) {
1942 queue->cm_error = cm_error;
1943 complete(&queue->cm_done);
1944 }
1945
1946 return 0;
1947}
1948
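/*
 * blk-mq timeout handler: while a reset is scheduled just rearm the
 * timer; if the controller is not live, tear the queues down and return
 * BLK_EH_DONE; otherwise trigger error recovery and rearm the timer.
 */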
1949static enum blk_eh_timer_return
1950nvme_rdma_timeout(struct request *rq, bool reserved)
1951{
1952 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
4c174e63
SG
1953 struct nvme_rdma_queue *queue = req->queue;
1954 struct nvme_rdma_ctrl *ctrl = queue->ctrl;
71102307 1955
4c174e63
SG
1956 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
1957 rq->tag, nvme_rdma_queue_idx(queue));
e62a538d 1958
92b98e88
KB
1959 /*
1960 * Restart the timer if a controller reset is already scheduled. Any
1961 * timed out commands would be handled before entering the connecting
1962 * state.
1963 */
1964 if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
1965 return BLK_EH_RESET_TIMER;
1966
4c174e63
SG
1967 if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
1968 /*
1969 * Tear down immediately if the controller times out while starting
1970 * or if we have already started error recovery. All outstanding
1971 * requests are completed on shutdown, so we return BLK_EH_DONE.
1972 */
1973 flush_work(&ctrl->err_work);
1974 nvme_rdma_teardown_io_queues(ctrl, false);
1975 nvme_rdma_teardown_admin_queue(ctrl, false);
1976 return BLK_EH_DONE;
1977 }
71102307 1978
4c174e63
SG
1979 dev_warn(ctrl->ctrl.device, "starting error recovery\n");
1980 nvme_rdma_error_recovery(ctrl);
71102307 1981
4c174e63 1982 return BLK_EH_RESET_TIMER;
71102307
CH
1983}
1984
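/*
 * Queue a request: map the command capsule for DMA, set up the NVMe
 * command, decide whether a signature (PI) MR is needed, map the data,
 * and post the send work request (with a memory registration WR chained
 * in front when one is required).
 */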
fc17b653 1985static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
71102307
CH
1986 const struct blk_mq_queue_data *bd)
1987{
1988 struct nvme_ns *ns = hctx->queue->queuedata;
1989 struct nvme_rdma_queue *queue = hctx->driver_data;
1990 struct request *rq = bd->rq;
1991 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
1992 struct nvme_rdma_qe *sqe = &req->sqe;
1993 struct nvme_command *c = sqe->data;
71102307 1994 struct ib_device *dev;
3bc32bb1 1995 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags);
fc17b653
CH
1996 blk_status_t ret;
1997 int err;
71102307
CH
1998
1999 WARN_ON_ONCE(rq->tag < 0);
2000
3bc32bb1 2001 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
6cdefc6e 2002 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
553cd9ef 2003
71102307 2004 dev = queue->device->dev;
62f99b62
MG
2005
2006 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
2007 sizeof(struct nvme_command),
2008 DMA_TO_DEVICE);
2009 err = ib_dma_mapping_error(dev, req->sqe.dma);
2010 if (unlikely(err))
2011 return BLK_STS_RESOURCE;
2012
71102307
CH
2013 ib_dma_sync_single_for_cpu(dev, sqe->dma,
2014 sizeof(struct nvme_command), DMA_TO_DEVICE);
2015
2016 ret = nvme_setup_cmd(ns, rq, c);
fc17b653 2017 if (ret)
62f99b62 2018 goto unmap_qe;
71102307 2019
71102307
CH
2020 blk_mq_start_request(rq);
2021
5ec5d3bd
MG
2022 if (IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY) &&
2023 queue->pi_support &&
2024 (c->common.opcode == nvme_cmd_write ||
2025 c->common.opcode == nvme_cmd_read) &&
2026 nvme_ns_has_pi(ns))
2027 req->use_sig_mr = true;
2028 else
2029 req->use_sig_mr = false;
2030
fc17b653 2031 err = nvme_rdma_map_data(queue, rq, c);
a7b7c7a1 2032 if (unlikely(err < 0)) {
71102307 2033 dev_err(queue->ctrl->ctrl.device,
fc17b653 2034 "Failed to map data (%d)\n", err);
71102307
CH
2035 goto err;
2036 }
2037
b4b591c8
SG
2038 sqe->cqe.done = nvme_rdma_send_done;
2039
71102307
CH
2040 ib_dma_sync_single_for_device(dev, sqe->dma,
2041 sizeof(struct nvme_command), DMA_TO_DEVICE);
2042
fc17b653 2043 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
f41725bb 2044 req->mr ? &req->reg_wr.wr : NULL);
16686f3a
MG
2045 if (unlikely(err))
2046 goto err_unmap;
71102307 2047
fc17b653 2048 return BLK_STS_OK;
62f99b62 2049
16686f3a
MG
2050err_unmap:
2051 nvme_rdma_unmap_data(queue, rq);
71102307 2052err:
fc17b653 2053 if (err == -ENOMEM || err == -EAGAIN)
62f99b62
MG
2054 ret = BLK_STS_RESOURCE;
2055 else
2056 ret = BLK_STS_IOERR;
16686f3a 2057 nvme_cleanup_cmd(rq);
62f99b62
MG
2058unmap_qe:
2059 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
2060 DMA_TO_DEVICE);
2061 return ret;
71102307
CH
2062}
2063
ff8519f9
SG
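/* Poll the queue's completion queue directly for blk-mq polled I/O. */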
2064static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
2065{
2066 struct nvme_rdma_queue *queue = hctx->driver_data;
2067
2068 return ib_process_cq_direct(queue->ib_cq, -1);
2069}
2070
5ec5d3bd
MG
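/*
 * Check the signature MR after a PI-protected transfer and translate any
 * guard/reftag/apptag error into the corresponding NVMe status code; a
 * failed status query is reported as NVME_SC_INVALID_PI.
 */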
2071static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
2072{
2073 struct request *rq = blk_mq_rq_from_pdu(req);
2074 struct ib_mr_status mr_status;
2075 int ret;
2076
2077 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
2078 if (ret) {
2079 pr_err("ib_check_mr_status failed, ret %d\n", ret);
2080 nvme_req(rq)->status = NVME_SC_INVALID_PI;
2081 return;
2082 }
2083
2084 if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
2085 switch (mr_status.sig_err.err_type) {
2086 case IB_SIG_BAD_GUARD:
2087 nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
2088 break;
2089 case IB_SIG_BAD_REFTAG:
2090 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
2091 break;
2092 case IB_SIG_BAD_APPTAG:
2093 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
2094 break;
2095 }
2096 pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
2097 mr_status.sig_err.err_type, mr_status.sig_err.expected,
2098 mr_status.sig_err.actual);
2099 }
2100}
2101
71102307
CH
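/*
 * Final request completion: verify PI status if a signature MR was used,
 * unmap the data and the command capsule, then complete the request in
 * the core.
 */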
2102static void nvme_rdma_complete_rq(struct request *rq)
2103{
2104 struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
62f99b62
MG
2105 struct nvme_rdma_queue *queue = req->queue;
2106 struct ib_device *ibdev = queue->device->dev;
71102307 2107
5ec5d3bd
MG
2108 if (req->use_sig_mr)
2109 nvme_rdma_check_pi_status(req);
2110
62f99b62
MG
2111 nvme_rdma_unmap_data(queue, rq);
2112 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
2113 DMA_TO_DEVICE);
77f02a7a 2114 nvme_complete_rq(rq);
71102307
CH
2115}
2116
0b36658c
SG
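/*
 * Map hardware contexts to queues: use separate default and read queue
 * sets when dedicated write queues were requested, share them otherwise,
 * and map dedicated poll queues last when any are available.
 */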
2117static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
2118{
2119 struct nvme_rdma_ctrl *ctrl = set->driver_data;
5651cd3c 2120 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
0b36658c 2121
5651cd3c 2122 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
b65bb777 2123 /* separate read/write queues */
5651cd3c
SG
2124 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2125 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2126 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2127 set->map[HCTX_TYPE_READ].nr_queues =
2128 ctrl->io_queues[HCTX_TYPE_READ];
b65bb777 2129 set->map[HCTX_TYPE_READ].queue_offset =
5651cd3c 2130 ctrl->io_queues[HCTX_TYPE_DEFAULT];
b65bb777 2131 } else {
5651cd3c
SG
2132 /* shared read/write queues */
2133 set->map[HCTX_TYPE_DEFAULT].nr_queues =
2134 ctrl->io_queues[HCTX_TYPE_DEFAULT];
2135 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
2136 set->map[HCTX_TYPE_READ].nr_queues =
2137 ctrl->io_queues[HCTX_TYPE_DEFAULT];
b65bb777
SG
2138 set->map[HCTX_TYPE_READ].queue_offset = 0;
2139 }
2140 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
2141 ctrl->device->dev, 0);
2142 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
2143 ctrl->device->dev, 0);
ff8519f9 2144
5651cd3c
SG
2145 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
2146 /* map dedicated poll queues only if we have queues left */
ff8519f9 2147 set->map[HCTX_TYPE_POLL].nr_queues =
b1064d3e 2148 ctrl->io_queues[HCTX_TYPE_POLL];
ff8519f9 2149 set->map[HCTX_TYPE_POLL].queue_offset =
5651cd3c
SG
2150 ctrl->io_queues[HCTX_TYPE_DEFAULT] +
2151 ctrl->io_queues[HCTX_TYPE_READ];
ff8519f9
SG
2152 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
2153 }
5651cd3c
SG
2154
2155 dev_info(ctrl->ctrl.device,
2156 "mapped %d/%d/%d default/read/poll queues.\n",
2157 ctrl->io_queues[HCTX_TYPE_DEFAULT],
2158 ctrl->io_queues[HCTX_TYPE_READ],
2159 ctrl->io_queues[HCTX_TYPE_POLL]);
2160
b65bb777 2161 return 0;
0b36658c
SG
2162}
2163
f363b089 2164static const struct blk_mq_ops nvme_rdma_mq_ops = {
71102307
CH
2165 .queue_rq = nvme_rdma_queue_rq,
2166 .complete = nvme_rdma_complete_rq,
71102307
CH
2167 .init_request = nvme_rdma_init_request,
2168 .exit_request = nvme_rdma_exit_request,
71102307 2169 .init_hctx = nvme_rdma_init_hctx,
71102307 2170 .timeout = nvme_rdma_timeout,
0b36658c 2171 .map_queues = nvme_rdma_map_queues,
ff8519f9 2172 .poll = nvme_rdma_poll,
71102307
CH
2173};
2174
f363b089 2175static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
71102307
CH
2176 .queue_rq = nvme_rdma_queue_rq,
2177 .complete = nvme_rdma_complete_rq,
385475ee
CH
2178 .init_request = nvme_rdma_init_request,
2179 .exit_request = nvme_rdma_exit_request,
71102307
CH
2180 .init_hctx = nvme_rdma_init_admin_hctx,
2181 .timeout = nvme_rdma_timeout,
2182};
2183
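/*
 * Common teardown path: stop error recovery and reconnect work, tear
 * down the I/O queues, quiesce the admin queue, then shut down (or
 * disable) the controller before tearing down the admin queue.
 */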
18398af2 2184static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
71102307 2185{
794a4cb3
SG
2186 cancel_work_sync(&ctrl->err_work);
2187 cancel_delayed_work_sync(&ctrl->reconnect_work);
2188
75862c72 2189 nvme_rdma_teardown_io_queues(ctrl, shutdown);
e7832cb4 2190 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
18398af2 2191 if (shutdown)
71102307 2192 nvme_shutdown_ctrl(&ctrl->ctrl);
18398af2 2193 else
b5b05048 2194 nvme_disable_ctrl(&ctrl->ctrl);
75862c72 2195 nvme_rdma_teardown_admin_queue(ctrl, shutdown);
71102307
CH
2196}
2197
c5017e85 2198static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
2461a8dd 2199{
e9bc2587 2200 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true);
71102307
CH
2201}
2202
71102307
CH
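/*
 * Controller reset work: stop the controller and tear it down,
 * transition to CONNECTING, then run setup again; on failure fall back
 * to the reconnect or removal path.
 */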
2203static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
2204{
d86c4d8e
CH
2205 struct nvme_rdma_ctrl *ctrl =
2206 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work);
71102307 2207
d09f2b45 2208 nvme_stop_ctrl(&ctrl->ctrl);
18398af2 2209 nvme_rdma_shutdown_ctrl(ctrl, false);
71102307 2210
ad6a0a52 2211 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
d5bf4b7f
SG
2212 /* state change failure should never happen */
2213 WARN_ON_ONCE(1);
2214 return;
2215 }
2216
c66e2998 2217 if (nvme_rdma_setup_ctrl(ctrl, false))
370ae6e4 2218 goto out_fail;
71102307 2219
71102307
CH
2220 return;
2221
370ae6e4 2222out_fail:
8000d1fd
NC
2223 ++ctrl->ctrl.nr_reconnects;
2224 nvme_rdma_reconnect_or_remove(ctrl);
71102307
CH
2225}
2226
71102307
CH
2227static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
2228 .name = "rdma",
2229 .module = THIS_MODULE,
5ec5d3bd 2230 .flags = NVME_F_FABRICS | NVME_F_METADATA_SUPPORTED,
71102307
CH
2231 .reg_read32 = nvmf_reg_read32,
2232 .reg_read64 = nvmf_reg_read64,
2233 .reg_write32 = nvmf_reg_write32,
71102307
CH
2234 .free_ctrl = nvme_rdma_free_ctrl,
2235 .submit_async_event = nvme_rdma_submit_async_event,
c5017e85 2236 .delete_ctrl = nvme_rdma_delete_ctrl,
71102307
CH
2237 .get_address = nvmf_get_address,
2238};
2239
36e835f2
JS
2240/*
2241 * Fails a connection request if it matches an existing controller
2242 * (association) with the same tuple:
2243 * <Host NQN, Host ID, local address, remote address, remote port, SUBSYS NQN>
2244 *
2245 * If the local address is not specified in the request, it will match an
2246 * existing controller that has all the other parameters the same and
2247 * likewise has no local port address specified.
2248 *
2249 * The ports don't need to be compared as they are intrinsically
2250 * already matched by the port pointers supplied.
2251 */
2252static bool
2253nvme_rdma_existing_controller(struct nvmf_ctrl_options *opts)
2254{
2255 struct nvme_rdma_ctrl *ctrl;
2256 bool found = false;
2257
2258 mutex_lock(&nvme_rdma_ctrl_mutex);
2259 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
b7c7be6f 2260 found = nvmf_ip_options_match(&ctrl->ctrl, opts);
36e835f2
JS
2261 if (found)
2262 break;
2263 }
2264 mutex_unlock(&nvme_rdma_ctrl_mutex);
2265
2266 return found;
2267}
2268
71102307
CH
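/*
 * Create and start a new RDMA controller from the parsed connect options:
 * fill in the default NVMe/RDMA port when no trsvcid was given, resolve
 * the target (and optional host) address, reject duplicate associations,
 * allocate the queue array, and run the initial controller setup.
 */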
2269static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
2270 struct nvmf_ctrl_options *opts)
2271{
2272 struct nvme_rdma_ctrl *ctrl;
2273 int ret;
2274 bool changed;
2275
2276 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
2277 if (!ctrl)
2278 return ERR_PTR(-ENOMEM);
2279 ctrl->ctrl.opts = opts;
2280 INIT_LIST_HEAD(&ctrl->list);
2281
bb59b8e5
SG
2282 if (!(opts->mask & NVMF_OPT_TRSVCID)) {
2283 opts->trsvcid =
2284 kstrdup(__stringify(NVME_RDMA_IP_PORT), GFP_KERNEL);
2285 if (!opts->trsvcid) {
2286 ret = -ENOMEM;
2287 goto out_free_ctrl;
2288 }
2289 opts->mask |= NVMF_OPT_TRSVCID;
2290 }
0928f9b4
SG
2291
2292 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
bb59b8e5 2293 opts->traddr, opts->trsvcid, &ctrl->addr);
71102307 2294 if (ret) {
bb59b8e5
SG
2295 pr_err("malformed address passed: %s:%s\n",
2296 opts->traddr, opts->trsvcid);
71102307
CH
2297 goto out_free_ctrl;
2298 }
2299
8f4e8dac 2300 if (opts->mask & NVMF_OPT_HOST_TRADDR) {
0928f9b4
SG
2301 ret = inet_pton_with_scope(&init_net, AF_UNSPEC,
2302 opts->host_traddr, NULL, &ctrl->src_addr);
8f4e8dac 2303 if (ret) {
0928f9b4 2304 pr_err("malformed src address passed: %s\n",
8f4e8dac
MG
2305 opts->host_traddr);
2306 goto out_free_ctrl;
2307 }
2308 }
2309
36e835f2
JS
2310 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) {
2311 ret = -EALREADY;
2312 goto out_free_ctrl;
2313 }
2314
71102307
CH
2315 INIT_DELAYED_WORK(&ctrl->reconnect_work,
2316 nvme_rdma_reconnect_ctrl_work);
2317 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
d86c4d8e 2318 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
71102307 2319
ff8519f9
SG
2320 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues +
2321 opts->nr_poll_queues + 1;
c5af8654 2322 ctrl->ctrl.sqsize = opts->queue_size - 1;
71102307
CH
2323 ctrl->ctrl.kato = opts->kato;
2324
2325 ret = -ENOMEM;
d858e5f0 2326 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
71102307
CH
2327 GFP_KERNEL);
2328 if (!ctrl->queues)
3d064101
SG
2329 goto out_free_ctrl;
2330
2331 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
2332 0 /* no quirks, we're perfect! */);
2333 if (ret)
2334 goto out_kfree_queues;
71102307 2335
b754a32c
MG
2336 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
2337 WARN_ON_ONCE(!changed);
2338
c66e2998 2339 ret = nvme_rdma_setup_ctrl(ctrl, true);
71102307 2340 if (ret)
3d064101 2341 goto out_uninit_ctrl;
71102307 2342
0928f9b4 2343 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
71102307
CH
2344 ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
2345
71102307
CH
2346 mutex_lock(&nvme_rdma_ctrl_mutex);
2347 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
2348 mutex_unlock(&nvme_rdma_ctrl_mutex);
2349
71102307
CH
2350 return &ctrl->ctrl;
2351
71102307
CH
2352out_uninit_ctrl:
2353 nvme_uninit_ctrl(&ctrl->ctrl);
2354 nvme_put_ctrl(&ctrl->ctrl);
2355 if (ret > 0)
2356 ret = -EIO;
2357 return ERR_PTR(ret);
3d064101
SG
2358out_kfree_queues:
2359 kfree(ctrl->queues);
71102307
CH
2360out_free_ctrl:
2361 kfree(ctrl);
2362 return ERR_PTR(ret);
2363}
2364
2365static struct nvmf_transport_ops nvme_rdma_transport = {
2366 .name = "rdma",
0de5cd36 2367 .module = THIS_MODULE,
71102307 2368 .required_opts = NVMF_OPT_TRADDR,
8f4e8dac 2369 .allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
b65bb777 2370 NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
e63440d6
IR
2371 NVMF_OPT_NR_WRITE_QUEUES | NVMF_OPT_NR_POLL_QUEUES |
2372 NVMF_OPT_TOS,
71102307
CH
2373 .create_ctrl = nvme_rdma_create_ctrl,
2374};
2375
e87a911f
SW
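/*
 * ib_client removal callback: if the device going away is one we use,
 * delete every controller bound to it and wait for the deletions to
 * finish.
 */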
2376static void nvme_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2377{
2378 struct nvme_rdma_ctrl *ctrl;
9bad0404
MG
2379 struct nvme_rdma_device *ndev;
2380 bool found = false;
2381
2382 mutex_lock(&device_list_mutex);
2383 list_for_each_entry(ndev, &device_list, entry) {
2384 if (ndev->dev == ib_device) {
2385 found = true;
2386 break;
2387 }
2388 }
2389 mutex_unlock(&device_list_mutex);
2390
2391 if (!found)
2392 return;
e87a911f
SW
2393
2394 /* Delete all controllers using this device */
2395 mutex_lock(&nvme_rdma_ctrl_mutex);
2396 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) {
2397 if (ctrl->device->dev != ib_device)
2398 continue;
c5017e85 2399 nvme_delete_ctrl(&ctrl->ctrl);
e87a911f
SW
2400 }
2401 mutex_unlock(&nvme_rdma_ctrl_mutex);
2402
b227c59b 2403 flush_workqueue(nvme_delete_wq);
e87a911f
SW
2404}
2405
2406static struct ib_client nvme_rdma_ib_client = {
2407 .name = "nvme_rdma",
e87a911f
SW
2408 .remove = nvme_rdma_remove_one
2409};
2410
71102307
CH
2411static int __init nvme_rdma_init_module(void)
2412{
e87a911f
SW
2413 int ret;
2414
e87a911f 2415 ret = ib_register_client(&nvme_rdma_ib_client);
a56c79cf 2416 if (ret)
9a6327d2 2417 return ret;
a56c79cf
SG
2418
2419 ret = nvmf_register_transport(&nvme_rdma_transport);
2420 if (ret)
2421 goto err_unreg_client;
e87a911f 2422
a56c79cf 2423 return 0;
e87a911f 2424
a56c79cf
SG
2425err_unreg_client:
2426 ib_unregister_client(&nvme_rdma_ib_client);
a56c79cf 2427 return ret;
71102307
CH
2428}
2429
2430static void __exit nvme_rdma_cleanup_module(void)
2431{
9ad9e8d6
MG
2432 struct nvme_rdma_ctrl *ctrl;
2433
71102307 2434 nvmf_unregister_transport(&nvme_rdma_transport);
e87a911f 2435 ib_unregister_client(&nvme_rdma_ib_client);
9ad9e8d6
MG
2436
2437 mutex_lock(&nvme_rdma_ctrl_mutex);
2438 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list)
2439 nvme_delete_ctrl(&ctrl->ctrl);
2440 mutex_unlock(&nvme_rdma_ctrl_mutex);
2441 flush_workqueue(nvme_delete_wq);
71102307
CH
2442}
2443
2444module_init(nvme_rdma_init_module);
2445module_exit(nvme_rdma_cleanup_module);
2446
2447MODULE_LICENSE("GPL v2");