// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_cm.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
#define NVMET_RDMA_MAX_METADATA_MDTS		5

#define NVMET_RDMA_BACKLOG 128

struct nvmet_rdma_srq;

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
	struct nvmet_rdma_srq *nsrq;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct ib_cqe write_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct ib_qp *qp;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	struct nvmet_rdma_srq *nsrq;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int comp_vector;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id *cm_id;
	struct delayed_work repair_work;
};

struct nvmet_rdma_srq {
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *cmds;
	struct nvmet_rdma_device *ndev;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct nvmet_rdma_srq **srqs;
	int srq_count;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
	int inline_data_size;
	int inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should be >= 256 (default: 1024)");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}

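/* Number of PAGE_SIZE pages needed to hold @len bytes of inline data. */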
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

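/*
 * Grab a pre-allocated response from the queue's free list.  If the list
 * is empty (e.g. under memory pressure), fall back to a dynamic allocation
 * and mark it via rsp->allocated so nvmet_rdma_put_rsp() frees it again.
 */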
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
				struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

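/*
 * Inline data is received into per-command pages mapped for DMA and hung
 * off sge[1..inline_page_count]; sge[0] always carries the NVMe command.
 */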
static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
				sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	if (ib_dma_pci_p2p_dma_supported(ndev->device))
		r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	/* Data Out / RDMA WRITE */
	r->write_cqe.done = nvmet_rdma_write_data_done;

	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (cmd->nsrq)
		ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

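/*
 * Re-run commands that were queued because no send queue WR slots were
 * available; stop as soon as one of them still cannot be executed.
 */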
static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

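/*
 * T10-PI support: translate the NVMe protection information settings of a
 * command into RDMA signature attributes, and map signature MR errors back
 * to NVMe status codes once the transfer has completed.
 */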
static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;
	u16 status = 0;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		return NVME_SC_INVALID_PI;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}

	return status;
}

static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = 1 << bi->interval_exp;
	domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
	if (control & NVME_RW_PRINFO_PRCHK_REF)
		domain->sig.dif.ref_remap = true;

	domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
	domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
	domain->sig.dif.app_escape = true;
	if (pi_type == NVME_NS_DPS_PI_TYPE3)
		domain->sig.dif.ref_escape = true;
}

static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
		struct ib_sig_attrs *sig_attrs)
{
	struct nvme_command *cmd = req->cmd;
	u16 control = le16_to_cpu(cmd->rw.control);
	u8 pi_type = req->ns->pi_type;
	struct blk_integrity *bi;

	bi = bdev_get_integrity(req->ns->bdev);

	memset(sig_attrs, 0, sizeof(*sig_attrs));

	if (control & NVME_RW_PRINFO_PRACT) {
		/* for WRITE_INSERT/READ_STRIP no wire domain */
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
		/* Clear the PRACT bit since HCA will generate/verify the PI */
		control &= ~NVME_RW_PRINFO_PRACT;
		cmd->rw.control = cpu_to_le16(control);
		/* PI is added by the HW */
		req->transfer_len += req->metadata_len;
	} else {
		/* for WRITE_PASS/READ_PASS both wire/memory domains exist */
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					  pi_type);
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
	}

	if (control & NVME_RW_PRINFO_PRCHK_REF)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
	if (control & NVME_RW_PRINFO_PRCHK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (control & NVME_RW_PRINFO_PRCHK_APP)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
}

static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
				  struct ib_sig_attrs *sig_attrs)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;
	int ret;

	if (req->metadata_len)
		ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
			addr, key, nvmet_data_dir(req));
	else
		ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
				       req->sg, req->sg_cnt, 0, addr, key,
				       nvmet_data_dir(req));

	return ret;
}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;

	if (req->metadata_len)
		rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt,
			nvmet_data_dir(req));
	else
		rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
				    req->sg, req->sg_cnt, nvmet_data_dir(req));
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma)
		nvmet_rdma_rw_ctx_destroy(rsp);

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * We didn't set up the controller yet in case of an admin
		 * connect error, so just disconnect and clean up the queue.
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

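/*
 * Post the NVMe completion for a request.  If the command needs data sent
 * back to the host (Data Out), chain the RDMA WRITE work requests in front
 * of the SEND; for PI-enabled requests the SEND is deferred until the
 * WRITE completion has been checked in nvmet_rdma_write_data_done().
 */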
static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp)) {
		if (rsp->req.metadata_len)
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, &rsp->write_cqe, NULL);
		else
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, NULL, &rsp->send_wr);
	} else {
		first_wr = &rsp->send_wr;
	}

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u16 status;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA WRITE for CQE failed with status %s (%d).\n",
				ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	/*
	 * Upon RDMA completion check the signature status
	 * - if succeeded send good NVMe response
	 * - if failed send bad NVMe response with appropriate error
	 */
	status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	if (unlikely(status))
		rsp->req.cqe->status = cpu_to_le16(status << 1);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	struct ib_sig_attrs sig_attrs;
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	if (rsp->req.metadata_len)
		nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

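/*
 * Reserve 1 + n_rdma work requests on the send queue before executing a
 * command; if there is not enough room, give the credits back and let the
 * caller park the request on rsp_wr_wait_list until completions free slots.
 */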
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = wc->qp->qp_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
	nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
			     false);
	ib_destroy_srq(nsrq->srq);

	kfree(nsrq);
}

static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
{
	int i;

	if (!ndev->srqs)
		return;

	for (i = 0; i < ndev->srq_count; i++)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);

	kfree(ndev->srqs);
}

static struct nvmet_rdma_srq *
nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	size_t srq_size = ndev->srq_size;
	struct nvmet_rdma_srq *nsrq;
	struct ib_srq *srq;
	int ret, i;

	nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
	if (!nsrq)
		return ERR_PTR(-ENOMEM);

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto out_free;
	}

	nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(nsrq->cmds)) {
		ret = PTR_ERR(nsrq->cmds);
		goto out_destroy_srq;
	}

	nsrq->srq = srq;
	nsrq->ndev = ndev;

	for (i = 0; i < srq_size; i++) {
		nsrq->cmds[i].nsrq = nsrq;
		ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return nsrq;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
out_free:
	kfree(nsrq);
	return ERR_PTR(ret);
}

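/*
 * Create one SRQ per completion vector (capped by the device's max_srq and
 * max_srq_wr limits) so receive buffers can be shared across queues.
 */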
static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
{
	int i, ret;

	if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
			     nvmet_rdma_srq_size);
	ndev->srq_count = min(ndev->device->num_comp_vectors,
			      ndev->device->attrs.max_srq);

	ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
	if (!ndev->srqs)
		return -ENOMEM;

	for (i = 0; i < ndev->srq_count; i++) {
		ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
		if (IS_ERR(ndev->srqs[i])) {
			ret = PTR_ERR(ndev->srqs[i]);
			goto err_srq;
		}
	}

	return 0;

err_srq:
	while (--i >= 0)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);
	kfree(ndev->srqs);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srqs(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

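/*
 * RDMA devices are shared: look up an existing nvmet_rdma_device by node
 * GUID and take a reference, or allocate a new one (PD, optional SRQs,
 * inline data sizing) under device_list_mutex.
 */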
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_port *nport = port->nport;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(nport->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			nport->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = nport->inline_data_size;
	ndev->inline_page_count = inline_page_count;

	if (nport->pi_enable && !(cm_id->device->attrs.kernel_cap_flags &
				  IBK_INTEGRITY_HANDOVER)) {
		pr_warn("T10-PI is not supported by device %s. Disabling it\n",
			cm_id->device->name);
		nport->pi_enable = false;
	}

	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srqs(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr = { };
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_cqe, ret, i, factor;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1,
				   queue->comp_vector, IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
				   1 << NVMET_RDMA_MAX_MDTS);
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (queue->nsrq) {
		qp_attr.srq = queue->nsrq->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	if (queue->port->pi_enable && queue->host_qid)
		qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}
	queue->qp = queue->cm_id->qp;

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!queue->nsrq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_cq_pool_put(queue->cq, nr_cqe + 1);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->qp);
	if (queue->cm_id)
		rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(queue->qp);
	ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 *
		       queue->send_queue_size + 1);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->nsrq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_free(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
			   IB_CM_REJ_CONSUMER_DEFINED);
}

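/*
 * Allocate and set up all per-queue resources for an incoming connect
 * request.  Failures are reported back to the host as NVMe/RDMA CM status
 * codes via nvmet_rdma_cm_reject().
 */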
static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;
	queue->port = port->nport;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	queue->comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (ndev->srqs) {
		queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
	} else {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!queue->nsrq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_free(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

static void nvmet_rdma_qp_event(struct ib_event *event, void *priv)
{
	struct nvmet_rdma_queue *queue = priv;

	switch (event->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(queue->cm_id, event->event);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		pr_debug("received last WQE reached event for queue=0x%p\n",
			 queue);
		break;
	default:
		pr_err("received IB QP event: %s (%d)\n",
		       ib_event_msg(event->event), event->event);
		break;
	}
}

static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue,
		struct rdma_conn_param *p)
{
	struct rdma_conn_param param = { };
	struct nvme_rdma_cm_rep priv = { };
	int ret = -ENOMEM;

	param.rnr_retry_count = 7;
	param.flow_control = 1;
	param.initiator_depth = min_t(u8, p->initiator_depth,
		queue->dev->device->attrs.max_qp_init_rd_atom);
	param.private_data = &priv;
	param.private_data_len = sizeof(priv);
	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	priv.crqsize = cpu_to_le16(queue->recv_queue_size);

	ret = rdma_accept(cm_id, &param);
	if (ret)
		pr_err("rdma_accept failed (error code = %d)\n", ret);

	return ret;
}

static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_device *ndev;
	struct nvmet_rdma_queue *queue;
	int ret = -EINVAL;

	ndev = nvmet_rdma_find_get_device(cm_id);
	if (!ndev) {
		nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC);
		return -ECONNREFUSED;
	}

	queue = nvmet_rdma_alloc_queue(ndev, cm_id, event);
	if (!queue) {
		ret = -ENOMEM;
		goto put_device;
	}

	if (queue->host_qid == 0) {
		struct nvmet_rdma_queue *q;
		int pending = 0;

		/* Check for pending controller teardown */
		mutex_lock(&nvmet_rdma_queue_mutex);
		list_for_each_entry(q, &nvmet_rdma_queue_list, queue_list) {
			if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl &&
			    q->state == NVMET_RDMA_Q_DISCONNECTING)
				pending++;
		}
		mutex_unlock(&nvmet_rdma_queue_mutex);
		if (pending > NVMET_RDMA_BACKLOG)
			return NVME_SC_CONNECT_CTRL_BUSY;
	}

	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
	if (ret) {
		/*
		 * Don't destroy the cm_id in free path, as we implicitly
		 * destroy the cm_id here with non-zero ret code.
		 */
		queue->cm_id = NULL;
		goto free_queue;
	}

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	return 0;

free_queue:
	nvmet_rdma_free_queue(queue);
put_device:
	kref_put(&ndev->ref, nvmet_rdma_free_dev);

	return ret;
}

static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->state_lock, flags);
	if (queue->state != NVMET_RDMA_Q_CONNECTING) {
		pr_warn("trying to establish a connected queue\n");
		goto out_unlock;
	}
	queue->state = NVMET_RDMA_Q_LIVE;

	while (!list_empty(&queue->rsp_wait_list)) {
		struct nvmet_rdma_rsp *cmd;

		cmd = list_first_entry(&queue->rsp_wait_list,
					struct nvmet_rdma_rsp, wait_list);
		list_del(&cmd->wait_list);

		spin_unlock_irqrestore(&queue->state_lock, flags);
		nvmet_rdma_handle_command(queue, cmd);
		spin_lock_irqsave(&queue->state_lock, flags);
	}

out_unlock:
	spin_unlock_irqrestore(&queue->state_lock, flags);
}

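/*
 * Transition a queue to DISCONNECTING exactly once.  Commands still parked
 * on the CONNECTING wait list are dropped, and the actual teardown runs
 * from release_work on nvmet_wq to avoid freeing from the CM callback.
 */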
static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;
	unsigned long flags;

	pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state);

	spin_lock_irqsave(&queue->state_lock, flags);
	switch (queue->state) {
	case NVMET_RDMA_Q_CONNECTING:
		while (!list_empty(&queue->rsp_wait_list)) {
			struct nvmet_rdma_rsp *rsp;

			rsp = list_first_entry(&queue->rsp_wait_list,
					       struct nvmet_rdma_rsp,
					       wait_list);
			list_del(&rsp->wait_list);
			nvmet_rdma_put_rsp(rsp);
		}
		fallthrough;
	case NVMET_RDMA_Q_LIVE:
		queue->state = NVMET_RDMA_Q_DISCONNECTING;
		disconnect = true;
		break;
	case NVMET_RDMA_Q_DISCONNECTING:
		break;
	}
	spin_unlock_irqrestore(&queue->state_lock, flags);

	if (disconnect) {
		rdma_disconnect(queue->cm_id);
		queue_work(nvmet_wq, &queue->release_work);
	}
}

static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
{
	bool disconnect = false;

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list)) {
		list_del_init(&queue->queue_list);
		disconnect = true;
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);

	if (disconnect)
		__nvmet_rdma_queue_disconnect(queue);
}

static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING);

	mutex_lock(&nvmet_rdma_queue_mutex);
	if (!list_empty(&queue->queue_list))
		list_del_init(&queue->queue_list);
	mutex_unlock(&nvmet_rdma_queue_mutex);

	pr_err("failed to connect queue %d\n", queue->idx);
	queue_work(nvmet_wq, &queue->release_work);
}

/**
 * nvmet_rdma_device_removal() - Handle RDMA device removal
 * @cm_id:	rdma_cm id, used for nvmet port
 * @queue:	nvmet rdma queue (cm id qp_context)
 *
 * DEVICE_REMOVAL event notifies us that the RDMA device is about
 * to unplug. Note that this event can be generated on a normal
 * queue cm_id and/or a device bound listener cm_id (where in this
 * case queue will be null).
 *
 * We registered an ib_client to handle device removal for queues,
 * so we only need to handle the listening port cm_ids. In this case
 * we nullify the priv to prevent double cm_id destruction and destroying
 * the cm_id implicitly by returning a non-zero rc to the callout.
 */
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
		struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_port *port;

	if (queue) {
		/*
		 * This is a queue cm_id. We have registered
		 * an ib_client to handle queue removal,
		 * so don't interfere and just return.
		 */
		return 0;
	}

	port = cm_id->context;

	/*
	 * This is a listener cm_id. Make sure that
	 * future remove_port won't invoke a double
	 * cm_id destroy. Use atomic xchg to make sure
	 * we don't compete with remove_port.
	 */
	if (xchg(&port->cm_id, NULL) != cm_id)
		return 0;

	/*
	 * We need to return 1 so that the core will destroy
	 * its own ID. What a great API design..
	 */
	return 1;
}

static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_queue *queue = NULL;
	int ret = 0;

	if (cm_id->qp)
		queue = cm_id->qp->qp_context;

	pr_debug("%s (%d): status %d id %p\n",
		rdma_event_msg(event->event), event->event,
		event->status, cm_id);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = nvmet_rdma_queue_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		nvmet_rdma_queue_established(queue);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		if (!queue) {
			struct nvmet_rdma_port *port = cm_id->context;

			queue_delayed_work(nvmet_wq, &port->repair_work, 0);
			break;
		}
		fallthrough;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
		nvmet_rdma_queue_disconnect(queue);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		ret = nvmet_rdma_device_removal(cm_id, queue);
		break;
	case RDMA_CM_EVENT_REJECTED:
		pr_debug("Connection rejected: %s\n",
			 rdma_reject_msg(cm_id, event->status));
		fallthrough;
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_CONNECT_ERROR:
		nvmet_rdma_queue_connect_fail(cm_id, queue);
		break;
	default:
		pr_err("received unrecognized RDMA CM event %d\n",
			event->event);
		break;
	}

	return ret;
}

static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_rdma_queue *queue;

restart:
	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) {
		if (queue->nvme_sq.ctrl == ctrl) {
			list_del_init(&queue->queue_list);
			mutex_unlock(&nvmet_rdma_queue_mutex);

			__nvmet_rdma_queue_disconnect(queue);
			goto restart;
		}
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
{
	struct nvmet_rdma_queue *queue, *tmp;
	struct nvmet_port *nport = port->nport;

	mutex_lock(&nvmet_rdma_queue_mutex);
	list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
				 queue_list) {
		if (queue->port != nport)
			continue;

		list_del_init(&queue->queue_list);
		__nvmet_rdma_queue_disconnect(queue);
	}
	mutex_unlock(&nvmet_rdma_queue_mutex);
}

a032e4f6 1852static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
8f000cac 1853{
a032e4f6 1854 struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
8f000cac 1855
a032e4f6
SG
1856 if (cm_id)
1857 rdma_destroy_id(cm_id);
fcf73a80
IR
1858
1859 /*
1860 * Destroy the remaining queues, which are not belong to any
1861 * controller yet. Do it here after the RDMA-CM was destroyed
1862 * guarantees that no new queue will be created.
1863 */
1864 nvmet_rdma_destroy_port_queues(port);
a032e4f6 1865}
0d5ee2b2 1866
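/*
 * Create the listening RDMA-CM ID for @port: bind it to the configured
 * address and start accepting connect requests, which are delivered to
 * nvmet_rdma_cm_handler() above.
 */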
1867static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
1868{
1869 struct sockaddr *addr = (struct sockaddr *)&port->addr;
1870 struct rdma_cm_id *cm_id;
1871 int ret;
1872
1873 cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
1874 RDMA_PS_TCP, IB_QPT_RC);
1875 if (IS_ERR(cm_id)) {
1876 pr_err("CM ID creation failed\n");
1877 return PTR_ERR(cm_id);
1878 }
1879
1880 /*
1881 * Allow both IPv4 and IPv6 sockets to bind to the same port
1882 * at the same time.
1883 */
1884 ret = rdma_set_afonly(cm_id, 1);
1885 if (ret) {
1886 pr_err("rdma_set_afonly failed (%d)\n", ret);
1887 goto out_destroy_id;
1888 }
1889
a032e4f6 1890 ret = rdma_bind_addr(cm_id, addr);
8f000cac 1891 if (ret) {
a032e4f6 1892 pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
1893 goto out_destroy_id;
1894 }
1895
31deaeb1 1896 ret = rdma_listen(cm_id, NVMET_RDMA_BACKLOG);
8f000cac 1897 if (ret) {
a032e4f6 1898 pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
1899 goto out_destroy_id;
1900 }
1901
a032e4f6 1902 port->cm_id = cm_id;
1903 return 0;
1904
1905out_destroy_id:
1906 rdma_destroy_id(cm_id);
1907 return ret;
1908}
1909
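/*
 * Scheduled from the RDMA_CM_EVENT_ADDR_CHANGE handler: tear down and
 * recreate the listener, retrying every 5 seconds until the port can be
 * re-enabled.
 */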
a032e4f6 1910static void nvmet_rdma_repair_port_work(struct work_struct *w)
8f000cac 1911{
1912 struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
1913 struct nvmet_rdma_port, repair_work);
1914 int ret;
8f000cac 1915
1916 nvmet_rdma_disable_port(port);
1917 ret = nvmet_rdma_enable_port(port);
1918 if (ret)
8832cf92 1919 queue_delayed_work(nvmet_wq, &port->repair_work, 5 * HZ);
1920}
1921
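/*
 * ->add_port() callback: allocate the per-port state, validate the
 * address family and inline data size, resolve traddr/trsvcid into a
 * sockaddr and start listening. These values typically come from the
 * nvmet configfs port attributes (addr_trtype/addr_adrfam/addr_traddr/
 * addr_trsvcid and param_inline_data_size), assuming the standard
 * nvmet configfs layout.
 */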
1922static int nvmet_rdma_add_port(struct nvmet_port *nport)
1923{
1924 struct nvmet_rdma_port *port;
1925 __kernel_sa_family_t af;
1926 int ret;
1927
1928 port = kzalloc(sizeof(*port), GFP_KERNEL);
1929 if (!port)
1930 return -ENOMEM;
1931
1932 nport->priv = port;
1933 port->nport = nport;
1934 INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
1935
1936 switch (nport->disc_addr.adrfam) {
1937 case NVMF_ADDR_FAMILY_IP4:
1938 af = AF_INET;
1939 break;
1940 case NVMF_ADDR_FAMILY_IP6:
1941 af = AF_INET6;
1942 break;
1943 default:
1944 pr_err("address family %d not supported\n",
1945 nport->disc_addr.adrfam);
1946 ret = -EINVAL;
1947 goto out_free_port;
1948 }
1949
1950 if (nport->inline_data_size < 0) {
1951 nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
1952 } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
1953 pr_warn("inline_data_size %u is too large, reducing to %u\n",
1954 nport->inline_data_size,
1955 NVMET_RDMA_MAX_INLINE_DATA_SIZE);
1956 nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
1957 }
1958
1959 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
1960 nport->disc_addr.trsvcid, &port->addr);
1961 if (ret) {
1962 pr_err("malformed ip/port passed: %s:%s\n",
1963 nport->disc_addr.traddr, nport->disc_addr.trsvcid);
1964 goto out_free_port;
1965 }
1966
1967 ret = nvmet_rdma_enable_port(port);
1968 if (ret)
1969 goto out_free_port;
1970
1971 pr_info("enabling port %d (%pISpcs)\n",
1972 le16_to_cpu(nport->disc_addr.portid),
1973 (struct sockaddr *)&port->addr);
1974
1975 return 0;
1976
1977out_free_port:
1978 kfree(port);
1979 return ret;
1980}
1981
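/*
 * ->remove_port() callback: make sure no repair work is still pending,
 * stop the listener and free the per-port state.
 */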
1982static void nvmet_rdma_remove_port(struct nvmet_port *nport)
8f000cac 1983{
a032e4f6 1984 struct nvmet_rdma_port *port = nport->priv;
8f000cac 1985
1986 cancel_delayed_work_sync(&port->repair_work);
1987 nvmet_rdma_disable_port(port);
1988 kfree(port);
1989}
1990
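/*
 * Report the transport address for discovery log pages. If the port is
 * bound to a wildcard address (e.g. 0.0.0.0 or ::), use the source
 * address of the cm_id the request arrived on instead of the configured
 * traddr.
 */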
4c652685 1991static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
a032e4f6 1992 struct nvmet_port *nport, char *traddr)
4c652685 1993{
1994 struct nvmet_rdma_port *port = nport->priv;
1995 struct rdma_cm_id *cm_id = port->cm_id;
1996
1997 if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
1998 struct nvmet_rdma_rsp *rsp =
1999 container_of(req, struct nvmet_rdma_rsp, req);
2000 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;
2001 struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr;
2002
2003 sprintf(traddr, "%pISc", addr);
2004 } else {
a032e4f6 2005 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
2006 }
2007}
2008
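/*
 * MDTS is reported as a power of two in units of the minimum memory page
 * size, so 8 corresponds to 2^8 pages and 5 to 2^5 pages; the smaller
 * limit is advertised when protection information (PI) is enabled.
 */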
2009static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
2010{
2011 if (ctrl->pi_support)
2012 return NVMET_RDMA_MAX_METADATA_MDTS;
2013 return NVMET_RDMA_MAX_MDTS;
2014}
2015
2016static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
2017{
2018 return NVME_RDMA_MAX_QUEUE_SIZE;
2019}
2020
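/*
 * Transport ops handed to the nvmet core via nvmet_register_transport().
 * NVMF_KEYED_SGLS advertises keyed (RKEY-based) SGL support,
 * NVMF_METADATA_SUPPORTED enables end-to-end protection information,
 * and msdbd = 1 limits the host to a single SGL descriptor per command.
 */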
e929f06d 2021static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
2022 .owner = THIS_MODULE,
2023 .type = NVMF_TRTYPE_RDMA,
8f000cac 2024 .msdbd = 1,
6fa350f7 2025 .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED,
2026 .add_port = nvmet_rdma_add_port,
2027 .remove_port = nvmet_rdma_remove_port,
2028 .queue_response = nvmet_rdma_queue_response,
2029 .delete_ctrl = nvmet_rdma_delete_ctrl,
4c652685 2030 .disc_traddr = nvmet_rdma_disc_port_addr,
ec6d20e1 2031 .get_mdts = nvmet_rdma_get_mdts,
c7d792f9 2032 .get_max_queue_size = nvmet_rdma_get_max_queue_size,
2033};
2034
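/*
 * IB client ->remove() callback: if the device going away is used by any
 * nvmet RDMA queue, disconnect those queues and flush the workqueue so
 * that all release work has completed before the device disappears.
 */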
2035static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
2036{
43b92fd2 2037 struct nvmet_rdma_queue *queue, *tmp;
2038 struct nvmet_rdma_device *ndev;
2039 bool found = false;
2040
2041 mutex_lock(&device_list_mutex);
2042 list_for_each_entry(ndev, &device_list, entry) {
2043 if (ndev->device == ib_device) {
2044 found = true;
2045 break;
2046 }
2047 }
2048 mutex_unlock(&device_list_mutex);
2049
2050 if (!found)
2051 return;
f1d4ef7d 2052
2053 /*
2054 * An IB device used by nvmet controllers is being removed;
2055 * delete all queues that use this device.
2056 */
f1d4ef7d 2057 mutex_lock(&nvmet_rdma_queue_mutex);
2058 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
2059 queue_list) {
2060 if (queue->dev->device != ib_device)
2061 continue;
2062
2063 pr_info("Removing queue %d\n", queue->idx);
43b92fd2 2064 list_del_init(&queue->queue_list);
2065 __nvmet_rdma_queue_disconnect(queue);
2066 }
2067 mutex_unlock(&nvmet_rdma_queue_mutex);
2068
8832cf92 2069 flush_workqueue(nvmet_wq);
2070}
2071
2072static struct ib_client nvmet_rdma_ib_client = {
2073 .name = "nvmet_rdma",
2074 .remove = nvmet_rdma_remove_one
2075};
2076
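/*
 * Module init/exit: register as an IB client, then register the transport
 * with the nvmet core; unwind in the opposite order on failure and in
 * nvmet_rdma_exit().
 */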
2077static int __init nvmet_rdma_init(void)
2078{
2079 int ret;
2080
2081 ret = ib_register_client(&nvmet_rdma_ib_client);
2082 if (ret)
2083 return ret;
2084
2085 ret = nvmet_register_transport(&nvmet_rdma_ops);
2086 if (ret)
2087 goto err_ib_client;
2088
2089 return 0;
2090
2091err_ib_client:
2092 ib_unregister_client(&nvmet_rdma_ib_client);
2093 return ret;
2094}
2095
2096static void __exit nvmet_rdma_exit(void)
2097{
8f000cac 2098 nvmet_unregister_transport(&nvmet_rdma_ops);
f1d4ef7d 2099 ib_unregister_client(&nvmet_rdma_ib_client);
cb4876e8 2100 WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
2101 ida_destroy(&nvmet_rdma_queue_ida);
2102}
2103
2104module_init(nvmet_rdma_init);
2105module_exit(nvmet_rdma_exit);
2106
41951f83 2107MODULE_DESCRIPTION("NVMe target RDMA transport driver");
2108MODULE_LICENSE("GPL v2");
2109MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */