// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics RDMA target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvme.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/wait.h>
#include <linux/inet.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
#include <rdma/ib_cm.h>

#include <linux/nvme-rdma.h>
#include "nvmet.h"

/*
 * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data
 */
#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE	PAGE_SIZE
#define NVMET_RDMA_MAX_INLINE_SGE		4
#define NVMET_RDMA_MAX_INLINE_DATA_SIZE		max_t(int, SZ_16K, PAGE_SIZE)

/* Assume mpsmin == device_page_size == 4KB */
#define NVMET_RDMA_MAX_MDTS			8
#define NVMET_RDMA_MAX_METADATA_MDTS		5

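/*
 * Note: MDTS is a power-of-two multiplier of mpsmin, so with the assumed
 * 4KB minimum page size the limit of 8 corresponds to 2^8 * 4KB = 1MB of
 * data per command, and the metadata-capable limit of 5 to 128KB.
 */
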
struct nvmet_rdma_srq;

struct nvmet_rdma_cmd {
	struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1];
	struct ib_cqe cqe;
	struct ib_recv_wr wr;
	struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE];
	struct nvme_command *nvme_cmd;
	struct nvmet_rdma_queue *queue;
	struct nvmet_rdma_srq *nsrq;
};

enum {
	NVMET_RDMA_REQ_INLINE_DATA	= (1 << 0),
	NVMET_RDMA_REQ_INVALIDATE_RKEY	= (1 << 1),
};

struct nvmet_rdma_rsp {
	struct ib_sge send_sge;
	struct ib_cqe send_cqe;
	struct ib_send_wr send_wr;

	struct nvmet_rdma_cmd *cmd;
	struct nvmet_rdma_queue *queue;

	struct ib_cqe read_cqe;
	struct ib_cqe write_cqe;
	struct rdma_rw_ctx rw;

	struct nvmet_req req;

	bool allocated;
	u8 n_rdma;
	u32 flags;
	u32 invalidate_rkey;

	struct list_head wait_list;
	struct list_head free_list;
};

enum nvmet_rdma_queue_state {
	NVMET_RDMA_Q_CONNECTING,
	NVMET_RDMA_Q_LIVE,
	NVMET_RDMA_Q_DISCONNECTING,
};

struct nvmet_rdma_queue {
	struct rdma_cm_id *cm_id;
	struct ib_qp *qp;
	struct nvmet_port *port;
	struct ib_cq *cq;
	atomic_t sq_wr_avail;
	struct nvmet_rdma_device *dev;
	struct nvmet_rdma_srq *nsrq;
	spinlock_t state_lock;
	enum nvmet_rdma_queue_state state;
	struct nvmet_cq nvme_cq;
	struct nvmet_sq nvme_sq;

	struct nvmet_rdma_rsp *rsps;
	struct list_head free_rsps;
	spinlock_t rsps_lock;
	struct nvmet_rdma_cmd *cmds;

	struct work_struct release_work;
	struct list_head rsp_wait_list;
	struct list_head rsp_wr_wait_list;
	spinlock_t rsp_wr_wait_lock;

	int idx;
	int host_qid;
	int comp_vector;
	int recv_queue_size;
	int send_queue_size;

	struct list_head queue_list;
};

struct nvmet_rdma_port {
	struct nvmet_port *nport;
	struct sockaddr_storage addr;
	struct rdma_cm_id *cm_id;
	struct delayed_work repair_work;
};

struct nvmet_rdma_srq {
	struct ib_srq *srq;
	struct nvmet_rdma_cmd *cmds;
	struct nvmet_rdma_device *ndev;
};

struct nvmet_rdma_device {
	struct ib_device *device;
	struct ib_pd *pd;
	struct nvmet_rdma_srq **srqs;
	int srq_count;
	size_t srq_size;
	struct kref ref;
	struct list_head entry;
	int inline_data_size;
	int inline_page_count;
};

static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");

static int srq_size_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops srq_size_ops = {
	.set = srq_size_set,
	.get = param_get_int,
};

static int nvmet_rdma_srq_size = 1024;
module_param_cb(srq_size, &srq_size_ops, &nvmet_rdma_srq_size, 0644);
MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");

static DEFINE_IDA(nvmet_rdma_queue_ida);
static LIST_HEAD(nvmet_rdma_queue_list);
static DEFINE_MUTEX(nvmet_rdma_queue_mutex);

static LIST_HEAD(device_list);
static DEFINE_MUTEX(device_list_mutex);

static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);
static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_rsp *r);

static const struct nvmet_fabrics_ops nvmet_rdma_ops;

static int srq_size_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret != 0 || n < 256)
		return -EINVAL;

	return param_set_int(val, kp);
}

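/*
 * Number of pages needed to hold a buffer of @len bytes, e.g.
 * num_pages(PAGE_SIZE) == 1 and num_pages(PAGE_SIZE + 1) == 2.
 */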
static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
{
	return nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
	return !nvme_is_write(rsp->req.cmd) &&
		rsp->req.transfer_len &&
		!rsp->req.cqe->status &&
		!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

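/*
 * Responses normally come from the queue's pre-allocated free list; if the
 * list is exhausted, fall back to a one-off allocation that is marked
 * "allocated" and freed again in nvmet_rdma_put_rsp().
 */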
static inline struct nvmet_rdma_rsp *
nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_rsp *rsp;
	unsigned long flags;

	spin_lock_irqsave(&queue->rsps_lock, flags);
	rsp = list_first_entry_or_null(&queue->free_rsps,
			struct nvmet_rdma_rsp, free_list);
	if (likely(rsp))
		list_del(&rsp->free_list);
	spin_unlock_irqrestore(&queue->rsps_lock, flags);

	if (unlikely(!rsp)) {
		int ret;

		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
		if (unlikely(!rsp))
			return NULL;
		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
		if (unlikely(ret)) {
			kfree(rsp);
			return NULL;
		}

		rsp->allocated = true;
	}

	return rsp;
}

static inline void
nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
{
	unsigned long flags;

	if (unlikely(rsp->allocated)) {
		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
		kfree(rsp);
		return;
	}

	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
}

static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	int i;

	if (!ndev->inline_data_size)
		return;

	sg = c->inline_sg;
	sge = &c->sge[1];

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
}

static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev,
				struct nvmet_rdma_cmd *c)
{
	struct scatterlist *sg;
	struct ib_sge *sge;
	struct page *pg;
	int len;
	int i;

	if (!ndev->inline_data_size)
		return 0;

	sg = c->inline_sg;
	sg_init_table(sg, ndev->inline_page_count);
	sge = &c->sge[1];
	len = ndev->inline_data_size;

	for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) {
		pg = alloc_page(GFP_KERNEL);
		if (!pg)
			goto out_err;
		sg_assign_page(sg, pg);
		sge->addr = ib_dma_map_page(ndev->device,
			pg, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ndev->device, sge->addr))
			goto out_err;
		sge->length = min_t(int, len, PAGE_SIZE);
		sge->lkey = ndev->pd->local_dma_lkey;
		len -= sge->length;
	}

	return 0;
out_err:
	for (; i >= 0; i--, sg--, sge--) {
		if (sge->length)
			ib_dma_unmap_page(ndev->device, sge->addr,
					sge->length, DMA_FROM_DEVICE);
		if (sg_page(sg))
			__free_page(sg_page(sg));
	}
	return -ENOMEM;
}

static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev,
			struct nvmet_rdma_cmd *c, bool admin)
{
	/* NVMe command / RDMA RECV */
	c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL);
	if (!c->nvme_cmd)
		goto out;

	c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ndev->device, c->sge[0].addr))
		goto out_free_cmd;

	c->sge[0].length = sizeof(*c->nvme_cmd);
	c->sge[0].lkey = ndev->pd->local_dma_lkey;

	if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c))
		goto out_unmap_cmd;

	c->cqe.done = nvmet_rdma_recv_done;

	c->wr.wr_cqe = &c->cqe;
	c->wr.sg_list = c->sge;
	c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1;

	return 0;

out_unmap_cmd:
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
			sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
out_free_cmd:
	kfree(c->nvme_cmd);

out:
	return -ENOMEM;
}

static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *c, bool admin)
{
	if (!admin)
		nvmet_rdma_free_inline_pages(ndev, c);
	ib_dma_unmap_single(ndev->device, c->sge[0].addr,
				sizeof(*c->nvme_cmd), DMA_FROM_DEVICE);
	kfree(c->nvme_cmd);
}

static struct nvmet_rdma_cmd *
nvmet_rdma_alloc_cmds(struct nvmet_rdma_device *ndev,
		int nr_cmds, bool admin)
{
	struct nvmet_rdma_cmd *cmds;
	int ret = -EINVAL, i;

	cmds = kcalloc(nr_cmds, sizeof(struct nvmet_rdma_cmd), GFP_KERNEL);
	if (!cmds)
		goto out;

	for (i = 0; i < nr_cmds; i++) {
		ret = nvmet_rdma_alloc_cmd(ndev, cmds + i, admin);
		if (ret)
			goto out_free;
	}

	return cmds;

out_free:
	while (--i >= 0)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
out:
	return ERR_PTR(ret);
}

static void nvmet_rdma_free_cmds(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmds, int nr_cmds, bool admin)
{
	int i;

	for (i = 0; i < nr_cmds; i++)
		nvmet_rdma_free_cmd(ndev, cmds + i, admin);
	kfree(cmds);
}

static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	/* NVMe CQE / RDMA SEND */
	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
	if (!r->req.cqe)
		goto out;

	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
			sizeof(*r->req.cqe), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
		goto out_free_rsp;

	r->req.p2p_client = &ndev->device->dev;
	r->send_sge.length = sizeof(*r->req.cqe);
	r->send_sge.lkey = ndev->pd->local_dma_lkey;

	r->send_cqe.done = nvmet_rdma_send_done;

	r->send_wr.wr_cqe = &r->send_cqe;
	r->send_wr.sg_list = &r->send_sge;
	r->send_wr.num_sge = 1;
	r->send_wr.send_flags = IB_SEND_SIGNALED;

	/* Data In / RDMA READ */
	r->read_cqe.done = nvmet_rdma_read_data_done;
	/* Data Out / RDMA WRITE */
	r->write_cqe.done = nvmet_rdma_write_data_done;

	return 0;

out_free_rsp:
	kfree(r->req.cqe);
out:
	return -ENOMEM;
}

static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_rsp *r)
{
	ib_dma_unmap_single(ndev->device, r->send_sge.addr,
				sizeof(*r->req.cqe), DMA_TO_DEVICE);
	kfree(r->req.cqe);
}

static int
nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_rsps = queue->recv_queue_size * 2;
	int ret = -EINVAL, i;

	queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp),
			GFP_KERNEL);
	if (!queue->rsps)
		goto out;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		ret = nvmet_rdma_alloc_rsp(ndev, rsp);
		if (ret)
			goto out_free;

		list_add_tail(&rsp->free_list, &queue->free_rsps);
	}

	return 0;

out_free:
	while (--i >= 0) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
out:
	return ret;
}

static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue)
{
	struct nvmet_rdma_device *ndev = queue->dev;
	int i, nr_rsps = queue->recv_queue_size * 2;

	for (i = 0; i < nr_rsps; i++) {
		struct nvmet_rdma_rsp *rsp = &queue->rsps[i];

		list_del(&rsp->free_list);
		nvmet_rdma_free_rsp(ndev, rsp);
	}
	kfree(queue->rsps);
}

static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
		struct nvmet_rdma_cmd *cmd)
{
	int ret;

	ib_dma_sync_single_for_device(ndev->device,
		cmd->sge[0].addr, cmd->sge[0].length,
		DMA_FROM_DEVICE);

	if (cmd->nsrq)
		ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL);
	else
		ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);

	if (unlikely(ret))
		pr_err("post_recv cmd failed\n");

	return ret;
}

static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue)
{
	spin_lock(&queue->rsp_wr_wait_lock);
	while (!list_empty(&queue->rsp_wr_wait_list)) {
		struct nvmet_rdma_rsp *rsp;
		bool ret;

		rsp = list_entry(queue->rsp_wr_wait_list.next,
				struct nvmet_rdma_rsp, wait_list);
		list_del(&rsp->wait_list);

		spin_unlock(&queue->rsp_wr_wait_lock);
		ret = nvmet_rdma_execute_command(rsp);
		spin_lock(&queue->rsp_wr_wait_lock);

		if (!ret) {
			list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
			break;
		}
	}
	spin_unlock(&queue->rsp_wr_wait_lock);
}

static u16 nvmet_rdma_check_pi_status(struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;
	u16 status = 0;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		pr_err("ib_check_mr_status failed, ret %d\n", ret);
		return NVME_SC_INVALID_PI;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			status = NVME_SC_GUARD_CHECK;
			break;
		case IB_SIG_BAD_REFTAG:
			status = NVME_SC_REFTAG_CHECK;
			break;
		case IB_SIG_BAD_APPTAG:
			status = NVME_SC_APPTAG_CHECK;
			break;
		}
		pr_err("PI error found type %d expected 0x%x vs actual 0x%x\n",
		       mr_status.sig_err.err_type,
		       mr_status.sig_err.expected,
		       mr_status.sig_err.actual);
	}

	return status;
}

static void nvmet_rdma_set_sig_domain(struct blk_integrity *bi,
		struct nvme_command *cmd, struct ib_sig_domain *domain,
		u16 control, u8 pi_type)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = 1 << bi->interval_exp;
	domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag);
	if (control & NVME_RW_PRINFO_PRCHK_REF)
		domain->sig.dif.ref_remap = true;

	domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag);
	domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask);
	domain->sig.dif.app_escape = true;
	if (pi_type == NVME_NS_DPS_PI_TYPE3)
		domain->sig.dif.ref_escape = true;
}

static void nvmet_rdma_set_sig_attrs(struct nvmet_req *req,
				     struct ib_sig_attrs *sig_attrs)
{
	struct nvme_command *cmd = req->cmd;
	u16 control = le16_to_cpu(cmd->rw.control);
	u8 pi_type = req->ns->pi_type;
	struct blk_integrity *bi;

	bi = bdev_get_integrity(req->ns->bdev);

	memset(sig_attrs, 0, sizeof(*sig_attrs));

	if (control & NVME_RW_PRINFO_PRACT) {
		/* for WRITE_INSERT/READ_STRIP no wire domain */
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
		/* Clear the PRACT bit since HCA will generate/verify the PI */
		control &= ~NVME_RW_PRINFO_PRACT;
		cmd->rw.control = cpu_to_le16(control);
		/* PI is added by the HW */
		req->transfer_len += req->metadata_len;
	} else {
		/* for WRITE_PASS/READ_PASS both wire/memory domains exist */
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control,
					  pi_type);
		nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control,
					  pi_type);
	}

	if (control & NVME_RW_PRINFO_PRCHK_REF)
		sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
	if (control & NVME_RW_PRINFO_PRCHK_GUARD)
		sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
	if (control & NVME_RW_PRINFO_PRCHK_APP)
		sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
}

static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
				  struct ib_sig_attrs *sig_attrs)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;
	int ret;

	if (req->metadata_len)
		ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt, sig_attrs,
			addr, key, nvmet_data_dir(req));
	else
		ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
				       req->sg, req->sg_cnt, 0, addr, key,
				       nvmet_data_dir(req));

	return ret;
}

static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
{
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct nvmet_req *req = &rsp->req;

	if (req->metadata_len)
		rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
			cm_id->port_num, req->sg, req->sg_cnt,
			req->metadata_sg, req->metadata_sg_cnt,
			nvmet_data_dir(req));
	else
		rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
				    req->sg, req->sg_cnt, nvmet_data_dir(req));
}

static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);

	if (rsp->n_rdma)
		nvmet_rdma_rw_ctx_destroy(rsp);

	if (rsp->req.sg != rsp->cmd->inline_sg)
		nvmet_req_free_sgls(&rsp->req);

	if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
		nvmet_rdma_process_wr_wait_list(queue);

	nvmet_rdma_put_rsp(rsp);
}

static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue)
{
	if (queue->nvme_sq.ctrl) {
		nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
	} else {
		/*
		 * we didn't setup the controller yet in case
		 * of admin connect error, just disconnect and
		 * cleanup the queue
		 */
		nvmet_rdma_queue_disconnect(queue);
	}
}

static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;

	nvmet_rdma_release_rsp(rsp);

	if (unlikely(wc->status != IB_WC_SUCCESS &&
		     wc->status != IB_WC_WR_FLUSH_ERR)) {
		pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
			wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
		nvmet_rdma_error_comp(queue);
	}
}

static void nvmet_rdma_queue_response(struct nvmet_req *req)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(req, struct nvmet_rdma_rsp, req);
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	struct ib_send_wr *first_wr;

	if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
		rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
		rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
	} else {
		rsp->send_wr.opcode = IB_WR_SEND;
	}

	if (nvmet_rdma_need_data_out(rsp)) {
		if (rsp->req.metadata_len)
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, &rsp->write_cqe, NULL);
		else
			first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
					cm_id->port_num, NULL, &rsp->send_wr);
	} else {
		first_wr = &rsp->send_wr;
	}

	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);

	ib_dma_sync_single_for_device(rsp->queue->dev->device,
		rsp->send_sge.addr, rsp->send_sge.length,
		DMA_TO_DEVICE);

	if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	u16 status = 0;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA READ for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (rsp->req.metadata_len)
		status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(status))
		nvmet_req_complete(&rsp->req, status);
	else
		rsp->req.execute(&rsp->req);
}

static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_rsp *rsp =
		container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct rdma_cm_id *cm_id = rsp->queue->cm_id;
	u16 status;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return;

	WARN_ON(rsp->n_rdma <= 0);
	atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
	rsp->n_rdma = 0;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		nvmet_rdma_rw_ctx_destroy(rsp);
		nvmet_req_uninit(&rsp->req);
		nvmet_rdma_release_rsp(rsp);
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_info("RDMA WRITE for CQE 0x%p failed with status %s (%d).\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	/*
	 * Upon RDMA completion check the signature status
	 * - if succeeded send good NVMe response
	 * - if failed send bad NVMe response with appropriate error
	 */
	status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
	if (unlikely(status))
		rsp->req.cqe->status = cpu_to_le16(status << 1);
	nvmet_rdma_rw_ctx_destroy(rsp);

	if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
		pr_err("sending cmd response failed\n");
		nvmet_rdma_release_rsp(rsp);
	}
}

static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
		u64 off)
{
	int sg_count = num_pages(len);
	struct scatterlist *sg;
	int i;

	sg = rsp->cmd->inline_sg;
	for (i = 0; i < sg_count; i++, sg++) {
		if (i < sg_count - 1)
			sg_unmark_end(sg);
		else
			sg_mark_end(sg);
		sg->offset = off;
		sg->length = min_t(int, len, PAGE_SIZE - off);
		len -= sg->length;
		if (!i)
			off = 0;
	}

	rsp->req.sg = rsp->cmd->inline_sg;
	rsp->req.sg_cnt = sg_count;
}

static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
	u64 off = le64_to_cpu(sgl->addr);
	u32 len = le32_to_cpu(sgl->length);

	if (!nvme_is_write(rsp->req.cmd)) {
		rsp->req.error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
	}

	if (off + len > rsp->queue->dev->inline_data_size) {
		pr_err("invalid inline data offset!\n");
		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
	}

	/* no data command? */
	if (!len)
		return 0;

	nvmet_rdma_use_inline_sg(rsp, len, off);
	rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
	rsp->req.transfer_len += len;
	return 0;
}

static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
		struct nvme_keyed_sgl_desc *sgl, bool invalidate)
{
	u64 addr = le64_to_cpu(sgl->addr);
	u32 key = get_unaligned_le32(sgl->key);
	struct ib_sig_attrs sig_attrs;
	int ret;

	rsp->req.transfer_len = get_unaligned_le24(sgl->length);

	/* no data command? */
	if (!rsp->req.transfer_len)
		return 0;

	if (rsp->req.metadata_len)
		nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);

	ret = nvmet_req_alloc_sgls(&rsp->req);
	if (unlikely(ret < 0))
		goto error_out;

	ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
	if (unlikely(ret < 0))
		goto error_out;
	rsp->n_rdma += ret;

	if (invalidate) {
		rsp->invalidate_rkey = key;
		rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
	}

	return 0;

error_out:
	rsp->req.transfer_len = 0;
	return NVME_SC_INTERNAL;
}

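/*
 * Dispatch on the SGL descriptor in the command: an offset (inline) data
 * block descriptor uses data carried in the command capsule, while a keyed
 * SGL descriptor sets up RDMA READ/WRITE to the host buffer, optionally
 * with remote invalidation of the rkey when the host requested it.
 */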
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
{
	struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;

	switch (sgl->type >> 4) {
	case NVME_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_OFFSET:
			return nvmet_rdma_map_sgl_inline(rsp);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	case NVME_KEY_SGL_FMT_DATA_DESC:
		switch (sgl->type & 0xf) {
		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
		case NVME_SGL_FMT_ADDRESS:
			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
		default:
			pr_err("invalid SGL subtype: %#x\n", sgl->type);
			rsp->req.error_loc =
				offsetof(struct nvme_common_command, dptr);
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		}
	default:
		pr_err("invalid SGL type: %#x\n", sgl->type);
		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
	}
}

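/*
 * Each command consumes one SEND work request plus n_rdma RDMA work
 * requests from the send queue; if not enough slots are available the
 * caller parks the command on rsp_wr_wait_list and it is retried from
 * nvmet_rdma_process_wr_wait_list() as other commands complete.
 */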
static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
{
	struct nvmet_rdma_queue *queue = rsp->queue;

	if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
			&queue->sq_wr_avail) < 0)) {
		pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n",
				1 + rsp->n_rdma, queue->idx,
				queue->nvme_sq.ctrl->cntlid);
		atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
		return false;
	}

	if (nvmet_rdma_need_data_in(rsp)) {
		if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
				queue->cm_id->port_num, &rsp->read_cqe, NULL))
			nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
	} else {
		rsp->req.execute(&rsp->req);
	}

	return true;
}

static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
		struct nvmet_rdma_rsp *cmd)
{
	u16 status;

	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
		DMA_FROM_DEVICE);
	ib_dma_sync_single_for_cpu(queue->dev->device,
		cmd->send_sge.addr, cmd->send_sge.length,
		DMA_TO_DEVICE);

	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvmet_rdma_ops))
		return;

	status = nvmet_rdma_map_sgl(cmd);
	if (status)
		goto out_err;

	if (unlikely(!nvmet_rdma_execute_command(cmd))) {
		spin_lock(&queue->rsp_wr_wait_lock);
		list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list);
		spin_unlock(&queue->rsp_wr_wait_lock);
	}

	return;

out_err:
	nvmet_req_complete(&cmd->req, status);
}

static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct nvmet_rdma_cmd *cmd =
		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
	struct nvmet_rdma_queue *queue = cq->cq_context;
	struct nvmet_rdma_rsp *rsp;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR) {
			pr_err("RECV for CQE 0x%p failed with status %s (%d)\n",
				wc->wr_cqe, ib_wc_status_msg(wc->status),
				wc->status);
			nvmet_rdma_error_comp(queue);
		}
		return;
	}

	if (unlikely(wc->byte_len < sizeof(struct nvme_command))) {
		pr_err("Ctrl Fatal Error: capsule size less than 64 bytes\n");
		nvmet_rdma_error_comp(queue);
		return;
	}

	cmd->queue = queue;
	rsp = nvmet_rdma_get_rsp(queue);
	if (unlikely(!rsp)) {
		/*
		 * we get here only under memory pressure,
		 * silently drop and have the host retry
		 * as we can't even fail it.
		 */
		nvmet_rdma_post_recv(queue->dev, cmd);
		return;
	}
	rsp->queue = queue;
	rsp->cmd = cmd;
	rsp->flags = 0;
	rsp->req.cmd = cmd->nvme_cmd;
	rsp->req.port = queue->port;
	rsp->n_rdma = 0;

	if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&queue->state_lock, flags);
		if (queue->state == NVMET_RDMA_Q_CONNECTING)
			list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
		else
			nvmet_rdma_put_rsp(rsp);
		spin_unlock_irqrestore(&queue->state_lock, flags);
		return;
	}

	nvmet_rdma_handle_command(queue, rsp);
}

static void nvmet_rdma_destroy_srq(struct nvmet_rdma_srq *nsrq)
{
	nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size,
			     false);
	ib_destroy_srq(nsrq->srq);

	kfree(nsrq);
}

static void nvmet_rdma_destroy_srqs(struct nvmet_rdma_device *ndev)
{
	int i;

	if (!ndev->srqs)
		return;

	for (i = 0; i < ndev->srq_count; i++)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);

	kfree(ndev->srqs);
}

static struct nvmet_rdma_srq *
nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev)
{
	struct ib_srq_init_attr srq_attr = { NULL, };
	size_t srq_size = ndev->srq_size;
	struct nvmet_rdma_srq *nsrq;
	struct ib_srq *srq;
	int ret, i;

	nsrq = kzalloc(sizeof(*nsrq), GFP_KERNEL);
	if (!nsrq)
		return ERR_PTR(-ENOMEM);

	srq_attr.attr.max_wr = srq_size;
	srq_attr.attr.max_sge = 1 + ndev->inline_page_count;
	srq_attr.attr.srq_limit = 0;
	srq_attr.srq_type = IB_SRQT_BASIC;
	srq = ib_create_srq(ndev->pd, &srq_attr);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto out_free;
	}

	nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false);
	if (IS_ERR(nsrq->cmds)) {
		ret = PTR_ERR(nsrq->cmds);
		goto out_destroy_srq;
	}

	nsrq->srq = srq;
	nsrq->ndev = ndev;

	for (i = 0; i < srq_size; i++) {
		nsrq->cmds[i].nsrq = nsrq;
		ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]);
		if (ret)
			goto out_free_cmds;
	}

	return nsrq;

out_free_cmds:
	nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false);
out_destroy_srq:
	ib_destroy_srq(srq);
out_free:
	kfree(nsrq);
	return ERR_PTR(ret);
}

static int nvmet_rdma_init_srqs(struct nvmet_rdma_device *ndev)
{
	int i, ret;

	if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) {
		/*
		 * If SRQs aren't supported we just go ahead and use normal
		 * non-shared receive queues.
		 */
		pr_info("SRQ requested but not supported.\n");
		return 0;
	}

	ndev->srq_size = min(ndev->device->attrs.max_srq_wr,
			     nvmet_rdma_srq_size);
	ndev->srq_count = min(ndev->device->num_comp_vectors,
			      ndev->device->attrs.max_srq);

	ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL);
	if (!ndev->srqs)
		return -ENOMEM;

	for (i = 0; i < ndev->srq_count; i++) {
		ndev->srqs[i] = nvmet_rdma_init_srq(ndev);
		if (IS_ERR(ndev->srqs[i])) {
			ret = PTR_ERR(ndev->srqs[i]);
			goto err_srq;
		}
	}

	return 0;

err_srq:
	while (--i >= 0)
		nvmet_rdma_destroy_srq(ndev->srqs[i]);
	kfree(ndev->srqs);
	return ret;
}

static void nvmet_rdma_free_dev(struct kref *ref)
{
	struct nvmet_rdma_device *ndev =
		container_of(ref, struct nvmet_rdma_device, ref);

	mutex_lock(&device_list_mutex);
	list_del(&ndev->entry);
	mutex_unlock(&device_list_mutex);

	nvmet_rdma_destroy_srqs(ndev);
	ib_dealloc_pd(ndev->pd);

	kfree(ndev);
}

static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_port *nport = port->nport;
	struct nvmet_rdma_device *ndev;
	int inline_page_count;
	int inline_sge_count;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(ndev, &device_list, entry) {
		if (ndev->device->node_guid == cm_id->device->node_guid &&
		    kref_get_unless_zero(&ndev->ref))
			goto out_unlock;
	}

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto out_err;

	inline_page_count = num_pages(nport->inline_data_size);
	inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
				cm_id->device->attrs.max_recv_sge) - 1;
	if (inline_page_count > inline_sge_count) {
		pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
			nport->inline_data_size, cm_id->device->name,
			inline_sge_count * PAGE_SIZE);
		nport->inline_data_size = inline_sge_count * PAGE_SIZE;
		inline_page_count = inline_sge_count;
	}
	ndev->inline_data_size = nport->inline_data_size;
	ndev->inline_page_count = inline_page_count;
	ndev->device = cm_id->device;
	kref_init(&ndev->ref);

	ndev->pd = ib_alloc_pd(ndev->device, 0);
	if (IS_ERR(ndev->pd))
		goto out_free_dev;

	if (nvmet_rdma_use_srq) {
		ret = nvmet_rdma_init_srqs(ndev);
		if (ret)
			goto out_free_pd;
	}

	list_add(&ndev->entry, &device_list);
out_unlock:
	mutex_unlock(&device_list_mutex);
	pr_debug("added %s.\n", ndev->device->name);
	return ndev;

out_free_pd:
	ib_dealloc_pd(ndev->pd);
out_free_dev:
	kfree(ndev);
out_err:
	mutex_unlock(&device_list_mutex);
	return NULL;
}

static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
{
	struct ib_qp_init_attr qp_attr;
	struct nvmet_rdma_device *ndev = queue->dev;
	int nr_cqe, ret, i, factor;

	/*
	 * Reserve CQ slots for RECV + RDMA_READ/RDMA_WRITE + RDMA_SEND.
	 */
	nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size;

	queue->cq = ib_alloc_cq(ndev->device, queue,
			nr_cqe + 1, queue->comp_vector,
			IB_POLL_WORKQUEUE);
	if (IS_ERR(queue->cq)) {
		ret = PTR_ERR(queue->cq);
		pr_err("failed to create CQ cqe= %d ret= %d\n",
		       nr_cqe + 1, ret);
		goto out;
	}

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_context = queue;
	qp_attr.event_handler = nvmet_rdma_qp_event;
	qp_attr.send_cq = queue->cq;
	qp_attr.recv_cq = queue->cq;
	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	qp_attr.qp_type = IB_QPT_RC;
	/* +1 for drain */
	qp_attr.cap.max_send_wr = queue->send_queue_size + 1;
	factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num,
				   1 << NVMET_RDMA_MAX_MDTS);
	qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor;
	qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd,
					ndev->device->attrs.max_send_sge);

	if (queue->nsrq) {
		qp_attr.srq = queue->nsrq->srq;
	} else {
		/* +1 for drain */
		qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size;
		qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count;
	}

	if (queue->port->pi_enable && queue->host_qid)
		qp_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr);
	if (ret) {
		pr_err("failed to create_qp ret= %d\n", ret);
		goto err_destroy_cq;
	}
	queue->qp = queue->cm_id->qp;

	atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);

	pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
		 qp_attr.cap.max_send_wr, queue->cm_id);

	if (!queue->nsrq) {
		for (i = 0; i < queue->recv_queue_size; i++) {
			queue->cmds[i].queue = queue;
			ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]);
			if (ret)
				goto err_destroy_qp;
		}
	}

out:
	return ret;

err_destroy_qp:
	rdma_destroy_qp(queue->cm_id);
err_destroy_cq:
	ib_free_cq(queue->cq);
	goto out;
}

static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
	ib_drain_qp(queue->qp);
	if (queue->cm_id)
		rdma_destroy_id(queue->cm_id);
	ib_destroy_qp(queue->qp);
	ib_free_cq(queue->cq);
}

static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue)
{
	pr_debug("freeing queue %d\n", queue->idx);

	nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_rdma_destroy_queue_ib(queue);
	if (!queue->nsrq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
	nvmet_rdma_free_rsps(queue);
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
	kfree(queue);
}

static void nvmet_rdma_release_queue_work(struct work_struct *w)
{
	struct nvmet_rdma_queue *queue =
		container_of(w, struct nvmet_rdma_queue, release_work);
	struct nvmet_rdma_device *dev = queue->dev;

	nvmet_rdma_free_queue(queue);

	kref_put(&dev->ref, nvmet_rdma_free_dev);
}

static int
nvmet_rdma_parse_cm_connect_req(struct rdma_conn_param *conn,
				struct nvmet_rdma_queue *queue)
{
	struct nvme_rdma_cm_req *req;

	req = (struct nvme_rdma_cm_req *)conn->private_data;
	if (!req || conn->private_data_len == 0)
		return NVME_RDMA_CM_INVALID_LEN;

	if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0)
		return NVME_RDMA_CM_INVALID_RECFMT;

	queue->host_qid = le16_to_cpu(req->qid);

	/*
	 * req->hsqsize corresponds to our recv queue size plus 1
	 * req->hrqsize corresponds to our send queue size
	 */
	queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1;
	queue->send_queue_size = le16_to_cpu(req->hrqsize);

	if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH)
		return NVME_RDMA_CM_INVALID_HSQSIZE;

	/* XXX: Should we enforce some kind of max for IO queues? */

	return 0;
}

static int nvmet_rdma_cm_reject(struct rdma_cm_id *cm_id,
				enum nvme_rdma_cm_status status)
{
	struct nvme_rdma_cm_rej rej;

	pr_debug("rejecting connect request: status %d (%s)\n",
		 status, nvme_rdma_cm_msg(status));

	rej.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
	rej.sts = cpu_to_le16(status);

	return rdma_reject(cm_id, (void *)&rej, sizeof(rej),
			   IB_CM_REJ_CONSUMER_DEFINED);
}

static struct nvmet_rdma_queue *
nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev,
		struct rdma_cm_id *cm_id,
		struct rdma_cm_event *event)
{
	struct nvmet_rdma_port *port = cm_id->context;
	struct nvmet_rdma_queue *queue;
	int ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_reject;
	}

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_queue;
	}

	ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue);
	if (ret)
		goto out_destroy_sq;

	/*
	 * Schedules the actual release because calling rdma_destroy_id from
	 * inside a CM callback would trigger a deadlock. (great API design..)
	 */
	INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work);
	queue->dev = ndev;
	queue->cm_id = cm_id;
	queue->port = port->nport;

	spin_lock_init(&queue->state_lock);
	queue->state = NVMET_RDMA_Q_CONNECTING;
	INIT_LIST_HEAD(&queue->rsp_wait_list);
	INIT_LIST_HEAD(&queue->rsp_wr_wait_list);
	spin_lock_init(&queue->rsp_wr_wait_lock);
	INIT_LIST_HEAD(&queue->free_rsps);
	spin_lock_init(&queue->rsps_lock);
	INIT_LIST_HEAD(&queue->queue_list);

	queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL);
	if (queue->idx < 0) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_destroy_sq;
	}

	/*
	 * Spread the io queues across completion vectors,
	 * but still keep all admin queues on vector 0.
	 */
	queue->comp_vector = !queue->host_qid ? 0 :
		queue->idx % ndev->device->num_comp_vectors;

	ret = nvmet_rdma_alloc_rsps(queue);
	if (ret) {
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_ida_remove;
	}

	if (ndev->srqs) {
		queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count];
	} else {
		queue->cmds = nvmet_rdma_alloc_cmds(ndev,
				queue->recv_queue_size,
				!queue->host_qid);
		if (IS_ERR(queue->cmds)) {
			ret = NVME_RDMA_CM_NO_RSC;
			goto out_free_responses;
		}
	}

	ret = nvmet_rdma_create_queue_ib(queue);
	if (ret) {
		pr_err("%s: creating RDMA queue failed (%d).\n",
			__func__, ret);
		ret = NVME_RDMA_CM_NO_RSC;
		goto out_free_cmds;
	}

	return queue;

out_free_cmds:
	if (!queue->nsrq) {
		nvmet_rdma_free_cmds(queue->dev, queue->cmds,
				queue->recv_queue_size,
				!queue->host_qid);
	}
out_free_responses:
	nvmet_rdma_free_rsps(queue);
out_ida_remove:
	ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx);
out_destroy_sq:
	nvmet_sq_destroy(&queue->nvme_sq);
out_free_queue:
	kfree(queue);
out_reject:
	nvmet_rdma_cm_reject(cm_id, ret);
	return NULL;
}

1515 | static void nvmet_rdma_qp_event(struct ib_event *event, void *priv) | |
1516 | { | |
1517 | struct nvmet_rdma_queue *queue = priv; | |
1518 | ||
1519 | switch (event->event) { | |
1520 | case IB_EVENT_COMM_EST: | |
1521 | rdma_notify(queue->cm_id, event->event); | |
1522 | break; | |
b0012dd3 MG |
1523 | case IB_EVENT_QP_LAST_WQE_REACHED: |
1524 | pr_debug("received last WQE reached event for queue=0x%p\n", | |
1525 | queue); | |
1526 | break; | |
8f000cac | 1527 | default: |
675796be MG |
1528 | pr_err("received IB QP event: %s (%d)\n", |
1529 | ib_event_msg(event->event), event->event); | |
8f000cac CH |
1530 | break; |
1531 | } | |
1532 | } | |
1533 | ||
1534 | static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id, | |
1535 | struct nvmet_rdma_queue *queue, | |
1536 | struct rdma_conn_param *p) | |
1537 | { | |
1538 | struct rdma_conn_param param = { }; | |
1539 | struct nvme_rdma_cm_rep priv = { }; | |
1540 | int ret = -ENOMEM; | |
1541 | ||
1542 | param.rnr_retry_count = 7; | |
1543 | param.flow_control = 1; | |
1544 | param.initiator_depth = min_t(u8, p->initiator_depth, | |
1545 | queue->dev->device->attrs.max_qp_init_rd_atom); | |
1546 | param.private_data = &priv; | |
1547 | param.private_data_len = sizeof(priv); | |
1548 | priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0); | |
1549 | priv.crqsize = cpu_to_le16(queue->recv_queue_size); | |
1550 | ||
1551 | ret = rdma_accept(cm_id, ¶m); | |
1552 | if (ret) | |
1553 | pr_err("rdma_accept failed (error code = %d)\n", ret); | |
1554 | ||
1555 | return ret; | |
1556 | } | |
1557 | ||
1558 | static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id, | |
1559 | struct rdma_cm_event *event) | |
1560 | { | |
1561 | struct nvmet_rdma_device *ndev; | |
1562 | struct nvmet_rdma_queue *queue; | |
1563 | int ret = -EINVAL; | |
1564 | ||
1565 | ndev = nvmet_rdma_find_get_device(cm_id); | |
1566 | if (!ndev) { | |
8f000cac CH |
1567 | nvmet_rdma_cm_reject(cm_id, NVME_RDMA_CM_NO_RSC); |
1568 | return -ECONNREFUSED; | |
1569 | } | |
1570 | ||
1571 | queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); | |
1572 | if (!queue) { | |
1573 | ret = -ENOMEM; | |
1574 | goto put_device; | |
1575 | } | |
8f000cac | 1576 | |
777dc823 SG |
1577 | if (queue->host_qid == 0) { |
1578 | /* Let inflight controller teardown complete */ | |
d39aa497 | 1579 | flush_scheduled_work(); |
777dc823 SG |
1580 | } |
1581 | ||
8f000cac | 1582 | ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); |
e1a2ee24 | 1583 | if (ret) { |
21f90243 IR |
1584 | /* |
1585 | * Don't destroy the cm_id in the free path, as we implicitly | |
1586 | * destroy it here by returning a non-zero ret code. | |
1587 | */ | |
1588 | queue->cm_id = NULL; | |
1589 | goto free_queue; | |
e1a2ee24 | 1590 | } |
8f000cac CH |
1591 | |
1592 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1593 | list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); | |
1594 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1595 | ||
1596 | return 0; | |
1597 | ||
21f90243 IR |
1598 | free_queue: |
1599 | nvmet_rdma_free_queue(queue); | |
8f000cac CH |
1600 | put_device: |
1601 | kref_put(&ndev->ref, nvmet_rdma_free_dev); | |
1602 | ||
1603 | return ret; | |
1604 | } | |
1605 | ||
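/*
 * ESTABLISHED handling: move the queue from CONNECTING to LIVE and replay
 * any commands that were parked on rsp_wait_list before the connection
 * completed.  The state lock is dropped around each replayed command.
 */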
1606 | static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) | |
1607 | { | |
1608 | unsigned long flags; | |
1609 | ||
1610 | spin_lock_irqsave(&queue->state_lock, flags); | |
1611 | if (queue->state != NVMET_RDMA_Q_CONNECTING) { | |
1612 | pr_warn("trying to establish an already connected queue\n"); | |
1613 | goto out_unlock; | |
1614 | } | |
1615 | queue->state = NVMET_RDMA_Q_LIVE; | |
1616 | ||
1617 | while (!list_empty(&queue->rsp_wait_list)) { | |
1618 | struct nvmet_rdma_rsp *cmd; | |
1619 | ||
1620 | cmd = list_first_entry(&queue->rsp_wait_list, | |
1621 | struct nvmet_rdma_rsp, wait_list); | |
1622 | list_del(&cmd->wait_list); | |
1623 | ||
1624 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
1625 | nvmet_rdma_handle_command(queue, cmd); | |
1626 | spin_lock_irqsave(&queue->state_lock, flags); | |
1627 | } | |
1628 | ||
1629 | out_unlock: | |
1630 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
1631 | } | |
1632 | ||
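/*
 * Transition the queue to DISCONNECTING under the state lock.  Only the
 * caller that performs the CONNECTING/LIVE -> DISCONNECTING transition
 * issues rdma_disconnect() and schedules the release work, so teardown
 * happens exactly once even if several paths race to disconnect.
 */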
1633 | static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |
1634 | { | |
1635 | bool disconnect = false; | |
1636 | unsigned long flags; | |
1637 | ||
1638 | pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); | |
1639 | ||
1640 | spin_lock_irqsave(&queue->state_lock, flags); | |
1641 | switch (queue->state) { | |
1642 | case NVMET_RDMA_Q_CONNECTING: | |
1643 | case NVMET_RDMA_Q_LIVE: | |
8f000cac | 1644 | queue->state = NVMET_RDMA_Q_DISCONNECTING; |
d8f7750a | 1645 | disconnect = true; |
8f000cac CH |
1646 | break; |
1647 | case NVMET_RDMA_Q_DISCONNECTING: | |
1648 | break; | |
1649 | } | |
1650 | spin_unlock_irqrestore(&queue->state_lock, flags); | |
1651 | ||
1652 | if (disconnect) { | |
1653 | rdma_disconnect(queue->cm_id); | |
d39aa497 | 1654 | schedule_work(&queue->release_work); |
8f000cac CH |
1655 | } |
1656 | } | |
1657 | ||
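/*
 * Disconnect a queue that may still be on the global queue list.
 * Removing it from the list under nvmet_rdma_queue_mutex ensures that
 * only one of the competing callers (CM events, controller delete, device
 * removal) actually tears it down.
 */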
1658 | static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |
1659 | { | |
1660 | bool disconnect = false; | |
1661 | ||
1662 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1663 | if (!list_empty(&queue->queue_list)) { | |
1664 | list_del_init(&queue->queue_list); | |
1665 | disconnect = true; | |
1666 | } | |
1667 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1668 | ||
1669 | if (disconnect) | |
1670 | __nvmet_rdma_queue_disconnect(queue); | |
1671 | } | |
1672 | ||
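/*
 * Connection establishment failed (REJECTED, UNREACHABLE or CONNECT_ERROR
 * while still CONNECTING): drop the queue from the global list if it made
 * it there, then schedule the release work to free it.
 */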
1673 | static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, | |
1674 | struct nvmet_rdma_queue *queue) | |
1675 | { | |
1676 | WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); | |
1677 | ||
766dbb17 SG |
1678 | mutex_lock(&nvmet_rdma_queue_mutex); |
1679 | if (!list_empty(&queue->queue_list)) | |
1680 | list_del_init(&queue->queue_list); | |
1681 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1682 | ||
1683 | pr_err("failed to connect queue %d\n", queue->idx); | |
d39aa497 | 1684 | schedule_work(&queue->release_work); |
8f000cac CH |
1685 | } |
1686 | ||
d8f7750a SG |
1687 | /** |
1688 | * nvmet_rdma_device_removal() - Handle RDMA device removal | |
f1d4ef7d | 1689 | * @cm_id: rdma_cm id, used for nvmet port |
d8f7750a | 1690 | * @queue: nvmet rdma queue (cm id qp_context) |
d8f7750a SG |
1691 | * |
1692 | * DEVICE_REMOVAL event notifies us that the RDMA device is about | |
f1d4ef7d SG |
1693 | * to unplug. Note that this event can be generated on a normal |
1694 | * queue cm_id and/or a device-bound listener cm_id (in which | |
1695 | * case queue will be NULL). | |
d8f7750a | 1696 | * |
f1d4ef7d SG |
1697 | * We registered an ib_client to handle device removal for queues, |
1698 | * so we only need to handle the listening port cm_ids. In this case | |
d8f7750a SG |
1699 | * we nullify the priv to prevent double cm_id destruction and destroy | |
1700 | * the cm_id implicitly by returning a non-zero rc to the callout. | |
1701 | */ | |
1702 | static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id, | |
1703 | struct nvmet_rdma_queue *queue) | |
1704 | { | |
a032e4f6 | 1705 | struct nvmet_rdma_port *port; |
d8f7750a | 1706 | |
f1d4ef7d | 1707 | if (queue) { |
d8f7750a | 1708 | /* |
f1d4ef7d SG |
1709 | * This is a queue cm_id. We have registered | |
1710 | * an ib_client to handle queue removal, | |
1711 | * so don't interfere and just return. | |
d8f7750a | 1712 | */ |
f1d4ef7d | 1713 | return 0; |
d8f7750a SG |
1714 | } |
1715 | ||
f1d4ef7d SG |
1716 | port = cm_id->context; |
1717 | ||
1718 | /* | |
1719 | * This is a listener cm_id. Make sure that | |
1720 | * future remove_port won't invoke a double | |
1721 | * cm_id destroy. Use an atomic xchg to make sure | |
1722 | * we don't race with remove_port. | |
1723 | */ | |
a032e4f6 | 1724 | if (xchg(&port->cm_id, NULL) != cm_id) |
f1d4ef7d SG |
1725 | return 0; |
1726 | ||
d8f7750a SG |
1727 | /* |
1728 | * We need to return 1 so that the core will destroy | |
1729 | * its own ID. What a great API design.. | |
1730 | */ | |
1731 | return 1; | |
1732 | } | |
1733 | ||
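/*
 * Single rdma_cm event handler for both listener and queue cm_ids.  The
 * owning queue, when there is one, is recovered from the QP context.  A
 * non-zero return value tells the CM core to destroy the cm_id itself.
 */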
8f000cac CH |
1734 | static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, |
1735 | struct rdma_cm_event *event) | |
1736 | { | |
1737 | struct nvmet_rdma_queue *queue = NULL; | |
1738 | int ret = 0; | |
1739 | ||
1740 | if (cm_id->qp) | |
1741 | queue = cm_id->qp->qp_context; | |
1742 | ||
1743 | pr_debug("%s (%d): status %d id %p\n", | |
1744 | rdma_event_msg(event->event), event->event, | |
1745 | event->status, cm_id); | |
1746 | ||
1747 | switch (event->event) { | |
1748 | case RDMA_CM_EVENT_CONNECT_REQUEST: | |
1749 | ret = nvmet_rdma_queue_connect(cm_id, event); | |
1750 | break; | |
1751 | case RDMA_CM_EVENT_ESTABLISHED: | |
1752 | nvmet_rdma_queue_established(queue); | |
1753 | break; | |
1754 | case RDMA_CM_EVENT_ADDR_CHANGE: | |
a032e4f6 SG |
1755 | if (!queue) { |
1756 | struct nvmet_rdma_port *port = cm_id->context; | |
1757 | ||
1758 | schedule_delayed_work(&port->repair_work, 0); | |
1759 | break; | |
1760 | } | |
1761 | /* FALLTHROUGH */ | |
8f000cac | 1762 | case RDMA_CM_EVENT_DISCONNECTED: |
8f000cac | 1763 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: |
e1a2ee24 | 1764 | nvmet_rdma_queue_disconnect(queue); |
d8f7750a SG |
1765 | break; |
1766 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | |
1767 | ret = nvmet_rdma_device_removal(cm_id, queue); | |
8f000cac CH |
1768 | break; |
1769 | case RDMA_CM_EVENT_REJECTED: | |
512fb1b3 SW |
1770 | pr_debug("Connection rejected: %s\n", |
1771 | rdma_reject_msg(cm_id, event->status)); | |
1772 | /* FALLTHROUGH */ | |
8f000cac CH |
1773 | case RDMA_CM_EVENT_UNREACHABLE: |
1774 | case RDMA_CM_EVENT_CONNECT_ERROR: | |
1775 | nvmet_rdma_queue_connect_fail(cm_id, queue); | |
1776 | break; | |
1777 | default: | |
1778 | pr_err("received unrecognized RDMA CM event %d\n", | |
1779 | event->event); | |
1780 | break; | |
1781 | } | |
1782 | ||
1783 | return ret; | |
1784 | } | |
1785 | ||
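/*
 * Controller delete: disconnect every queue that belongs to this
 * controller.  The list mutex is dropped while each queue is torn down,
 * so the scan restarts from the head of the list after every removal.
 */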
1786 | static void nvmet_rdma_delete_ctrl(struct nvmet_ctrl *ctrl) | |
1787 | { | |
1788 | struct nvmet_rdma_queue *queue; | |
1789 | ||
1790 | restart: | |
1791 | mutex_lock(&nvmet_rdma_queue_mutex); | |
1792 | list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { | |
1793 | if (queue->nvme_sq.ctrl == ctrl) { | |
1794 | list_del_init(&queue->queue_list); | |
1795 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1796 | ||
1797 | __nvmet_rdma_queue_disconnect(queue); | |
1798 | goto restart; | |
1799 | } | |
1800 | } | |
1801 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
1802 | } | |
1803 | ||
a032e4f6 | 1804 | static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port) |
8f000cac | 1805 | { |
a032e4f6 | 1806 | struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL); |
8f000cac | 1807 | |
a032e4f6 SG |
1808 | if (cm_id) |
1809 | rdma_destroy_id(cm_id); | |
1810 | } | |
0d5ee2b2 | 1811 | |
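/*
 * Bring up the listener for a port: create an RC cm_id, restrict it to a
 * single address family so IPv4 and IPv6 listeners can share a port
 * number, bind the configured address, and listen with a backlog of 128.
 * If the port has T10-PI enabled, the device must support integrity
 * handover.
 */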
a032e4f6 SG |
1812 | static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port) |
1813 | { | |
1814 | struct sockaddr *addr = (struct sockaddr *)&port->addr; | |
1815 | struct rdma_cm_id *cm_id; | |
1816 | int ret; | |
8f000cac CH |
1817 | |
1818 | cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port, | |
1819 | RDMA_PS_TCP, IB_QPT_RC); | |
1820 | if (IS_ERR(cm_id)) { | |
1821 | pr_err("CM ID creation failed\n"); | |
1822 | return PTR_ERR(cm_id); | |
1823 | } | |
1824 | ||
670c2a3a SG |
1825 | /* |
1826 | * Allow both IPv4 and IPv6 sockets to bind a single port | |
1827 | * at the same time. | |
1828 | */ | |
1829 | ret = rdma_set_afonly(cm_id, 1); | |
1830 | if (ret) { | |
1831 | pr_err("rdma_set_afonly failed (%d)\n", ret); | |
1832 | goto out_destroy_id; | |
1833 | } | |
1834 | ||
a032e4f6 | 1835 | ret = rdma_bind_addr(cm_id, addr); |
8f000cac | 1836 | if (ret) { |
a032e4f6 | 1837 | pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret); |
8f000cac CH |
1838 | goto out_destroy_id; |
1839 | } | |
1840 | ||
1841 | ret = rdma_listen(cm_id, 128); | |
1842 | if (ret) { | |
a032e4f6 | 1843 | pr_err("listening to %pISpcs failed (%d)\n", addr, ret); |
8f000cac CH |
1844 | goto out_destroy_id; |
1845 | } | |
1846 | ||
b09160c3 IR |
1847 | if (port->nport->pi_enable && |
1848 | !(cm_id->device->attrs.device_cap_flags & | |
1849 | IB_DEVICE_INTEGRITY_HANDOVER)) { | |
1850 | pr_err("T10-PI is not supported for %pISpcs\n", addr); | |
1851 | ret = -EINVAL; | |
1852 | goto out_destroy_id; | |
1853 | } | |
1854 | ||
a032e4f6 | 1855 | port->cm_id = cm_id; |
8f000cac CH |
1856 | return 0; |
1857 | ||
1858 | out_destroy_id: | |
1859 | rdma_destroy_id(cm_id); | |
1860 | return ret; | |
1861 | } | |
1862 | ||
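/*
 * Listener repair, scheduled from RDMA_CM_EVENT_ADDR_CHANGE: tear the
 * listener down and try to bring it back up, retrying every 5 seconds
 * until it succeeds.
 */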
a032e4f6 | 1863 | static void nvmet_rdma_repair_port_work(struct work_struct *w) |
8f000cac | 1864 | { |
a032e4f6 SG |
1865 | struct nvmet_rdma_port *port = container_of(to_delayed_work(w), |
1866 | struct nvmet_rdma_port, repair_work); | |
1867 | int ret; | |
8f000cac | 1868 | |
a032e4f6 SG |
1869 | nvmet_rdma_disable_port(port); |
1870 | ret = nvmet_rdma_enable_port(port); | |
1871 | if (ret) | |
1872 | schedule_delayed_work(&port->repair_work, 5 * HZ); | |
1873 | } | |
1874 | ||
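/*
 * configfs add_port: allocate the per-port state, validate the address
 * family, clamp inline_data_size (one page by default, at most
 * NVMET_RDMA_MAX_INLINE_DATA_SIZE), parse traddr/trsvcid into a socket
 * address, and start listening.
 */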
1875 | static int nvmet_rdma_add_port(struct nvmet_port *nport) | |
1876 | { | |
1877 | struct nvmet_rdma_port *port; | |
1878 | __kernel_sa_family_t af; | |
1879 | int ret; | |
1880 | ||
1881 | port = kzalloc(sizeof(*port), GFP_KERNEL); | |
1882 | if (!port) | |
1883 | return -ENOMEM; | |
1884 | ||
1885 | nport->priv = port; | |
1886 | port->nport = nport; | |
1887 | INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work); | |
1888 | ||
1889 | switch (nport->disc_addr.adrfam) { | |
1890 | case NVMF_ADDR_FAMILY_IP4: | |
1891 | af = AF_INET; | |
1892 | break; | |
1893 | case NVMF_ADDR_FAMILY_IP6: | |
1894 | af = AF_INET6; | |
1895 | break; | |
1896 | default: | |
1897 | pr_err("address family %d not supported\n", | |
1898 | nport->disc_addr.adrfam); | |
1899 | ret = -EINVAL; | |
1900 | goto out_free_port; | |
1901 | } | |
1902 | ||
1903 | if (nport->inline_data_size < 0) { | |
1904 | nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; | |
1905 | } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { | |
1906 | pr_warn("inline_data_size %u is too large, reducing to %u\n", | |
1907 | nport->inline_data_size, | |
1908 | NVMET_RDMA_MAX_INLINE_DATA_SIZE); | |
1909 | nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; | |
1910 | } | |
1911 | ||
1912 | ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, | |
1913 | nport->disc_addr.trsvcid, &port->addr); | |
1914 | if (ret) { | |
1915 | pr_err("malformed ip/port passed: %s:%s\n", | |
1916 | nport->disc_addr.traddr, nport->disc_addr.trsvcid); | |
1917 | goto out_free_port; | |
1918 | } | |
1919 | ||
1920 | ret = nvmet_rdma_enable_port(port); | |
1921 | if (ret) | |
1922 | goto out_free_port; | |
1923 | ||
1924 | pr_info("enabling port %d (%pISpcs)\n", | |
1925 | le16_to_cpu(nport->disc_addr.portid), | |
1926 | (struct sockaddr *)&port->addr); | |
1927 | ||
1928 | return 0; | |
1929 | ||
1930 | out_free_port: | |
1931 | kfree(port); | |
1932 | return ret; | |
1933 | } | |
1934 | ||
1935 | static void nvmet_rdma_remove_port(struct nvmet_port *nport) | |
8f000cac | 1936 | { |
a032e4f6 | 1937 | struct nvmet_rdma_port *port = nport->priv; |
8f000cac | 1938 | |
a032e4f6 SG |
1939 | cancel_delayed_work_sync(&port->repair_work); |
1940 | nvmet_rdma_disable_port(port); | |
1941 | kfree(port); | |
8f000cac CH |
1942 | } |
1943 | ||
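/*
 * Report the transport address for discovery log pages.  When the port
 * listens on a wildcard address, report the source address of the
 * connection that carried this request instead of the configured traddr.
 */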
4c652685 | 1944 | static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, |
a032e4f6 | 1945 | struct nvmet_port *nport, char *traddr) |
4c652685 | 1946 | { |
a032e4f6 SG |
1947 | struct nvmet_rdma_port *port = nport->priv; |
1948 | struct rdma_cm_id *cm_id = port->cm_id; | |
4c652685 SG |
1949 | |
1950 | if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { | |
1951 | struct nvmet_rdma_rsp *rsp = | |
1952 | container_of(req, struct nvmet_rdma_rsp, req); | |
1953 | struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; | |
1954 | struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; | |
1955 | ||
1956 | sprintf(traddr, "%pISc", addr); | |
1957 | } else { | |
a032e4f6 | 1958 | memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); |
4c652685 SG |
1959 | } |
1960 | } | |
1961 | ||
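/*
 * MDTS is a power of two in units of the minimum memory page size.
 * Assuming the 4KB mpsmin noted at the top of this file, this caps
 * transfers at 2^8 * 4KB = 1MB, or 2^5 * 4KB = 128KB when T10-PI
 * metadata support is enabled.
 */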
ec6d20e1 MG |
1962 | static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl) |
1963 | { | |
b09160c3 IR |
1964 | if (ctrl->pi_support) |
1965 | return NVMET_RDMA_MAX_METADATA_MDTS; | |
ec6d20e1 MG |
1966 | return NVMET_RDMA_MAX_MDTS; |
1967 | } | |
1968 | ||
e929f06d | 1969 | static const struct nvmet_fabrics_ops nvmet_rdma_ops = { |
8f000cac CH |
1970 | .owner = THIS_MODULE, |
1971 | .type = NVMF_TRTYPE_RDMA, | |
8f000cac | 1972 | .msdbd = 1, |
6fa350f7 | 1973 | .flags = NVMF_KEYED_SGLS | NVMF_METADATA_SUPPORTED, |
8f000cac CH |
1974 | .add_port = nvmet_rdma_add_port, |
1975 | .remove_port = nvmet_rdma_remove_port, | |
1976 | .queue_response = nvmet_rdma_queue_response, | |
1977 | .delete_ctrl = nvmet_rdma_delete_ctrl, | |
4c652685 | 1978 | .disc_traddr = nvmet_rdma_disc_port_addr, |
ec6d20e1 | 1979 | .get_mdts = nvmet_rdma_get_mdts, |
8f000cac CH |
1980 | }; |
1981 | ||
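/*
 * ib_client remove callback: if the device being unplugged is one we have
 * queues on, disconnect every such queue and flush the scheduled release
 * work so everything is freed before the device goes away.
 */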
f1d4ef7d SG |
1982 | static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data) |
1983 | { | |
43b92fd2 | 1984 | struct nvmet_rdma_queue *queue, *tmp; |
a3dd7d00 MG |
1985 | struct nvmet_rdma_device *ndev; |
1986 | bool found = false; | |
1987 | ||
1988 | mutex_lock(&device_list_mutex); | |
1989 | list_for_each_entry(ndev, &device_list, entry) { | |
1990 | if (ndev->device == ib_device) { | |
1991 | found = true; | |
1992 | break; | |
1993 | } | |
1994 | } | |
1995 | mutex_unlock(&device_list_mutex); | |
1996 | ||
1997 | if (!found) | |
1998 | return; | |
f1d4ef7d | 1999 | |
a3dd7d00 MG |
2000 | /* |
2001 | * IB Device that is used by nvmet controllers is being removed, | |
2002 | * delete all queues using this device. | |
2003 | */ | |
f1d4ef7d | 2004 | mutex_lock(&nvmet_rdma_queue_mutex); |
43b92fd2 IR |
2005 | list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, |
2006 | queue_list) { | |
f1d4ef7d SG |
2007 | if (queue->dev->device != ib_device) |
2008 | continue; | |
2009 | ||
2010 | pr_info("Removing queue %d\n", queue->idx); | |
43b92fd2 | 2011 | list_del_init(&queue->queue_list); |
f1d4ef7d SG |
2012 | __nvmet_rdma_queue_disconnect(queue); |
2013 | } | |
2014 | mutex_unlock(&nvmet_rdma_queue_mutex); | |
2015 | ||
2016 | flush_scheduled_work(); | |
2017 | } | |
2018 | ||
2019 | static struct ib_client nvmet_rdma_ib_client = { | |
2020 | .name = "nvmet_rdma", | |
f1d4ef7d SG |
2021 | .remove = nvmet_rdma_remove_one |
2022 | }; | |
2023 | ||
8f000cac CH |
2024 | static int __init nvmet_rdma_init(void) |
2025 | { | |
f1d4ef7d SG |
2026 | int ret; |
2027 | ||
2028 | ret = ib_register_client(&nvmet_rdma_ib_client); | |
2029 | if (ret) | |
2030 | return ret; | |
2031 | ||
2032 | ret = nvmet_register_transport(&nvmet_rdma_ops); | |
2033 | if (ret) | |
2034 | goto err_ib_client; | |
2035 | ||
2036 | return 0; | |
2037 | ||
2038 | err_ib_client: | |
2039 | ib_unregister_client(&nvmet_rdma_ib_client); | |
2040 | return ret; | |
8f000cac CH |
2041 | } |
2042 | ||
2043 | static void __exit nvmet_rdma_exit(void) | |
2044 | { | |
8f000cac | 2045 | nvmet_unregister_transport(&nvmet_rdma_ops); |
f1d4ef7d | 2046 | ib_unregister_client(&nvmet_rdma_ib_client); |
cb4876e8 | 2047 | WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list)); |
8f000cac CH |
2048 | ida_destroy(&nvmet_rdma_queue_ida); |
2049 | } | |
2050 | ||
2051 | module_init(nvmet_rdma_init); | |
2052 | module_exit(nvmet_rdma_exit); | |
2053 | ||
2054 | MODULE_LICENSE("GPL v2"); | |
2055 | MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */ |