net/smc/smc_ib.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */
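/* the values above are ib_qp_attr encodings, not plain time/retry counts:
 * the local ack timeout follows the InfiniBand formula 4.096 usec * 2^timeout
 * (so 15 is roughly 134 msec), and 7 selects infinite retries
 */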

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */

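/* QP state transition helpers: a link's RC QP is stepped through
 * RESET -> INIT -> RTR -> RTS with the attributes set up below
 */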
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

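/* bring the link's QP into a usable state: INIT and RTR always, plus RTS on
 * the server side; the client side QP is expected to reach RTS later via
 * smc_ib_modify_qp_rts() during the CLC handshake
 */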
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr =
		container_of(lnk, struct smc_link_group, lnk[0]);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

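/* terminate all link groups whose single link uses this port of the given
 * IB device
 */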
static void smc_ib_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr, *l;

	list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
		if (lgr->lnk[SMC_SINGLE_LINK].smcibdev == smcibdev &&
		    lgr->lnk[SMC_SINGLE_LINK].ibport == ibport)
			smc_lgr_terminate(lgr);
	}
}

/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1))
			smc_ib_port_terminate(smcibdev, port_idx + 1);
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

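/* allocate a protection domain on the link's IB device; the link's queue
 * pair and memory regions are created within this PD
 */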
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

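/* handle asynchronous QP events; like the global event handler, only record
 * the affected port and defer the actual processing to the port event worker
 */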
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;	/* qp_context */
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_GID_CHANGE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

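	/* the whole scatterlist must collapse into a single MR mapping;
	 * anything else cannot be addressed as one contiguous buffer
	 */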
	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}

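/* determine the gid of an IB port and the MAC address of its netdev */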
static int smc_ib_fill_gid_and_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct ib_gid_attr gattr;
	int rc;

	rc = ib_query_gid(smcibdev->ibdev, ibport, 0,
			  &smcibdev->gid[ibport - 1], &gattr);
	if (rc || !gattr.ndev)
		return -ENODEV;

	memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN);
	dev_put(gattr.ndev);
	return 0;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_gid_and_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

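/* per-device setup: create send and receive completion queues, register the
 * global IB event handler and initialize the work request infrastructure
 */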
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_WR_MAX_CQE, .comp_vector = 0 };
	long rc;

	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}

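/* release the per-device resources created by smc_ib_setup_per_ibdev() */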
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	smc_wr_remove_dev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}

static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
}

/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove = smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}