// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */
struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};
#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */
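
/* The helpers below drive the standard RC queue pair state machine
 * (RESET -> INIT -> RTR -> RTS) for an SMC link via ib_modify_qp().
 */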
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}
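
/* bring the QP of a new link into working state: move it through INIT and
 * RTR, arm the receive completion queue and post the initial receive
 * buffers; the server side additionally advances the QP to RTS here
 */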
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}
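
/* determine the RoCE MAC address of an ibport from the net device that is
 * associated with GID index 0 of this port
 */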
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct ib_gid_attr gattr;
	union ib_gid gid;
	int rc;

	rc = ib_query_gid(smcibdev->ibdev, ibport, 0, &gid, &gattr);
	if (rc || !gattr.ndev)
		return -ENODEV;

	memcpy(smcibdev->mac[ibport - 1], gattr.ndev->dev_addr, ETH_ALEN);
	dev_put(gattr.ndev);
	return 0;
}
/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}
bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}
/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	struct ib_gid_attr gattr;
	union ib_gid _gid;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		memset(&_gid, 0, SMC_GID_SIZE);
		memset(&gattr, 0, sizeof(gattr));
		if (ib_query_gid(smcibdev->ibdev, ibport, i, &_gid, &gattr))
			continue;
		if (!gattr.ndev)
			continue;
		if (((!vlan_id && !is_vlan_dev(gattr.ndev)) ||
		     (vlan_id && is_vlan_dev(gattr.ndev) &&
		      vlan_dev_vlan_id(gattr.ndev) == vlan_id)) &&
		    gattr.gid_type == IB_GID_TYPE_IB) {
			if (gid)
				memcpy(gid, &_gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = i;
			dev_put(gattr.ndev);
			return 0;
		}
		dev_put(gattr.ndev);
	}
	return -ENODEV;
}
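
/* cache the port attributes and the RoCE MAC address of an ibport;
 * the first active port seen also defines the local system identifier
 */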
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1))
			smc_port_terminate(smcibdev, port_idx + 1);
	}
}
/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
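
/* each link owns its own protection domain on the IB device */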
void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}
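
/* asynchronous QP event handler; like the global event handler it only marks
 * the affected port and defers the real work to the port event worker
 */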
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_ib_device *smcibdev =
		(struct smc_ib_device *)ibevent->device;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_GID_CHANGE:
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}
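
/* destroy the RC queue pair of a link, if one was created */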
void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}
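
/* deregister a memory region obtained via smc_ib_get_memory_region() */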
void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}
static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}
/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}
/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}
/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}
/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}
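
/* per-device initialization: create one send and one receive completion
 * queue shared by all links using this IB device and register the device
 * with the work request layer
 */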
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}
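
/* undo smc_ib_setup_per_ibdev(): detach from the work request layer and
 * destroy both completion queues
 */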
static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	smc_wr_remove_dev(smcibdev);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}
static struct ib_client smc_ib_client;
/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		smc_pnetid_by_dev_port(ibdev->dev.parent, i,
				       smcibdev->pnetid[i]);
	}
	schedule_work(&smcibdev->port_event_work);
}
/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	kfree(smcibdev);
}
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};
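
/* make SMC known to the IB core; the add/remove callbacks are then invoked
 * for every existing and future RDMA device
 */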
int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}