// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"
#define SMC_LGR_NUM_INCR	256
#define SMC_LGR_FREE_DELAY_SERV	(600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT	(SMC_LGR_FREE_DELAY_SERV + 10)

struct smc_lgr_list smc_lgr_list = {	/* established link groups */
	.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
	.list = LIST_HEAD_INIT(smc_lgr_list.list),
};

static u32 smc_lgr_num;			/* unique link group number */
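/* Both free delays are in jiffies: a server link group lingers for
 * 600 seconds after its last connection is gone, and a client link group
 * waits 10 jiffies longer so that the server side always frees first.
 * smc_lgr_num is bumped by SMC_LGR_NUM_INCR per link group; its first
 * SMC_LGR_ID_SIZE bytes (host byte order) become the link group id,
 * see smc_lgr_create().
 */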
static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
	/* client link group creation always follows the server link group
	 * creation. For client use a somewhat higher removal delay time,
	 * otherwise there is a risk of out-of-sync link groups.
	 */
	mod_delayed_work(system_wq, &lgr->free_work,
			 lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
						 SMC_LGR_FREE_DELAY_SERV);
}
/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}
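/* Lookups walk the same tree keyed by alert_token_local. Tokens are unique
 * within a link group (smc_lgr_register_conn() below skips any value already
 * found by smc_lgr_find_conn()), so the equal-key case cannot occur for live
 * connections.
 */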
/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some
	 * connection in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}
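/* Should the 32-bit token counter wrap around to 0, the loop above simply
 * retries: '0' means "no token assigned" and is therefore never handed out,
 * and smc_lgr_find_conn() rejects any value still owned by a live connection
 * in this link group.
 */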
/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	conn->lgr = NULL;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}
/* Unregister connection and trigger lgr freeing if applicable
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;
	bool reduced = false;

	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		reduced = true;
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	if (!reduced || lgr->conns_num)
		return;
	smc_lgr_schedule_free_work(lgr);
}
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	bool conns;

	spin_lock_bh(&smc_lgr_list.lock);
	if (list_empty(&lgr->list))
		goto free;
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
	spin_unlock_bh(&smc_lgr_list.lock);
	if (!delayed_work_pending(&lgr->free_work))
		smc_lgr_free(lgr);
}
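/* The delayed_work_pending() check above keeps the link group alive when
 * free_work was re-armed (via smc_lgr_schedule_free_work()) after this
 * iteration started; freeing is then left to the newly scheduled iteration.
 */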
/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc,
			  struct smc_ib_device *smcibdev, u8 ibport,
			  char *peer_systemid, unsigned short vlan_id)
{
	struct smc_link_group *lgr;
	struct smc_link *lnk;
	u8 rndvec[3];
	int rc = 0;
	int i;

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = -ENOMEM;
		goto out;
	}
	lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	lgr->sync_err = false;
	memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
	lgr->vlan_id = vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	lgr->conns_all = RB_ROOT;

	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	/* initialize link */
	lnk->state = SMC_LNK_ACTIVATING;
	lnk->link_id = SMC_SINGLE_LINK;
	lnk->smcibdev = smcibdev;
	lnk->ibport = ibport;
	lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
	if (!smcibdev->initialized)
		smc_ib_setup_per_ibdev(smcibdev);
	get_random_bytes(rndvec, sizeof(rndvec));
	/* random 24-bit initial packet sequence number from 3 random bytes */
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto free_lgr;
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	init_completion(&lnk->llc_confirm);
	init_completion(&lnk->llc_confirm_resp);
	init_completion(&lnk->llc_add);
	init_completion(&lnk->llc_add_resp);

	smc->conn.lgr = lgr;
	rwlock_init(&lgr->conns_lock);
	spin_lock_bh(&smc_lgr_list.lock);
	list_add(&lgr->list, &smc_lgr_list.list);
	spin_unlock_bh(&smc_lgr_list.lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
free_lgr:
	kfree(lgr);
out:
	return rc;
}
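/* Link bring-up above follows a strict order - work request memory,
 * protection domain, queue pair, then the work request link - and the error
 * labels unwind it in exactly the reverse order, so a failure at any step
 * releases only what was already set up.
 */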
static void smc_buf_unuse(struct smc_connection *conn)
{
	if (conn->sndbuf_desc) {
		conn->sndbuf_desc->used = 0;
		conn->sndbuf_size = 0;
	}
	if (conn->rmb_desc) {
		conn->rmb_desc->reused = true;
		conn->rmb_desc->used = 0;
		conn->rmbe_size = 0;
	}
}
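/* smc_buf_unuse() only clears the 'used' flags; the buffers themselves stay
 * on the link group's sndbufs[]/rmbs[] lists so that a later connection of
 * the same size class can pick them up again via smc_buf_get_slot().
 */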
/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	if (!conn->lgr)
		return;
	smc_cdc_tx_dismiss_slots(conn);
	smc_lgr_unregister_conn(conn);
	smc_buf_unuse(conn);
}
static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
}
static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
			 bool is_rmb)
{
	if (is_rmb) {
		if (buf_desc->mr_rx[SMC_SINGLE_LINK])
			smc_ib_put_memory_region(
					buf_desc->mr_rx[SMC_SINGLE_LINK]);
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_FROM_DEVICE);
	} else {
		smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
				    DMA_TO_DEVICE);
	}
	sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
	if (buf_desc->cpu_addr)
		free_pages((unsigned long)buf_desc->cpu_addr, buf_desc->order);
	kfree(buf_desc);
}
static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
	struct smc_buf_desc *buf_desc, *bf_desc;
	struct list_head *buf_list;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		if (is_rmb)
			buf_list = &lgr->rmbs[i];
		else
			buf_list = &lgr->sndbufs[i];
		list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
					 list) {
			list_del(&buf_desc->list);
			smc_buf_free(buf_desc, lnk, is_rmb);
		}
	}
}
static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
	/* free send buffers */
	__smc_lgr_free_bufs(lgr, false);
	/* free rmbs */
	__smc_lgr_free_bufs(lgr, true);
}
/* remove a link group */
void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_bufs(lgr);
	smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
	kfree(lgr);
}
void smc_lgr_forget(struct smc_link_group *lgr)
{
	spin_lock_bh(&smc_lgr_list.lock);
	/* do not use this link group for new connections */
	if (!list_empty(&lgr->list))
		list_del_init(&lgr->list);
	spin_unlock_bh(&smc_lgr_list.lock);
}
/* terminate link group abnormally */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	smc_lgr_forget(lgr);

	write_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk); /* sock_put in close work */
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		__smc_lgr_unregister_conn(conn);
		write_unlock_bh(&lgr->conns_lock);
		if (!schedule_work(&conn->close_work))
			sock_put(&smc->sk);
		write_lock_bh(&lgr->conns_lock);
		node = rb_first(&lgr->conns_all);
	}
	write_unlock_bh(&lgr->conns_lock);
	wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
	smc_lgr_schedule_free_work(lgr);
}
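/* rb_first() is re-read on every loop iteration above because
 * __smc_lgr_unregister_conn() removes the current node and conns_lock is
 * dropped around schedule_work(), so the tree may change in between.
 */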
/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc = 0;

	*vlan_id = 0;
	if (!dst)
		return -ENOTCONN;
	if (!dst->dev) {
		dst_release(dst);
		return -ENODEV;
	}
	if (is_vlan_dev(dst->dev))
		*vlan_id = vlan_dev_vlan_id(dst->dev);
	dst_release(dst);
	return rc;
}
/* determine the link gid matching the vlan id of the link group */
static int smc_link_determine_gid(struct smc_link_group *lgr)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
	struct ib_gid_attr gattr;
	union ib_gid gid;
	int i;

	if (!lgr->vlan_id) {
		lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1];
		return 0;
	}

	for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len;
	     i++) {
		if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid,
				 &gattr))
			continue;
		if (!gattr.ndev)
			continue;
		if (is_vlan_dev(gattr.ndev) &&
		    vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id) {
			lnk->gid = gid;
			dev_put(gattr.ndev);
			return 0;
		}
		dev_put(gattr.ndev);
	}
	return -ENODEV;
}
/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc,
		    struct smc_ib_device *smcibdev, u8 ibport,
		    struct smc_clc_msg_local *lcl, int srv_first_contact)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr;
	unsigned short vlan_id;
	enum smc_lgr_role role;
	int local_contact = SMC_FIRST_CONTACT;
	int rc = 0;

	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
	if (rc)
		return rc;

	if ((role == SMC_CLNT) && srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
		write_lock_bh(&lgr->conns_lock);
		if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
			    SMC_SYSTEMID_LEN) &&
		    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			    SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			    sizeof(lcl->mac)) &&
		    !lgr->sync_err &&
		    (lgr->role == role) &&
		    (lgr->vlan_id == vlan_id) &&
		    ((role == SMC_CLNT) ||
		     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
			/* link group found */
			local_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	if (role == SMC_CLNT && !srv_first_contact &&
	    (local_contact == SMC_FIRST_CONTACT)) {
		/* Server reuses a link group, but Client wants to start
		 * a new one,
		 * send out_of_sync decline, reason synchr. error
		 */
		return -ENOLINK;
	}

create:
	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, smcibdev, ibport,
				    lcl->id_for_peer, vlan_id);
		if (rc)
			goto out;
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
		rc = smc_link_determine_gid(conn->lgr);
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc ? rc : local_contact;
}
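/* The buffer size-class helpers used below (smc_compress_bufsize() and
 * smc_uncompress_bufsize()) are not part of this excerpt. A minimal sketch,
 * assuming a 16 KB minimum RMB size (SMC_BUF_MIN_SIZE, taken here to be
 * 16384) and SMC_RMBE_SIZES power-of-two classes as implied by the
 * lgr->sndbufs[]/rmbs[] arrays:
 */
static u8 smc_compress_bufsize(int size)
{
	u8 compressed;

	if (size <= SMC_BUF_MIN_SIZE)
		return 0;
	/* pick the smallest class whose power-of-two size covers 'size' */
	size = (size - 1) >> 14;
	compressed = ilog2(size) + 1;
	if (compressed >= SMC_RMBE_SIZES)
		compressed = SMC_RMBE_SIZES - 1;
	return compressed;
}

static int smc_uncompress_bufsize(u8 compressed)
{
	/* class 0 is 16 KB (2^14), each further class doubles the size */
	return (int)(1U << ((int)compressed + 14));
}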
/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static struct smc_buf_desc *smc_buf_get_slot(struct smc_link_group *lgr,
					     int compressed_bufsize,
					     rwlock_t *lock,
					     struct list_head *buf_list)
{
	struct smc_buf_desc *buf_slot;

	read_lock_bh(lock);
	list_for_each_entry(buf_slot, buf_list, list) {
		if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
			read_unlock_bh(lock);
			return buf_slot;
		}
	}
	read_unlock_bh(lock);
	return NULL;
}
/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}
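/* Worked example: rmbe_size / 10 is the RFC 7609 "10% of the receive buffer
 * space" threshold - 6553 bytes for a 64 KB RMB - and min_t() additionally
 * caps the result at SOCK_MIN_SNDBUF / 2, so the update limit stays small
 * even for large RMBs.
 */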
static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
					       bool is_rmb, int bufsize)
{
	struct smc_buf_desc *buf_desc;
	struct smc_link *lnk;
	int rc;

	/* try to alloc a new buffer */
	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
	if (!buf_desc)
		return ERR_PTR(-ENOMEM);

	buf_desc->cpu_addr =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
					 __GFP_NOMEMALLOC |
					 __GFP_NORETRY | __GFP_ZERO,
					 get_order(bufsize));
	if (!buf_desc->cpu_addr) {
		kfree(buf_desc);
		return ERR_PTR(-EAGAIN);
	}
	buf_desc->order = get_order(bufsize);

	/* build the sg table from the pages */
	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
			    GFP_KERNEL);
	if (rc) {
		smc_buf_free(buf_desc, lnk, is_rmb);
		return ERR_PTR(rc);
	}
	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
		   buf_desc->cpu_addr, bufsize);

	/* map sg table to DMA address */
	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
	/* SMC protocol depends on mapping to one DMA address only */
	if (rc != 1) {
		smc_buf_free(buf_desc, lnk, is_rmb);
		return ERR_PTR(-EAGAIN);
	}

	/* create a new memory region for the RMB */
	if (is_rmb) {
		rc = smc_ib_get_memory_region(lnk->roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      buf_desc);
		if (rc) {
			smc_buf_free(buf_desc, lnk, is_rmb);
			return ERR_PTR(rc);
		}
	}

	return buf_desc;
}
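/* Buffer creation above is a pipeline: page allocation, a one-entry sg
 * table, the DMA mapping (which the SMC protocol requires to collapse to a
 * single DMA address), and - for RMBs only - an IB memory region with
 * IB_ACCESS_REMOTE_WRITE so the peer can RDMA-write directly into it.
 */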
static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
	struct list_head *buf_list;
	int bufsize, bufsize_short;
	int sk_buf_size;
	rwlock_t *lock;

	if (is_rmb)
		/* use socket recv buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_rcvbuf / 2;
	else
		/* use socket send buffer size (w/o overhead) as start value */
		sk_buf_size = smc->sk.sk_sndbuf / 2;

	for (bufsize_short = smc_compress_bufsize(sk_buf_size);
	     bufsize_short >= 0; bufsize_short--) {
		if (is_rmb) {
			lock = &lgr->rmbs_lock;
			buf_list = &lgr->rmbs[bufsize_short];
		} else {
			lock = &lgr->sndbufs_lock;
			buf_list = &lgr->sndbufs[bufsize_short];
		}
		bufsize = smc_uncompress_bufsize(bufsize_short);
		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
			continue;

		/* check for reusable slot in the link group */
		buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
		if (buf_desc) {
			memset(buf_desc->cpu_addr, 0, bufsize);
			break; /* found reusable slot */
		}

		buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
		if (PTR_ERR(buf_desc) == -ENOMEM)
			break;
		if (IS_ERR(buf_desc))
			continue;

		buf_desc->used = 1;
		write_lock_bh(lock);
		list_add(&buf_desc->list, buf_list);
		write_unlock_bh(lock);
		break; /* found */
	}

	if (IS_ERR(buf_desc))
		return -ENOMEM;

	if (is_rmb) {
		conn->rmb_desc = buf_desc;
		conn->rmbe_size = bufsize;
		conn->rmbe_size_short = bufsize_short;
		smc->sk.sk_rcvbuf = bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize);
	} else {
		conn->sndbuf_desc = buf_desc;
		conn->sndbuf_size = bufsize;
		smc->sk.sk_sndbuf = bufsize * 2;
		atomic_set(&conn->sndbuf_space, bufsize);
	}
	return 0;
}
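/* The loop above walks the compressed size classes downwards, starting at
 * the class matching half the socket buffer size: a cached slot is preferred,
 * a fresh allocation is attempted otherwise, and on a transient failure
 * (-EAGAIN) the next smaller power-of-two class is tried, while -ENOMEM
 * aborts the search.
 */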
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->sndbuf_desc, DMA_TO_DEVICE);
}

void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
			       conn->rmb_desc, DMA_FROM_DEVICE);
}

void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				  conn->rmb_desc, DMA_FROM_DEVICE);
}
/* create the send and receive buffer for an SMC socket;
 * receive buffers are called RMBs;
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc)
{
	int rc;

	/* create send buffer */
	rc = __smc_buf_create(smc, false);
	if (rc)
		return rc;
	/* create rmb */
	rc = __smc_buf_create(smc, true);
	if (rc)
		smc_buf_free(smc->conn.sndbuf_desc,
			     &smc->conn.lgr->lnk[SMC_SINGLE_LINK], false);
	return rc;
}
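/* The send buffer is created first on purpose: if the RMB cannot be set up,
 * the error path only has the send buffer to undo, so the connection ends up
 * with either both buffers or none.
 */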
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}
/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
	u64 dma_addr = be64_to_cpu(nw_vaddr);
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
		    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			/* already in list */
			return i;
		}
	}
	i = smc_rmb_reserve_rtoken_idx(lgr);
	if (i < 0)
		return i;
	lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return i;
}
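/* An rtoken pairs the rkey and the RMB DMA address the peer announced during
 * the handshake; it is what the local QP needs to address RDMA writes into
 * the peer's RMB. The duplicate check above keeps one slot per peer RMB even
 * if the peer announces the same token again.
 */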
/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
	u32 rkey = ntohl(nw_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
			lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;
			clear_bit(i, lgr->rtokens_used_mask);
			return 0;
		}
	}
	return -ENOENT;
}
/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
					  clc->rmb_rkey);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	return 0;
}