// SPDX-License-Identifier: GPL-2.0
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Basic Transport Functions exploiting InfiniBand API
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"

#define SMC_LGR_NUM_INCR                256
#define SMC_LGR_FREE_DELAY_SERV         (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT         (SMC_LGR_FREE_DELAY_SERV + 10)

static u32 smc_lgr_num;                 /* unique link group number */

static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
{
        /* client link group creation always follows the server link group
         * creation. For client use a somewhat higher removal delay time,
         * otherwise there is a risk of out-of-sync link groups.
         */
        mod_delayed_work(system_wq, &lgr->free_work,
                         lgr->role == SMC_CLNT ? SMC_LGR_FREE_DELAY_CLNT :
                                                 SMC_LGR_FREE_DELAY_SERV);
}

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn        connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
        struct rb_node **link, *parent = NULL;
        u32 token = conn->alert_token_local;

        link = &conn->lgr->conns_all.rb_node;
        while (*link) {
                struct smc_connection *cur = rb_entry(*link,
                                        struct smc_connection, alert_node);

                parent = *link;
                if (cur->alert_token_local > token)
                        link = &parent->rb_left;
                else
                        link = &parent->rb_right;
        }
        /* Put the new node there */
        rb_link_node(&conn->alert_node, parent, link);
        rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and not assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        static atomic_t nexttoken = ATOMIC_INIT(0);

        /* find a new alert_token_local value not yet used by some connection
         * in this link group
         */
        sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
        while (!conn->alert_token_local) {
                conn->alert_token_local = atomic_inc_return(&nexttoken);
                if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
                        conn->alert_token_local = 0;
        }
        smc_lgr_add_alert_token(conn);
        conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
        struct smc_link_group *lgr = conn->lgr;

        rb_erase(&conn->alert_node, &lgr->conns_all);
        lgr->conns_num--;
        conn->alert_token_local = 0;
        conn->lgr = NULL;
        sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection and trigger lgr freeing if applicable
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;
        int reduced = 0;

        write_lock_bh(&lgr->conns_lock);
        if (conn->alert_token_local) {
                reduced = 1;
                __smc_lgr_unregister_conn(conn);
        }
        write_unlock_bh(&lgr->conns_lock);
        if (!reduced || lgr->conns_num)
                return;
        smc_lgr_schedule_free_work(lgr);
}

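/* delayed work to free a link group whose last connection is gone;
 * bails out if new connections showed up in the meantime
 */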
static void smc_lgr_free_work(struct work_struct *work)
{
        struct smc_link_group *lgr = container_of(to_delayed_work(work),
                                                  struct smc_link_group,
                                                  free_work);
        bool conns;

        spin_lock_bh(&smc_lgr_list.lock);
        if (list_empty(&lgr->list))
                goto free;
        read_lock_bh(&lgr->conns_lock);
        conns = RB_EMPTY_ROOT(&lgr->conns_all);
        read_unlock_bh(&lgr->conns_lock);
        if (!conns) { /* number of lgr connections is no longer zero */
                spin_unlock_bh(&smc_lgr_list.lock);
                return;
        }
        list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
        spin_unlock_bh(&smc_lgr_list.lock);
        if (!delayed_work_pending(&lgr->free_work))
                smc_lgr_free(lgr);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc,
                          struct smc_ib_device *smcibdev, u8 ibport,
                          char *peer_systemid, unsigned short vlan_id)
{
        struct smc_link_group *lgr;
        struct smc_link *lnk;
        u8 rndvec[3];
        int rc = 0;
        int i;

        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = -ENOMEM;
                goto out;
        }
        lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
        lgr->sync_err = false;
        memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
        lgr->vlan_id = vlan_id;
        rwlock_init(&lgr->sndbufs_lock);
        rwlock_init(&lgr->rmbs_lock);
        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                INIT_LIST_HEAD(&lgr->sndbufs[i]);
                INIT_LIST_HEAD(&lgr->rmbs[i]);
        }
        smc_lgr_num += SMC_LGR_NUM_INCR;
        memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE);
        INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
        lgr->conns_all = RB_ROOT;

        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        /* initialize link */
        lnk->state = SMC_LNK_ACTIVATING;
        lnk->link_id = SMC_SINGLE_LINK;
        lnk->smcibdev = smcibdev;
        lnk->ibport = ibport;
        lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
        if (!smcibdev->initialized)
                smc_ib_setup_per_ibdev(smcibdev);
        get_random_bytes(rndvec, sizeof(rndvec));
        lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
        rc = smc_wr_alloc_link_mem(lnk);
        if (rc)
                goto free_lgr;
        rc = smc_ib_create_protection_domain(lnk);
        if (rc)
                goto free_link_mem;
        rc = smc_ib_create_queue_pair(lnk);
        if (rc)
                goto dealloc_pd;
        rc = smc_wr_create_link(lnk);
        if (rc)
                goto destroy_qp;
        init_completion(&lnk->llc_confirm);
        init_completion(&lnk->llc_confirm_resp);
        init_completion(&lnk->llc_add);
        init_completion(&lnk->llc_add_resp);

        smc->conn.lgr = lgr;
        rwlock_init(&lgr->conns_lock);
        spin_lock_bh(&smc_lgr_list.lock);
        list_add(&lgr->list, &smc_lgr_list.list);
        spin_unlock_bh(&smc_lgr_list.lock);
        return 0;

destroy_qp:
        smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
        smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
        smc_wr_free_link_mem(lnk);
free_lgr:
        kfree(lgr);
out:
        return rc;
}

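/* mark the buffers of a finished connection as unused so that other
 * connections of the same link group can reuse them
 */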
static void smc_buf_unuse(struct smc_connection *conn)
{
        if (conn->sndbuf_desc) {
                conn->sndbuf_desc->used = 0;
                conn->sndbuf_size = 0;
        }
        if (conn->rmb_desc) {
                conn->rmb_desc->reused = true;
                conn->rmb_desc->used = 0;
                conn->rmbe_size = 0;
        }
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
        if (!conn->lgr)
                return;
        smc_cdc_tx_dismiss_slots(conn);
        smc_lgr_unregister_conn(conn);
        smc_buf_unuse(conn);
}

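/* tear down a link: reset the queue pair and release all IB resources */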
static void smc_link_clear(struct smc_link *lnk)
{
        lnk->peer_qpn = 0;
        smc_ib_modify_qp_reset(lnk);
        smc_wr_free_link(lnk);
        smc_ib_destroy_queue_pair(lnk);
        smc_ib_dealloc_protection_domain(lnk);
        smc_wr_free_link_mem(lnk);
}

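/* free a send buffer or RMB descriptor: put the memory region of an RMB,
 * unmap the buffer from the IB device, and release its pages
 */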
static void smc_buf_free(struct smc_buf_desc *buf_desc, struct smc_link *lnk,
                         bool is_rmb)
{
        if (is_rmb) {
                if (buf_desc->mr_rx[SMC_SINGLE_LINK])
                        smc_ib_put_memory_region(
                                        buf_desc->mr_rx[SMC_SINGLE_LINK]);
                smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
                                    DMA_FROM_DEVICE);
        } else {
                smc_ib_buf_unmap_sg(lnk->smcibdev, buf_desc,
                                    DMA_TO_DEVICE);
        }
        sg_free_table(&buf_desc->sgt[SMC_SINGLE_LINK]);
        if (buf_desc->cpu_addr)
                free_pages((unsigned long)buf_desc->cpu_addr, buf_desc->order);
        kfree(buf_desc);
}

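/* free all send buffers or all RMBs of a link group */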
static void __smc_lgr_free_bufs(struct smc_link_group *lgr, bool is_rmb)
{
        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
        struct smc_buf_desc *buf_desc, *bf_desc;
        struct list_head *buf_list;
        int i;

        for (i = 0; i < SMC_RMBE_SIZES; i++) {
                if (is_rmb)
                        buf_list = &lgr->rmbs[i];
                else
                        buf_list = &lgr->sndbufs[i];
                list_for_each_entry_safe(buf_desc, bf_desc, buf_list,
                                         list) {
                        list_del(&buf_desc->list);
                        smc_buf_free(buf_desc, lnk, is_rmb);
                }
        }
}

static void smc_lgr_free_bufs(struct smc_link_group *lgr)
{
        /* free send buffers */
        __smc_lgr_free_bufs(lgr, false);
        /* free rmbs */
        __smc_lgr_free_bufs(lgr, true);
}

/* remove a link group */
void smc_lgr_free(struct smc_link_group *lgr)
{
        smc_lgr_free_bufs(lgr);
        smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
        kfree(lgr);
}

void smc_lgr_forget(struct smc_link_group *lgr)
{
        spin_lock_bh(&smc_lgr_list.lock);
        /* do not use this link group for new connections */
        if (!list_empty(&lgr->list))
                list_del_init(&lgr->list);
        spin_unlock_bh(&smc_lgr_list.lock);
}

/* terminate link group abnormally */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
        struct smc_connection *conn;
        struct smc_sock *smc;
        struct rb_node *node;

        smc_lgr_forget(lgr);

        write_lock_bh(&lgr->conns_lock);
        node = rb_first(&lgr->conns_all);
        while (node) {
                conn = rb_entry(node, struct smc_connection, alert_node);
                smc = container_of(conn, struct smc_sock, conn);
                sock_hold(&smc->sk); /* sock_put in close work */
                conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
                __smc_lgr_unregister_conn(conn);
                write_unlock_bh(&lgr->conns_lock);
                if (!schedule_work(&conn->close_work))
                        sock_put(&smc->sk);
                write_lock_bh(&lgr->conns_lock);
                node = rb_first(&lgr->conns_all);
        }
        write_unlock_bh(&lgr->conns_lock);
        wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
        smc_lgr_schedule_free_work(lgr);
}

/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
{
        struct dst_entry *dst = sk_dst_get(clcsock->sk);
        int rc = 0;

        *vlan_id = 0;
        if (!dst) {
                rc = -ENOTCONN;
                goto out;
        }
        if (!dst->dev) {
                rc = -ENODEV;
                goto out_rel;
        }

        if (is_vlan_dev(dst->dev))
                *vlan_id = vlan_dev_vlan_id(dst->dev);

out_rel:
        dst_release(dst);
out:
        return rc;
}

/* determine the link gid matching the vlan id of the link group */
static int smc_link_determine_gid(struct smc_link_group *lgr)
{
        struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
        struct ib_gid_attr gattr;
        union ib_gid gid;
        int i;

        if (!lgr->vlan_id) {
                lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1];
                return 0;
        }

        for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len;
             i++) {
                if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid,
                                 &gattr))
                        continue;
                if (gattr.ndev) {
                        if (is_vlan_dev(gattr.ndev) &&
                            vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id) {
                                lnk->gid = gid;
                                dev_put(gattr.ndev);
                                return 0;
                        }
                        dev_put(gattr.ndev);
                }
        }
        return -ENODEV;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc,
                    struct smc_ib_device *smcibdev, u8 ibport,
                    struct smc_clc_msg_local *lcl, int srv_first_contact)
{
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr;
        unsigned short vlan_id;
        enum smc_lgr_role role;
        int local_contact = SMC_FIRST_CONTACT;
        int rc = 0;

        role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
        rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
        if (rc)
                return rc;

        if ((role == SMC_CLNT) && srv_first_contact)
                /* create new link group as well */
                goto create;

        /* determine if an existing link group can be reused */
        spin_lock_bh(&smc_lgr_list.lock);
        list_for_each_entry(lgr, &smc_lgr_list.list, list) {
                write_lock_bh(&lgr->conns_lock);
                if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
                            SMC_SYSTEMID_LEN) &&
                    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
                            SMC_GID_SIZE) &&
                    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
                            sizeof(lcl->mac)) &&
                    !lgr->sync_err &&
                    (lgr->role == role) &&
                    (lgr->vlan_id == vlan_id) &&
                    ((role == SMC_CLNT) ||
                     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
                        /* link group found */
                        local_contact = SMC_REUSE_CONTACT;
                        conn->lgr = lgr;
                        smc_lgr_register_conn(conn); /* add smc conn to lgr */
                        write_unlock_bh(&lgr->conns_lock);
                        break;
                }
                write_unlock_bh(&lgr->conns_lock);
        }
        spin_unlock_bh(&smc_lgr_list.lock);

        if (role == SMC_CLNT && !srv_first_contact &&
            (local_contact == SMC_FIRST_CONTACT)) {
                /* server reuses a link group, but the client wants to
                 * start a new one;
                 * send out_of_sync decline, reason: synchronization error
                 */
                return -ENOLINK;
        }

create:
        if (local_contact == SMC_FIRST_CONTACT) {
                rc = smc_lgr_create(smc, smcibdev, ibport,
                                    lcl->id_for_peer, vlan_id);
                if (rc)
                        goto out;
                smc_lgr_register_conn(conn); /* add smc conn to lgr */
                rc = smc_link_determine_gid(conn->lgr);
        }
        conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
        conn->local_tx_ctrl.len = SMC_WR_TX_SIZE;
#ifndef KERNEL_HAS_ATOMIC64
        spin_lock_init(&conn->acurs_lock);
#endif

out:
        return rc ? rc : local_contact;
}

/* try to reuse a sndbuf or rmb description slot for a certain
 * buffer size; if not available, return NULL
 */
static inline
struct smc_buf_desc *smc_buf_get_slot(struct smc_link_group *lgr,
                                      int compressed_bufsize,
                                      rwlock_t *lock,
                                      struct list_head *buf_list)
{
        struct smc_buf_desc *buf_slot;

        read_lock_bh(lock);
        list_for_each_entry(buf_slot, buf_list, list) {
                if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
                        read_unlock_bh(lock);
                        return buf_slot;
                }
        }
        read_unlock_bh(lock);
        return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
        return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

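/* allocate a new send buffer or RMB and prepare it for use by the IB
 * device: back it with pages, build and map the sg table, and, for an
 * RMB, register a memory region for remote access
 */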
static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
                                               bool is_rmb, int bufsize)
{
        struct smc_buf_desc *buf_desc;
        struct smc_link *lnk;
        int rc;

        /* try to alloc a new buffer */
        buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
        if (!buf_desc)
                return ERR_PTR(-ENOMEM);

        buf_desc->cpu_addr =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
                                         __GFP_NOMEMALLOC |
                                         __GFP_NORETRY | __GFP_ZERO,
                                         get_order(bufsize));
        if (!buf_desc->cpu_addr) {
                kfree(buf_desc);
                return ERR_PTR(-EAGAIN);
        }
        buf_desc->order = get_order(bufsize);

        /* build the sg table from the pages */
        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
                            GFP_KERNEL);
        if (rc) {
                smc_buf_free(buf_desc, lnk, is_rmb);
                return ERR_PTR(rc);
        }
        sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
                   buf_desc->cpu_addr, bufsize);

        /* map sg table to DMA address */
        rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
                               is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
        /* SMC protocol depends on mapping to one DMA address only */
        if (rc != 1) {
                smc_buf_free(buf_desc, lnk, is_rmb);
                return ERR_PTR(-EAGAIN);
        }

        /* create a new memory region for the RMB */
        if (is_rmb) {
                rc = smc_ib_get_memory_region(lnk->roce_pd,
                                              IB_ACCESS_REMOTE_WRITE |
                                              IB_ACCESS_LOCAL_WRITE,
                                              buf_desc);
                if (rc) {
                        smc_buf_free(buf_desc, lnk, is_rmb);
                        return ERR_PTR(rc);
                }
        }

        return buf_desc;
}

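/* create a send buffer or RMB for a connection: start from the socket
 * buffer size and work down to smaller sizes, reusing a free slot of
 * the link group if one exists, otherwise allocating a new buffer
 */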
static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
{
        struct smc_connection *conn = &smc->conn;
        struct smc_link_group *lgr = conn->lgr;
        struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
        struct list_head *buf_list;
        int bufsize, bufsize_short;
        int sk_buf_size;
        rwlock_t *lock;

        if (is_rmb)
                /* use socket recv buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_rcvbuf / 2;
        else
                /* use socket send buffer size (w/o overhead) as start value */
                sk_buf_size = smc->sk.sk_sndbuf / 2;

        for (bufsize_short = smc_compress_bufsize(sk_buf_size);
             bufsize_short >= 0; bufsize_short--) {

                if (is_rmb) {
                        lock = &lgr->rmbs_lock;
                        buf_list = &lgr->rmbs[bufsize_short];
                } else {
                        lock = &lgr->sndbufs_lock;
                        buf_list = &lgr->sndbufs[bufsize_short];
                }
                bufsize = smc_uncompress_bufsize(bufsize_short);
                if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
                        continue;

                /* check for reusable slot in the link group */
                buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
                if (buf_desc) {
                        memset(buf_desc->cpu_addr, 0, bufsize);
                        break; /* found reusable slot */
                }

                buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
                if (PTR_ERR(buf_desc) == -ENOMEM)
                        break;
                if (IS_ERR(buf_desc))
                        continue;

                buf_desc->used = 1;
                write_lock_bh(lock);
                list_add(&buf_desc->list, buf_list);
                write_unlock_bh(lock);
                break; /* found */
        }

        if (IS_ERR(buf_desc))
                return -ENOMEM;

        if (is_rmb) {
                conn->rmb_desc = buf_desc;
                conn->rmbe_size = bufsize;
                conn->rmbe_size_short = bufsize_short;
                smc->sk.sk_rcvbuf = bufsize * 2;
                atomic_set(&conn->bytes_to_rcv, 0);
                conn->rmbe_update_limit = smc_rmb_wnd_update_limit(bufsize);
        } else {
                conn->sndbuf_desc = buf_desc;
                conn->sndbuf_size = bufsize;
                smc->sk.sk_sndbuf = bufsize * 2;
                atomic_set(&conn->sndbuf_space, bufsize);
        }
        return 0;
}

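/* sync the send buffer for access by the CPU */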
void smc_sndbuf_sync_sg_for_cpu(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                               conn->sndbuf_desc, DMA_TO_DEVICE);
}

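/* sync the send buffer for access by the IB device */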
void smc_sndbuf_sync_sg_for_device(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                                  conn->sndbuf_desc, DMA_TO_DEVICE);
}

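/* sync the RMB for access by the CPU */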
void smc_rmb_sync_sg_for_cpu(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        smc_ib_sync_sg_for_cpu(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                               conn->rmb_desc, DMA_FROM_DEVICE);
}

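/* sync the RMB for access by the IB device */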
void smc_rmb_sync_sg_for_device(struct smc_connection *conn)
{
        struct smc_link_group *lgr = conn->lgr;

        smc_ib_sync_sg_for_device(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
                                  conn->rmb_desc, DMA_FROM_DEVICE);
}

/* create the send and receive buffers for an SMC socket;
 * receive buffers are called RMBs
 * (even though the SMC protocol allows more than one RMB-element per RMB,
 * the Linux implementation uses just one RMB-element per RMB, i.e. uses an
 * extra RMB for every connection in a link group)
 */
int smc_buf_create(struct smc_sock *smc)
{
        int rc;

        /* create send buffer */
        rc = __smc_buf_create(smc, false);
        if (rc)
                return rc;
        /* create rmb */
        rc = __smc_buf_create(smc, true);
        if (rc)
                smc_buf_free(smc->conn.sndbuf_desc,
                             &smc->conn.lgr->lnk[SMC_SINGLE_LINK], false);
        return rc;
}

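/* reserve a free rtoken slot in the link group */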
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
        int i;

        for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
                if (!test_and_set_bit(i, lgr->rtokens_used_mask))
                        return i;
        }
        return -ENOSPC;
}

/* add a new rtoken from peer */
int smc_rtoken_add(struct smc_link_group *lgr, __be64 nw_vaddr, __be32 nw_rkey)
{
        u64 dma_addr = be64_to_cpu(nw_vaddr);
        u32 rkey = ntohl(nw_rkey);
        int i;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
                    (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        /* already in list */
                        return i;
                }
        }
        i = smc_rmb_reserve_rtoken_idx(lgr);
        if (i < 0)
                return i;
        lgr->rtokens[i][SMC_SINGLE_LINK].rkey = rkey;
        lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = dma_addr;
        return i;
}

/* delete an rtoken */
int smc_rtoken_delete(struct smc_link_group *lgr, __be32 nw_rkey)
{
        u32 rkey = ntohl(nw_rkey);
        int i;

        for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
                if (lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey &&
                    test_bit(i, lgr->rtokens_used_mask)) {
                        lgr->rtokens[i][SMC_SINGLE_LINK].rkey = 0;
                        lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr = 0;

                        clear_bit(i, lgr->rtokens_used_mask);
                        return 0;
                }
        }
        return -ENOENT;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
                            struct smc_clc_msg_accept_confirm *clc)
{
        conn->rtoken_idx = smc_rtoken_add(conn->lgr, clc->rmb_dma_addr,
                                          clc->rmb_rkey);
        if (conn->rtoken_idx < 0)
                return conn->rtoken_idx;
        return 0;
}