staging: lustre: o2iblnd: make rdma_create_id() support containers
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

#include <linux/io.h>

#include <linux/fs.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>

#include <net/sock.h>
#include <linux/in.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

#define DEBUG_SUBSYSTEM S_LND

#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/lnet.h"
#include "../../../include/linux/lnet/lib-lnet.h"

#define IBLND_PEER_HASH_SIZE 101 /* # peer lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED 100

#define IBLND_N_SCHED 2
#define IBLND_N_SCHED_HIGH 4

typedef struct {
	int *kib_dev_failover; /* HCA failover */
	unsigned int *kib_service; /* IB service number */
	int *kib_min_reconnect_interval; /* first failed connection retry... */
	int *kib_max_reconnect_interval; /* exponentially increasing to this */
	int *kib_cksum; /* checksum kib_msg_t? */
	int *kib_timeout; /* comms timeout (seconds) */
	int *kib_keepalive; /* keepalive timeout (seconds) */
	int *kib_ntx; /* # tx descs */
	char **kib_default_ipif; /* default IPoIB interface */
	int *kib_retry_count;
	int *kib_rnr_retry_count;
	int *kib_ib_mtu; /* IB MTU */
	int *kib_require_priv_port; /* accept only privileged ports */
	int *kib_use_priv_port; /* use privileged port for active connect */
	int *kib_nscheds; /* # threads on each CPT */
} kib_tunables_t;

extern kib_tunables_t kiblnd_tunables;

#define IBLND_MSG_QUEUE_SIZE_V1 8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when to eagerly return credits */

#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */

/* when to eagerly return credits */
#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
				       IBLND_CREDIT_HIGHWATER_V1 : \
				       t->lnd_peercredits_hiw)
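
/*
 * Example (illustrative, not from the original source): on a V1
 * connection credits are returned eagerly once 7 are outstanding
 * (IBLND_CREDIT_HIGHWATER_V1); V2 connections take the threshold from
 * the lnd_peercredits_hiw tunable instead.
 */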

#define kiblnd_rdma_create_id(cb, dev, ps, qpt) \
	rdma_create_id(current->nsproxy->net_ns, cb, dev, ps, qpt)
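
/*
 * Illustrative note (not part of the original header): the wrapper above
 * is what makes rdma_create_id() container-aware. It forwards the calling
 * thread's network namespace, so e.g. a listener created via
 *
 *	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev,
 *				     RDMA_PS_TCP, IB_QPT_RC);
 *
 * expands to rdma_create_id(current->nsproxy->net_ns, ...) and binds
 * inside the caller's namespace rather than implicitly in the initial
 * one, as the pre-namespace API did.
 */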

/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */

/************************/
/* derived constants... */
/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so don't need to give a very large value */
#define IBLND_TX_POOL 256
#define IBLND_FMR_POOL 256
#define IBLND_FMR_POOL_FLUSH 192

#define IBLND_RX_MSGS(c) \
	((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
#define IBLND_RX_MSG_BYTES(c) (IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(c) \
	((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)

/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c) \
	((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
							  c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
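
/*
 * Worked example (illustrative): on a V2 connection with a queue depth
 * of 8, IBLND_RX_MSGS(c) = 8 * 2 + 2 OOB = 18 receive buffers, so the CQ
 * must accommodate 18 receive CQEs plus IBLND_SEND_WRS(c) send CQEs.
 */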

struct kib_hca_dev;

/* o2iblnd can run over aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE IFALIASZ
#else
#define KIB_IFNAME_SIZE 256
#endif

typedef struct {
	struct list_head ibd_list; /* chain on kib_devs */
	struct list_head ibd_fail_list; /* chain on kib_failed_devs */
	__u32 ibd_ifip; /* IPoIB interface IP */

	/* IPoIB interface name */
	char ibd_ifname[KIB_IFNAME_SIZE];
	int ibd_nnets; /* # nets extant */

	unsigned long ibd_next_failover;
	int ibd_failed_failover; /* # failover failures */
	unsigned int ibd_failover; /* failover in progress */
	unsigned int ibd_can_failover; /* IPoIB interface is a bonding master */
	struct list_head ibd_nets;
	struct kib_hca_dev *ibd_hdev;
} kib_dev_t;

typedef struct kib_hca_dev {
	struct rdma_cm_id *ibh_cmid; /* listener cmid */
	struct ib_device *ibh_ibdev; /* IB device */
	int ibh_page_shift; /* page shift of current HCA */
	int ibh_page_size; /* page size of current HCA */
	__u64 ibh_page_mask; /* page mask of current HCA */
	int ibh_mr_shift; /* bits shift of max MR size */
	__u64 ibh_mr_size; /* size of MR */
	struct ib_mr *ibh_mrs; /* global MR */
	struct ib_pd *ibh_pd; /* PD */
	kib_dev_t *ibh_dev; /* owner */
	atomic_t ibh_ref; /* refcount */
} kib_hca_dev_t;

/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE 300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY 1

typedef struct {
	int ibp_npages; /* # pages */
	struct page *ibp_pages[0]; /* page array */
} kib_pages_t;

struct kib_pool;
struct kib_poolset;

typedef int (*kib_ps_pool_create_t)(struct kib_poolset *ps,
				    int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

struct kib_net;

#define IBLND_POOL_NAME_LEN 32

typedef struct kib_poolset {
	spinlock_t ps_lock; /* serialize */
	struct kib_net *ps_net; /* network it belongs to */
	char ps_name[IBLND_POOL_NAME_LEN]; /* pool set name */
	struct list_head ps_pool_list; /* list of pools */
	struct list_head ps_failed_pool_list; /* failed pool list */
	unsigned long ps_next_retry; /* time stamp for retry if failed to allocate */
	int ps_increasing; /* is allocating new pool */
	int ps_pool_size; /* new pool size */
	int ps_cpt; /* CPT id */

	kib_ps_pool_create_t ps_pool_create; /* create a new pool */
	kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
	kib_ps_node_init_t ps_node_init; /* initialize new allocated node */
	kib_ps_node_fini_t ps_node_fini; /* finalize node */
} kib_poolset_t;

typedef struct kib_pool {
	struct list_head po_list; /* chain on pool list */
	struct list_head po_free_list; /* pre-allocated node */
	kib_poolset_t *po_owner; /* pool_set of this pool */
	unsigned long po_deadline; /* deadline of this pool */
	int po_allocated; /* # of elements in use */
	int po_failed; /* pool is created on failed HCA */
	int po_size; /* # of pre-allocated elements */
} kib_pool_t;

typedef struct {
	kib_poolset_t tps_poolset; /* pool-set */
	__u64 tps_next_tx_cookie; /* cookie of TX */
} kib_tx_poolset_t;

typedef struct {
	kib_pool_t tpo_pool; /* pool */
	struct kib_hca_dev *tpo_hdev; /* device for this pool */
	struct kib_tx *tpo_tx_descs; /* all the tx descriptors */
	kib_pages_t *tpo_tx_pages; /* premapped tx msg pages */
} kib_tx_pool_t;

typedef struct {
	spinlock_t fps_lock; /* serialize */
	struct kib_net *fps_net; /* IB network */
	struct list_head fps_pool_list; /* FMR pool list */
	struct list_head fps_failed_pool_list; /* FMR pool list */
	__u64 fps_version; /* validity stamp */
	int fps_cpt; /* CPT id */
	int fps_pool_size;
	int fps_flush_trigger;
	int fps_cache;
	int fps_increasing; /* is allocating new pool */
	unsigned long fps_next_retry; /* time stamp for retry if failed to allocate */
} kib_fmr_poolset_t;

struct kib_fast_reg_descriptor { /* For fast registration */
	struct list_head frd_list;
	struct ib_send_wr frd_inv_wr;
	struct ib_reg_wr frd_fastreg_wr;
	struct ib_mr *frd_mr;
	bool frd_valid;
};

typedef struct {
	struct list_head fpo_list; /* chain on pool list */
	struct kib_hca_dev *fpo_hdev; /* device for this pool */
	kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
	union {
		struct {
			struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
		} fmr;
		struct { /* For fast registration */
			struct list_head fpo_pool_list;
			int fpo_pool_size;
		} fast_reg;
	};
	unsigned long fpo_deadline; /* deadline of this pool */
	int fpo_failed; /* fmr pool is failed */
	int fpo_map_count; /* # of mapped FMR */
	int fpo_is_fmr;
} kib_fmr_pool_t;

typedef struct {
	kib_fmr_pool_t *fmr_pool; /* pool of FMR */
	struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
	struct kib_fast_reg_descriptor *fmr_frd;
	u32 fmr_key;
} kib_fmr_t;

typedef struct kib_net {
	struct list_head ibn_list; /* chain on kib_dev_t::ibd_nets */
	__u64 ibn_incarnation; /* my epoch */
	int ibn_init; /* initialisation state */
	int ibn_shutdown; /* shutting down? */

	atomic_t ibn_npeers; /* # peers extant */
	atomic_t ibn_nconns; /* # connections extant */

	kib_tx_poolset_t **ibn_tx_ps; /* tx pool-set */
	kib_fmr_poolset_t **ibn_fmr_ps; /* fmr pool-set */

	kib_dev_t *ibn_dev; /* underlying IB device */
} kib_net_t;

#define KIB_THREAD_SHIFT 16
#define KIB_THREAD_ID(cpt, tid) ((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id) ((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id) ((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
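
/*
 * Usage sketch (illustrative only, not in the original header): a
 * scheduler thread id packs the CPT in the high bits and the per-CPT
 * thread index in the low KIB_THREAD_SHIFT bits, and decodes losslessly.
 */
static inline void
kiblnd_thread_id_example(void)
{
	long id = KIB_THREAD_ID(2, 5); /* thread 5 on CPT 2 */

	LASSERT(KIB_THREAD_CPT(id) == 2);
	LASSERT(KIB_THREAD_TID(id) == 5);
}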

struct kib_sched_info {
	spinlock_t ibs_lock; /* serialise */
	wait_queue_head_t ibs_waitq; /* schedulers sleep here */
	struct list_head ibs_conns; /* conns to check for rx completions */
	int ibs_nthreads; /* number of scheduler threads */
	int ibs_nthreads_max; /* max allowed scheduler threads */
	int ibs_cpt; /* CPT id */
};

typedef struct {
	int kib_init; /* initialisation state */
	int kib_shutdown; /* shut down? */
	struct list_head kib_devs; /* IB devices extant */
	struct list_head kib_failed_devs; /* list head of failed devices */
	wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
	atomic_t kib_nthreads; /* # live threads */
	rwlock_t kib_global_lock; /* stabilize net/dev/peer/conn ops */
	struct list_head *kib_peers; /* hash table of all my known peers */
	int kib_peer_hash_size; /* size of kib_peers */
	void *kib_connd; /* the connd task (serialisation assertions) */
	struct list_head kib_connd_conns; /* connections to setup/teardown */
	struct list_head kib_connd_zombies; /* connections with zero refcount */
	/* connections to reconnect */
	struct list_head kib_reconn_list;
	/* peers wait for reconnection */
	struct list_head kib_reconn_wait;
	/**
	 * The second that peers are pulled out from \a kib_reconn_wait
	 * for reconnection.
	 */
	time64_t kib_reconn_sec;

	wait_queue_head_t kib_connd_waitq; /* connection daemon sleeps here */
	spinlock_t kib_connd_lock; /* serialise */
	struct ib_qp_attr kib_error_qpa; /* QP->ERROR */
	struct kib_sched_info **kib_scheds; /* percpt data for schedulers */
} kib_data_t;

#define IBLND_INIT_NOTHING 0
#define IBLND_INIT_DATA 1
#define IBLND_INIT_ALL 2

/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */

typedef struct kib_connparams {
	__u16 ibcp_queue_depth;
	__u16 ibcp_max_frags;
	__u32 ibcp_max_msg_size;
} WIRE_ATTR kib_connparams_t;

typedef struct {
	lnet_hdr_t ibim_hdr; /* portals header */
	char ibim_payload[0]; /* piggy-backed payload */
} WIRE_ATTR kib_immediate_msg_t;

typedef struct {
	__u32 rf_nob; /* # bytes this frag */
	__u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR kib_rdma_frag_t;

typedef struct {
	__u32 rd_key; /* local/remote key */
	__u32 rd_nfrags; /* # fragments */
	kib_rdma_frag_t rd_frags[0]; /* buffer frags */
} WIRE_ATTR kib_rdma_desc_t;

typedef struct {
	lnet_hdr_t ibprm_hdr; /* portals header */
	__u64 ibprm_cookie; /* opaque completion cookie */
} WIRE_ATTR kib_putreq_msg_t;

typedef struct {
	__u64 ibpam_src_cookie; /* reflected completion cookie */
	__u64 ibpam_dst_cookie; /* opaque completion cookie */
	kib_rdma_desc_t ibpam_rd; /* sender's sink buffer */
} WIRE_ATTR kib_putack_msg_t;

typedef struct {
	lnet_hdr_t ibgm_hdr; /* portals header */
	__u64 ibgm_cookie; /* opaque completion cookie */
	kib_rdma_desc_t ibgm_rd; /* rdma descriptor */
} WIRE_ATTR kib_get_msg_t;

typedef struct {
	__u64 ibcm_cookie; /* opaque completion cookie */
	__s32 ibcm_status; /* < 0 failure: >= 0 length */
} WIRE_ATTR kib_completion_msg_t;

typedef struct {
	/* First 2 fields fixed FOR ALL TIME */
	__u32 ibm_magic; /* I'm an ibnal message */
	__u16 ibm_version; /* this is my version number */

	__u8 ibm_type; /* msg type */
	__u8 ibm_credits; /* returned credits */
	__u32 ibm_nob; /* # bytes in whole message */
	__u32 ibm_cksum; /* checksum (0 == no checksum) */
	__u64 ibm_srcnid; /* sender's NID */
	__u64 ibm_srcstamp; /* sender's incarnation */
	__u64 ibm_dstnid; /* destination's NID */
	__u64 ibm_dststamp; /* destination's incarnation */

	union {
		kib_connparams_t connparams;
		kib_immediate_msg_t immediate;
		kib_putreq_msg_t putreq;
		kib_putack_msg_t putack;
		kib_get_msg_t get;
		kib_completion_msg_t completion;
	} WIRE_ATTR ibm_u;
} WIRE_ATTR kib_msg_t;

#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */

#define IBLND_MSG_VERSION_1 0x11
#define IBLND_MSG_VERSION_2 0x12
#define IBLND_MSG_VERSION IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ 0xc0 /* connection request */
#define IBLND_MSG_CONNACK 0xc1 /* connection acknowledge */
#define IBLND_MSG_NOOP 0xd0 /* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
#define IBLND_MSG_PUT_REQ 0xd2 /* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK 0xd3 /* completion (sink->src) */
#define IBLND_MSG_PUT_ACK 0xd4 /* putack (sink->src) */
#define IBLND_MSG_PUT_DONE 0xd5 /* completion (src->sink) */
#define IBLND_MSG_GET_REQ 0xd6 /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE 0xd7 /* completion (src->sink: all OK) */

typedef struct {
	__u32 ibr_magic; /* sender's magic */
	__u16 ibr_version; /* sender's version */
	__u8 ibr_why; /* reject reason */
	__u8 ibr_padding; /* padding */
	__u64 ibr_incarnation; /* incarnation of peer */
	kib_connparams_t ibr_cp; /* connection parameters */
} WIRE_ATTR kib_rej_t;

/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE 1 /* You lost connection race */
#define IBLND_REJECT_NO_RESOURCES 2 /* Out of memory/conns etc */
#define IBLND_REJECT_FATAL 3 /* Anything else */
#define IBLND_REJECT_CONN_UNCOMPAT 4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE 5 /* stale peer */
/* peer's rdma frags don't match mine */
#define IBLND_REJECT_RDMA_FRAGS 6
/* peer's msg queue size doesn't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE 7

/***********************************************************************/

typedef struct kib_rx { /* receive message */
	struct list_head rx_list; /* queue for attention */
	struct kib_conn *rx_conn; /* owning conn */
	int rx_nob; /* # bytes received (-1 while posted) */
	enum ib_wc_status rx_status; /* completion status */
	kib_msg_t *rx_msg; /* message buffer (host vaddr) */
	__u64 rx_msgaddr; /* message buffer (I/O addr) */
	DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
	struct ib_recv_wr rx_wrq; /* receive work item... */
	struct ib_sge rx_sge; /* ...and its memory */
} kib_rx_t;

#define IBLND_POSTRX_DONT_POST 0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT 1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT 2 /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */

typedef struct kib_tx { /* transmit message */
	struct list_head tx_list; /* queue on idle_txs ibc_tx_queue etc. */
	kib_tx_pool_t *tx_pool; /* pool I'm from */
	struct kib_conn *tx_conn; /* owning conn */
	short tx_sending; /* # tx callbacks outstanding */
	short tx_queued; /* queued for sending */
	short tx_waiting; /* waiting for peer */
	int tx_status; /* LNET completion status */
	unsigned long tx_deadline; /* completion deadline */
	__u64 tx_cookie; /* completion cookie */
	lnet_msg_t *tx_lntmsg[2]; /* lnet msgs to finalize on completion */
	kib_msg_t *tx_msg; /* message buffer (host vaddr) */
	__u64 tx_msgaddr; /* message buffer (I/O addr) */
	DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
	int tx_nwrq; /* # send work items */
	struct ib_rdma_wr *tx_wrq; /* send work items... */
	struct ib_sge *tx_sge; /* ...and their memory */
	kib_rdma_desc_t *tx_rd; /* rdma descriptor */
	int tx_nfrags; /* # entries in... */
	struct scatterlist *tx_frags; /* dma_map_sg descriptor */
	__u64 *tx_pages; /* rdma phys page addrs */
	kib_fmr_t fmr; /* FMR */
	int tx_dmadir; /* dma direction */
} kib_tx_t;

typedef struct kib_connvars {
	kib_msg_t cv_msg; /* connection-in-progress variables */
} kib_connvars_t;

typedef struct kib_conn {
	struct kib_sched_info *ibc_sched; /* scheduler information */
	struct kib_peer *ibc_peer; /* owning peer */
	kib_hca_dev_t *ibc_hdev; /* HCA bound on */
	struct list_head ibc_list; /* stash on peer's conn list */
	struct list_head ibc_sched_list; /* schedule for attention */
	__u16 ibc_version; /* version of connection */
	/* reconnect later */
	__u16 ibc_reconnect:1;
	__u64 ibc_incarnation; /* which instance of the peer */
	atomic_t ibc_refcount; /* # users */
	int ibc_state; /* what's happening */
	int ibc_nsends_posted; /* # uncompleted sends */
	int ibc_noops_posted; /* # uncompleted NOOPs */
	int ibc_credits; /* # credits I have */
	int ibc_outstanding_credits; /* # credits to return */
	int ibc_reserved_credits; /* # ACK/DONE msg credits */
	int ibc_comms_error; /* set on comms error */
	/* connections queue depth */
	__u16 ibc_queue_depth;
	/* connections max frags */
	__u16 ibc_max_frags;
	unsigned int ibc_nrx:16; /* receive buffers owned */
	unsigned int ibc_scheduled:1; /* scheduled for attention */
	unsigned int ibc_ready:1; /* CQ callback fired */
	unsigned long ibc_last_send; /* time of last send */
	struct list_head ibc_connd_list; /* link chain for kiblnd_check_conns only */
	struct list_head ibc_early_rxs; /* rxs completed before ESTABLISHED */
	struct list_head ibc_tx_noops; /* IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
	struct list_head ibc_tx_queue; /* sends that need a credit */
	struct list_head ibc_tx_queue_nocred; /* sends that don't need a credit */
	struct list_head ibc_tx_queue_rsrvd; /* sends that need to reserve an ACK/DONE msg */
	struct list_head ibc_active_txs; /* active tx awaiting completion */
	spinlock_t ibc_lock; /* serialise */
	kib_rx_t *ibc_rxs; /* the rx descs */
	kib_pages_t *ibc_rx_pages; /* premapped rx msg pages */

	struct rdma_cm_id *ibc_cmid; /* CM id */
	struct ib_cq *ibc_cq; /* completion queue */

	kib_connvars_t *ibc_connvars; /* in-progress connection state */
} kib_conn_t;

#define IBLND_CONN_INIT 0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT 2 /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED 3 /* connection established */
#define IBLND_CONN_CLOSING 4 /* being closed */
#define IBLND_CONN_DISCONNECTED 5 /* disconnected */

typedef struct kib_peer {
	struct list_head ibp_list; /* stash on global peer list */
	lnet_nid_t ibp_nid; /* who's on the other end(s) */
	lnet_ni_t *ibp_ni; /* LNet interface */
	struct list_head ibp_conns; /* all active connections */
	struct list_head ibp_tx_queue; /* msgs waiting for a conn */
	__u64 ibp_incarnation; /* incarnation of peer */
	/* when (in jiffies) I was last alive */
	unsigned long ibp_last_alive;
	/* # users */
	atomic_t ibp_refcount;
	/* version of peer */
	__u16 ibp_version;
	/* current passive connection attempts */
	unsigned short ibp_accepting;
	/* current active connection attempts */
	unsigned short ibp_connecting;
	/* reconnect this peer later */
	unsigned short ibp_reconnecting:1;
	/* # consecutive reconnection attempts to this peer */
	unsigned int ibp_reconnected;
	/* errno on closing this peer */
	int ibp_error;
	/* max map_on_demand */
	__u16 ibp_max_frags;
	/* max_peer_credits */
	__u16 ibp_queue_depth;
} kib_peer_t;

extern kib_data_t kiblnd_data;

void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);

int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);

/* max # of fragments configured by user */
static inline int
kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int mod;

	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
	mod = tunables->lnd_map_on_demand;
	return mod ? mod : IBLND_MAX_RDMA_FRAGS;
}

static inline int
kiblnd_rdma_frags(int version, struct lnet_ni *ni)
{
	return version == IBLND_MSG_VERSION_1 ?
	       IBLND_MAX_RDMA_FRAGS :
	       kiblnd_cfg_rdma_frags(ni);
}

static inline int
kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int concurrent_sends;

	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
	concurrent_sends = tunables->lnd_concurrent_sends;

	if (version == IBLND_MSG_VERSION_1) {
		if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
			return IBLND_MSG_QUEUE_SIZE_V1 * 2;

		if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
			return IBLND_MSG_QUEUE_SIZE_V1 / 2;
	}

	return concurrent_sends;
}
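
/*
 * Example (illustrative): for a V1 peer IBLND_MSG_QUEUE_SIZE_V1 is 8, so
 * the value above is clamped to the range [4, 16] whatever the
 * lnd_concurrent_sends tunable says; V2 peers use the tunable unmodified.
 */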

static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(kib_hca_dev_t *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	if (atomic_dec_and_test(&hdev->ibh_ref))
		kiblnd_hdev_destroy(hdev);
}

static inline int
kiblnd_dev_can_failover(kib_dev_t *dev)
{
	if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
		return 0;

	if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
		return 1;

	return dev->ibd_can_failover;
}

#define kiblnd_conn_addref(conn) \
do { \
	CDEBUG(D_NET, "conn[%p] (%d)++\n", \
	       (conn), atomic_read(&(conn)->ibc_refcount)); \
	atomic_inc(&(conn)->ibc_refcount); \
} while (0)

#define kiblnd_conn_decref(conn) \
do { \
	unsigned long flags; \
	\
	CDEBUG(D_NET, "conn[%p] (%d)--\n", \
	       (conn), atomic_read(&(conn)->ibc_refcount)); \
	LASSERT_ATOMIC_POS(&(conn)->ibc_refcount); \
	if (atomic_dec_and_test(&(conn)->ibc_refcount)) { \
		spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); \
		list_add_tail(&(conn)->ibc_list, \
			      &kiblnd_data.kib_connd_zombies); \
		wake_up(&kiblnd_data.kib_connd_waitq); \
		spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); \
	} \
} while (0)

#define kiblnd_peer_addref(peer) \
do { \
	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n", \
	       (peer), libcfs_nid2str((peer)->ibp_nid), \
	       atomic_read(&(peer)->ibp_refcount)); \
	atomic_inc(&(peer)->ibp_refcount); \
} while (0)

#define kiblnd_peer_decref(peer) \
do { \
	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n", \
	       (peer), libcfs_nid2str((peer)->ibp_nid), \
	       atomic_read(&(peer)->ibp_refcount)); \
	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount); \
	if (atomic_dec_and_test(&(peer)->ibp_refcount)) \
		kiblnd_destroy_peer(peer); \
} while (0)

static inline bool
kiblnd_peer_connecting(kib_peer_t *peer)
{
	return peer->ibp_connecting ||
	       peer->ibp_reconnecting ||
	       peer->ibp_accepting;
}

static inline bool
kiblnd_peer_idle(kib_peer_t *peer)
{
	return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
}

static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
	unsigned int hash =
		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

	return &kiblnd_data.kib_peers[hash];
}
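
/*
 * Lookup sketch (illustrative only; the driver's real helper is
 * kiblnd_find_peer_locked() in o2iblnd.c): walk the hash chain under
 * kib_global_lock, matching on NID.
 */
static inline kib_peer_t *
kiblnd_nid2peer_example(lnet_nid_t nid)
{
	kib_peer_t *peer;

	list_for_each_entry(peer, kiblnd_nid2peerlist(nid), ibp_list)
		if (peer->ibp_nid == nid)
			return peer;

	return NULL;
}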

static inline int
kiblnd_peer_active(kib_peer_t *peer)
{
	/* Am I in the peer hash table? */
	return !list_empty(&peer->ibp_list);
}

static inline kib_conn_t *
kiblnd_get_conn_locked(kib_peer_t *peer)
{
	LASSERT(!list_empty(&peer->ibp_conns));

	/* just return the first connection */
	return list_entry(peer->ibp_conns.next, kib_conn_t, ibc_list);
}

static inline int
kiblnd_send_keepalive(kib_conn_t *conn)
{
	return (*kiblnd_tunables.kib_keepalive > 0) &&
		cfs_time_after(jiffies, conn->ibc_last_send +
			       msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
						MSEC_PER_SEC));
}

static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	lnet_ni_t *ni = conn->ibc_peer->ibp_ni;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;

	if (conn->ibc_outstanding_credits <
	    IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
	    !kiblnd_send_keepalive(conn))
		return 0; /* No need to send NOOP */

	if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
		if (!list_empty(&conn->ibc_tx_queue_nocred))
			return 0; /* NOOP can be piggybacked */

		/* No tx to piggyback NOOP onto or no credit to send a tx */
		return (list_empty(&conn->ibc_tx_queue) ||
			!conn->ibc_credits);
	}

	if (!list_empty(&conn->ibc_tx_noops) || /* NOOP already queued */
	    !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
	    !conn->ibc_credits) /* no credit */
		return 0;

	if (conn->ibc_credits == 1 && /* last credit reserved for */
	    !conn->ibc_outstanding_credits) /* giving back credits */
		return 0;

	/* No tx to piggyback NOOP onto or no credit to send a tx */
	return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}

static inline void
kiblnd_abort_receives(kib_conn_t *conn)
{
	ib_modify_qp(conn->ibc_cmid->qp,
		     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
{
	if (q == &conn->ibc_tx_queue)
		return "tx_queue";

	if (q == &conn->ibc_tx_queue_rsrvd)
		return "tx_queue_rsrvd";

	if (q == &conn->ibc_tx_queue_nocred)
		return "tx_queue_nocred";

	if (q == &conn->ibc_active_txs)
		return "active_txs";

	LBUG();
	return NULL;
}

/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the */
/* lowest bits of the work request id to stash the work item type. */

#define IBLND_WID_INVAL 0
#define IBLND_WID_TX 1
#define IBLND_WID_RX 2
#define IBLND_WID_RDMA 3
#define IBLND_WID_MR 4
#define IBLND_WID_MASK 7UL

static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
	unsigned long lptr = (unsigned long)ptr;

	LASSERT(!(lptr & IBLND_WID_MASK));
	LASSERT(!(type & ~IBLND_WID_MASK));
	return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
	return wreqid & IBLND_WID_MASK;
}
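
/*
 * Round-trip sketch (illustrative only): because descriptors are at
 * least 8-byte aligned, the low three bits of the pointer are free to
 * carry the work item type, and the encoding is losslessly reversible.
 */
static inline void
kiblnd_wreqid_example(kib_tx_t *tx)
{
	__u64 wreqid = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);

	LASSERT(kiblnd_wreqid2ptr(wreqid) == tx);
	LASSERT(kiblnd_wreqid2type(wreqid) == IBLND_WID_TX);
}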

static inline void
kiblnd_set_conn_state(kib_conn_t *conn, int state)
{
	conn->ibc_state = state;
	mb();
}

static inline void
kiblnd_init_msg(kib_msg_t *msg, int type, int body_nob)
{
	msg->ibm_type = type;
	msg->ibm_nob = offsetof(kib_msg_t, ibm_u) + body_nob;
}
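
/*
 * Sizing note (illustrative, not from the original source): a NOOP has
 * no body, so kiblnd_init_msg(msg, IBLND_MSG_NOOP, 0) leaves ibm_nob at
 * offsetof(kib_msg_t, ibm_u), i.e. just the common header; an immediate
 * message adds offsetof(kib_immediate_msg_t, ibim_payload[payload_nob])
 * on top of that.
 */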

static inline int
kiblnd_rd_size(kib_rdma_desc_t *rd)
{
	int i;
	int size;

	for (i = size = 0; i < rd->rd_nfrags; i++)
		size += rd->rd_frags[i].rf_nob;

	return size;
}

static inline __u64
kiblnd_rd_frag_addr(kib_rdma_desc_t *rd, int index)
{
	return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(kib_rdma_desc_t *rd, int index)
{
	return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(kib_rdma_desc_t *rd, int index)
{
	return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(kib_rdma_desc_t *rd, int index, __u32 nob)
{
	if (nob < rd->rd_frags[index].rf_nob) {
		rd->rd_frags[index].rf_addr += nob;
		rd->rd_frags[index].rf_nob -= nob;
	} else {
		index++;
	}

	return index;
}
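
/*
 * Walk sketch (illustrative only): the fragment helpers above are
 * mutually consistent -- summing kiblnd_rd_frag_size() over every
 * fragment reproduces kiblnd_rd_size().
 */
static inline void
kiblnd_rd_walk_example(kib_rdma_desc_t *rd)
{
	int size = 0;
	int i;

	for (i = 0; i < rd->rd_nfrags; i++)
		size += kiblnd_rd_frag_size(rd, i);

	LASSERT(size == kiblnd_rd_size(rd));
}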

static inline int
kiblnd_rd_msg_size(kib_rdma_desc_t *rd, int msgtype, int n)
{
	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
		msgtype == IBLND_MSG_PUT_ACK);

	return msgtype == IBLND_MSG_GET_REQ ?
	       offsetof(kib_get_msg_t, ibgm_rd.rd_frags[n]) :
	       offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[n]);
}

static inline __u64
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
					  void *msg, size_t size,
					  enum dma_data_direction direction)
{
	return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
					   __u64 addr, size_t size,
					   enum dma_data_direction direction)
{
	ib_dma_unmap_single(dev, addr, size, direction);
}

#define KIBLND_UNMAP_ADDR_SET(p, m, a) do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a) (a)

static inline int kiblnd_dma_map_sg(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction)
{
	return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
				       struct scatterlist *sg, int nents,
				       enum dma_data_direction direction)
{
	ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
					  struct scatterlist *sg)
{
	return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
					     struct scatterlist *sg)
{
	return ib_sg_dma_len(dev, sg);
}
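
/*
 * Usage sketch (illustrative only): the wrappers above map 1:1 onto the
 * ib_dma_* helpers; a message buffer would typically go through
 *
 *	addr = kiblnd_dma_map_single(hdev->ibh_ibdev, msg,
 *				     IBLND_MSG_SIZE, DMA_TO_DEVICE);
 *	if (kiblnd_dma_mapping_error(hdev->ibh_ibdev, addr))
 *		goto failed;
 *	...post the work request, wait for completion...
 *	kiblnd_dma_unmap_single(hdev->ibh_ibdev, addr,
 *				IBLND_MSG_SIZE, DMA_TO_DEVICE);
 */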

/* XXX We use KIBLND_CONN_PARAM(e) as writable buffer, it's not strictly */
/* right because OFED1.2 defines it as const, to use it we have to add */
/* (void *) cast to overcome "const" */

#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)

struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
				    int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);

int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
			kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
			kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);

int kiblnd_tunables_setup(struct lnet_ni *ni);
void kiblnd_tunables_init(void);

int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);

int kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages);

int kiblnd_cm_callback(struct rdma_cm_id *cmid,
		       struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);

int kiblnd_dev_failover(kib_dev_t *dev);
int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid);
void kiblnd_destroy_peer(kib_peer_t *peer);
bool kiblnd_reconnect_peer(kib_peer_t *peer);
void kiblnd_destroy_dev(kib_dev_t *dev);
void kiblnd_unlink_peer_locked(kib_peer_t *peer);
kib_peer_t *kiblnd_find_peer_locked(lnet_nid_t nid);
int kiblnd_close_stale_conns_locked(kib_peer_t *peer,
				    int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why);

kib_conn_t *kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
			       int state, int version);
void kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn);
void kiblnd_close_conn(kib_conn_t *conn, int error);
void kiblnd_close_conn_locked(kib_conn_t *conn, int error);

void kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid);
void kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist,
			int status);

void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(lnet_ni_t *ni, kib_msg_t *msg, int version,
		     int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(kib_msg_t *msg, int nob);
int kiblnd_post_rx(kib_rx_t *rx, int credit);

int kiblnd_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg);
int kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
		unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
		unsigned int offset, unsigned int mlen, unsigned int rlen);