/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IPATH_VERBS_H
#define IPATH_VERBS_H

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>

#include "ipath_kernel.h"

#define IPATH_MAX_RDMA_ATOMIC	4

#define QPN_MAX			(1 << 24)
#define QPNMAP_ENTRIES		(QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
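/*
 * Worked example (assuming 4 KiB pages): QPN_MAX is 2^24 QPNs, each
 * tracked by one bit, so the free-QPN bitmap needs 2^24 / 8 = 2 MiB,
 * i.e. QPNMAP_ENTRIES = 2^24 / 4096 / 8 = 512 one-page maps.
 */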

/*
 * Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define IPATH_UVERBS_ABI_VERSION	2

/*
 * Define an ib_cq_notify value that is not valid so we know when CQ
 * notifications are armed.
 */
#define IB_CQ_NONE	(IB_CQ_NEXT_COMP + 1)

/* AETH NAK opcode values */
#define IB_RNR_NAK			0x20
#define IB_NAK_PSN_ERROR		0x60
#define IB_NAK_INVALID_REQUEST		0x61
#define IB_NAK_REMOTE_ACCESS_ERROR	0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR	0x63
#define IB_NAK_INVALID_RD_REQUEST	0x64

/* Flags for checking QP state (see ib_ipath_state_ops[]) */
#define IPATH_POST_SEND_OK		0x01
#define IPATH_POST_RECV_OK		0x02
#define IPATH_PROCESS_RECV_OK		0x04
#define IPATH_PROCESS_SEND_OK		0x08
#define IPATH_PROCESS_NEXT_SEND_OK	0x10
#define IPATH_FLUSH_SEND		0x20
#define IPATH_FLUSH_RECV		0x40
#define IPATH_PROCESS_OR_FLUSH_SEND \
	(IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
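
/*
 * Illustrative use of the flags above (a sketch; the real checks live
 * in the ipath .c files): callers index ib_ipath_state_ops[] by the
 * current QP state and test the relevant bit, e.g.
 *
 *	if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK))
 *		return -EINVAL;
 */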

/* IB Performance Manager status values */
#define IB_PMA_SAMPLE_STATUS_DONE	0x00
#define IB_PMA_SAMPLE_STATUS_STARTED	0x01
#define IB_PMA_SAMPLE_STATUS_RUNNING	0x02

/* Mandatory IB performance counter select values. */
#define IB_PMA_PORT_XMIT_DATA	cpu_to_be16(0x0001)
#define IB_PMA_PORT_RCV_DATA	cpu_to_be16(0x0002)
#define IB_PMA_PORT_XMIT_PKTS	cpu_to_be16(0x0003)
#define IB_PMA_PORT_RCV_PKTS	cpu_to_be16(0x0004)
#define IB_PMA_PORT_XMIT_WAIT	cpu_to_be16(0x0005)

struct ib_reth {
	__be64 vaddr;
	__be32 rkey;
	__be32 length;
} __attribute__ ((packed));

struct ib_atomic_eth {
	__be32 vaddr[2];	/* unaligned so access as 2 32-bit words */
	__be32 rkey;
	__be64 swap_data;
	__be64 compare_data;
} __attribute__ ((packed));

struct ipath_other_headers {
	__be32 bth[3];
	union {
		struct {
			__be32 deth[2];
			__be32 imm_data;
		} ud;
		struct {
			struct ib_reth reth;
			__be32 imm_data;
		} rc;
		struct {
			__be32 aeth;
			__be32 atomic_ack_eth[2];
		} at;
		__be32 imm_data;
		__be32 aeth;
		struct ib_atomic_eth atomic_eth;
	} u;
} __attribute__ ((packed));

/*
 * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
 * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
 * will be in the eager header buffer.  The remaining 12 or 16 bytes
 * are in the data buffer.
 */
struct ipath_ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ipath_other_headers oth;
		} l;
		struct ipath_other_headers oth;
	} u;
} __attribute__ ((packed));
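
/*
 * For reference, the 68-byte figure above is LRH (8) + GRH (40) +
 * BTH (12) + DETH (8); a packet carrying imm_data adds 4 more bytes.
 */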

struct ipath_pio_header {
	__le32 pbc[2];
	struct ipath_ib_header hdr;
} __attribute__ ((packed));

/*
 * There is one struct ipath_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct ipath_mcast_qp.
 */
struct ipath_mcast_qp {
	struct list_head list;
	struct ipath_qp *qp;
};

struct ipath_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/* Protection domain */
struct ipath_pd {
	struct ib_pd ibpd;
	int user;		/* non-zero if created from user space */
};

/* Address Handle */
struct ipath_ah {
	struct ib_ah ibah;
	struct ib_ah_attr attr;
};

/*
 * This structure is used by ipath_mmap() to validate an offset
 * when an mmap() request is made.  The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct ipath_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and completion queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 */
struct ipath_cq_wc {
	u32 head;		/* index of next entry to fill */
	u32 tail;		/* index of next ib_poll_cq() entry */
	union {
		/* these are actually size ibcq.cqe + 1 */
		struct ib_uverbs_wc uqueue[0];
		struct ib_wc kqueue[0];
	};
};
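
/*
 * Sketch of the ring-index arithmetic for the queue above
 * (illustrative; the real producer/consumer code is in ipath_cq.c).
 * Since uqueue[] and kqueue[] hold ibcq.cqe + 1 entries, an index
 * advances as
 *
 *	if (++head > cq->ibcq.cqe)
 *		head = 0;
 *
 * and the ring is full when advancing head would make it equal tail.
 */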

/*
 * The completion queue structure.
 */
struct ipath_cq {
	struct ib_cq ibcq;
	struct tasklet_struct comptask;
	spinlock_t lock;
	u8 notify;
	u8 triggered;
	struct ipath_cq_wc *queue;
	struct ipath_mmap_info *ip;
};

/*
 * A segment is a linear region of low physical memory.
 * XXX Maybe we should use phys addr here and kmap()/kunmap().
 * Used by the verbs layer.
 */
struct ipath_seg {
	void *vaddr;
	size_t length;
};

/* The number of ipath_segs that fit in a page. */
#define IPATH_SEGSZ	(PAGE_SIZE / sizeof (struct ipath_seg))

struct ipath_segarray {
	struct ipath_seg segs[IPATH_SEGSZ];
};

struct ipath_mregion {
	struct ib_pd *pd;	/* shares refcnt of ibmr.pd */
	u64 user_base;		/* User's address for this region */
	u64 iova;		/* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;		/* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;		/* number of ipath_segs in all the arrays */
	u32 mapsz;		/* size of the map array */
	struct ipath_segarray *map[0];	/* the segments */
};
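
/*
 * Illustrative helper (hypothetical, not part of the driver API):
 * segment i of a region lives at map[i / IPATH_SEGSZ]->segs[i % IPATH_SEGSZ],
 * which is the layout behind the (m, n) indices in struct ipath_sge below.
 */
static inline struct ipath_seg *ipath_example_get_seg(struct ipath_mregion *mr,
						      u32 i)
{
	return &mr->map[i / IPATH_SEGSZ]->segs[i % IPATH_SEGSZ];
}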

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct ipath_sge {
	struct ipath_mregion *mr;
	void *vaddr;		/* kernel virtual address of segment */
	u32 sge_length;		/* length of the SGE */
	u32 length;		/* remaining length of the segment */
	u16 m;			/* current index: mr->map[m] */
	u16 n;			/* current index: mr->map[m]->segs[n] */
};

/* Memory region */
struct ipath_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct ipath_mregion mr;	/* must be last */
};

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct ipath_swqe {
	struct ib_send_wr wr;	/* don't use wr.sg_list */
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	struct ipath_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct ipath_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct ipath_rwq {
	u32 head;		/* new work requests posted to the head */
	u32 tail;		/* receives pull requests from here. */
	struct ipath_rwqe wq[0];
};

struct ipath_rq {
	struct ipath_rwq *wq;
	spinlock_t lock;
	u32 size;		/* size of RWQE array */
	u8 max_sge;
};

struct ipath_srq {
	struct ib_srq ibsrq;
	struct ipath_rq rq;
	struct ipath_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

struct ipath_sge_state {
	struct ipath_sge *sg_list;	/* next SGE to be used if any */
	struct ipath_sge sge;	/* progress state for the current SGE */
	u8 num_sge;
	u8 static_rate;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct ipath_ack_entry {
	u8 opcode;
	u8 sent;
	u32 psn;
	union {
		struct ipath_sge_state rdma_sge;
		u64 atomic_data;
	};
};

/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * Common variables are protected by both r_rq.lock and s_lock, taken in
 * that order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct ipath_qp {
	struct ib_qp ibqp;
	struct ipath_qp *next;		/* link list for QPN hash table */
	struct ipath_qp *timer_next;	/* link list for ipath_ib_timer() */
	struct ipath_qp *pio_next;	/* link for ipath_ib_piobufavail() */
	struct list_head piowait;	/* link for wait PIO buf */
	struct list_head timerwait;	/* link for waiting for timeouts */
	struct ib_ah_attr remote_ah_attr;
	struct ipath_ib_header s_hdr;	/* next packet header to send */
	atomic_t refcount;
	wait_queue_head_t wait;
	wait_queue_head_t wait_dma;
	struct tasklet_struct s_task;
	struct ipath_mmap_info *ip;
	struct ipath_sge_state *s_cur_sge;
	struct ipath_verbs_txreq *s_tx;
	struct ipath_sge_state s_sge;	/* current send request data */
	struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
	struct ipath_sge_state s_ack_rdma_sge;
	struct ipath_sge_state s_rdma_read_sge;
	struct ipath_sge_state r_sge;	/* current receive data */
	spinlock_t s_lock;
	atomic_t s_dma_busy;
	u16 s_pkt_delay;
	u16 s_hdrwords;		/* size of s_hdr in 32 bit words */
	u32 s_cur_size;		/* size of send packet in bytes */
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_rnr_timeout;	/* number of milliseconds for RNR timeout */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u64 r_wr_id;		/* ID for current receive WQE */
	unsigned long r_aflags;
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_psn;		/* expected rcv packet sequence number */
	u32 r_msn;		/* message sequence number */
	u8 state;		/* QP state */
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_state;		/* opcode of last packet received */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 r_flags;
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 qp_access_flags;
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_flags;
	u8 s_dmult;
	u8 s_draining;
	u8 timeout;		/* Timeout for this QP */
	enum ib_mtu path_mtu;
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */
	u32 s_head;		/* new entries added here */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_last;		/* last un-ACK'ed entry */
	u32 s_ssn;		/* SSN of tail entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	struct ipath_swqe *s_wq;	/* send work queue */
	struct ipath_swqe *s_wqe;
	struct ipath_sge *r_ud_sg_list;
	struct ipath_rq r_rq;		/* receive work queue */
	struct ipath_sge r_sg_list[0];	/* verified SGEs */
};

/*
 * Atomic bit definitions for r_aflags.
 */
#define IPATH_R_WRID_VALID	0

/*
 * Bit definitions for r_flags.
 */
#define IPATH_R_REUSE_SGE	0x01
#define IPATH_R_RDMAR_SEQ	0x02

/*
 * Bit definitions for s_flags.
 *
 * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
 *			   before processing the next SWQE
 * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
 * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *		      next send completion entry not via send DMA.
 */
#define IPATH_S_SIGNAL_REQ_WR	0x01
#define IPATH_S_FENCE_PENDING	0x02
#define IPATH_S_RDMAR_PENDING	0x04
#define IPATH_S_ACK_PENDING	0x08
#define IPATH_S_BUSY		0x10
#define IPATH_S_WAITING		0x20
#define IPATH_S_WAIT_SSN_CREDIT	0x40
#define IPATH_S_WAIT_DMA	0x80

#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
	IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)

#define IPATH_PSN_CREDIT	512

/*
 * Since struct ipath_swqe is not a fixed size, we can't simply index into
 * struct ipath_qp.s_wq.  This function does the array index computation.
 */
static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
					      unsigned n)
{
	return (struct ipath_swqe *)((char *)qp->s_wq +
				     (sizeof(struct ipath_swqe) +
				      qp->s_max_sge *
				      sizeof(struct ipath_sge)) * n);
}

/*
 * Since struct ipath_rwqe is not a fixed size, we can't simply index into
 * struct ipath_rwq.wq.  This function does the array index computation.
 */
static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
					      unsigned n)
{
	return (struct ipath_rwqe *)
		((char *) rq->wq->wq +
		 (sizeof(struct ipath_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
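
/*
 * Usage sketch for the two accessors above (illustrative): code that
 * walks every entry of a receive queue does
 *
 *	for (n = 0; n < rq->size; n++) {
 *		struct ipath_rwqe *wqe = get_rwqe_ptr(rq, n);
 *		...
 *	}
 *
 * rather than indexing rq->wq->wq[n], which would miscompute the
 * address since each element also carries max_sge struct ib_sge's.
 */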

/*
 * QPN-map pages start out as NULL; they are allocated upon first use
 * and never deallocated.  This way, large bitmaps are not allocated
 * unless large numbers of QPs are used.
 */
struct qpn_map {
	atomic_t n_free;
	void *page;
};
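
/*
 * A minimal sketch of the lazy allocation described above, assuming
 * the page is published with cmpxchg() so that racing allocators keep
 * only one copy (the real logic lives in ipath_qp.c):
 *
 *	if (unlikely(!map->page)) {
 *		unsigned long page = get_zeroed_page(GFP_KERNEL);
 *		if (page && cmpxchg(&map->page, NULL, (void *)page))
 *			free_page(page);
 *	}
 */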

struct ipath_qp_table {
	spinlock_t lock;
	u32 last;		/* last QP number allocated */
	u32 max;		/* size of the hash table */
	u32 nmaps;		/* size of the map table */
	struct ipath_qp **table;
	/* bit map of free numbers */
	struct qpn_map map[QPNMAP_ENTRIES];
};

struct ipath_lkey_table {
	spinlock_t lock;
	u32 next;		/* next unused index (speeds search) */
	u32 gen;		/* generation count */
	u32 max;		/* size of the table */
	struct ipath_mregion **table;
};

struct ipath_opcode_stats {
	u64 n_packets;		/* number of packets */
	u64 n_bytes;		/* total number of bytes */
};

struct ipath_ibdev {
	struct ib_device ibdev;
	struct ipath_devdata *dd;
	struct list_head pending_mmaps;
	spinlock_t mmap_offset_lock;
	u32 mmap_offset;
	int ib_unit;		/* This is the device number */
	u16 sm_lid;		/* in host order */
	u8 sm_sl;
	u8 mkeyprot;
	/* non-zero when timer is set */
	unsigned long mkey_lease_timeout;

	/* The following fields are really per port. */
	struct ipath_qp_table qp_table;
	struct ipath_lkey_table lk_table;
	struct list_head pending[3];	/* FIFO of QPs waiting for ACKs */
	struct list_head piowait;	/* list for wait PIO buf */
	struct list_head txreq_free;
	void *txreq_bufs;
	/* list of QPs waiting for RNR timer */
	struct list_head rnrwait;
	spinlock_t pending_lock;
	__be64 sys_image_guid;	/* in network order */
	__be64 gid_prefix;	/* in network order */
	__be64 mkey;

	u32 n_pds_allocated;	/* number of PDs allocated for device */
	spinlock_t n_pds_lock;
	u32 n_ahs_allocated;	/* number of AHs allocated for device */
	spinlock_t n_ahs_lock;
	u32 n_cqs_allocated;	/* number of CQs allocated for device */
	spinlock_t n_cqs_lock;
	u32 n_qps_allocated;	/* number of QPs allocated for device */
	spinlock_t n_qps_lock;
	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
	spinlock_t n_srqs_lock;
	u32 n_mcast_grps_allocated;	/* number of mcast groups allocated */
	spinlock_t n_mcast_grps_lock;

	u64 ipath_sword;	/* total dwords sent (sample result) */
	u64 ipath_rword;	/* total dwords received (sample result) */
	u64 ipath_spkts;	/* total packets sent (sample result) */
	u64 ipath_rpkts;	/* total packets received (sample result) */
	/* # of ticks no data sent (sample result) */
	u64 ipath_xmit_wait;
	u64 rcv_errors;		/* # of packets with SW detected rcv errs */
	u64 n_unicast_xmit;	/* total unicast packets sent */
	u64 n_unicast_rcv;	/* total unicast packets received */
	u64 n_multicast_xmit;	/* total multicast packets sent */
	u64 n_multicast_rcv;	/* total multicast packets received */
	u64 z_symbol_error_counter;		/* starting count for PMA */
	u64 z_link_error_recovery_counter;	/* starting count for PMA */
	u64 z_link_downed_counter;		/* starting count for PMA */
	u64 z_port_rcv_errors;			/* starting count for PMA */
	u64 z_port_rcv_remphys_errors;		/* starting count for PMA */
	u64 z_port_xmit_discards;		/* starting count for PMA */
	u64 z_port_xmit_data;			/* starting count for PMA */
	u64 z_port_rcv_data;			/* starting count for PMA */
	u64 z_port_xmit_packets;		/* starting count for PMA */
	u64 z_port_rcv_packets;			/* starting count for PMA */
	u32 z_pkey_violations;			/* starting count for PMA */
	u32 z_local_link_integrity_errors;	/* starting count for PMA */
	u32 z_excessive_buffer_overrun_errors;	/* starting count for PMA */
	u32 z_vl15_dropped;			/* starting count for PMA */
	u32 n_rc_resends;
	u32 n_rc_acks;
	u32 n_rc_qacks;
	u32 n_seq_naks;
	u32 n_rdma_seq;
	u32 n_rnr_naks;
	u32 n_other_naks;
	u32 n_timeouts;
	u32 n_pkt_drops;
	u32 n_vl15_dropped;
	u32 n_wqe_errs;
	u32 n_rdma_dup_busy;
	u32 n_piowait;
	u32 n_unaligned;
	u32 port_cap_flags;
	u32 pma_sample_start;
	u32 pma_sample_interval;
	__be16 pma_counter_select[5];
	u16 pma_tag;
	u16 qkey_violations;
	u16 mkey_violations;
	u16 mkey_lease_period;
	u16 pending_index;	/* which pending queue is active */
	u8 pma_sample_status;
	u8 subnet_timeout;
	u8 vl_high_limit;
	struct ipath_opcode_stats opstats[128];
};

struct ipath_verbs_counters {
	u64 symbol_error_counter;
	u64 link_error_recovery_counter;
	u64 link_downed_counter;
	u64 port_rcv_errors;
	u64 port_rcv_remphys_errors;
	u64 port_xmit_discards;
	u64 port_xmit_data;
	u64 port_rcv_data;
	u64 port_xmit_packets;
	u64 port_rcv_packets;
	u32 local_link_integrity_errors;
	u32 excessive_buffer_overrun_errors;
	u32 vl15_dropped;
};

struct ipath_verbs_txreq {
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	u32 map_len;
	u32 len;
	struct ipath_sge_state *ss;
	struct ipath_pio_header hdr;
	struct ipath_sdma_txreq txreq;
};

static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct ipath_mr, ibmr);
}

static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct ipath_pd, ibpd);
}

static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
{
	return container_of(ibah, struct ipath_ah, ibah);
}

static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct ipath_cq, ibcq);
}

static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct ipath_srq, ibsrq);
}

static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct ipath_qp, ibqp);
}

static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct ipath_ibdev, ibdev);
}

/*
 * This must be called with s_lock held.
 */
static inline void ipath_schedule_send(struct ipath_qp *qp)
{
	if (qp->s_flags & IPATH_S_ANY_WAIT)
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
	if (!(qp->s_flags & IPATH_S_BUSY))
		tasklet_hi_schedule(&qp->s_task);
}

int ipath_process_mad(struct ib_device *ibdev,
		      int mad_flags,
		      u8 port_num,
		      const struct ib_wc *in_wc,
		      const struct ib_grh *in_grh,
		      const struct ib_mad *in_mad, struct ib_mad *out_mad);

/*
 * Compare the lower 24 bits of the two values.
 * Returns an integer less than, equal to, or greater than zero.
 */
static inline int ipath_cmp24(u32 a, u32 b)
{
	return (((int) a) - ((int) b)) << 8;
}
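
/*
 * Example: ipath_cmp24(1, 0xFFFFFF) is positive, i.e. PSN 1 is "after"
 * PSN 0xFFFFFF across the 24-bit wrap; the << 8 moves bit 23 of the
 * 24-bit difference into the sign bit of the 32-bit result.
 */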

struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);

int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
			    u64 *rwords, u64 *spkts, u64 *rpkts,
			    u64 *xmit_wait);

int ipath_get_counters(struct ipath_devdata *dd,
		       struct ipath_verbs_counters *cntrs);

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);

int ipath_mcast_tree_empty(void);

__be32 ipath_compute_aeth(struct ipath_qp *qp);

struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);

struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata);

int ipath_destroy_qp(struct ib_qp *ibqp);

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata);

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr);

unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);

int ipath_init_qp_table(struct ipath_ibdev *idev, int size);

void ipath_get_credit(struct ipath_qp *qp, u32 aeth);

unsigned ipath_ib_rate_to_mult(enum ib_rate rate);

int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
		     u32 hdrwords, struct ipath_sge_state *ss, u32 len);

void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);

void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);

void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

void ipath_restart_rc(struct ipath_qp *qp, u32 psn);

void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);

int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);

void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp);

int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
		     struct ipath_mregion *mr);

void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);

int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
		  struct ib_sge *sge, int acc);

int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
		  u32 len, u64 vaddr, u32 rkey, int acc);

int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			   struct ib_recv_wr **bad_wr);

struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
				struct ib_srq_init_attr *srq_init_attr,
				struct ib_udata *udata);

int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask,
		     struct ib_udata *udata);

int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);

int ipath_destroy_srq(struct ib_srq *ibsrq);

void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);

int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);

struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
			      int comp_vector, struct ib_ucontext *context,
			      struct ib_udata *udata);

int ipath_destroy_cq(struct ib_cq *ibcq);

int ipath_req_notify_cq(struct ib_cq *ibcq,
			enum ib_cq_notify_flags notify_flags);

int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);

struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);

struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd,
				struct ib_phys_buf *buffer_list,
				int num_phys_buf, int acc, u64 *iova_start);

struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				u64 virt_addr, int mr_access_flags,
				struct ib_udata *udata);

int ipath_dereg_mr(struct ib_mr *ibmr);

struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			       struct ib_fmr_attr *fmr_attr);

int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		       int list_len, u64 iova);

int ipath_unmap_fmr(struct list_head *fmr_list);

int ipath_dealloc_fmr(struct ib_fmr *ibfmr);

void ipath_release_mmap_info(struct kref *ref);

struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
					       u32 size,
					       struct ib_ucontext *context,
					       void *obj);

void ipath_update_mmap_info(struct ipath_ibdev *dev,
			    struct ipath_mmap_info *ip,
			    u32 size, void *obj);

int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);

void ipath_insert_rnr_queue(struct ipath_qp *qp);

int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
		   u32 *lengthp, struct ipath_sge_state *ss);

int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);

u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords);

void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
			   struct ipath_other_headers *ohdr,
			   u32 bth0, u32 bth2);

void ipath_do_send(unsigned long data);

void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
			 enum ib_wc_status status);

int ipath_make_rc_req(struct ipath_qp *qp);

int ipath_make_uc_req(struct ipath_qp *qp);

int ipath_make_ud_req(struct ipath_qp *qp);

int ipath_register_ib_device(struct ipath_devdata *);

void ipath_unregister_ib_device(struct ipath_ibdev *);

void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);

int ipath_ib_piobufavail(struct ipath_ibdev *);

unsigned ipath_get_npkeys(struct ipath_devdata *);

u32 ipath_get_cr_errpkey(struct ipath_devdata *);

unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);

extern const enum ib_wc_opcode ib_ipath_wc_opcode[];

/*
 * ipath_cvt_physportstate[] converts HCA-specific LinkTrainingState
 * values to the IB PhysPortState values below.
 */
extern const u8 ipath_cvt_physportstate[];
#define IB_PHYSPORTSTATE_SLEEP			1
#define IB_PHYSPORTSTATE_POLL			2
#define IB_PHYSPORTSTATE_DISABLED		3
#define IB_PHYSPORTSTATE_CFG_TRAIN		4
#define IB_PHYSPORTSTATE_LINKUP			5
#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER	6

extern const int ib_ipath_state_ops[];

extern unsigned int ib_ipath_lkey_table_size;

extern unsigned int ib_ipath_max_cqes;

extern unsigned int ib_ipath_max_cqs;

extern unsigned int ib_ipath_max_qp_wrs;

extern unsigned int ib_ipath_max_qps;

extern unsigned int ib_ipath_max_sges;

extern unsigned int ib_ipath_max_mcast_grps;

extern unsigned int ib_ipath_max_mcast_qp_attached;

extern unsigned int ib_ipath_max_srqs;

extern unsigned int ib_ipath_max_srq_sges;

extern unsigned int ib_ipath_max_srq_wrs;

extern const u32 ib_ipath_rnr_table[];

extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;

#endif				/* IPATH_VERBS_H */