#ifndef DEF_RDMAVT_INCQP_H
#define DEF_RDMAVT_INCQP_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <rdma/rdma_vt.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_cq.h>
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID 0
#define RVT_R_REWIND_SGE 1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK 0x04
#define RVT_R_RSP_SEND 0x08
#define RVT_R_COMM_EST 0x10

/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs must individually request
 *                       completions (IB_SIGNAL_REQ_WR)
 * RVT_S_BUSY - send tasklet is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response to an RDMA read or atomic is pending
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  the next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_PIO_DRAIN - waiting for the QP to drain PIO packets
 * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 */
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_PIO_DRAIN 0x0800
#define RVT_S_WAIT_TX 0x1000
#define RVT_S_WAIT_DMA_DESC 0x2000
#define RVT_S_WAIT_KMEM 0x4000
#define RVT_S_WAIT_PSN 0x8000
#define RVT_S_WAIT_ACK 0x10000
#define RVT_S_SEND_ONE 0x20000
#define RVT_S_UNLIMITED_CREDIT 0x40000
#define RVT_S_AHG_VALID 0x80000
#define RVT_S_AHG_CLEAR 0x100000
#define RVT_S_ECN 0x200000

/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
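
/*
 * Usage sketch (illustrative, not part of the upstream header): a
 * driver's send engine typically gates progress on these masks and
 * defers the QP until the wait state clears:
 *
 *	if (qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT))
 *		return;
 */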

/* Number of bits to pay attention to in the opcode for checking qp type */
#define RVT_OPCODE_QP_MASK 0xE0
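
/*
 * Example (illustrative): the top three bits of an IB opcode select the
 * transport (RC, UC, RD, UD), so a received opcode can be matched to a
 * QP type with, e.g.:
 *
 *	if ((opcode & RVT_OPCODE_QP_MASK) == IB_OPCODE_UD)
 *		...
 */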

/* Flags for checking QP state (see ib_rvt_state_ops[]) */
#define RVT_POST_SEND_OK 0x01
#define RVT_POST_RECV_OK 0x02
#define RVT_PROCESS_RECV_OK 0x04
#define RVT_PROCESS_SEND_OK 0x08
#define RVT_PROCESS_NEXT_SEND_OK 0x10
#define RVT_FLUSH_SEND 0x20
#define RVT_FLUSH_RECV 0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
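
/*
 * Usage sketch (illustrative): drivers index ib_rvt_state_ops[] by the
 * current QP state to decide what is permitted, e.g. in a post-send
 * path:
 *
 *	if (!(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
 *		return -EINVAL;
 */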

/*
 * Internal send flags
 */
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)

/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and stored
 * in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr; /* don't use wr.sg_list */
		struct ib_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn; /* first packet sequence number */
	u32 lpsn; /* last packet sequence number */
	u32 ssn; /* send sequence number */
	u32 length; /* total length of data in sg_list */
	struct rvt_sge sg_list[0];
};

/*
 * Receive work request queue entry.
 * The size of the sg_list is determined when the QP (or SRQ) is created
 * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
 */
struct rvt_rwqe {
	u64 wr_id;
	u8 num_sge;
	struct ib_sge sg_list[0];
};

/*
 * This structure is used to contain the head pointer, tail pointer,
 * and receive work queue entries as a single memory allocation so
 * it can be mmap'ed into user space.
 * Note that the wq array elements are variable size so you can't
 * just index into the array to get the N'th element;
 * use get_rwqe_ptr() instead.
 */
struct rvt_rwq {
	u32 head; /* new work requests posted to the head */
	u32 tail; /* receives pull requests from here. */
	struct rvt_rwqe wq[0];
};

struct rvt_rq {
	struct rvt_rwq *wq;
	u32 size; /* size of RWQE array */
	u8 max_sge;
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};

/*
 * This structure is used by rvt_mmap() to validate an offset
 * when an mmap() request is made. The vm_area_struct then uses
 * this as its vm_private_data.
 */
struct rvt_mmap_info {
	struct list_head pending_mmaps;
	struct ib_ucontext *context;
	void *obj;
	__u64 offset;
	struct kref ref;
	unsigned size;
};

/*
 * This structure holds the information that the send tasklet needs
 * to send an RDMA read response or atomic operation.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;
	u64 atomic_data;
	u32 psn;
	u32 lpsn;
	u8 opcode;
	u8 sent;
};

#define RC_QP_SCALING_INTERVAL 5

#define RVT_OPERATION_PRIV 0x00000001
#define RVT_OPERATION_ATOMIC 0x00000002
#define RVT_OPERATION_ATOMIC_SGE 0x00000004
#define RVT_OPERATION_LOCAL 0x00000008
#define RVT_OPERATION_USE_RESERVE 0x00000010

#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)

/**
 * rvt_operation_params - op table entry
 * @length - the length to copy into the swqe entry
 * @qpt_support - a bit mask indicating QP type support
 * @flags - RVT_OPERATION flags (see above)
 *
 * This supports table-driven post send so that
 * each driver can register a potentially
 * different set of operations.
 */

struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
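
/*
 * Illustrative table entry (hypothetical, not from this header): a
 * driver registers one entry per supported ib_wr_opcode, e.g. for
 * RDMA read on RC QPs:
 *
 *	[IB_WR_RDMA_READ] = {
 *		.length = sizeof(struct ib_rdma_wr),
 *		.qpt_support = BIT(IB_QPT_RC),
 *	},
 */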

/*
 * Common variables are protected by both r_rq.lock and s_lock in that
 * order, which only happens in modify_qp() or when changing the QP
 * 'state'.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv; /* Driver private data */
	/* read mostly fields above and below */
	struct ib_ah_attr remote_ah_attr;
	struct ib_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next; /* link list for QPN hash table */
	struct rvt_swqe *s_wq; /* send work queue */
	struct rvt_mmap_info *ip;

	unsigned long timeout_jiffies; /* computed from timeout */

	enum ib_mtu path_mtu;
	int srate_mbps; /* s_srate (below) converted to Mbit/s */
	pid_t pid; /* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey; /* QKEY for this QP (for UD or RD) */
	u32 s_size; /* send work queue size */
	u32 s_ahgpsn; /* set to the psn in the copy of the header */

	u16 pmtu; /* decoded from path_mtu */
	u8 log_pmtu; /* shift for pmtu */
	u8 state; /* QP state */
	u8 allowed_ops; /* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout; /* Alternate path timeout for this QP */
	u8 timeout; /* Timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index; /* PKEY index to use */
	u8 s_alt_pkey_index; /* Alternate path PKEY index to use */
	u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
	u8 s_retry_cnt; /* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
	u8 s_max_sge; /* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp; /* used for APM */
	u32 r_psn; /* expected rcv packet sequence number */
	unsigned long r_aflags;
	u64 r_wr_id; /* ID for current receive WQE */
	u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
	u32 r_len; /* total length of r_sge */
	u32 r_rcv_len; /* receive data len processed */
	u32 r_msn; /* message sequence number */

	u8 r_state; /* opcode of last packet received */
	u8 r_flags;
	u8 r_head_ack_queue; /* index into s_ack_queue[] */

	struct list_head rspwait; /* link for waiting to respond */

	struct rvt_sge_state r_sge; /* current receive data */
	struct rvt_rq r_rq; /* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head; /* new entries added here */
	u32 s_next_psn; /* PSN for next request */
	u32 s_avail; /* number of entries avail */
	u32 s_ssn; /* SSN of tail entry */
	atomic_t s_reserved_used; /* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp;
	u32 s_flags;
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge; /* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_cur_size; /* size of send packet in bytes */
	u32 s_len; /* total length of s_sge */
	u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
	u32 s_last_psn; /* last response PSN processed */
	u32 s_sending_psn; /* lowest PSN that is being sent */
	u32 s_sending_hpsn; /* highest PSN that is being sent */
	u32 s_psn; /* current packet sequence number */
	u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
	u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
	u32 s_tail; /* next entry to process */
	u32 s_cur; /* current work queue entry */
	u32 s_acked; /* last un-ACK'ed entry */
	u32 s_last; /* last completed entry */
	u32 s_lsn; /* limit sequence number (credit) */
	u16 s_hdrwords; /* size of s_hdr in 32 bit words */
	u16 s_rdma_ack_cnt;
	s8 s_ahgidx;
	u8 s_state; /* opcode of last packet sent */
	u8 s_ack_state; /* opcode of packet to ACK */
	u8 s_nak_state; /* non-zero if NAK is pending */
	u8 r_nak_state; /* non-zero if NAK is pending */
	u8 s_retry; /* requester retry counter */
	u8 s_rnr_retry; /* requester RNR retry counter */
	u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue; /* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;
	struct hrtimer s_rnr_timer;

	atomic_t local_ops_pending; /* number of fast_reg/local_inv reqs */

	/*
	 * This sge list MUST be last. Do not add anything below here.
	 */
	struct rvt_sge r_sg_list[0] /* verified SGEs */
		____cacheline_aligned_in_smp;
};

struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;
	/* send signal when number of RWQEs < limit */
	u32 limit;
};

#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK 0xFFFFFF
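
/*
 * Worked example (illustrative, assuming 4 KiB pages): RVT_QPN_MAX is
 * 2^24, so the QPN bitmap spans 2^24 / (4096 * 8) = 512 map entries,
 * each page covering RVT_BITS_PER_PAGE = 32768 QP numbers.
 */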

/*
 * QPN-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way,
 * large bitmaps are not allocated unless large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};

struct rvt_qpn_table {
	spinlock_t lock; /* protect changes to the qp table */
	unsigned flags; /* flags for QP0/1 allocated for each port */
	u32 last; /* last QP number allocated */
	u32 nmaps; /* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers other than 0/1 */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};

struct rvt_qp_ibdev {
	u32 qp_table_size;
	u32 qp_table_bits;
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock; /* qptable lock */
	struct rvt_qpn_table qpn_table;
};

/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of
 * struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;
	struct rvt_qp *qp;
};

struct rvt_mcast {
	struct rb_node rb_node;
	union ib_gid mgid;
	struct list_head qp_list;
	wait_queue_head_t wait;
	atomic_t refcount;
	int n_attached;
};

/*
 * Since struct rvt_swqe is not a fixed size, we can't simply index into
 * struct rvt_qp.s_wq. This function does the array index computation.
 */
static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
						unsigned n)
{
	return (struct rvt_swqe *)((char *)qp->s_wq +
				   (sizeof(struct rvt_swqe) +
				    qp->s_max_sge *
				    sizeof(struct rvt_sge)) * n);
}
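
/*
 * Usage sketch (illustrative): because each entry occupies
 * sizeof(struct rvt_swqe) plus s_max_sge SGEs, the send queue must be
 * walked with this helper rather than by array indexing, e.g.:
 *
 *	struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_last);
 */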

/*
 * Since struct rvt_rwqe is not a fixed size, we can't simply index into
 * struct rvt_rwq.wq. This function does the array index computation.
 */
static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
{
	return (struct rvt_rwqe *)
		((char *)rq->wq->wq +
		 (sizeof(struct rvt_rwqe) +
		  rq->max_sge * sizeof(struct ib_sge)) * n);
}
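
/*
 * Usage sketch (illustrative): receive entries live inside the
 * mmap-able struct rvt_rwq, hence the rq->wq->wq indirection; the
 * tail index here is a hypothetical local variable:
 *
 *	struct rvt_rwqe *rwqe = rvt_get_rwqe_ptr(&qp->r_rq, tail);
 */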

/**
 * rvt_get_qp - get a QP reference
 * @qp - the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}

/**
 * rvt_put_qp - release a QP reference
 * @qp - the QP to release
 */
static inline void rvt_put_qp(struct rvt_qp *qp)
{
	if (qp && atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * rvt_put_swqe - drop mr refs held by swqe
 * @wqe - the send wqe
 *
 * This drops any mr references held by the swqe
 */
static inline void rvt_put_swqe(struct rvt_swqe *wqe)
{
	int i;

	for (i = 0; i < wqe->wr.num_sge; i++) {
		struct rvt_sge *sge = &wqe->sg_list[i];

		rvt_put_mr(sge->mr);
	}
}

/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This routine is used in post send to record
 * that a wqe uses a reserved operation.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
	atomic_inc(&qp->s_reserved_used);
}

/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp - the rvt qp
 * @wqe - the send wqe
 *
 * This decrements the reserve use count.
 *
 * This call MUST precede the change to
 * s_last to ensure that post send sees a stable
 * s_avail.
 *
 * An smp_mb__after_atomic() is used to ensure
 * the compiler does not juggle the order of the s_last
 * ring index and the decrementing of s_reserved_used.
 */
static inline void rvt_qp_wqe_unreserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
		wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
		atomic_dec(&qp->s_reserved_used);
		/* ensure no compiler re-order up to s_last change */
		smp_mb__after_atomic();
	}
}
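
/*
 * Pairing note (illustrative): a driver reserves in its post-send path
 * via rvt_qp_wqe_reserve() and unreserves in its completion path just
 * before advancing s_last, so that s_avail stays consistent for
 * concurrent posters.
 */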

extern const enum ib_wc_opcode ib_rvt_wc_opcode[];

/**
 * rvt_qp_swqe_complete() - insert send completion
 * @qp - the qp
 * @wqe - the send wqe
 * @status - completion status
 *
 * Insert a send completion into the completion
 * queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion
 * control.
 */
static inline void rvt_qp_swqe_complete(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe,
	enum ib_wc_status status)
{
	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
		return;
	if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
	    status != IB_WC_SUCCESS) {
		struct ib_wc wc;

		memset(&wc, 0, sizeof(wc));
		wc.wr_id = wqe->wr.wr_id;
		wc.status = status;
		wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
		wc.qp = &qp->ibqp;
		wc.byte_len = wqe->length;
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
			     status != IB_WC_SUCCESS);
	}
}

#define RVT_AETH_CREDIT_SHIFT 24
#define RVT_AETH_CREDIT_MASK 0x1F
#define RVT_AETH_NAK_SHIFT 29
#define RVT_MSN_MASK 0xFFFFFF
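
/*
 * Illustrative layout (not from the upstream header): an AETH packs the
 * syndrome into bits 31:24 and the MSN into bits 23:0. Credits occupy
 * the low 5 syndrome bits and NAK codes begin at RVT_AETH_NAK_SHIFT:
 *
 *	aeth = (credits << RVT_AETH_CREDIT_SHIFT) | (msn & RVT_MSN_MASK);
 */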

/*
 * Compare the lower 24 bits of the msn values.
 * Returns an integer <, ==, or > than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
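
/*
 * Worked example (illustrative): the << 8 drops the bits above the
 * 24-bit MSN before the sign is inspected, so with a = 1 and
 * b = 0xFFFFFF the difference wraps and rvt_cmp_msn(a, b) > 0,
 * i.e. a is "newer" than b across the 24-bit rollover.
 */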

/**
 * rvt_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 rvt_compute_aeth(struct rvt_qp *qp);

/**
 * rvt_get_credit - process the credit information in an AETH
 * @qp: the qp whose send work queue may be unblocked
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
616 | ||
5dc80605 MM |
617 | /** |
618 | * @qp - the qp pair | |
619 | * @len - the length | |
620 | * | |
621 | * Perform a shift based mtu round up divide | |
622 | */ | |
623 | static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len) | |
624 | { | |
625 | return (len + qp->pmtu - 1) >> qp->log_pmtu; | |
626 | } | |

/**
 * rvt_div_mtu - mtu divide
 * @qp - the qp
 * @len - the length
 *
 * Perform a shift based mtu divide
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
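
/*
 * Worked example (illustrative, assuming pmtu = 4096, log_pmtu = 12):
 * rvt_div_round_up_mtu(qp, 5000) = (5000 + 4095) >> 12 = 2 packets,
 * while rvt_div_mtu(qp, 5000) = 5000 >> 12 = 1.
 */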

extern const int ib_rvt_state_ops[];

struct rvt_dev_info;
void rvt_comm_est(struct rvt_qp *qp);
int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);

#endif /* DEF_RDMAVT_INCQP_H */