/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off)	find_next_zero_bit((map)->page, \
						BITS_PER_PAGE, off)

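/*
 * Layout note (editorial, derived from the macros above): each qpn_map
 * page is one bitmap covering BITS_PER_PAGE QPNs.  Assuming 4 KB pages
 * for the sake of an example, BITS_PER_PAGE is 4096 * 8 = 32768, so
 * QPN 40000 lands in qpt->map[1] at bit offset 7232 -- the inverse of
 * what mk_qpn() computes from a map pointer and a bit offset.
 */
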
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

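/*
 * Worked example of the encoding (editorial note, not driver code):
 * with 20 receive WQEs available, the binary search in
 * ipath_compute_aeth() settles on code 0x8 and advertises 16 credits,
 * the largest table entry that does not exceed the actual count.
 * IPATH_AETH_CREDIT_INVAL marks the credit field invalid; it is used
 * when the QP is attached to an SRQ and credits are not generated.
 */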

static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}


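/*
 * QPN allocation overview (editorial summary of the logic below):
 * QPN 0 and QPN 1 are reserved for the SMI and GSI special QPs, so the
 * general scan starts at qpt->last + 1 and wraps back to 2.  The scan
 * is modelled on alloc_pidmap() in kernel/pid.c: all existing bitmap
 * pages are searched before qpt->nmaps is grown.
 */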
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
}

/**
 * ipath_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp;
	u32 n, qp_inuse = 0;

	spin_lock_irqsave(&qpt->lock, flags);
	for (n = 0; n < qpt->max; n++) {
		qp = qpt->table[n];
		qpt->table[n] = NULL;

		for (; qp; qp = qp->next)
			qp_inuse++;
	}
	spin_unlock_irqrestore(&qpt->lock, flags);

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	return qp_inuse;
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_pkt_delay = 0;
	qp->s_draining = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
}

/**
 * ipath_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR)
		goto bail;

	qp->state = IB_QPS_ERR;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		ipath_schedule_send(qp);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * don't allow invalid Path MTU values or greater than 2048
	 * unless we are configured for a 4KB MTU
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
	    (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
	     (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}

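/*
 * Caller-side sketch (illustrative only, not part of this driver): a
 * verbs consumer bringing an RC QP up typically calls ib_modify_qp()
 * three times -- RESET->INIT with IB_QP_STATE, IB_QP_PKEY_INDEX,
 * IB_QP_PORT and IB_QP_ACCESS_FLAGS; INIT->RTR adding IB_QP_AV,
 * IB_QP_PATH_MTU, IB_QP_DEST_QPN, IB_QP_RQ_PSN,
 * IB_QP_MAX_DEST_RD_ATOMIC and IB_QP_MIN_RNR_TIMER; then RTR->RTS with
 * IB_QP_SQ_PSN, IB_QP_TIMEOUT, IB_QP_RETRY_CNT, IB_QP_RNR_RETRY and
 * IB_QP_MAX_QP_RD_ATOMIC -- each mask bit landing in the corresponding
 * attr_mask handling above.
 */
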
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
				ipath_create_mmap_info(dev, s,
						       ibpd->uobject->context,
						       qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->timerwait))
			list_del_init(&qp->timerwait);
		if (!list_empty(&qp->piowait))
			list_del_init(&qp->piowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
		spin_unlock_irq(&qp->s_lock);
		/* Stop the sending tasklet */
		tasklet_kill(&qp->s_task);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
	} else
		spin_unlock_irq(&qp->s_lock);

	ipath_free_qp(&dev->qp_table, qp);

	if (qp->s_tx) {
		atomic_dec(&qp->refcount);
		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
			kfree(qp->s_tx->txreq.map_addr);
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
		spin_unlock_irq(&dev->pending_lock);
		qp->s_tx = NULL;
	}

	wait_event(qp->wait, !atomic_read(&qp->refcount));

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	kfree(qp->r_ud_sg_list);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
	    qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		ipath_schedule_send(qp);
}
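
/*
 * Worked example for the credit computation above (illustrative,
 * assuming IPATH_AETH_CREDIT_SHIFT places the credit code above the
 * 24-bit MSN): an AETH carrying MSN 100 and credit code 0x8 yields
 * credit_table[0x8] = 16, so s_lsn becomes (100 + 16) & IPATH_MSN_MASK
 * = 116, and sends blocked on IPATH_S_WAIT_SSN_CREDIT whose SSN does
 * not exceed that limit are rescheduled.
 */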