IB/qib: Limit the number of packets processed per interrupt
drivers/infiniband/hw/qib/qib_ud.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd;
	struct qib_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct qib_sge_state ssge;
	struct qib_sge *sge;
	struct ib_wc wc;
	u32 length;

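	/*
	 * qib_lookup_qpn() takes a reference on the destination QP on
	 * success; every path below must release it, which is what the
	 * atomic_dec_and_test() at the "drop" label does.
	 */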
	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
	if (!qp) {
		ibp->n_pkt_drops++;
		return;
	}
	if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto drop;
	}

	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

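		/*
		 * E.g. a WR qkey of 0x80000000 has the sign bit set, so
		 * the QP's own qkey is used instead of the WR value.
		 */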
		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof wc);
	wc.byte_len = length + sizeof(struct ib_grh);
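	/* E.g. a zero-length SEND completes with byte_len == 40 (the GRH). */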

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
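	/*
	 * QIB_R_REUSE_SGE means the previously fetched RWQE was not
	 * consumed (its packet was dropped), so its SGE state can be
	 * reused instead of fetching a new entry.
	 */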
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}

	if (ah_attr->ah_flags & IB_AH_GRH) {
		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
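	/*
	 * Copy the payload SGE by SGE.  ssge is a local copy of the
	 * sender's SGE state, so the sender's swqe is left intact for
	 * the completion generated by the caller.
	 */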
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->wr.wr.ud.pkey_index : 0;
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != QIB_PERMISSIVE_LID)
			ibp->n_multicast_xmit++;
		else
			ibp->n_unicast_xmit++;
	} else {
		ibp->n_unicast_xmit++;
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_dma_busy)) {
				qp->s_flags |= QIB_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
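	/*
	 * Pad the payload to a multiple of 4 bytes: e.g. for a 5-byte
	 * payload, -5 & 3 == 3 pad bytes and nwords == 2.
	 */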
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &ah_attr->grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  The IBTA spec leaves the behavior unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);	/* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
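	/*
	 * BTH word 0 layout: opcode in bits 31:24 (set above), solicited
	 * event in bit 23, pad count in bits 21:20, P_Key in bits 15:0.
	 */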
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
		ah_attr->dlid != QIB_PERMISSIVE_LID ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

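/*
 * Return the index of the given P_Key in the port's P_Key table,
 * ignoring the limited/full membership bit (bit 15).
 */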
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 * Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
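	/*
	 * DETH word 0 carries the Q_Key; the low 24 bits of DETH word 1
	 * carry the source QP number (see the send side above).
	 */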
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;

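	/* tlen includes the 4-byte ICRC at the end of the packet. */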
	/* Get the number of bytes the message was padded by. */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4))) {
		/* Drop incomplete packets. */
		ibp->n_pkt_drops++;
		goto bail;
	}
	tlen -= hdrsize + pad + 4;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE)) {
			ibp->n_pkt_drops++;
			goto bail;
		}
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				goto bail;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			goto bail;
		}
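		/*
		 * lrh[0] bits 15:12 carry the VL; VL15 is reserved for
		 * subnet management, and a MAD is always 256 bytes.
		 */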
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15))) {
			ibp->n_pkt_drops++;
			goto bail;
		}
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15) {
			ibp->n_pkt_drops++;
			goto bail;
		}
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
			ibp->n_pkt_drops++;
			goto bail;
		}
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		ibp->n_pkt_drops++;
		goto bail;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * We need to serialize getting a receive work queue entry and
	 * generating a completion for it against QPs sending to this QP
	 * locally.
	 */
	spin_lock(&qp->r_lock);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
bail_unlock:
	spin_unlock(&qp->r_lock);
bail:;
}