IB/qib: Clean up register_ib_device
[linux-2.6-block.git] drivers/infiniband/hw/qib/qib_ud.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd;
	struct rvt_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	qp = qib_lookup_qpn(ibp, swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
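	/*
	 * Illustrative example: a WR qkey of 0x80001234 has the high
	 * bit set, so sqp->qkey is used; a WR qkey of 0x00001234 is
	 * used as-is.  The (int) cast below implements the "high bit
	 * set" test as a sign check.
	 */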
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);
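	/*
	 * sizeof(struct ib_grh) is 40 bytes, so byte_len always counts
	 * a 40-byte GRH even when none was supplied by the sender.
	 */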

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (ah_attr->ah_flags & IB_AH_GRH) {
		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
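	/*
	 * Work on a local copy (ssge) of the sender's SGE state so the
	 * loop below can advance it without modifying the WQE itself;
	 * the sender's completion is generated separately by the caller.
	 */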
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	qib_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->ud_wr.pkey_index : 0;
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct rvt_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u16 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (atomic_read(&priv->s_dma_busy)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}

	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	if (ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
		if (ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE))
			this_cpu_inc(ibp->pmastats->n_multicast_xmit);
		else
			this_cpu_inc(ibp->pmastats->n_unicast_xmit);
	} else {
		this_cpu_inc(ibp->pmastats->n_unicast_xmit);
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&priv->s_dma_busy)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}

	qp->s_cur = next_cur;
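	/*
	 * Pad the payload to a multiple of 4 bytes: -length & 3 is the
	 * byte count to the next 4-byte boundary (e.g. a 5-byte payload
	 * gives extra_bytes = 3 and nwords = 2).
	 */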
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
					       &ah_attr->grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &priv->s_hdr->u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  The IBTA spec leaves that behavior unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &priv->s_hdr->u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
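	/*
	 * LRH dword 0 layout: VL in bits 15:12, link version in 11:8,
	 * SL in bits 7:4, LNH in bits 1:0 (set via QIB_LRH_*).
	 */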
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
		priv->s_hdr->lrh[3] = cpu_to_be16(lid);
	} else
		priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
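	/* The pad count goes in bits 21:20 of the first BTH dword. */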
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->ud_wr.pkey_index : qp->s_pkey_index);
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) &&
		ah_attr->dlid != be16_to_cpu(IB_LID_PERMISSIVE) ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->ud_wr.remote_qpn);
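	/*
	 * QIB_MULTICAST_QPN is the all-ones QPN (0xFFFFFF) reserved for
	 * multicast; receivers demultiplex on the multicast LID/GID
	 * rather than the QPN.
	 */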
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~RVT_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff; /* remove limited/full membership bit */
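	/*
	 * Bit 15 of a P_Key is the membership bit (full vs. limited
	 * member); the table search below compares only the low 15 bits.
	 */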
	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * We should not get here: it means the hardware failed to
	 * validate the pkeys.  Punt and return index 0.
	 */
	return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
	struct qib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;

	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;

	tlen -= hdrsize + pad + 4;
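	/*
	 * The trailing 4 bytes are the ICRC; tlen is now the payload
	 * length (including any immediate data).
	 */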

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			return;
		}
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}

	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
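	/*
	 * For example, a BTH whose first byte on the wire is 0x64
	 * yields opcode == IB_OPCODE_UD_SEND_ONLY after the byte swap.
	 */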
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE)
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	qib_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
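	/*
	 * E.g. with LMC = 2, the low two bits of a unicast DLID are
	 * path bits; multicast DLIDs carry none, hence the 0.
	 */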
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
		      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}