/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>

#include "qib.h"
#include "qib_mad.h"

/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
        struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_devdata *dd = ppd->dd;
        struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
        struct rvt_qp *qp;
        struct rdma_ah_attr *ah_attr;
        unsigned long flags;
        struct rvt_sge_state ssge;
        struct rvt_sge *sge;
        struct ib_wc wc;
        u32 length;
        enum ib_qp_type sqptype, dqptype;

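        /*
         * The QPN lookup below is RCU-protected: qib_ud_rcv() may be
         * running concurrently from the receive interrupt handler, so
         * @qp is only safe to use between rcu_read_lock() here and the
         * rcu_read_unlock() at the drop label.
         */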
        rcu_read_lock();
        qp = rvt_lookup_qpn(rdi, &ibp->rvp, swqe->ud_wr.remote_qpn);
        if (!qp) {
                ibp->rvp.n_pkt_drops++;
                goto drop;
        }

        sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
                        IB_QPT_UD : sqp->ibqp.qp_type;
        dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
                        IB_QPT_UD : qp->ibqp.qp_type;

        if (dqptype != sqptype ||
            !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
                ibp->rvp.n_pkt_drops++;
                goto drop;
        }

        ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
        ppd = ppd_from_ibp(ibp);

        if (qp->ibqp.qp_num > 1) {
                u16 pkey1;
                u16 pkey2;
                u16 lid;

                pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
                pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
                if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
                        lid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
                                          ((1 << ppd->lmc) - 1));
                        qib_bad_pkey(ibp, pkey1,
                                     rdma_ah_get_sl(ah_attr),
                                     sqp->ibqp.qp_num, qp->ibqp.qp_num,
                                     cpu_to_be16(lid),
                                     cpu_to_be16(rdma_ah_get_dlid(ah_attr)));
                        goto drop;
                }
        }

        /*
         * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
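        /*
         * Illustrative (hypothetical) values: a WR qkey of 0x80010000
         * has the high bit set, so the (int) cast below makes it
         * negative and sqp->qkey is used instead; a WR qkey of
         * 0x00010000 is used as-is.
         */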
        if (qp->ibqp.qp_num) {
                u32 qkey;

                qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
                        sqp->qkey : swqe->ud_wr.remote_qkey;
                if (unlikely(qkey != qp->qkey))
                        goto drop;
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
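        /*
         * For example, a 64-byte payload is reported to the consumer
         * as byte_len = 64 + 40 = 104, since sizeof(struct ib_grh) is
         * 40 bytes.
         */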
        length = swqe->length;
        memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);

        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = swqe->wr.ex.imm_data;
        }

        spin_lock_irqsave(&qp->r_lock, flags);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & RVT_R_REUSE_SGE)
                qp->r_flags &= ~RVT_R_REUSE_SGE;
        else {
                int ret;

                ret = qib_get_rwqe(qp, 0);
                if (ret < 0) {
                        rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
                        goto bail_unlock;
                }
                if (!ret) {
                        if (qp->ibqp.qp_num == 0)
                                ibp->rvp.n_vl15_dropped++;
                        goto bail_unlock;
                }
        }
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= RVT_R_REUSE_SGE;
                ibp->rvp.n_pkt_drops++;
                goto bail_unlock;
        }

        if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
                struct ib_grh grh;
                const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);

                qib_make_grh(ibp, &grh, grd, 0, 0);
                qib_copy_sge(&qp->r_sge, &grh,
                             sizeof(grh), 1);
                wc.wc_flags |= IB_WC_GRH;
        } else
                rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
        ssge.sg_list = swqe->sg_list + 1;
        ssge.sge = *swqe->sg_list;
        ssge.num_sge = swqe->wr.num_sge;
        sge = &ssge.sge;
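        /*
         * Copy the payload one chunk at a time: each pass copies
         * min(remaining length, sge->length, sge->sge_length) bytes,
         * then either advances to the next SGE in the list or steps to
         * the next segment of the current MR when the mapped bytes of
         * this SGE are exhausted.
         */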
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--ssge.num_sge)
                                *sge = *ssge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
        rvt_put_ss(&qp->r_sge);
        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
                goto bail_unlock;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        wc.src_qp = sqp->ibqp.qp_num;
        wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
                swqe->ud_wr.pkey_index : 0;
        wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
                              ((1 << ppd->lmc) - 1));
        wc.sl = rdma_ah_get_sl(ah_attr);
        wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
        wc.port_num = qp->port_num;
        /* Signal completion event if the solicited bit is set. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                     swqe->wr.send_flags & IB_SEND_SOLICITED);
        ibp->rvp.n_loop_pkts++;
bail_unlock:
        spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
        rcu_read_unlock();
}

/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @flags: pointer to the saved IRQ flags for s_lock; updated and
 *         written back if the lock is dropped for local loopback
 *
 * Assumes the s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
{
        struct qib_qp_priv *priv = qp->priv;
        struct ib_other_headers *ohdr;
        struct rdma_ah_attr *ah_attr;
        struct qib_pportdata *ppd;
        struct qib_ibport *ibp;
        struct rvt_swqe *wqe;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
        int ret = 0;
        int next_cur;

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                smp_read_barrier_depends(); /* see post_one_send */
                if (qp->s_last == READ_ONCE(qp->s_head))
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&priv->s_dma_busy)) {
                        qp->s_flags |= RVT_S_WAIT_DMA;
                        goto bail;
                }
                wqe = rvt_get_swqe_ptr(qp, qp->s_last);
                qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done;
        }

        /* see post_one_send() */
        smp_read_barrier_depends();
        if (qp->s_cur == READ_ONCE(qp->s_head))
                goto bail;

        wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
        next_cur = qp->s_cur + 1;
        if (next_cur >= qp->s_size)
                next_cur = 0;

        /* Construct the header. */
        ibp = to_iport(qp->ibqp.device, qp->port_num);
        ppd = ppd_from_ibp(ibp);
        ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
        if (rdma_ah_get_dlid(ah_attr) >= be16_to_cpu(IB_MULTICAST_LID_BASE)) {
                if (rdma_ah_get_dlid(ah_attr) !=
                                be16_to_cpu(IB_LID_PERMISSIVE))
                        this_cpu_inc(ibp->pmastats->n_multicast_xmit);
                else
                        this_cpu_inc(ibp->pmastats->n_unicast_xmit);
        } else {
                this_cpu_inc(ibp->pmastats->n_unicast_xmit);
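                /*
                 * Mask off the low LMC path bits to recover the base
                 * LID. Illustrative values: with ppd->lmc == 2, DLIDs
                 * 0x1000 through 0x1003 all yield base LID 0x1000, so
                 * loopback is detected for any path-bit combination.
                 */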
                lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
                if (unlikely(lid == ppd->lid)) {
                        unsigned long tflags = *flags;
                        /*
                         * If DMAs are in progress, we can't generate
                         * a completion for the loopback packet since
                         * it would be out of order.
                         * XXX Instead of waiting, we could queue a
                         * zero length descriptor so we get a callback.
                         */
                        if (atomic_read(&priv->s_dma_busy)) {
                                qp->s_flags |= RVT_S_WAIT_DMA;
                                goto bail;
                        }
                        qp->s_cur = next_cur;
                        spin_unlock_irqrestore(&qp->s_lock, tflags);
                        qib_ud_loopback(qp, wqe);
                        spin_lock_irqsave(&qp->s_lock, tflags);
                        *flags = tflags;
                        qib_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done;
                }
        }

        qp->s_cur = next_cur;
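        /*
         * -len & 3 is the number of pad bytes needed to round the
         * payload up to a 4-byte boundary: e.g. a 5-byte payload gives
         * extra_bytes = (-5 & 3) = 3 and nwords = (5 + 3) >> 2 = 2
         * (illustrative arithmetic).
         */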
        extra_bytes = -wqe->length & 3;
        nwords = (wqe->length + extra_bytes) >> 2;

        /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
        qp->s_hdrwords = 7;
        qp->s_cur_size = wqe->length;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_srate = rdma_ah_get_static_rate(ah_attr);
        qp->s_wqe = wqe;
        qp->s_sge.sge = wqe->sg_list[0];
        qp->s_sge.sg_list = wqe->sg_list + 1;
        qp->s_sge.num_sge = wqe->wr.num_sge;
        qp->s_sge.total_len = wqe->length;

        if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
                /* Header size in 32-bit words. */
                qp->s_hdrwords += qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
                                               rdma_ah_read_grh(ah_attr),
                                               qp->s_hdrwords, nwords);
                lrh0 = QIB_LRH_GRH;
                ohdr = &priv->s_hdr->u.l.oth;
                /*
                 * Don't worry about sending to locally attached
                 * multicast QPs; what happens in that case is
                 * unspecified by the spec.
                 */
        } else {
                /* Header size in 32-bit words. */
                lrh0 = QIB_LRH_BTH;
                ohdr = &priv->s_hdr->u.oth;
        }
        if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                qp->s_hdrwords++;
                ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
        } else
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
        lrh0 |= rdma_ah_get_sl(ah_attr) << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
        else
                lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(ah_attr)] << 12;
        priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
        priv->s_hdr->lrh[1] =
                cpu_to_be16(rdma_ah_get_dlid(ah_attr)); /* DEST LID */
        priv->s_hdr->lrh[2] =
                cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        lid = ppd->lid;
        if (lid) {
                lid |= rdma_ah_get_path_bits(ah_attr) &
                        ((1 << ppd->lmc) - 1);
                priv->s_hdr->lrh[3] = cpu_to_be16(lid);
        } else
                priv->s_hdr->lrh[3] = IB_LID_PERMISSIVE;
        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= IB_BTH_SOLICITED;
        bth0 |= extra_bytes << 20;
        bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
                qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
                             wqe->ud_wr.pkey_index : qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        /*
         * Use the multicast QP if the destination LID is a multicast LID.
         */
        ohdr->bth[1] = rdma_ah_get_dlid(ah_attr) >=
                        be16_to_cpu(IB_MULTICAST_LID_BASE) &&
                rdma_ah_get_dlid(ah_attr) != be16_to_cpu(IB_LID_PERMISSIVE) ?
                cpu_to_be32(QIB_MULTICAST_QPN) :
                cpu_to_be32(wqe->ud_wr.remote_qpn);
        ohdr->bth[2] = cpu_to_be32(wqe->psn & QIB_PSN_MASK);
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
                                         qp->qkey : wqe->ud_wr.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
        return 1;
bail:
        qp->s_flags &= ~RVT_S_BUSY;
        return ret;
}

static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        struct qib_devdata *dd = ppd->dd;
        unsigned ctxt = ppd->hw_pidx;
        unsigned i;

        pkey &= 0x7fff; /* remove limited/full membership bit */

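        /*
         * Illustrative example: the full-membership key 0xffff and the
         * limited-membership key 0x7fff differ only in bit 15, so both
         * match the same table slot once the bit is masked off here.
         */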
        for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
                if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
                        return i;

        /*
         * Should not get here, this means hardware failed to validate pkeys.
         * Punt and return index 0.
         */
        return 0;
}

/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
                int has_grh, void *data, u32 tlen, struct rvt_qp *qp)
{
        struct ib_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 pad;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
        u16 dlid;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12 + 8; /* LRH + BTH + DETH */
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
        }
        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & RVT_QPN_MASK;

        /*
         * Get the number of bytes the message was padded by
         * and drop incomplete packets.
         */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
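        /*
         * The 2-bit PadCnt field lives in bits 21:20 of the first BTH
         * dword, e.g. a host-order bth[0] of 0x00300000 decodes to
         * pad = 3 (illustrative value).
         */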
        if (unlikely(tlen < (hdrsize + pad + 4)))
                goto drop;

        tlen -= hdrsize + pad + 4;

        /*
         * Check that the permissive LID is only used on QP0
         * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                             hdr->lrh[3] == IB_LID_PERMISSIVE))
                        goto drop;
                if (qp->ibqp.qp_num > 1) {
                        u16 pkey1, pkey2;

                        pkey1 = be32_to_cpu(ohdr->bth[0]);
                        pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
                        if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
                                qib_bad_pkey(ibp,
                                             pkey1,
                                             (be16_to_cpu(hdr->lrh[0]) >> 4) &
                                                0xF,
                                             src_qp, qp->ibqp.qp_num,
                                             hdr->lrh[3], hdr->lrh[1]);
                                return;
                        }
                }
                if (unlikely(qkey != qp->qkey))
                        return;

                /* Drop invalid MAD packets (see 13.5.3.1). */
                if (unlikely(qp->ibqp.qp_num == 1 &&
                             (tlen != 256 ||
                              (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
                        goto drop;
        } else {
                struct ib_smp *smp;

                /* Drop invalid MAD packets (see 13.5.3.1). */
                if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
                        goto drop;
                smp = (struct ib_smp *) data;
                if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
                     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
                    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                        goto drop;
        }

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
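        /*
         * E.g. a host-order bth[0] of 0x64xxxxxx decodes to opcode
         * 0x64, which is IB_OPCODE_UD_SEND_ONLY (illustrative value).
         */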
        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                wc.ex.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                tlen -= sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
        } else
                goto drop;

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc.byte_len = tlen + sizeof(struct ib_grh);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & RVT_R_REUSE_SGE)
                qp->r_flags &= ~RVT_R_REUSE_SGE;
        else {
                int ret;

                ret = qib_get_rwqe(qp, 0);
                if (ret < 0) {
                        rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
                        return;
                }
                if (!ret) {
                        if (qp->ibqp.qp_num == 0)
                                ibp->rvp.n_vl15_dropped++;
                        return;
                }
        }
        /* Silently drop packets which are too big. */
        if (unlikely(wc.byte_len > qp->r_len)) {
                qp->r_flags |= RVT_R_REUSE_SGE;
                goto drop;
        }
        if (has_grh) {
                qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                             sizeof(struct ib_grh), 1);
                wc.wc_flags |= IB_WC_GRH;
        } else
                rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
        qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
        rvt_put_ss(&qp->r_sge);
        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
                return;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.qp = &qp->ibqp;
        wc.src_qp = src_qp;
        wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
                qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
        wc.slid = be16_to_cpu(hdr->lrh[3]);
        wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
        dlid = be16_to_cpu(hdr->lrh[1]);
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
        wc.dlid_path_bits = dlid >= be16_to_cpu(IB_MULTICAST_LID_BASE) ? 0 :
                dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
        wc.port_num = qp->port_num;
        /* Signal completion event if the solicited bit is set. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                     (ohdr->bth[0] &
                      cpu_to_be32(IB_BTH_SOLICITED)) != 0);
        return;

drop:
        ibp->rvp.n_pkt_drops++;
}