IB/hfi1: Add support for 16B Management Packets
drivers/infiniband/hw/hfi1/ud.c
/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/net.h>
#include <rdma/ib_smi.h>

#include "hfi.h"
#include "mad.h"
#include "verbs_txreq.h"
#include "qp.h"

/* We support only two types - 9B and 16B for now */
static const hfi1_make_req hfi1_make_ud_req_tbl[2] = {
	[HFI1_PKT_TYPE_9B] = &hfi1_make_ud_req_9B,
	[HFI1_PKT_TYPE_16B] = &hfi1_make_ud_req_16B
};

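/*
 * The table above is indexed by the QP's header type; dispatch looks
 * like this (see hfi1_make_ud_req() below):
 *
 *	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
 */
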
/**
 * ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from hfi1_make_ud_req() to forward a WQE addressed
 * to the same HFI.
 * Note that the receive interrupt handler may be calling hfi1_ud_rcv()
 * while this is being called.
 */
static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
{
	struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct hfi1_pportdata *ppd;
	struct hfi1_qp_priv *priv = sqp->priv;
	struct rvt_qp *qp;
	struct rdma_ah_attr *ah_attr;
	unsigned long flags;
	struct rvt_sge_state ssge;
	struct rvt_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	rcu_read_lock();

	qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
			    swqe->ud_wr.remote_qpn);
	if (!qp) {
		ibp->rvp.n_pkt_drops++;
		rcu_read_unlock();
		return;
	}

	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
		IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
		IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
		ibp->rvp.n_pkt_drops++;
		goto drop;
	}

	ah_attr = &ibah_to_rvtah(swqe->ud_wr.ah)->attr;
	ppd = ppd_from_ibp(ibp);

	if (qp->ibqp.qp_num > 1) {
		u16 pkey;
		u32 slid;
		u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];

		pkey = hfi1_get_pkey(ibp, sqp->s_pkey_index);
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));
		if (unlikely(ingress_pkey_check(ppd, pkey, sc5,
						qp->s_pkey_index,
						slid, false))) {
			hfi1_bad_pkey(ibp, pkey,
				      rdma_ah_get_sl(ah_attr),
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      slid, rdma_ah_get_dlid(ah_attr));
			goto drop;
		}
	}

	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
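	/*
	 * Worked example (illustrative values): a WR qkey of 0x80001234
	 * has the high-order bit set, so sqp->qkey is used instead; a
	 * WR qkey of 0x00001234 is used as-is and must equal qp->qkey.
	 */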
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->ud_wr.remote_qkey < 0 ?
			sqp->qkey : swqe->ud_wr.remote_qkey;
		if (unlikely(qkey != qp->qkey))
			goto drop; /* silently drop per IBTA spec */
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof(wc));
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}

	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		ibp->rvp.n_pkt_drops++;
		goto bail_unlock;
	}

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		struct ib_grh grh;
		struct ib_global_route grd = *(rdma_ah_read_grh(ah_attr));

		/*
		 * For loopback packets with extended LIDs, the
		 * sgid_index in the GRH is 0 and the dgid is the
		 * OPA GID of the sender. While creating a response
		 * to the loopback packet, the IB core derives the new
		 * sgid_index from the DGID, and that will be
		 * OPA_GID_INDEX. The new dgid comes from the sgid
		 * index and will be in the IB GID format.
		 *
		 * This leaves the sent packet with a different
		 * sgid_index and dgid than the ones carried by the
		 * response.
		 *
		 * Fix this inconsistency.
		 */
		if (priv->hdr_type == HFI1_PKT_TYPE_16B) {
			if (grd.sgid_index == 0)
				grd.sgid_index = OPA_GID_INDEX;

			if (ib_is_opa_gid(&grd.dgid))
				grd.dgid.global.interface_id =
					cpu_to_be64(ppd->guids[HFI1_PORT_GUID_INDEX]);
		}

		hfi1_make_grh(ibp, &grh, &grd, 0, 0);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		WARN_ON_ONCE(len == 0);
		hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= RVT_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI) {
		if (sqp->ibqp.qp_type == IB_QPT_GSI ||
		    sqp->ibqp.qp_type == IB_QPT_SMI)
			wc.pkey_index = swqe->ud_wr.pkey_index;
		else
			wc.pkey_index = sqp->s_pkey_index;
	} else {
		wc.pkey_index = 0;
	}
	wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
			       ((1 << ppd->lmc) - 1))) & U16_MAX;
	/* Check for loopback when the port lid is not set */
	if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
		wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
	wc.sl = rdma_ah_get_sl(ah_attr);
	wc.dlid_path_bits = rdma_ah_get_dlid(ah_attr) & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->rvp.n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	rcu_read_unlock();
}

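/*
 * hfi1_make_bth_deth - build the BTH and DETH for a UD send
 *
 * When @bypass is set (the 16B path), the pkey is left out of bth0:
 * 16B packets carry the pkey in the LRH instead (see the
 * hfi1_make_16b_hdr() call in hfi1_make_ud_req_16B() below).
 */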
static void hfi1_make_bth_deth(struct rvt_qp *qp, struct rvt_swqe *wqe,
			       struct ib_other_headers *ohdr,
			       u16 *pkey, u32 extra_bytes, bool bypass)
{
	u32 bth0;
	struct hfi1_ibport *ibp;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else {
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	}

	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_SMI)
		*pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
	else
		*pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
	if (!bypass)
		bth0 |= *pkey;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(wqe->ud_wr.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(mask_psn(wqe->psn));
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
					 qp->qkey : wqe->ud_wr.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
}

void hfi1_make_ud_req_9B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			 struct rvt_swqe *wqe)
{
	u32 nwords, extra_bytes;
	u16 len, slid, dlid, pkey;
	u16 lrh0 = 0;
	u8 sc5;
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct ib_grh *grh;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

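	/*
	 * Pad the payload out to a 4-byte boundary; e.g. a 13-byte
	 * payload gets extra_bytes = -13 & 3 = 3 (illustrative value).
	 */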
	extra_bytes = -wqe->length & 3;
	nwords = ((wqe->length + extra_bytes) >> 2) + SIZE_OF_CRC;
	/* header size in dwords LRH+BTH+DETH = (8+12+8)/4. */
	ps->s_txreq->hdr_dwords = 7;
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
		ps->s_txreq->hdr_dwords++;

	if (rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) {
		grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;
		ps->s_txreq->hdr_dwords +=
			hfi1_make_grh(ibp, grh, rdma_ah_read_grh(ah_attr),
				      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
				      nwords);
		lrh0 = HFI1_LRH_GRH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
	} else {
		lrh0 = HFI1_LRH_BTH;
		ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	lrh0 |= (rdma_ah_get_sl(ah_attr) & 0xf) << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI) {
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
		priv->s_sc = 0xf;
	} else {
		lrh0 |= (sc5 & 0xf) << 12;
		priv->s_sc = sc5;
	}

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 9B);
	if (dlid == be16_to_cpu(IB_LID_PERMISSIVE)) {
		slid = be16_to_cpu(IB_LID_PERMISSIVE);
	} else {
		u16 lid = (u16)ppd->lid;

		if (lid) {
			lid |= rdma_ah_get_path_bits(ah_attr) &
				((1 << ppd->lmc) - 1);
			slid = lid;
		} else {
			slid = be16_to_cpu(IB_LID_PERMISSIVE);
		}
	}
	hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, false);
	len = ps->s_txreq->hdr_dwords + nwords;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_9B;
	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
			 lrh0, len, dlid, slid);
}

void hfi1_make_ud_req_16B(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
			  struct rvt_swqe *wqe)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	u32 dlid, slid, nwords, extra_bytes;
	u32 dest_qp = wqe->ud_wr.remote_qpn;
	u32 src_qp = qp->ibqp.qp_num;
	u16 len, pkey;
	u8 l4, sc5;
	bool is_mgmt = false;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;

	/*
	 * Build a 16B Management Packet if either the destination
	 * or source queue pair number is 0 or 1.
	 */
	if (dest_qp == 0 || src_qp == 0 || dest_qp == 1 || src_qp == 1) {
		/* header size in dwords 16B LRH+L4_FM = (16+8)/4. */
		ps->s_txreq->hdr_dwords = 6;
		is_mgmt = true;
	} else {
		/* header size in dwords 16B LRH+BTH+DETH = (16+12+8)/4. */
		ps->s_txreq->hdr_dwords = 9;
		if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
			ps->s_txreq->hdr_dwords++;
	}
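
	/*
	 * Management packets use the compact L4_FM header in place of
	 * BTH/DETH: the pkey travels in the 16B LRH and the QPNs in
	 * the L4_FM header (both are filled in further below).
	 */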
	/* SW provides space for CRC and LT for bypass packets. */
	extra_bytes = hfi1_get_16b_padding((ps->s_txreq->hdr_dwords << 2),
					   wqe->length);
	nwords = ((wqe->length + extra_bytes + SIZE_OF_LT) >> 2) + SIZE_OF_CRC;

	if ((rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH) &&
	    hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) {
		struct ib_grh *grh;
		struct ib_global_route *grd = rdma_ah_retrieve_grh(ah_attr);
		/*
		 * Ensure OPA GIDs are transformed to IB gids
		 * before creating the GRH.
		 */
		if (grd->sgid_index == OPA_GID_INDEX) {
			dd_dev_warn(ppd->dd, "Bad sgid_index. sgid_index: %d\n",
				    grd->sgid_index);
			grd->sgid_index = 0;
		}
		grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
		ps->s_txreq->hdr_dwords += hfi1_make_grh(
			ibp, grh, grd,
			ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
			nwords);
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
	} else {
		ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		priv->s_sc = 0xf;
	else
		priv->s_sc = sc5;

	dlid = opa_get_lid(rdma_ah_get_dlid(ah_attr), 16B);
	if (!ppd->lid)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	else
		slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
				   ((1 << ppd->lmc) - 1));

	if (is_mgmt) {
		l4 = OPA_16B_L4_FM;
		pkey = hfi1_get_pkey(ibp, wqe->ud_wr.pkey_index);
		hfi1_16B_set_qpn(&ps->s_txreq->phdr.hdr.opah.u.mgmt,
				 dest_qp, src_qp);
	} else {
		hfi1_make_bth_deth(qp, wqe, ohdr, &pkey, extra_bytes, true);
	}
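	/*
	 * The 16B LRH expresses packet length in 8-byte flits, while
	 * hdr_dwords and nwords count 4-byte dwords; hence the halving
	 * below (two dwords per flit).
	 */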
	/* Convert dwords to flits */
	len = (ps->s_txreq->hdr_dwords + nwords) >> 1;

	/* Setup the packet */
	ps->s_txreq->phdr.hdr.hdr_type = HFI1_PKT_TYPE_16B;
	hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
			  slid, dlid, len, pkey, 0, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ud_req - construct a UD request packet
 * @qp: the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct rdma_ah_attr *ah_attr;
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct rvt_swqe *wqe;
	int next_cur;
	u32 lid;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}

	/* see post_one_send() */
	if (qp->s_cur == READ_ONCE(qp->s_head))
		goto bail;

	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;

	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &ibah_to_rvtah(wqe->ud_wr.ah)->attr;
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, ah_attr);
	if ((!hfi1_check_mcast(rdma_ah_get_dlid(ah_attr))) ||
	    (rdma_ah_get_dlid(ah_attr) == be32_to_cpu(OPA_LID_PERMISSIVE))) {
		lid = rdma_ah_get_dlid(ah_attr) & ~((1 << ppd->lmc) - 1);
		if (unlikely(!loopback &&
			     ((lid == ppd->lid) ||
			      ((lid == be32_to_cpu(OPA_LID_PERMISSIVE)) &&
			       (qp->ibqp.qp_type == IB_QPT_GSI))))) {
			unsigned long tflags = ps->flags;
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (iowait_sdma_pending(&priv->s_iowait)) {
				qp->s_flags |= RVT_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
			spin_unlock_irqrestore(&qp->s_lock, tflags);
			ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, tflags);
			ps->flags = tflags;
			hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done_free_tx;
		}
	}

	qp->s_cur = next_cur;
	ps->s_txreq->s_cur_size = wqe->length;
	ps->s_txreq->ss = &qp->s_sge;
	qp->s_srate = rdma_ah_get_static_rate(ah_attr);
	qp->srate_mbps = ib_rate_to_mbps(qp->s_srate);
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;

	/* Make the appropriate header */
	hfi1_make_ud_req_tbl[priv->hdr_type](qp, ps, qp->s_wqe);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	ps->s_txreq->sde = priv->s_sde;
	priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
	ps->s_txreq->psc = priv->s_sendcontext;
	/* disarm any ahg */
	priv->s_ahg->ahgcount = 0;
	priv->s_ahg->ahgidx = 0;
	priv->s_ahg->tx_flags = 0;

	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);

bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}

/*
 * Hardware can't check this so we do it here.
 *
 * This is a slightly different algorithm from the standard pkey check.
 * It special cases the management keys and allows for 0x7fff and 0xffff
 * to be in the table at the same time.
 *
 * @returns the index found or -1 if not found
 */
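/*
 * Example (illustrative table): with ppd->pkeys[] = { 0x8001, 0x7fff },
 * a lookup of FULL_MGMT_P_KEY (0xffff) finds no exact match but returns
 * index 1, where LIM_MGMT_P_KEY (0x7fff) sits.
 */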
int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	unsigned i;

	if (pkey == FULL_MGMT_P_KEY || pkey == LIM_MGMT_P_KEY) {
		unsigned lim_idx = -1;

		for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i) {
			/* here we look for an exact match */
			if (ppd->pkeys[i] == pkey)
				return i;
			if (ppd->pkeys[i] == LIM_MGMT_P_KEY)
				lim_idx = i;
		}

		/* did not find 0xffff; return the 0x7fff index if found */
		if (pkey == FULL_MGMT_P_KEY)
			return lim_idx;

		/* no match... */
		return -1;
	}

	pkey &= 0x7fff; /* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
		if ((ppd->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 */
	return -1;
}

void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
		    u8 sc5, const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 7;
	u16 len;
	u8 l4;
	struct hfi1_16b_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u32 nwords;

	/* Populate length */
	nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
		   SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_16B_DWORDS + nwords) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		l4 = OPA_16B_L4_IB_GLOBAL;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		l4 = OPA_16B_L4_IB_LOCAL;
	}

	/* Bits 16 to 19 are TVER. Bits 20 to 22 are the pad count. */
	bth0 = (IB_OPCODE_CNP << 24) | (1 << 16) |
	       (hfi1_get_16b_padding(hwords << 2, 0) << 20);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn);
	ohdr->bth[2] = 0; /* PSN 0 */

	/* Convert dwords to flits */
	len = (hwords + nwords) >> 1;
	hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);

	plen = 2 /* PBC */ + hwords + nwords;
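	/*
	 * 16B goes out as a bypass packet; the flags below also ask
	 * the send engine to insert the ICRC.
	 */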
	pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
		u16 pkey, u32 slid, u32 dlid, u8 sc5,
		const struct ib_grh *old_grh)
{
	u64 pbc, pbc_flags = 0;
	u32 bth0, plen, vl, hwords = 5;
	u16 lrh0;
	u8 sl = ibp->sc_to_sl[sc5];
	struct ib_header hdr;
	struct ib_other_headers *ohdr;
	struct pio_buf *pbuf;
	struct send_context *ctxt = qp_to_send_context(qp, sc5);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	if (old_grh) {
		struct ib_grh *grh = &hdr.u.l.grh;

		grh->version_tclass_flow = old_grh->version_tclass_flow;
		grh->paylen = cpu_to_be16(
			(hwords - LRH_9B_DWORDS + SIZE_OF_CRC) << 2);
		grh->hop_limit = 0xff;
		grh->sgid = old_grh->dgid;
		grh->dgid = old_grh->sgid;
		ohdr = &hdr.u.l.oth;
		lrh0 = HFI1_LRH_GRH;
		hwords += sizeof(struct ib_grh) / sizeof(u32);
	} else {
		ohdr = &hdr.u.oth;
		lrh0 = HFI1_LRH_BTH;
	}

	lrh0 |= (sc5 & 0xf) << 12 | sl << 4;

	bth0 = pkey | (IB_OPCODE_CNP << 24);
	ohdr->bth[0] = cpu_to_be32(bth0);

	ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
	ohdr->bth[2] = 0; /* PSN 0 */

	hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
	plen = 2 /* PBC */ + hwords;
	pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
	vl = sc_to_vlt(ppd->dd, sc5);
	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
	if (ctxt) {
		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
		if (pbuf)
			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
						 &hdr, hwords);
	}
}

/*
 * opa_smp_check() - Do the regular pkey checking, and the additional
 * checks for SMPs specified in OPAv1 rev 1.0, 9/19/2016 update, section
 * 9.10.25 ("SMA Packet Checks").
 *
 * Note that:
 *   - Checks are done using the pkey directly from the packet's BTH,
 *     and specifically _not_ the pkey that we attach to the completion,
 *     which may be different.
 *   - These checks are specifically for "non-local" SMPs (i.e., SMPs
 *     which originated on another node). SMPs which are sent from, and
 *     destined to this node are checked in opa_local_smp_check().
 *
 * At the point where opa_smp_check() is called, we know:
 *   - destination QP is QP0
 *
 * opa_smp_check() returns 0 if all checks succeed, 1 otherwise.
 */
static int opa_smp_check(struct hfi1_ibport *ibp, u16 pkey, u8 sc5,
			 struct rvt_qp *qp, u16 slid, struct opa_smp *smp)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);

	/*
	 * I don't think it's possible for us to get here with sc != 0xf,
	 * but check it to be certain.
	 */
	if (sc5 != 0xf)
		return 1;

	if (rcv_pkey_check(ppd, pkey, sc5, slid))
		return 1;

	/*
	 * At this point we know (and so don't need to check again) that
	 * the pkey is either LIM_MGMT_P_KEY, or FULL_MGMT_P_KEY
	 * (see ingress_pkey_check).
	 */
	if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE &&
	    smp->mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED) {
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}

	/*
	 * SMPs fall into one of four (disjoint) categories:
	 * SMA request, SMA response, SMA trap, or SMA trap repress.
	 * Our response depends, in part, on which type of SMP we're
	 * processing.
	 *
	 * If this is an SMA response, skip the check here.
	 *
	 * If this is an SMA request or SMA trap repress:
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 *
	 * Otherwise:
	 *   - accept if the port is running an SM
	 *   - drop MAD if it's an SMA trap
	 *   - pkey == FULL_MGMT_P_KEY =>
	 *       reply with unsupported method
	 *   - pkey != FULL_MGMT_P_KEY =>
	 *       increment port recv constraint errors, drop MAD
	 */
	switch (smp->method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_MGMT_METHOD_REPORT_RESP:
		break;
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_REPORT:
	case IB_MGMT_METHOD_TRAP_REPRESS:
		if (pkey != FULL_MGMT_P_KEY) {
			ingress_pkey_table_fail(ppd, pkey, slid);
			return 1;
		}
		break;
	default:
		if (ibp->rvp.port_cap_flags & IB_PORT_SM)
			return 0;
		if (smp->method == IB_MGMT_METHOD_TRAP)
			return 1;
		if (pkey == FULL_MGMT_P_KEY) {
			smp->status |= IB_SMP_UNSUP_METHOD;
			return 0;
		}
		ingress_pkey_table_fail(ppd, pkey, slid);
		return 1;
	}
	return 0;
}

/**
 * hfi1_ud_rcv - receive an incoming UD packet
 * @packet: the packet structure, containing the header, payload,
 *	    lengths, and the QP the packet arrived on
 *
 * This is called from qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_ud_rcv(struct hfi1_packet *packet)
{
	u32 hdrsize = packet->hlen;
	struct ib_wc wc;
	u32 src_qp;
	u16 pkey;
	int mgmt_pkey_idx = -1;
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	u8 sc5 = packet->sc;
	u8 sl_from_sc;
	u8 opcode = packet->opcode;
	u8 sl = packet->sl;
	u32 dlid = packet->dlid;
	u32 slid = packet->slid;
	u8 extra_bytes;
	u8 l4 = 0;
	bool dlid_is_permissive;
	bool slid_is_permissive;
	bool solicited = false;

	extra_bytes = packet->pad + packet->extra_byte + (SIZE_OF_CRC << 2);

	if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		u32 permissive_lid =
			opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B);

		l4 = hfi1_16B_get_l4(packet->hdr);
		pkey = hfi1_16B_get_pkey(packet->hdr);
		dlid_is_permissive = (dlid == permissive_lid);
		slid_is_permissive = (slid == permissive_lid);
	} else {
		pkey = ib_bth_get_pkey(packet->ohdr);
		dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
		slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
	}
	sl_from_sc = ibp->sc_to_sl[sc5];

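	/*
	 * 16B management (L4_FM) packets carry no BTH/DETH: the source
	 * QPN comes from the L4_FM header and the solicited bit is
	 * simply left clear.
	 */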
	if (likely(l4 != OPA_16B_L4_FM)) {
		src_qp = ib_get_sqpn(packet->ohdr);
		solicited = ib_bth_is_solicited(packet->ohdr);
	} else {
		src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
	}

	process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	if (unlikely(tlen < (hdrsize + extra_bytes)))
		goto drop;

	tlen -= hdrsize + extra_bytes;

	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(dlid_is_permissive || slid_is_permissive))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			if (unlikely(rcv_pkey_check(ppd, pkey, sc5, slid))) {
				/*
				 * Traps will not be sent for packets dropped
				 * by the HW. This is fine, as sending trap
				 * for invalid pkeys is optional according to
				 * IB spec (release 1.3, section 10.9.4)
				 */
				hfi1_bad_pkey(ibp,
					      pkey, sl,
					      src_qp, qp->ibqp.qp_num,
					      slid, dlid);
				return;
			}
		} else {
			/* GSI packet */
			mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
			if (mgmt_pkey_idx < 0)
				goto drop;
		}
		if (unlikely(l4 != OPA_16B_L4_FM &&
			     ib_get_qkey(packet->ohdr) != qp->qkey))
			return; /* Silent drop */

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen > 2048 || (sc5 == 0xF))))
			goto drop;
	} else {
		/* Received on QP0, and so by definition, this is an SMP */
		struct opa_smp *smp = (struct opa_smp *)data;

		if (opa_smp_check(ibp, pkey, sc5, qp, slid, smp))
			goto drop;

		if (tlen > 2048)
			goto drop;
		if ((dlid_is_permissive || slid_is_permissive) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;

		/* look up SMI pkey */
		mgmt_pkey_idx = hfi1_lookup_pkey_idx(ibp, pkey);
		if (mgmt_pkey_idx < 0)
			goto drop;
	}

	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		tlen -= sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else {
		goto drop;
	}

	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & RVT_R_REUSE_SGE) {
		qp->r_flags &= ~RVT_R_REUSE_SGE;
	} else {
		int ret;

		ret = rvt_get_rwqe(qp, false);
		if (ret < 0) {
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->rvp.n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= RVT_R_REUSE_SGE;
		goto drop;
	}
	if (packet->grh) {
		hfi1_copy_sge(&qp->r_sge, packet->grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
		struct ib_grh grh;
		/*
		 * Assuming 16B is only created on the send side when
		 * large LIDs are in use, and since the GRH was stripped
		 * out when creating 16B, add the GRH back here.
		 */
		hfi1_make_ext_grh(packet, &grh, slid, dlid);
		hfi1_copy_sge(&qp->r_sge, &grh,
			      sizeof(struct ib_grh), true, false);
		wc.wc_flags |= IB_WC_GRH;
	} else {
		rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
	}
	hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
		      true, false);
	rvt_put_ss(&qp->r_sge);
	if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;

	if (qp->ibqp.qp_type == IB_QPT_GSI ||
	    qp->ibqp.qp_type == IB_QPT_SMI) {
		if (mgmt_pkey_idx < 0) {
			if (net_ratelimit()) {
				struct hfi1_devdata *dd = ppd->dd;

				dd_dev_err(dd, "QP type %d mgmt_pkey_idx < 0 and packet not dropped???\n",
					   qp->ibqp.qp_type);
				mgmt_pkey_idx = 0;
			}
		}
		wc.pkey_index = (unsigned)mgmt_pkey_idx;
	} else {
		wc.pkey_index = 0;
	}
	if (slid_is_permissive)
		slid = be32_to_cpu(OPA_LID_PERMISSIVE);
	wc.slid = slid & U16_MAX;
	wc.sl = sl_from_sc;

	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = hfi1_check_mcast(dlid) ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc, solicited);
	return;

drop:
	ibp->rvp.n_pkt_drops++;
}