/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "hfi.h"
#include "verbs_txreq.h"
#include "qp.h"

/* cut down ridiculously long IB macro names */
#define OP(x) UC_OP(x)
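/*
 * UC_OP() comes from the driver's verbs headers and is assumed here to paste
 * the UC transport prefix onto the opcode name, so that OP(SEND_FIRST), for
 * example, reads as IB_OPCODE_UC_SEND_FIRST in the code below.
 */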
/**
 * hfi1_make_uc_req - construct a request packet (SEND, RDMA write)
 * @qp: a pointer to the QP
 * @ps: the current packet state
 *
 * Assume s_lock is held.
 *
 * Return 1 if constructed; otherwise, return 0.
 */
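/*
 * This is the UC flavour of the per-QP "make request" hook: the send engine
 * calls it repeatedly, with s_lock held, and each call builds at most one
 * packet, returning 0 once there is nothing more to send or a resource such
 * as a txreq is unavailable.
 */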
int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_other_headers *ohdr;
	struct rvt_swqe *wqe;
	u32 hwords;
	u32 bth0 = 0;
	u32 len;
	u32 pmtu = qp->pmtu;
	int middle = 0;

	ps->s_txreq = get_txreq(ps->dev, qp);
	if (IS_ERR(ps->s_txreq))
		goto bail_no_tx;

	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
		if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_last == READ_ONCE(qp->s_head))
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
		if (iowait_sdma_pending(&priv->s_iowait)) {
			qp->s_flags |= RVT_S_WAIT_DMA;
			goto bail;
		}
		clear_ahg(qp);
		wqe = rvt_get_swqe_ptr(qp, qp->s_last);
		hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done_free_tx;
	}
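	/*
	 * Pick the header template for this QP: 9B (IB-style LRH) vs 16B
	 * (OPA extended LRH), with or without a GRH.  hwords counts the
	 * header length in 32-bit words, e.g. 9B LRH + BTH = (8 + 12) / 4
	 * = 5 words, and 16B LRH + BTH = (16 + 12) / 4 = 7 words.
	 */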
	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
		/* header size in 32-bit words LRH+BTH = (8+12)/4. */
		hwords = 5;
		if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
	} else {
		/* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
		hwords = 7;
		if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
		    (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
		else
			ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
	}
	/* Get the next send request. */
	wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
	qp->s_wqe = NULL;
	switch (qp->s_state) {
	default:
		if (!(ib_rvt_state_ops[qp->state] &
		      RVT_PROCESS_NEXT_SEND_OK))
			goto bail;
		/* Check if send work queue is empty. */
		smp_read_barrier_depends(); /* see post_one_send() */
		if (qp->s_cur == READ_ONCE(qp->s_head)) {
			clear_ahg(qp);
			goto bail;
		}
		/*
		 * Local operations are processed immediately
		 * after all prior requests have completed.
		 */
		if (wqe->wr.opcode == IB_WR_REG_MR ||
		    wqe->wr.opcode == IB_WR_LOCAL_INV) {
			int local_ops = 0;
			int err = 0;

			if (qp->s_last != qp->s_cur)
				goto bail;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
				err = rvt_invalidate_rkey(
					qp, wqe->wr.ex.invalidate_rkey);
				local_ops = 1;
			}
			hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
							: IB_WC_SUCCESS);
			if (local_ops)
				atomic_dec(&qp->local_ops_pending);
			goto done_free_tx;
		}
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_sge.total_len = wqe->length;
		len = wqe->length;
		qp->s_len = len;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND) {
				qp->s_state = OP(SEND_ONLY);
			} else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;
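		/*
		 * RDMA WRITE requests carry a RETH (remote address, rkey and
		 * length) immediately after the BTH; any immediate data then
		 * follows the RETH rather than the BTH.
		 */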
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->rdma_wr.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->rdma_wr.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
				qp->s_state = OP(RDMA_WRITE_ONLY);
			} else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= IB_BTH_SOLICITED;
			}
			qp->s_wqe = wqe;
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			break;

		default:
			goto bail;
		}
		break;
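	/*
	 * The cases below continue a message started on a previous call:
	 * each pass emits one pmtu-sized MIDDLE packet until less than a
	 * pmtu of payload remains, and a LAST packet then ends the message.
	 */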
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND) {
			qp->s_state = OP(SEND_LAST);
		} else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= IB_BTH_SOLICITED;
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			middle = HFI1_CAP_IS_KSET(SDMA_AHG);
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
			qp->s_state = OP(RDMA_WRITE_LAST);
		} else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.ex.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= IB_BTH_SOLICITED;
		}
		qp->s_wqe = wqe;
		if (++qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	}
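	/*
	 * s_state now holds the UC opcode for this packet; it is placed in
	 * the top byte of BTH word 0 (bth0 | (qp->s_state << 24)), and the
	 * running send PSN is masked with mask_psn() and post-incremented as
	 * the header is built.
	 */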
	qp->s_len -= len;
	ps->s_txreq->hdr_dwords = hwords;
	ps->s_txreq->sde = priv->s_sde;
	ps->s_txreq->ss = &qp->s_sge;
	ps->s_txreq->s_cur_size = len;
	hfi1_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24),
			     mask_psn(qp->s_psn++), middle, ps);
	return 1;

done_free_tx:
	hfi1_put_txreq(ps->s_txreq);
	ps->s_txreq = NULL;
	return 1;

bail:
	hfi1_put_txreq(ps->s_txreq);
bail_no_tx:
	ps->s_txreq = NULL;
	qp->s_flags &= ~RVT_S_BUSY;
	return 0;
}
/**
 * hfi1_uc_rcv - handle an incoming UC packet
 * @packet: the packet structure holding the header, payload, length,
 *          receive flags and the QP the packet arrived on
 *
 * This is called from qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void hfi1_uc_rcv(struct hfi1_packet *packet)
{
	struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
	void *data = packet->payload;
	u32 tlen = packet->tlen;
	struct rvt_qp *qp = packet->qp;
	struct ib_other_headers *ohdr = packet->ohdr;
	u32 opcode = packet->opcode;
	u32 hdrsize = packet->hlen;
	u32 psn;
	u32 pad = packet->pad;
	struct ib_wc wc;
	u32 pmtu = qp->pmtu;
	struct ib_reth *reth;
	int ret;
	u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
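	/*
	 * extra_bytes covers everything that follows the payload: the pad
	 * bytes, the ICRC (SIZE_OF_CRC is in 32-bit words, hence the << 2)
	 * and packet->extra_byte, which accounts for the extra tail byte
	 * carried by 16B packets.
	 */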
	if (hfi1_ruc_check_hdr(ibp, packet))
		return;

	process_ecn(qp, packet, true);

	psn = ib_bth_get_psn(ohdr);
	/* Compare the PSN versus the expected PSN. */
	if (unlikely(cmp_psn(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
inv:
		if (qp->r_state == OP(SEND_FIRST) ||
		    qp->r_state == OP(SEND_MIDDLE)) {
			set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
			qp->r_sge.num_sge = 0;
		} else {
			rvt_put_ss(&qp->r_sge);
		}
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;
		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;
		default:
			goto drop;
		}
	}
	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
		rvt_comm_est(qp);
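	/*
	 * Payload processing: SEND* packets consume a receive WQE and are
	 * copied into the posted receive buffers, while RDMA WRITE* packets
	 * are copied to the remote address described by the RETH once the
	 * rkey has been validated.
	 */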
	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
send_first:
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			qp->r_sge = qp->s_rdma_read_sge;
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 0);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
			/*
			 * qp->s_rdma_read_sge will be the owner
			 * of the mr references.
			 */
			qp->s_rdma_read_sge = qp->r_sge;
		}
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto no_immediate_data;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		/*
		 * There will be no padding for 9B packets, but 16B packets
		 * will come in with some padding since we always add
		 * CRC and LT bytes, which need to be flit aligned.
		 */
		if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
			goto rewind;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto rewind;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
		break;
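	/*
	 * The *_LAST handlers below build the work completion for the
	 * receive WQE claimed by the FIRST/ONLY packet of this message and
	 * post it to the receive CQ.
	 */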
	case OP(SEND_LAST_WITH_IMMEDIATE):
send_last_imm:
		wc.ex.imm_data = ohdr->u.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		goto send_last;
	case OP(SEND_LAST):
no_immediate_data:
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
send_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + extra_bytes)))
			goto rewind;
		/* Don't count the CRC. */
		tlen -= (hdrsize + extra_bytes);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto rewind;
		wc.opcode = IB_WC_RECV;
		hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
		rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.qp = &qp->ibqp;
		wc.src_qp = qp->remote_qpn;
		wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
		/*
		 * It seems that IB mandates the presence of an SL in a
		 * work completion only for the UD transport (see section
		 * 11.4.2 of IBTA Vol. 1).
		 *
		 * However, the way the SL is chosen below is consistent
		 * with the way that IB/qib works and is trying to avoid
		 * introducing incompatibilities.
		 *
		 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
		 */
		wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
		/* zero fields that are N/A */
		wc.vendor_err = 0;
		wc.pkey_index = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
			     ib_bth_is_solicited(ohdr));
		break;
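	/*
	 * RDMA WRITE: validate the rkey carried in the RETH and set up an
	 * SGE covering the target buffer.  Only a WRITE with immediate data
	 * consumes a receive WQE and generates a completion; a plain WRITE
	 * completes silently on the responder side.
	 */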
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
rdma_first:
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			goto drop;
		}
		reth = &ohdr->u.rc.reth;
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		qp->r_sge.sg_list = NULL;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey */
			ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len,
					 vaddr, rkey, IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok))
				goto drop;
			qp->r_sge.num_sge = 1;
		} else {
			qp->r_sge.num_sge = 0;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (opcode == OP(RDMA_WRITE_ONLY)) {
			goto rdma_last;
		} else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) {
			wc.ex.imm_data = ohdr->u.rc.imm_data;
			goto rdma_last_imm;
		}
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto drop;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
		break;
	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		wc.ex.imm_data = ohdr->u.imm_data;
rdma_last_imm:
		wc.wc_flags = IB_WC_WITH_IMM;

		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + extra_bytes);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		if (test_and_clear_bit(RVT_R_REWIND_SGE, &qp->r_aflags)) {
			rvt_put_ss(&qp->s_rdma_read_sge);
		} else {
			ret = hfi1_rvt_get_rwqe(qp, 1);
			if (ret < 0)
				goto op_err;
			if (!ret)
				goto drop;
		}
		wc.byte_len = qp->r_len;
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		goto last_imm;
	case OP(RDMA_WRITE_LAST):
rdma_last:
		/* Check for invalid length. */
		/* LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto drop;
		/* Don't count the CRC. */
		tlen -= (hdrsize + extra_bytes);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
			goto drop;
		hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
		rvt_put_ss(&qp->r_sge);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		goto drop;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	return;

rewind:
	set_bit(RVT_R_REWIND_SGE, &qp->r_aflags);
	qp->r_sge.num_sge = 0;
drop:
	ibp->rvp.n_pkt_drops++;
	return;

op_err:
	rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
}