Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Dec 2016 18:38:48 +0000 (10:38 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 23 Dec 2016 18:38:48 +0000 (10:38 -0800)
Pull rdma fixes from Doug Ledford:
 "First round of -rc fixes for 4.10 kernel:

   - a series of qedr fixes
   - a series of rxe fixes
   - one i40iw fix
   - one cma fix
   - one cxgb4 fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/rxe: Don't check for null ptr in send()
  IB/rxe: Drop future atomic/read packets rather than retrying
  IB/rxe: Use BTH_PSN_MASK when ACKing duplicate sends
  qedr: Always notify the verb consumer of flushed CQEs
  qedr: clear the vendor error field in the work completion
  qedr: post_send/recv according to QP state
  qedr: ignore inline flag in read verbs
  qedr: modify QP state to error when destroying it
  qedr: return correct value on modify qp
  qedr: return error if destroy CQ failed
  qedr: configure the number of CQEs on CQ creation
  i40iw: Set 128B as the only supported RQ WQE size
  IB/cma: Fix a race condition in iboe_addr_get_sgid()
  IB/rxe: Fix a memory leak in rxe_qp_cleanup()
  iw_cxgb4: set correct FetchBurstMax for QPs

drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c

index b7ac97b27c88c2fe11ad11f564ce786085d3217c,108c8837ba58ac7ad5508efba8bc21b6b5729132..cda5542e13a206347447a49f18f9e8cb930e7c8c
@@@ -321,7 -321,8 +321,8 @@@ static int create_qp(struct c4iw_rdev *
                FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
-               FW_RI_RES_WR_FBMAX_V(2) |
+               (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
+                                        FW_RI_RES_WR_FBMAX_V(3)) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
                FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
-               FW_RI_RES_WR_FBMAX_V(2) |
+               FW_RI_RES_WR_FBMAX_V(3) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
@@@ -706,8 -707,12 +707,8 @@@ static int build_memreg(struct t4_sq *s
        return 0;
  }
  
 -static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe,
 -                        struct ib_send_wr *wr, u8 *len16)
 +static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
  {
 -      struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8);
 -
 -      mhp->attr.state = 0;
        wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
        wqe->inv.r2 = 0;
        *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
@@@ -793,13 -798,11 +794,13 @@@ int c4iw_post_send(struct ib_qp *ibqp, 
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
 +              *bad_wr = wr;
                return -EINVAL;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
 +              *bad_wr = wr;
                return -ENOMEM;
        }
        while (wr) {
                case IB_WR_RDMA_READ_WITH_INV:
                        fw_opcode = FW_RI_RDMA_READ_WR;
                        swsqe->opcode = FW_RI_READ_REQ;
 -                      if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
 +                      if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
 +                              c4iw_invalidate_mr(qhp->rhp,
 +                                                 wr->sg_list[0].lkey);
                                fw_flags = FW_RI_RDMA_READ_INVALIDATE;
 -                      else
 +                      } else {
                                fw_flags = 0;
 +                      }
                        err = build_rdma_read(wqe, wr, &len16);
                        if (err)
                                break;
                                fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
                        fw_opcode = FW_RI_INV_LSTAG_WR;
                        swsqe->opcode = FW_RI_LOCAL_INV;
 -                      err = build_inv_stag(qhp->rhp, wqe, wr, &len16);
 +                      err = build_inv_stag(wqe, wr, &len16);
 +                      c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
                        break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __func__,
@@@ -936,13 -935,11 +937,13 @@@ int c4iw_post_receive(struct ib_qp *ibq
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
 +              *bad_wr = wr;
                return -EINVAL;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
 +              *bad_wr = wr;
                return -ENOMEM;
        }
        while (wr) {
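
A note on the iw_cxgb4 hunks above: FetchBurstMax caps how many bytes the SGE fetches per burst when it reads work requests from the queue, and the stated rationale for the fix is that a WR larger than the old burst size could be fetched partially, which the SGE reports as a fatal "bad WR" error; host-memory queues therefore move to the larger burst while on-chip SQs keep the old value. The *bad_wr assignments added to the post_send/post_receive error paths follow the usual verbs convention of reporting the first un-posted WR back to the caller. As a hedged sketch of the burst selection (the 64B << N scaling and the helper name are assumptions for illustration, not taken from this diff):

	/*
	 * Hypothetical helper, not in the driver: FBMAX is assumed to
	 * encode a burst of 64B << N, so N=2 selects 256B and N=3
	 * selects 512B.
	 */
	static inline u32 fbmax_for_sq(struct t4_wq *wq)
	{
		/* on-chip SQs stay at 256B; host-memory SQs fetch 512B
		 * so a large WR is pulled in whole */
		return t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					       FW_RI_RES_WR_FBMAX_V(3);
	}
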
index 16967cdb45dffb6c9841748ac0b13f379d02adb9,10ec699834fa3b58f7b158ae29326e4f3874da62..342e78163613dfdc719b171e1396d01fd44432eb
@@@ -243,8 -243,10 +243,8 @@@ static struct socket *rxe_setup_udp_tun
  {
        int err;
        struct socket *sock;
 -      struct udp_port_cfg udp_cfg;
 -      struct udp_tunnel_sock_cfg tnl_cfg;
 -
 -      memset(&udp_cfg, 0, sizeof(udp_cfg));
 +      struct udp_port_cfg udp_cfg = {0};
 +      struct udp_tunnel_sock_cfg tnl_cfg = {0};
  
        if (ipv6) {
                udp_cfg.family = AF_INET6;
                return ERR_PTR(err);
        }
  
 -      tnl_cfg.sk_user_data = NULL;
        tnl_cfg.encap_type = 1;
        tnl_cfg.encap_rcv = rxe_udp_encap_recv;
 -      tnl_cfg.encap_destroy = NULL;
  
        /* Setup UDP tunnel */
        setup_udp_tunnel_sock(net, sock, &tnl_cfg);
@@@ -455,8 -459,7 +455,7 @@@ static int send(struct rxe_dev *rxe, st
                return -EAGAIN;
        }
  
-       if (pkt->qp)
-               atomic_inc(&pkt->qp->skb_out);
+       atomic_inc(&pkt->qp->skb_out);
        kfree_skb(skb);
  
        return 0;
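
Two rxe_net.c changes meet above: the tunnel-config structures are now zeroed at definition with = {0}, which zero-initializes every member and makes the later sk_user_data/encap_destroy NULL stores redundant, and send() increments skb_out unconditionally because pkt->qp is always valid by the time send() runs. A minimal sketch of the initializer idiom (the struct here is invented for illustration):

	struct tnl_cfg_example {
		int encap_type;
		void *sk_user_data;
	};

	/* = {0} starts every member at zero/NULL, so no memset() and
	 * no explicit NULL assignments are needed afterwards */
	struct tnl_cfg_example cfg = {0};
	cfg.encap_type = 1;	/* only non-zero fields need stores */
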
index c3e60e4bde6e2a3ba5e0953b531a42f65927b717,4b6e455e72a1eca463782029dc48d8e234614e80..486d576e55bc016dda1f8ddad6b8f00941f66727
@@@ -522,7 -522,6 +522,7 @@@ static void rxe_qp_reset(struct rxe_qp 
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
 +              rxe_queue_reset(qp->sq.queue);
        }
  
        /* cleanup attributes */
@@@ -574,7 -573,6 +574,7 @@@ void rxe_qp_error(struct rxe_qp *qp
  {
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
 +      qp->attr.qp_state = IB_QPS_ERR;
  
        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);
@@@ -855,4 -853,5 +855,5 @@@ void rxe_qp_cleanup(void *arg
        free_rd_atomic_resources(qp);
  
        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
+       sock_release(qp->sk);
  }
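
The rxe_qp.c hunks close three loops: resetting a QP now also resets its send queue ring via rxe_queue_reset(), rxe_qp_error() records IB_QPS_ERR in the cached attributes so the reported QP state matches reality, and rxe_qp_cleanup() pairs the existing kernel_sock_shutdown() with sock_release(). Shutdown only terminates traffic on the socket; releasing it drops the reference taken when the per-QP socket was created, so without it every destroyed QP leaked its socket. A minimal sketch of the create/teardown pairing, assuming the socket came from sock_create_kern() as rxe does at QP init:

	struct socket *sk;
	int err;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &sk);
	if (err < 0)
		return err;

	/* ... use sk ... */

	kernel_sock_shutdown(sk, SHUT_RDWR);	/* stop traffic */
	sock_release(sk);	/* drop the creation ref; omitting this leaks */
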