Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland...
authorLinus Torvalds <torvalds@linux-foundation.org>
Wed, 8 May 2013 22:29:48 +0000 (15:29 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 8 May 2013 22:29:48 +0000 (15:29 -0700)
Pull InfiniBand/RDMA changes from Roland Dreier:
 - XRC transport fixes
 - Fix DHCP on IPoIB
 - mlx4 preparations for flow steering
 - iSER fixes
 - miscellaneous other fixes

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (23 commits)
  IB/iser: Add support for iser CM REQ additional info
  IB/iser: Return error to upper layers on EAGAIN registration failures
  IB/iser: Move informational messages from error to info level
  IB/iser: Add module version
  mlx4_core: Expose a few helpers to fill DMFS HW strucutures
  mlx4_core: Directly expose fields of DMFS HW rule control segment
  mlx4_core: Change a few DMFS fields names to match firmware spec
  mlx4: Match DMFS promiscuous field names to firmware spec
  mlx4_core: Move DMFS HW structs to common header file
  IB/mlx4: Set link type for RAW PACKET QPs in the QP context
  IB/mlx4: Disable VLAN stripping for RAW PACKET QPs
  mlx4_core: Reduce warning message for SRQ_LIMIT event to debug level
  RDMA/iwcm: Don't touch cmid after dropping reference
  IB/qib: Correct qib_verbs_register_sysfs() error handling
  IB/ipath: Correct ipath_verbs_register_sysfs() error handling
  RDMA/cxgb4: Fix SQ allocation when on-chip SQ is disabled
  SRPT: Fix odd use of WARN_ON()
  IPoIB: Fix ipoib_hard_header() return value
  RDMA: Rename random32() to prandom_u32()
  RDMA/cxgb3: Fix uninitialized variable
  ...

1  2 
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
include/linux/mlx4/device.h

index 5b059e2d80cc18019b060a7143bdf59098879ef0,ed49ab345b6ea92ce44824d94b9e709dbf3ae814..232040447e8a23803cc73e64c203734d36492c2e
@@@ -42,21 -42,10 +42,21 @@@ static int ocqp_support = 1
  module_param(ocqp_support, int, 0644);
  MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
  
 -int db_fc_threshold = 2000;
 +int db_fc_threshold = 1000;
  module_param(db_fc_threshold, int, 0644);
 -MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic "
 -               "db flow control mode (default = 2000)");
 +MODULE_PARM_DESC(db_fc_threshold,
 +               "QP count/threshold that triggers"
 +               " automatic db flow control mode (default = 1000)");
 +
 +int db_coalescing_threshold;
 +module_param(db_coalescing_threshold, int, 0644);
 +MODULE_PARM_DESC(db_coalescing_threshold,
 +               "QP count/threshold that triggers"
 +               " disabling db coalescing (default = 0)");
 +
 +static int max_fr_immd = T4_MAX_FR_IMMD;
 +module_param(max_fr_immd, int, 0644);
 +MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
  
  static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
  {
@@@ -87,7 -76,7 +87,7 @@@ static void dealloc_sq(struct c4iw_rde
  
  static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
  {
 -      if (!ocqp_support || !t4_ocqp_supported())
 +      if (!ocqp_support || !ocqp_supported(&rdev->lldi))
                return -ENOSYS;
        sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
        if (!sq->dma_addr)
@@@ -111,6 -100,16 +111,16 @@@ static int alloc_host_sq(struct c4iw_rd
        return 0;
  }
  
+ static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
+ {
+       int ret = -ENOSYS;
+       if (user)
+               ret = alloc_oc_sq(rdev, sq);
+       if (ret)
+               ret = alloc_host_sq(rdev, sq);
+       return ret;
+ }
  static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                      struct c4iw_dev_ucontext *uctx)
  {
@@@ -140,7 -139,7 +150,7 @@@ static int create_qp(struct c4iw_rdev *
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
 -      int ret;
 +      int ret = 0;
        int eqsize;
  
        wq->sq.qid = c4iw_get_qpid(rdev, uctx);
                goto free_sw_rq;
        }
  
-       if (user) {
-               if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
-                       goto free_hwaddr;
-       } else {
-               ret = alloc_host_sq(rdev, &wq->sq);
-               if (ret)
-                       goto free_hwaddr;
-       }
+       ret = alloc_sq(rdev, &wq->sq, user);
+       if (ret)
+               goto free_hwaddr;
        memset(wq->sq.queue, 0, wq->sq.memsize);
        dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
  
@@@ -542,7 -535,7 +546,7 @@@ static int build_rdma_recv(struct c4iw_
  }
  
  static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
 -                       struct ib_send_wr *wr, u8 *len16)
 +                       struct ib_send_wr *wr, u8 *len16, u8 t5dev)
  {
  
        struct fw_ri_immd *imdp;
        wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
        wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
                                        0xffffffff);
 -      WARN_ON(pbllen > T4_MAX_FR_IMMD);
 -      imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
 -      imdp->op = FW_RI_DATA_IMMD;
 -      imdp->r1 = 0;
 -      imdp->r2 = 0;
 -      imdp->immdlen = cpu_to_be32(pbllen);
 -      p = (__be64 *)(imdp + 1);
 -      rem = pbllen;
 -      for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
 -              *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
 -              rem -= sizeof *p;
 -              if (++p == (__be64 *)&sq->queue[sq->size])
 -                      p = (__be64 *)sq->queue;
 -      }
 -      BUG_ON(rem < 0);
 -      while (rem) {
 -              *p = 0;
 -              rem -= sizeof *p;
 -              if (++p == (__be64 *)&sq->queue[sq->size])
 -                      p = (__be64 *)sq->queue;
 +
 +      if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
 +              struct c4iw_fr_page_list *c4pl =
 +                      to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
 +              struct fw_ri_dsgl *sglp;
 +
 +              for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
 +                      wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
 +                              cpu_to_be64((u64)
 +                              wr->wr.fast_reg.page_list->page_list[i]);
 +              }
 +
 +              sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
 +              sglp->op = FW_RI_DATA_DSGL;
 +              sglp->r1 = 0;
 +              sglp->nsge = cpu_to_be16(1);
 +              sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
 +              sglp->len0 = cpu_to_be32(pbllen);
 +
 +              *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
 +      } else {
 +              imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
 +              imdp->op = FW_RI_DATA_IMMD;
 +              imdp->r1 = 0;
 +              imdp->r2 = 0;
 +              imdp->immdlen = cpu_to_be32(pbllen);
 +              p = (__be64 *)(imdp + 1);
 +              rem = pbllen;
 +              for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
 +                      *p = cpu_to_be64(
 +                              (u64)wr->wr.fast_reg.page_list->page_list[i]);
 +                      rem -= sizeof(*p);
 +                      if (++p == (__be64 *)&sq->queue[sq->size])
 +                              p = (__be64 *)sq->queue;
 +              }
 +              BUG_ON(rem < 0);
 +              while (rem) {
 +                      *p = 0;
 +                      rem -= sizeof(*p);
 +                      if (++p == (__be64 *)&sq->queue[sq->size])
 +                              p = (__be64 *)sq->queue;
 +              }
 +              *len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
 +                                    + pbllen, 16);
        }
 -      *len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
        return 0;
  }
  
@@@ -709,10 -679,7 +713,10 @@@ int c4iw_post_send(struct ib_qp *ibqp, 
                case IB_WR_FAST_REG_MR:
                        fw_opcode = FW_RI_FR_NSMR_WR;
                        swsqe->opcode = FW_RI_FAST_REGISTER;
 -                      err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16);
 +                      err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
 +                                          is_t5(
 +                                          qhp->rhp->rdev.lldi.adapter_type) ?
 +                                          1 : 0);
                        break;
                case IB_WR_LOCAL_INV:
                        if (wr->send_flags & IB_SEND_FENCE)
@@@ -1484,9 -1451,6 +1488,9 @@@ int c4iw_destroy_qp(struct ib_qp *ib_qp
                rhp->db_state = NORMAL;
                idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
        }
 +      if (db_coalescing_threshold >= 0)
 +              if (rhp->qpcnt <= db_coalescing_threshold)
 +                      cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
        spin_unlock_irq(&rhp->lock);
        atomic_dec(&qhp->refcnt);
        wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@@ -1598,15 -1562,11 +1602,15 @@@ struct ib_qp *c4iw_create_qp(struct ib_
        spin_lock_irq(&rhp->lock);
        if (rhp->db_state != NORMAL)
                t4_disable_wq_db(&qhp->wq);
 -      if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
 +      rhp->qpcnt++;
 +      if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
                rhp->rdev.stats.db_state_transitions++;
                rhp->db_state = FLOW_CONTROL;
                idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
        }
 +      if (db_coalescing_threshold >= 0)
 +              if (rhp->qpcnt > db_coalescing_threshold)
 +                      cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
        ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        spin_unlock_irq(&rhp->lock);
        if (ret)
index 73b3a7132587b6fac4e05dfe33ed215c98dc104f,dab4b5188a27439f3575d9677de8ac8657bddd08..d5e60f44ba5ad7c4f5ad53d7e0fe348248ed797b
@@@ -33,6 -33,7 +33,7 @@@
  
  #include <linux/mlx4/cq.h>
  #include <linux/mlx4/qp.h>
+ #include <linux/mlx4/srq.h>
  #include <linux/slab.h>
  
  #include "mlx4_ib.h"
@@@ -228,7 -229,7 +229,7 @@@ struct ib_cq *mlx4_ib_create_cq(struct 
                vector = dev->eq_table[vector % ibdev->num_comp_vectors];
  
        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
 -                          cq->db.dma, &cq->mcq, vector, 0);
 +                          cq->db.dma, &cq->mcq, vector, 0, 0);
        if (err)
                goto err_dbmap;
  
@@@ -585,6 -586,7 +586,7 @@@ static int mlx4_ib_poll_one(struct mlx4
        struct mlx4_qp *mqp;
        struct mlx4_ib_wq *wq;
        struct mlx4_ib_srq *srq;
+       struct mlx4_srq *msrq = NULL;
        int is_send;
        int is_error;
        u32 g_mlpath_rqpn;
@@@ -653,6 -655,20 +655,20 @@@ repoll
  
        wc->qp = &(*cur_qp)->ibqp;
  
+       if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
+               u32 srq_num;
+               g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
+               srq_num       = g_mlpath_rqpn & 0xffffff;
+               /* SRQ is also in the radix tree */
+               msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
+                                      srq_num);
+               if (unlikely(!msrq)) {
+                       pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
+                               cq->mcq.cqn, srq_num);
+                       return -EINVAL;
+               }
+       }
        if (is_send) {
                wq = &(*cur_qp)->sq;
                if (!(*cur_qp)->sq_signal_bits) {
                wqe_ctr = be16_to_cpu(cqe->wqe_index);
                wc->wr_id = srq->wrid[wqe_ctr];
                mlx4_ib_free_srq_wqe(srq, wqe_ctr);
+       } else if (msrq) {
+               srq = to_mibsrq(msrq);
+               wqe_ctr = be16_to_cpu(cqe->wqe_index);
+               wc->wr_id = srq->wrid[wqe_ctr];
+               mlx4_ib_free_srq_wqe(srq, wqe_ctr);
        } else {
                wq        = &(*cur_qp)->rq;
                tail      = wq->tail & (wq->wqe_cnt - 1);
index 554b9063da5492aab4763051146121eca6774e84,31dd2a7a880f7cbe0544d6c3f4f890d862fbb914..b6e049a3c7a853b92d5c9d126c61a2d850f290cb
@@@ -730,8 -730,7 +730,8 @@@ static int ipoib_start_xmit(struct sk_b
                if ((header->proto != htons(ETH_P_IP)) &&
                    (header->proto != htons(ETH_P_IPV6)) &&
                    (header->proto != htons(ETH_P_ARP)) &&
 -                  (header->proto != htons(ETH_P_RARP))) {
 +                  (header->proto != htons(ETH_P_RARP)) &&
 +                  (header->proto != htons(ETH_P_TIPC))) {
                        /* ethertype not supported by IPoIB */
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
        switch (header->proto) {
        case htons(ETH_P_IP):
        case htons(ETH_P_IPV6):
 +      case htons(ETH_P_TIPC):
                neigh = ipoib_neigh_get(dev, cb->hwaddr);
                if (unlikely(!neigh)) {
                        neigh_add_path(skb, cb->hwaddr, dev);
@@@ -830,7 -828,7 +830,7 @@@ static int ipoib_hard_header(struct sk_
         */
        memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
  
-       return 0;
+       return sizeof *header;
  }
  
  static void ipoib_set_mcast_list(struct net_device *dev)
index bcf4d118e98c9228b32ac196136b15fa515c7ace,20476844fb2033a4dcbd22cb4185dc535b3580ba..c9e6b62dd000955565fb0186d334221a834b0a40
@@@ -889,7 -889,7 +889,7 @@@ static int mlx4_en_flow_replace(struct 
                .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                .exclusive = 0,
                .allow_loopback = 1,
-               .promisc_mode = MLX4_FS_PROMISC_NONE,
+               .promisc_mode = MLX4_FS_REGULAR,
        };
  
        rule.port = priv->port;
        return err;
  }
  
 +static int mlx4_en_get_ts_info(struct net_device *dev,
 +                             struct ethtool_ts_info *info)
 +{
 +      struct mlx4_en_priv *priv = netdev_priv(dev);
 +      struct mlx4_en_dev *mdev = priv->mdev;
 +      int ret;
 +
 +      ret = ethtool_op_get_ts_info(dev, info);
 +      if (ret)
 +              return ret;
 +
 +      if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
 +              info->so_timestamping |=
 +                      SOF_TIMESTAMPING_TX_HARDWARE |
 +                      SOF_TIMESTAMPING_RX_HARDWARE |
 +                      SOF_TIMESTAMPING_RAW_HARDWARE;
 +
 +              info->tx_types =
 +                      (1 << HWTSTAMP_TX_OFF) |
 +                      (1 << HWTSTAMP_TX_ON);
 +
 +              info->rx_filters =
 +                      (1 << HWTSTAMP_FILTER_NONE) |
 +                      (1 << HWTSTAMP_FILTER_ALL);
 +      }
 +
 +      return ret;
 +}
 +
  const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_drvinfo = mlx4_en_get_drvinfo,
        .get_settings = mlx4_en_get_settings,
        .set_rxfh_indir = mlx4_en_set_rxfh_indir,
        .get_channels = mlx4_en_get_channels,
        .set_channels = mlx4_en_set_channels,
 +      .get_ts_info = mlx4_en_get_ts_info,
  };
  
  
index a69a908614e69b0760630204a2c7ba8518390ade,0860130f2b173a4014a7ec6a24a1183708e61028..b35f9470009363bad560f7142322436baa76d8a3
@@@ -127,7 -127,7 +127,7 @@@ static void mlx4_en_filter_work(struct 
                .queue_mode = MLX4_NET_TRANS_Q_LIFO,
                .exclusive = 1,
                .allow_loopback = 1,
-               .promisc_mode = MLX4_FS_PROMISC_NONE,
+               .promisc_mode = MLX4_FS_REGULAR,
                .port = priv->port,
                .priority = MLX4_DOMAIN_RFS,
        };
@@@ -356,8 -356,7 +356,8 @@@ static void mlx4_en_filter_rfs_expire(s
  }
  #endif
  
 -static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
 +static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
 +                                 __be16 proto, u16 vid)
  {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        return 0;
  }
  
 -static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
 +static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
 +                                  __be16 proto, u16 vid)
  {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
@@@ -448,7 -446,7 +448,7 @@@ static int mlx4_en_uc_steer_add(struct 
                        .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                        .exclusive = 0,
                        .allow_loopback = 1,
-                       .promisc_mode = MLX4_FS_PROMISC_NONE,
+                       .promisc_mode = MLX4_FS_REGULAR,
                        .priority = MLX4_DOMAIN_NIC,
                };
  
@@@ -795,7 -793,7 +795,7 @@@ static void mlx4_en_set_promisc_mode(st
                        err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                          priv->port,
                                                          priv->base_qpn,
-                                                         MLX4_FS_PROMISC_UPLINK);
+                                                         MLX4_FS_ALL_DEFAULT);
                        if (err)
                                en_err(priv, "Failed enabling promiscuous mode\n");
                        priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
@@@ -858,7 -856,7 +858,7 @@@ static void mlx4_en_clear_promisc_mode(
        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                     priv->port,
-                                                    MLX4_FS_PROMISC_UPLINK);
+                                                    MLX4_FS_ALL_DEFAULT);
                if (err)
                        en_err(priv, "Failed disabling promiscuous mode\n");
                priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
@@@ -919,7 -917,7 +919,7 @@@ static void mlx4_en_do_multicast(struc
                                err = mlx4_flow_steer_promisc_add(mdev->dev,
                                                                  priv->port,
                                                                  priv->base_qpn,
-                                                                 MLX4_FS_PROMISC_ALL_MULTI);
+                                                                 MLX4_FS_MC_DEFAULT);
                                break;
  
                        case MLX4_STEERING_MODE_B0:
                        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                                err = mlx4_flow_steer_promisc_remove(mdev->dev,
                                                                     priv->port,
-                                                                    MLX4_FS_PROMISC_ALL_MULTI);
+                                                                    MLX4_FS_MC_DEFAULT);
                                break;
  
                        case MLX4_STEERING_MODE_B0:
@@@ -1361,27 -1359,6 +1361,27 @@@ static void mlx4_en_do_get_stats(struc
        mutex_unlock(&mdev->state_lock);
  }
  
 +/* mlx4_en_service_task - Run service task for tasks that needed to be done
 + * periodically
 + */
 +static void mlx4_en_service_task(struct work_struct *work)
 +{
 +      struct delayed_work *delay = to_delayed_work(work);
 +      struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
 +                                               service_task);
 +      struct mlx4_en_dev *mdev = priv->mdev;
 +
 +      mutex_lock(&mdev->state_lock);
 +      if (mdev->device_up) {
 +              if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
 +                      mlx4_en_ptp_overflow_check(mdev);
 +
 +              queue_delayed_work(mdev->workqueue, &priv->service_task,
 +                                 SERVICE_TASK_DELAY);
 +      }
 +      mutex_unlock(&mdev->state_lock);
 +}
 +
  static void mlx4_en_linkstate(struct work_struct *work)
  {
        struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
@@@ -1621,10 -1598,10 +1621,10 @@@ void mlx4_en_stop_port(struct net_devic
                                 MLX4_EN_FLAG_MC_PROMISC);
                mlx4_flow_steer_promisc_remove(mdev->dev,
                                               priv->port,
-                                              MLX4_FS_PROMISC_UPLINK);
+                                              MLX4_FS_ALL_DEFAULT);
                mlx4_flow_steer_promisc_remove(mdev->dev,
                                               priv->port,
-                                              MLX4_FS_PROMISC_ALL_MULTI);
+                                              MLX4_FS_MC_DEFAULT);
        } else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
                priv->flags &= ~MLX4_EN_FLAG_PROMISC;
  
@@@ -1886,7 -1863,6 +1886,7 @@@ void mlx4_en_destroy_netdev(struct net_
                mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
  
        cancel_delayed_work(&priv->stats_task);
 +      cancel_delayed_work(&priv->service_task);
        /* flush any pending task for this netdev */
        flush_workqueue(mdev->workqueue);
  
@@@ -1938,75 -1914,6 +1938,75 @@@ static int mlx4_en_change_mtu(struct ne
        return 0;
  }
  
 +static int mlx4_en_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
 +{
 +      struct mlx4_en_priv *priv = netdev_priv(dev);
 +      struct mlx4_en_dev *mdev = priv->mdev;
 +      struct hwtstamp_config config;
 +
 +      if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
 +              return -EFAULT;
 +
 +      /* reserved for future extensions */
 +      if (config.flags)
 +              return -EINVAL;
 +
 +      /* device doesn't support time stamping */
 +      if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS))
 +              return -EINVAL;
 +
 +      /* TX HW timestamp */
 +      switch (config.tx_type) {
 +      case HWTSTAMP_TX_OFF:
 +      case HWTSTAMP_TX_ON:
 +              break;
 +      default:
 +              return -ERANGE;
 +      }
 +
 +      /* RX HW timestamp */
 +      switch (config.rx_filter) {
 +      case HWTSTAMP_FILTER_NONE:
 +              break;
 +      case HWTSTAMP_FILTER_ALL:
 +      case HWTSTAMP_FILTER_SOME:
 +      case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
 +      case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
 +      case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
 +      case HWTSTAMP_FILTER_PTP_V2_EVENT:
 +      case HWTSTAMP_FILTER_PTP_V2_SYNC:
 +      case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
 +              config.rx_filter = HWTSTAMP_FILTER_ALL;
 +              break;
 +      default:
 +              return -ERANGE;
 +      }
 +
 +      if (mlx4_en_timestamp_config(dev, config.tx_type, config.rx_filter)) {
 +              config.tx_type = HWTSTAMP_TX_OFF;
 +              config.rx_filter = HWTSTAMP_FILTER_NONE;
 +      }
 +
 +      return copy_to_user(ifr->ifr_data, &config,
 +                          sizeof(config)) ? -EFAULT : 0;
 +}
 +
 +static int mlx4_en_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 +{
 +      switch (cmd) {
 +      case SIOCSHWTSTAMP:
 +              return mlx4_en_hwtstamp_ioctl(dev, ifr);
 +      default:
 +              return -EOPNOTSUPP;
 +      }
 +}
 +
  static int mlx4_en_set_features(struct net_device *netdev,
                netdev_features_t features)
  {
  
  }
  
 -static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 -                         struct net_device *dev,
 -                         const unsigned char *addr, u16 flags)
 +static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
  {
 -      struct mlx4_en_priv *priv = netdev_priv(dev);
 -      struct mlx4_dev *mdev = priv->mdev->dev;
 -      int err;
 -
 -      if (!mlx4_is_mfunc(mdev))
 -              return -EOPNOTSUPP;
 +      struct mlx4_en_priv *en_priv = netdev_priv(dev);
 +      struct mlx4_en_dev *mdev = en_priv->mdev;
 +      u64 mac_u64 = mlx4_en_mac_to_u64(mac);
  
 -      /* Hardware does not support aging addresses, allow only
 -       * permanent addresses if ndm_state is given
 -       */
 -      if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
 -              en_info(priv, "Add FDB only supports static addresses\n");
 +      if (!is_valid_ether_addr(mac))
                return -EINVAL;
 -      }
  
 -      if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
 -              err = dev_uc_add_excl(dev, addr);
 -      else if (is_multicast_ether_addr(addr))
 -              err = dev_mc_add_excl(dev, addr);
 -      else
 -              err = -EINVAL;
 -
 -      /* Only return duplicate errors if NLM_F_EXCL is set */
 -      if (err == -EEXIST && !(flags & NLM_F_EXCL))
 -              err = 0;
 -
 -      return err;
 +      return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
  }
  
 -static int mlx4_en_fdb_del(struct ndmsg *ndm,
 -                         struct nlattr *tb[],
 -                         struct net_device *dev,
 -                         const unsigned char *addr)
 +static int mlx4_en_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
  {
 -      struct mlx4_en_priv *priv = netdev_priv(dev);
 -      struct mlx4_dev *mdev = priv->mdev->dev;
 -      int err;
 -
 -      if (!mlx4_is_mfunc(mdev))
 -              return -EOPNOTSUPP;
 +      struct mlx4_en_priv *en_priv = netdev_priv(dev);
 +      struct mlx4_en_dev *mdev = en_priv->mdev;
  
 -      if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
 -              en_info(priv, "Del FDB only supports static addresses\n");
 -              return -EINVAL;
 -      }
 +      return mlx4_set_vf_vlan(mdev->dev, en_priv->port, vf, vlan, qos);
 +}
  
 -      if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
 -              err = dev_uc_del(dev, addr);
 -      else if (is_multicast_ether_addr(addr))
 -              err = dev_mc_del(dev, addr);
 -      else
 -              err = -EINVAL;
 +static int mlx4_en_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
 +{
 +      struct mlx4_en_priv *en_priv = netdev_priv(dev);
 +      struct mlx4_en_dev *mdev = en_priv->mdev;
  
 -      return err;
 +      return mlx4_set_vf_spoofchk(mdev->dev, en_priv->port, vf, setting);
  }
  
 -static int mlx4_en_fdb_dump(struct sk_buff *skb,
 -                          struct netlink_callback *cb,
 -                          struct net_device *dev, int idx)
 +static int mlx4_en_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivf)
  {
 -      struct mlx4_en_priv *priv = netdev_priv(dev);
 -      struct mlx4_dev *mdev = priv->mdev->dev;
 -
 -      if (mlx4_is_mfunc(mdev))
 -              idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
 +      struct mlx4_en_priv *en_priv = netdev_priv(dev);
 +      struct mlx4_en_dev *mdev = en_priv->mdev;
  
 -      return idx;
 +      return mlx4_get_vf_config(mdev->dev, en_priv->port, vf, ivf);
  }
  
  static const struct net_device_ops mlx4_netdev_ops = {
        .ndo_set_mac_address    = mlx4_en_set_mac,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = mlx4_en_change_mtu,
 +      .ndo_do_ioctl           = mlx4_en_ioctl,
        .ndo_tx_timeout         = mlx4_en_tx_timeout,
        .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
  #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
  #endif
 -      .ndo_fdb_add            = mlx4_en_fdb_add,
 -      .ndo_fdb_del            = mlx4_en_fdb_del,
 -      .ndo_fdb_dump           = mlx4_en_fdb_dump,
 +};
 +
 +static const struct net_device_ops mlx4_netdev_ops_master = {
 +      .ndo_open               = mlx4_en_open,
 +      .ndo_stop               = mlx4_en_close,
 +      .ndo_start_xmit         = mlx4_en_xmit,
 +      .ndo_select_queue       = mlx4_en_select_queue,
 +      .ndo_get_stats          = mlx4_en_get_stats,
 +      .ndo_set_rx_mode        = mlx4_en_set_rx_mode,
 +      .ndo_set_mac_address    = mlx4_en_set_mac,
 +      .ndo_validate_addr      = eth_validate_addr,
 +      .ndo_change_mtu         = mlx4_en_change_mtu,
 +      .ndo_tx_timeout         = mlx4_en_tx_timeout,
 +      .ndo_vlan_rx_add_vid    = mlx4_en_vlan_rx_add_vid,
 +      .ndo_vlan_rx_kill_vid   = mlx4_en_vlan_rx_kill_vid,
 +      .ndo_set_vf_mac         = mlx4_en_set_vf_mac,
 +      .ndo_set_vf_vlan        = mlx4_en_set_vf_vlan,
 +      .ndo_set_vf_spoofchk    = mlx4_en_set_vf_spoofchk,
 +      .ndo_get_vf_config      = mlx4_en_get_vf_config,
 +#ifdef CONFIG_NET_POLL_CONTROLLER
 +      .ndo_poll_controller    = mlx4_en_netpoll,
 +#endif
 +      .ndo_set_features       = mlx4_en_set_features,
 +      .ndo_setup_tc           = mlx4_en_setup_tc,
 +#ifdef CONFIG_RFS_ACCEL
 +      .ndo_rx_flow_steer      = mlx4_en_filter_rfs,
 +#endif
  };
  
  int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
        INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
        INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
 +      INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
  #ifdef CONFIG_MLX4_EN_DCB
 -      if (!mlx4_is_slave(priv->mdev->dev))
 -              dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
 +      if (!mlx4_is_slave(priv->mdev->dev)) {
 +              if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
 +                      dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
 +              } else {
 +                      en_info(priv, "enabling only PFC DCB ops\n");
 +                      dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
 +              }
 +      }
  #endif
  
        for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
        spin_lock_init(&priv->filters_lock);
  #endif
  
 +      /* Initialize time stamping config */
 +      priv->hwtstamp_config.flags = 0;
 +      priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
 +      priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
 +
        /* Allocate page for receive rings */
        err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
                                MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
        /*
         * Initialize netdev entry points
         */
 -      dev->netdev_ops = &mlx4_netdev_ops;
 +      if (mlx4_is_master(priv->mdev->dev))
 +              dev->netdev_ops = &mlx4_netdev_ops_master;
 +      else
 +              dev->netdev_ops = &mlx4_netdev_ops;
        dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
  
        dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
        dev->features = dev->hw_features | NETIF_F_HIGHDMA |
 -                      NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
 -                      NETIF_F_HW_VLAN_FILTER;
 +                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
 +                      NETIF_F_HW_VLAN_CTAG_FILTER;
        dev->hw_features |= NETIF_F_LOOPBACK;
  
        if (mdev->dev->caps.steering_mode ==
        }
        mlx4_en_set_default_moderation(priv);
        queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
 +
 +      if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
 +              queue_delayed_work(mdev->workqueue, &priv->service_task,
 +                                 SERVICE_TASK_DELAY);
 +
        return 0;
  
  out:
index ffc78d2cb0cf648341e26733fac5e6508388fd3a,00b4e7be7c7eee52c56740543bd88a4aa606b6a5..f3e804f2a35f0bd2a9be0e32b7b577bed5518cfa
@@@ -645,25 -645,37 +645,37 @@@ static int find_entry(struct mlx4_dev *
        return err;
  }
  
+ static const u8 __promisc_mode[] = {
+       [MLX4_FS_REGULAR]   = 0x0,
+       [MLX4_FS_ALL_DEFAULT] = 0x1,
+       [MLX4_FS_MC_DEFAULT] = 0x3,
+       [MLX4_FS_UC_SNIFFER] = 0x4,
+       [MLX4_FS_MC_SNIFFER] = 0x5,
+ };
+ int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+                                   enum mlx4_net_trans_promisc_mode flow_type)
+ {
+       if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) {
+               mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
+               return -EINVAL;
+       }
+       return __promisc_mode[flow_type];
+ }
+ EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);
  static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
                                  struct mlx4_net_trans_rule_hw_ctrl *hw)
  {
-       static const u8 __promisc_mode[] = {
-               [MLX4_FS_PROMISC_NONE]   = 0x0,
-               [MLX4_FS_PROMISC_UPLINK] = 0x1,
-               [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
-               [MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
-       };
-       u32 dw = 0;
-       dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
-       dw |= ctrl->exclusive ? (1 << 2) : 0;
-       dw |= ctrl->allow_loopback ? (1 << 3) : 0;
-       dw |= __promisc_mode[ctrl->promisc_mode] << 8;
-       dw |= ctrl->priority << 16;
-       hw->ctrl = cpu_to_be32(dw);
+       u8 flags = 0;
+       flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
+       flags |= ctrl->exclusive ? (1 << 2) : 0;
+       flags |= ctrl->allow_loopback ? (1 << 3) : 0;
+       hw->flags = flags;
+       hw->type = __promisc_mode[ctrl->promisc_mode];
+       hw->prio = cpu_to_be16(ctrl->priority);
        hw->port = ctrl->port;
        hw->qpn = cpu_to_be32(ctrl->qpn);
  }
@@@ -677,29 -689,51 +689,51 @@@ const u16 __sw_id_hw[] = 
        [MLX4_NET_TRANS_RULE_ID_UDP]     = 0xE006
  };
  
+ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+                                 enum mlx4_net_trans_rule_id id)
+ {
+       if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+               mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+               return -EINVAL;
+       }
+       return __sw_id_hw[id];
+ }
+ EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
+ static const int __rule_hw_sz[] = {
+       [MLX4_NET_TRANS_RULE_ID_ETH] =
+               sizeof(struct mlx4_net_trans_rule_hw_eth),
+       [MLX4_NET_TRANS_RULE_ID_IB] =
+               sizeof(struct mlx4_net_trans_rule_hw_ib),
+       [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
+       [MLX4_NET_TRANS_RULE_ID_IPV4] =
+               sizeof(struct mlx4_net_trans_rule_hw_ipv4),
+       [MLX4_NET_TRANS_RULE_ID_TCP] =
+               sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+       [MLX4_NET_TRANS_RULE_ID_UDP] =
+               sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+ };
+ int mlx4_hw_rule_sz(struct mlx4_dev *dev,
+              enum mlx4_net_trans_rule_id id)
+ {
+       if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) {
+               mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
+               return -EINVAL;
+       }
+       return __rule_hw_sz[id];
+ }
+ EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);
  static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
                            struct _rule_hw *rule_hw)
  {
-       static const size_t __rule_hw_sz[] = {
-               [MLX4_NET_TRANS_RULE_ID_ETH] =
-                       sizeof(struct mlx4_net_trans_rule_hw_eth),
-               [MLX4_NET_TRANS_RULE_ID_IB] =
-                       sizeof(struct mlx4_net_trans_rule_hw_ib),
-               [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
-               [MLX4_NET_TRANS_RULE_ID_IPV4] =
-                       sizeof(struct mlx4_net_trans_rule_hw_ipv4),
-               [MLX4_NET_TRANS_RULE_ID_TCP] =
-                       sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
-               [MLX4_NET_TRANS_RULE_ID_UDP] =
-                       sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
-       };
-       if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
-               mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
+       if (mlx4_hw_rule_sz(dev, spec->id) < 0)
                return -EINVAL;
-       }
-       memset(rule_hw, 0, __rule_hw_sz[spec->id]);
+       memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
        rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
-       rule_hw->size = __rule_hw_sz[spec->id] >> 2;
+       rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;
  
        switch (spec->id) {
        case MLX4_NET_TRANS_RULE_ID_ETH:
                        rule_hw->eth.ether_type_enable = 1;
                        rule_hw->eth.ether_type = spec->eth.ether_type;
                }
-               rule_hw->eth.vlan_id = spec->eth.vlan_id;
-               rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
+               rule_hw->eth.vlan_tag = spec->eth.vlan_id;
+               rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
                break;
  
        case MLX4_NET_TRANS_RULE_ID_IB:
-               rule_hw->ib.qpn = spec->ib.r_qpn;
+               rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
                rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
                memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
                memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
@@@ -1125,18 -1159,35 +1159,18 @@@ static int mlx4_QP_ATTACH(struct mlx4_d
        return err;
  }
  
 -int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 -                        u8 port, int block_mcast_loopback,
 -                        enum mlx4_protocol prot, u64 *reg_id)
 +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
 +                            u8 gid[16], u8 port,
 +                            int block_mcast_loopback,
 +                            enum mlx4_protocol prot, u64 *reg_id)
  {
 -
 -      switch (dev->caps.steering_mode) {
 -      case MLX4_STEERING_MODE_A0:
 -              if (prot == MLX4_PROT_ETH)
 -                      return 0;
 -
 -      case MLX4_STEERING_MODE_B0:
 -              if (prot == MLX4_PROT_ETH)
 -                      gid[7] |= (MLX4_MC_STEER << 1);
 -
 -              if (mlx4_is_mfunc(dev))
 -                      return mlx4_QP_ATTACH(dev, qp, gid, 1,
 -                                            block_mcast_loopback, prot);
 -              return mlx4_qp_attach_common(dev, qp, gid,
 -                                           block_mcast_loopback, prot,
 -                                           MLX4_MC_STEER);
 -
 -      case MLX4_STEERING_MODE_DEVICE_MANAGED: {
                struct mlx4_spec_list spec = { {NULL} };
                __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
  
                struct mlx4_net_trans_rule rule = {
                        .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                        .exclusive = 0,
-                       .promisc_mode = MLX4_FS_PROMISC_NONE,
+                       .promisc_mode = MLX4_FS_REGULAR,
                        .priority = MLX4_DOMAIN_NIC,
                };
  
                list_add_tail(&spec.list, &rule.list);
  
                return mlx4_flow_attach(dev, &rule, reg_id);
 -      }
 +}
 +
 +int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 +                        u8 port, int block_mcast_loopback,
 +                        enum mlx4_protocol prot, u64 *reg_id)
 +{
 +      switch (dev->caps.steering_mode) {
 +      case MLX4_STEERING_MODE_A0:
 +              if (prot == MLX4_PROT_ETH)
 +                      return 0;
 +
 +      case MLX4_STEERING_MODE_B0:
 +              if (prot == MLX4_PROT_ETH)
 +                      gid[7] |= (MLX4_MC_STEER << 1);
 +
 +              if (mlx4_is_mfunc(dev))
 +                      return mlx4_QP_ATTACH(dev, qp, gid, 1,
 +                                            block_mcast_loopback, prot);
 +              return mlx4_qp_attach_common(dev, qp, gid,
 +                                           block_mcast_loopback, prot,
 +                                           MLX4_MC_STEER);
  
 +      case MLX4_STEERING_MODE_DEVICE_MANAGED:
 +              return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
 +                                               block_mcast_loopback,
 +                                               prot, reg_id);
        default:
                return -EINVAL;
        }
@@@ -1229,11 -1256,10 +1263,10 @@@ int mlx4_flow_steer_promisc_add(struct 
        u64 *regid_p;
  
        switch (mode) {
-       case MLX4_FS_PROMISC_UPLINK:
-       case MLX4_FS_PROMISC_FUNCTION_PORT:
+       case MLX4_FS_ALL_DEFAULT:
                regid_p = &dev->regid_promisc_array[port];
                break;
-       case MLX4_FS_PROMISC_ALL_MULTI:
+       case MLX4_FS_MC_DEFAULT:
                regid_p = &dev->regid_allmulti_array[port];
                break;
        default:
@@@ -1260,11 -1286,10 +1293,10 @@@ int mlx4_flow_steer_promisc_remove(stru
        u64 *regid_p;
  
        switch (mode) {
-       case MLX4_FS_PROMISC_UPLINK:
-       case MLX4_FS_PROMISC_FUNCTION_PORT:
+       case MLX4_FS_ALL_DEFAULT:
                regid_p = &dev->regid_promisc_array[port];
                break;
-       case MLX4_FS_PROMISC_ALL_MULTI:
+       case MLX4_FS_MC_DEFAULT:
                regid_p = &dev->regid_allmulti_array[port];
                break;
        default:
index eac3dae10efe4170d7ed7c276eaccca1ff2434fa,d5fdb19771e2ce995e12451f466884e4195e50a8..df15bb6631cc7d6f68b70191e891321f505fe00b
@@@ -87,8 -87,7 +87,8 @@@ enum 
        MLX4_HCR_SIZE           = 0x0001c,
        MLX4_CLR_INT_SIZE       = 0x00008,
        MLX4_SLAVE_COMM_BASE    = 0x0,
 -      MLX4_COMM_PAGESIZE      = 0x1000
 +      MLX4_COMM_PAGESIZE      = 0x1000,
 +      MLX4_CLOCK_SIZE         = 0x00008
  };
  
  enum {
@@@ -404,7 -403,6 +404,7 @@@ struct mlx4_fw 
        u64                     clr_int_base;
        u64                     catas_offset;
        u64                     comm_base;
 +      u64                     clock_offset;
        struct mlx4_icm        *fw_icm;
        struct mlx4_icm        *aux_icm;
        u32                     catas_size;
        u8                      clr_int_bar;
        u8                      catas_bar;
        u8                      comm_bar;
 +      u8                      clock_bar;
  };
  
  struct mlx4_comm {
@@@ -473,30 -470,6 +473,30 @@@ struct mlx4_slave_state 
        enum slave_port_state port_state[MLX4_MAX_PORTS + 1];
  };
  
 +#define MLX4_VGT 4095
 +#define NO_INDX  (-1)
 +
 +struct mlx4_vport_state {
 +      u64 mac;
 +      u16 default_vlan;
 +      u8  default_qos;
 +      u32 tx_rate;
 +      bool spoofchk;
 +};
 +
 +struct mlx4_vf_admin_state {
 +      struct mlx4_vport_state vport[MLX4_MAX_PORTS + 1];
 +};
 +
 +struct mlx4_vport_oper_state {
 +      struct mlx4_vport_state state;
 +      int mac_idx;
 +      int vlan_idx;
 +};
 +struct mlx4_vf_oper_state {
 +      struct mlx4_vport_oper_state vport[MLX4_MAX_PORTS + 1];
 +};
 +
  struct slave_list {
        struct mutex mutex;
        struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
@@@ -527,8 -500,6 +527,8 @@@ struct mlx4_master_qp0_state 
  
  struct mlx4_mfunc_master_ctx {
        struct mlx4_slave_state *slave_state;
 +      struct mlx4_vf_admin_state *vf_admin;
 +      struct mlx4_vf_oper_state *vf_oper;
        struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
        int                     init_port_ref[MLX4_MAX_PORTS + 1];
        u16                     max_mtu[MLX4_MAX_PORTS + 1];
@@@ -730,85 -701,6 +730,6 @@@ struct mlx4_steer 
        struct list_head steer_entries[MLX4_NUM_STEERS];
  };
  
- struct mlx4_net_trans_rule_hw_ctrl {
-       __be32 ctrl;
-       u8 rsvd1;
-       u8 funcid;
-       u8 vep;
-       u8 port;
-       __be32 qpn;
-       __be32 rsvd2;
- };
- struct mlx4_net_trans_rule_hw_ib {
-       u8 size;
-       u8 rsvd1;
-       __be16 id;
-       u32 rsvd2;
-       __be32 qpn;
-       __be32 qpn_mask;
-       u8 dst_gid[16];
-       u8 dst_gid_msk[16];
- } __packed;
- struct mlx4_net_trans_rule_hw_eth {
-       u8      size;
-       u8      rsvd;
-       __be16  id;
-       u8      rsvd1[6];
-       u8      dst_mac[6];
-       u16     rsvd2;
-       u8      dst_mac_msk[6];
-       u16     rsvd3;
-       u8      src_mac[6];
-       u16     rsvd4;
-       u8      src_mac_msk[6];
-       u8      rsvd5;
-       u8      ether_type_enable;
-       __be16  ether_type;
-       __be16  vlan_id_msk;
-       __be16  vlan_id;
- } __packed;
- struct mlx4_net_trans_rule_hw_tcp_udp {
-       u8      size;
-       u8      rsvd;
-       __be16  id;
-       __be16  rsvd1[3];
-       __be16  dst_port;
-       __be16  rsvd2;
-       __be16  dst_port_msk;
-       __be16  rsvd3;
-       __be16  src_port;
-       __be16  rsvd4;
-       __be16  src_port_msk;
- } __packed;
- struct mlx4_net_trans_rule_hw_ipv4 {
-       u8      size;
-       u8      rsvd;
-       __be16  id;
-       __be32  rsvd1;
-       __be32  dst_ip;
-       __be32  dst_ip_msk;
-       __be32  src_ip;
-       __be32  src_ip_msk;
- } __packed;
- struct _rule_hw {
-       union {
-               struct {
-                       u8 size;
-                       u8 rsvd;
-                       __be16 id;
-               };
-               struct mlx4_net_trans_rule_hw_eth eth;
-               struct mlx4_net_trans_rule_hw_ib ib;
-               struct mlx4_net_trans_rule_hw_ipv4 ipv4;
-               struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
-       };
- };
  enum {
        MLX4_PCI_DEV_IS_VF              = 1 << 0,
        MLX4_PCI_DEV_FORCE_SENSE_PORT   = 1 << 1,
@@@ -855,7 -747,6 +776,7 @@@ struct mlx4_priv 
        struct list_head        bf_list;
        struct mutex            bf_mutex;
        struct io_mapping       *bf_mapping;
 +      void __iomem            *clock_mapping;
        int                     reserved_mtts;
        int                     fs_hash_mode;
        u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
@@@ -1157,8 -1048,6 +1078,8 @@@ int mlx4_change_port_types(struct mlx4_
  
  void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
  void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
 +void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
 +int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
  
  int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz);
  /* resource tracker functions*/
@@@ -1222,10 -1111,6 +1143,10 @@@ int mlx4_qp_detach_common(struct mlx4_d
  int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot,
                          enum mlx4_steer_type steer);
 +int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
 +                            u8 gid[16], u8 port,
 +                            int block_mcast_loopback,
 +                            enum mlx4_protocol prot, u64 *reg_id);
  int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_vhcr *vhcr,
                                struct mlx4_cmd_mailbox *inbox,
index 53acaf64189f7bce45b238d51458ddad9a03db0b,ad4a53fbdddfd24752d2c845f50e296cadfd13b8..a51b0134ce18c6bc4c15e3ac0fb898243d51bb86
@@@ -40,8 -40,6 +40,8 @@@
  
  #include <linux/atomic.h>
  
 +#include <linux/clocksource.h>
 +
  #define MAX_MSIX_P_PORT               17
  #define MAX_MSIX              64
  #define MSIX_LEGACY_SZ                4
@@@ -142,7 -140,6 +142,7 @@@ enum 
        MLX4_DEV_CAP_FLAG_VEP_UC_STEER  = 1LL << 41,
        MLX4_DEV_CAP_FLAG_VEP_MC_STEER  = 1LL << 42,
        MLX4_DEV_CAP_FLAG_COUNTERS      = 1LL << 48,
 +      MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
        MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
        MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
        MLX4_DEV_CAP_FLAG_64B_EQE       = 1LL << 61,
@@@ -154,10 -151,7 +154,10 @@@ enum 
        MLX4_DEV_CAP_FLAG2_RSS_TOP              = 1LL <<  1,
        MLX4_DEV_CAP_FLAG2_RSS_XOR              = 1LL <<  2,
        MLX4_DEV_CAP_FLAG2_FS_EN                = 1LL <<  3,
 -      MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN     = 1LL <<  4
 +      MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN     = 1LL <<  4,
 +      MLX4_DEV_CAP_FLAG2_TS                   = 1LL <<  5,
 +      MLX4_DEV_CAP_FLAG2_VLAN_CONTROL         = 1LL <<  6,
 +      MLX4_DEV_CAP_FLAG2_FSM                  = 1LL <<  7
  };
  
  enum {
@@@ -449,7 -443,6 +449,7 @@@ struct mlx4_caps 
        u8                      eqe_factor;
        u32                     userspace_caps; /* userspace must be aware of these */
        u32                     function_caps;  /* VFs must be aware of these */
 +      u16                     hca_core_clock;
  };
  
  struct mlx4_buf_list {
@@@ -844,7 -837,7 +844,7 @@@ void mlx4_free_hwq_res(struct mlx4_dev 
  
  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
                  struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
 -                unsigned vector, int collapsed);
 +                unsigned vector, int collapsed, int timestamp_en);
  void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
  
  int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
@@@ -903,11 -896,12 +903,12 @@@ static inline int map_hw_to_sw_id(u16 h
  }
  
  enum mlx4_net_trans_promisc_mode {
-       MLX4_FS_PROMISC_NONE = 0,
-       MLX4_FS_PROMISC_UPLINK,
-       /* For future use. Not implemented yet */
-       MLX4_FS_PROMISC_FUNCTION_PORT,
-       MLX4_FS_PROMISC_ALL_MULTI,
+       MLX4_FS_REGULAR = 1,
+       MLX4_FS_ALL_DEFAULT,
+       MLX4_FS_MC_DEFAULT,
+       MLX4_FS_UC_SNIFFER,
+       MLX4_FS_MC_SNIFFER,
+       MLX4_FS_MODE_NUM, /* should be last */
  };
  
  struct mlx4_spec_eth {
@@@ -936,7 -930,7 +937,7 @@@ struct mlx4_spec_ipv4 
  };
  
  struct mlx4_spec_ib {
-       __be32  r_qpn;
+       __be32  l3_qpn;
        __be32  qpn_msk;
        u8      dst_gid[16];
        u8      dst_gid_msk[16];
@@@ -969,6 -963,92 +970,92 @@@ struct mlx4_net_trans_rule 
        u32     qpn;
  };
  
+ struct mlx4_net_trans_rule_hw_ctrl {
+       __be16 prio;
+       u8 type;
+       u8 flags;
+       u8 rsvd1;
+       u8 funcid;
+       u8 vep;
+       u8 port;
+       __be32 qpn;
+       __be32 rsvd2;
+ };
+ struct mlx4_net_trans_rule_hw_ib {
+       u8 size;
+       u8 rsvd1;
+       __be16 id;
+       u32 rsvd2;
+       __be32 l3_qpn;
+       __be32 qpn_mask;
+       u8 dst_gid[16];
+       u8 dst_gid_msk[16];
+ } __packed;
+ struct mlx4_net_trans_rule_hw_eth {
+       u8      size;
+       u8      rsvd;
+       __be16  id;
+       u8      rsvd1[6];
+       u8      dst_mac[6];
+       u16     rsvd2;
+       u8      dst_mac_msk[6];
+       u16     rsvd3;
+       u8      src_mac[6];
+       u16     rsvd4;
+       u8      src_mac_msk[6];
+       u8      rsvd5;
+       u8      ether_type_enable;
+       __be16  ether_type;
+       __be16  vlan_tag_msk;
+       __be16  vlan_tag;
+ } __packed;
+ struct mlx4_net_trans_rule_hw_tcp_udp {
+       u8      size;
+       u8      rsvd;
+       __be16  id;
+       __be16  rsvd1[3];
+       __be16  dst_port;
+       __be16  rsvd2;
+       __be16  dst_port_msk;
+       __be16  rsvd3;
+       __be16  src_port;
+       __be16  rsvd4;
+       __be16  src_port_msk;
+ } __packed;
+ struct mlx4_net_trans_rule_hw_ipv4 {
+       u8      size;
+       u8      rsvd;
+       __be16  id;
+       __be32  rsvd1;
+       __be32  dst_ip;
+       __be32  dst_ip_msk;
+       __be32  src_ip;
+       __be32  src_ip_msk;
+ } __packed;
+ struct _rule_hw {
+       union {
+               struct {
+                       u8 size;
+                       u8 rsvd;
+                       __be16 id;
+               };
+               struct mlx4_net_trans_rule_hw_eth eth;
+               struct mlx4_net_trans_rule_hw_ib ib;
+               struct mlx4_net_trans_rule_hw_ipv4 ipv4;
+               struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+       };
+ };
+ /* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
+ struct mlx4_flow_handle {
+       u64 reg_id[2];
+ };
  int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
                                enum mlx4_net_trans_promisc_mode mode);
  int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@@ -1018,6 -1098,11 +1105,11 @@@ void mlx4_counter_free(struct mlx4_dev 
  int mlx4_flow_attach(struct mlx4_dev *dev,
                     struct mlx4_net_trans_rule *rule, u64 *reg_id);
  int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id);
+ int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
+                                   enum mlx4_net_trans_promisc_mode flow_type);
+ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
+                                 enum mlx4_net_trans_rule_id id);
+ int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id);
  
  void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port,
                          int i, int val);
@@@ -1035,6 -1120,4 +1127,6 @@@ int set_and_calc_slave_port_state(struc
  void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
  __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
  
 +cycle_t mlx4_read_clock(struct mlx4_dev *dev);
 +
  #endif /* MLX4_DEVICE_H */