Merge branch 'mlx5-next'
Author:     David S. Miller <davem@davemloft.net>
AuthorDate: Thu, 11 Jun 2015 22:55:26 +0000 (15:55 -0700)
Commit:     David S. Miller <davem@davemloft.net>
CommitDate: Thu, 11 Jun 2015 22:55:26 +0000 (15:55 -0700)
Or Gerlitz says:

====================
mlx5 Ethernet driver update - Jun 11 2015

This series from Saeed, Achiad and Gal contains a few fixes
to the recently introduced mlx5 Ethernet functionality.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/infiniband/hw/mlx5/main.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.c
drivers/net/ethernet/mellanox/mlx5/core/transobj.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d4dea86052d6b0cbb4dffc89d58053df256a5798..79dadd627e9ce152a6febdad9e7becef8f059ade 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -446,15 +446,11 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
        if (err)
                goto out;
 
-       err = mlx5_query_port_max_mtu(mdev, &max_mtu, port);
-       if (err)
-               goto out;
+       mlx5_query_port_max_mtu(mdev, &max_mtu, port);
 
        props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);
 
-       err = mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
-       if (err)
-               goto out;
+       mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);
 
        props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index e9edb7210de1c8fb09407a5fa367d093070ca117..e14120eccf04ced0d5d641b4acb62a05668b5a4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -57,7 +57,6 @@
 #define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
 #define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ         0x7
-#define MLX5E_PARAMS_MIN_MTU                            46
 
 #define MLX5E_TX_CQ_POLL_BUDGET        128
 #define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
@@ -284,6 +283,8 @@ struct mlx5e_sq {
        struct netdev_queue       *txq;
        u32                        sqn;
        u32                        bf_buf_size;
+       u16                        max_inline;
+       u16                        edge;
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;
@@ -388,6 +389,7 @@ struct mlx5e_priv {
        struct mutex               state_lock; /* Protects Interface state */
        struct mlx5_uar            cq_uar;
        u32                        pdn;
+       u32                        tdn;
        struct mlx5_core_mr        mr;
 
        struct mlx5e_channel     **channel;
@@ -454,6 +456,7 @@ enum mlx5e_link_mode {
 
 #define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
 
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
                       void *accel_priv, select_queue_fallback_t fallback);
 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 075e5175e7795a1244beb76a081b00c0d24c7e8c..9a48d8eac0fcc89227eef9d969ff69396093fe15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -257,25 +257,8 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
        spin_unlock_irq(&priv->async_events_spinlock);
 }
 
-static void mlx5e_send_nop(struct mlx5e_sq *sq)
-{
-       struct mlx5_wq_cyc                *wq  = &sq->wq;
-
-       u16 pi = sq->pc & wq->sz_m1;
-       struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-
-       struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
-
-       memset(cseg, 0, sizeof(*cseg));
-
-       cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
-       cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
-       cseg->fm_ce_se         = MLX5_WQE_CTRL_CQ_UPDATE;
-
-       sq->skb[pi] = NULL;
-       sq->pc++;
-       mlx5e_tx_notify_hw(sq, wqe);
-}
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
 
 static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
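
For reference, the new conversion macros account for the 22 bytes of L2
overhead (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4). A quick sketch,
not part of the patch:

        /* sketch: SW <-> HW MTU conversion, 22 bytes of L2 overhead */
        int hw_mtu = MLX5E_SW2HW_MTU(1500);     /* 1500 + 22 = 1522 */
        int sw_mtu = MLX5E_HW2SW_MTU(1522);     /* 1522 - 22 = 1500 */
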
@@ -305,13 +288,16 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
        }
 
        rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
-                               priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+                                            MLX5E_SW2HW_MTU(priv->netdev->mtu);
+       rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
 
        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+               u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
 
                wqe->data.lkey       = c->mkey_be;
-               wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+               wqe->data.byte_count =
+                       cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
        }
 
        rq->pdev    = c->pdev;
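
The receive buffer size is now rounded up with SKB_DATA_ALIGN (which aligns
to SMP_CACHE_BYTES), and the length programmed into each RX WQE excludes the
MLX5E_NET_IP_ALIGN headroom, with MLX5_HW_START_PADDING (added to device.h
below) presumably telling the device to pad the start of the packet. A worked
example, assuming a 1500-byte MTU, no LRO and 64-byte cache lines:

        /* sketch of the wqe_sz / byte_count arithmetic */
        rq->wqe_sz = MLX5E_SW2HW_MTU(1500);       /* 1522         */
        rq->wqe_sz = SKB_DATA_ALIGN(1522 + 2);    /* 1524 -> 1536 */
        byte_count = 1536 - 2;                    /* 1534         */
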
@@ -447,7 +433,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
                goto err_disable_rq;
 
        set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-       mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+       mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
 
        return 0;
 
@@ -536,6 +522,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        sq->mkey_be = c->mkey_be;
        sq->channel = c;
        sq->tc      = tc;
+       sq->edge    = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
 
        return 0;
 
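
sq->edge marks the last producer position at which a maximum-sized WQE still
fits before the cyclic work queue wraps. Illustrative numbers (assuming
MLX5_SEND_WQE_MAX_WQEBBS = 16 and a 64-entry SQ, i.e. wq.sz_m1 = 63):

        sq->edge = (63 + 1) - 16;       /* = 48 */

Once the producer index passes the edge, the transmit path (see the en_tx.c
hunk below) pads the tail of the ring with NOPs so no WQE straddles the wrap.
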
@@ -689,7 +676,7 @@ static void mlx5e_close_sq(struct mlx5e_sq *sq)
 
        /* ensure hw is notified of all pending wqes */
        if (mlx5e_sq_has_room_for(sq, 1))
-               mlx5e_send_nop(sq);
+               mlx5e_send_nop(sq, true);
 
        mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
        while (sq->cc != sq->pc) /* wait till sq is empty */
@@ -1115,6 +1102,7 @@ static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
        memset(in, 0, sizeof(in));
 
        MLX5_SET(tisc, tisc, prio,  tc);
+       MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
 
        return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
 }
@@ -1213,6 +1201,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
 {
        void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
+       MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
 #define ROUGH_MAX_L2_L3_HDR_SZ 256
 
 #define MLX5_HASH_IP     (MLX5_HASH_FIELD_SEL_SRC_IP   |\
@@ -1367,11 +1357,30 @@ static void mlx5e_close_tirs(struct mlx5e_priv *priv)
                mlx5e_close_tir(priv, i);
 }
 
-int mlx5e_open_locked(struct net_device *netdev)
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
-       int actual_mtu;
+       int hw_mtu;
+       int err;
+
+       err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+       if (err)
+               return err;
+
+       mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+       if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
+               netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
+                           __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+
+       netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+       return 0;
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
        int num_txqs;
        int err;
 
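
mlx5e_set_dev_port_mtu() centralizes the set-then-read-back sequence: program
the port with the SW2HW value, then adopt whatever operational MTU the device
reports, warning rather than failing on a mismatch. Under the same 22-byte
overhead assumption as above:

        /* sketch: netdev->mtu = 1500 */
        mlx5_set_port_mtu(mdev, 1522, 1);           /* SW2HW, port 1    */
        mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
        netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);      /* 1500 if accepted */
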
@@ -1380,25 +1389,9 @@ int mlx5e_open_locked(struct net_device *netdev)
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
 
-       err = mlx5_set_port_mtu(mdev, netdev->mtu);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
-                          __func__, err);
-               return err;
-       }
-
-       err = mlx5_query_port_oper_mtu(mdev, &actual_mtu, 1);
-       if (err) {
-               netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
-                          __func__, err);
+       err = mlx5e_set_dev_port_mtu(netdev);
+       if (err)
                return err;
-       }
-
-       if (actual_mtu != netdev->mtu)
-               netdev_warn(netdev, "%s: Failed to set MTU to %d\n",
-                           __func__, netdev->mtu);
-
-       netdev->mtu = actual_mtu;
 
        err = mlx5e_open_tises(priv);
        if (err) {
@@ -1613,15 +1606,14 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        int max_mtu;
-       int err = 0;
+       int err;
 
-       err = mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
-       if (err)
-               return err;
+       mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
-       if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
-               netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
-                          __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+       if (new_mtu > max_mtu) {
+               netdev_err(netdev,
+                          "%s: Bad MTU (%d) > (%d) Max\n",
+                          __func__, new_mtu, max_mtu);
                return -EINVAL;
        }
 
@@ -1655,7 +1647,10 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
            !MLX5_CAP_ETH(mdev, csum_cap) ||
            !MLX5_CAP_ETH(mdev, max_lso_cap) ||
            !MLX5_CAP_ETH(mdev, vlan_cap) ||
-           !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+           !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+           MLX5_CAP_FLOWTABLE(mdev,
+                              flow_table_properties_nic_receive.max_ft_level)
+                              < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
                return -ENOTSUPP;
@@ -1736,6 +1731,7 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 
        netdev->ethtool_ops       = &mlx5e_ethtool_ops;
 
+       netdev->vlan_features    |= NETIF_F_SG;
        netdev->vlan_features    |= NETIF_F_IP_CSUM;
        netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features    |= NETIF_F_GRO;
@@ -1748,7 +1744,6 @@ static void mlx5e_build_netdev(struct net_device *netdev)
                netdev->vlan_features    |= NETIF_F_LRO;
 
        netdev->hw_features       = netdev->vlan_features;
-       netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
@@ -1827,11 +1822,18 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                goto err_unmap_free_uar;
        }
 
+       err = mlx5_alloc_transport_domain(mdev, &priv->tdn);
+       if (err) {
+               netdev_err(netdev, "%s: mlx5_alloc_transport_domain failed, %d\n",
+                          __func__, err);
+               goto err_dealloc_pd;
+       }
+
        err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
        if (err) {
                netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
                           __func__, err);
-               goto err_dealloc_pd;
+               goto err_dealloc_transport_domain;
        }
 
        err = register_netdev(netdev);
@@ -1848,6 +1850,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 err_destroy_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->mr);
 
+err_dealloc_transport_domain:
+       mlx5_dealloc_transport_domain(mdev, priv->tdn);
+
 err_dealloc_pd:
        mlx5_core_dealloc_pd(mdev, priv->pdn);
 
@@ -1867,6 +1872,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        unregister_netdev(netdev);
        mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+       mlx5_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
        mlx5e_disable_async_events(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index ce1317cdabd751c6b5240375ae8d39b51e96c9d0..06e7c744ed4a6071de2643a9c7ae3df0eaa3391b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -45,18 +45,18 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
        if (unlikely(!skb))
                return -ENOMEM;
 
-       skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
        dma_addr = dma_map_single(rq->pdev,
                                  /* hw start padding */
-                                 skb->data - MLX5E_NET_IP_ALIGN,
-                                 /* hw   end padding */
+                                 skb->data,
+                                 /* hw end padding */
                                  rq->wqe_sz,
                                  DMA_FROM_DEVICE);
 
        if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
                goto err_free_skb;
 
+       skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
        *((dma_addr_t *)skb->cb) = dma_addr;
        wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
 
@@ -217,7 +217,7 @@ bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 
                dma_unmap_single(rq->pdev,
                                 *((dma_addr_t *)skb->cb),
-                                skb_end_offset(skb),
+                                rq->wqe_sz,
                                 DMA_FROM_DEVICE);
 
                if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
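
The RX path now maps the buffer from skb->data before skb_reserve(), so the
DMA mapping covers the full rq->wqe_sz, including the 2-byte headroom that
the hardware start padding writes into; the completion path accordingly
unmaps with rq->wqe_sz instead of skb_end_offset(). As I read it, the
intended layout is:

        /*
         * dma_addr  = skb->data, mapped for rq->wqe_sz bytes
         * wqe addr  = dma_addr + MLX5E_NET_IP_ALIGN (2)
         * The device pads the start, so after skb_reserve(2) the packet
         * begins at skb->data with the IP header properly aligned.
         */
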
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8020986cdaf698c12f66d75def76d4d6249ae28c..bac268a670f46b09a8c8f610453611c0ef218977 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
 #include <linux/if_vlan.h>
 #include "en.h"
 
+#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+                           MLX5E_SQ_NOPS_ROOM)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+       struct mlx5_wq_cyc                *wq  = &sq->wq;
+
+       u16 pi = sq->pc & wq->sz_m1;
+       struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+       struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
+
+       memset(cseg, 0, sizeof(*cseg));
+
+       cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+       cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+       sq->skb[pi] = NULL;
+       sq->pc++;
+
+       if (notify_hw) {
+               cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+               mlx5e_tx_notify_hw(sq, wqe);
+       }
+}
+
 static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
                                      u32 *size)
 {
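
mlx5e_send_nop() moves into en_tx.c and gains a notify_hw flag: NOPs posted
merely to pad the ring edge neither request a CQ update nor ring the
doorbell, while callers that need a completion (mlx5e_open_rq and
mlx5e_close_sq above) pass true. The two usage patterns:

        mlx5e_send_nop(sq, true);   /* post + doorbell, generates a CQE */
        mlx5e_send_nop(sq, false);  /* edge fill only; the doorbell
                                     * rides on the next real WQE */
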
@@ -89,21 +116,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
        return MLX5E_MIN_INLINE;
 }
 
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
-{
-       struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
-       int cpy1_sz = 2 * ETH_ALEN;
-       int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
-
-       skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
-       skb_pull_inline(skb, cpy1_sz);
-       vhdr->h_vlan_proto = skb->vlan_proto;
-       vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
-       skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
-                                 cpy2_sz);
-       skb_pull_inline(skb, cpy2_sz);
-}
-
 static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 {
        struct mlx5_wq_cyc       *wq   = &sq->wq;
@@ -149,12 +161,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                                                        ETH_ZLEN);
        }
 
-       if (skb_vlan_tag_present(skb)) {
-               mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
-       } else {
-               skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
-               skb_pull_inline(skb, ihs);
-       }
+       skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+       skb_pull_inline(skb, ihs);
 
        eseg->inline_hdr_sz     = cpu_to_be16(ihs);
 
@@ -215,7 +223,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
 
        netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
 
-       if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+       if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
                netif_tx_stop_queue(sq->txq);
                sq->stats.stopped++;
        }
@@ -223,6 +231,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
        if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
                mlx5e_tx_notify_hw(sq, wqe);
 
+       /* fill sq edge with nops to avoid wqe wrap around */
+       while ((sq->pc & wq->sz_m1) > sq->edge)
+               mlx5e_send_nop(sq, false);
+
        sq->stats.packets++;
        return NETDEV_TX_OK;
 
@@ -330,7 +342,7 @@ free_skb:
        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
        if (netif_tx_queue_stopped(sq->txq) &&
-           mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+           mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
            likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
                                netif_tx_wake_queue(sq->txq);
                                sq->stats.wake++;
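
On the stop/wake threshold: the queue now stops when fewer than
MLX5E_SQ_STOP_ROOM WQEBBs remain, because a single transmit may consume a
maximum-sized WQE plus up to MLX5E_SQ_NOPS_ROOM of edge-fill NOPs:

        /*
         * worst case per xmit:
         *   MLX5_SEND_WQE_MAX_WQEBBS  (the WQE itself)
         * + MLX5E_SQ_NOPS_ROOM        (NOP pad at the ring wrap)
         * = MLX5E_SQ_STOP_ROOM
         */
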
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 619d3baf19eac890f94bcb0b9f74cef8b9363813..70147999f6574f9fc1e236b6b43ce0610cbd4d3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -248,22 +248,18 @@ int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
        return err;
 }
 
-static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
-                              int *admin_mtu, int *max_mtu, int *oper_mtu,
-                              u8 local_port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
+                               int *max_mtu, int *oper_mtu, u8 port)
 {
        u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
        u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
-       int err;
 
        memset(in, 0, sizeof(in));
 
-       MLX5_SET(pmtu_reg, in, local_port, local_port);
+       MLX5_SET(pmtu_reg, in, local_port, port);
 
-       err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-                                  sizeof(out), MLX5_REG_PMTU, 0, 0);
-       if (err)
-               return err;
+       mlx5_core_access_reg(dev, in, sizeof(in), out,
+                            sizeof(out), MLX5_REG_PMTU, 0, 0);
 
        if (max_mtu)
                *max_mtu  = MLX5_GET(pmtu_reg, out, max_mtu);
@@ -271,11 +267,9 @@ static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
                *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
        if (admin_mtu)
                *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
-
-       return 0;
 }
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
 {
        u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
        u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,24 +277,24 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
        memset(in, 0, sizeof(in));
 
        MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
-       MLX5_SET(pmtu_reg, in, local_port, 1);
+       MLX5_SET(pmtu_reg, in, local_port, port);
 
-       return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
-                                   MLX5_REG_PMTU, 0, 1);
+       return mlx5_core_access_reg(dev, in, sizeof(in), out,
+                                  sizeof(out), MLX5_REG_PMTU, 0, 1);
 }
 EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
 
-int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
-                           u8 local_port)
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+                            u8 port)
 {
-       return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, local_port);
+       mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
 
-int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
-                            u8 local_port)
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+                             u8 port)
 {
-       return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, local_port);
+       mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
 }
 EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 7a120283de2c9d04b2a0c903e374b692fc97d85a..8d98b03026d5db588eee7de23f04611f268b4543 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
 #include "mlx5_core.h"
 #include "transobj.h"
 
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+       u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)];
+       u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)];
+       int err;
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(alloc_transport_domain_in, in, opcode,
+                MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+       err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+       if (!err)
+               *tdn = MLX5_GET(alloc_transport_domain_out, out,
+                               transport_domain);
+
+       return err;
+}
+
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)];
+       u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)];
+
+       memset(in, 0, sizeof(in));
+       memset(out, 0, sizeof(out));
+
+       MLX5_SET(dealloc_transport_domain_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+       MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+
+       mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
 int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
 {
        u32 out[MLX5_ST_SZ_DW(create_rq_out)];
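
The new transport-domain helpers follow the usual mlx5 command pattern
(MLX5_SET the opcode, execute, MLX5_GET the result). Typical pairing,
mirroring the en_main.c hunks above:

        u32 tdn;

        err = mlx5_alloc_transport_domain(mdev, &tdn);
        if (err)
                return err;
        /* ... MLX5_SET(tisc/tirc, ..., transport_domain, tdn) ... */
        mlx5_dealloc_transport_domain(mdev, tdn);
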
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
index 90322c11361f1ef14ca0565e3acb57203e78b7f1..f9ef244710d534b5e22bddd313d1497b36da12fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -33,6 +33,8 @@
 #ifndef __TRANSOBJ_H__
 #define __TRANSOBJ_H__
 
+int mlx5_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
 int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
                        u32 *rqn);
 int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index b2c43508a73711842caa4549f3ae4dc98d04aea3..b943cd9e2097326466919eccc83a25bd93b2ba58 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -131,6 +131,10 @@ enum {
        MLX5_INLINE_SEG = 0x80000000,
 };
 
+enum {
+       MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
 enum {
        MLX5_MIN_PKEY_TABLE_SIZE = 128,
        MLX5_MAX_LOG_PKEY_TABLE  = 5,
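
Note the deliberate aliasing: MLX5_HW_START_PADDING reuses the
MLX5_INLINE_SEG value (bit 31), since the RX WQE byte_count field carries
this flag in its top bit alongside the buffer length in the low bits, as
used in the en_main.c hunk above:

        /* sketch: flag in bit 31, length in the low bits */
        wqe->data.byte_count = cpu_to_be32(len | MLX5_HW_START_PADDING);
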
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 6093bde16b94b8d734f2047c7d6454e1beaafe2c..c0930f8d702112303ad43bd62b920a81f476fdc9 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -756,11 +756,11 @@ int mlx5_set_port_status(struct mlx5_core_dev *dev,
                         enum mlx5_port_status status);
 int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status);
 
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu);
-int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
-                           u8 local_port);
-int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
-                            u8 local_port);
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+                             u8 port);
+
 int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
                              u8 *vl_hw_cap, u8 local_port);
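
With these prototype changes the port-MTU API takes an explicit port on set
and can no longer fail on query. A minimal usage sketch against the new
signatures:

        int max_mtu, oper_mtu, err;

        mlx5_query_port_max_mtu(mdev, &max_mtu, 1);     /* port 1 */
        err = mlx5_set_port_mtu(mdev, hw_mtu, 1);       /* only the set
                                                         * can fail now */
        mlx5_query_port_oper_mtu(mdev, &oper_mtu, 1);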