Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author David S. Miller <davem@davemloft.net>
Wed, 17 Feb 2021 01:30:20 +0000 (17:30 -0800)
committer David S. Miller <davem@davemloft.net>
Wed, 17 Feb 2021 01:51:13 +0000 (17:51 -0800)
32 files changed:
Documentation/networking/ip-sysctl.rst
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ipa/ipa_main.c
drivers/net/phy/phy_device.c
include/net/act_api.h
include/uapi/linux/pkt_cls.h
kernel/bpf/verifier.c
net/core/flow_dissector.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/cls_flower.c
tools/testing/selftests/net/forwarding/tc_flower.sh

index 927d5f36d3081fcaf4f0cc420a2fd1b7e75a6670,13ae7eee7ef5fb2cf1e805d8772ed8d65577ec3b..5cf7e5a367f0a8f8161527a4f32cfe175fe0bc65
@@@ -2288,7 -2395,10 +2294,8 @@@ static int ibmvnic_reset(struct ibmvnic
        unsigned long flags;
        int ret;
  
-       /* If failover is pending don't schedule any other reset.
 -      spin_lock_irqsave(&adapter->rwi_lock, flags);
 -
+       /*
+        * If failover is pending don't schedule any other reset.
         * Instead let the failover complete. If there is already
         * a failover reset scheduled, we will detect and drop the
         * duplicate reset when walking the ->rwi_list below.
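
A minimal sketch of the duplicate-reset check the comment above describes: the ->rwi_list walk happens under rwi_lock. The simplified struct and helper below are hypothetical, not the driver's exact code.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical, simplified version of the check: a reset request is
 * dropped when one with the same reason is already on ->rwi_list.
 */
struct rwi_req {
	struct list_head list;
	int reason;
};

static bool rwi_already_queued(struct list_head *rwi_list,
			       spinlock_t *rwi_lock, int reason)
{
	struct rwi_req *tmp;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(rwi_lock, flags);
	list_for_each_entry(tmp, rwi_list, list) {
		if (tmp->reason == reason) {
			found = true;	/* duplicate reset: drop it */
			break;
		}
	}
	spin_unlock_irqrestore(rwi_lock, flags);

	return found;
}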
index 270d1cac86a4db42ac3a7c1343229abafe878531,72fea3b1c87d98cf4ea896da80cf2e9f6394ffd4..e4dcc63b9710bc475b61cda9d3810dda8e05f84e
@@@ -1081,12 -1080,10 +1081,12 @@@ struct ibmvnic_adapter 
  
        struct tasklet_struct tasklet;
        enum vnic_state state;
 +      /* Used for serialization of state field */
 +      spinlock_t state_lock;
        enum ibmvnic_reset_reason reset_reason;
 -      /* when taking both state and rwi locks, take state lock first */
 -      spinlock_t rwi_lock;
        struct list_head rwi_list;
 +      /* Used for serialization of rwi_list */
 +      spinlock_t rwi_lock;
        struct work_struct ibmvnic_reset;
        struct delayed_work ibmvnic_delayed_reset;
        unsigned long resetting;
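
The net-side comment shown above records a lock-ordering rule: whenever both state_lock and rwi_lock are needed, state_lock is taken first. A sketch of a compliant caller, assuming the field names from the struct above; the helper itself is hypothetical.

/* Hypothetical caller honoring the documented order: state_lock is
 * always acquired before rwi_lock, so no path can invert the order
 * and deadlock.
 */
static void check_state_and_queue_reset(struct ibmvnic_adapter *adapter)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);	/* first */
	spin_lock(&adapter->rwi_lock);			/* second */

	/* ... examine adapter->state, then adapter->rwi_list ... */

	spin_unlock(&adapter->rwi_lock);
	spin_unlock_irqrestore(&adapter->state_lock, flags);
}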
index aa76a6e0dae8504eccdc850797ca90d632788bc3,41474e42a819aaa29216c1c2aa32d0b625cd5317..d7d8a68ef23d7e1abf5f50d70dcc6b218e1aa031
@@@ -129,18 -127,12 +129,23 @@@ static int mlx5_devlink_reload_down(str
                                    struct netlink_ext_ack *extack)
  {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
 +      bool sf_dev_allocated;
 +
 +      sf_dev_allocated = mlx5_sf_dev_allocated(dev);
 +      if (sf_dev_allocated) {
 +              /* Reload results in deleting SF device which further results in
 +               * unregistering devlink instance while holding devlink_mutex.
 +               * Hence, do not support reload.
 +               */
 +              NL_SET_ERR_MSG_MOD(extack, "reload is unsupported when SFs are allocated\n");
 +              return -EOPNOTSUPP;
 +      }
  
+       if (mlx5_lag_is_active(dev)) {
+               NL_SET_ERR_MSG_MOD(extack, "reload is unsupported in Lag mode\n");
+               return -EOPNOTSUPP;
+       }
+ 
        switch (action) {
        case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
                mlx5_unload_one(dev, false);
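
Both new checks follow the usual devlink veto shape: detect a state in which reload cannot proceed, report the reason through extack, and return -EOPNOTSUPP before any teardown starts. A generic sketch of that shape; the my_blocker_active() precondition is made up for illustration.

/* Generic veto pattern for a devlink reload_down handler; the
 * my_blocker_active() precondition is hypothetical.
 */
static int example_reload_down(struct devlink *devlink, bool netns_change,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (my_blocker_active(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "reload is unsupported right now");
		return -EOPNOTSUPP;	/* veto before any teardown */
	}

	/* ... handle the requested DEVLINK_RELOAD_ACTION_* ... */
	return 0;
}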
index 0b503ebe59ecc9bfde5bb7c9ac6c4d0728791c4d,24e2c0d955b999722d114641a6d6ed75e8670b19..f3f6eb0819489410aa0039e0262016f69eea4ac3
@@@ -793,20 -884,32 +885,30 @@@ mlx5_tc_ct_shared_counter_get(struct ml
        }
  
        /* Use the same counter as the reverse direction */
-       mutex_lock(&ct_priv->shared_counter_lock);
-       rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
-                                          tuples_ht_params);
-       if (rev_entry) {
-               if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
-                       mutex_unlock(&ct_priv->shared_counter_lock);
-                       return rev_entry->counter;
-               }
+       spin_lock_bh(&ct_priv->ht_lock);
+       rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
+       if (IS_ERR(rev_entry)) {
+               spin_unlock_bh(&ct_priv->ht_lock);
+               goto create_counter;
        }
-       mutex_unlock(&ct_priv->shared_counter_lock);
+       if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
+               ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+               shared_counter = rev_entry->counter;
+               spin_unlock_bh(&ct_priv->ht_lock);
+               mlx5_tc_ct_entry_put(rev_entry);
+               return shared_counter;
+       }
+       spin_unlock_bh(&ct_priv->ht_lock);
+ create_counter:
  
        shared_counter = mlx5_tc_ct_counter_create(ct_priv);
 -      if (IS_ERR(shared_counter)) {
 -              ret = PTR_ERR(shared_counter);
 -              return ERR_PTR(ret);
 -      }
 +      if (IS_ERR(shared_counter))
 +              return shared_counter;
  
        shared_counter->is_shared = true;
        refcount_set(&shared_counter->refcount, 1);
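
The rewritten lookup leans on the standard refcount_inc_not_zero() idiom: an entry found under ht_lock may already be dying, so a reference is taken only while its count is still non-zero, and the caller otherwise falls through to creating a fresh counter. A standalone sketch of the idiom with simplified types.

#include <linux/refcount.h>
#include <linux/spinlock.h>

struct shared_counter {
	refcount_t refcount;
	/* ... packet/byte stats ... */
};

/* Returns the candidate with a reference held, or NULL when the entry
 * seen under the lock was already on its way to being freed.
 */
static struct shared_counter *try_get_counter(spinlock_t *ht_lock,
					      struct shared_counter *candidate)
{
	struct shared_counter *ret = NULL;

	spin_lock_bh(ht_lock);
	if (candidate && refcount_inc_not_zero(&candidate->refcount))
		ret = candidate;	/* count was still > 0: safe to share */
	spin_unlock_bh(ht_lock);

	return ret;
}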
index c8866c14b8a376db3355a61723e86fd8d943d810,a2e0b548bf5704d3c8dc95cadfc8d12d1d0fa76e..39acbc83682d3d0ffeab07c7d9c00c3ed4c16c1c
@@@ -65,8 -65,7 +65,9 @@@
  #include "en/devlink.h"
  #include "lib/mlx5.h"
  #include "en/ptp.h"
 +#include "qos.h"
 +#include "en/trap.h"
+ #include "fpga/ipsec.h"
  
  bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
  {
@@@ -2122,8 -2069,10 +2123,8 @@@ static void mlx5e_build_rq_frags_info(s
        u32 buf_size = 0;
        int i;
  
-       if (MLX5_IPSEC_DEV(mdev))
 -#ifdef CONFIG_MLX5_EN_IPSEC
+       if (mlx5_fpga_is_ipsec_device(mdev))
                byte_count += MLX5E_METADATA_ETHER_LEN;
 -#endif
  
        if (mlx5e_rx_is_linear_skb(params, xsk)) {
                int frag_stride;
index fac96ea819a1df59d9a44a6e22766ddcdd802f99,4864deed9dc94efbbc5d910457ce9a25adaed344..1b6ad94ebb10342293d348b425269638149ed4b9
@@@ -1783,10 -1794,12 +1783,10 @@@ int mlx5e_rq_set_handlers(struct mlx5e_
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
  
                rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
-               if (MLX5_IPSEC_DEV(mdev)) {
-                       netdev_err(netdev, "MPWQE RQ with IPSec offload not supported\n");
 -#ifdef CONFIG_MLX5_EN_IPSEC
+               if (mlx5_fpga_is_ipsec_device(mdev)) {
+                       netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
                        return -EINVAL;
                }
 -#endif
                if (!rq->handle_rx_cqe) {
                        netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
                        return -EINVAL;
index cbc30df4e08a45b250f6ac1f4d8a2ed3626886df,e7a59dc5fe49858d7659e4c2a51121a687adb418..9ce98e3d3f9f7a5245e707d20e274b5b10d4f394
@@@ -2228,10 -2202,68 +2228,35 @@@ static void rtl_prepare_power_down(stru
  
        if (device_may_wakeup(tp_to_dev(tp))) {
                phy_speed_down(tp->phydev, false);
 -              rtl_wol_suspend_quirk(tp);
 -              return;
 +              rtl_wol_enable_rx(tp);
        }
+ 
+       switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+       case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
+       case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
+       case RTL_GIGA_MAC_VER_37:
+       case RTL_GIGA_MAC_VER_39:
+       case RTL_GIGA_MAC_VER_43:
+       case RTL_GIGA_MAC_VER_44:
+       case RTL_GIGA_MAC_VER_45:
+       case RTL_GIGA_MAC_VER_46:
+       case RTL_GIGA_MAC_VER_47:
+       case RTL_GIGA_MAC_VER_48:
+       case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
+               RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
+               break;
+       case RTL_GIGA_MAC_VER_40:
+       case RTL_GIGA_MAC_VER_41:
+       case RTL_GIGA_MAC_VER_49:
+               rtl_eri_clear_bits(tp, 0x1a8, 0xfc000000);
+               RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
+               break;
+       default:
+               break;
+       }
  }
  
 -static void rtl_pll_power_up(struct rtl8169_private *tp)
 -{
 -      switch (tp->mac_version) {
 -      case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
 -      case RTL_GIGA_MAC_VER_29 ... RTL_GIGA_MAC_VER_30:
 -      case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
 -      case RTL_GIGA_MAC_VER_37:
 -      case RTL_GIGA_MAC_VER_39:
 -      case RTL_GIGA_MAC_VER_43:
 -              RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
 -              break;
 -      case RTL_GIGA_MAC_VER_44:
 -      case RTL_GIGA_MAC_VER_45:
 -      case RTL_GIGA_MAC_VER_46:
 -      case RTL_GIGA_MAC_VER_47:
 -      case RTL_GIGA_MAC_VER_48:
 -      case RTL_GIGA_MAC_VER_50 ... RTL_GIGA_MAC_VER_63:
 -              RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
 -              break;
 -      case RTL_GIGA_MAC_VER_40:
 -      case RTL_GIGA_MAC_VER_41:
 -      case RTL_GIGA_MAC_VER_49:
 -              RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
 -              rtl_eri_set_bits(tp, 0x1a8, 0xfc000000);
 -              break;
 -      default:
 -              break;
 -      }
 -
 -      phy_resume(tp->phydev);
 -}
 -
  static void rtl_init_rxcfg(struct rtl8169_private *tp)
  {
        switch (tp->mac_version) {
Simple merge
Simple merge
Simple merge
index afe6836e44b15c2da29f40b740f1968c6865d58b,88f4bf0047e7abae7facd325c6a67d5db4d37d56..7ea59cfe1fa7221c8516f25199933b9054342893
@@@ -591,8 -591,8 +591,9 @@@ enum 
        TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED = 1 << 1, /* Part of an existing connection. */
        TCA_FLOWER_KEY_CT_FLAGS_RELATED = 1 << 2, /* Related to an established connection. */
        TCA_FLOWER_KEY_CT_FLAGS_TRACKED = 1 << 3, /* Conntrack has occurred. */
 -
 +      TCA_FLOWER_KEY_CT_FLAGS_INVALID = 1 << 4, /* Conntrack is invalid. */
 +      TCA_FLOWER_KEY_CT_FLAGS_REPLY = 1 << 5, /* Packet is in the reply direction. */
+       __TCA_FLOWER_KEY_CT_FLAGS_MAX,
  };
  
  enum {
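
The new __TCA_FLOWER_KEY_CT_FLAGS_MAX sentinel lets validation code derive the set of known flag bits instead of hard-coding the newest flag. A hypothetical user-space check built on it: each flag is a single bit and the sentinel is the highest flag plus one.

#include <linux/pkt_cls.h>
#include <stdbool.h>

/* Hypothetical validation: bits at or above the highest defined flag
 * are unknown. highest = __TCA_FLOWER_KEY_CT_FLAGS_MAX - 1 is the last
 * flag's value, so (highest << 1) - 1 masks every known bit.
 */
static bool ct_flags_known(unsigned int flags)
{
	unsigned int highest = __TCA_FLOWER_KEY_CT_FLAGS_MAX - 1;

	return (flags & ~((highest << 1) - 1)) == 0;
}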
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
index ce2dea2a6e0a2cf24cdf44c0b9b55542d1fcf3b2,8b2338dfdc8078291e4f98c5dfc11b0ef4039d22..06e233410e0e5ec21f6b88509ca3c5c2aa5a58b9
@@@ -108,23 -100,8 +108,13 @@@ static void subflow_init_req(struct req
        subflow_req->mp_join = 0;
        subflow_req->msk = NULL;
        mptcp_token_init_request(req);
- #ifdef CONFIG_TCP_MD5SIG
-       /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
-        * TCP option space.
-        */
-       if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info))
-               return -EINVAL;
- #endif
-       return 0;
  }
  
 +static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
 +{
 +      return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
 +}
 +
  /* Init mptcp request socket.
   *
   * Returns an error code if a JOIN has failed and a TCP reset
@@@ -1118,12 -1032,49 +1110,52 @@@ static void subflow_data_ready(struct s
  
  static void subflow_write_space(struct sock *ssk)
  {
 -      /* we take action in __mptcp_clean_una() */
 +      struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
 +
 +      mptcp_propagate_sndbuf(sk, ssk);
 +      mptcp_write_space(sk);
  }
  
+ void __mptcp_error_report(struct sock *sk)
+ {
+       struct mptcp_subflow_context *subflow;
+       struct mptcp_sock *msk = mptcp_sk(sk);
+ 
+       mptcp_for_each_subflow(msk, subflow) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+               int err = sock_error(ssk);
+ 
+               if (!err)
+                       continue;
+ 
+               /* only propagate errors on fallen-back sockets or
+                * on MPC connect
+                */
+               if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(msk))
+                       continue;
+ 
+               inet_sk_state_store(sk, inet_sk_state_load(ssk));
+               sk->sk_err = -err;
+ 
+               /* This barrier is coupled with smp_rmb() in mptcp_poll() */
+               smp_wmb();
+               sk->sk_error_report(sk);
+               break;
+       }
+ }
+ 
+ static void subflow_error_report(struct sock *ssk)
+ {
+       struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
+ 
+       mptcp_data_lock(sk);
+       if (!sock_owned_by_user(sk))
+               __mptcp_error_report(sk);
+       else
+               set_bit(MPTCP_ERROR_REPORT,  &mptcp_sk(sk)->flags);
+       mptcp_data_unlock(sk);
+ }
+ 
  static struct inet_connection_sock_af_ops *
  subflow_default_af_ops(struct sock *sk)
  {
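
The smp_wmb() in __mptcp_error_report() orders the sk_err store before the wake-up and, per its comment, pairs with an smp_rmb() in mptcp_poll() on the reader side. A generic, standalone sketch of that publish/consume pairing with hypothetical names.

#include <linux/atomic.h>
#include <asm/barrier.h>

static int error_code;		/* payload published by the writer */
static atomic_t error_ready;	/* flag the reader polls on */

/* Writer: store the payload, then the flag readers test. */
static void publish_error(int err)
{
	error_code = err;
	smp_wmb();		/* order error_code before error_ready */
	atomic_set(&error_ready, 1);
}

/* Reader: observe the flag, then it is safe to read the payload. */
static int consume_error(void)
{
	if (!atomic_read(&error_ready))
		return 0;
	smp_rmb();		/* pairs with smp_wmb() in publish_error() */
	return error_code;
}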
Simple merge
Simple merge
Simple merge
index a554838666c428abe96dad5cd93a32ba318cafe0,b11d8e6b5bc145919111735dcadf9016533481eb..4b58ccae34290e0721fd28ad0f6974ebcc4317bb
@@@ -3,9 -3,7 +3,9 @@@
  
  ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
        match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
-       match_ip_tos_test match_indev_test match_mpls_label_test \
 -      match_ip_tos_test match_indev_test match_ip_ttl_test"
++      match_ip_tos_test match_indev_test match_ip_ttl_test match_mpls_label_test \
 +      match_mpls_tc_test match_mpls_bos_test match_mpls_ttl_test \
 +      match_mpls_lse_test"
  NUM_NETIFS=2
  source tc_common.sh
  source lib.sh