Merge tag 'net-6.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Jun 2024 16:55:27 +0000 (09:55 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Jun 2024 16:55:27 +0000 (09:55 -0700)
Pull networking fixes from Jakub Kicinski:
 "Including fixes from BPF and big collection of fixes for WiFi core and
  drivers.

  Current release - regressions:

   - vxlan: fix regression when dropping packets due to invalid src
     addresses

   - bpf: fix a potential use-after-free in bpf_link_free()

   - xdp: revert support for redirecting to any xsk socket bound to the
     same UMEM, as it can result in corruption

   - virtio_net:
      - add missing lock protection when reading return code from
        control_buf
      - fix false-positive lockdep splat in DIM

   - wifi:
      - Revert "wifi: wilc1000: convert list management to RCU"
      - ath11k: fix error path in ath11k_pcic_ext_irq_config

  Previous releases - regressions:

   - rtnetlink: make the "split" NLM_DONE handling generic, restore the
     old behavior for two cases where we started coalescing those
     messages with normal messages, breaking sloppily-coded userspace

   - wifi:
      - cfg80211: validate HE operation element parsing
      - cfg80211: fix 6 GHz scan request building
      - mt76: mt7615: add missing chanctx ops
      - ath11k: move power type check to ASSOC stage, fix connecting to
        6 GHz AP
      - ath11k: fix WCN6750 firmware crash caused by 17 num_vdevs
      - rtlwifi: ignore IEEE80211_CONF_CHANGE_RETRY_LIMITS
      - iwlwifi: mvm: fix a crash on 7265

  Previous releases - always broken:

   - ncsi: prevent multi-threaded channel probing, a spec violation

   - vmxnet3: disable rx data ring on dma allocation failure

   - ethtool: init tsinfo stats if requested, prevent unintentionally
     reporting all-zero stats on devices which don't implement any

   - dst_cache: fix possible races in less common IPv6 features

   - tcp: auth: don't consider TCP_CLOSE to be in TCP_AO_ESTABLISHED

   - ax25: fix two refcounting bugs

   - eth: ionic: fix kernel panic in XDP_TX action

  Misc:

   - tcp: count CLOSE-WAIT sockets for TCP_MIB_CURRESTAB"

* tag 'net-6.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (107 commits)
  selftests: net: lib: set 'i' as local
  selftests: net: lib: avoid error removing empty netns name
  selftests: net: lib: support errexit with busywait
  net: ethtool: fix the error condition in ethtool_get_phy_stats_ethtool()
  ipv6: fix possible race in __fib6_drop_pcpu_from()
  af_unix: Annotate data-race of sk->sk_shutdown in sk_diag_fill().
  af_unix: Use skb_queue_len_lockless() in sk_diag_show_rqlen().
  af_unix: Use skb_queue_empty_lockless() in unix_release_sock().
  af_unix: Use unix_recvq_full_lockless() in unix_stream_connect().
  af_unix: Annotate data-race of net->unx.sysctl_max_dgram_qlen.
  af_unix: Annotate data-races around sk->sk_sndbuf.
  af_unix: Annotate data-races around sk->sk_state in UNIX_DIAG.
  af_unix: Annotate data-race of sk->sk_state in unix_stream_read_skb().
  af_unix: Annotate data-races around sk->sk_state in sendmsg() and recvmsg().
  af_unix: Annotate data-race of sk->sk_state in unix_accept().
  af_unix: Annotate data-race of sk->sk_state in unix_stream_connect().
  af_unix: Annotate data-races around sk->sk_state in unix_write_space() and poll().
  af_unix: Annotate data-race of sk->sk_state in unix_inq_len().
  af_unix: Annotate data-races around sk->sk_state for writers.
  af_unix: Set sk->sk_state under unix_state_lock() for truly disconnected peer.
  ...
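
Many of the af_unix commits above apply one recurring pattern: fields
such as sk->sk_state are written under the owner's lock but read
locklessly from poll/diag paths, so both sides gain READ_ONCE() /
WRITE_ONCE() annotations to prevent load/store tearing and to silence
KCSAN. A minimal sketch of the pattern (illustrative only, not the
committed af_unix code):

    #include <net/af_unix.h>
    #include <net/tcp_states.h>

    static void example_set_state(struct sock *sk, int state)
    {
            /* Writers stay serialized by the state lock... */
            unix_state_lock(sk);
            /* ...but the store is annotated for lockless readers. */
            WRITE_ONCE(sk->sk_state, state);
            unix_state_unlock(sk);
    }

    static bool example_is_established(const struct sock *sk)
    {
            /* Lockless read path (poll, diag), paired with the
             * WRITE_ONCE() above.
             */
            return READ_ONCE(sk->sk_state) == TCP_ESTABLISHED;
    }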

106 files changed:
Documentation/networking/af_xdp.rst
MAINTAINERS
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_nvm.c
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/phy/micrel.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ath10k/Kconfig
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pcic.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/mediatek/mt76/mt7615/main.c
drivers/net/wireless/microchip/wilc1000/cfg80211.c
drivers/net/wireless/microchip/wilc1000/hif.c
drivers/net/wireless/microchip/wilc1000/netdev.c
drivers/net/wireless/microchip/wilc1000/netdev.h
drivers/net/wireless/microchip/wilc1000/wlan.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wwan/iosm/iosm_ipc_devlink.c
drivers/ptp/ptp_chardev.c
include/net/rtnetlink.h
include/net/tcp_ao.h
kernel/bpf/devmap.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/trace/bpf_trace.c
lib/test_rhashtable.c
net/ax25/af_ax25.c
net/ax25/ax25_dev.c
net/bpf/test_run.c
net/core/dev.c
net/core/dst_cache.c
net/core/rtnetlink.c
net/ethtool/ioctl.c
net/ethtool/tsinfo.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/ipv4/tcp.c
net/ipv4/tcp_ao.c
net/ipv6/ila/ila_lwt.c
net/ipv6/ioam6_iptunnel.c
net/ipv6/ip6_fib.c
net/ipv6/route.c
net/ipv6/rpl_iptunnel.c
net/ipv6/seg6_iptunnel.c
net/mac80211/cfg.c
net/mac80211/he.c
net/mac80211/ieee80211_i.h
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh_pathtbl.c
net/mac80211/parse.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/util.c
net/mptcp/protocol.c
net/ncsi/internal.h
net/ncsi/ncsi-manage.c
net/ncsi/ncsi-rsp.c
net/sched/sch_multiq.c
net/sched/sch_taprio.c
net/smc/af_smc.c
net/unix/af_unix.c
net/unix/diag.c
net/wireless/core.c
net/wireless/pmsr.c
net/wireless/rdev-ops.h
net/wireless/scan.c
net/wireless/sysfs.c
net/wireless/util.c
net/xdp/xsk.c
tools/lib/bpf/features.c
tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c
tools/testing/selftests/net/hsr/config
tools/testing/selftests/net/lib.sh

index 72da7057e4cf9643c179493dce22f9e243f31d70..dceeb0d763aa232f418bc229a59a3a6d6b7540b2 100644
@@ -329,24 +329,23 @@ XDP_SHARED_UMEM option and provide the initial socket's fd in the
 sxdp_shared_umem_fd field as you registered the UMEM on that
 socket. These two sockets will now share one and the same UMEM.
 
-In this case, it is possible to use the NIC's packet steering
-capabilities to steer the packets to the right queue. This is not
-possible in the previous example as there is only one queue shared
-among sockets, so the NIC cannot do this steering as it can only steer
-between queues.
-
-In libxdp (or libbpf prior to version 1.0), you need to use the
-xsk_socket__create_shared() API as it takes a reference to a FILL ring
-and a COMPLETION ring that will be created for you and bound to the
-shared UMEM. You can use this function for all the sockets you create,
-or you can use it for the second and following ones and use
-xsk_socket__create() for the first one. Both methods yield the same
-result.
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
+
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
 
 Note that a UMEM can be shared between sockets on the same queue id
 and device, as well as between queues on the same device and between
-devices at the same time. It is also possible to redirect to any
-socket as long as it is bound to the same umem with XDP_SHARED_UMEM.
+devices at the same time.
 
 XDP_USE_NEED_WAKEUP bind flag
 -----------------------------
@@ -823,10 +822,6 @@ A: The short answer is no, that is not supported at the moment. The
    switch, or other distribution mechanism, in your NIC to direct
    traffic to the correct queue id and socket.
 
-   Note that if you are using the XDP_SHARED_UMEM option, it is
-   possible to switch traffic between any socket bound to the same
-   umem.
-
 Q: My packets are sometimes corrupted. What is wrong?
 
 A: Care has to be taken not to feed the same buffer in the UMEM into
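
As a concrete companion to the restored text above, here is a
condensed sketch of the two-socket flow it describes, assuming libxdp
(<xdp/xsk.h>; libbpf before 1.0 shipped the same calls in <bpf/xsk.h>).
The interface name and queue ids are placeholders, and error handling
plus UMEM/ring population are elided:

    #include <stddef.h>
    #include <xdp/xsk.h>

    static struct xsk_ring_prod tx0, tx1, fill1;
    static struct xsk_ring_cons rx0, rx1, comp1;

    /* umem was created with xsk_umem__create(), which already took
     * the default FILL and COMPLETION rings for the first socket.
     */
    static int setup(struct xsk_umem *umem)
    {
            struct xsk_socket *xsk0, *xsk1;
            int err;

            /* First socket: queue 0, plain create is fine. */
            err = xsk_socket__create(&xsk0, "eth0", 0, umem,
                                     &rx0, &tx0, NULL);
            if (err)
                    return err;

            /* Second socket: queue 1 shares the UMEM, so it must
             * bring its own FILL and COMPLETION rings; the library
             * binds it with XDP_SHARED_UMEM for us.
             */
            return xsk_socket__create_shared(&xsk1, "eth0", 1, umem,
                                             &rx1, &tx1, &fill1,
                                             &comp1, NULL);
    }
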
index a7574732d11724ec8a4e379cf69aeb5257e016bb..aacccb376c28a197f1f47b36e94c9e95a388e082 100644
@@ -15237,7 +15237,6 @@ F:      drivers/staging/most/
 F:     include/linux/most.h
 
 MOTORCOMM PHY DRIVER
-M:     Peter Geis <pgwipeout@gmail.com>
 M:     Frank <Frank.Sae@motor-comm.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
index 6ad8002b22e14a9a86f2ed994a6798d67627b5f3..99a75a59078ef3e6c71c46696b694d00fd082ea1 100644
@@ -409,7 +409,6 @@ struct ice_vsi {
        struct ice_tc_cfg tc_cfg;
        struct bpf_prog *xdp_prog;
        struct ice_tx_ring **xdp_rings;  /* XDP ring array */
-       unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
        u16 num_xdp_txq;                 /* Used XDP queues */
        u8 xdp_mapping_mode;             /* ICE_MAP_MODE_[CONTIG|SCATTER] */
 
@@ -746,6 +745,25 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
        ring->flags |= ICE_TX_FLAGS_RING_XDP;
 }
 
+/**
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+                                                       u16 qid)
+{
+       struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+       if (!ice_is_xdp_ena_vsi(vsi))
+               return NULL;
+
+       return (pool && pool->dev) ? pool : NULL;
+}
+
 /**
  * ice_xsk_pool - get XSK buffer pool bound to a ring
  * @ring: Rx ring to use
@@ -758,10 +776,7 @@ static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
        struct ice_vsi *vsi = ring->vsi;
        u16 qid = ring->q_index;
 
-       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
-               return NULL;
-
-       return xsk_get_pool_from_qid(vsi->netdev, qid);
+       return ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -786,12 +801,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
        if (!ring)
                return;
 
-       if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
-               ring->xsk_pool = NULL;
-               return;
-       }
-
-       ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+       ring->xsk_pool = ice_get_xp_from_qid(vsi, qid);
 }
 
 /**
@@ -920,9 +930,17 @@ int ice_down(struct ice_vsi *vsi);
 int ice_down_up(struct ice_vsi *vsi);
 int ice_vsi_cfg_lan(struct ice_vsi *vsi);
 struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+       ICE_XDP_CFG_FULL,       /* Fully apply new config in .ndo_bpf() */
+       ICE_XDP_CFG_PART,       /* Save/use part of config in VSI rebuild */
+};
+
 int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+                         enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
             u32 flags);
index 687f6cb2b917afc55de7020c401c5095c6163825..5d396c1a7731482f725561a8eff709ecd3cc793e 100644
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
                }
                rx_rings_rem -= rx_rings_per_v;
        }
+
+       if (ice_is_xdp_ena_vsi(vsi))
+               ice_map_xdp_rings(vsi);
 }
 
 /**
index 5371e91f6bbb4b48f4065421ac9a68d82fda17d1..7629b0190578b3d4bf1fc8d54b54af570f1648d4 100644
@@ -114,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
        if (!vsi->q_vectors)
                goto err_vectors;
 
-       vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
-       if (!vsi->af_xdp_zc_qps)
-               goto err_zc_qps;
-
        return 0;
 
-err_zc_qps:
-       devm_kfree(dev, vsi->q_vectors);
 err_vectors:
        devm_kfree(dev, vsi->rxq_map);
 err_rxq_map:
@@ -309,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 
        dev = ice_pf_to_dev(pf);
 
-       bitmap_free(vsi->af_xdp_zc_qps);
-       vsi->af_xdp_zc_qps = NULL;
        /* free the ring and vector containers */
        devm_kfree(dev, vsi->q_vectors);
        vsi->q_vectors = NULL;
@@ -2282,22 +2274,23 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
                if (ret)
                        goto unroll_vector_base;
 
-               ice_vsi_map_rings_to_vectors(vsi);
-
-               /* Associate q_vector rings to napi */
-               ice_vsi_set_napi_queues(vsi);
-
-               vsi->stat_offsets_loaded = false;
-
                if (ice_is_xdp_ena_vsi(vsi)) {
                        ret = ice_vsi_determine_xdp_res(vsi);
                        if (ret)
                                goto unroll_vector_base;
-                       ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+                       ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+                                                   ICE_XDP_CFG_PART);
                        if (ret)
                                goto unroll_vector_base;
                }
 
+               ice_vsi_map_rings_to_vectors(vsi);
+
+               /* Associate q_vector rings to napi */
+               ice_vsi_set_napi_queues(vsi);
+
+               vsi->stat_offsets_loaded = false;
+
                /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
                if (vsi->type != ICE_VSI_CTRL)
                        /* Do not exit if configuring RSS had an issue, at
@@ -2437,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
                /* return value check can be skipped here, it always returns
                 * 0 if reset is in progress
                 */
-               ice_destroy_xdp_rings(vsi);
+               ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
 
        ice_vsi_clear_rings(vsi);
        ice_vsi_free_q_vectors(vsi);
index f60c022f79609695bcad9f8ff581c389de02e35c..1b61ca3a6eb6e15353be17e6d7f72a27708bff8b 100644
@@ -2707,17 +2707,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
                bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+       struct ice_q_vector *q_vector;
+       struct ice_tx_ring *ring;
+
+       if (static_key_enabled(&ice_xdp_locking_key))
+               return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+       q_vector = vsi->rx_rings[qid]->q_vector;
+       ice_for_each_tx_ring(ring, q_vector->tx)
+               if (ice_ring_is_xdp(ring))
+                       return ring;
+
+       return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+       int xdp_rings_rem = vsi->num_xdp_txq;
+       int v_idx, q_idx;
+
+       /* follow the logic from ice_vsi_map_rings_to_vectors */
+       ice_for_each_q_vector(vsi, v_idx) {
+               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+               int xdp_rings_per_v, q_id, q_base;
+
+               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+                                              vsi->num_q_vectors - v_idx);
+               q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+                       xdp_ring->q_vector = q_vector;
+                       xdp_ring->next = q_vector->tx.tx_ring;
+                       q_vector->tx.tx_ring = xdp_ring;
+               }
+               xdp_rings_rem -= xdp_rings_per_v;
+       }
+
+       ice_for_each_rxq(vsi, q_idx) {
+               vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+                                                                      q_idx);
+               ice_tx_xsk_pool(vsi, q_idx);
+       }
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
  *
  * Return 0 on success and negative value on error
  */
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+                         enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-       int xdp_rings_rem = vsi->num_xdp_txq;
        struct ice_pf *pf = vsi->back;
        struct ice_qs_cfg xdp_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
@@ -2730,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
                .mapping_mode = ICE_VSI_MAP_CONTIG
        };
        struct device *dev;
-       int i, v_idx;
-       int status;
+       int status, i;
 
        dev = ice_pf_to_dev(pf);
        vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2750,49 +2804,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
        if (ice_xdp_alloc_setup_rings(vsi))
                goto clear_xdp_rings;
 
-       /* follow the logic from ice_vsi_map_rings_to_vectors */
-       ice_for_each_q_vector(vsi, v_idx) {
-               struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-               int xdp_rings_per_v, q_id, q_base;
-
-               xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-                                              vsi->num_q_vectors - v_idx);
-               q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-               for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-                       struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-                       xdp_ring->q_vector = q_vector;
-                       xdp_ring->next = q_vector->tx.tx_ring;
-                       q_vector->tx.tx_ring = xdp_ring;
-               }
-               xdp_rings_rem -= xdp_rings_per_v;
-       }
-
-       ice_for_each_rxq(vsi, i) {
-               if (static_key_enabled(&ice_xdp_locking_key)) {
-                       vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-               } else {
-                       struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-                       struct ice_tx_ring *ring;
-
-                       ice_for_each_tx_ring(ring, q_vector->tx) {
-                               if (ice_ring_is_xdp(ring)) {
-                                       vsi->rx_rings[i]->xdp_ring = ring;
-                                       break;
-                               }
-                       }
-               }
-               ice_tx_xsk_pool(vsi, i);
-       }
-
        /* omit the scheduler update if in reset path; XDP queues will be
         * taken into account at the end of ice_vsi_rebuild, where
         * ice_cfg_vsi_lan is being called
         */
-       if (ice_is_reset_in_progress(pf->state))
+       if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
+       ice_map_xdp_rings(vsi);
+
        /* tell the Tx scheduler that right now we have
         * additional queues
         */
@@ -2842,22 +2862,21 @@ err_map_xdp:
 /**
  * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
  * @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
  *
  * Detach XDP rings from irq vectors, clean up the PF bitmap and free
  * resources
  */
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
        u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
        struct ice_pf *pf = vsi->back;
        int i, v_idx;
 
        /* q_vectors are freed in reset path so there's no point in detaching
-        * rings; in case of rebuild being triggered not from reset bits
-        * in pf->state won't be set, so additionally check first q_vector
-        * against NULL
+        * rings
         */
-       if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+       if (cfg_type == ICE_XDP_CFG_PART)
                goto free_qmap;
 
        ice_for_each_q_vector(vsi, v_idx) {
@@ -2898,7 +2917,7 @@ free_qmap:
        if (static_key_enabled(&ice_xdp_locking_key))
                static_branch_dec(&ice_xdp_locking_key);
 
-       if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+       if (cfg_type == ICE_XDP_CFG_PART)
                return 0;
 
        ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -3009,7 +3028,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                if (xdp_ring_err) {
                        NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
                } else {
-                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+                       xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+                                                            ICE_XDP_CFG_FULL);
                        if (xdp_ring_err)
                                NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
                }
@@ -3020,7 +3040,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                        NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
        } else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
                xdp_features_clear_redirect_target(vsi->netdev);
-               xdp_ring_err = ice_destroy_xdp_rings(vsi);
+               xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
                if (xdp_ring_err)
                        NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
                /* reallocate Rx queues that were used for zero-copy */
index 84eab92dc03cfe99f791518f3a21358dd7a91822..59e8879ac0598a8d6e7fac474ba0acbcb9981a02 100644
@@ -374,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
  *
  * Read the specified word from the copy of the Shadow RAM found in the
  * specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
  */
 static int
 ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
 {
-       return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+       u32 sr_copy;
+
+       switch (bank) {
+       case ICE_ACTIVE_FLASH_BANK:
+               sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+               break;
+       case ICE_INACTIVE_FLASH_BANK:
+               sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+               break;
+       }
+
+       return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
 }
 
 /**
@@ -440,8 +454,7 @@ int
 ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                       u16 module_type)
 {
-       u16 pfa_len, pfa_ptr;
-       u16 next_tlv;
+       u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
        int status;
 
        status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -454,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
                return status;
        }
+
+       /* The Preserved Fields Area contains a sequence of Type-Length-Value
+        * structures which define its contents. The PFA length includes all
+        * of the TLVs, plus the initial length word itself, *and* one final
+        * word at the end after all of the TLVs.
+        */
+       if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+               dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+                        pfa_ptr, pfa_len);
+               return -EINVAL;
+       }
+
        /* Starting with first TLV after PFA length, iterate through the list
         * of TLVs to find the requested one.
         */
        next_tlv = pfa_ptr + 1;
-       while (next_tlv < pfa_ptr + pfa_len) {
+       while (next_tlv < max_tlv) {
                u16 tlv_sub_module_type;
                u16 tlv_len;
 
@@ -482,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
                        }
                        return -EINVAL;
                }
-               /* Check next TLV, i.e. current TLV pointer + length + 2 words
-                * (for current TLV's type and length)
-                */
-               next_tlv = next_tlv + tlv_len + 2;
+
+               if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+                   check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+                       dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+                                tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+                       return -EINVAL;
+               }
        }
        /* Module does not exist */
        return -ENOENT;
@@ -1009,6 +1037,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
        return 0;
 }
 
+/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the Authentication
+ * header size, and then convert to words.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+                       u32 *hdr_len)
+{
+       u16 hdr_len_l, hdr_len_h;
+       u32 hdr_len_dword;
+       int status;
+
+       status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+                                    &hdr_len_l);
+       if (status)
+               return status;
+
+       status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+                                    &hdr_len_h);
+       if (status)
+               return status;
+
+       /* CSS header length is in DWORD, so convert to words and add
+        * authentication header size
+        */
+       hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+       *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+       return 0;
+}
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+       struct ice_bank_info *banks = &hw->flash.banks;
+       int status;
+
+       status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+                                        &banks->active_css_hdr_len);
+       if (status)
+               return status;
+
+       status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+                                        &banks->inactive_css_hdr_len);
+       if (status)
+               return status;
+
+       return 0;
+}
+
 /**
  * ice_init_nvm - initializes NVM setting
  * @hw: pointer to the HW struct
@@ -1055,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
                return status;
        }
 
+       status = ice_determine_css_hdr_len(hw);
+       if (status) {
+               ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+               return status;
+       }
+
        status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
        if (status) {
                ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
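
The PFA hardening above hinges on check_add_overflow(), which fails
the walk as soon as a 16-bit sum would wrap instead of letting
next_tlv lap the flash area. The kernel macro wraps the compiler
builtin, so the failure mode is easy to reproduce in userspace (the
offsets below are made up, not real NVM contents):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical PFA near the top of the 16-bit word space:
             * 0xFFF0 + (0x20 - 1) = 0x1000F does not fit in a u16.
             */
            uint16_t pfa_ptr = 0xFFF0, pfa_len = 0x20, max_tlv;

            if (__builtin_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv))
                    printf("overflow: TLV walk would wrap, bail out\n");
            else
                    printf("max_tlv = 0x%04x\n", max_tlv);
            return 0;
    }
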
index f0796a93f4287e22e490f24a0dc1260cf5d7c777..eef397e5baa07d7fba682cd7262e216329e10689 100644
@@ -482,6 +482,8 @@ struct ice_bank_info {
        u32 orom_size;                          /* Size of OROM bank */
        u32 netlist_ptr;                        /* Pointer to 1st Netlist bank */
        u32 netlist_size;                       /* Size of Netlist bank */
+       u32 active_css_hdr_len;                 /* Active CSS header length */
+       u32 inactive_css_hdr_len;               /* Inactive CSS header length */
        enum ice_flash_bank nvm_bank;           /* Active NVM bank */
        enum ice_flash_bank orom_bank;          /* Active OROM bank */
        enum ice_flash_bank netlist_bank;       /* Active Netlist bank */
@@ -1087,17 +1089,13 @@ struct ice_aq_get_set_rss_lut_params {
 #define ICE_SR_SECTOR_SIZE_IN_WORDS    0x800
 
 /* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L                  0x02
+#define ICE_NVM_CSS_HDR_LEN_H                  0x03
 #define ICE_NVM_CSS_SREV_L                     0x14
 #define ICE_NVM_CSS_SREV_H                     0x15
 
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH                  330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET            roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH            (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN                        0x08
 
 /* The Link Topology Netlist section is stored as a series of words. It is
  * stored in the NVM as a TLV, with the first two words containing the type
index 7541f223bf4f69cb63f985f553e4e30e8036e9c8..a65955eb23c0bd85adc7f0f8f1a4b39867da8907 100644
@@ -269,7 +269,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
        if (!pool)
                return -EINVAL;
 
-       clear_bit(qid, vsi->af_xdp_zc_qps);
        xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
 
        return 0;
@@ -300,8 +299,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
        if (err)
                return err;
 
-       set_bit(qid, vsi->af_xdp_zc_qps);
-
        return 0;
 }
 
@@ -349,11 +346,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
 {
        struct ice_rx_ring *rx_ring;
-       unsigned long q;
+       uint i;
+
+       ice_for_each_rxq(vsi, i) {
+               rx_ring = vsi->rx_rings[i];
+               if (!rx_ring->xsk_pool)
+                       continue;
 
-       for_each_set_bit(q, vsi->af_xdp_zc_qps,
-                        max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
-               rx_ring = vsi->rx_rings[q];
                if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
                        return -ENOMEM;
        }
index f2c4f1966bb041629411c252ca7c7f06b5fdf51e..0cd2bd695db1dfddd64d2c82027f4206b6609268 100644
@@ -1629,12 +1629,17 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
        struct igc_hw *hw = &adapter->hw;
        u32 eeer;
 
+       linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+                        edata->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+                        edata->supported);
+       linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+                        edata->supported);
+
        if (hw->dev_spec._base.eee_enable)
                mii_eee_cap1_mod_linkmode_t(edata->advertised,
                                            adapter->eee_advert);
 
-       *edata = adapter->eee;
-
        eeer = rd32(IGC_EEER);
 
        /* EEE status on negotiated link */
index 12f004f46082cdfdd68493f43ef2e1653b2e815e..305e05294a26595fdc361851e2c75dd9355b9e20 100644
@@ -12,6 +12,7 @@
 #include <linux/bpf_trace.h>
 #include <net/xdp_sock_drv.h>
 #include <linux/pci.h>
+#include <linux/mdio.h>
 
 #include <net/ipv6.h>
 
@@ -4975,6 +4976,9 @@ void igc_up(struct igc_adapter *adapter)
        /* start the watchdog. */
        hw->mac.get_link_status = true;
        schedule_work(&adapter->watchdog_task);
+
+       adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T |
+                             MDIO_EEE_2_5GT;
 }
 
 /**
index e8b73b9d75e3118f56ee42a322d05491b0c325f0..97722ce8c4cb34e2fac06adfb08c189d18989a5b 100644
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
         * - when available free entries are less.
         * Lower priority ones out of available free entries are always
         * chosen when 'high vs low' question arises.
+        *
+        * For a VF base MCAM match rule is set by its PF. And all the
+        * further MCAM rules installed by VF on its own are
+        * concatenated with the base rule set by its PF. Hence PF entries
+        * should be at lower priority compared to VF entries. Otherwise
+        * base rule is hit always and rules installed by VF will be of
+        * no use. Hence if the request is from PF then allocate low
+        * priority entries.
         */
+       if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+               goto lprio_alloc;
 
        /* Get the search range for priority allocation request */
        if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
                goto alloc;
        }
 
-       /* For a VF base MCAM match rule is set by its PF. And all the
-        * further MCAM rules installed by VF on its own are
-        * concatenated with the base rule set by its PF. Hence PF entries
-        * should be at lower priority compared to VF entries. Otherwise
-        * base rule is hit always and rules installed by VF will be of
-        * no use. Hence if the request is from PF and NOT a priority
-        * allocation request then allocate low priority entries.
-        */
-       if (!(pcifunc & RVU_PFVF_FUNC_MASK))
-               goto lprio_alloc;
-
        /* Find out the search range for non-priority allocation request
         *
         * Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
                reverse = true;
                start = 0;
                end = mcam->bmap_entries;
+               /* Ensure PF requests are always at bottom and if PF requests
+                * for higher/lower priority entry wrt reference entry then
+                * honour that criteria and start search for entries from bottom
+                * and not in mid zone.
+                */
+               if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+                   req->priority == NPC_MCAM_HIGHER_PRIO)
+                       end = req->ref_entry;
+
+               if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+                   req->priority == NPC_MCAM_LOWER_PRIO)
+                       start = req->ref_entry;
        }
 
 alloc:
index cae46290a7aee28a2d4feb4ab65ec5340dc91bf8..c84ce54a84a00e88fd463723fd66dbdd6a86c44a 100644
@@ -1131,9 +1131,9 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 {
        const struct mtk_soc_data *soc = eth->soc;
        dma_addr_t phy_ring_tail;
-       int cnt = MTK_QDMA_RING_SIZE;
+       int cnt = soc->tx.fq_dma_size;
        dma_addr_t dma_addr;
-       int i;
+       int i, j, len;
 
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
                eth->scratch_ring = eth->sram_base;
@@ -1142,40 +1142,46 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
                                                       cnt * soc->tx.desc_size,
                                                       &eth->phy_scratch_ring,
                                                       GFP_KERNEL);
+
        if (unlikely(!eth->scratch_ring))
                return -ENOMEM;
 
-       eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
-       if (unlikely(!eth->scratch_head))
-               return -ENOMEM;
+       phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
 
-       dma_addr = dma_map_single(eth->dma_dev,
-                                 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
-               return -ENOMEM;
+       for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+               len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+               eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
 
-       phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
+               if (unlikely(!eth->scratch_head[j]))
+                       return -ENOMEM;
 
-       for (i = 0; i < cnt; i++) {
-               dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
-               struct mtk_tx_dma_v2 *txd;
+               dma_addr = dma_map_single(eth->dma_dev,
+                                         eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+                                         DMA_FROM_DEVICE);
 
-               txd = eth->scratch_ring + i * soc->tx.desc_size;
-               txd->txd1 = addr;
-               if (i < cnt - 1)
-                       txd->txd2 = eth->phy_scratch_ring +
-                                   (i + 1) * soc->tx.desc_size;
+               if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+                       return -ENOMEM;
 
-               txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
-               if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
-                       txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
-               txd->txd4 = 0;
-               if (mtk_is_netsys_v2_or_greater(eth)) {
-                       txd->txd5 = 0;
-                       txd->txd6 = 0;
-                       txd->txd7 = 0;
-                       txd->txd8 = 0;
+               for (i = 0; i < cnt; i++) {
+                       struct mtk_tx_dma_v2 *txd;
+
+                       txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+                       txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+                       if (j * MTK_FQ_DMA_LENGTH + i < cnt)
+                               txd->txd2 = eth->phy_scratch_ring +
+                                           (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+                       txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+                       if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+                               txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+                       txd->txd4 = 0;
+                       if (mtk_is_netsys_v2_or_greater(eth)) {
+                               txd->txd5 = 0;
+                               txd->txd6 = 0;
+                               txd->txd7 = 0;
+                               txd->txd8 = 0;
+                       }
                }
        }
 
@@ -2457,7 +2463,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
        if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
                ring_size = MTK_QDMA_RING_SIZE;
        else
-               ring_size = MTK_DMA_SIZE;
+               ring_size = soc->tx.dma_size;
 
        ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
                               GFP_KERNEL);
@@ -2465,8 +2471,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
                goto no_tx_mem;
 
        if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
-               ring->dma = eth->sram_base + ring_size * sz;
-               ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+               ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+               ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
        } else {
                ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
                                               &ring->phys, GFP_KERNEL);
@@ -2588,6 +2594,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
        const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+       const struct mtk_soc_data *soc = eth->soc;
        struct mtk_rx_ring *ring;
        int rx_data_len, rx_dma_size, tx_ring_size;
        int i;
@@ -2595,7 +2602,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
        if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
                tx_ring_size = MTK_QDMA_RING_SIZE;
        else
-               tx_ring_size = MTK_DMA_SIZE;
+               tx_ring_size = soc->tx.dma_size;
 
        if (rx_flag == MTK_RX_FLAGS_QDMA) {
                if (ring_no)
@@ -2610,7 +2617,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
                rx_dma_size = MTK_HW_LRO_DMA_SIZE;
        } else {
                rx_data_len = ETH_DATA_LEN;
-               rx_dma_size = MTK_DMA_SIZE;
+               rx_dma_size = soc->rx.dma_size;
        }
 
        ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -3139,7 +3146,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
                        mtk_rx_clean(eth, &eth->rx_ring[i], false);
        }
 
-       kfree(eth->scratch_head);
+       for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+               kfree(eth->scratch_head[i]);
+               eth->scratch_head[i] = NULL;
+       }
 }
 
 static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -5052,11 +5062,14 @@ static const struct mtk_soc_data mt2701_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5076,11 +5089,14 @@ static const struct mtk_soc_data mt7621_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5102,11 +5118,14 @@ static const struct mtk_soc_data mt7622_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5127,11 +5146,14 @@ static const struct mtk_soc_data mt7623_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5150,11 +5172,14 @@ static const struct mtk_soc_data mt7629_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
                .irq_done_mask = MTK_RX_DONE_INT,
                .dma_l4_valid = RX_DMA_L4_VALID,
+               .dma_size = MTK_DMA_SIZE(2K),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@ -5176,6 +5201,8 @@ static const struct mtk_soc_data mt7981_data = {
                .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
@@ -5183,6 +5210,7 @@ static const struct mtk_soc_data mt7981_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
@@ -5202,6 +5230,8 @@ static const struct mtk_soc_data mt7986_data = {
                .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
@@ -5209,6 +5239,7 @@ static const struct mtk_soc_data mt7986_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
@@ -5228,6 +5259,8 @@ static const struct mtk_soc_data mt7988_data = {
                .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
+               .fq_dma_size = MTK_DMA_SIZE(4K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma_v2),
@@ -5235,6 +5268,7 @@ static const struct mtk_soc_data mt7988_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
@@ -5249,6 +5283,7 @@ static const struct mtk_soc_data rt5350_data = {
                .desc_size = sizeof(struct mtk_tx_dma),
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
        .rx = {
                .desc_size = sizeof(struct mtk_rx_dma),
@@ -5256,6 +5291,7 @@ static const struct mtk_soc_data rt5350_data = {
                .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
+               .dma_size = MTK_DMA_SIZE(2K),
        },
 };
 
index 4eab30b44070633a2357f8517a6ad1e5c8340149..f5174f6cb1bbec5bfc525f31e6b7359b8e900634 100644
@@ -32,7 +32,9 @@
 #define MTK_TX_DMA_BUF_LEN     0x3fff
 #define MTK_TX_DMA_BUF_LEN_V2  0xffff
 #define MTK_QDMA_RING_SIZE     2048
-#define MTK_DMA_SIZE           512
+#define MTK_DMA_SIZE(x)                (SZ_##x)
+#define MTK_FQ_DMA_HEAD                32
+#define MTK_FQ_DMA_LENGTH      2048
 #define MTK_RX_ETH_HLEN                (ETH_HLEN + ETH_FCS_LEN)
 #define MTK_RX_HLEN            (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
 #define MTK_DMA_DUMMY_DESC     0xffffffff
@@ -1176,6 +1178,8 @@ struct mtk_soc_data {
                u32     desc_size;
                u32     dma_max_len;
                u32     dma_len_offset;
+               u32     dma_size;
+               u32     fq_dma_size;
        } tx;
        struct {
                u32     desc_size;
@@ -1183,6 +1187,7 @@ struct mtk_soc_data {
                u32     dma_l4_valid;
                u32     dma_max_len;
                u32     dma_len_offset;
+               u32     dma_size;
        } rx;
 };
 
@@ -1264,7 +1269,7 @@ struct mtk_eth {
        struct napi_struct              rx_napi;
        void                            *scratch_ring;
        dma_addr_t                      phy_scratch_ring;
-       void                            *scratch_head;
+       void                            *scratch_head[MTK_FQ_DMA_HEAD];
        struct clk                      *clks[MTK_CLK_MAX];
 
        struct mii_bus                  *mii_bus;
index 2d95a9b7b44e197f67a0a6c2e6c3a1c15379a7bb..b61b7d96611413deda2c987282326d84a019531c 100644
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
        do {
                if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                        break;
+               if (pci_channel_offline(dev->pdev)) {
+                       mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+                       return -EACCES;
+               }
 
                cond_resched();
        } while (!time_after(jiffies, end));
index ad38e31822df10bbf72ab9a3416bc7143efb6b63..a6329ca2d9bffbda0b5a69b4973f061144a9fa67 100644
@@ -248,6 +248,10 @@ recover_from_sw_reset:
        do {
                if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                        break;
+               if (pci_channel_offline(dev->pdev)) {
+                       mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+                       goto unlock;
+               }
 
                msleep(20);
        } while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
                        mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
                        return -ENODEV;
                }
+               if (pci_channel_offline(dev->pdev)) {
+                       mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+                       return -EACCES;
+               }
                msleep(100);
        }
        return 0;
index c16b462ddedf7e6914f0c0b796f68ee2ae2bf189..ab2717012b79b5bd115b73a5cd71e245c6045979 100644
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
                                                                      &dest, 1);
                        if (IS_ERR(lag_definer->rules[idx])) {
                                err = PTR_ERR(lag_definer->rules[idx]);
-                               while (i--)
-                                       while (j--)
+                               do {
+                                       while (j--) {
+                                               idx = i * ldev->buckets + j;
                                                mlx5_del_flow_rules(lag_definer->rules[idx]);
+                                       }
+                                       j = ldev->buckets;
+                               } while (i--);
                                goto destroy_fg;
                        }
                }
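
The port_sel change above fixes a classic two-dimensional unwind bug:
the old `while (i--) while (j--)` never re-armed j, so only the
partially filled row was freed and every completed earlier row leaked.
The corrected shape, reduced to a self-contained sketch (sizes and the
injected failure are hypothetical):

    #include <stdio.h>
    #include <stdlib.h>

    #define ROWS 3
    #define COLS 4

    /* Fail deliberately at row 1, column 3 to exercise the unwind. */
    static void *alloc_cell(int idx)
    {
            return idx == 7 ? NULL : malloc(1);
    }

    int main(void)
    {
            void *cell[ROWS * COLS] = { NULL };
            int i, j;

            for (i = 0; i < ROWS; i++)
                    for (j = 0; j < COLS; j++) {
                            cell[i * COLS + j] = alloc_cell(i * COLS + j);
                            if (!cell[i * COLS + j])
                                    goto err;
                    }
            return 0;

    err:
            /* Free the partial row from the failing column down, then
             * re-arm j to the row width so each earlier row is freed
             * in full -- the same do/while shape as the mlx5 fix.
             */
            do {
                    while (j--)
                            free(cell[i * COLS + j]);
                    j = COLS;
            } while (i--);
            return 1;
    }
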
index 6b774e0c2766594250271a2931b77b4540e7ba7c..d0b595ba611014bbfe16712506daf035a012fd7e 100644
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
                        ret = -EBUSY;
                        goto pci_unlock;
                }
+               if (pci_channel_offline(dev->pdev)) {
+                       ret = -EACCES;
+                       goto pci_unlock;
+               }
 
                /* Check if semaphore is already locked */
                ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
index 6574c145dc1e2da27d2d3a6071028dc010d0a825..459a836a5d9c15321409dc75ebc6a84e66c3fac7 100644
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
 
        if (!err)
                mlx5_function_disable(dev, boot);
+       else
+               mlx5_stop_health_poll(dev, boot);
+
        return err;
 }
 
index 5dba6d2d633cb6d26487b9e6db87307ab12ddef0..2427610f4306d97191f20ac6d25476e304b34870 100644
@@ -586,6 +586,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
                        netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
                        goto out_xdp_abort;
                }
+               buf_info->page = NULL;
                stats->xdp_tx++;
 
                /* the Tx completion will free the buffers */
index 2b8f8b7f1517cc204c92a9b26b9800e681230fb2..5aada7cf3da72632510612ac5863953f9d964808 100644
@@ -866,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
 {
        int ret;
 
+       /* Chip can be powered down by the bootstrap code. */
+       ret = phy_read(phydev, MII_BMCR);
+       if (ret < 0)
+               return ret;
+       if (ret & BMCR_PDOWN) {
+               ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
+               if (ret < 0)
+                       return ret;
+               usleep_range(1000, 2000);
+       }
+
        ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
        if (ret)
                return ret;
@@ -1939,7 +1950,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
        {0x1c, 0x20, 0xeeee},
 };
 
-static int ksz9477_config_init(struct phy_device *phydev)
+static int ksz9477_phy_errata(struct phy_device *phydev)
 {
        int err;
        int i;
@@ -1967,16 +1978,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
                        return err;
        }
 
+       err = genphy_restart_aneg(phydev);
+       if (err)
+               return err;
+
+       return err;
+}
+
+static int ksz9477_config_init(struct phy_device *phydev)
+{
+       int err;
+
+       /* Only KSZ9897 family of switches needs this fix. */
+       if ((phydev->phy_id & 0xf) == 1) {
+               err = ksz9477_phy_errata(phydev);
+               if (err)
+                       return err;
+       }
+
        /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
         * in this switch shall be regarded as broken.
         */
        if (phydev->dev_flags & MICREL_NO_EEE)
                phydev->eee_broken_modes = -1;
 
-       err = genphy_restart_aneg(phydev);
-       if (err)
-               return err;
-
        return kszphy_config_init(phydev);
 }
 
@@ -2085,6 +2110,71 @@ static int kszphy_resume(struct phy_device *phydev)
        return 0;
 }
 
+static int ksz9477_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* No need to initialize registers if not powered down. */
+       ret = phy_read(phydev, MII_BMCR);
+       if (ret < 0)
+               return ret;
+       if (!(ret & BMCR_PDOWN))
+               return 0;
+
+       genphy_resume(phydev);
+
+       /* After switching from power-down to normal mode, an internal global
+        * reset is automatically generated. Wait a minimum of 1 ms before
+        * read/write access to the PHY registers.
+        */
+       usleep_range(1000, 2000);
+
+       /* Only the KSZ9897 family of switches needs this fix. */
+       if ((phydev->phy_id & 0xf) == 1) {
+               ret = ksz9477_phy_errata(phydev);
+               if (ret)
+                       return ret;
+       }
+
+       /* Enable PHY Interrupts */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->interrupts = PHY_INTERRUPT_ENABLED;
+               if (phydev->drv->config_intr)
+                       phydev->drv->config_intr(phydev);
+       }
+
+       return 0;
+}
+
+static int ksz8061_resume(struct phy_device *phydev)
+{
+       int ret;
+
+       /* This function can be called twice when the Ethernet device is on. */
+       ret = phy_read(phydev, MII_BMCR);
+       if (ret < 0)
+               return ret;
+       if (!(ret & BMCR_PDOWN))
+               return 0;
+
+       genphy_resume(phydev);
+       usleep_range(1000, 2000);
+
+       /* Re-program the value after the chip is reset. */
+       ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+       if (ret)
+               return ret;
+
+       /* Enable PHY Interrupts */
+       if (phy_interrupt_is_valid(phydev)) {
+               phydev->interrupts = PHY_INTERRUPT_ENABLED;
+               if (phydev->drv->config_intr)
+                       phydev->drv->config_intr(phydev);
+       }
+
+       return 0;
+}
+
 static int kszphy_probe(struct phy_device *phydev)
 {
        const struct kszphy_type *type = phydev->drv->driver_data;
@@ -5339,7 +5429,7 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = kszphy_config_intr,
        .handle_interrupt = kszphy_handle_interrupt,
        .suspend        = kszphy_suspend,
-       .resume         = kszphy_resume,
+       .resume         = ksz8061_resume,
 }, {
        .phy_id         = PHY_ID_KSZ9021,
        .phy_id_mask    = 0x000ffffe,
@@ -5493,7 +5583,7 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = kszphy_config_intr,
        .handle_interrupt = kszphy_handle_interrupt,
        .suspend        = genphy_suspend,
-       .resume         = genphy_resume,
+       .resume         = ksz9477_resume,
        .get_features   = ksz9477_get_features,
 } };
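
Both new resume handlers follow the same shape; here is a hedged sketch of that common pattern, with the chip-specific re-initialization abstracted behind a hypothetical reinit() callback:

#include <linux/phy.h>
#include <linux/delay.h>

static int ksz_resume_pattern(struct phy_device *phydev,
			      int (*reinit)(struct phy_device *))
{
	int ret = phy_read(phydev, MII_BMCR);

	if (ret < 0)
		return ret;
	if (!(ret & BMCR_PDOWN))	/* never powered down, nothing to redo */
		return 0;

	genphy_resume(phydev);
	usleep_range(1000, 2000);	/* wait out the internal reset */

	ret = reinit(phydev);		/* hypothetical chip-specific fixups */
	if (ret)
		return ret;

	if (phy_interrupt_is_valid(phydev)) {
		phydev->interrupts = PHY_INTERRUPT_ENABLED;
		if (phydev->drv->config_intr)
			phydev->drv->config_intr(phydev);
	}
	return 0;
}
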
 
index 4a802c0ea2cbc381ecc8a48c7ca28bc75c4808f8..61a57d134544f958682f1b2caa66e3548fc199db 100644 (file)
@@ -2686,6 +2686,7 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
 {
        struct scatterlist *sgs[5], hdr, stat;
        u32 out_num = 0, tmp, in_num = 0;
+       bool ok;
        int ret;
 
        /* Caller should know better */
@@ -2731,8 +2732,9 @@ static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd
        }
 
 unlock:
+       ok = vi->ctrl->status == VIRTIO_NET_OK;
        mutex_unlock(&vi->cvq_lock);
-       return vi->ctrl->status == VIRTIO_NET_OK;
+       return ok;
 }
 
 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
@@ -4257,7 +4259,6 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
        struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
        bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
        struct scatterlist sgs_rx;
-       int ret = 0;
        int i;
 
        if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
@@ -4267,27 +4268,27 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
                               ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets))
                return -EINVAL;
 
-       /* Acquire all queues dim_locks */
-       for (i = 0; i < vi->max_queue_pairs; i++)
-               mutex_lock(&vi->rq[i].dim_lock);
-
        if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
                vi->rx_dim_enabled = true;
-               for (i = 0; i < vi->max_queue_pairs; i++)
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       mutex_lock(&vi->rq[i].dim_lock);
                        vi->rq[i].dim_enabled = true;
-               goto unlock;
+                       mutex_unlock(&vi->rq[i].dim_lock);
+               }
+               return 0;
        }
 
        coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
-       if (!coal_rx) {
-               ret = -ENOMEM;
-               goto unlock;
-       }
+       if (!coal_rx)
+               return -ENOMEM;
 
        if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
                vi->rx_dim_enabled = false;
-               for (i = 0; i < vi->max_queue_pairs; i++)
+               for (i = 0; i < vi->max_queue_pairs; i++) {
+                       mutex_lock(&vi->rq[i].dim_lock);
                        vi->rq[i].dim_enabled = false;
+                       mutex_unlock(&vi->rq[i].dim_lock);
+               }
        }
 
        /* Since the per-queue coalescing params can be set,
@@ -4300,22 +4301,19 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
                                  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
-                                 &sgs_rx)) {
-               ret = -EINVAL;
-               goto unlock;
-       }
+                                 &sgs_rx))
+               return -EINVAL;
 
        vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
        vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
        for (i = 0; i < vi->max_queue_pairs; i++) {
+               mutex_lock(&vi->rq[i].dim_lock);
                vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
                vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
-       }
-unlock:
-       for (i = vi->max_queue_pairs - 1; i >= 0; i--)
                mutex_unlock(&vi->rq[i].dim_lock);
+       }
 
-       return ret;
+       return 0;
 }
 
 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
@@ -4417,9 +4415,9 @@ static void virtnet_rx_dim_work(struct work_struct *work)
                if (err)
                        pr_debug("%s: Failed to send dim parameters on rxq%d\n",
                                 dev->name, qnum);
-               dim->state = DIM_START_MEASURE;
        }
 out:
+       dim->state = DIM_START_MEASURE;
        mutex_unlock(&rq->dim_lock);
 }
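
The control-queue change earlier in this file follows a general rule: a value protected by a lock must be snapshotted into a local before the unlock, never read after it. A minimal sketch with generic names, not the virtio structures:

	bool ok;

	mutex_lock(&lock);
	/* submit command, wait for the device to consume it */
	ok = (status == EXPECTED);	/* snapshot while still protected */
	mutex_unlock(&lock);
	return ok;			/* never touch status after unlock */
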
 
index 89ca6e75fcc6b066bad0cb3c2b18734df3a0dec3..63822d454c00c765e2860b50a2a67487bcbafdae 100644 (file)
@@ -2034,8 +2034,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
                                          rq->data_ring.base,
                                          rq->data_ring.basePA);
                        rq->data_ring.base = NULL;
-                       rq->data_ring.desc_size = 0;
                }
+               rq->data_ring.desc_size = 0;
        }
 }
 
index f78dd0438843b151d15d343d6d2e713d57ab1b3a..567cb3faab709c43ae7deff0fd5a155327899e12 100644 (file)
@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
        struct vxlan_fdb *f;
        u32 ifindex = 0;
 
+       /* Ignore packets from invalid src-address */
+       if (!is_valid_ether_addr(src_mac))
+               return true;
+
 #if IS_ENABLED(CONFIG_IPV6)
        if (src_ip->sa.sa_family == AF_INET6 &&
            (ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
@@ -1616,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
        if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
                return false;
 
-       /* Ignore packets from invalid src-address */
-       if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
-               return false;
-
        /* Get address from the outer IP header */
        if (vxlan_get_sk_family(vs) == AF_INET) {
                saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
index e6ea884cafc190ac1bcbbf15d80823896e259572..4f385f4a8cef2acfc6f7aece9f703c6b8b46eabc 100644 (file)
@@ -45,6 +45,7 @@ config ATH10K_SNOC
        depends on ATH10K
        depends on ARCH_QCOM || COMPILE_TEST
        depends on QCOM_SMEM
+       depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
        select QCOM_SCM
        select QCOM_QMI_HELPERS
        help
index 3cc817a3b4a4047600591804bbc57aa45cba866f..b82e8fb2854130b1b4ebf109f4712f6f8c586590 100644 (file)
@@ -604,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
                .coldboot_cal_ftm = true,
                .cbcal_restart_fw = false,
                .fw_mem_mode = 0,
-               .num_vdevs = 16 + 1,
+               .num_vdevs = 3,
                .num_peers = 512,
                .supports_suspend = false,
                .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
index 4f62e38ba48b3a144ba658ce11f4c8193024d89a..9b96dbb21d8336fd5bf8264514cfd3386e74537c 100644 (file)
@@ -7988,8 +7988,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        struct ath11k_base *ab = ar->ab;
        struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        int ret;
-       struct cur_regulatory_info *reg_info;
-       enum ieee80211_ap_reg_power power_type;
 
        mutex_lock(&ar->conf_mutex);
 
@@ -8000,17 +7998,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
            ctx->def.chan->band == NL80211_BAND_6GHZ &&
            arvif->vdev_type == WMI_VDEV_TYPE_STA) {
-               reg_info = &ab->reg_info_store[ar->pdev_idx];
-               power_type = vif->bss_conf.power_type;
-
-               ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
-
-               if (power_type == IEEE80211_REG_UNSET_AP) {
-                       ret = -EINVAL;
-                       goto out;
-               }
-
-               ath11k_reg_handle_chan_list(ab, reg_info, power_type);
                arvif->chanctx = *ctx;
                ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
        }
@@ -9626,6 +9613,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
        struct ath11k *ar = hw->priv;
        struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+       enum ieee80211_ap_reg_power power_type;
+       struct cur_regulatory_info *reg_info;
        struct ath11k_peer *peer;
        int ret = 0;
 
@@ -9705,6 +9694,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
                                ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
                                            sta->addr, arvif->vdev_id, ret);
                }
+
+               if (!ret &&
+                   ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+                   arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+                   arvif->chanctx.def.chan &&
+                   arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+                       reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+                       power_type = vif->bss_conf.power_type;
+
+                       if (power_type == IEEE80211_REG_UNSET_AP) {
+                               ath11k_warn(ar->ab, "invalid power type %d\n",
+                                           power_type);
+                               ret = -EINVAL;
+                       } else {
+                               ret = ath11k_reg_handle_chan_list(ar->ab,
+                                                                 reg_info,
+                                                                 power_type);
+                               if (ret)
+                                       ath11k_warn(ar->ab,
+                                                   "failed to handle chan list with power type %d\n",
+                                                   power_type);
+                       }
+               }
        } else if (old_state == IEEE80211_STA_AUTHORIZED &&
                   new_state == IEEE80211_STA_ASSOC) {
                spin_lock_bh(&ar->ab->base_lock);
index 79eb3f9c902f4b77f3eb4481ed9d07e23038f54a..debe7c5919ef006ee911c94882c82ea3bcf61548 100644 (file)
@@ -561,6 +561,7 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
 {
        int i, j, n, ret, num_vectors = 0;
        u32 user_base_data = 0, base_vector = 0;
+       struct ath11k_ext_irq_grp *irq_grp;
        unsigned long irq_flags;
 
        ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
@@ -574,14 +575,16 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
                irq_flags |= IRQF_NOBALANCING;
 
        for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
-               struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+               irq_grp = &ab->ext_irq_grp[i];
                u32 num_irq = 0;
 
                irq_grp->ab = ab;
                irq_grp->grp_id = i;
                irq_grp->napi_ndev = alloc_netdev_dummy(0);
-               if (!irq_grp->napi_ndev)
-                       return -ENOMEM;
+               if (!irq_grp->napi_ndev) {
+                       ret = -ENOMEM;
+                       goto fail_allocate;
+               }
 
                netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
                               ath11k_pcic_ext_grp_napi_poll);
@@ -606,11 +609,8 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
                        int irq = ath11k_pcic_get_msi_irq(ab, vector);
 
                        if (irq < 0) {
-                               for (n = 0; n <= i; n++) {
-                                       irq_grp = &ab->ext_irq_grp[n];
-                                       free_netdev(irq_grp->napi_ndev);
-                               }
-                               return irq;
+                               ret = irq;
+                               goto fail_irq;
                        }
 
                        ab->irq_num[irq_idx] = irq;
@@ -635,6 +635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
        }
 
        return 0;
+fail_irq:
+       /* The napi_ndev for index i was allocated as well; free it too. */
+       i += 1;
+fail_allocate:
+       for (n = 0; n < i; n++) {
+               irq_grp = &ab->ext_irq_grp[n];
+               free_netdev(irq_grp->napi_ndev);
+       }
+       return ret;
 }
 
 int ath11k_pcic_config_irq(struct ath11k_base *ab)
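
The reworked error path funnels both failure points through one unwind: the i += 1 before fail_allocate accounts for the slot whose napi_ndev had already been allocated when the MSI lookup failed. A compact sketch of that shape, detached from the ath11k types (alloc_res(), setup() and free_res() are hypothetical):

	for (i = 0; i < n; i++) {
		slot[i].res = alloc_res();
		if (!slot[i].res)
			goto fail_alloc;	/* slot i was NOT allocated */
		if (setup(&slot[i]) < 0)
			goto fail_setup;	/* slot i WAS allocated */
	}
	return 0;

fail_setup:
	i += 1;			/* include slot i in the unwind */
fail_alloc:
	while (i--)
		free_res(slot[i].res);
	return -1;
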
index 33654f228ee871f7b8bbe1463351cc677904662a..d156a9c6419404b46995068f9589b2f156d8e01a 100644 (file)
@@ -1815,8 +1815,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
 err_fw:
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        debugfs_remove_recursive(drv->dbgfs_drv);
-       iwl_dbg_tlv_free(drv->trans);
 #endif
+       iwl_dbg_tlv_free(drv->trans);
        kfree(drv);
 err:
        return ERR_PTR(ret);
index 71e6b06481a93b6bf6f6eb14b1b1656510cdbc92..54f4acbbd05bd4d2f7e8a25e748c02c432025de1 100644 (file)
@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
                                         void *_data)
 {
        struct wowlan_key_gtk_type_iter *data = _data;
+       __le32 *cipher = NULL;
+
+       if (key->keyidx == 4 || key->keyidx == 5)
+               cipher = &data->kek_kck_cmd->igtk_cipher;
+       if (key->keyidx == 6 || key->keyidx == 7)
+               cipher = &data->kek_kck_cmd->bigtk_cipher;
 
        switch (key->cipher) {
        default:
@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
                return;
        case WLAN_CIPHER_SUITE_BIP_GMAC_256:
        case WLAN_CIPHER_SUITE_BIP_GMAC_128:
-               data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+               if (cipher)
+                       *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
                return;
        case WLAN_CIPHER_SUITE_AES_CMAC:
-               data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+       case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+               if (cipher)
+                       *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
                return;
        case WLAN_CIPHER_SUITE_CCMP:
                if (!sta)
@@ -2341,7 +2350,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
 
 out:
        if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
-                                   WOWLAN_GET_STATUSES, 0) < 10) {
+                                   WOWLAN_GET_STATUSES,
+                                   IWL_FW_CMD_VER_UNKNOWN) < 10) {
                mvmvif->seqno_valid = true;
                /* +0x10 because the set API expects next-to-use, not last-used */
                mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
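
The keyidx dispatch added to the iterator above leans on the 802.11 key index assignments: indices 4-5 carry the IGTK and 6-7 the BIGTK. A hedged sketch of the selection in isolation, with flag_for_cipher() standing in for the cipher-to-STA_KEY_FLG mapping:

	__le32 *slot = NULL;

	if (key->keyidx == 4 || key->keyidx == 5)
		slot = &cmd->igtk_cipher;	/* IGTK indices */
	else if (key->keyidx == 6 || key->keyidx == 7)
		slot = &cmd->bigtk_cipher;	/* BIGTK indices */

	if (slot)	/* pairwise/GTK indices 0-3 leave both untouched */
		*slot = cpu_to_le32(flag_for_cipher(key->cipher));
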
index 79f4ac8cbc729e9a6caaf393080fe99a4320892e..8101ecbb478b6ff5b38107aa0c5a3975d878c914 100644 (file)
@@ -1617,6 +1617,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
                                         &beacon_cmd.tim_size,
                                         beacon->data, beacon->len);
 
+               if (iwl_fw_lookup_cmd_ver(mvm->fw,
+                                         BEACON_TEMPLATE_CMD, 0) >= 14) {
+                       u32 offset = iwl_mvm_find_ie_offset(beacon->data,
+                                                           WLAN_EID_S1G_TWT,
+                                                           beacon->len);
+
+                       beacon_cmd.btwt_offset = cpu_to_le32(offset);
+               }
+
                iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
                                                 sizeof(beacon_cmd));
        }
index e7f5978ef2d71950e2d8bf9a006c054fe910a3b9..f4937a100cbe9b95b1d8b5758d501a47346aa50e 100644 (file)
@@ -94,20 +94,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
-       __le32 *dump_data = mfu_dump_notif->data;
-       int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
-       int i;
 
        if (mfu_dump_notif->index_num == 0)
                IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
                         le32_to_cpu(mfu_dump_notif->assert_id));
-
-       for (i = 0; i < n_words; i++)
-               IWL_DEBUG_INFO(mvm,
-                              "MFUART assert dump, dword %u: 0x%08x\n",
-                              le16_to_cpu(mfu_dump_notif->index_num) *
-                              n_words + i,
-                              le32_to_cpu(dump_data[i]));
 }
 
 static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -895,8 +885,8 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
        int ret;
        u16 len = 0;
        u32 n_subbands;
-       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
-                                          IWL_FW_CMD_VER_UNKNOWN);
+       u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
        if (cmd_ver >= 7) {
                len = sizeof(cmd.v7);
                n_subbands = IWL_NUM_SUB_BANDS_V2;
index 5a06f887769a6ad2de8f73c59afb1b75aa37aa1f..5144fa0f96b0e047a5feea0d2e8f564ec6d1cb94 100644 (file)
@@ -873,7 +873,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
        }
 }
 
-static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
 {
        struct ieee80211_mgmt *mgmt = (void *)beacon;
        const u8 *ie;
index 486a6b8f3c97f7540e95e68fa7dfc0fee8fb2af9..de9f0b446545625b51288b7565871c6ff5bdcf6c 100644 (file)
@@ -1128,6 +1128,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
        RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
 }
 
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+       struct iwl_mvm *mvm = data;
+       struct iwl_mvm_sta *mvm_sta;
+       struct ieee80211_vif *vif;
+       int link_id;
+
+       mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+       vif = mvm_sta->vif;
+
+       if (!sta->valid_links)
+               return;
+
+       for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+               struct iwl_mvm_link_sta *mvm_link_sta;
+
+               mvm_link_sta =
+                       rcu_dereference_check(mvm_sta->link[link_id],
+                                             lockdep_is_held(&mvm->mutex));
+               if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+                       /*
+                        * We have a link STA but the link is inactive in
+                        * mac80211. This can happen if we failed to
+                        * deactivate the link and mac80211 rolled the
+                        * deactivation back.
+                        * Delete the stale data to avoid issues later on.
+                        */
+                       iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+                                                 link_id, false);
+               }
+       }
+}
+
 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
 {
        iwl_mvm_stop_device(mvm);
@@ -1150,6 +1183,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
         */
        ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
 
+       /* cleanup stations as links may be gone after restart */
+       ieee80211_iterate_stations_atomic(mvm->hw,
+                                         iwl_mvm_cleanup_sta_iterator, mvm);
+
        mvm->p2p_device_vif = NULL;
 
        iwl_mvm_reset_phy_ctxts(mvm);
@@ -6348,7 +6385,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
                .len[0] = sizeof(cmd),
                .data[1] = data,
                .len[1] = size,
-               .flags = sync ? 0 : CMD_ASYNC,
+               .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
        };
        int ret;
 
index 0a3b7284eeddf9f382752e7d12a88da6a0b0d654..fcfd2dd7568e502633b7a0c4cee5808bec0e7024 100644 (file)
@@ -75,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
                goto out_free_bf;
 
        iwl_mvm_tcm_add_vif(mvm, vif);
-       INIT_DELAYED_WORK(&mvmvif->csa_work,
-                         iwl_mvm_channel_switch_disconnect_wk);
 
        if (vif->type == NL80211_IFTYPE_MONITOR) {
                mvm->monitor_on = true;
index b7a461dba41ee3fe87a09f3ca10d5a1cdded0360..9d139b56e1527cdeb21e87433f9e54f169b76dd3 100644 (file)
@@ -515,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
 }
 
-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
-                                     struct iwl_mvm_sta *mvm_sta,
-                                     struct iwl_mvm_link_sta *mvm_sta_link,
-                                     unsigned int link_id,
-                                     bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+                              struct iwl_mvm_sta *mvm_sta,
+                              struct iwl_mvm_link_sta *mvm_sta_link,
+                              unsigned int link_id,
+                              bool is_in_fw)
 {
        RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
                         is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -1014,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
 
                cmd.modify.tid = cpu_to_le32(data->tid);
 
-               ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+               ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+                                          sizeof(cmd), &cmd);
                data->sta_mask = new_sta_mask;
                if (ret)
                        return ret;
index 1f58c727fa632df016241ed1a64ba5731d0b1d79..0a1959bd4079997854d9f3c2c444adc2f2f945a5 100644 (file)
@@ -1758,6 +1758,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
 void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
                           u64 *boottime, ktime_t *realtime);
 u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
 
 /* Tx / Host Commands */
 int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
index 376b23b409dcad2c3a9fffd9c1a4ce0d50b71e28..6cd4ec4d8f34411b7c4f53a2f03f468d1f1498b9 100644 (file)
@@ -122,13 +122,8 @@ enum {
 
 #define LINK_QUAL_AGG_FRAME_LIMIT_DEF  (63)
 #define LINK_QUAL_AGG_FRAME_LIMIT_MAX  (63)
-/*
- * FIXME - various places in firmware API still use u8,
- * e.g. LQ command and SCD config command.
- * This should be 256 instead.
- */
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF     (255)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX     (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF     (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX     (64)
 #define LINK_QUAL_AGG_FRAME_LIMIT_MIN  (0)
 
 #define LQ_SIZE                2       /* 2 mode tables:  "Active" and "Search" */
index d78af29281522ee756b3a9a3d412e673d70106ed..489cfb0a4ab1ecb77672cb22a2c1277c52266406 100644 (file)
@@ -2450,8 +2450,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
         *
         * We mark it as mac header, for upper layers to know where
         * all radio tap header ends.
+        *
+        * Since putting data on the skb never moves the data already there,
+        * and that is the only way we build it, data + len is where a header
+        * would be placed next.
         */
-       skb_reset_mac_header(skb);
+       skb_set_mac_header(skb, skb->len);
 
        /*
         * Override the nss from the rx_vec since the rate_n_flags has
index a7ec172eeade8513f5c3a63d6c650a3473a9594b..b5f664ae5a17d0677e10aa9e4d79bf3152130e1c 100644 (file)
@@ -1313,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
                if (IWL_MVM_ADWELL_MAX_BUDGET)
                        cmd->v7.adwell_max_budget =
                                cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
-               else if (params->ssids && params->ssids[0].ssid_len)
+               else if (params->n_ssids && params->ssids[0].ssid_len)
                        cmd->v7.adwell_max_budget =
                                cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
                else
@@ -1418,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
        if (IWL_MVM_ADWELL_MAX_BUDGET)
                general_params->adwell_max_budget =
                        cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
-       else if (params->ssids && params->ssids[0].ssid_len)
+       else if (params->n_ssids && params->ssids[0].ssid_len)
                general_params->adwell_max_budget =
                        cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
        else
@@ -1730,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
                                break;
                }
 
-               if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+               if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+                   !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+                              "scan: invalid BSSID at index %u, index_b=%u\n",
+                              j, idex_b)) {
                        memcpy(&pp->bssid_array[idex_b++],
                               scan_6ghz_params[j].bssid, ETH_ALEN);
                }
@@ -3319,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
 
        ret = iwl_mvm_send_cmd_pdu(mvm,
                                   WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
-                                  0, sizeof(cmd), &cmd);
+                                  CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
        if (!ret)
                mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
 
+       IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
        return ret;
 }
 
index 20d4968d692a3664387b14951ebdf01af147cae8..cc79fe991c2633586d13701ebabb82c646a8f36e 100644 (file)
@@ -2848,7 +2848,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
                .action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
                                  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
        };
-       u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+       struct iwl_host_cmd hcmd = {
+               .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+               .flags = CMD_SEND_IN_RFKILL,
+               .len[0] = sizeof(cmd),
+               .data[0] = &cmd,
+       };
        int ret;
 
        BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2860,7 +2865,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
                cmd.alloc.ssn = cpu_to_le16(ssn);
                cmd.alloc.win_size = cpu_to_le16(buf_size);
                baid = -EIO;
-       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+       } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
                cmd.remove_v1.baid = cpu_to_le32(baid);
                BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
        } else {
@@ -2869,8 +2874,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
                cmd.remove.tid = cpu_to_le32(tid);
        }
 
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
-                                         &cmd, &baid);
+       ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
        if (ret)
                return ret;
 
index 264f1f9394b6de26af044f5d7dc971ad73d7e7c7..754a05a8c189bcb7e2af33bbe98d549e8547dc5a 100644 (file)
@@ -662,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                           struct ieee80211_sta *sta);
 int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+                              struct iwl_mvm_sta *mvm_sta,
+                              struct iwl_mvm_link_sta *mvm_sta_link,
+                              unsigned int link_id,
+                              bool is_in_fw);
 int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
 int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
                                 struct ieee80211_vif *vif,
index 0971c164b57e926d2d22dd1ef4f0559d45420db2..c27acaf0eb1cf7e9698adec73868867f349620e8 100644 (file)
@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
 #endif /* CONFIG_PM */
 
 const struct ieee80211_ops mt7615_ops = {
+       .add_chanctx = ieee80211_emulate_add_chanctx,
+       .remove_chanctx = ieee80211_emulate_remove_chanctx,
+       .change_chanctx = ieee80211_emulate_change_chanctx,
+       .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
        .tx = mt7615_tx,
        .start = mt7615_start,
        .stop = mt7615_stop,
index 7d9fb9f2d52799b1e1c92ae285943bdd921114a0..089102ed9ae51b8fa42c0d49cdaacf34b7ed670b 100644 (file)
@@ -237,11 +237,12 @@ static int set_channel(struct wiphy *wiphy,
        struct wilc_vif *vif;
        u32 channelnum;
        int result;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif)) {
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return PTR_ERR(vif);
        }
 
@@ -252,7 +253,7 @@ static int set_channel(struct wiphy *wiphy,
        if (result)
                netdev_err(vif->ndev, "Error in setting channel\n");
 
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
        return result;
 }
 
@@ -805,8 +806,9 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
        struct wilc *wl = wiphy_priv(wiphy);
        struct wilc_vif *vif;
        struct wilc_priv *priv;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif))
                goto out;
@@ -861,7 +863,7 @@ static int set_wiphy_params(struct wiphy *wiphy, u32 changed)
                netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n");
 
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
        return ret;
 }
 
@@ -1537,19 +1539,20 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
 
        if (type == NL80211_IFTYPE_MONITOR) {
                struct net_device *ndev;
+               int srcu_idx;
 
-               rcu_read_lock();
+               srcu_idx = srcu_read_lock(&wl->srcu);
                vif = wilc_get_vif_from_type(wl, WILC_AP_MODE);
                if (!vif) {
                        vif = wilc_get_vif_from_type(wl, WILC_GO_MODE);
                        if (!vif) {
-                               rcu_read_unlock();
+                               srcu_read_unlock(&wl->srcu, srcu_idx);
                                goto validate_interface;
                        }
                }
 
                if (vif->monitor_flag) {
-                       rcu_read_unlock();
+                       srcu_read_unlock(&wl->srcu, srcu_idx);
                        goto validate_interface;
                }
 
@@ -1557,12 +1560,12 @@ static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy,
                if (ndev) {
                        vif->monitor_flag = 1;
                } else {
-                       rcu_read_unlock();
+                       srcu_read_unlock(&wl->srcu, srcu_idx);
                        return ERR_PTR(-EINVAL);
                }
 
                wdev = &vif->priv.wdev;
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return wdev;
        }
 
@@ -1610,7 +1613,7 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
        list_del_rcu(&vif->list);
        wl->vif_num--;
        mutex_unlock(&wl->vif_mutex);
-       synchronize_rcu();
+       synchronize_srcu(&wl->srcu);
        return 0;
 }
 
@@ -1635,23 +1638,25 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
 {
        struct wilc *wl = wiphy_priv(wiphy);
        struct wilc_vif *vif;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif)) {
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return;
        }
 
        netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
        wilc_set_wowlan_trigger(vif, enabled);
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
 }
 
 static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
                        enum nl80211_tx_power_setting type, int mbm)
 {
        int ret;
+       int srcu_idx;
        s32 tx_power = MBM_TO_DBM(mbm);
        struct wilc *wl = wiphy_priv(wiphy);
        struct wilc_vif *vif;
@@ -1659,10 +1664,10 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
        if (!wl->initialized)
                return -EIO;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        vif = wilc_get_wl_to_vif(wl);
        if (IS_ERR(vif)) {
-               rcu_read_unlock();
+               srcu_read_unlock(&wl->srcu, srcu_idx);
                return -EINVAL;
        }
 
@@ -1674,7 +1679,7 @@ static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev,
        ret = wilc_set_tx_power(vif, tx_power);
        if (ret)
                netdev_err(vif->ndev, "Failed to set tx power\n");
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
 
        return ret;
 }
@@ -1757,6 +1762,7 @@ static void wlan_init_locks(struct wilc *wl)
        init_completion(&wl->cfg_event);
        init_completion(&wl->sync_event);
        init_completion(&wl->txq_thread_started);
+       init_srcu_struct(&wl->srcu);
 }
 
 void wlan_deinit_locks(struct wilc *wilc)
@@ -1767,6 +1773,7 @@ void wlan_deinit_locks(struct wilc *wilc)
        mutex_destroy(&wilc->txq_add_to_head_cs);
        mutex_destroy(&wilc->vif_mutex);
        mutex_destroy(&wilc->deinit_lock);
+       cleanup_srcu_struct(&wilc->srcu);
 }
 
 int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
index 919de6ffb8217c54375df09f8ecde8e1bfa997bf..f1085ccb7eedc025aba6a6cf45ecd1bc4edef66c 100644 (file)
@@ -1570,11 +1570,12 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
        struct host_if_drv *hif_drv;
        struct host_if_msg *msg;
        struct wilc_vif *vif;
+       int srcu_idx;
        int result;
        int id;
 
        id = get_unaligned_le32(&buffer[length - 4]);
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        vif = wilc_get_vif_from_idx(wilc, id);
        if (!vif)
                goto out;
@@ -1593,7 +1594,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
        msg->body.net_info.rssi = buffer[8];
        msg->body.net_info.mgmt = kmemdup(&buffer[9],
                                          msg->body.net_info.frame_len,
-                                         GFP_ATOMIC);
+                                         GFP_KERNEL);
        if (!msg->body.net_info.mgmt) {
                kfree(msg);
                goto out;
@@ -1606,7 +1607,7 @@ void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length)
                kfree(msg);
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
@@ -1614,13 +1615,14 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
        struct host_if_drv *hif_drv;
        struct host_if_msg *msg;
        struct wilc_vif *vif;
+       int srcu_idx;
        int result;
        int id;
 
        mutex_lock(&wilc->deinit_lock);
 
        id = get_unaligned_le32(&buffer[length - 4]);
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        vif = wilc_get_vif_from_idx(wilc, id);
        if (!vif)
                goto out;
@@ -1647,7 +1649,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length)
                kfree(msg);
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
        mutex_unlock(&wilc->deinit_lock);
 }
 
@@ -1655,11 +1657,12 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
 {
        struct host_if_drv *hif_drv;
        struct wilc_vif *vif;
+       int srcu_idx;
        int result;
        int id;
 
        id = get_unaligned_le32(&buffer[length - 4]);
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        vif = wilc_get_vif_from_idx(wilc, id);
        if (!vif)
                goto out;
@@ -1684,7 +1687,7 @@ void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length)
                }
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u16 chan,
index 73f56f7b002bf373277804db28c18cc8a4338a67..710e29bea5605843a8c2e78406de3d8242bbb16c 100644 (file)
@@ -127,28 +127,30 @@ void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
 
 int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
 {
+       int srcu_idx;
        u8 ret_val = 0;
        struct wilc_vif *vif;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, vif) {
                if (!is_zero_ether_addr(vif->bssid))
                        ret_val++;
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
        return ret_val;
 }
 
 static void wilc_wake_tx_queues(struct wilc *wl)
 {
+       int srcu_idx;
        struct wilc_vif *ifc;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        wilc_for_each_vif(wl, ifc) {
                if (ifc->mac_opened && netif_queue_stopped(ifc->ndev))
                        netif_wake_queue(ifc->ndev);
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
 }
 
 static int wilc_txq_task(void *vp)
@@ -653,6 +655,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
        struct sockaddr *addr = (struct sockaddr *)p;
        unsigned char mac_addr[ETH_ALEN];
        struct wilc_vif *tmp_vif;
+       int srcu_idx;
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
@@ -664,19 +667,19 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
 
        /* Verify MAC Address is not already in use: */
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, tmp_vif) {
                wilc_get_mac_address(tmp_vif, mac_addr);
                if (ether_addr_equal(addr->sa_data, mac_addr)) {
                        if (vif != tmp_vif) {
-                               rcu_read_unlock();
+                               srcu_read_unlock(&wilc->srcu, srcu_idx);
                                return -EADDRNOTAVAIL;
                        }
-                       rcu_read_unlock();
+                       srcu_read_unlock(&wilc->srcu, srcu_idx);
                        return 0;
                }
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 
        result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
        if (result)
@@ -764,14 +767,15 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
                                                wilc_tx_complete);
 
        if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) {
+               int srcu_idx;
                struct wilc_vif *vif;
 
-               rcu_read_lock();
+               srcu_idx = srcu_read_lock(&wilc->srcu);
                wilc_for_each_vif(wilc, vif) {
                        if (vif->mac_opened)
                                netif_stop_queue(vif->ndev);
                }
-               rcu_read_unlock();
+               srcu_read_unlock(&wilc->srcu, srcu_idx);
        }
 
        return NETDEV_TX_OK;
@@ -815,12 +819,13 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
        unsigned int frame_len = 0;
        struct wilc_vif *vif;
        struct sk_buff *skb;
+       int srcu_idx;
        int stats;
 
        if (!wilc)
                return;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_netdev = get_if_handler(wilc, buff);
        if (!wilc_netdev)
                goto out;
@@ -848,14 +853,15 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size,
                netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats);
        }
 out:
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
 {
+       int srcu_idx;
        struct wilc_vif *vif;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, vif) {
                struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff;
                u16 type = le16_to_cpup((__le16 *)buff);
@@ -876,7 +882,7 @@ void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth)
                if (vif->monitor_flag)
                        wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size);
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 }
 
 static const struct net_device_ops wilc_netdev_ops = {
@@ -906,7 +912,7 @@ void wilc_netdev_cleanup(struct wilc *wilc)
                list_del_rcu(&vif->list);
                wilc->vif_num--;
                mutex_unlock(&wilc->vif_mutex);
-               synchronize_rcu();
+               synchronize_srcu(&wilc->srcu);
                if (vif->ndev)
                        unregister_netdev(vif->ndev);
        }
@@ -925,15 +931,16 @@ static u8 wilc_get_available_idx(struct wilc *wl)
 {
        int idx = 0;
        struct wilc_vif *vif;
+       int srcu_idx;
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wl->srcu);
        wilc_for_each_vif(wl, vif) {
                if (vif->idx == 0)
                        idx = 1;
                else
                        idx = 0;
        }
-       rcu_read_unlock();
+       srcu_read_unlock(&wl->srcu, srcu_idx);
        return idx;
 }
 
@@ -983,7 +990,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
        list_add_tail_rcu(&vif->list, &wl->vif_list);
        wl->vif_num += 1;
        mutex_unlock(&wl->vif_mutex);
-       synchronize_rcu();
+       synchronize_srcu(&wl->srcu);
 
        return vif;
 
index eecee3973d6a420cce81f721ea1c9a24de0bcccd..fde8610a9c84ba0c82b61a214643f2f9b2d43da8 100644 (file)
@@ -32,8 +32,8 @@
 
 #define wilc_for_each_vif(w, v) \
        struct wilc *_w = w; \
-       list_for_each_entry_rcu(v, &_w->vif_list, list, \
-                                rcu_read_lock_held())
+       list_for_each_entry_srcu(v, &_w->vif_list, list, \
+                                srcu_read_lock_held(&_w->srcu))
 
 struct wilc_wfi_stats {
        unsigned long rx_packets;
@@ -220,6 +220,14 @@ struct wilc {
 
        /* protect vif list */
        struct mutex vif_mutex;
+       /* Sleepable RCU (SRCU) state protecting the vif list. The sleepable
+        * variant is needed over classic RCU because the driver's current
+        * design sleeps while using a vif retrieved from the list (i.e.
+        * inside the read-side critical section), e.g. when:
+        * - sending commands to the chip using info from the retrieved vif
+        * - registering a new monitoring net device
+        */
+       struct srcu_struct srcu;
        u8 open_ifcs;
 
        /* protect head of transmit queue */
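
The comment above is the crux of the wilc1000 conversion: rcu_read_lock() sections must not sleep, while srcu_read_lock() sections may, at the cost of carrying the returned index to the matching unlock. A minimal sketch of the read side as used throughout this series (do_sleepable_work() is hypothetical):

	int idx;

	idx = srcu_read_lock(&wl->srcu);
	wilc_for_each_vif(wl, vif)
		do_sleepable_work(vif);	/* sleeping is legal under SRCU */
	srcu_read_unlock(&wl->srcu, idx);
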
index 37c32d17856ea70e4aa15146463c08807da59e84..a9e872a7b2c38b59b55cb617af08d6914970baf5 100644 (file)
@@ -712,6 +712,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
        u32 *vmm_table = wilc->vmm_table;
        u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
        const struct wilc_hif_func *func;
+       int srcu_idx;
        u8 *txb = wilc->tx_buffer;
        struct wilc_vif *vif;
 
@@ -723,10 +724,10 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
 
        mutex_lock(&wilc->txq_add_to_head_cs);
 
-       rcu_read_lock();
+       srcu_idx = srcu_read_lock(&wilc->srcu);
        wilc_for_each_vif(wilc, vif)
                wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev);
-       rcu_read_unlock();
+       srcu_read_unlock(&wilc->srcu, srcu_idx);
 
        for (ac = 0; ac < NQUEUES; ac++)
                tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac);
index 2e60a6991ca1665db8c4d5c3730500e9dc53eded..42b7db12b1bd41d0c3abbaea60bb2218cbab4894 100644 (file)
@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
-               rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
-                       "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
-                       hw->conf.long_frame_max_tx_count);
-               /* brought up everything changes (changed == ~0) indicates first
-                * open, so use our default value instead of that of wiphy.
-                */
-               if (changed != ~0) {
-                       mac->retry_long = hw->conf.long_frame_max_tx_count;
-                       mac->retry_short = hw->conf.long_frame_max_tx_count;
-                       rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
-                               (u8 *)(&hw->conf.long_frame_max_tx_count));
-               }
-       }
-
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
            !rtlpriv->proximity.proxim_on) {
                struct ieee80211_channel *channel = hw->conf.chandef.chan;
index bef6819986e93914edcf659e7e2591e3058ba8a2..33d6342124bc339ab403c3046c32db10573aabf9 100644 (file)
@@ -211,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
                        rc = PTR_ERR(devlink->cd_regions[i]);
                        dev_err(devlink->dev, "Devlink region fail,err %d", rc);
                        /* Delete previously created regions */
-                       for ( ; i >= 0; i--)
+                       for (i--; i >= 0; i--)
                                devlink_region_destroy(devlink->cd_regions[i]);
                        goto region_create_fail;
                }
index 7513018c9f9ac72d5c1b0055b55ae9ff36e710b0..2067b0120d083d868e0f8fee2119fb0a4cd6a2b4 100644 (file)
@@ -85,7 +85,8 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
        }
 
        if (info->verify(info, pin, func, chan)) {
-               pr_err("driver cannot use function %u on pin %u\n", func, chan);
+               pr_err("driver cannot use function %u and channel %u on pin %u\n",
+                      func, chan, pin);
                return -EOPNOTSUPP;
        }
 
index 3bfb80bad1739d244a3906fa7f0e1a606dfaf868..b45d57b5968af4d17a1fc002a75a0923aefabf12 100644 (file)
@@ -13,6 +13,7 @@ enum rtnl_link_flags {
        RTNL_FLAG_DOIT_UNLOCKED         = BIT(0),
        RTNL_FLAG_BULK_DEL_SUPPORTED    = BIT(1),
        RTNL_FLAG_DUMP_UNLOCKED         = BIT(2),
+       RTNL_FLAG_DUMP_SPLIT_NLM_DONE   = BIT(3),       /* legacy behavior */
 };
 
 enum rtnl_kinds {
index 471e177362b4c02ed37a85d4b5b1174aed67f18c..5d8e9ed2c0056f8437baf6b7d1853d30f079a64a 100644 (file)
@@ -86,7 +86,8 @@ static inline int tcp_ao_sizeof_key(const struct tcp_ao_key *key)
 struct tcp_ao_info {
        /* List of tcp_ao_key's */
        struct hlist_head       head;
-       /* current_key and rnext_key aren't maintained on listen sockets.
+       /* current_key and rnext_key are maintained on sockets
+        * in TCP_AO_ESTABLISHED states.
         * Their purpose is to cache keys on established connections,
         * saving needless lookups. Never dereference any of them from
         * listen sockets.
@@ -201,9 +202,9 @@ struct tcp6_ao_context {
 };
 
 struct tcp_sigpool;
+/* Established states are the fast path and always have current_key/rnext_key */
 #define TCP_AO_ESTABLISHED (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2 | \
-                           TCPF_CLOSE | TCPF_CLOSE_WAIT | \
-                           TCPF_LAST_ACK | TCPF_CLOSING)
+                           TCPF_CLOSE_WAIT | TCPF_LAST_ACK | TCPF_CLOSING)
 
 int tcp_ao_transmit_skb(struct sock *sk, struct sk_buff *skb,
                        struct tcp_ao_key *key, struct tcphdr *th,
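
With TCPF_CLOSE dropped from the mask, membership checks of the form below no longer treat a closed socket as established, which is what the cached-key fast path relies on. A sketch of such a check (illustrative, not a quote from the TCP-AO code):

	if ((1 << sk->sk_state) & TCP_AO_ESTABLISHED)
		key = READ_ONCE(ao_info->current_key);	/* maintained in these states */
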
index 4e2cdbb5629f22fc1464c5cba296688c45fc8d26..7f3b34452243c83b6e113b5a5d831fc088854f54 100644 (file)
@@ -760,9 +760,6 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb,
                for (i = 0; i < dtab->n_buckets; i++) {
                        head = dev_map_index_hash(dtab, i);
                        hlist_for_each_entry_safe(dst, next, head, index_hlist) {
-                               if (!dst)
-                                       continue;
-
                                if (is_ifindex_excluded(excluded_devices, num_excluded,
                                                        dst->dev->ifindex))
                                        continue;
index 2222c3ff88e7fd639390112de66581b934584457..f45ed6adc092af680ae01b01027b1d572e5c418f 100644 (file)
@@ -2998,6 +2998,7 @@ static int bpf_obj_get(const union bpf_attr *attr)
 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
                   const struct bpf_link_ops *ops, struct bpf_prog *prog)
 {
+       WARN_ON(ops->dealloc && ops->dealloc_deferred);
        atomic64_set(&link->refcnt, 1);
        link->type = type;
        link->id = 0;
@@ -3056,16 +3057,17 @@ static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
 /* bpf_link_free is guaranteed to be called from process context */
 static void bpf_link_free(struct bpf_link *link)
 {
+       const struct bpf_link_ops *ops = link->ops;
        bool sleepable = false;
 
        bpf_link_free_id(link->id);
        if (link->prog) {
                sleepable = link->prog->sleepable;
                /* detach BPF program, clean up used resources */
-               link->ops->release(link);
+               ops->release(link);
                bpf_prog_put(link->prog);
        }
-       if (link->ops->dealloc_deferred) {
+       if (ops->dealloc_deferred) {
                /* schedule BPF link deallocation; if underlying BPF program
                 * is sleepable, we need to first wait for RCU tasks trace
                 * sync, then go through "classic" RCU grace period
@@ -3074,9 +3076,8 @@ static void bpf_link_free(struct bpf_link *link)
                        call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
                else
                        call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
-       }
-       if (link->ops->dealloc)
-               link->ops->dealloc(link);
+       } else if (ops->dealloc)
+               ops->dealloc(link);
 }
 
 static void bpf_link_put_deferred(struct work_struct *work)
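
Two things happen in the hunk above: link->ops is cached in a local before the callbacks run, and the two dealloc paths become mutually exclusive, so a link freed asynchronously by dealloc_deferred is never dereferenced again on the synchronous branch. A hedged sketch of the resulting shape (link_dealloc_rcu is a hypothetical callback):

	const struct bpf_link_ops *ops = link->ops;	/* link itself may be freed soon */

	if (ops->dealloc_deferred)
		call_rcu(&link->rcu, link_dealloc_rcu);
	else if (ops->dealloc)
		ops->dealloc(link);	/* only reached when no deferral */
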
index 48f3a9acdef3d4f206c8fef1067195404e9438be..36ef8e96787ed571e8a266687f916d3b450bd3bf 100644 (file)
@@ -11128,7 +11128,11 @@ BTF_ID(func, bpf_iter_css_task_new)
 #else
 BTF_ID_UNUSED
 #endif
+#ifdef CONFIG_BPF_EVENTS
 BTF_ID(func, bpf_session_cookie)
+#else
+BTF_ID_UNUSED
+#endif
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
index 6249dac61701834be8a6b8619945560b7bafa8ed..d1daeab1bbc141df37023a2f1c683ec7268b732e 100644 (file)
@@ -3517,7 +3517,6 @@ static u64 bpf_uprobe_multi_entry_ip(struct bpf_run_ctx *ctx)
 }
 #endif /* CONFIG_UPROBES */
 
-#ifdef CONFIG_FPROBE
 __bpf_kfunc_start_defs();
 
 __bpf_kfunc bool bpf_session_is_return(void)
@@ -3566,4 +3565,3 @@ static int __init bpf_kprobe_multi_kfuncs_init(void)
 }
 
 late_initcall(bpf_kprobe_multi_kfuncs_init);
-#endif
index 42b585208249c6d8731cb02e81ff62c5939c121d..c63db03ebb9dcfd5730c7363bf4585e2424af4bc 100644 (file)
@@ -811,4 +811,5 @@ static void __exit test_rht_exit(void)
 module_init(test_rht_init);
 module_exit(test_rht_exit);
 
+MODULE_DESCRIPTION("Resizable, Scalable, Concurrent Hash Table test module");
 MODULE_LICENSE("GPL v2");
index 8077cf2ee448038fab65e842785f6d18bf0e4dc4..d6f9fae06a9d8139ec0505358327e3876af228ae 100644 (file)
@@ -1378,8 +1378,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
 {
        struct sk_buff *skb;
        struct sock *newsk;
+       ax25_dev *ax25_dev;
        DEFINE_WAIT(wait);
        struct sock *sk;
+       ax25_cb *ax25;
        int err = 0;
 
        if (sock->state != SS_UNCONNECTED)
@@ -1434,6 +1436,10 @@ static int ax25_accept(struct socket *sock, struct socket *newsock,
        kfree_skb(skb);
        sk_acceptq_removed(sk);
        newsock->state = SS_CONNECTED;
+       ax25 = sk_to_ax25(newsk);
+       ax25_dev = ax25->ax25_dev;
+       netdev_hold(ax25_dev->dev, &ax25->dev_tracker, GFP_ATOMIC);
+       ax25_dev_hold(ax25_dev);
 
 out:
        release_sock(sk);
index 742d7c68e7e7e9fb6c1734f75d20402b2e4ad5a9..9efd6690b3443653a2f2ef421080aa48b214a8ba 100644 (file)
@@ -196,7 +196,7 @@ void __exit ax25_dev_free(void)
        list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
                netdev_put(s->dev, &s->dev_tracker);
                list_del(&s->list);
-               kfree(s);
+               ax25_dev_put(s);
        }
        spin_unlock_bh(&ax25_dev_lock);
 }
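Editor's note: the two ax25 hunks pair up a reference-counting fix: ax25_accept() now takes a netdev and an ax25_dev reference on behalf of the accepted socket, and ax25_dev_free() drops its own reference with ax25_dev_put() instead of kfree(), so the object only disappears once the last holder is gone. A simplified sketch of the hold/put discipline in plain C (the kernel uses refcount_t and is concurrency-safe; this is not):

#include <stdlib.h>

struct dev {
	int refcnt;		/* simplified; the kernel uses refcount_t */
};

static struct dev *dev_alloc(void)
{
	struct dev *d = calloc(1, sizeof(*d));

	d->refcnt = 1;		/* creator's reference */
	return d;
}

static void dev_hold(struct dev *d) { d->refcnt++; }

static void dev_put(struct dev *d)
{
	/* Never free() directly: another holder may still be live. */
	if (--d->refcnt == 0)
		free(d);
}

int main(void)
{
	struct dev *d = dev_alloc();

	dev_hold(d);		/* e.g. the accepted socket's reference */
	dev_put(d);		/* the socket goes away */
	dev_put(d);		/* last reference: only now is it freed */
	return 0;
}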
index f6aad4ed2ab2f6b09ad2f6442cde26b39949871a..36ae54f57bf5743b37c5c785baa7c4aa8e871024 100644 (file)
@@ -727,10 +727,16 @@ static void
 __bpf_prog_test_run_raw_tp(void *data)
 {
        struct bpf_raw_tp_test_run_info *info = data;
+       struct bpf_trace_run_ctx run_ctx = {};
+       struct bpf_run_ctx *old_run_ctx;
+
+       old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 
        rcu_read_lock();
        info->retval = bpf_prog_run(info->prog, info->ctx);
        rcu_read_unlock();
+
+       bpf_reset_run_ctx(old_run_ctx);
 }
 
 int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
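Editor's note: the hunk wraps the test-run invocation in a bpf_trace_run_ctx so helpers that expect a run context (such as the session cookie kfuncs) never dereference a stale pointer; this is the classic save/set/restore of a per-task context. A minimal userspace sketch using a thread-local (names are illustrative only):

#include <stdio.h>

struct run_ctx { long cookie; };

/* Per-thread "current context", like the task's bpf_run_ctx. */
static _Thread_local struct run_ctx *current_ctx;

static struct run_ctx *set_ctx(struct run_ctx *new)
{
	struct run_ctx *old = current_ctx;

	current_ctx = new;
	return old;
}

static void run_prog(void)
{
	/* A helper reading the context must always see a valid one. */
	printf("cookie=%ld\n", current_ctx ? current_ctx->cookie : -1);
}

int main(void)
{
	struct run_ctx ctx = { .cookie = 42 };
	struct run_ctx *old = set_ctx(&ctx);	/* save + set */

	run_prog();
	set_ctx(old);				/* restore on the way out */
	return 0;
}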
index e1bb6d7856d922dfca1499df91bdade86bf391c1..4d4de9008f6f3bd4e96b15fda5a2350d9d5efe10 100644 (file)
@@ -4516,12 +4516,13 @@ set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
            struct rps_dev_flow *rflow, u16 next_cpu)
 {
        if (next_cpu < nr_cpu_ids) {
+               u32 head;
 #ifdef CONFIG_RFS_ACCEL
                struct netdev_rx_queue *rxqueue;
                struct rps_dev_flow_table *flow_table;
                struct rps_dev_flow *old_rflow;
-               u32 flow_id, head;
                u16 rxq_index;
+               u32 flow_id;
                int rc;
 
                /* Should we steer this flow to a different hardware queue? */
index 6a0482e676d379f1f9bffdda51c7535243b3ec38..70c634b9e7b02300188582a1634d5977838db132 100644 (file)
@@ -27,6 +27,7 @@ struct dst_cache_pcpu {
 static void dst_cache_per_cpu_dst_set(struct dst_cache_pcpu *dst_cache,
                                      struct dst_entry *dst, u32 cookie)
 {
+       DEBUG_NET_WARN_ON_ONCE(!in_softirq());
        dst_release(dst_cache->dst);
        if (dst)
                dst_hold(dst);
@@ -40,6 +41,7 @@ static struct dst_entry *dst_cache_per_cpu_get(struct dst_cache *dst_cache,
 {
        struct dst_entry *dst;
 
+       DEBUG_NET_WARN_ON_ONCE(!in_softirq());
        dst = idst->dst;
        if (!dst)
                goto fail;
index b86b0a87367dd53b5bdef80cfddad558a186bb7c..4668d671804070d978d4e1f0f46200dbb496145a 100644 (file)
@@ -6484,6 +6484,46 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
 
 /* Process one rtnetlink message. */
 
+static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       rtnl_dumpit_func dumpit = cb->data;
+       int err;
+
+       /* Previous iteration has already finished; avoid calling ->dumpit()
+        * again, as it may not expect to be called after it reached the end.
+        */
+       if (!dumpit)
+               return 0;
+
+       err = dumpit(skb, cb);
+
+       /* Old dump handlers used to send NLM_DONE in a separate recvmsg().
+        * Some applications which parse netlink manually depend on this.
+        */
+       if (cb->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+               if (err < 0 && err != -EMSGSIZE)
+                       return err;
+               if (!err)
+                       cb->data = NULL;
+
+               return skb->len;
+       }
+       return err;
+}
+
+static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+                               const struct nlmsghdr *nlh,
+                               struct netlink_dump_control *control)
+{
+       if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) {
+               WARN_ON(control->data);
+               control->data = control->dump;
+               control->dump = rtnl_dumpit;
+       }
+
+       return netlink_dump_start(ssk, skb, nlh, control);
+}
+
 static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                             struct netlink_ext_ack *extack)
 {
@@ -6548,7 +6588,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                                .module         = owner,
                                .flags          = flags,
                        };
-                       err = netlink_dump_start(rtnl, skb, nlh, &c);
+                       err = rtnetlink_dump_start(rtnl, skb, nlh, &c);
                        /* netlink_dump_start() will keep a reference on
                         * module if dump is still in progress.
                         */
@@ -6694,7 +6734,7 @@ void __init rtnetlink_init(void)
        register_netdevice_notifier(&rtnetlink_dev_notifier);
 
        rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
-                     rtnl_dump_ifinfo, 0);
+                     rtnl_dump_ifinfo, RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
        rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, 0);
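Editor's note: RTNL_FLAG_DUMP_SPLIT_NLM_DONE restores the old behavior in which a dump's NLM_DONE arrives alone, in its own recvmsg(): rtnl_dumpit() reports a non-empty skb once more after the real handler finishes, so NLM_DONE lands in the next datagram. Fragile userspace parsers assume exactly that. A sketch of such a consumer loop against RTM_GETLINK (error handling trimmed):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	char buf[16384];
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct { struct nlmsghdr nlh; struct ifinfomsg ifm; } req = {
		.nlh = { .nlmsg_len = sizeof(req), .nlmsg_type = RTM_GETLINK,
			 .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP, .nlmsg_seq = 1 },
		.ifm = { .ifi_family = AF_UNSPEC },
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	sendto(fd, &req, sizeof(req), 0, (struct sockaddr *)&sa, sizeof(sa));

	for (;;) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		/* A robust parser walks *all* messages in each datagram;
		 * sloppy ones assume NLM_DONE arrives on its own. */
		for (; NLMSG_OK(nlh, n); nlh = NLMSG_NEXT(nlh, n)) {
			if (nlh->nlmsg_type == NLMSG_DONE)
				goto done;
			printf("ifindex %d\n",
			       ((struct ifinfomsg *)NLMSG_DATA(nlh))->ifi_index);
		}
	}
done:
	close(fd);
	return 0;
}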
index 5a55270aa86e8804f467bd094f8bca9171bc2617..e645d751a5e8998064ef5fa239d465f66c044e6f 100644 (file)
@@ -2220,7 +2220,7 @@ static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
        const struct ethtool_ops *ops = dev->ethtool_ops;
        int n_stats, ret;
 
-       if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
+       if (!ops || !ops->get_sset_count || !ops->get_ethtool_phy_stats)
                return -EOPNOTSUPP;
 
        n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
index be2755c8d8fde7a2e74225aed4a276a39d72d0f1..57d496287e523797b05f85fc26dca111dd701c6a 100644 (file)
@@ -38,11 +38,11 @@ static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
        ret = ethnl_ops_begin(dev);
        if (ret < 0)
                return ret;
-       if (req_base->flags & ETHTOOL_FLAG_STATS &&
-           dev->ethtool_ops->get_ts_stats) {
+       if (req_base->flags & ETHTOOL_FLAG_STATS) {
                ethtool_stats_init((u64 *)&data->stats,
                                   sizeof(data->stats) / sizeof(u64));
-               dev->ethtool_ops->get_ts_stats(dev, &data->stats);
+               if (dev->ethtool_ops->get_ts_stats)
+                       dev->ethtool_ops->get_ts_stats(dev, &data->stats);
        }
        ret = __ethtool_get_ts_info(dev, &data->ts_info);
        ethnl_ops_complete(dev);
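Editor's note: the tsinfo fix initializes the stats block whenever ETHTOOL_FLAG_STATS was requested, and only then lets a driver that actually implements get_ts_stats() fill it; previously a driver without the hook skipped initialization entirely and could report stack garbage, or unintentionally report all-zero stats as if they were real. In the kernel, ethtool_stats_init() marks each counter ETHTOOL_STAT_NOT_SET. A condensed sketch of the idea:

#include <stdint.h>
#include <stdio.h>

#define STAT_NOT_SET (~0ULL)	/* mirrors ETHTOOL_STAT_NOT_SET */

struct ts_stats { uint64_t pkts_in, pkts_out, lost; };

/* Driver hook; NULL when the device has no timestamp stats. */
static void (*get_ts_stats)(struct ts_stats *s);

static void prepare_stats(struct ts_stats *s)
{
	uint64_t *p = (uint64_t *)s;
	size_t i;

	/* Mark everything "not set" first, so an absent driver hook
	 * reports "no data" instead of bogus zeroes. */
	for (i = 0; i < sizeof(*s) / sizeof(uint64_t); i++)
		p[i] = STAT_NOT_SET;

	if (get_ts_stats)
		get_ts_stats(s);
}

int main(void)
{
	struct ts_stats s;

	prepare_stats(&s);
	printf("lost=%llu (NOT_SET=%llu)\n",
	       (unsigned long long)s.lost, (unsigned long long)STAT_NOT_SET);
	return 0;
}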
index f3892ee9dfb33f8af86e9deb450da41d1508b80c..d09f557eaa7790baf83cf567bf7874d32f010de0 100644 (file)
@@ -2805,7 +2805,7 @@ void __init devinet_init(void)
        rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, 0);
        rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, 0);
        rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr,
-                     RTNL_FLAG_DUMP_UNLOCKED);
+                     RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
        rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
                      inet_netconf_dump_devconf,
                      RTNL_FLAG_DOIT_UNLOCKED | RTNL_FLAG_DUMP_UNLOCKED);
index c484b1c0fc00a79a45a1c3e7fde230ce59cb67a3..7ad2cafb927634fd60935c36ad68ded45e52dbab 100644 (file)
@@ -1050,11 +1050,6 @@ next:
                        e++;
                }
        }
-
-       /* Don't let NLM_DONE coalesce into a message, even if it could.
-        * Some user space expects NLM_DONE in a separate recv().
-        */
-       err = skb->len;
 out:
 
        cb->args[1] = e;
@@ -1665,5 +1660,5 @@ void __init ip_fib_init(void)
        rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, 0);
        rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, 0);
        rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib,
-                     RTNL_FLAG_DUMP_UNLOCKED);
+                     RTNL_FLAG_DUMP_UNLOCKED | RTNL_FLAG_DUMP_SPLIT_NLM_DONE);
 }
index 681b54e1f3a64387787738ab6495531b8abe1771..e6790ea7487738d8ab825ac0298d15f6744fb3c4 100644 (file)
@@ -1165,6 +1165,9 @@ new_segment:
 
                        process_backlog++;
 
+#ifdef CONFIG_SKB_DECRYPTED
+                       skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
+#endif
                        tcp_skb_entail(sk, skb);
                        copy = size_goal;
 
@@ -2646,6 +2649,10 @@ void tcp_set_state(struct sock *sk, int state)
                if (oldstate != TCP_ESTABLISHED)
                        TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
                break;
+       case TCP_CLOSE_WAIT:
+               if (oldstate == TCP_SYN_RECV)
+                       TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
+               break;
 
        case TCP_CLOSE:
                if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
@@ -2657,7 +2664,7 @@ void tcp_set_state(struct sock *sk, int state)
                        inet_put_port(sk);
                fallthrough;
        default:
-               if (oldstate == TCP_ESTABLISHED)
+               if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
                        TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
        }
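Editor's note: TCP_MIB_CURRESTAB now also counts CLOSE-WAIT sockets, matching RFC 1213's tcpCurrEstab, which is defined over connections in either ESTABLISHED or CLOSE-WAIT: the counter goes up on entering ESTABLISHED, or CLOSE-WAIT from SYN-RECV, and comes down when leaving either state. A reduced transition-counter sketch (this generalizes the kernel's per-transition switch in tcp_set_state() into a membership test):

#include <stdio.h>

enum st { SYN_RECV, ESTABLISHED, CLOSE_WAIT, CLOSE };

static int curr_estab;	/* tcpCurrEstab: ESTABLISHED or CLOSE-WAIT */

static int counted(enum st s)
{
	return s == ESTABLISHED || s == CLOSE_WAIT;
}

static void set_state(enum st *cur, enum st next)
{
	if (!counted(*cur) && counted(next))
		curr_estab++;
	else if (counted(*cur) && !counted(next))
		curr_estab--;
	*cur = next;
}

int main(void)
{
	enum st s = SYN_RECV;

	set_state(&s, CLOSE_WAIT);		/* peer FIN at handshake tail */
	printf("curr_estab=%d\n", curr_estab);	/* 1 */
	set_state(&s, CLOSE);
	printf("curr_estab=%d\n", curr_estab);	/* 0 */
	return 0;
}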
 
index 781b67a525719a42f21b713eb424427670d7afb2..37c42b63ff993466b52c5eea7270312149ca913b 100644 (file)
@@ -933,6 +933,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
        struct tcp_ao_key *key;
        __be32 sisn, disn;
        u8 *traffic_key;
+       int state;
        u32 sne = 0;
 
        info = rcu_dereference(tcp_sk(sk)->ao_info);
@@ -948,8 +949,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
                disn = 0;
        }
 
+       state = READ_ONCE(sk->sk_state);
        /* Fast-path */
-       if (likely((1 << sk->sk_state) & TCP_AO_ESTABLISHED)) {
+       if (likely((1 << state) & TCP_AO_ESTABLISHED)) {
                enum skb_drop_reason err;
                struct tcp_ao_key *current_key;
 
@@ -988,6 +990,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
                return SKB_NOT_DROPPED_YET;
        }
 
+       if (unlikely(state == TCP_CLOSE))
+               return SKB_DROP_REASON_TCP_CLOSE;
+
        /* Lookup key based on peer address and keyid.
         * current_key and rnext_key must not be used on tcp listen
         * sockets as otherwise:
@@ -1001,7 +1006,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
        if (th->syn && !th->ack)
                goto verify_hash;
 
-       if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
+       if ((1 << state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV)) {
                /* Make the initial syn the likely case here */
                if (unlikely(req)) {
                        sne = tcp_ao_compute_sne(0, tcp_rsk(req)->rcv_isn,
@@ -1018,14 +1023,14 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb,
                        /* no way to figure out initial sisn/disn - drop */
                        return SKB_DROP_REASON_TCP_FLAGS;
                }
-       } else if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
+       } else if ((1 << state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                disn = info->lisn;
                if (th->syn || th->rst)
                        sisn = th->seq;
                else
                        sisn = info->risn;
        } else {
-               WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", sk->sk_state);
+               WARN_ONCE(1, "TCP-AO: Unexpected sk_state %d", state);
                return SKB_DROP_REASON_TCP_AOFAILURE;
        }
 verify_hash:
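Editor's note: lockless readers of sk->sk_state must load it once and branch on the snapshot; re-reading between checks can observe two different states, e.g. fail the TCP_AO_ESTABLISHED fast path, then hit the final WARN_ONCE() with a state the earlier checks already excluded. In C11 terms the marked read is a relaxed atomic load. A minimal sketch:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic int sk_state;	/* written concurrently elsewhere */

#define READ_ONCE_INT(x) atomic_load_explicit(&(x), memory_order_relaxed)

static const char *classify(void)
{
	/* One snapshot, used for every decision below: fresh reads of
	 * sk_state could each observe a different value. */
	int state = READ_ONCE_INT(sk_state);

	if (state == 1)
		return "established";
	if (state == 0)
		return "closed";
	return "transient";
}

int main(void)
{
	atomic_store_explicit(&sk_state, 1, memory_order_relaxed);
	puts(classify());
	return 0;
}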
index 0601bad798221389fe83318fbb17f192cec880d4..ff7e734e335b06f03c4c8815163a706136e42a3e 100644 (file)
@@ -58,7 +58,9 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                return orig_dst->lwtstate->orig_output(net, sk, skb);
        }
 
+       local_bh_disable();
        dst = dst_cache_get(&ilwt->dst_cache);
+       local_bh_enable();
        if (unlikely(!dst)) {
                struct ipv6hdr *ip6h = ipv6_hdr(skb);
                struct flowi6 fl6;
@@ -86,8 +88,11 @@ static int ila_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                        goto drop;
                }
 
-               if (ilwt->connected)
+               if (ilwt->connected) {
+                       local_bh_disable();
                        dst_cache_set_ip6(&ilwt->dst_cache, dst, &fl6.saddr);
+                       local_bh_enable();
+               }
        }
 
        skb_dst_set(skb, dst);
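Editor's note: dst_cache is per-CPU and its entries are also touched from softirq, so process-context callers must run the get/set under local_bh_disable(); the DEBUG_NET_WARN_ON_ONCE(!in_softirq()) added in dst_cache.c above enforces that, and this hunk plus the lwtunnel hunks below convert preempt_disable(), which does not block softirqs, to local_bh_disable(). A userspace mock of the guard/assert pairing, with the kernel primitives reduced to a thread-local counter (single-threaded, purely illustrative):

#include <assert.h>
#include <stdio.h>

/* Mocked context tracking: the real local_bh_disable() masks softirqs;
 * here it just records that we are in a BH-safe section. */
static _Thread_local int bh_disabled;

static void local_bh_disable(void) { bh_disabled++; }
static void local_bh_enable(void)  { bh_disabled--; }
static int  in_softirq(void)       { return bh_disabled; }

static int cache_slot;

static int dst_cache_get(void)
{
	assert(in_softirq());	/* DEBUG_NET_WARN_ON_ONCE(!in_softirq()) */
	return cache_slot;
}

static void dst_cache_set(int v)
{
	assert(in_softirq());
	cache_slot = v;
}

int main(void)
{
	local_bh_disable();
	if (!dst_cache_get())
		dst_cache_set(42);	/* a route lookup result, say */
	local_bh_enable();
	printf("%d\n", cache_slot);
	return 0;
}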
index 7563f8c6aa87cf9f7841ee78dcea2a16f60ac344..bf7120ecea1ebe834e70073710be0c1692d7ad1d 100644 (file)
@@ -351,9 +351,9 @@ do_encap:
                goto drop;
 
        if (!ipv6_addr_equal(&orig_daddr, &ipv6_hdr(skb)->daddr)) {
-               preempt_disable();
+               local_bh_disable();
                dst = dst_cache_get(&ilwt->cache);
-               preempt_enable();
+               local_bh_enable();
 
                if (unlikely(!dst)) {
                        struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -373,9 +373,9 @@ do_encap:
                                goto drop;
                        }
 
-                       preempt_disable();
+                       local_bh_disable();
                        dst_cache_set_ip6(&ilwt->cache, dst, &fl6.saddr);
-                       preempt_enable();
+                       local_bh_enable();
                }
 
                skb_dst_drop(skb);
index 31d77885bcae3e3843b6d486cfc21cdbe709bcf0..6e57c03e3255f09ac3927fd4b61232ed07103332 100644 (file)
@@ -966,6 +966,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
        if (!fib6_nh->rt6i_pcpu)
                return;
 
+       rcu_read_lock();
        /* release the reference to this fib entry from
         * all of its cached pcpu routes
         */
@@ -974,7 +975,9 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
                struct rt6_info *pcpu_rt;
 
                ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
-               pcpu_rt = *ppcpu_rt;
+
+               /* Paired with xchg() in rt6_get_pcpu_route() */
+               pcpu_rt = READ_ONCE(*ppcpu_rt);
 
                /* only dropping the 'from' reference if the cached route
                 * is using 'match'. The cached pcpu_rt->from only changes
@@ -988,6 +991,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh,
                        fib6_info_release(from);
                }
        }
+       rcu_read_unlock();
 }
 
 struct fib6_nh_pcpu_arg {
index a504b88ec06b5aec6b0f915c3ff044cd98f864ab..f083d9faba6b1e544121d711c8cf391aea292f37 100644 (file)
@@ -1409,6 +1409,7 @@ static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
                struct rt6_info *prev, **p;
 
                p = this_cpu_ptr(res->nh->rt6i_pcpu);
+               /* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
                prev = xchg(p, NULL);
                if (prev) {
                        dst_dev_put(&prev->dst);
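Editor's note: the pcpu route slot is claimed with xchg() on one side and scanned with READ_ONCE() on the other; both accesses must be marked so the compiler cannot tear, fuse, or re-load them. In C11 this is atomic_exchange paired with an atomic load. A sketch:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(long *) pcpu_slot;

int main(void)
{
	static long route = 42;

	atomic_store(&pcpu_slot, &route);

	/* Consumer: claim the cached entry, leaving NULL behind
	 * (the xchg() side in rt6_get_pcpu_route()). */
	long *mine = atomic_exchange(&pcpu_slot, NULL);

	/* Scanner: a plain "*ppcpu_rt" load here would be a data race;
	 * the marked load pairs with the exchange above. */
	long *seen = atomic_load_explicit(&pcpu_slot, memory_order_relaxed);

	printf("claimed=%ld seen=%p\n", mine ? *mine : -1L, (void *)seen);
	return 0;
}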
index a013b92cbb860aa36a23f50d3d5c5963857d601c..2c83b7586422ddd2ae877f98e47698410e47b233 100644 (file)
@@ -212,9 +212,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        if (unlikely(err))
                goto drop;
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&rlwt->cache);
-       preempt_enable();
+       local_bh_enable();
 
        if (unlikely(!dst)) {
                struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -234,9 +234,9 @@ static int rpl_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                        goto drop;
                }
 
-               preempt_disable();
+               local_bh_disable();
                dst_cache_set_ip6(&rlwt->cache, dst, &fl6.saddr);
-               preempt_enable();
+               local_bh_enable();
        }
 
        skb_dst_drop(skb);
@@ -268,23 +268,21 @@ static int rpl_input(struct sk_buff *skb)
                return err;
        }
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&rlwt->cache);
-       preempt_enable();
 
        if (!dst) {
                ip6_route_input(skb);
                dst = skb_dst(skb);
                if (!dst->error) {
-                       preempt_disable();
                        dst_cache_set_ip6(&rlwt->cache, dst,
                                          &ipv6_hdr(skb)->saddr);
-                       preempt_enable();
                }
        } else {
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
        }
+       local_bh_enable();
 
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
index a75df2ec8db0d369a4e3481576fc09f511a4dd36..098632adc9b5afa69e4b65439ee54c3fc0a8d668 100644 (file)
@@ -464,23 +464,21 @@ static int seg6_input_core(struct net *net, struct sock *sk,
 
        slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&slwt->cache);
-       preempt_enable();
 
        if (!dst) {
                ip6_route_input(skb);
                dst = skb_dst(skb);
                if (!dst->error) {
-                       preempt_disable();
                        dst_cache_set_ip6(&slwt->cache, dst,
                                          &ipv6_hdr(skb)->saddr);
-                       preempt_enable();
                }
        } else {
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
        }
+       local_bh_enable();
 
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
@@ -536,9 +534,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
 
        slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
 
-       preempt_disable();
+       local_bh_disable();
        dst = dst_cache_get(&slwt->cache);
-       preempt_enable();
+       local_bh_enable();
 
        if (unlikely(!dst)) {
                struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -558,9 +556,9 @@ static int seg6_output_core(struct net *net, struct sock *sk,
                        goto drop;
                }
 
-               preempt_disable();
+               local_bh_disable();
                dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
-               preempt_enable();
+               local_bh_enable();
        }
 
        skb_dst_drop(skb);
index b08e5d7687e3fcfb855b3db7ea916491d5c10c1b..83ad6c9709fe609414dc2a9a3519fc943293a2c5 100644 (file)
@@ -2958,8 +2958,9 @@ static int ieee80211_set_mcast_rate(struct wiphy *wiphy, struct net_device *dev,
        memcpy(sdata->vif.bss_conf.mcast_rate, rate,
               sizeof(int) * NUM_NL80211_BANDS);
 
-       ieee80211_link_info_change_notify(sdata, &sdata->deflink,
-                                         BSS_CHANGED_MCAST_RATE);
+       if (ieee80211_sdata_running(sdata))
+               ieee80211_link_info_change_notify(sdata, &sdata->deflink,
+                                                 BSS_CHANGED_MCAST_RATE);
 
        return 0;
 }
@@ -4016,7 +4017,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
                goto out;
        }
 
-       link_data->csa_chanreq = chanreq; 
+       link_data->csa_chanreq = chanreq;
        link_conf->csa_active = true;
 
        if (params->block_tx &&
@@ -4027,7 +4028,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
        }
 
        cfg80211_ch_switch_started_notify(sdata->dev,
-                                         &link_data->csa_chanreq.oper, 0,
+                                         &link_data->csa_chanreq.oper, link_id,
                                          params->count, params->block_tx);
 
        if (changed) {
index 9f5ffdc9db284a73be270c48f40d77a94b301e79..ecbb042dd0433e2224237dc7e8ae127bf7e97449 100644 (file)
@@ -230,15 +230,21 @@ ieee80211_he_spr_ie_to_bss_conf(struct ieee80211_vif *vif,
 
        if (!he_spr_ie_elem)
                return;
+
+       he_obss_pd->sr_ctrl = he_spr_ie_elem->he_sr_control;
        data = he_spr_ie_elem->optional;
 
        if (he_spr_ie_elem->he_sr_control &
            IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
-               data++;
+               he_obss_pd->non_srg_max_offset = *data++;
+
        if (he_spr_ie_elem->he_sr_control &
            IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
-               he_obss_pd->max_offset = *data++;
                he_obss_pd->min_offset = *data++;
+               he_obss_pd->max_offset = *data++;
+               memcpy(he_obss_pd->bss_color_bitmap, data, 8);
+               data += 8;
+               memcpy(he_obss_pd->partial_bssid_bitmap, data, 8);
                he_obss_pd->enable = true;
        }
 }
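Editor's note: in the SPR element, the fields after he_sr_control are optional and appear in a fixed order when their presence bits are set; the old parser skipped the non-SRG offset instead of storing it and read min/max reversed, and never copied the bitmaps. A compact sketch of this "control byte + optional fields" walk (bit values chosen to mirror the IEEE80211_HE_SPR_* flags, simplified):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NON_SRG_OFFSET_PRESENT	0x04
#define SRG_INFO_PRESENT	0x08

struct obss_pd {
	uint8_t sr_ctrl, non_srg_max_offset, min_offset, max_offset;
	uint8_t bss_color_bitmap[8], partial_bssid_bitmap[8];
};

/* Walk the optional fields in spec order, consuming only what the
 * presence bits in the control byte announce. */
static void parse_spr(const uint8_t *data, struct obss_pd *out)
{
	out->sr_ctrl = *data++;

	if (out->sr_ctrl & NON_SRG_OFFSET_PRESENT)
		out->non_srg_max_offset = *data++;

	if (out->sr_ctrl & SRG_INFO_PRESENT) {
		out->min_offset = *data++;	/* min precedes max */
		out->max_offset = *data++;
		memcpy(out->bss_color_bitmap, data, 8);
		data += 8;
		memcpy(out->partial_bssid_bitmap, data, 8);
	}
}

int main(void)
{
	uint8_t ie[20] = { NON_SRG_OFFSET_PRESENT | SRG_INFO_PRESENT,
			   20, 5, 25 /* 16 bitmap bytes follow, zeroed */ };
	struct obss_pd pd = { 0 };

	parse_spr(ie, &pd);
	printf("non_srg=%u min=%u max=%u\n",
	       pd.non_srg_max_offset, pd.min_offset, pd.max_offset);
	return 0;
}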
index eb62b7d4b4f7e298ce70b87c45fac16a28a03c94..3cedfdc9099b7deb3decc8b2583ebc495572c472 100644 (file)
@@ -1845,6 +1845,8 @@ void ieee80211_link_info_change_notify(struct ieee80211_sub_if_data *sdata,
 void ieee80211_configure_filter(struct ieee80211_local *local);
 u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
 
+void ieee80211_handle_queued_frames(struct ieee80211_local *local);
+
 u64 ieee80211_mgmt_tx_cookie(struct ieee80211_local *local);
 int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
                             u64 *cookie, gfp_t gfp);
index 4eaea0a9975b474b0b6b314b3244a88d8e0f1d77..1132dea0e290ea9ca06240cd09a5f11aba278f4c 100644 (file)
@@ -423,9 +423,8 @@ u64 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
               BSS_CHANGED_ERP_SLOT;
 }
 
-static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+void ieee80211_handle_queued_frames(struct ieee80211_local *local)
 {
-       struct ieee80211_local *local = from_tasklet(local, t, tasklet);
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(&local->skb_queue)) ||
@@ -450,6 +449,13 @@ static void ieee80211_tasklet_handler(struct tasklet_struct *t)
        }
 }
 
+static void ieee80211_tasklet_handler(struct tasklet_struct *t)
+{
+       struct ieee80211_local *local = from_tasklet(local, t, tasklet);
+
+       ieee80211_handle_queued_frames(local);
+}
+
 static void ieee80211_restart_work(struct work_struct *work)
 {
        struct ieee80211_local *local =
index cbc9b5e40cb35e81fb80dd55016c3afc8c31deb7..6d4510221c98e695ea140e40e5e9ec5cbf385a5e 100644 (file)
@@ -1776,6 +1776,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
        ifmsh->last_preq = jiffies;
        ifmsh->next_perr = jiffies;
        ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+       ifmsh->nonpeer_pm = NL80211_MESH_POWER_ACTIVE;
        /* Allocate all mesh structures when creating the first mesh interface. */
        if (!mesh_allocated)
                ieee80211s_init();
index a6b62169f08483c5aa481f4f8f59f67fa56a4ef7..c0a5c75cddcb9f1c3f509b909e443ea0e74fb19d 100644 (file)
@@ -1017,10 +1017,23 @@ void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
  */
 void mesh_path_flush_pending(struct mesh_path *mpath)
 {
+       struct ieee80211_sub_if_data *sdata = mpath->sdata;
+       struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct mesh_preq_queue *preq, *tmp;
        struct sk_buff *skb;
 
        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(mpath->sdata, skb);
+
+       spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
+       list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
+               if (ether_addr_equal(mpath->dst, preq->dst)) {
+                       list_del(&preq->list);
+                       kfree(preq);
+                       --ifmsh->preq_queue_len;
+               }
+       }
+       spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
 }
 
 /**
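Editor's note: mesh_path_flush_pending() now also purges queued PREQ entries targeting the path being flushed; list_for_each_entry_safe() is required because the loop deletes the node it stands on (the kernel version additionally holds mesh_preq_queue_lock). A generic safe-removal sketch over a singly linked list, where the pointer-to-pointer walk plays the same "never step through a freed node" role:

#include <stdio.h>
#include <stdlib.h>

struct preq {
	int dst;		/* stand-in for the destination address */
	struct preq *next;
};

static void flush_preqs(struct preq **head, int dst, int *qlen)
{
	struct preq **pp = head;

	while (*pp) {
		struct preq *p = *pp;

		if (p->dst == dst) {
			*pp = p->next;	/* unlink first ... */
			free(p);	/* ... then free */
			(*qlen)--;
		} else {
			pp = &p->next;
		}
	}
}

int main(void)
{
	struct preq *head = NULL;
	int qlen = 0;

	for (int i = 0; i < 4; i++) {
		struct preq *p = malloc(sizeof(*p));

		p->dst = i % 2;		/* two targets: 0 and 1 */
		p->next = head;
		head = p;
		qlen++;
	}
	flush_preqs(&head, 1, &qlen);
	printf("qlen=%d\n", qlen);	/* 2 */
	return 0;
}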
index 55e5497f89781c0fc2c74965b30b966366672d25..055a60e90979b32129d6ba93a2361aec70af0cbb 100644 (file)
@@ -111,7 +111,7 @@ ieee80211_parse_extension_element(u32 *crc,
                if (params->mode < IEEE80211_CONN_MODE_HE)
                        break;
                if (len >= sizeof(*elems->he_spr) &&
-                   len >= ieee80211_he_spr_size(data))
+                   len >= ieee80211_he_spr_size(data) - 1)
                        elems->he_spr = data;
                break;
        case WLAN_EID_EXT_HE_6GHZ_CAPA:
index 3da1c5c450358cb77422d48b9e9a0dd5542a7508..8ecc4b710b0e64449c2a03339b3d3093ec939b7b 100644 (file)
@@ -744,15 +744,21 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
                        local->hw_scan_ies_bufsize *= n_bands;
                }
 
-               local->hw_scan_req = kmalloc(
-                               sizeof(*local->hw_scan_req) +
-                               req->n_channels * sizeof(req->channels[0]) +
-                               local->hw_scan_ies_bufsize, GFP_KERNEL);
+               local->hw_scan_req = kmalloc(struct_size(local->hw_scan_req,
+                                                        req.channels,
+                                                        req->n_channels) +
+                                            local->hw_scan_ies_bufsize,
+                                            GFP_KERNEL);
                if (!local->hw_scan_req)
                        return -ENOMEM;
 
                local->hw_scan_req->req.ssids = req->ssids;
                local->hw_scan_req->req.n_ssids = req->n_ssids;
+               /* None of the channels are actually set
+                * up yet, but let UBSAN know the boundaries.
+                */
+               local->hw_scan_req->req.n_channels = req->n_channels;
+
                ies = (u8 *)local->hw_scan_req +
                        sizeof(*local->hw_scan_req) +
                        req->n_channels * sizeof(req->channels[0]);
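Editor's note: the scan fix replaces open-coded size arithmetic with struct_size(), which computes sizeof(struct) plus n trailing flexible-array elements with overflow checking, and records req.n_channels so UBSAN's array-bounds checks know the real length. A userspace sketch of the single-allocation layout (plain malloc arithmetic here; struct_size() saturates instead of wrapping on overflow):

#include <stdio.h>
#include <stdlib.h>

struct chan { int freq; };

struct scan_req {
	size_t n_channels;
	struct chan channels[];		/* flexible array member */
};

int main(void)
{
	size_t n = 4, ies_len = 128;

	/* struct_size(req, channels, n) == sizeof(*req) + n * sizeof(chan).
	 * The IE buffer is carved out of the same allocation, after the
	 * channel array. */
	struct scan_req *req = malloc(sizeof(*req) +
				      n * sizeof(req->channels[0]) + ies_len);
	unsigned char *ies = (unsigned char *)req + sizeof(*req) +
			     n * sizeof(req->channels[0]);

	req->n_channels = n;	/* tells bounds checkers the array length */
	ies[0] = 0;
	printf("req=%p ies=%p\n", (void *)req, (void *)ies);
	free(req);
	return 0;
}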
index da5fdd6f5c852ba5514f1d04e3ef71398b126b9c..aa22f09e6d145f3e7fb40b95005fb434856d6d26 100644 (file)
@@ -1724,7 +1724,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
        skb_queue_head_init(&pending);
 
        /* sync with ieee80211_tx_h_unicast_ps_buf */
-       spin_lock(&sta->ps_lock);
+       spin_lock_bh(&sta->ps_lock);
        /* Send all buffered frames to the station */
        for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
                int count = skb_queue_len(&pending), tmp;
@@ -1753,7 +1753,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
         */
        clear_sta_flag(sta, WLAN_STA_PSPOLL);
        clear_sta_flag(sta, WLAN_STA_UAPSD);
-       spin_unlock(&sta->ps_lock);
+       spin_unlock_bh(&sta->ps_lock);
 
        atomic_dec(&ps->num_sta_ps);
 
index 0b893e958959440e4b918e284533933a5c6145da..283bfc99417e57dd65ed713a42991a2362bfff81 100644 (file)
@@ -1567,6 +1567,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
 
 void ieee80211_stop_device(struct ieee80211_local *local)
 {
+       ieee80211_handle_queued_frames(local);
+
        ieee80211_led_radio(local, false);
        ieee80211_mod_tpt_led_trig(local, 0, IEEE80211_TPT_LEDTRIG_FL_RADIO);
 
index 7d44196ec5b630500d28815fde9f92c0890c8314..96b113854bd3cea706c46490b92bdb139ed0a242 100644 (file)
@@ -2916,9 +2916,14 @@ void mptcp_set_state(struct sock *sk, int state)
                if (oldstate != TCP_ESTABLISHED)
                        MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
                break;
-
+       case TCP_CLOSE_WAIT:
+               /* Unlike TCP, MPTCP sk would not have the TCP_SYN_RECV state:
+                * MPTCP "accepted" sockets will be created later on. So no
+                * transition from TCP_SYN_RECV to TCP_CLOSE_WAIT.
+                */
+               break;
        default:
-               if (oldstate == TCP_ESTABLISHED)
+               if (oldstate == TCP_ESTABLISHED || oldstate == TCP_CLOSE_WAIT)
                        MPTCP_DEC_STATS(sock_net(sk), MPTCP_MIB_CURRESTAB);
        }
 
index 374412ed780b6ed601e63e2af23cb0a364df80fc..ef0f8f73826f53b8995b7b11ae13da5f3bf82af9 100644 (file)
@@ -325,6 +325,7 @@ struct ncsi_dev_priv {
        spinlock_t          lock;            /* Protect the NCSI device    */
        unsigned int        package_probe_id;/* Current ID during probe    */
        unsigned int        package_num;     /* Number of packages         */
+       unsigned int        channel_probe_id;/* Current channel ID during probe */
        struct list_head    packages;        /* List of packages           */
        struct ncsi_channel *hot_channel;    /* Channel was ever active    */
        struct ncsi_request requests[256];   /* Request table              */
@@ -343,6 +344,7 @@ struct ncsi_dev_priv {
        bool                multi_package;   /* Enable multiple packages   */
        bool                mlx_multi_host;  /* Enable multi host Mellanox */
        u32                 package_whitelist; /* Packages to configure    */
+       unsigned char       channel_count;     /* Num of channels to probe   */
 };
 
 struct ncsi_cmd_arg {
index 745c788f1d1dfcd3a0467f08271d2ce44dacf2a8..5ecf611c882009d647909e52a686286e090ba6c3 100644 (file)
@@ -510,17 +510,19 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
 
                break;
        case ncsi_dev_state_suspend_gls:
-               ndp->pending_req_num = np->channel_num;
+               ndp->pending_req_num = 1;
 
                nca.type = NCSI_PKT_CMD_GLS;
                nca.package = np->id;
+               nca.channel = ndp->channel_probe_id;
+               ret = ncsi_xmit_cmd(&nca);
+               if (ret)
+                       goto error;
+               ndp->channel_probe_id++;
 
-               nd->state = ncsi_dev_state_suspend_dcnt;
-               NCSI_FOR_EACH_CHANNEL(np, nc) {
-                       nca.channel = nc->id;
-                       ret = ncsi_xmit_cmd(&nca);
-                       if (ret)
-                               goto error;
+               if (ndp->channel_probe_id == ndp->channel_count) {
+                       ndp->channel_probe_id = 0;
+                       nd->state = ncsi_dev_state_suspend_dcnt;
                }
 
                break;
@@ -1345,7 +1347,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 {
        struct ncsi_dev *nd = &ndp->ndev;
        struct ncsi_package *np;
-       struct ncsi_channel *nc;
        struct ncsi_cmd_arg nca;
        unsigned char index;
        int ret;
@@ -1423,23 +1424,6 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 
                nd->state = ncsi_dev_state_probe_cis;
                break;
-       case ncsi_dev_state_probe_cis:
-               ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
-
-               /* Clear initial state */
-               nca.type = NCSI_PKT_CMD_CIS;
-               nca.package = ndp->active_package->id;
-               for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
-                       nca.channel = index;
-                       ret = ncsi_xmit_cmd(&nca);
-                       if (ret)
-                               goto error;
-               }
-
-               nd->state = ncsi_dev_state_probe_gvi;
-               if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY))
-                       nd->state = ncsi_dev_state_probe_keep_phy;
-               break;
        case ncsi_dev_state_probe_keep_phy:
                ndp->pending_req_num = 1;
 
@@ -1452,14 +1436,17 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
 
                nd->state = ncsi_dev_state_probe_gvi;
                break;
+       case ncsi_dev_state_probe_cis:
        case ncsi_dev_state_probe_gvi:
        case ncsi_dev_state_probe_gc:
        case ncsi_dev_state_probe_gls:
                np = ndp->active_package;
-               ndp->pending_req_num = np->channel_num;
+               ndp->pending_req_num = 1;
 
-               /* Retrieve version, capability or link status */
-               if (nd->state == ncsi_dev_state_probe_gvi)
+               /* Clear initial state, or retrieve version, capability or link status */
+               if (nd->state == ncsi_dev_state_probe_cis)
+                       nca.type = NCSI_PKT_CMD_CIS;
+               else if (nd->state == ncsi_dev_state_probe_gvi)
                        nca.type = NCSI_PKT_CMD_GVI;
                else if (nd->state == ncsi_dev_state_probe_gc)
                        nca.type = NCSI_PKT_CMD_GC;
@@ -1467,19 +1454,29 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
                        nca.type = NCSI_PKT_CMD_GLS;
 
                nca.package = np->id;
-               NCSI_FOR_EACH_CHANNEL(np, nc) {
-                       nca.channel = nc->id;
-                       ret = ncsi_xmit_cmd(&nca);
-                       if (ret)
-                               goto error;
-               }
+               nca.channel = ndp->channel_probe_id;
 
-               if (nd->state == ncsi_dev_state_probe_gvi)
+               ret = ncsi_xmit_cmd(&nca);
+               if (ret)
+                       goto error;
+
+               if (nd->state == ncsi_dev_state_probe_cis) {
+                       nd->state = ncsi_dev_state_probe_gvi;
+                       if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
+                               nd->state = ncsi_dev_state_probe_keep_phy;
+               } else if (nd->state == ncsi_dev_state_probe_gvi) {
                        nd->state = ncsi_dev_state_probe_gc;
-               else if (nd->state == ncsi_dev_state_probe_gc)
+               } else if (nd->state == ncsi_dev_state_probe_gc) {
                        nd->state = ncsi_dev_state_probe_gls;
-               else
+               } else {
+                       nd->state = ncsi_dev_state_probe_cis;
+                       ndp->channel_probe_id++;
+               }
+
+               if (ndp->channel_probe_id == ndp->channel_count) {
+                       ndp->channel_probe_id = 0;
                        nd->state = ncsi_dev_state_probe_dp;
+               }
                break;
        case ncsi_dev_state_probe_dp:
                ndp->pending_req_num = 1;
@@ -1780,6 +1777,7 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
                ndp->requests[i].ndp = ndp;
                timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
        }
+       ndp->channel_count = NCSI_RESERVED_CHANNEL;
 
        spin_lock_irqsave(&ncsi_dev_lock, flags);
        list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
@@ -1813,6 +1811,7 @@ int ncsi_start_dev(struct ncsi_dev *nd)
 
        if (!(ndp->flags & NCSI_DEV_PROBED)) {
                ndp->package_probe_id = 0;
+               ndp->channel_probe_id = 0;
                nd->state = ncsi_dev_state_probe;
                schedule_work(&ndp->work);
                return 0;
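Editor's note: the NCSI rework removes the spec violation of probing all channels at once: pending_req_num is now 1, and channel_probe_id walks 0..channel_count-1 (channel_count learned from the Get Capabilities response), running CIS, GVI, GC, GLS per channel before moving to the next. A toy model of the single-request-in-flight walk; the kernel advances on each response inside ncsi_probe_channel(), whereas this loop "responds" instantly:

#include <stdio.h>

enum state { CIS, GVI, GC, GLS };
static const char *name[] = { "CIS", "GVI", "GC", "GLS" };

int main(void)
{
	int channel_count = 2;	/* from the GC response in practice */
	int channel = 0;
	enum state s = CIS;

	/* One command in flight at a time: issue, "wait" for the
	 * response, then advance the state machine. */
	while (channel < channel_count) {
		printf("send %s to channel %d\n", name[s], channel);

		if (s == GLS) {		/* last step for this channel */
			s = CIS;
			channel++;
		} else {
			s++;		/* CIS -> GVI -> GC -> GLS */
		}
	}
	printf("probe done\n");
	return 0;
}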
index bee290d0f48b6f25701312fd2de9b1b796761c7a..e28be33bdf2c487c0fbfe3a1b4de6f52c8f923cc 100644 (file)
@@ -795,12 +795,13 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
        struct ncsi_rsp_gc_pkt *rsp;
        struct ncsi_dev_priv *ndp = nr->ndp;
        struct ncsi_channel *nc;
+       struct ncsi_package *np;
        size_t size;
 
        /* Find the channel */
        rsp = (struct ncsi_rsp_gc_pkt *)skb_network_header(nr->rsp);
        ncsi_find_package_and_channel(ndp, rsp->rsp.common.channel,
-                                     NULL, &nc);
+                                     &np, &nc);
        if (!nc)
                return -ENODEV;
 
@@ -835,6 +836,7 @@ static int ncsi_rsp_handler_gc(struct ncsi_request *nr)
         */
        nc->vlan_filter.bitmap = U64_MAX;
        nc->vlan_filter.n_vids = rsp->vlan_cnt;
+       np->ndp->channel_count = rsp->channel_cnt;
 
        return 0;
 }
index 79e93a19d5fabeb5ee91e20388d67ef019064064..06e03f5cd7ce182590c142e4c83f8d93f800b89e 100644 (file)
@@ -185,7 +185,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 
        qopt->bands = qdisc_dev(sch)->real_num_tx_queues;
 
-       removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands),
+       removed = kmalloc(sizeof(*removed) * (q->max_bands - qopt->bands),
                          GFP_KERNEL);
        if (!removed)
                return -ENOMEM;
index 937a0c513c174b837225c400b4c3ad9915244810..b284a06b5a75fa3408c4a6515b71ed1a6ec63e78 100644 (file)
@@ -1176,16 +1176,13 @@ static int taprio_parse_mqprio_opt(struct net_device *dev,
 {
        bool allow_overlapping_txqs = TXTIME_ASSIST_IS_ENABLED(taprio_flags);
 
-       if (!qopt && !dev->num_tc) {
-               NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
-               return -EINVAL;
-       }
-
-       /* If num_tc is already set, it means that the user already
-        * configured the mqprio part
-        */
-       if (dev->num_tc)
+       if (!qopt) {
+               if (!dev->num_tc) {
+                       NL_SET_ERR_MSG(extack, "'mqprio' configuration is necessary");
+                       return -EINVAL;
+               }
                return 0;
+       }
 
        /* taprio imposes that traffic classes map 1:n to tx queues */
        if (qopt->num_tc > dev->num_tx_queues) {
index e50a286fd0fb77dfe2644d794ffb11d9db332906..c5f98c6b25613f542066f8fab1bb0d630d46a77d 100644 (file)
@@ -459,29 +459,11 @@ out:
 static void smc_adjust_sock_bufsizes(struct sock *nsk, struct sock *osk,
                                     unsigned long mask)
 {
-       struct net *nnet = sock_net(nsk);
-
        nsk->sk_userlocks = osk->sk_userlocks;
-       if (osk->sk_userlocks & SOCK_SNDBUF_LOCK) {
+       if (osk->sk_userlocks & SOCK_SNDBUF_LOCK)
                nsk->sk_sndbuf = osk->sk_sndbuf;
-       } else {
-               if (mask == SK_FLAGS_SMC_TO_CLC)
-                       WRITE_ONCE(nsk->sk_sndbuf,
-                                  READ_ONCE(nnet->ipv4.sysctl_tcp_wmem[1]));
-               else
-                       WRITE_ONCE(nsk->sk_sndbuf,
-                                  2 * READ_ONCE(nnet->smc.sysctl_wmem));
-       }
-       if (osk->sk_userlocks & SOCK_RCVBUF_LOCK) {
+       if (osk->sk_userlocks & SOCK_RCVBUF_LOCK)
                nsk->sk_rcvbuf = osk->sk_rcvbuf;
-       } else {
-               if (mask == SK_FLAGS_SMC_TO_CLC)
-                       WRITE_ONCE(nsk->sk_rcvbuf,
-                                  READ_ONCE(nnet->ipv4.sysctl_tcp_rmem[1]));
-               else
-                       WRITE_ONCE(nsk->sk_rcvbuf,
-                                  2 * READ_ONCE(nnet->smc.sysctl_rmem));
-       }
 }
 
 static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
index 25b49efc0926b00d51cb8aaa5c205d96ff900127..80846279de9f3b94be5c60eda8be17f2adeeaf6b 100644 (file)
@@ -221,15 +221,9 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
        return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }
 
-static inline int unix_recvq_full(const struct sock *sk)
-{
-       return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
-}
-
 static inline int unix_recvq_full_lockless(const struct sock *sk)
 {
-       return skb_queue_len_lockless(&sk->sk_receive_queue) >
-               READ_ONCE(sk->sk_max_ack_backlog);
+       return skb_queue_len_lockless(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
 struct sock *unix_peer_get(struct sock *s)
@@ -530,10 +524,10 @@ static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
        return 0;
 }
 
-static int unix_writable(const struct sock *sk)
+static int unix_writable(const struct sock *sk, unsigned char state)
 {
-       return sk->sk_state != TCP_LISTEN &&
-              (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
+       return state != TCP_LISTEN &&
+               (refcount_read(&sk->sk_wmem_alloc) << 2) <= READ_ONCE(sk->sk_sndbuf);
 }
 
 static void unix_write_space(struct sock *sk)
@@ -541,7 +535,7 @@ static void unix_write_space(struct sock *sk)
        struct socket_wq *wq;
 
        rcu_read_lock();
-       if (unix_writable(sk)) {
+       if (unix_writable(sk, READ_ONCE(sk->sk_state))) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
@@ -570,7 +564,6 @@ static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
                        sk_error_report(other);
                }
        }
-       other->sk_state = TCP_CLOSE;
 }
 
 static void unix_sock_destructor(struct sock *sk)
@@ -617,7 +610,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        state = sk->sk_state;
-       sk->sk_state = TCP_CLOSE;
+       WRITE_ONCE(sk->sk_state, TCP_CLOSE);
 
        skpair = unix_peer(sk);
        unix_peer(sk) = NULL;
@@ -638,7 +631,7 @@ static void unix_release_sock(struct sock *sk, int embrion)
                        unix_state_lock(skpair);
                        /* No more writes */
                        WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
-                       if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
+                       if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
                                WRITE_ONCE(skpair->sk_err, ECONNRESET);
                        unix_state_unlock(skpair);
                        skpair->sk_state_change(skpair);
@@ -739,7 +732,8 @@ static int unix_listen(struct socket *sock, int backlog)
        if (backlog > sk->sk_max_ack_backlog)
                wake_up_interruptible_all(&u->peer_wait);
        sk->sk_max_ack_backlog  = backlog;
-       sk->sk_state            = TCP_LISTEN;
+       WRITE_ONCE(sk->sk_state, TCP_LISTEN);
+
        /* set credentials so connect can copy them */
        init_peercred(sk);
        err = 0;
@@ -976,7 +970,7 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
        sk->sk_hash             = unix_unbound_hash(sk);
        sk->sk_allocation       = GFP_KERNEL_ACCOUNT;
        sk->sk_write_space      = unix_write_space;
-       sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
+       sk->sk_max_ack_backlog  = READ_ONCE(net->unx.sysctl_max_dgram_qlen);
        sk->sk_destruct         = unix_sock_destructor;
        u = unix_sk(sk);
        u->listener = NULL;
@@ -1402,7 +1396,8 @@ restart:
                if (err)
                        goto out_unlock;
 
-               sk->sk_state = other->sk_state = TCP_ESTABLISHED;
+               WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
+               WRITE_ONCE(other->sk_state, TCP_ESTABLISHED);
        } else {
                /*
                 *      1003.1g breaking connected state with AF_UNSPEC
@@ -1419,13 +1414,20 @@ restart:
 
                unix_peer(sk) = other;
                if (!other)
-                       sk->sk_state = TCP_CLOSE;
+                       WRITE_ONCE(sk->sk_state, TCP_CLOSE);
                unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
 
                unix_state_double_unlock(sk, other);
 
-               if (other != old_peer)
+               if (other != old_peer) {
                        unix_dgram_disconnected(sk, old_peer);
+
+                       unix_state_lock(old_peer);
+                       if (!unix_peer(old_peer))
+                               WRITE_ONCE(old_peer->sk_state, TCP_CLOSE);
+                       unix_state_unlock(old_peer);
+               }
+
                sock_put(old_peer);
        } else {
                unix_peer(sk) = other;
@@ -1473,7 +1475,6 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
        struct sk_buff *skb = NULL;
        long timeo;
        int err;
-       int st;
 
        err = unix_validate_addr(sunaddr, addr_len);
        if (err)
@@ -1538,7 +1539,7 @@ restart:
        if (other->sk_shutdown & RCV_SHUTDOWN)
                goto out_unlock;
 
-       if (unix_recvq_full(other)) {
+       if (unix_recvq_full_lockless(other)) {
                err = -EAGAIN;
                if (!timeo)
                        goto out_unlock;
@@ -1563,9 +1564,7 @@ restart:
 
           Well, and we have to recheck the state after the socket is locked.
         */
-       st = sk->sk_state;
-
-       switch (st) {
+       switch (READ_ONCE(sk->sk_state)) {
        case TCP_CLOSE:
                /* This is ok... continue with connect */
                break;
@@ -1580,7 +1579,7 @@ restart:
 
        unix_state_lock_nested(sk, U_LOCK_SECOND);
 
-       if (sk->sk_state != st) {
+       if (sk->sk_state != TCP_CLOSE) {
                unix_state_unlock(sk);
                unix_state_unlock(other);
                sock_put(other);
@@ -1633,7 +1632,7 @@ restart:
        copy_peercred(sk, other);
 
        sock->state     = SS_CONNECTED;
-       sk->sk_state    = TCP_ESTABLISHED;
+       WRITE_ONCE(sk->sk_state, TCP_ESTABLISHED);
        sock_hold(newsk);
 
        smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
@@ -1705,7 +1704,7 @@ static int unix_accept(struct socket *sock, struct socket *newsock,
                goto out;
 
        arg->err = -EINVAL;
-       if (sk->sk_state != TCP_LISTEN)
+       if (READ_ONCE(sk->sk_state) != TCP_LISTEN)
                goto out;
 
        /* If socket state is TCP_LISTEN it cannot change (for now...),
@@ -1962,7 +1961,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
        }
 
        err = -EMSGSIZE;
-       if (len > sk->sk_sndbuf - 32)
+       if (len > READ_ONCE(sk->sk_sndbuf) - 32)
                goto out;
 
        if (len > SKB_MAX_ALLOC) {
@@ -2044,7 +2043,7 @@ restart_locked:
                        unix_peer(sk) = NULL;
                        unix_dgram_peer_wake_disconnect_wakeup(sk, other);
 
-                       sk->sk_state = TCP_CLOSE;
+                       WRITE_ONCE(sk->sk_state, TCP_CLOSE);
                        unix_state_unlock(sk);
 
                        unix_dgram_disconnected(sk, other);
@@ -2221,7 +2220,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
        }
 
        if (msg->msg_namelen) {
-               err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
+               err = READ_ONCE(sk->sk_state) == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
                goto out_err;
        } else {
                err = -ENOTCONN;
@@ -2242,7 +2241,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
                                                   &err, 0);
                } else {
                        /* Keep two messages in the pipe so it schedules better */
-                       size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
+                       size = min_t(int, size, (READ_ONCE(sk->sk_sndbuf) >> 1) - 64);
 
                        /* allow fallback to order-0 allocations */
                        size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
@@ -2335,7 +2334,7 @@ static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
        if (err)
                return err;
 
-       if (sk->sk_state != TCP_ESTABLISHED)
+       if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
                return -ENOTCONN;
 
        if (msg->msg_namelen)
@@ -2349,7 +2348,7 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
 
-       if (sk->sk_state != TCP_ESTABLISHED)
+       if (READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)
                return -ENOTCONN;
 
        return unix_dgram_recvmsg(sock, msg, size, flags);
@@ -2654,7 +2653,7 @@ static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
 
 static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
-       if (unlikely(sk->sk_state != TCP_ESTABLISHED))
+       if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED))
                return -ENOTCONN;
 
        return unix_read_skb(sk, recv_actor);
@@ -2678,7 +2677,7 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state,
        size_t size = state->size;
        unsigned int last_len;
 
-       if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+       if (unlikely(READ_ONCE(sk->sk_state) != TCP_ESTABLISHED)) {
                err = -EINVAL;
                goto out;
        }
@@ -3009,7 +3008,7 @@ long unix_inq_len(struct sock *sk)
        struct sk_buff *skb;
        long amount = 0;
 
-       if (sk->sk_state == TCP_LISTEN)
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN)
                return -EINVAL;
 
        spin_lock(&sk->sk_receive_queue.lock);
@@ -3121,12 +3120,14 @@ static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
 static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
 {
        struct sock *sk = sock->sk;
+       unsigned char state;
        __poll_t mask;
        u8 shutdown;
 
        sock_poll_wait(file, sock, wait);
        mask = 0;
        shutdown = READ_ONCE(sk->sk_shutdown);
+       state = READ_ONCE(sk->sk_state);
 
        /* exceptional events? */
        if (READ_ONCE(sk->sk_err))
@@ -3148,14 +3149,14 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
 
        /* Connection-based need to check for termination and startup */
        if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
-           sk->sk_state == TCP_CLOSE)
+           state == TCP_CLOSE)
                mask |= EPOLLHUP;
 
        /*
         * we set writable also when the other side has shut down the
         * connection. This prevents stuck sockets.
         */
-       if (unix_writable(sk))
+       if (unix_writable(sk, state))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
 
        return mask;
@@ -3166,12 +3167,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
 {
        struct sock *sk = sock->sk, *other;
        unsigned int writable;
+       unsigned char state;
        __poll_t mask;
        u8 shutdown;
 
        sock_poll_wait(file, sock, wait);
        mask = 0;
        shutdown = READ_ONCE(sk->sk_shutdown);
+       state = READ_ONCE(sk->sk_state);
 
        /* exceptional events? */
        if (READ_ONCE(sk->sk_err) ||
@@ -3191,19 +3194,14 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
-       if (sk->sk_type == SOCK_SEQPACKET) {
-               if (sk->sk_state == TCP_CLOSE)
-                       mask |= EPOLLHUP;
-               /* connection hasn't started yet? */
-               if (sk->sk_state == TCP_SYN_SENT)
-                       return mask;
-       }
+       if (sk->sk_type == SOCK_SEQPACKET && state == TCP_CLOSE)
+               mask |= EPOLLHUP;
 
        /* No write status requested, avoid expensive OUT tests. */
        if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
                return mask;
 
-       writable = unix_writable(sk);
+       writable = unix_writable(sk, state);
        if (writable) {
                unix_state_lock(sk);
 
index ae39538c5042b34e864e709004bf554841eaf4c9..937edf4afed41339afce117bd08f9a58bb2a6118 100644 (file)
@@ -65,7 +65,7 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb)
        u32 *buf;
        int i;
 
-       if (sk->sk_state == TCP_LISTEN) {
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
                spin_lock(&sk->sk_receive_queue.lock);
 
                attr = nla_reserve(nlskb, UNIX_DIAG_ICONS,
@@ -103,8 +103,8 @@ static int sk_diag_show_rqlen(struct sock *sk, struct sk_buff *nlskb)
 {
        struct unix_diag_rqlen rql;
 
-       if (sk->sk_state == TCP_LISTEN) {
-               rql.udiag_rqueue = sk->sk_receive_queue.qlen;
+       if (READ_ONCE(sk->sk_state) == TCP_LISTEN) {
+               rql.udiag_rqueue = skb_queue_len_lockless(&sk->sk_receive_queue);
                rql.udiag_wqueue = sk->sk_max_ack_backlog;
        } else {
                rql.udiag_rqueue = (u32) unix_inq_len(sk);
@@ -136,7 +136,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
        rep = nlmsg_data(nlh);
        rep->udiag_family = AF_UNIX;
        rep->udiag_type = sk->sk_type;
-       rep->udiag_state = sk->sk_state;
+       rep->udiag_state = READ_ONCE(sk->sk_state);
        rep->pad = 0;
        rep->udiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rep->udiag_cookie);
@@ -165,7 +165,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_r
            sock_diag_put_meminfo(sk, skb, UNIX_DIAG_MEMINFO))
                goto out_nlmsg_trim;
 
-       if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, sk->sk_shutdown))
+       if (nla_put_u8(skb, UNIX_DIAG_SHUTDOWN, READ_ONCE(sk->sk_shutdown)))
                goto out_nlmsg_trim;
 
        if ((req->udiag_show & UDIAG_SHOW_UID) &&
@@ -215,7 +215,7 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                sk_for_each(sk, &net->unx.table.buckets[slot]) {
                        if (num < s_num)
                                goto next;
-                       if (!(req->udiag_states & (1 << sk->sk_state)))
+                       if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state))))
                                goto next;
                        if (sk_diag_dump(sk, skb, req, sk_user_ns(skb->sk),
                                         NETLINK_CB(cb->skb).portid,
index 3fb1b637352a9d0b469206d890601031ffd4c68f..4b1f45e3070e06c72095037793cf5b087bf5cd0f 100644 (file)
@@ -431,7 +431,7 @@ static void cfg80211_wiphy_work(struct work_struct *work)
        if (wk) {
                list_del_init(&wk->entry);
                if (!list_empty(&rdev->wiphy_work_list))
-                       schedule_work(work);
+                       queue_work(system_unbound_wq, work);
                spin_unlock_irq(&rdev->wiphy_work_lock);
 
                wk->func(&rdev->wiphy, wk);
index e106dcea3977828456992ca3ca9af97d7c9775c7..c569c37da31758a1b7f4051541537588140ef45d 100644 (file)
@@ -56,7 +56,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.burst_period = 0;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD])
                out->ftm.burst_period =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
+                       nla_get_u16(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD]);
 
        out->ftm.asap = !!tb[NL80211_PMSR_FTM_REQ_ATTR_ASAP];
        if (out->ftm.asap && !capa->ftm.asap) {
@@ -75,7 +75,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.num_bursts_exp = 0;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP])
                out->ftm.num_bursts_exp =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP]);
 
        if (capa->ftm.max_bursts_exponent >= 0 &&
            out->ftm.num_bursts_exp > capa->ftm.max_bursts_exponent) {
@@ -88,7 +88,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.burst_duration = 15;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION])
                out->ftm.burst_duration =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION]);
 
        out->ftm.ftms_per_burst = 0;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST])
@@ -107,7 +107,7 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev,
        out->ftm.ftmr_retries = 3;
        if (tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES])
                out->ftm.ftmr_retries =
-                       nla_get_u32(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
+                       nla_get_u8(tb[NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES]);
 
        out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI];
        if (out->ftm.request_lci && !capa->ftm.request_lci) {
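Editor's note: each pmsr FTM attribute is now read with the accessor matching its declared policy width (nla_get_u8()/nla_get_u16() instead of nla_get_u32()). Reading four bytes from a one-byte attribute pulls in the alignment padding, and on big-endian the wanted byte is not even in the low-order position. A sketch of why the width must match (payload layout simplified):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A 1-byte attribute payload padded to 4 bytes, as netlink does. */
static const uint8_t attr_payload[4] = { 3, 0xde, 0xad, 0xbe };

static uint8_t get_u8(const uint8_t *p)  { return *p; }

static uint32_t get_u32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* reads the padding bytes too */
	return v;
}

int main(void)
{
	/* ftmr_retries was declared NLA_U8 in the policy: */
	printf("u8  read: %u\n", get_u8(attr_payload));	  /* 3 */
	printf("u32 read: %u\n", get_u32(attr_payload));  /* garbage */
	return 0;
}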
index 43897a5269b6a9e011e8e31bd882f55cd19480c5..755af47b88b91a6a315daecbc47341b9f4c71eb8 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Portions of this file
  * Copyright(c) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018, 2021-2023 Intel Corporation
+ * Copyright (C) 2018, 2021-2024 Intel Corporation
  */
 #ifndef __CFG80211_RDEV_OPS
 #define __CFG80211_RDEV_OPS
@@ -458,6 +458,10 @@ static inline int rdev_scan(struct cfg80211_registered_device *rdev,
                            struct cfg80211_scan_request *request)
 {
        int ret;
+
+       if (WARN_ON_ONCE(!request->n_ssids && request->ssids))
+               return -EINVAL;
+
        trace_rdev_scan(&rdev->wiphy, request);
        ret = rdev->ops->scan(&rdev->wiphy, request);
        trace_rdev_return_int(&rdev->wiphy, ret);
index 127853877a0ad14fec0f92615976c8fc8d33c3ef..2f2a3163968a7cc5c44ce85f73068e0a2765df76 100644 (file)
@@ -812,6 +812,7 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
        LIST_HEAD(coloc_ap_list);
        bool need_scan_psc = true;
        const struct ieee80211_sband_iftype_data *iftd;
+       size_t size, offs_ssids, offs_6ghz_params, offs_ies;
 
        rdev_req->scan_6ghz = true;
 
@@ -877,10 +878,15 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
                spin_unlock_bh(&rdev->bss_lock);
        }
 
-       request = kzalloc(struct_size(request, channels, n_channels) +
-                         sizeof(*request->scan_6ghz_params) * count +
-                         sizeof(*request->ssids) * rdev_req->n_ssids,
-                         GFP_KERNEL);
+       size = struct_size(request, channels, n_channels);
+       offs_ssids = size;
+       size += sizeof(*request->ssids) * rdev_req->n_ssids;
+       offs_6ghz_params = size;
+       size += sizeof(*request->scan_6ghz_params) * count;
+       offs_ies = size;
+       size += rdev_req->ie_len;
+
+       request = kzalloc(size, GFP_KERNEL);
        if (!request) {
                cfg80211_free_coloc_ap_list(&coloc_ap_list);
                return -ENOMEM;
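
This hunk and the next replace a single-expression kzalloc() with an explicit layout: the total size is built up step by step, recording the offset at which each trailing region (SSIDs, 6 GHz params, IEs) begins, and the pointers are fixed up after allocation. A compact sketch of the idiom, with invented demo_* types:

  #include <linux/overflow.h>
  #include <linux/slab.h>

  struct demo_item { u32 val; };

  struct demo_req {
          unsigned int n_channels;
          struct demo_item *items;        /* points into the tail */
          u8 *ie;                         /* points into the tail */
          u32 channels[];                 /* flexible array member */
  };

  static struct demo_req *demo_alloc(unsigned int n_chan,
                                     unsigned int n_items, size_t ie_len)
  {
          size_t size, offs_items, offs_ie;
          struct demo_req *req;

          /* grow the size region by region, remembering offsets */
          size = struct_size(req, channels, n_chan);
          offs_items = size;
          size += sizeof(*req->items) * n_items;
          offs_ie = size;
          size += ie_len;

          req = kzalloc(size, GFP_KERNEL);
          if (!req)
                  return NULL;

          /* fix up the tail pointers from the recorded offsets */
          req->items = (void *)req + offs_items;
          req->ie = (void *)req + offs_ie;
          req->n_channels = n_chan;
          return req;
  }
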
@@ -888,8 +894,26 @@ static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
 
        *request = *rdev_req;
        request->n_channels = 0;
-       request->scan_6ghz_params =
-               (void *)&request->channels[n_channels];
+       request->n_6ghz_params = 0;
+       if (rdev_req->n_ssids) {
+               /*
+                * Add the ssids from the parent scan request to the new
+                * scan request, so the driver would be able to use them
+                * in its probe requests to discover hidden APs on PSC
+                * channels.
+                */
+               request->ssids = (void *)request + offs_ssids;
+               memcpy(request->ssids, rdev_req->ssids,
+                      sizeof(*request->ssids) * request->n_ssids);
+       }
+       request->scan_6ghz_params = (void *)request + offs_6ghz_params;
+
+       if (rdev_req->ie_len) {
+               void *ie = (void *)request + offs_ies;
+
+               memcpy(ie, rdev_req->ie, rdev_req->ie_len);
+               request->ie = ie;
+       }
 
        /*
         * PSC channels should not be scanned in case of direct scan with 1 SSID
@@ -978,17 +1002,8 @@ skip:
 
        if (request->n_channels) {
                struct cfg80211_scan_request *old = rdev->int_scan_req;
-               rdev->int_scan_req = request;
 
-               /*
-                * Add the ssids from the parent scan request to the new scan
-                * request, so the driver would be able to use them in its
-                * probe requests to discover hidden APs on PSC channels.
-                */
-               request->ssids = (void *)&request->channels[request->n_channels];
-               request->n_ssids = rdev_req->n_ssids;
-               memcpy(request->ssids, rdev_req->ssids, sizeof(*request->ssids) *
-                      request->n_ssids);
+               rdev->int_scan_req = request;
 
                /*
                 * If this scan follows a previous scan, save the scan start
@@ -2128,7 +2143,8 @@ static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen,
        struct ieee80211_he_operation *he_oper;
 
        tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen);
-       if (tmp && tmp->datalen >= sizeof(*he_oper) + 1) {
+       if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 &&
+           tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) {
                const struct ieee80211_he_6ghz_oper *he_6ghz_oper;
 
                he_oper = (void *)&tmp->data[1];
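
The extra condition asks the element itself how long it claims to be (ieee80211_he_oper_size() decodes that from bits in the fixed part) and only then trusts datalen, instead of dereferencing optional trailing fields that may not be present. The two-step validation in generic form; the demo_* element format is invented:

  #include <linux/types.h>

  struct demo_hdr {
          u8 params;      /* low bits encode optional trailing bytes */
  };

  /* full size this particular element claims, from its fixed header */
  static size_t demo_elem_size(const u8 *data)
  {
          return sizeof(struct demo_hdr) + (data[0] & 0x3);
  }

  static bool demo_elem_ok(const u8 *data, size_t len)
  {
          /* fixed part first, then the self-described full size */
          return len >= sizeof(struct demo_hdr) &&
                 len >= demo_elem_size(data);
  }
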
index 565511a3f461ed6872db387439a196351d02f00a..62f26618f674741a5163fa8e8d14c5319b2ceff8 100644 (file)
@@ -5,7 +5,7 @@
  *
  * Copyright 2005-2006 Jiri Benc <jbenc@suse.cz>
  * Copyright 2006      Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2020-2021, 2023 Intel Corporation
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
  */
 
 #include <linux/device.h>
@@ -137,7 +137,7 @@ static int wiphy_resume(struct device *dev)
        if (rdev->wiphy.registered && rdev->ops->resume)
                ret = rdev_resume(rdev);
        rdev->suspended = false;
-       schedule_work(&rdev->wiphy_work);
+       queue_work(system_unbound_wq, &rdev->wiphy_work);
        wiphy_unlock(&rdev->wiphy);
 
        if (ret)
index 2bde8a35463132d32dd0a185566ad4e72012e96a..082c6f9c5416eb5da26282f6a030440c79ed223c 100644 (file)
@@ -2549,6 +2549,7 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
 {
        struct cfg80211_registered_device *rdev;
        struct wireless_dev *wdev;
+       int ret;
 
        wdev = dev->ieee80211_ptr;
        if (!wdev)
@@ -2560,7 +2561,11 @@ int cfg80211_get_station(struct net_device *dev, const u8 *mac_addr,
 
        memset(sinfo, 0, sizeof(*sinfo));
 
-       return rdev_get_station(rdev, dev, mac_addr, sinfo);
+       wiphy_lock(&rdev->wiphy);
+       ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
+       wiphy_unlock(&rdev->wiphy);
+
+       return ret;
 }
 EXPORT_SYMBOL(cfg80211_get_station);
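
The fix wraps the driver callback in wiphy_lock()/wiphy_unlock() so that callers of the exported helper get the same locking context as internal cfg80211 paths. The shape of the pattern, reduced to a sketch with invented demo_* names:

  #include <linux/mutex.h>

  struct demo_dev { int id; };
  struct demo_info { int val; };

  static DEFINE_MUTEX(demo_lock);

  static int demo_op_get_info(struct demo_dev *dev, struct demo_info *info)
  {
          info->val = dev->id;    /* stand-in for a driver callback */
          return 0;
  }

  int demo_get_info(struct demo_dev *dev, struct demo_info *info)
  {
          int ret;

          /* take the lock here so every external caller runs the
           * callback under the same lock the internal paths hold */
          mutex_lock(&demo_lock);
          ret = demo_op_get_info(dev, info);
          mutex_unlock(&demo_lock);

          return ret;
  }
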
 
index 727aa20be4bde8dc63a544a44a5cdeb19cac7dcb..7d1c0986f9bb354aa5a562f3edc72e54837b8ac5 100644 (file)
@@ -313,13 +313,10 @@ static bool xsk_is_bound(struct xdp_sock *xs)
 
 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-       struct net_device *dev = xdp->rxq->dev;
-       u32 qid = xdp->rxq->queue_index;
-
        if (!xsk_is_bound(xs))
                return -ENXIO;
 
-       if (!dev->_rx[qid].pool || xs->umem != dev->_rx[qid].pool->umem)
+       if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;
 
        if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
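
The revert restores the strict binding check: a frame is accepted only if it arrived on exactly the netdev and queue this socket was bound to, not on any queue that happens to share the same UMEM. The check reduced to a sketch (demo_* types are invented):

  #include <linux/errno.h>

  struct demo_sock { int ifindex; u32 queue_id; };
  struct demo_rxq  { int ifindex; u32 queue_index; };

  static int demo_rcv_check(const struct demo_sock *xs,
                            const struct demo_rxq *rxq)
  {
          /* exact device and queue match, nothing looser */
          if (xs->ifindex != rxq->ifindex ||
              xs->queue_id != rxq->queue_index)
                  return -EINVAL;
          return 0;
  }
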
index 3df0125ed5fa7aae35673bf266029a356f19c147..50befe125ddc550997fc0c8452c05c8e6f955579 100644 (file)
@@ -393,7 +393,8 @@ static int probe_uprobe_multi_link(int token_fd)
        err = -errno; /* close() can clobber errno */
 
        if (link_fd >= 0 || err != -EBADF) {
-               close(link_fd);
+               if (link_fd >= 0)
+                       close(link_fd);
                close(prog_fd);
                return 0;
        }
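
close(-1) fails with EBADF and can clobber the errno value saved just above, so the fix only closes descriptors that were actually opened. The same guard as a tiny helper, sketched:

  #include <unistd.h>

  /* close fd only if it refers to an open descriptor */
  static void close_if_valid(int fd)
  {
          if (fd >= 0)
                  close(fd);
  }
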
index 02e718f06e0f5c5b81c14e9d75c635b7c0837a31..40531e56776e426c247af0472a1d1ed6e9c0e49e 100644 (file)
@@ -84,7 +84,7 @@ int BPF_PROG(trace_tcp_connect, struct sock *sk)
 }
 
 SEC("fexit/inet_csk_accept")
-int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
+int BPF_PROG(inet_csk_accept, struct sock *sk, struct proto_accept_arg *arg,
             struct sock *accepted_sk)
 {
        set_task_info(accepted_sk);
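
A BPF_PROG() parameter list has to mirror the traced kernel function's current prototype, with the return value appended for fexit programs; this hunk tracks inet_csk_accept() switching from (flags, err, kern) to a single struct proto_accept_arg pointer. A minimal sketch of the convention (the program name is invented; the signature follows the hunk above):

  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  SEC("fexit/inet_csk_accept")
  int BPF_PROG(demo_accept, struct sock *sk, struct proto_accept_arg *arg,
               struct sock *accepted_sk)
  {
          /* accepted_sk is inet_csk_accept()'s return value; for
           * fexit, BPF_PROG() appends it after the real parameters */
          return 0;
  }

  char _license[] SEC("license") = "GPL";
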
index 22061204fb691298e6004aaec1839d011bd17ecb..241542441c5177f60f08963f0b63d456d0ecfdbc 100644 (file)
@@ -2,3 +2,4 @@ CONFIG_IPV6=y
 CONFIG_NET_SCH_NETEM=m
 CONFIG_HSR=y
 CONFIG_VETH=y
+CONFIG_BRIDGE=y
index edc030e81a4649e5245c6bc1a336f3c83c28daed..9155c914c064fe542fae87631561692e48104a26 100644 (file)
@@ -15,7 +15,7 @@ ksft_xfail=2
 ksft_skip=4
 
 # namespace list created by setup_ns
-NS_LIST=""
+NS_LIST=()
 
 ##############################################################################
 # Helpers
@@ -27,6 +27,7 @@ __ksft_status_merge()
        local -A weights
        local weight=0
 
+       local i
        for i in "$@"; do
                weights[$i]=$((weight++))
        done
@@ -67,9 +68,7 @@ loopy_wait()
        while true
        do
                local out
-               out=$("$@")
-               local ret=$?
-               if ((!ret)); then
+               if out=$("$@"); then
                        echo -n "$out"
                        return 0
                fi
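
Under "set -e", a bare out=$("$@") aborts the script as soon as the command fails, and the $? check on the next line never runs; folding the assignment into the "if" condition keeps the failure as an ordinary branch instead. Note that "local out" stays on its own line, since local out=$(...) would mask the command's exit status with local's own. A sketch of the resulting loop (demo names are invented):

  busywait_demo()
  {
          while true; do
                  local out
                  if out=$("$@"); then
                          echo -n "$out"
                          return 0
                  fi
                  sleep 0.1
          done
  }

  # usage: busywait_demo ip link show dev lo
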
@@ -139,6 +138,7 @@ cleanup_ns()
        fi
 
        for ns in "$@"; do
+               [ -z "${ns}" ] && continue
                ip netns delete "${ns}" &> /dev/null
                if ! busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then
                        echo "Warn: Failed to remove namespace $ns"
@@ -152,7 +152,7 @@ cleanup_ns()
 
 cleanup_all_ns()
 {
-       cleanup_ns $NS_LIST
+       cleanup_ns "${NS_LIST[@]}"
 }
 
 # setup netns with given names as prefix. e.g
@@ -161,7 +161,7 @@ setup_ns()
 {
        local ns=""
        local ns_name=""
-       local ns_list=""
+       local ns_list=()
        local ns_exist=
        for ns_name in "$@"; do
                # Some test may setup/remove same netns multi times
@@ -177,13 +177,13 @@ setup_ns()
 
                if ! ip netns add "$ns"; then
                        echo "Failed to create namespace $ns_name"
-                       cleanup_ns "$ns_list"
+                       cleanup_ns "${ns_list[@]}"
                        return $ksft_skip
                fi
                ip -n "$ns" link set lo up
-               ! $ns_exist && ns_list="$ns_list $ns"
+               ! $ns_exist && ns_list+=("$ns")
        done
-       NS_LIST="$NS_LIST $ns_list"
+       NS_LIST+=("${ns_list[@]}")
 }
 
 tc_rule_stats_get()
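
Taken together, the lib.sh hunks above convert the namespace bookkeeping from whitespace-joined strings to bash arrays, declare loop variables local so they stop leaking into callers, and have cleanup_ns() skip empty entries. Arrays preserve element boundaries under quoting and expand to nothing when empty, which is what makes those guards reliable. A sketch with invented names:

  set -e

  NS_DEMO=()
  NS_DEMO+=("ns-one")
  NS_DEMO+=("ns-two")

  cleanup_demo()
  {
          local ns
          for ns in "$@"; do
                  # skip empty entries, as cleanup_ns now does
                  [ -z "$ns" ] && continue
                  echo "would remove: $ns"
          done
  }

  cleanup_demo "${NS_DEMO[@]}"
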