Merge tag 'gpio-fixes-for-v6.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel...
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 26 Apr 2024 18:27:02 +0000 (11:27 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 26 Apr 2024 18:27:02 +0000 (11:27 -0700)
Pull gpio fixes from Bartosz Golaszewski:

 - fix a regression in pin access control in gpio-tegra186

 - make data pointer dereference robust in Intel Tangier driver

* tag 'gpio-fixes-for-v6.9-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux:
  gpio: tegra186: Fix tegra186_gpio_is_accessible() check
  gpio: tangier: Use correct type for the IRQ chip data

198 files changed:
.mailmap
Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
MAINTAINERS
arch/loongarch/Kconfig
arch/loongarch/include/asm/crash_core.h [deleted file]
arch/loongarch/include/asm/crash_reserve.h [new file with mode: 0644]
arch/loongarch/include/asm/perf_event.h
arch/loongarch/include/asm/tlb.h
arch/loongarch/kernel/perf_event.c
arch/loongarch/mm/fault.c
block/bdev.c
drivers/acpi/cppc_acpi.c
drivers/acpi/x86/s2idle.c
drivers/bluetooth/btmtk.c
drivers/bluetooth/btqca.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_qca.c
drivers/cxl/core/mbox.c
drivers/dpll/dpll_core.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/drm_gem_atomic_helper.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
drivers/gpu/drm/gma500/Makefile
drivers/gpu/drm/gma500/psb_device.c
drivers/gpu/drm/gma500/psb_drv.h
drivers/gpu/drm/gma500/psb_lid.c [deleted file]
drivers/gpu/drm/xe/xe_gt.c
drivers/gpu/drm/xe/xe_gt_ccs_mode.c
drivers/gpu/drm/xe/xe_gt_ccs_mode.h
drivers/gpu/drm/xe/xe_guc_ct.c
drivers/gpu/drm/xe/xe_huc.c
drivers/hid/hid-logitech-dj.c
drivers/hid/hid-mcp2221.c
drivers/hid/hid-nintendo.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/intel-ish-hid/ipc/ipc.c
drivers/md/dm-vdo/murmurhash3.c
drivers/md/dm.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_vf_lib.c
drivers/net/ethernet/intel/igc/igc.h
drivers/net/ethernet/intel/igc/igc_leds.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/core_env.c
drivers/net/ethernet/mellanox/mlxsw/pci.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/ti/am65-cpts.c
drivers/net/ethernet/ti/icssg/icssg_prueth.c
drivers/net/ethernet/wangxun/libwx/wx_lib.c
drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
drivers/net/gtp.c
drivers/net/macsec.c
drivers/net/phy/dp83869.c
drivers/net/phy/mediatek-ge-soc.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/intel/iwlwifi/cfg/bz.c
drivers/net/wireless/intel/iwlwifi/cfg/sc.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/link.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/virtual/mac80211_hwsim.c
drivers/nfc/trf7970a.c
drivers/vdpa/vdpa.c
drivers/video/fbdev/core/fb_defio.c
fs/9p/v9fs.h
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/9p/vfs_super.c
fs/bcachefs/backpointers.c
fs/bcachefs/bcachefs_format.h
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_node_scan.c
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/chardev.c
fs/bcachefs/fs.c
fs/bcachefs/journal_io.c
fs/bcachefs/recovery.c
fs/bcachefs/sb-clean.c
fs/bcachefs/sb-errors_types.h
fs/bcachefs/sb-members.c
fs/bcachefs/sb-members.h
fs/bcachefs/super.c
fs/bcachefs/thread_with_file.c
fs/bcachefs/thread_with_file.h
fs/btrfs/backref.c
fs/btrfs/extent_map.c
fs/btrfs/inode.c
fs/btrfs/messages.c
fs/btrfs/scrub.c
fs/btrfs/tests/extent-map-tests.c
fs/ioctl.c
fs/netfs/buffered_write.c
fs/nfsd/nfs4callback.c
fs/nfsd/state.h
fs/ntfs3/Kconfig
fs/ntfs3/dir.c
fs/ntfs3/file.c
fs/ntfs3/inode.c
fs/ntfs3/ntfs_fs.h
fs/ntfs3/super.c
fs/smb/client/cifsfs.c
fs/smb/client/cifsglob.h
fs/smb/client/cifsproto.h
fs/smb/client/connect.c
fs/smb/client/fs_context.c
fs/smb/client/fs_context.h
fs/smb/client/fscache.c
fs/smb/client/misc.c
fs/smb/client/smb2misc.c
fs/smb/client/smb2ops.c
fs/smb/client/smb2pdu.c
fs/smb/client/smb2transport.c
fs/smb/client/trace.h
fs/smb/common/smb2pdu.h
fs/smb/server/ksmbd_netlink.h
fs/smb/server/server.c
fs/smb/server/smb2pdu.c
fs/smb/server/vfs.c
include/linux/etherdevice.h
include/linux/sunrpc/svc_rdma.h
include/net/af_unix.h
include/net/bluetooth/hci_core.h
include/net/mac80211.h
include/net/macsec.h
include/net/sock.h
include/net/tls.h
include/uapi/drm/etnaviv_drm.h
include/uapi/linux/vdpa.h
net/ax25/af_ax25.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/sco.c
net/bridge/br_netlink.c
net/ethernet/eth.c
net/ipv4/icmp.c
net/ipv4/route.c
net/ipv4/tcp_ao.c
net/ipv4/udp.c
net/ipv6/udp.c
net/mac80211/chan.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_pathtbl.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_proto_sctp.c
net/netfilter/nft_chain_filter.c
net/openvswitch/conntrack.c
net/sunrpc/xprtrdma/svc_rdma_rw.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c
net/tls/tls.h
net/tls/tls_strp.c
net/unix/garbage.c
net/wireless/nl80211.c
net/wireless/trace.h
tools/net/ynl/lib/ynl.py

index f877262b0be99a588a14c1ed744ef9a79ee5c515..16b704e1d5d3665d178f48992a8e3d03cad57cac 100644
--- a/.mailmap
+++ b/.mailmap
@@ -38,6 +38,16 @@ Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
 Alexey Makhalov <alexey.amakhalov@broadcom.com> <amakhalov@vmware.com>
+Alex Elder <elder@kernel.org>
+Alex Elder <elder@kernel.org> <aelder@sgi.com>
+Alex Elder <elder@kernel.org> <alex.elder@linaro.org>
+Alex Elder <elder@kernel.org> <alex.elder@linary.org>
+Alex Elder <elder@kernel.org> <elder@dreamhost.com>
+Alex Elder <elder@kernel.org> <elder@dreawmhost.com>
+Alex Elder <elder@kernel.org> <elder@ieee.org>
+Alex Elder <elder@kernel.org> <elder@inktank.com>
+Alex Elder <elder@kernel.org> <elder@linaro.org>
+Alex Elder <elder@kernel.org> <elder@newdream.net>
 Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
 Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
 Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
@@ -98,6 +108,8 @@ Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
 Ben Widawsky <bwidawsk@kernel.org> <ben.widawsky@intel.com>
 Ben Widawsky <bwidawsk@kernel.org> <benjamin.widawsky@intel.com>
 Benjamin Poirier <benjamin.poirier@gmail.com> <bpoirier@suse.de>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@gmail.com>
+Benjamin Tissoires <bentiss@kernel.org> <benjamin.tissoires@redhat.com>
 Bjorn Andersson <andersson@kernel.org> <bjorn@kryo.se>
 Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@linaro.org>
 Bjorn Andersson <andersson@kernel.org> <bjorn.andersson@sonymobile.com>
index d3504826f401541e1dd4946c3f6d9f55989bee46..c389d4fd7599df2a8003365ee7df3d12793fae16 100644
--- a/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
+++ b/Documentation/admin-guide/verify-bugs-and-bisect-regressions.rst
@@ -29,7 +29,7 @@ The essence of the process (aka 'TL;DR')
 ========================================
 
 *[If you are new to building or bisecting Linux, ignore this section and head
-over to the* ":ref:`step-by-step guide<introguide_bissbs>`" *below. It utilizes
+over to the* ':ref:`step-by-step guide <introguide_bissbs>`' *below. It utilizes
 the same commands as this section while describing them in brief fashion. The
 steps are nevertheless easy to follow and together with accompanying entries
 in a reference section mention many alternatives, pitfalls, and additional
@@ -38,8 +38,8 @@ aspects, all of which might be essential in your present case.]*
 **In case you want to check if a bug is present in code currently supported by
 developers**, execute just the *preparations* and *segment 1*; while doing so,
 consider the newest Linux kernel you regularly use to be the 'working' kernel.
-In the following example that's assumed to be 6.0.13, which is why the sources
-of 6.0 will be used to prepare the .config file.
+In the following example that's assumed to be 6.0, which is why its sources
+will be used to prepare the .config file.
 
 **In case you face a regression**, follow the steps at least till the end of
 *segment 2*. Then you can submit a preliminary report -- or continue with
@@ -61,7 +61,7 @@ will be considered the 'good' release and used to prepare the .config file.
     cd ~/linux/
     git remote add -t master stable \
       https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
-    git checkout --detach v6.0
+    git switch --detach v6.0
     # * Hint: if you used an existing clone, ensure no stale .config is around.
     make olddefconfig
     # * Ensure the former command picked the .config of the 'working' kernel.
@@ -87,7 +87,7 @@ will be considered the 'good' release and used to prepare the .config file.
   a) Checking out latest mainline code::
 
        cd ~/linux/
-       git checkout --force --detach mainline/master
+       git switch --discard-changes --detach mainline/master
 
   b) Build, install, and boot a kernel::
 
@@ -125,7 +125,7 @@ will be considered the 'good' release and used to prepare the .config file.
   a) Start by checking out the sources of the 'good' version::
 
        cd ~/linux/
-       git checkout --force --detach v6.0
+       git switch --discard-changes --detach v6.0
 
   b) Build, install, and boot a kernel as described earlier in *segment 1,
      section b* -- just feel free to skip the 'du' commands, as you have a rough
@@ -136,8 +136,7 @@ will be considered the 'good' release and used to prepare the .config file.
 
 * **Segment 3**: perform and validate the bisection.
 
-  a) In case your 'broken' version is a stable/longterm release, add the Git
-     branch holding it::
+  a) Retrieve the sources for your 'bad' version::
 
        git remote set-branches --add stable linux-6.1.y
        git fetch stable
@@ -157,11 +156,12 @@ will be considered the 'good' release and used to prepare the .config file.
      works with the newly built kernel. If it does, tell Git by executing
      ``git bisect good``; if it does not, run ``git bisect bad`` instead.
 
-     All three commands will make Git checkout another commit; then re-execute
+     All three commands will make Git check out another commit; then re-execute
      this step (e.g. build, install, boot, and test a kernel to then tell Git
      the outcome). Do so again and again until Git shows which commit broke
      things. If you run short of disk space during this process, check the
-     "Supplementary tasks" section below.
+     section 'Complementary tasks: cleanup during and after the process'
+     below.
 
  d) Once you have finished the bisection, put a few things away::
 
@@ -172,14 +172,17 @@ will be considered the 'good' release and used to prepare the .config file.
 
   e) Try to verify the bisection result::
 
-       git checkout --force --detach mainline/master
+       git switch --discard-changes --detach mainline/master
        git revert --no-edit cafec0cacaca0
+       cp ~/kernel-config-working .config
+       ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
 
     This is optional, as some commits are impossible to revert. But if the
    second command worked flawlessly, build, install, and boot one more
-    kernel, which should not show the regression.
+    kernel; just this time skip the first command copying the base .config
+    file over, as that has already been taken care of.
 
-* **Supplementary tasks**: cleanup during and after the process.
+* **Complementary tasks**: cleanup during and after the process.
 
   a) To avoid running out of disk space during a bisection, you might need to
      remove some kernels you built earlier. You most likely want to keep those
@@ -202,13 +205,25 @@ will be considered the 'good' release and used to prepare the .config file.
      the kernels you built earlier and later you might want to keep around for
      a week or two.
 
+* **Optional task**: test a debug patch or a proposed fix later::
+
+    git fetch mainline
+    git switch --discard-changes --detach mainline/master
+    git apply /tmp/foobars-proposed-fix-v1.patch
+    cp ~/kernel-config-working .config
+    ./scripts/config --set-str CONFIG_LOCALVERSION '-local-foobars-fix-v1'
+
+  Build, install, and boot a kernel as described in *segment 1, section b* --
+  but this time omit the first command copying the build configuration over,
+  as that has been taken care of already.
+
 .. _introguide_bissbs:
 
 Step-by-step guide on how to verify bugs and bisect regressions
 ===============================================================
 
 This guide describes how to set up your own Linux kernels for investigating bugs
-or regressions you intent to report. How far you want to follow the instructions
+or regressions you intend to report. How far you want to follow the instructions
 depends on your issue:
 
 Execute all steps till the end of *segment 1* to **verify if your kernel problem
@@ -221,15 +236,17 @@ report; instead of the latter your could also head straight on and follow
 *segment 3* to **perform a bisection** for a full-fledged regression report
 developers are obliged to act upon.
 
- :ref:`Preparations: set up everything to build your own kernels.<introprep_bissbs>`
+ :ref:`Preparations: set up everything to build your own kernels <introprep_bissbs>`.
 
- :ref:`Segment 1: try to reproduce the problem with the latest codebase.<introlatestcheck_bissbs>`
+ :ref:`Segment 1: try to reproduce the problem with the latest codebase <introlatestcheck_bissbs>`.
 
- :ref:`Segment 2: check if the kernels you build work fine.<introworkingcheck_bissbs>`
+ :ref:`Segment 2: check if the kernels you build work fine <introworkingcheck_bissbs>`.
 
- :ref:`Segment 3: perform a bisection and validate the result.<introbisect_bissbs>`
+ :ref:`Segment 3: perform a bisection and validate the result <introbisect_bissbs>`.
 
- :ref:`Supplementary tasks: cleanup during and after following this guide.<introclosure_bissbs>`
+ :ref:`Complementary tasks: cleanup during and after following this guide <introclosure_bissbs>`.
+
+ :ref:`Optional tasks: test reverts, patches, or later versions <introoptional_bissbs>`.
 
 The steps in each segment illustrate the important aspects of the process, while
 a comprehensive reference section holds additional details for almost all of the
@@ -240,24 +257,35 @@ to get things rolling again.
 For further details on how to report Linux kernel issues or regressions check
 out Documentation/admin-guide/reporting-issues.rst, which works in conjunction
 with this document. It among others explains why you need to verify bugs with
-the latest 'mainline' kernel, even if you face a problem with a kernel from a
-'stable/longterm' series; for users facing a regression it also explains that
-sending a preliminary report after finishing segment 2 might be wise, as the
-regression and its culprit might be known already. For further details on
-what actually qualifies as a regression check out
-Documentation/admin-guide/reporting-regressions.rst.
+the latest 'mainline' kernel (e.g. versions like 6.0, 6.1-rc1, or 6.1-rc6),
+even if you face a problem with a kernel from a 'stable/longterm' series
+(say 6.0.13).
+
+For users facing a regression, that document also explains why sending a
+preliminary report after segment 2 might be wise, as the regression and its
+culprit might be known already. For further details on what actually qualifies
+as a regression check out Documentation/admin-guide/reporting-regressions.rst.
+
+If you run into any problems while following this guide or have ideas how to
+improve it, :ref:`please let the kernel developers know <submit_improvements>`.
 
 .. _introprep_bissbs:
 
 Preparations: set up everything to build your own kernels
 ---------------------------------------------------------
 
+The following steps lay the groundwork for all further tasks.
+
+Note: the instructions assume you are building and testing on the same
+machine; if you want to compile the kernel on another system, check
+:ref:`Build kernels on a different machine <buildhost_bis>` below.
+
 .. _backup_bissbs:
 
 * Create a fresh backup and put system repair and restore tools at hand, just
   to be prepared for the unlikely case of something going sideways.
 
-  [:ref:`details<backup_bisref>`]
+  [:ref:`details <backup_bisref>`]
 
 .. _vanilla_bissbs:
 
@@ -265,7 +293,7 @@ Preparations: set up everything to build your own kernels
   builds them automatically. That includes but is not limited to DKMS, openZFS,
   VirtualBox, and Nvidia's graphics drivers (including the GPLed kernel module).
 
-  [:ref:`details<vanilla_bisref>`]
+  [:ref:`details <vanilla_bisref>`]
 
 .. _secureboot_bissbs:
 
@@ -276,48 +304,49 @@ Preparations: set up everything to build your own kernels
   their restrictions through a process initiated by
   ``mokutil --disable-validation``.
 
-  [:ref:`details<secureboot_bisref>`]
+  [:ref:`details <secureboot_bisref>`]
 
 .. _rangecheck_bissbs:
 
 * Determine the kernel versions considered 'good' and 'bad' throughout this
-  guide.
+  guide:
 
-  Do you follow this guide to verify if a bug is present in the code developers
-  care for? Then consider the mainline release your 'working' kernel (the newest
-  one you regularly use) is based on to be the 'good' version; if your 'working'
-  kernel for example is 6.0.11, then your 'good' kernel is 6.0.
+  * Do you follow this guide to verify if a bug is present in the code the
+    primary developers care for? Then consider the version of the newest kernel
+    you regularly use currently as 'good' (e.g. 6.0, 6.0.13, or 6.1-rc2).
 
-  In case you face a regression, it depends on the version range where the
-  regression was introduced:
+  * Do you face a regression, e.g. something broke or works worse after
+    switching to a newer kernel version? In that case it depends on the version
+    range during which the problem appeared:
 
-  * Something which used to work in Linux 6.0 broke when switching to Linux
-    6.1-rc1? Then henceforth regard 6.0 as the last known 'good' version
-    and 6.1-rc1 as the first 'bad' one.
+    * Something regressed when updating from a stable/longterm release
+      (say 6.0.13) to a newer mainline series (like 6.1-rc7 or 6.1) or a
+      stable/longterm version based on one (say 6.1.5)? Then consider the
+      mainline release your working kernel is based on to be the 'good'
+      version (e.g. 6.0) and the first version to be broken as the 'bad' one
+      (e.g. 6.1-rc7, 6.1, or 6.1.5). Note, at this point it is merely assumed
+      that 6.0 is fine; this hypothesis will be checked in segment 2.
 
-  * Some function stopped working when updating from 6.0.11 to 6.1.4? Then for
-    the time being consider 6.0 as the last 'good' version and 6.1.4 as
-    the 'bad' one. Note, at this point it is merely assumed that 6.0 is fine;
-    this assumption will be checked in segment 2.
+    * Something regressed when switching from one mainline version (say 6.0) to
+      a later one (like 6.1-rc1) or a stable/longterm release based on it
+      (say 6.1.5)? Then regard the last working version (e.g. 6.0) as 'good' and
+      the first broken (e.g. 6.1-rc1 or 6.1.5) as 'bad'.
 
-  * A feature you used in 6.0.11 does not work at all or worse in 6.1.13? In
-    that case you want to bisect within a stable/longterm series: consider
-    6.0.11 as the last known 'good' version and 6.0.13 as the first 'bad'
-    one. Note, in this case you still want to compile and test a mainline kernel
-    as explained in segment 1: the outcome will determine if you need to report
-    your issue to the regular developers or the stable team.
+    * Something regressed when updating within a stable/longterm series (say
+      from 6.0.13 to 6.0.15)? Then consider those versions as 'good' and 'bad'
+      (e.g. 6.0.13 and 6.0.15), as you need to bisect within that series.
 
   *Note, do not confuse 'good' version with 'working' kernel; the latter term
   throughout this guide will refer to the last kernel that has been working
   fine.*
 
-  [:ref:`details<rangecheck_bisref>`]
+  [:ref:`details <rangecheck_bisref>`]
 
 .. _bootworking_bissbs:
 
 * Boot into the 'working' kernel and briefly use the apparently broken feature.
 
-  [:ref:`details<bootworking_bisref>`]
+  [:ref:`details <bootworking_bisref>`]
 
 .. _diskspace_bissbs:
 
@@ -327,7 +356,7 @@ Preparations: set up everything to build your own kernels
   debug symbols: both explain approaches reducing the amount of space, which
   should allow you to master these tasks with about 4 Gigabytes free space.
 
-  [:ref:`details<diskspace_bisref>`]
+  [:ref:`details <diskspace_bisref>`]
 
 .. _buildrequires_bissbs:
 
@@ -337,7 +366,7 @@ Preparations: set up everything to build your own kernels
   reference section shows how to quickly install those on various popular Linux
   distributions.
 
-  [:ref:`details<buildrequires_bisref>`]
+  [:ref:`details <buildrequires_bisref>`]
 
 .. _sources_bissbs:
 
@@ -360,14 +389,23 @@ Preparations: set up everything to build your own kernels
     git remote add -t master stable \
       https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
 
-  [:ref:`details<sources_bisref>`]
+  [:ref:`details <sources_bisref>`]
+
+.. _stablesources_bissbs:
+
+* Is one of the versions you earlier established as 'good' or 'bad' a stable or
+  longterm release (say 6.1.5)? Then download the code for the series it belongs
+  to ('linux-6.1.y' in this example)::
+
+    git remote set-branches --add stable linux-6.1.y
+    git fetch stable
 
 .. _oldconfig_bissbs:
 
 * Start preparing a kernel build configuration (the '.config' file).
 
   Before doing so, ensure you are still running the 'working' kernel an earlier
-  step told you to boot; if you are unsure, check the current kernel release
+  step told you to boot; if you are unsure, check the current kernelrelease
   identifier using ``uname -r``.
 
   Afterwards check out the source code for the version earlier established as
@@ -375,7 +413,7 @@ Preparations: set up everything to build your own kernels
   the version number in this and all later Git commands needs to be prefixed
   with a 'v'::
 
-    git checkout --detach v6.0
+    git switch --discard-changes --detach v6.0
 
   Now create a build configuration file::
 
@@ -398,7 +436,7 @@ Preparations: set up everything to build your own kernels
   'make olddefconfig' again and check if it now picked up the right config file
   as base.
 
-  [:ref:`details<oldconfig_bisref>`]
+  [:ref:`details <oldconfig_bisref>`]
 
 .. _localmodconfig_bissbs:
 
@@ -432,7 +470,7 @@ Preparations: set up everything to build your own kernels
   spending much effort on, as long as it boots and allows to properly test the
   feature that causes trouble.
 
-  [:ref:`details<localmodconfig_bisref>`]
+  [:ref:`details <localmodconfig_bisref>`]
 
 .. _tagging_bissbs:
 
@@ -442,7 +480,7 @@ Preparations: set up everything to build your own kernels
     ./scripts/config --set-str CONFIG_LOCALVERSION '-local'
     ./scripts/config -e CONFIG_LOCALVERSION_AUTO
 
-  [:ref:`details<tagging_bisref>`]
+  [:ref:`details <tagging_bisref>`]
 
 .. _debugsymbols_bissbs:
 
@@ -461,7 +499,7 @@ Preparations: set up everything to build your own kernels
     ./scripts/config -d DEBUG_INFO -d DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT \
       -d DEBUG_INFO_DWARF4 -d DEBUG_INFO_DWARF5 -e CONFIG_DEBUG_INFO_NONE
 
-  [:ref:`details<debugsymbols_bisref>`]
+  [:ref:`details <debugsymbols_bisref>`]
 
 .. _configmods_bissbs:
 
@@ -471,14 +509,14 @@ Preparations: set up everything to build your own kernels
   * Are you running Debian? Then you want to avoid known problems by performing
     additional adjustments explained in the reference section.
 
-    [:ref:`details<configmods_distros_bisref>`].
+    [:ref:`details <configmods_distros_bisref>`].
 
   * If you want to influence other aspects of the configuration, do so now using
     your preferred tool. Note, to use make targets like 'menuconfig' or
     'nconfig', you will need to install the development files of ncurses; for
     'xconfig' you likewise need the Qt5 or Qt6 headers.
 
-    [:ref:`details<configmods_individual_bisref>`].
+    [:ref:`details <configmods_individual_bisref>`].
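
    For illustration only (the package names below are an assumption about
    Debian-based distributions and are not taken from this document; other
    distributions name these packages differently), installing those headers
    might look like this::

      sudo apt install libncurses-dev   # needed for 'menuconfig' and 'nconfig'
      sudo apt install qt6-base-dev     # needed for 'xconfig' with Qt6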
 
 .. _saveconfig_bissbs:
 
@@ -488,7 +526,7 @@ Preparations: set up everything to build your own kernels
      make olddefconfig
      cp .config ~/kernel-config-working
 
-  [:ref:`details<saveconfig_bisref>`]
+  [:ref:`details <saveconfig_bisref>`]
 
 .. _introlatestcheck_bissbs:
 
@@ -498,16 +536,30 @@ Segment 1: try to reproduce the problem with the latest codebase
 The following steps verify if the problem occurs with the code currently
 supported by developers. In case you face a regression, it also checks that the
 problem is not caused by some .config change, as reporting the issue then would
-be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
+be a waste of time. [:ref:`details <introlatestcheck_bisref>`]
 
 .. _checkoutmaster_bissbs:
 
-* Check out the latest Linux codebase::
+* Check out the latest Linux codebase.
 
-    cd ~/linux/
-    git checkout --force --detach mainline/master
+  * Are your 'good' and 'bad' versions from the same stable or longterm series?
+    Then check the `front page of kernel.org <https://kernel.org/>`_: if it
+    lists a release from that series without an '[EOL]' tag, check out the
+    series' latest version ('linux-6.1.y' in the following example)::
+
+      cd ~/linux/
+      git switch --discard-changes --detach stable/linux-6.1.y
+
+    Your series is unsupported if it is not listed or carries an 'end of life'
+    tag. In that case you might want to check if a successor series (say
+    linux-6.2.y) or mainline (see next point) fixes the bug.
 
-  [:ref:`details<checkoutmaster_bisref>`]
+  * In all other cases, run::
+
+      cd ~/linux/
+      git switch --discard-changes --detach mainline/master
+
+  [:ref:`details <checkoutmaster_bisref>`]
 
 .. _build_bissbs:
 
@@ -522,7 +574,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
   reference section for alternatives, which obviously will require other
   steps to install as well.
 
-  [:ref:`details<build_bisref>`]
+  [:ref:`details <build_bisref>`]
 
 .. _install_bissbs:
 
@@ -555,7 +607,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
   down: if you will build more kernels as described in segment 2 and 3, you will
   have to perform those again after executing ``command -v installkernel [...]``.
 
-  [:ref:`details<install_bisref>`]
+  [:ref:`details <install_bisref>`]
 
 .. _storagespace_bissbs:
 
@@ -568,7 +620,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
   Write down or remember those two values for later: they enable you to prevent
   running out of disk space accidentally during a bisection.
 
-  [:ref:`details<storagespace_bisref>`]
+  [:ref:`details <storagespace_bisref>`]
 
 .. _kernelrelease_bissbs:
 
@@ -595,7 +647,7 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
   If that command does not return '0', check the reference section, as the cause
   for this might interfere with your testing.
 
-  [:ref:`details<tainted_bisref>`]
+  [:ref:`details <tainted_bisref>`]
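
  Note, the kernel sources ship a small helper script that decodes a non-zero
  taint value; running it is optional and mentioned here merely as a pointer,
  not as one of this guide's regular steps::

    sh tools/debugging/kernel-chktaint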
 
 .. _recheckbroken_bissbs:
 
@@ -603,21 +655,19 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
   out the instructions in the reference section to ensure nothing went sideways
   during your tests.
 
-  [:ref:`details<recheckbroken_bisref>`]
+  [:ref:`details <recheckbroken_bisref>`]
 
 .. _recheckstablebroken_bissbs:
 
-* Are you facing a problem within a stable/longterm series, but failed to
-  reproduce it with the mainline kernel you just built? One that according to
-  the `front page of kernel.org <https://kernel.org/>`_ is still supported? Then
-  check if the latest codebase for the particular series might already fix the
-  problem. To do so, add the stable series Git branch for your 'good' kernel
-  (again, this here is assumed to be 6.0) and check out the latest version::
+* Did you just build a stable or longterm kernel? And were you able to reproduce
+  the regression with it? Then you should test the latest mainline codebase as
+  well, because the result determines which developers the bug must be submitted
+  to.
+
+  To prepare that test, check out current mainline::
 
     cd ~/linux/
-    git remote set-branches --add stable linux-6.0.y
-    git fetch stable
-    git checkout --force --detach linux-6.0.y
+    git switch --discard-changes --detach mainline/master
 
   Now use the checked out code to build and install another kernel using the
   commands the earlier steps already described in more detail::
@@ -639,14 +689,16 @@ be a waste of time. [:ref:`details<introlatestcheck_bisref>`]
     uname -r
     cat /proc/sys/kernel/tainted
 
-  Now verify if this kernel is showing the problem.
+  Now verify if this kernel is showing the problem. If it does, then you need
+  to report the bug to the primary developers; if it does not, report it to the
+  stable team. See Documentation/admin-guide/reporting-issues.rst for details.
 
-  [:ref:`details<recheckstablebroken_bisref>`]
+  [:ref:`details <recheckstablebroken_bisref>`]
 
 Do you follow this guide to verify if a problem is present in the code
 currently supported by Linux kernel developers? Then you are done at this
 point. If you later want to remove the kernel you just built, check out
-:ref:`Supplementary tasks: cleanup during and after following this guide<introclosure_bissbs>`.
+:ref:`Complementary tasks: cleanup during and after following this guide <introclosure_bissbs>`.
 
 In case you face a regression, move on and execute at least the next segment
 as well.
@@ -658,7 +710,7 @@ Segment 2: check if the kernels you build work fine
 
 In case of a regression, you now want to ensure the trimmed configuration file
 you created earlier works as expected; a bisection with the .config file
-otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
+otherwise would be a waste of time. [:ref:`details <introworkingcheck_bisref>`]
 
 .. _recheckworking_bissbs:
 
@@ -669,7 +721,7 @@ otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
   'good' (once again assumed to be 6.0 here)::
 
     cd ~/linux/
-    git checkout --detach v6.0
+    git switch --discard-changes --detach v6.0
 
   Now use the checked out code to configure, build, and install another kernel
   using the commands the previous subsection explained in more detail::
@@ -693,7 +745,7 @@ otherwise would be a waste of time. [:ref:`details<introworkingcheck_bisref>`]
   Now check if this kernel works as expected; if not, consult the reference
   section for further instructions.
 
-  [:ref:`details<recheckworking_bisref>`]
+  [:ref:`details <recheckworking_bisref>`]
 
 .. _introbisect_bissbs:
 
@@ -703,18 +755,11 @@ Segment 3: perform the bisection and validate the result
 With all the preparations and precaution builds taken care of, you are now ready
 to begin the bisection. This will make you build quite a few kernels -- usually
 about 15 in case you encountered a regression when updating to a newer series
-(say from 6.0.11 to 6.1.3). But do not worry, due to the trimmed build
+(say from 6.0.13 to 6.1.5). But do not worry, due to the trimmed build
 configuration created earlier this works a lot faster than many people assume:
 overall on average it will often just take about 10 to 15 minutes to compile
 each kernel on commodity x86 machines.
 
-* In case your 'bad' version is a stable/longterm release (say 6.1.5), add its
-  stable branch, unless you already did so earlier::
-
-    cd ~/linux/
-    git remote set-branches --add stable linux-6.1.y
-    git fetch stable
-
 .. _bisectstart_bissbs:
 
 * Start the bisection and tell Git about the versions earlier established as
@@ -725,7 +770,7 @@ each kernel on commodity x86 machines.
     git bisect good v6.0
     git bisect bad v6.1.5
 
-  [:ref:`details<bisectstart_bisref>`]
+  [:ref:`details <bisectstart_bisref>`]
 
 .. _bisectbuild_bissbs:
 
@@ -745,7 +790,7 @@ each kernel on commodity x86 machines.
   If compilation fails for some reason, run ``git bisect skip`` and restart
   executing the stack of commands from the beginning.
 
-  In case you skipped the "test latest codebase" step in the guide, check its
+  In case you skipped the 'test latest codebase' step in the guide, check its
   description as for why the 'df [...]' and 'make -s kernelrelease [...]'
   commands are here.
 
@@ -754,7 +799,7 @@ each kernel on commodity x86 machines.
   totally normal to see release identifiers like '6.0-rc1-local-gcafec0cacaca0'
   if you bisect between versions 6.1 and 6.2 for example.
 
-  [:ref:`details<bisectbuild_bisref>`]
+  [:ref:`details <bisectbuild_bisref>`]
 
 .. _bisecttest_bissbs:
 
@@ -794,7 +839,7 @@ each kernel on commodity x86 machines.
   might need to scroll up to see the message mentioning the culprit;
   alternatively, run ``git bisect log > ~/bisection-log``.
 
-  [:ref:`details<bisecttest_bisref>`]
+  [:ref:`details <bisecttest_bisref>`]
 
 .. _bisectlog_bissbs:
 
@@ -806,7 +851,7 @@ each kernel on commodity x86 machines.
     cp .config ~/bisection-config-culprit
     git bisect reset
 
-  [:ref:`details<bisectlog_bisref>`]
+  [:ref:`details <bisectlog_bisref>`]
 
 .. _revert_bissbs:
 
@@ -823,16 +868,16 @@ each kernel on commodity x86 machines.
   Begin by checking out the latest codebase depending on the range you bisected:
 
   * Did you face a regression within a stable/longterm series (say between
-    6.0.11 and 6.0.13) that does not happen in mainline? Then check out the
+    6.0.13 and 6.0.15) that does not happen in mainline? Then check out the
     latest codebase for the affected series like this::
 
       git fetch stable
-      git checkout --force --detach linux-6.0.y
+      git switch --discard-changes --detach linux-6.0.y
 
   * In all other cases check out latest mainline::
 
       git fetch mainline
-      git checkout --force --detach mainline/master
+      git switch --discard-changes --detach mainline/master
 
     If you bisected a regression within a stable/longterm series that also
     happens in mainline, there is one more thing to do: look up the mainline
@@ -846,27 +891,33 @@ each kernel on commodity x86 machines.
 
     git revert --no-edit cafec0cacaca0
 
-  If that fails, give up trying and move on to the next step. But if it works,
-  build a kernel again using the familiar command sequence::
+  If that fails, give up trying and move on to the next step; if it works,
+  adjust the tag to facilitate its identification and prevent accidentally
+  overwriting another kernel::
 
     cp ~/kernel-config-working .config
+    ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
+
+  Build a kernel using the familiar command sequence, just without copying
+  the base .config over::
+
     make olddefconfig &&
-    make -j $(nproc --all) &&
+    make -j $(nproc --all)
     # * Check if the free space suffices holding another kernel:
     df -h /boot/ /lib/modules/
     sudo make modules_install
     command -v installkernel && sudo make install
-    Make -s kernelrelease | tee -a ~/kernels-built
+    make -s kernelrelease | tee -a ~/kernels-built
     reboot
 
-  Now check one last time if the feature that made you perform a bisection work
-  with that kernel.
+  Now check one last time if the feature that made you perform a bisection works
+  with that kernel: if everything went well, it should not show the regression.
 
-  [:ref:`details<revert_bisref>`]
+  [:ref:`details <revert_bisref>`]
 
 .. _introclosure_bissbs:
 
-Supplementary tasks: cleanup during and after the bisection
+Complementary tasks: cleanup during and after the bisection
 -----------------------------------------------------------
 
 During and after following this guide you might want or need to remove some of
@@ -903,7 +954,7 @@ space might run out.
   kernel image and related files behind; in that case remove them as described
   in the reference section.
 
-  [:ref:`details<makeroom_bisref>`]
+  [:ref:`details <makeroom_bisref>`]
 
 .. _finishingtouch_bissbs:
 
@@ -926,18 +977,99 @@ space might run out.
     the version considered 'good', and the last three or four you compiled
     during the actual bisection process.
 
-  [:ref:`details<finishingtouch_bisref>`]
+  [:ref:`details <finishingtouch_bisref>`]
+
+.. _introoptional_bissbs:
+
+Optional: test reverts, patches, or later versions
+--------------------------------------------------
+
+While or after reporting a bug, you might want to test reverts, debug patches,
+proposed fixes, or other versions, or developers might ask you to do so. In
+that case follow these instructions.
+
+* Update your Git clone and check out the latest code.
+
+  * In case you want to test mainline, fetch its latest changes before checking
+    its code out::
+
+      git fetch mainline
+      git switch --discard-changes --detach mainline/master
+
+  * In case you want to test a stable or longterm kernel, first add the branch
+    holding the series you are interested in (6.2 in the example), unless you
+    already did so earlier::
+
+      git remote set-branches --add stable linux-6.2.y
+
+    Then fetch the latest changes and check out the latest version from the
+    series::
+
+      git fetch stable
+      git switch --discard-changes --detach stable/linux-6.2.y
+
+* Copy your kernel build configuration over::
+
+    cp ~/kernel-config-working .config
+
+* Your next step depends on what you want to do:
+
+  * In case you just want to test the latest codebase, head to the next step;
+    you are already all set.
+
+  * In case you want to test if a revert fixes an issue, revert one or multiple
+    changes by specifying their commit ids::
+
+      git revert --no-edit cafec0cacaca0
+
+    Now give that kernel a special tag to facilitate its identification and
+    prevent accidentally overwriting another kernel::
+
+      ./scripts/config --set-str CONFIG_LOCALVERSION '-local-cafec0cacaca0-reverted'
+
+  * In case you want to test a patch, store the patch in a file like
+    '/tmp/foobars-proposed-fix-v1.patch' and apply it like this::
+
+      git apply /tmp/foobars-proposed-fix-v1.patch
+
+    In case of multiple patches, repeat this step with the others.
+
+    Now give that kernel a special tag to facilitate its identification and
+    prevent accidentally overwriting another kernel::
+
+      ./scripts/config --set-str CONFIG_LOCALVERSION '-local-foobars-fix-v1'
+
+* Build a kernel using the familiar commands, just without copying the kernel
+  build configuration over, as that has been taken care of already::
+
+    make olddefconfig &&
+    make -j $(nproc --all)
+    # * Check if the free space suffices holding another kernel:
+    df -h /boot/ /lib/modules/
+    sudo make modules_install
+    command -v installkernel && sudo make install
+    make -s kernelrelease | tee -a ~/kernels-built
+    reboot
+
+* Now verify you booted the newly built kernel and check whether it behaves
+  as expected.
+
+[:ref:`details <introoptional_bisref>`]
 
 .. _submit_improvements:
 
-This concludes the step-by-step guide.
+Conclusion
+----------
+
+You have reached the end of the step-by-step guide.
 
 Did you run into trouble following any of the above steps not cleared up by the
 reference section below? Did you spot errors? Or do you have ideas how to
-improve the guide? Then please take a moment and let the maintainer of this
+improve the guide?
+
+If any of that applies, please take a moment and let the maintainer of this
 document know by email (Thorsten Leemhuis <linux@leemhuis.info>), ideally while
 CCing the Linux docs mailing list (linux-doc@vger.kernel.org). Such feedback is
-vital to improve this document further, which is in everybody's interest, as it
+vital to improve this text further, which is in everybody's interest, as it
 will enable more people to master the task described here -- and hopefully also
 improve similar guides inspired by this one.
 
@@ -948,10 +1080,20 @@ Reference section for the step-by-step guide
 This section holds additional information for almost all the items in the above
 step-by-step guide.
 
+Preparations for building your own kernels
+------------------------------------------
+
+  *The steps in this section lay the groundwork for all further tests.*
+  [:ref:`... <introprep_bissbs>`]
+
+The steps in all later sections of this guide depend on those described here.
+
+[:ref:`back to step-by-step guide <introprep_bissbs>`].
+
 .. _backup_bisref:
 
 Prepare for emergencies
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
 
   *Create a fresh backup and put system repair and restore tools at hand.*
   [:ref:`... <backup_bissbs>`]
@@ -966,7 +1108,7 @@ for something going sideways, even if that should not happen.
 .. _vanilla_bisref:
 
 Remove anything related to externally maintained kernel modules
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Remove all software that depends on externally developed kernel drivers or
   builds them automatically.* [:ref:`...<vanilla_bissbs>`]
@@ -984,7 +1126,7 @@ explains in more detail.
 .. _secureboot_bisref:
 
 Deal with techniques like Secure Boot
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *On platforms with 'Secure Boot' or similar techniques, prepare everything to
   ensure the system will permit your self-compiled kernel to boot later.*
@@ -1021,7 +1163,7 @@ Afterwards, permit MokManager to reboot the machine.
 .. _bootworking_bisref:
 
 Boot the last kernel that was working
--------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Boot into the last working kernel and briefly recheck if the feature that
   regressed really works.* [:ref:`...<bootworking_bissbs>`]
@@ -1034,7 +1176,7 @@ the right thing.
 .. _diskspace_bisref:
 
 Space requirements
-------------------
+~~~~~~~~~~~~~~~~~~
 
   *Ensure to have enough free space for building Linux.*
   [:ref:`... <diskspace_bissbs>`]
@@ -1052,32 +1194,32 @@ space by quite a few gigabytes.
 .. _rangecheck_bisref:
 
 Bisection range
----------------
+~~~~~~~~~~~~~~~
 
   *Determine the kernel versions considered 'good' and 'bad' throughout this
   guide.* [:ref:`...<rangecheck_bissbs>`]
 
 Establishing the range of commits to be checked is mostly straightforward,
 except when a regression occurred when switching from a release of one stable
-series to a release of a later series (e.g. from 6.0.11 to 6.1.4). In that case
+series to a release of a later series (e.g. from 6.0.13 to 6.1.5). In that case
 Git will need some hand holding, as there is no straight line of descent.
 
 That's because with the release of 6.0 mainline carried on to 6.1 while the
 stable series 6.0.y branched to the side. It's therefore theoretically possible
-that the issue you face with 6.1.4 only worked in 6.0.11, as it was fixed by a
+that the issue you face with 6.1.5 only worked in 6.0.13, as it was fixed by a
 commit that went into one of the 6.0.y releases, but never hit mainline or the
 6.1.y series. Thankfully that normally should not happen due to the way the
 stable/longterm maintainers maintain the code. It's thus pretty safe to assume
 6.0 as a 'good' kernel. That assumption will be tested anyway, as that kernel
 will be built and tested in the segment '2' of this guide; Git would force you
-to do this as well, if you tried bisecting between 6.0.11 and 6.1.13.
+to do this as well, if you tried bisecting between 6.0.13 and 6.1.15.
 
 [:ref:`back to step-by-step guide <rangecheck_bissbs>`]
 
 .. _buildrequires_bisref:
 
 Install build requirements
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Install all software required to build a Linux kernel.*
   [:ref:`...<buildrequires_bissbs>`]
@@ -1117,7 +1259,7 @@ These commands install a few packages that are often, but not always needed. You
 for example might want to skip installing the development headers for ncurses,
 which you will only need in case you later might want to adjust the kernel build
 configuration using make the targets 'menuconfig' or 'nconfig'; likewise omit
-the headers of Qt6 is you do not plan to adjust the .config using 'xconfig'.
+the headers of Qt6 if you do not plan to adjust the .config using 'xconfig'.
 
 You furthermore might need additional libraries and their development headers
 for tasks not covered in this guide -- for example when building utilities from
@@ -1128,7 +1270,7 @@ the kernel's tools/ directory.
 .. _sources_bisref:
 
 Download the sources using Git
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Retrieve the Linux mainline sources.*
   [:ref:`...<sources_bissbs>`]
@@ -1148,7 +1290,7 @@ work better for you:
 .. _sources_bundle_bisref:
 
 Downloading Linux mainline sources using a bundle
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""""""""""""""""""""""""""
 
 Use the following commands to retrieve the Linux mainline sources using a
 bundle::
@@ -1184,7 +1326,7 @@ First, execute the following command to retrieve the latest mainline codebase::
       https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
 
 Now deepen your clone's history to the second predecessor of the mainline
-release of your 'good' version. In case the latter are 6.0 or 6.0.11, 5.19 would
+release of your 'good' version. In case the latter are 6.0 or 6.0.13, 5.19 would
 be the first predecessor and 5.18 the second -- hence deepen the history up to
 that version::
 
@@ -1219,7 +1361,7 @@ Note, shallow clones have a few peculiar characteristics:
 .. _oldconfig_bisref:
 
 Start defining the build configuration for your kernel
-------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Start preparing a kernel build configuration (the '.config' file).*
   [:ref:`... <oldconfig_bissbs>`]
@@ -1279,7 +1421,7 @@ that file to the build machine and store it as ~/linux/.config; afterwards run
 .. _localmodconfig_bisref:
 
 Trim the build configuration for your kernel
---------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Disable any kernel modules apparently superfluous for your setup.*
   [:ref:`... <localmodconfig_bissbs>`]
@@ -1328,7 +1470,7 @@ step-by-step guide mentions::
 .. _tagging_bisref:
 
 Tag the kernels about to be built
----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Ensure all the kernels you will build are clearly identifiable using a
   special tag and a unique version identifier.* [:ref:`... <tagging_bissbs>`]
@@ -1344,7 +1486,7 @@ confusing during the bisection.
 .. _debugsymbols_bisref:
 
 Decide to enable or disable debug symbols
------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Decide how to handle debug symbols.* [:ref:`... <debugsymbols_bissbs>`]
 
@@ -1373,7 +1515,7 @@ explains this process in more detail.
 .. _configmods_bisref:
 
 Adjust build configuration
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Check if you may want or need to adjust some other kernel configuration
   options:*
@@ -1384,7 +1526,7 @@ kernel configuration options.
 .. _configmods_distros_bisref:
 
 Distro specific adjustments
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""""
 
   *Are you running* [:ref:`... <configmods_bissbs>`]
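
One adjustment frequently needed on Debian and Ubuntu derived configurations is
shown here purely as an illustration (it is an assumption about those
distributions' kernel configs, not something this document spells out at this
point): their .config files may point CONFIG_SYSTEM_TRUSTED_KEYS at a
certificate file you do not have, which a command like the following clears::

  ./scripts/config --set-str SYSTEM_TRUSTED_KEYS ''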
 
@@ -1409,7 +1551,7 @@ when following this guide on a few commodity distributions.
 .. _configmods_individual_bisref:
 
 Individual adjustments
-~~~~~~~~~~~~~~~~~~~~~~
+""""""""""""""""""""""
 
   *If you want to influence the other aspects of the configuration, do so
   now.* [:ref:`... <configmods_bissbs>`]
@@ -1426,13 +1568,13 @@ is missing.
 .. _saveconfig_bisref:
 
 Put the .config file aside
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Reprocess the .config after the latest changes and store it in a safe place.*
   [:ref:`... <saveconfig_bissbs>`]
 
 Put the .config you prepared aside, as you want to copy it back to the build
-directory every time  during this guide before you start building another
+directory every time during this guide before you start building another
 kernel. That's because going back and forth between different versions can alter
 .config files in odd ways; those occasionally cause side effects that could
 confuse testing or in some cases render the result of your bisection
@@ -1442,8 +1584,8 @@ meaningless.
 
 .. _introlatestcheck_bisref:
 
-Try to reproduce the regression
------------------------------------------
+Try to reproduce the problem with the latest codebase
+-----------------------------------------------------
 
   *Verify the regression is not caused by some .config change and check if it
   still occurs with the latest codebase.* [:ref:`... <introlatestcheck_bissbs>`]
@@ -1490,28 +1632,28 @@ highly recommended for these reasons:
 
   Your report might be ignored if you send it to the wrong party -- and even
   when you get a reply there is a decent chance that developers tell you to
-  evaluate   which of the two cases it is before they take a closer look.
+  evaluate which of the two cases it is before they take a closer look.
 
 [:ref:`back to step-by-step guide <introlatestcheck_bissbs>`]
 
 .. _checkoutmaster_bisref:
 
 Check out the latest Linux codebase
------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Check out the latest Linux codebase.*
-  [:ref:`... <introlatestcheck_bissbs>`]
+  [:ref:`... <checkoutmaster_bissbs>`]
 
 In case you later want to recheck if an ever newer codebase might fix the
 problem, remember to run that ``git fetch --shallow-exclude [...]`` command
 again mentioned earlier to update your local Git repository.
 
-[:ref:`back to step-by-step guide <introlatestcheck_bissbs>`]
+[:ref:`back to step-by-step guide <checkoutmaster_bissbs>`]
 
 .. _build_bisref:
 
 Build your kernel
------------------
+~~~~~~~~~~~~~~~~~
 
   *Build the image and the modules of your first kernel using the config file
   you prepared.* [:ref:`... <build_bissbs>`]
@@ -1521,7 +1663,7 @@ yourself. Another subsection explains how to directly package your kernel up as
 deb, rpm or tar file.
 
 Dealing with build errors
-~~~~~~~~~~~~~~~~~~~~~~~~~
+"""""""""""""""""""""""""
 
 When a build error occurs, it might be caused by some aspect of your machine's
 setup that often can be fixed quickly; other times though the problem lies in
@@ -1552,11 +1694,11 @@ by modifying your search terms or using another line from the error messages.
 
 In the end, most issues you run into have likely been encountered and
 reported by others already. That includes issues where the cause is not your
-system, but lies in the code. If you run into one of those, you might thus find a
-solution (e.g. a patch) or workaround for your issue, too.
+system, but lies in the code. If you run into one of those, you might thus find
+a solution (e.g. a patch) or workaround for your issue, too.
 
 Package your kernel up
-~~~~~~~~~~~~~~~~~~~~~~
+""""""""""""""""""""""
 
 The step-by-step guide uses the default make targets (e.g. 'bzImage' and
 'modules' on x86) to build the image and the modules of your kernel, which later
@@ -1587,7 +1729,7 @@ distribution's kernel packages.
 .. _install_bisref:
 
 Put the kernel in place
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
 
   *Install the kernel you just built.* [:ref:`... <install_bissbs>`]
 
@@ -1630,7 +1772,7 @@ process. Afterwards add your kernel to your bootloader configuration and reboot.
 .. _storagespace_bisref:
 
 Storage requirements per kernel
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Check how much storage space the kernel, its modules, and other related files
   like the initramfs consume.* [:ref:`... <storagespace_bissbs>`]
@@ -1651,7 +1793,7 @@ need to look in different places.
 .. _tainted_bisref:
 
 Check if your newly built kernel considers itself 'tainted'
------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Check if the kernel marked itself as 'tainted'.*
   [:ref:`... <tainted_bissbs>`]
@@ -1670,7 +1812,7 @@ interest, as your testing might be flawed otherwise.
 .. _recheckbroken_bisref:
 
 Check the kernel built from a recent mainline codebase
-------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Verify if your bug occurs with the newly built kernel.*
   [:ref:`... <recheckbroken_bissbs>`]
@@ -1696,7 +1838,7 @@ the kernel you built from the latest codebase. These are the most frequent:
 .. _recheckstablebroken_bisref:
 
 Check the kernel built from the latest stable/longterm codebase
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Are you facing a regression within a stable/longterm release, but failed to
   reproduce it with the kernel you just built using the latest mainline sources?
@@ -1741,7 +1883,7 @@ ensure the kernel version you assumed to be 'good' earlier in the process (e.g.
 .. _recheckworking_bisref:
 
 Build your own version of the 'good' kernel
--------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Build your own variant of the working kernel and check if the feature that
   regressed works as expected with it.* [:ref:`... <recheckworking_bissbs>`]
@@ -1767,15 +1909,25 @@ multitude of reasons why this might happen. Some ideas where to look:
 
 Note, if you found and fixed problems with the .config file, you want to use it
 to build another kernel from the latest codebase, as your earlier tests with
-mainline and the latest version from an affected stable/longterm series were most
-likely flawed.
+mainline and the latest version from an affected stable/longterm series were
+most likely flawed.
 
 [:ref:`back to step-by-step guide <recheckworking_bissbs>`]
 
+Perform a bisection and validate the result
+-------------------------------------------
+
+  *With all the preparations and precaution builds taken care of, you are now
+  ready to begin the bisection.* [:ref:`... <introbisect_bissbs>`]
+
+The steps in this segment perform and validate the bisection.
+
+[:ref:`back to step-by-step guide <introbisect_bissbs>`].
+
 .. _bisectstart_bisref:
 
 Start the bisection
--------------------
+~~~~~~~~~~~~~~~~~~~
 
   *Start the bisection and tell Git about the versions earlier established as
   'good' and 'bad'.* [:ref:`... <bisectstart_bissbs>`]
@@ -1789,7 +1941,7 @@ for you to test.
 .. _bisectbuild_bisref:
 
 Build a kernel from the bisection point
----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Build, install, and boot a kernel from the code Git checked out using the
   same commands you used earlier.* [:ref:`... <bisectbuild_bissbs>`]
@@ -1817,7 +1969,7 @@ There are two things worth of note here:
 .. _bisecttest_bisref:
 
 Bisection checkpoint
---------------------
+~~~~~~~~~~~~~~~~~~~~
 
   *Check if the feature that regressed works in the kernel you just built.*
   [:ref:`... <bisecttest_bissbs>`]
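 
 Once you know the outcome, you tell Git about it, which makes it check out the
 next bisection point; roughly::
 
   git bisect good   # the feature works in the kernel you just built
   git bisect bad    # or: the regression shows up in it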
@@ -1831,7 +1983,7 @@ will be for nothing.
 .. _bisectlog_bisref:
 
 Put the bisection log away
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Store Git's bisection log and the current .config file in a safe place.*
   [:ref:`... <bisectlog_bissbs>`]
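 
 The log can be written out at any point while the bisection is still active; a
 sketch (the file names are merely suggestions)::
 
   git bisect log > ~/bisection-log
   cp .config ~/bisection-config-culprit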
@@ -1851,7 +2003,7 @@ ask for it after you report the regression.
 .. _revert_bisref:
 
 Try reverting the culprit
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *Try reverting the culprit on top of the latest codebase to see if this fixes
   your regression.* [:ref:`... <revert_bissbs>`]
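 
 The revert itself usually is a one-liner; a rough sketch, with a placeholder
 standing in for the culprit's commit id::
 
   git revert --no-edit <culprit-sha>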
@@ -1869,14 +2021,20 @@ succeeds, test that kernel version instead.
 
 [:ref:`back to step-by-step guide <revert_bissbs>`]
 
+Cleanup steps during and after following this guide
+---------------------------------------------------
 
-Supplementary tasks: cleanup during and after the bisection
------------------------------------------------------------
+  *During and after following this guide you might want or need to remove some
+  of the kernels you installed.* [:ref:`... <introclosure_bissbs>`]
+
+The steps in this section describe clean-up procedures.
+
+[:ref:`back to step-by-step guide <introclosure_bissbs>`].
 
 .. _makeroom_bisref:
 
 Cleaning up during the bisection
---------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
   *To remove one of the kernels you installed, look up its 'kernelrelease'
   identifier.* [:ref:`... <makeroom_bissbs>`]
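 
 If you no longer remember the exact identifier, listing the installed module
 directories usually is a quick way to find it again::
 
   ls /lib/modules/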
@@ -1911,13 +2069,13 @@ Now remove the boot entry for the kernel from your bootloader's configuration;
 the steps to do that vary quite a bit between Linux distributions.
 
 Note, be careful with wildcards like '*' when deleting files or directories
-for kernels manually: you might accidentally remove files of a 6.0.11 kernel
+for kernels manually: you might accidentally remove files of a 6.0.13 kernel
 when all you want is to remove 6.0 or 6.0.1.
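 
 A safer pattern is to first list what a wildcard actually matches before
 removing anything; for example (the version here is just a placeholder)::
 
   ls -d /lib/modules/6.0.1*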
 
 [:ref:`back to step-by-step guide <makeroom_bissbs>`]
 
 Cleaning up after the bisection
--------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. _finishingtouch_bisref:
 
@@ -1932,26 +2090,105 @@ build artifacts and the Linux sources, but will leave the Git repository
 (~/linux/.git/) behind -- a simple ``git reset --hard`` thus will bring the
 sources back.
 
-Removing the repository as well would likely be unwise at this point: there is a
-decent chance developers will ask you to build another kernel to perform
-additional tests. This is often required to debug an issue or check proposed
-fixes. Before doing so you want to run the ``git fetch mainline`` command again
-followed by ``git checkout mainline/master`` to bring your clone up to date and
-checkout the latest codebase. Then apply the patch using ``git apply
-<filename>`` or ``git am <filename>`` and build yet another kernel using the
-familiar commands.
+Removing the repository as well would likely be unwise at this point: there
+is a decent chance developers will ask you to build another kernel to
+perform additional tests -- like testing a debug patch or a proposed fix.
+Details on how to perform those can be found in the section :ref:`Optional
+tasks: test reverts, patches, or later versions <introoptional_bissbs>`.
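+
+For example, assuming a developer sent you a fix that you saved as
+'~/proposed-fix.patch' (a placeholder name), the rough sequence would look
+something like this::
+
+  cd ~/linux/
+  git fetch mainline
+  git checkout mainline/master
+  git apply ~/proposed-fix.patch
+
+Afterwards build, install, and boot a kernel with the familiar commands.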
 
 Additional tests are also the reason why you want to keep the
 ~/kernel-config-working file around for a few weeks.
 
 [:ref:`back to step-by-step guide <finishingtouch_bissbs>`]
 
+.. _introoptional_bisref:
 
-Additional reading material
-===========================
+Test reverts, patches, or later versions
+----------------------------------------
+
+  *While or after reporting a bug, you might want or potentially will be asked
+  to test reverts, patches, proposed fixes, or other versions.*
+  [:ref:`... <introoptional_bissbs>`]
+
+All the commands used in this section should be pretty straightforward, so
+there is not much to add except one thing: when setting a kernel tag as
+instructed, ensure it is not much longer than the one used in the example, as
+problems will arise if the kernelrelease identifier exceeds 63 characters.
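+
+For instance, to quickly check how long the identifier for your current
+configuration would turn out, something like this should work from within the
+configured source tree::
+
+  make -s kernelrelease | awk '{ print length }'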
+
+[:ref:`back to step-by-step guide <introoptional_bissbs>`].
+
+
+Additional information
+======================
+
+.. _buildhost_bis:
+
+Build kernels on a different machine
+------------------------------------
+
+To compile kernels on another system, slightly alter the step-by-step guide's
+instructions:
+
+* Start following the guide on the machine where you want to install and test
+  the kernels later.
+
+* After executing ':ref:`Boot into the working kernel and briefly use the
+  apparently broken feature <bootworking_bissbs>`', save the list of loaded
+  modules to a file using ``lsmod > ~/test-machine-lsmod``. Then locate the
+  build configuration for the running kernel (see ':ref:`Start defining the
+  build configuration for your kernel <oldconfig_bisref>`' for hints on where
+  to find it) and store it as '~/test-machine-config-working'. Transfer both
+  files to the home directory of your build host.
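+
+  For example, assuming the build host is reachable via SSH under the
+  placeholder name 'buildhost', something like this would do::
+
+    scp ~/test-machine-lsmod ~/test-machine-config-working buildhost:~/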
+
+* Continue the guide on the build host (e.g. with ':ref:`Ensure to have enough
+  free space for building [...] <diskspace_bissbs>`').
+
+* When you reach ':ref:`Start preparing a kernel build configuration[...]
+  <oldconfig_bissbs>`': before running ``make olddefconfig`` for the first time,
+  execute the following command to base your configuration on the one from the
+  test machine's 'working' kernel::
+
+    cp ~/test-machine-config-working ~/linux/.config
+
+* During the next step to ':ref:`disable any apparently superfluous kernel
+  modules <localmodconfig_bissbs>`' use the following command instead::
 
-Further sources
----------------
+    yes '' | make localmodconfig LSMOD=~/test-machine-lsmod
+
+* Continue the guide, but ignore the instructions outlining how to compile,
+  install, and reboot into a kernel every time they come up. Instead build
+  like this::
+
+    cp ~/kernel-config-working .config
+    make olddefconfig &&
+    make -j $(nproc --all) targz-pkg
+
+  This will generate a gzipped tar file whose name is printed in the last
+  line shown; for example, a kernel with the kernelrelease identifier
+  '6.0.0-rc1-local-g928a87efa423' built for x86 machines usually will
+  be stored as '~/linux/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz'.
+
+  Copy that file to your test machine's home directory.
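+
+  One way to do that (assuming the test machine is reachable via SSH under the
+  placeholder name 'testmachine')::
+
+    scp ~/linux/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz testmachine:~/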
+
+* Switch to the test machine to check if you have enough space to hold another
+  kernel. Then extract the file you transferred::
+
+    sudo tar -xvzf ~/linux-6.0.0-rc1-local-g928a87efa423-x86.tar.gz -C /
+
+  Afterwards :ref:`generate the initramfs and add the kernel to your boot
+  loader's configuration <install_bisref>`; on some distributions the following
+  command will take care of both these tasks::
+
+    sudo /sbin/installkernel 6.0.0-rc1-local-g928a87efa423 /boot/vmlinuz-6.0.0-rc1-local-g928a87efa423
+
+  Now reboot and ensure you started the intended kernel.
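+
+  A quick way to verify that is comparing the output of ``uname -r`` with the
+  kernelrelease identifier of the kernel you just installed::
+
+    uname -r    # should print 6.0.0-rc1-local-g928a87efa423 in this example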
+
+This approach even works when building for another architecture: just install
+cross-compilers and add the appropriate parameters to every invocation of make
+(e.g. ``make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- [...]``).
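+
+A cross-build of the tar package for arm64 might thus roughly look like this,
+assuming your distribution ships a cross-compiler invoked through the
+'aarch64-linux-gnu-' prefix::
+
+  make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- olddefconfig
+  make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- -j $(nproc --all) targz-pkg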
+
+Additional reading material
+---------------------------
 
 * The `man page for 'git bisect' <https://git-scm.com/docs/git-bisect>`_ and
   `fighting regressions with 'git bisect' <https://git-scm.com/docs/git-bisect-lk2009.html>`_
index ebf03f5f0619a662bfca89f6457ae98375fb660d..a86685c57e129e2fb7a16ec947a1bbef37533035 100644 (file)
@@ -7829,9 +7829,8 @@ W:        http://aeschi.ch.eu.org/efs/
 F:     fs/efs/
 
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:     Douglas Miller <dougmill@linux.ibm.com>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Orphan
 F:     drivers/net/ethernet/ibm/ehea/
 
 ELM327 CAN NETWORK DRIVER
@@ -9576,7 +9575,7 @@ F:        kernel/power/
 
 HID CORE LAYER
 M:     Jiri Kosina <jikos@kernel.org>
-M:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M:     Benjamin Tissoires <bentiss@kernel.org>
 L:     linux-input@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
@@ -17873,7 +17872,7 @@ F:      Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
 F:     drivers/media/rc/pwm-ir-tx.c
 
 PWM SUBSYSTEM
-M:     Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+M:     Uwe Kleine-König <ukleinek@kernel.org>
 L:     linux-pwm@vger.kernel.org
 S:     Maintained
 Q:     https://patchwork.ozlabs.org/project/linux-pwm/list/
@@ -20177,7 +20176,6 @@ F:      include/linux/platform_data/simplefb.h
 
 SIOX
 M:     Thorsten Scherer <t.scherer@eckelmann.de>
-M:     Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
 R:     Pengutronix Kernel Team <kernel@pengutronix.de>
 S:     Supported
 F:     drivers/gpio/gpio-siox.c
@@ -22839,7 +22837,7 @@ F:      drivers/usb/host/ehci*
 
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 M:     Jiri Kosina <jikos@kernel.org>
-M:     Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M:     Benjamin Tissoires <bentiss@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
index a5f300ec6f2808b8890ebc27d0de8a918eaa8636..54ad04dacdee94d869b2bd7d0ab92a92e60fe642 100644 (file)
@@ -595,7 +595,7 @@ config ARCH_SELECTS_CRASH_DUMP
        select RELOCATABLE
 
 config ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
-       def_bool CRASH_CORE
+       def_bool CRASH_RESERVE
 
 config RELOCATABLE
        bool "Relocatable kernel"
diff --git a/arch/loongarch/include/asm/crash_core.h b/arch/loongarch/include/asm/crash_core.h
deleted file mode 100644 (file)
index 218bdbf..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _LOONGARCH_CRASH_CORE_H
-#define _LOONGARCH_CRASH_CORE_H
-
-#define CRASH_ALIGN                    SZ_2M
-
-#define CRASH_ADDR_LOW_MAX             SZ_4G
-#define CRASH_ADDR_HIGH_MAX            memblock_end_of_DRAM()
-
-extern phys_addr_t memblock_end_of_DRAM(void);
-
-#endif
diff --git a/arch/loongarch/include/asm/crash_reserve.h b/arch/loongarch/include/asm/crash_reserve.h
new file mode 100644 (file)
index 0000000..a1d9b84
--- /dev/null
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _LOONGARCH_CRASH_RESERVE_H
+#define _LOONGARCH_CRASH_RESERVE_H
+
+#define CRASH_ALIGN                    SZ_2M
+
+#define CRASH_ADDR_LOW_MAX             SZ_4G
+#define CRASH_ADDR_HIGH_MAX            memblock_end_of_DRAM()
+
+extern phys_addr_t memblock_end_of_DRAM(void);
+
+#endif
index 2a35a0bc2aaabf128cb5336d25dcbec1738d646b..52b638059e40b31645a62243e467c09e7d7ce0cf 100644 (file)
@@ -7,6 +7,14 @@
 #ifndef __LOONGARCH_PERF_EVENT_H__
 #define __LOONGARCH_PERF_EVENT_H__
 
+#include <asm/ptrace.h>
+
 #define perf_arch_bpf_user_pt_regs(regs) (struct user_pt_regs *)regs
 
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+       (regs)->csr_era = (__ip); \
+       (regs)->regs[3] = current_stack_pointer; \
+       (regs)->regs[22] = (unsigned long) __builtin_frame_address(0); \
+}
+
 #endif /* __LOONGARCH_PERF_EVENT_H__ */
index da7a3b5b9374aeaf8bc1009d49d3ee0265938e9e..e071f5e9e85802b2117ba89ce86e4ca3219864d2 100644 (file)
@@ -132,8 +132,6 @@ static __always_inline void invtlb_all(u32 op, u32 info, u64 addr)
                );
 }
 
-#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-
 static void tlb_flush(struct mmu_gather *tlb);
 
 #define tlb_flush tlb_flush
index 0491bf453cd49601c4f8b7b35565ea4a2b83c689..cac7cba81b65f791cf5d3379dfda2daca01814ec 100644 (file)
@@ -884,4 +884,4 @@ static int __init init_hw_perf_events(void)
 
        return 0;
 }
-early_initcall(init_hw_perf_events);
+pure_initcall(init_hw_perf_events);
index 1fc2f6813ea027d43ccf24af8aade31f1093df62..97b40defde060846d95c9bc02c70b13ec53372a7 100644 (file)
@@ -202,10 +202,10 @@ good_area:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
        } else {
-               if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
-                       goto bad_area;
                if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
                        goto bad_area;
+               if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs))
+                       goto bad_area;
        }
 
        /*
index 4dc94145eb533065f2ad4547c0b11e010dfe126a..da2a167a4d08b66fe26c99826a9e84277f76f0e3 100644 (file)
@@ -882,7 +882,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
                goto abort_claiming;
        ret = -EBUSY;
        if (!bdev_may_open(bdev, mode))
-               goto abort_claiming;
+               goto put_module;
        if (bdev_is_partition(bdev))
                ret = blkdev_get_part(bdev, mode);
        else
index 4bfbe55553f410119378fc3bbf8c73b9d6717a05..a40b6f3946efeb6b46fccd445053a5c48e069254 100644 (file)
@@ -170,8 +170,8 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
 #define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
 
 /* Shift and apply the mask for CPC reads/writes */
-#define MASK_VAL(reg, val) ((val) >> ((reg)->bit_offset &                      \
-                                       GENMASK(((reg)->bit_width), 0)))
+#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) &                     \
+                                       GENMASK(((reg)->bit_width) - 1, 0))
 
 static ssize_t show_feedback_ctrs(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
@@ -1002,14 +1002,14 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
        }
 
        *val = 0;
+       size = GET_BIT_WIDTH(reg);
 
        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-               u32 width = GET_BIT_WIDTH(reg);
                u32 val_u32;
                acpi_status status;
 
                status = acpi_os_read_port((acpi_io_address)reg->address,
-                                          &val_u32, width);
+                                          &val_u32, size);
                if (ACPI_FAILURE(status)) {
                        pr_debug("Error: Failed to read SystemIO port %llx\n",
                                 reg->address);
@@ -1018,17 +1018,22 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
 
                *val = val_u32;
                return 0;
-       } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+       } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+               /*
+                * For registers in PCC space, the register size is determined
+                * by the bit width field; the access size is used to indicate
+                * the PCC subspace id.
+                */
+               size = reg->bit_width;
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+       }
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_read_ffh(cpu, reg, val);
        else
                return acpi_os_read_memory((acpi_physical_address)reg->address,
-                               val, reg->bit_width);
-
-       size = GET_BIT_WIDTH(reg);
+                               val, size);
 
        switch (size) {
        case 8:
@@ -1044,8 +1049,13 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
                *val = readq_relaxed(vaddr);
                break;
        default:
-               pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
-                        reg->bit_width, pcc_ss_id);
+               if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+                       pr_debug("Error: Cannot read %u bit width from system memory: 0x%llx\n",
+                               size, reg->address);
+               } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+                       pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
+                               size, pcc_ss_id);
+               }
                return -EFAULT;
        }
 
@@ -1063,12 +1073,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;
 
+       size = GET_BIT_WIDTH(reg);
+
        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
-               u32 width = GET_BIT_WIDTH(reg);
                acpi_status status;
 
                status = acpi_os_write_port((acpi_io_address)reg->address,
-                                           (u32)val, width);
+                                           (u32)val, size);
                if (ACPI_FAILURE(status)) {
                        pr_debug("Error: Failed to write SystemIO port %llx\n",
                                 reg->address);
@@ -1076,17 +1087,22 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
                }
 
                return 0;
-       } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
+       } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0) {
+               /*
+                * For registers in PCC space, the register size is determined
+                * by the bit width field; the access size is used to indicate
+                * the PCC subspace id.
+                */
+               size = reg->bit_width;
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
+       }
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_write_ffh(cpu, reg, val);
        else
                return acpi_os_write_memory((acpi_physical_address)reg->address,
-                               val, reg->bit_width);
-
-       size = GET_BIT_WIDTH(reg);
+                               val, size);
 
        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                val = MASK_VAL(reg, val);
@@ -1105,8 +1121,13 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
                writeq_relaxed(val, vaddr);
                break;
        default:
-               pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
-                        reg->bit_width, pcc_ss_id);
+               if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+                       pr_debug("Error: Cannot write %u bit width to system memory: 0x%llx\n",
+                               size, reg->address);
+               } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
+                       pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
+                               size, pcc_ss_id);
+               }
                ret_val = -EFAULT;
                break;
        }
index cd84af23f7eac8fad793c6393bcd8130596077ce..dd0b40b9bbe8bef5f8c30a082eebbc25dcdfeafd 100644 (file)
@@ -492,16 +492,14 @@ static int lps0_device_attach(struct acpi_device *adev,
                        unsigned int func_mask;
 
                        /*
-                        * Avoid evaluating the same _DSM function for two
-                        * different UUIDs and prioritize the MSFT one.
+                        * Log a message if the _DSM function sets for two
+                        * different UUIDs overlap.
                         */
                        func_mask = lps0_dsm_func_mask & lps0_dsm_func_mask_microsoft;
-                       if (func_mask) {
+                       if (func_mask)
                                acpi_handle_info(adev->handle,
                                                 "Duplicate LPS0 _DSM functions (mask: 0x%x)\n",
                                                 func_mask);
-                               lps0_dsm_func_mask &= ~func_mask;
-                       }
                }
        }
 
index ac8ebccd350756747eee3400596e04fcbac3cabd..812fd2a8f853e1dc305fa6aab04db8098ea28872 100644 (file)
@@ -380,8 +380,10 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
        switch (data->cd_info.state) {
        case HCI_DEVCOREDUMP_IDLE:
                err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE);
-               if (err < 0)
+               if (err < 0) {
+                       kfree_skb(skb);
                        break;
+               }
                data->cd_info.cnt = 0;
 
                /* It is supposed coredump can be done within 5 seconds */
@@ -407,9 +409,6 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb)
                break;
        }
 
-       if (err < 0)
-               kfree_skb(skb);
-
        return err;
 }
 EXPORT_SYMBOL_GPL(btmtk_process_coredump);
index 19cfc342fc7bbb67af65cb4de10e074622a991a4..216826c31ee34f0e65ef74edcab98c7cd9eff7e5 100644 (file)
@@ -15,6 +15,8 @@
 
 #define VERSION "0.1"
 
+#define QCA_BDADDR_DEFAULT (&(bdaddr_t) {{ 0xad, 0x5a, 0x00, 0x00, 0x00, 0x00 }})
+
 int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver,
                         enum qca_btsoc_type soc_type)
 {
@@ -612,6 +614,38 @@ int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome);
 
+static int qca_check_bdaddr(struct hci_dev *hdev)
+{
+       struct hci_rp_read_bd_addr *bda;
+       struct sk_buff *skb;
+       int err;
+
+       if (bacmp(&hdev->public_addr, BDADDR_ANY))
+               return 0;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
+                            HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               bt_dev_err(hdev, "Failed to read device address (%d)", err);
+               return err;
+       }
+
+       if (skb->len != sizeof(*bda)) {
+               bt_dev_err(hdev, "Device address length mismatch");
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       bda = (struct hci_rp_read_bd_addr *)skb->data;
+       if (!bacmp(&bda->bdaddr, QCA_BDADDR_DEFAULT))
+               set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static void qca_generate_hsp_nvm_name(char *fwname, size_t max_size,
                struct qca_btsoc_version ver, u8 rom_ver, u16 bid)
 {
@@ -818,6 +852,10 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                break;
        }
 
+       err = qca_check_bdaddr(hdev);
+       if (err)
+               return err;
+
        bt_dev_info(hdev, "QCA setup on UART is completed");
 
        return 0;
index 06e915b57283f8ca3d3ab19b64e1e248e6ecb2bc..e3946f7b736e3cccfe727575096ff073fd86cf1d 100644 (file)
@@ -542,6 +542,8 @@ static const struct usb_device_id quirks_table[] = {
        /* Realtek 8852BE Bluetooth devices */
        { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
                                                     BTUSB_WIDEBAND_SPEECH },
+       { USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
+                                                    BTUSB_WIDEBAND_SPEECH },
        { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
                                                     BTUSB_WIDEBAND_SPEECH },
        { USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |
@@ -3480,13 +3482,12 @@ static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb)
 
 static void btusb_coredump_qca(struct hci_dev *hdev)
 {
+       int err;
        static const u8 param[] = { 0x26 };
-       struct sk_buff *skb;
 
-       skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT);
-       if (IS_ERR(skb))
-               bt_dev_err(hdev, "%s: triggle crash failed (%ld)", __func__, PTR_ERR(skb));
-       kfree_skb(skb);
+       err = __hci_cmd_send(hdev, 0xfc0c, 1, param);
+       if (err < 0)
+               bt_dev_err(hdev, "%s: trigger crash failed (%d)", __func__, err);
 }
 
 /*
index ecbc52eaf1010912b9024ddbc3c87aac4254e1e3..0c9c9ee56592dc851ab12f98be5a6be2465b812e 100644 (file)
@@ -1672,6 +1672,9 @@ static bool qca_wakeup(struct hci_dev *hdev)
        struct hci_uart *hu = hci_get_drvdata(hdev);
        bool wakeup;
 
+       if (!hu->serdev)
+               return true;
+
        /* BT SoC attached through the serial bus is handled by the serdev driver.
         * So we need to use the device handle of the serdev driver to get the
         * status of device may wakeup.
@@ -1905,8 +1908,6 @@ retry:
        case QCA_WCN6750:
        case QCA_WCN6855:
        case QCA_WCN7850:
-               set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
-
                qcadev = serdev_device_get_drvdata(hu->serdev);
                if (qcadev->bdaddr_property_broken)
                        set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);
@@ -1957,8 +1958,10 @@ retry:
                qca_debugfs_init(hdev);
                hu->hdev->hw_error = qca_hw_error;
                hu->hdev->cmd_timeout = qca_cmd_timeout;
-               if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
-                       hu->hdev->wakeup = qca_wakeup;
+               if (hu->serdev) {
+                       if (device_can_wakeup(hu->serdev->ctrl->dev.parent))
+                               hu->hdev->wakeup = qca_wakeup;
+               }
        } else if (ret == -ENOENT) {
                /* No patch/nvm-config found, run with original fw/config */
                set_bit(QCA_ROM_FW, &qca->flags);
@@ -2329,16 +2332,21 @@ static int qca_serdev_probe(struct serdev_device *serdev)
                    (data->soc_type == QCA_WCN6750 ||
                     data->soc_type == QCA_WCN6855)) {
                        dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n");
-                       power_ctrl_enabled = false;
+                       return PTR_ERR(qcadev->bt_en);
                }
 
+               if (!qcadev->bt_en)
+                       power_ctrl_enabled = false;
+
                qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl",
                                               GPIOD_IN);
                if (IS_ERR(qcadev->sw_ctrl) &&
                    (data->soc_type == QCA_WCN6750 ||
                     data->soc_type == QCA_WCN6855 ||
-                    data->soc_type == QCA_WCN7850))
-                       dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+                    data->soc_type == QCA_WCN7850)) {
+                       dev_err(&serdev->dev, "failed to acquire SW_CTRL gpio\n");
+                       return PTR_ERR(qcadev->sw_ctrl);
+               }
 
                qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
                if (IS_ERR(qcadev->susclk)) {
@@ -2357,10 +2365,13 @@ static int qca_serdev_probe(struct serdev_device *serdev)
                qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
                                               GPIOD_OUT_LOW);
                if (IS_ERR(qcadev->bt_en)) {
-                       dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
-                       power_ctrl_enabled = false;
+                       dev_err(&serdev->dev, "failed to acquire enable gpio\n");
+                       return PTR_ERR(qcadev->bt_en);
                }
 
+               if (!qcadev->bt_en)
+                       power_ctrl_enabled = false;
+
                qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
                if (IS_ERR(qcadev->susclk)) {
                        dev_warn(&serdev->dev, "failed to acquire clk\n");
index f0f54aeccc872b50311a14958ddf874860af7982..65185c9fa00134e4ed9f5449628ae04f053ba927 100644 (file)
@@ -946,25 +946,22 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
        struct cxl_memdev *cxlmd = mds->cxlds.cxlmd;
        struct device *dev = mds->cxlds.dev;
        struct cxl_get_event_payload *payload;
-       struct cxl_mbox_cmd mbox_cmd;
        u8 log_type = type;
        u16 nr_rec;
 
        mutex_lock(&mds->event.log_lock);
        payload = mds->event.buf;
 
-       mbox_cmd = (struct cxl_mbox_cmd) {
-               .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
-               .payload_in = &log_type,
-               .size_in = sizeof(log_type),
-               .payload_out = payload,
-               .min_out = struct_size(payload, records, 0),
-       };
-
        do {
                int rc, i;
-
-               mbox_cmd.size_out = mds->payload_size;
+               struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd) {
+                       .opcode = CXL_MBOX_OP_GET_EVENT_RECORD,
+                       .payload_in = &log_type,
+                       .size_in = sizeof(log_type),
+                       .payload_out = payload,
+                       .size_out = mds->payload_size,
+                       .min_out = struct_size(payload, records, 0),
+               };
 
                rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc) {
@@ -1297,7 +1294,6 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
        struct cxl_mbox_poison_out *po;
        struct cxl_mbox_poison_in pi;
-       struct cxl_mbox_cmd mbox_cmd;
        int nr_records = 0;
        int rc;
 
@@ -1309,16 +1305,16 @@ int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,
        pi.offset = cpu_to_le64(offset);
        pi.length = cpu_to_le64(len / CXL_POISON_LEN_MULT);
 
-       mbox_cmd = (struct cxl_mbox_cmd) {
-               .opcode = CXL_MBOX_OP_GET_POISON,
-               .size_in = sizeof(pi),
-               .payload_in = &pi,
-               .size_out = mds->payload_size,
-               .payload_out = po,
-               .min_out = struct_size(po, record, 0),
-       };
-
        do {
+               struct cxl_mbox_cmd mbox_cmd = (struct cxl_mbox_cmd){
+                       .opcode = CXL_MBOX_OP_GET_POISON,
+                       .size_in = sizeof(pi),
+                       .payload_in = &pi,
+                       .size_out = mds->payload_size,
+                       .payload_out = po,
+                       .min_out = struct_size(po, record, 0),
+               };
+
                rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc)
                        break;
index 64eaca80d736c5652958ddb5e21bc64ff3a6bc65..d0f6693ca142623af740bf69ee8664fc88926fcb 100644 (file)
@@ -42,6 +42,7 @@ struct dpll_pin_registration {
        struct list_head list;
        const struct dpll_pin_ops *ops;
        void *priv;
+       void *cookie;
 };
 
 struct dpll_device *dpll_device_get_by_id(int id)
@@ -54,12 +55,14 @@ struct dpll_device *dpll_device_get_by_id(int id)
 
 static struct dpll_pin_registration *
 dpll_pin_registration_find(struct dpll_pin_ref *ref,
-                          const struct dpll_pin_ops *ops, void *priv)
+                          const struct dpll_pin_ops *ops, void *priv,
+                          void *cookie)
 {
        struct dpll_pin_registration *reg;
 
        list_for_each_entry(reg, &ref->registration_list, list) {
-               if (reg->ops == ops && reg->priv == priv)
+               if (reg->ops == ops && reg->priv == priv &&
+                   reg->cookie == cookie)
                        return reg;
        }
        return NULL;
@@ -67,7 +70,8 @@ dpll_pin_registration_find(struct dpll_pin_ref *ref,
 
 static int
 dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
-                   const struct dpll_pin_ops *ops, void *priv)
+                   const struct dpll_pin_ops *ops, void *priv,
+                   void *cookie)
 {
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
@@ -78,7 +82,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
        xa_for_each(xa_pins, i, ref) {
                if (ref->pin != pin)
                        continue;
-               reg = dpll_pin_registration_find(ref, ops, priv);
+               reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (reg) {
                        refcount_inc(&ref->refcount);
                        return 0;
@@ -111,6 +115,7 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
        }
        reg->ops = ops;
        reg->priv = priv;
+       reg->cookie = cookie;
        if (ref_exists)
                refcount_inc(&ref->refcount);
        list_add_tail(&reg->list, &ref->registration_list);
@@ -119,7 +124,8 @@ dpll_xa_ref_pin_add(struct xarray *xa_pins, struct dpll_pin *pin,
 }
 
 static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
-                              const struct dpll_pin_ops *ops, void *priv)
+                              const struct dpll_pin_ops *ops, void *priv,
+                              void *cookie)
 {
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
@@ -128,7 +134,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
        xa_for_each(xa_pins, i, ref) {
                if (ref->pin != pin)
                        continue;
-               reg = dpll_pin_registration_find(ref, ops, priv);
+               reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (WARN_ON(!reg))
                        return -EINVAL;
                list_del(&reg->list);
@@ -146,7 +152,7 @@ static int dpll_xa_ref_pin_del(struct xarray *xa_pins, struct dpll_pin *pin,
 
 static int
 dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
-                    const struct dpll_pin_ops *ops, void *priv)
+                    const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
@@ -157,7 +163,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
        xa_for_each(xa_dplls, i, ref) {
                if (ref->dpll != dpll)
                        continue;
-               reg = dpll_pin_registration_find(ref, ops, priv);
+               reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (reg) {
                        refcount_inc(&ref->refcount);
                        return 0;
@@ -190,6 +196,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
        }
        reg->ops = ops;
        reg->priv = priv;
+       reg->cookie = cookie;
        if (ref_exists)
                refcount_inc(&ref->refcount);
        list_add_tail(&reg->list, &ref->registration_list);
@@ -199,7 +206,7 @@ dpll_xa_ref_dpll_add(struct xarray *xa_dplls, struct dpll_device *dpll,
 
 static void
 dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
-                    const struct dpll_pin_ops *ops, void *priv)
+                    const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
        struct dpll_pin_registration *reg;
        struct dpll_pin_ref *ref;
@@ -208,7 +215,7 @@ dpll_xa_ref_dpll_del(struct xarray *xa_dplls, struct dpll_device *dpll,
        xa_for_each(xa_dplls, i, ref) {
                if (ref->dpll != dpll)
                        continue;
-               reg = dpll_pin_registration_find(ref, ops, priv);
+               reg = dpll_pin_registration_find(ref, ops, priv, cookie);
                if (WARN_ON(!reg))
                        return;
                list_del(&reg->list);
@@ -594,14 +601,14 @@ EXPORT_SYMBOL_GPL(dpll_pin_put);
 
 static int
 __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
-                   const struct dpll_pin_ops *ops, void *priv)
+                   const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
        int ret;
 
-       ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv);
+       ret = dpll_xa_ref_pin_add(&dpll->pin_refs, pin, ops, priv, cookie);
        if (ret)
                return ret;
-       ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv);
+       ret = dpll_xa_ref_dpll_add(&pin->dpll_refs, dpll, ops, priv, cookie);
        if (ret)
                goto ref_pin_del;
        xa_set_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
@@ -610,7 +617,7 @@ __dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
        return ret;
 
 ref_pin_del:
-       dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
+       dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
        return ret;
 }
 
@@ -642,7 +649,7 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
                      dpll->clock_id == pin->clock_id)))
                ret = -EINVAL;
        else
-               ret = __dpll_pin_register(dpll, pin, ops, priv);
+               ret = __dpll_pin_register(dpll, pin, ops, priv, NULL);
        mutex_unlock(&dpll_lock);
 
        return ret;
@@ -651,11 +658,11 @@ EXPORT_SYMBOL_GPL(dpll_pin_register);
 
 static void
 __dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
-                     const struct dpll_pin_ops *ops, void *priv)
+                     const struct dpll_pin_ops *ops, void *priv, void *cookie)
 {
        ASSERT_DPLL_PIN_REGISTERED(pin);
-       dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv);
-       dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv);
+       dpll_xa_ref_pin_del(&dpll->pin_refs, pin, ops, priv, cookie);
+       dpll_xa_ref_dpll_del(&pin->dpll_refs, dpll, ops, priv, cookie);
        if (xa_empty(&pin->dpll_refs))
                xa_clear_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED);
 }
@@ -680,7 +687,7 @@ void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
 
        mutex_lock(&dpll_lock);
        dpll_pin_delete_ntf(pin);
-       __dpll_pin_unregister(dpll, pin, ops, priv);
+       __dpll_pin_unregister(dpll, pin, ops, priv, NULL);
        mutex_unlock(&dpll_lock);
 }
 EXPORT_SYMBOL_GPL(dpll_pin_unregister);
@@ -716,12 +723,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
                return -EINVAL;
 
        mutex_lock(&dpll_lock);
-       ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
+       ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv, pin);
        if (ret)
                goto unlock;
        refcount_inc(&pin->refcount);
        xa_for_each(&parent->dpll_refs, i, ref) {
-               ret = __dpll_pin_register(ref->dpll, pin, ops, priv);
+               ret = __dpll_pin_register(ref->dpll, pin, ops, priv, parent);
                if (ret) {
                        stop = i;
                        goto dpll_unregister;
@@ -735,11 +742,12 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
 dpll_unregister:
        xa_for_each(&parent->dpll_refs, i, ref)
                if (i < stop) {
-                       __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+                       __dpll_pin_unregister(ref->dpll, pin, ops, priv,
+                                             parent);
                        dpll_pin_delete_ntf(pin);
                }
        refcount_dec(&pin->refcount);
-       dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+       dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
 unlock:
        mutex_unlock(&dpll_lock);
        return ret;
@@ -764,10 +772,10 @@ void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
 
        mutex_lock(&dpll_lock);
        dpll_pin_delete_ntf(pin);
-       dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv);
+       dpll_xa_ref_pin_del(&pin->parent_refs, parent, ops, priv, pin);
        refcount_dec(&pin->refcount);
        xa_for_each(&pin->dpll_refs, i, ref)
-               __dpll_pin_unregister(ref->dpll, pin, ops, priv);
+               __dpll_pin_unregister(ref->dpll, pin, ops, priv, parent);
        mutex_unlock(&dpll_lock);
 }
 EXPORT_SYMBOL_GPL(dpll_pin_on_pin_unregister);
index df58a6a1a67ec51f1bb81ff1bd8364be8a46cc13..2131de36e3dac00fee06ccbcc1aebe80d69f0fbc 100644 (file)
@@ -1854,6 +1854,7 @@ err_node_allow:
 err_bo_create:
        amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
 err_reserve_limit:
+       amdgpu_sync_free(&(*mem)->sync);
        mutex_destroy(&(*mem)->lock);
        if (gobj)
                drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
 
        amdgpu_sync_create(&sync_obj);
 
-       /* Validate BOs and map them to GPUVM (update VM page tables). */
+       /* Validate BOs managed by KFD */
        list_for_each_entry(mem, &process_info->kfd_bo_list,
                            validate_list) {
 
                struct amdgpu_bo *bo = mem->bo;
                uint32_t domain = mem->domain;
-               struct kfd_mem_attachment *attachment;
                struct dma_resv_iter cursor;
                struct dma_fence *fence;
 
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
                                goto validate_map_fail;
                        }
                }
+       }
+
+       if (failed_size)
+               pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+       /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+        * validations above would invalidate DMABuf imports again.
+        */
+       ret = process_validate_vms(process_info, &exec.ticket);
+       if (ret) {
+               pr_debug("Validating VMs failed, ret: %d\n", ret);
+               goto validate_map_fail;
+       }
+
+       /* Update mappings managed by KFD. */
+       list_for_each_entry(mem, &process_info->kfd_bo_list,
+                           validate_list) {
+               struct kfd_mem_attachment *attachment;
+
                list_for_each_entry(attachment, &mem->attachments, list) {
                        if (!attachment->is_mapped)
                                continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
                }
        }
 
-       if (failed_size)
-               pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
-
-       /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
-        * validations above would invalidate DMABuf imports again.
-        */
-       ret = process_validate_vms(process_info, &exec.ticket);
-       if (ret) {
-               pr_debug("Validating VMs failed, ret: %d\n", ret);
-               goto validate_map_fail;
-       }
-
        /* Update mappings not managed by KFD */
        list_for_each_entry(peer_vm, &process_info->vm_list_head,
                        vm_list_node) {
index a00cf4756ad0e2f371742e760183882773a80243..1569bef030eac166ea6194427ca9c8489cc62796 100644 (file)
@@ -1132,6 +1132,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
                return;
 
        amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
+       del_timer_sync(&ring->fence_drv.fallback_timer);
        amdgpu_ring_fini(ring);
        kfree(ring);
 }
index 2099159a693fa02e7c508c3aecdc9f695498cecd..ce733e3cb35d05e445830dc22b8216bd0f6dd014 100644 (file)
@@ -605,6 +605,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        else
                amdgpu_bo_placement_from_domain(bo, bp->domain);
        if (bp->type == ttm_bo_type_kernel)
+               bo->tbo.priority = 2;
+       else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
                bo->tbo.priority = 1;
 
        if (!bp->destroy)
index 0df97c3e3a700dccc7bc84d4688e1dc8e1b8a1e2..f7c73533e336fac3358c58e387cb27e102ac2684 100644 (file)
@@ -774,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+               return 0;
+
        return umsch_mm_test(adev);
 }
 
index 6695481f870f8a0dc61edfbe0d9947b19288bc25..c23d97d34b7ec55e90f2f9f2f81f9a87e7c7ff26 100644 (file)
@@ -205,7 +205,7 @@ disable_dpm:
        dpm_ctl &= 0xfffffffe; /* Disable DPM */
        WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
        dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
-       return 0;
+       return -EINVAL;
 }
 
 int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
index f90905ef32c76d62c3d490445b71388e9e0dc6bb..701146d649c353c9ace940af21a52c48fd37523c 100644 (file)
@@ -9186,7 +9186,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
                7 + /* PIPELINE_SYNC */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-               2 + /* VM_FLUSH */
+               4 + /* VM_FLUSH */
                8 + /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9276,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
                7 + /* gfx_v10_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-               2 + /* gfx_v10_0_ring_emit_vm_flush */
                8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
        .emit_ib = gfx_v10_0_ring_emit_ib_compute,
index f7325b02a191f726196d4ad0ac6fa3d090ab9977..f00e05aba46a4e40b97ef0cbfe093a3b17ef350f 100644 (file)
@@ -6192,7 +6192,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
                7 + /* PIPELINE_SYNC */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-               2 + /* VM_FLUSH */
+               4 + /* VM_FLUSH */
                8 + /* FENCE for VM_FLUSH */
                20 + /* GDS switch */
                5 + /* COND_EXEC */
@@ -6278,7 +6278,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
                7 + /* gfx_v11_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-               2 + /* gfx_v11_0_ring_emit_vm_flush */
                8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
        .emit_ib = gfx_v11_0_ring_emit_ib_compute,
index 6f97a6d0e6d0525cd6b7db25611d71506b3b0da6..99dbd2341120db5fa82b653daf76758052f7e306 100644 (file)
@@ -6981,7 +6981,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
                7 + /* gfx_v9_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-               2 + /* gfx_v9_0_ring_emit_vm_flush */
                8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
                7 + /* gfx_v9_0_emit_mem_sync */
                5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7018,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
                7 + /* gfx_v9_0_ring_emit_pipeline_sync */
                SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
                SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
-               2 + /* gfx_v9_0_ring_emit_vm_flush */
                8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
        .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
        .emit_fence = gfx_v9_0_ring_emit_fence_kiq,
index 82eab49be82bb99807e5caabf5079b32dfd4cb26..e708468ac54dd57fbe8fd46b250ff1bc6dd42bb2 100644 (file)
@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+       ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
+                      << (ring->me % adev->sdma.num_inst_per_aid);
 
        sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
                               adev->nbio.funcs->get_hdp_flush_done_offset(adev),
index 42f4bd250def622d490d355bac4883772def9c7a..da01b524b9f2a91dac57e75f7b1f1702116f6893 100644 (file)
@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
        u32 ref_and_mask = 0;
        const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
 
-       ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
-
-       amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
-                         SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
-                         SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
-       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
-       amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
-       amdgpu_ring_write(ring, ref_and_mask); /* reference */
-       amdgpu_ring_write(ring, ref_and_mask); /* mask */
-       amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
-                         SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+       if (ring->me > 1) {
+               amdgpu_asic_flush_hdp(adev, ring);
+       } else {
+               ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
+
+               amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
+                                 SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
+                                 SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
+               amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
+               amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
+               amdgpu_ring_write(ring, ref_and_mask); /* reference */
+               amdgpu_ring_write(ring, ref_and_mask); /* mask */
+               amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+                                 SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
+       }
 }
 
 /**
index 769eb8f7bb3c570c90fb6325ecaeb0f49368ec24..09315dd5a1ec95a896ff62e3010746f1750757f8 100644 (file)
@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
                        WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
        }
 
+       /* setup collaborate mode */
+       vpe_v6_1_set_collaborate_mode(vpe, true);
+       /* setup DPM */
+       if (amdgpu_vpe_configure_dpm(vpe))
+               dev_warn(adev->dev, "VPE failed to enable DPM\n");
+
        /*
         * For VPE 6.1.1, still only need to add master's offset, and psp will apply it to slave as well.
         * Here use instance 0 as master.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
                adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
                adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
 
-               amdgpu_vpe_psp_update_sram(adev);
-               vpe_v6_1_set_collaborate_mode(vpe, true);
-               amdgpu_vpe_configure_dpm(vpe);
-
-               return 0;
+               return amdgpu_vpe_psp_update_sram(adev);
        }
 
        vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
        }
 
        vpe_v6_1_halt(vpe, false);
-       vpe_v6_1_set_collaborate_mode(vpe, true);
-       amdgpu_vpe_configure_dpm(vpe);
 
        return 0;
 }
index bdc01ca9609a7e57fac05ee60d6866a5950e2b07..5c8d81bfce7ab14938c9a45cc23f3ab6beac1dd0 100644 (file)
@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
        start = start_mgr << PAGE_SHIFT;
        end = (last_mgr + 1) << PAGE_SHIFT;
 
+       r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
+                                       prange->npages * PAGE_SIZE,
+                                       KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+                                       node->xcp ? node->xcp->id : 0);
+       if (r) {
+               dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
+               return -ENOSPC;
+       }
+
        r = svm_range_vram_node_new(node, prange, true);
        if (r) {
                dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
-               return r;
+               goto out;
        }
        ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
 
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
                svm_range_vram_node_free(prange);
        }
 
+out:
+       amdgpu_amdkfd_unreserve_mem_limit(node->adev,
+                                       prange->npages * PAGE_SIZE,
+                                       KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
+                                       node->xcp ? node->xcp->id : 0);
        return r < 0 ? r : 0;
 }
 
index b79986412cd839bc89741a0b3bc1986daa2b10e4..58c1fe5421934d547bc552d5e72526468951bf69 100644 (file)
@@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p)
        rcu_read_lock();
        ef = dma_fence_get_rcu_safe(&p->ef);
        rcu_read_unlock();
+       if (!ef)
+               return -EINVAL;
 
        ret = dma_fence_signal(ef);
        dma_fence_put(ef);
@@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work)
                 * they are responsible stopping the queues and scheduling
                 * the restore work.
                 */
-               if (!signal_eviction_fence(p))
-                       queue_delayed_work(kfd_restore_wq, &p->restore_work,
-                               msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
-               else
+               if (signal_eviction_fence(p) ||
+                   mod_delayed_work(kfd_restore_wq, &p->restore_work,
+                                    msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
                        kfd_process_restore_queues(p);
 
                pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2012,9 @@ static void restore_process_worker(struct work_struct *work)
        if (ret) {
                pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
                         p->pasid, PROCESS_BACK_OFF_TIME_MS);
-               ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
-                               msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
-               WARN(!ret, "reschedule restore work failed\n");
+               if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
+                                    msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
+                       kfd_process_restore_queues(p);
        }
 }
 
index f0f7f48af4137acb088e2e903f803c345babc3ec..386875e6eb96bacb6585ae58c5620db1f41fde92 100644 (file)
@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
                                mm, KFD_MIGRATE_TRIGGER_PREFETCH);
        *migrated = !r;
 
-       return r;
+       return 0;
 }
 
 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
index 6d2f60c61decc36711953fa5b0dd67888c652a32..f3f94d109726d380326c96e576afd5263e8a1daf 100644 (file)
@@ -3029,6 +3029,7 @@ static int dm_resume(void *handle)
                        dc_stream_release(dm_new_crtc_state->stream);
                        dm_new_crtc_state->stream = NULL;
                }
+               dm_new_crtc_state->base.color_mgmt_changed = true;
        }
 
        for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
index f09b9d49297e815f469cc54be1c1130711576a12..bbd0169010c2d50a2454e64f53342a9304ac6e89 100644 (file)
@@ -4261,6 +4261,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
                }
        }
 
+       /*
+        * If gpu_od is the only member in the list, that means gpu_od is an
+        * empty directory, so remove it.
+        */
+       if (list_is_singular(&adev->pm.od_kobj_list))
+               goto err_out;
+
        return 0;
 
 err_out:
index 3957af057d54ff1ed8d5f5f9545e51562cb3973c..c977ebe88001df958c74dab7cd771c39807f6c62 100644 (file)
@@ -2294,6 +2294,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
        return sizeof(*gpu_metrics);
 }
 
+static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int i;
+
+       for (i = 0; i < 16; i++)
+               pci_write_config_dword(adev->pdev, i * 4,
+                                      adev->pdev->saved_config_space[i]);
+       pci_restore_msi_state(adev->pdev);
+}
+
 static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
 {
        int ret = 0, index;
@@ -2315,6 +2326,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
        /* Restore the config space saved during init */
        amdgpu_device_load_pci_state(adev->pdev);
 
+       /* Certain platforms have switches which assign virtual BAR values to
+        * devices. OS uses the virtual BAR values and device behind the switch
+        * is assigned another BAR value. When device's config space registers
+        * are queried, switch returns the virtual BAR values. When mode-2 reset
+        * is performed, switch is unaware of it, and will continue to return
+        * the same virtual values to the OS. This affects
+        * pci_restore_config_space() API as it doesn't write the value saved if
+        * the current value read from config space is the same as what is
+        * saved. As a workaround, make sure the config space is restored
+        * always.
+        */
+       if (!(adev->flags & AMD_IS_APU))
+               smu_v13_0_6_restore_pci_config(smu);
+
        dev_dbg(smu->adev->dev, "wait for reset ack\n");
        do {
                ret = smu_cmn_wait_for_response(smu);
index e440f458b6633d71ab5c49c5e07c3687b6b025e8..93337543aac32b50121f1698c3c79950e3e67f4f 100644 (file)
@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
 
        __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
 
-       drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
-                                  &new_shadow_plane_state->fmtcnv_state);
+       drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
+                                  &shadow_plane_state->fmtcnv_state);
 }
 EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);
 
index 734412aae94dde5c08e6e575c405fd9805e98d8d..a9bf426f69b365caa5b335e167109b7c7f5be90e 100644 (file)
@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
                *value = gpu->identity.eco_id;
                break;
 
-       case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
-               *value = gpu->identity.nn_core_count;
-               break;
-
-       case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
-               *value = gpu->identity.nn_mad_per_core;
-               break;
-
-       case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
-               *value = gpu->identity.tp_core_count;
-               break;
-
-       case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
-               *value = gpu->identity.on_chip_sram_size;
-               break;
-
-       case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
-               *value = gpu->identity.axi_sram_size;
-               break;
-
        default:
                DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
                return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
        /* Disable TX clock gating on affected core revisions. */
        if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
            etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
-           etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
-           etnaviv_is_model_rev(gpu, GC2000, 0x6203))
+           etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
+           etnaviv_is_model_rev(gpu, GC7000, 0x6203))
                pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
 
        /* Disable SE and RA clock gating on affected core revisions. */
index 7d5e9158e13c1aca6df49f254b00dd72d3a27a9e..197e0037732ec84998aba60b1769a2fc305ea1bf 100644 (file)
@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
        /* Number of Neural Network cores. */
        u32 nn_core_count;
 
-       /* Number of MAD units per Neural Network core. */
-       u32 nn_mad_per_core;
-
-       /* Number of Tensor Processing cores. */
-       u32 tp_core_count;
-
-       /* Size in bytes of the SRAM inside the NPU. */
-       u32 on_chip_sram_size;
-
-       /* Size in bytes of the SRAM across the AXI bus. */
-       u32 axi_sram_size;
-
        /* Size of the vertex cache. */
        u32 vertex_cache_size;
 
index d8e7334de8ceac8d608ad97c04c8c3184db9267b..8665f2658d51b302f7e2e8ad9f52a438ccbc5d6f 100644 (file)
@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .thread_count = 128,
                .shader_core_count = 1,
                .nn_core_count = 0,
-               .nn_mad_per_core = 0,
-               .tp_core_count = 0,
-               .on_chip_sram_size = 0,
-               .axi_sram_size = 0,
                .vertex_cache_size = 8,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .register_max = 64,
                .thread_count = 256,
                .shader_core_count = 1,
-               .nn_core_count = 0,
-               .nn_mad_per_core = 0,
-               .tp_core_count = 0,
-               .on_chip_sram_size = 0,
-               .axi_sram_size = 0,
                .vertex_cache_size = 8,
                .vertex_output_buffer_size = 512,
                .pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .thread_count = 512,
                .shader_core_count = 2,
                .nn_core_count = 0,
-               .nn_mad_per_core = 0,
-               .tp_core_count = 0,
-               .on_chip_sram_size = 0,
-               .axi_sram_size = 0,
                .vertex_cache_size = 16,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .thread_count = 512,
                .shader_core_count = 2,
                .nn_core_count = 0,
-               .nn_mad_per_core = 0,
-               .tp_core_count = 0,
-               .on_chip_sram_size = 0,
-               .axi_sram_size = 0,
                .vertex_cache_size = 16,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .register_max = 64,
                .thread_count = 512,
                .shader_core_count = 2,
-               .nn_core_count = 0,
-               .nn_mad_per_core = 0,
-               .tp_core_count = 0,
-               .on_chip_sram_size = 0,
-               .axi_sram_size = 0,
                .vertex_cache_size = 16,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .thread_count = 1024,
                .shader_core_count = 4,
                .nn_core_count = 0,
-               .nn_mad_per_core = 0,
-               .tp_core_count = 0,
-               .on_chip_sram_size = 0,
-               .axi_sram_size = 0,
                .vertex_cache_size = 16,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .thread_count = 256,
                .shader_core_count = 1,
                .nn_core_count = 8,
-               .nn_mad_per_core = 64,
-               .tp_core_count = 4,
-               .on_chip_sram_size = 524288,
-               .axi_sram_size = 1048576,
                .vertex_cache_size = 16,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
                .thread_count = 256,
                .shader_core_count = 1,
                .nn_core_count = 6,
-               .nn_mad_per_core = 64,
-               .tp_core_count = 3,
-               .on_chip_sram_size = 262144,
-               .axi_sram_size = 0,
                .vertex_cache_size = 16,
                .vertex_output_buffer_size = 1024,
                .pixel_pipes = 1,
index 4f302cd5e1a6ca3a6fb1f15241c9568fc5ea74e7..58fed80c7392a02c317011365c9ce3165412bf3d 100644 (file)
@@ -34,7 +34,6 @@ gma500_gfx-y += \
          psb_intel_lvds.o \
          psb_intel_modes.o \
          psb_intel_sdvo.o \
-         psb_lid.o \
          psb_irq.o
 
 gma500_gfx-$(CONFIG_ACPI) +=  opregion.o
index dcfcd7b89d4a1dfe9fb5092b7aa63a111785b7fc..6dece8f0e380f7a447d582d6f914caa8d3d3704a 100644 (file)
@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
        }
 
        psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
-       /* This must occur after the backlight is properly initialised */
-       psb_lid_timer_init(dev_priv);
+
        return 0;
 }
 
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
 
 static void psb_chip_teardown(struct drm_device *dev)
 {
-       struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
-       psb_lid_timer_takedown(dev_priv);
        gma_intel_teardown_gmbus(dev);
 }
 
index c5edfa4aa4ccdd526fa39a53e30f24a0fee41d28..83c17689c454f7c27955ad37dcf7ae2a6399c95c 100644 (file)
 #define PSB_NUM_VBLANKS 2
 
 #define PSB_WATCHDOG_DELAY (HZ * 2)
-#define PSB_LID_DELAY (HZ / 10)
 
 #define PSB_MAX_BRIGHTNESS             100
 
@@ -491,11 +490,7 @@ struct drm_psb_private {
        /* Hotplug handling */
        struct work_struct hotplug_work;
 
-       /* LID-Switch */
-       spinlock_t lid_lock;
-       struct timer_list lid_timer;
        struct psb_intel_opregion opregion;
-       u32 lid_last_state;
 
        /* Watchdog */
        uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
        int i2c_bus;            /* I2C bus identifier for Moorestown */
 };
 
-/* psb_lid.c */
-extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
-extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
-
 /* modesetting */
 extern void psb_modeset_init(struct drm_device *dev);
 extern void psb_modeset_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
deleted file mode 100644 (file)
index 58a7fe3..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007, Intel Corporation.
- *
- * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- **************************************************************************/
-
-#include <linux/spinlock.h>
-
-#include "psb_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_reg.h"
-
-static void psb_lid_timer_func(struct timer_list *t)
-{
-       struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
-       struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
-       struct timer_list *lid_timer = &dev_priv->lid_timer;
-       unsigned long irq_flags;
-       u32 __iomem *lid_state = dev_priv->opregion.lid_state;
-       u32 pp_status;
-
-       if (readl(lid_state) == dev_priv->lid_last_state)
-               goto lid_timer_schedule;
-
-       if ((readl(lid_state)) & 0x01) {
-               /*lid state is open*/
-               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
-               do {
-                       pp_status = REG_READ(PP_STATUS);
-               } while ((pp_status & PP_ON) == 0 &&
-                        (pp_status & PP_SEQUENCE_MASK) != 0);
-
-               if (REG_READ(PP_STATUS) & PP_ON) {
-                       /*FIXME: should be backlight level before*/
-                       psb_intel_lvds_set_brightness(dev, 100);
-               } else {
-                       DRM_DEBUG("LVDS panel never powered up");
-                       return;
-               }
-       } else {
-               psb_intel_lvds_set_brightness(dev, 0);
-
-               REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
-               do {
-                       pp_status = REG_READ(PP_STATUS);
-               } while ((pp_status & PP_ON) == 0);
-       }
-       dev_priv->lid_last_state =  readl(lid_state);
-
-lid_timer_schedule:
-       spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-       if (!timer_pending(lid_timer)) {
-               lid_timer->expires = jiffies + PSB_LID_DELAY;
-               add_timer(lid_timer);
-       }
-       spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_init(struct drm_psb_private *dev_priv)
-{
-       struct timer_list *lid_timer = &dev_priv->lid_timer;
-       unsigned long irq_flags;
-
-       spin_lock_init(&dev_priv->lid_lock);
-       spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
-
-       timer_setup(lid_timer, psb_lid_timer_func, 0);
-
-       lid_timer->expires = jiffies + PSB_LID_DELAY;
-
-       add_timer(lid_timer);
-       spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
-}
-
-void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
-{
-       del_timer_sync(&dev_priv->lid_timer);
-}
-
index a0afe1ba6dd5ce2cb6c3dfd53b60874ebb9c747c..f9705430ada93057c3094c1cb20ec400ae64ffdd 100644 (file)
@@ -378,7 +378,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
                         err);
 
        /* Initialize CCS mode sysfs after early initialization of HW engines */
-       xe_gt_ccs_mode_sysfs_init(gt);
+       err = xe_gt_ccs_mode_sysfs_init(gt);
+       if (err)
+               goto err_force_wake;
 
        /*
         * Stash hardware-reported version.  Since this register does not exist
index 529fc286cd06c6d46bcfde3b39bcd0e0befb8b44..396aeb5b992424b24ceeabeee9d76581ef404dbe 100644 (file)
@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
  * and it is expected that there are no open drm clients while doing so.
  * The number of available compute slices is exposed to user through a per-gt
  * 'num_cslices' sysfs interface.
+ *
+ * Returns: 0 on success or a negative error value on failure.
  */
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
 {
        struct xe_device *xe = gt_to_xe(gt);
        int err;
 
        if (!xe_gt_ccs_mode_enabled(gt))
-               return;
+               return 0;
 
        err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
-       if (err) {
-               drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
-               return;
-       }
+       if (err)
+               return err;
 
-       err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
-       if (err) {
-               sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
-               drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
-                        __func__, err);
-       }
+       return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
 }
index f39975aaaab0db1c62e06cc912afd74d668b1303..f8779852cf0d26587e3b579f351dcdeaf93efa5d 100644 (file)
@@ -12,7 +12,7 @@
 #include "xe_platform_types.h"
 
 void xe_gt_apply_ccs_mode(struct xe_gt *gt);
-void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
+int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
 
 static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
 {
index 355edd4d758af7cf1e4c15daf00ed86bbd4de898..7f32547f94b266092afc75bd387f8b6b59f18cec 100644 (file)
@@ -1054,10 +1054,10 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
                                                           adj_len);
                break;
        case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
-               ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
+               ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
                break;
        case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
-               ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
+               ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
                break;
        default:
                drm_err(&xe->drm, "unexpected action 0x%04x\n", action);
index b545f850087cd8b9a7ae0031a2feb1fadba9458b..6b9b1cbedd379e35f78d6b943d46991f85caefa5 100644 (file)
@@ -53,7 +53,6 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
        struct xe_gt *gt = huc_to_gt(huc);
        struct xe_device *xe = gt_to_xe(gt);
        struct xe_bo *bo;
-       int err;
 
        /* we use a single object for both input and output */
        bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
@@ -66,13 +65,7 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
 
        huc->gsc_pkt = bo;
 
-       err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
-       if (err) {
-               free_gsc_pkt(&xe->drm, huc);
-               return err;
-       }
-
-       return 0;
+       return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
 }
 
 int xe_huc_init(struct xe_huc *huc)
index e6a8b6d8eab707da539cbc209f205d0ef02bba67..3c3c497b6b91141bb2948b1a124b1a144f1a4fdf 100644 (file)
@@ -965,9 +965,7 @@ static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
                }
                break;
        case REPORT_TYPE_MOUSE:
-               workitem->reports_supported |= STD_MOUSE | HIDPP;
-               if (djrcv_dev->type == recvr_type_mouse_only)
-                       workitem->reports_supported |= MULTIMEDIA;
+               workitem->reports_supported |= STD_MOUSE | HIDPP | MULTIMEDIA;
                break;
        }
 }
index f9cceaeffd0814411d6024a3dd714444953a80db..da5ea5a23b087cde332ed28bf21a542c758e8919 100644 (file)
@@ -944,9 +944,11 @@ static void mcp2221_hid_unregister(void *ptr)
 /* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
 static void mcp2221_remove(struct hid_device *hdev)
 {
+#if IS_REACHABLE(CONFIG_IIO)
        struct mcp2221 *mcp = hid_get_drvdata(hdev);
 
        cancel_delayed_work_sync(&mcp->init_work);
+#endif
 }
 
 #if IS_REACHABLE(CONFIG_IIO)
index ab5953fc24367afb075bd82756f57878e75d38c2..80e0f23c1c33ec698f107ee6f8ed942020142dbf 100644 (file)
@@ -481,10 +481,10 @@ static const struct joycon_ctlr_button_mapping n64con_button_mappings[] = {
        { BTN_TR,               JC_BTN_R,       },
        { BTN_TR2,              JC_BTN_LSTICK,  }, /* ZR */
        { BTN_START,            JC_BTN_PLUS,    },
-       { BTN_FORWARD,          JC_BTN_Y,       }, /* C UP */
-       { BTN_BACK,             JC_BTN_ZR,      }, /* C DOWN */
-       { BTN_LEFT,             JC_BTN_X,       }, /* C LEFT */
-       { BTN_RIGHT,            JC_BTN_MINUS,   }, /* C RIGHT */
+       { BTN_SELECT,           JC_BTN_Y,       }, /* C UP */
+       { BTN_X,                JC_BTN_ZR,      }, /* C DOWN */
+       { BTN_Y,                JC_BTN_X,       }, /* C LEFT */
+       { BTN_C,                JC_BTN_MINUS,   }, /* C RIGHT */
        { BTN_MODE,             JC_BTN_HOME,    },
        { BTN_Z,                JC_BTN_CAP,     },
        { /* sentinel */ },
index 2df1ab3c31cc54da812ee653face224f32e69fc2..d965382196c69e87cd79d1aad49deeab7da1bba5 100644 (file)
@@ -64,7 +64,6 @@
 /* flags */
 #define I2C_HID_STARTED                0
 #define I2C_HID_RESET_PENDING  1
-#define I2C_HID_READ_PENDING   2
 
 #define I2C_HID_PWR_ON         0x00
 #define I2C_HID_PWR_SLEEP      0x01
@@ -190,15 +189,10 @@ static int i2c_hid_xfer(struct i2c_hid *ihid,
                msgs[n].len = recv_len;
                msgs[n].buf = recv_buf;
                n++;
-
-               set_bit(I2C_HID_READ_PENDING, &ihid->flags);
        }
 
        ret = i2c_transfer(client->adapter, msgs, n);
 
-       if (recv_len)
-               clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
-
        if (ret != n)
                return ret < 0 ? ret : -EIO;
 
@@ -556,9 +550,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
 {
        struct i2c_hid *ihid = dev_id;
 
-       if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
-               return IRQ_HANDLED;
-
        i2c_hid_get_input(ihid);
 
        return IRQ_HANDLED;
@@ -735,12 +726,15 @@ static int i2c_hid_parse(struct hid_device *hid)
        mutex_lock(&ihid->reset_lock);
        do {
                ret = i2c_hid_start_hwreset(ihid);
-               if (ret)
+               if (ret == 0)
+                       ret = i2c_hid_finish_hwreset(ihid);
+               else
                        msleep(1000);
        } while (tries-- > 0 && ret);
+       mutex_unlock(&ihid->reset_lock);
 
        if (ret)
-               goto abort_reset;
+               return ret;
 
        use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
                                                                &rsize);
@@ -750,11 +744,8 @@ static int i2c_hid_parse(struct hid_device *hid)
                i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
        } else {
                rdesc = kzalloc(rsize, GFP_KERNEL);
-
-               if (!rdesc) {
-                       ret = -ENOMEM;
-                       goto abort_reset;
-               }
+               if (!rdesc)
+                       return -ENOMEM;
 
                i2c_hid_dbg(ihid, "asking HID report descriptor\n");
 
@@ -763,23 +754,10 @@ static int i2c_hid_parse(struct hid_device *hid)
                                            rdesc, rsize);
                if (ret) {
                        hid_err(hid, "reading report descriptor failed\n");
-                       goto abort_reset;
+                       goto out;
                }
        }
 
-       /*
-        * Windows directly reads the report-descriptor after sending reset
-        * and then waits for resets completion afterwards. Some touchpads
-        * actually wait for the report-descriptor to be read before signalling
-        * reset completion.
-        */
-       ret = i2c_hid_finish_hwreset(ihid);
-abort_reset:
-       clear_bit(I2C_HID_RESET_PENDING, &ihid->flags);
-       mutex_unlock(&ihid->reset_lock);
-       if (ret)
-               goto out;
-
        i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
 
        ret = hid_parse_report(hid, rdesc, rsize);
index a49c6affd7c4c48cdd09e3bdcca95139d0c066b8..dd5fc60874ba1d4f507e99fb5f28d87c16fdca9b 100644 (file)
@@ -948,6 +948,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
        if (!dev)
                return NULL;
 
+       dev->devc = &pdev->dev;
        ishtp_device_init(dev);
 
        init_waitqueue_head(&dev->wait_hw_ready);
@@ -983,7 +984,6 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
        }
 
        dev->ops = &ish_hw_ops;
-       dev->devc = &pdev->dev;
        dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
        return dev;
 }
index 01d2743444ec6cc7eec0945b69ba1c63195f09b1..3a989efae1420a0f2e5b965b55555fe8a85a4037 100644 (file)
@@ -137,7 +137,7 @@ void murmurhash3_128(const void *key, const int len, const u32 seed, void *out)
                        break;
                default:
                        break;
-               };
+               }
        }
        /* finalization */
 
index 56aa2a8b9d7153ac0792bbdc626ab5250cf074c7..7d0746b37c8ec791f111d6e589476eb2b500e9d4 100644 (file)
@@ -765,7 +765,7 @@ static struct table_device *open_table_device(struct mapped_device *md,
        return td;
 
 out_blkdev_put:
-       fput(bdev_file);
+       __fput_sync(bdev_file);
 out_free_td:
        kfree(td);
        return ERR_PTR(r);
@@ -778,7 +778,13 @@ static void close_table_device(struct table_device *td, struct mapped_device *md
 {
        if (md->disk->slave_dir)
                bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
-       fput(td->dm_dev.bdev_file);
+
+       /* Leverage async fput() if DMF_DEFERRED_REMOVE is set */
+       if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
+               fput(td->dm_dev.bdev_file);
+       else
+               __fput_sync(td->dm_dev.bdev_file);
+
        put_dax(td->dm_dev.dax_dev);
        list_del(&td->list);
        kfree(td);
index c95787cb908673c6ab1b236e9c447952e5e8e452..59b5dd0e2f41d2a8751a4f5139e39302acb2b7bd 100644 (file)
@@ -566,13 +566,61 @@ static void mv88e6xxx_translate_cmode(u8 cmode, unsigned long *supported)
                phy_interface_set_rgmii(supported);
 }
 
-static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
-                                      struct phylink_config *config)
+static void
+mv88e6250_setup_supported_interfaces(struct mv88e6xxx_chip *chip, int port,
+                                    struct phylink_config *config)
 {
        unsigned long *supported = config->supported_interfaces;
+       int err;
+       u16 reg;
 
-       /* Translate the default cmode */
-       mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+       err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
+       if (err) {
+               dev_err(chip->dev, "p%d: failed to read port status\n", port);
+               return;
+       }
+
+       switch (reg & MV88E6250_PORT_STS_PORTMODE_MASK) {
+       case MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY:
+       case MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY:
+       case MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY:
+       case MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY:
+               __set_bit(PHY_INTERFACE_MODE_REVMII, supported);
+               break;
+
+       case MV88E6250_PORT_STS_PORTMODE_MII_HALF:
+       case MV88E6250_PORT_STS_PORTMODE_MII_FULL:
+               __set_bit(PHY_INTERFACE_MODE_MII, supported);
+               break;
+
+       case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY:
+       case MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY:
+       case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY:
+       case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY:
+               __set_bit(PHY_INTERFACE_MODE_REVRMII, supported);
+               break;
+
+       case MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL:
+       case MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL:
+               __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+               break;
+
+       case MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII:
+               __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+               break;
+
+       default:
+               dev_err(chip->dev,
+                       "p%d: invalid port mode in status register: %04x\n",
+                       port, reg);
+       }
+}
+
+static void mv88e6250_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+                                      struct phylink_config *config)
+{
+       if (!mv88e6xxx_phy_is_internal(chip, port))
+               mv88e6250_setup_supported_interfaces(chip, port, config);
 
        config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100;
 }
index 86deeb347cbc1d82526f362f719c57ac17039bf0..ddadeb9bfdaeed6978d85d55ccd2e52710971529 100644 (file)
 #define MV88E6250_PORT_STS_PORTMODE_PHY_100_HALF       0x0900
 #define MV88E6250_PORT_STS_PORTMODE_PHY_10_FULL                0x0a00
 #define MV88E6250_PORT_STS_PORTMODE_PHY_100_FULL       0x0b00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF                0x0c00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF       0x0d00
-#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL                0x0e00
-#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL       0x0f00
+/* - Modes with PHY suffix use output instead of input clock
+ * - Modes without RMII or RGMII use MII
+ * - Modes without speed do not have a fixed speed specified in the manual
+ *   ("DC to x MHz" - variable clock support?)
+ */
+#define MV88E6250_PORT_STS_PORTMODE_MII_DISABLED               0x0000
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_RGMII              0x0100
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL_PHY 0x0200
+#define MV88E6250_PORT_STS_PORTMODE_MII_200_RMII_FULL_PHY      0x0400
+#define MV88E6250_PORT_STS_PORTMODE_MII_DUAL_100_RMII_FULL     0x0600
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL       0x0700
+#define MV88E6250_PORT_STS_PORTMODE_MII_HALF                   0x0800
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_HALF_PHY   0x0900
+#define MV88E6250_PORT_STS_PORTMODE_MII_FULL                   0x0a00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_100_RMII_FULL_PHY   0x0b00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_HALF_PHY            0x0c00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_HALF_PHY           0x0d00
+#define MV88E6250_PORT_STS_PORTMODE_MII_10_FULL_PHY            0x0e00
+#define MV88E6250_PORT_STS_PORTMODE_MII_100_FULL_PHY           0x0f00
 #define MV88E6XXX_PORT_STS_LINK                        0x0800
 #define MV88E6XXX_PORT_STS_DUPLEX              0x0400
 #define MV88E6XXX_PORT_STS_SPEED_MASK          0x0300
index 72ea97c5d5d424482fe6812cef1a013158319a70..82768b0e90262b80b949b959b40151a0ddd0b6a9 100644 (file)
@@ -436,10 +436,8 @@ static void umac_init(struct bcmasp_intf *intf)
        umac_wl(intf, 0x800, UMC_RX_MAX_PKT_SZ);
 }
 
-static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+static int bcmasp_tx_reclaim(struct bcmasp_intf *intf)
 {
-       struct bcmasp_intf *intf =
-               container_of(napi, struct bcmasp_intf, tx_napi);
        struct bcmasp_intf_stats64 *stats = &intf->stats64;
        struct device *kdev = &intf->parent->pdev->dev;
        unsigned long read, released = 0;
@@ -482,10 +480,16 @@ static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
                                                        DESC_RING_COUNT);
        }
 
-       /* Ensure all descriptors have been written to DRAM for the hardware
-        * to see updated contents.
-        */
-       wmb();
+       return released;
+}
+
+static int bcmasp_tx_poll(struct napi_struct *napi, int budget)
+{
+       struct bcmasp_intf *intf =
+               container_of(napi, struct bcmasp_intf, tx_napi);
+       int released = 0;
+
+       released = bcmasp_tx_reclaim(intf);
 
        napi_complete(&intf->tx_napi);
 
@@ -797,6 +801,7 @@ static void bcmasp_init_tx(struct bcmasp_intf *intf)
        intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
        intf->tx_spb_index = 0;
        intf->tx_spb_clean_index = 0;
+       memset(intf->tx_cbs, 0, sizeof(struct bcmasp_tx_cb) * DESC_RING_COUNT);
 
        /* Make sure channels are disabled */
        tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
@@ -885,6 +890,8 @@ static void bcmasp_netif_deinit(struct net_device *dev)
        } while (timeout-- > 0);
        tx_spb_dma_wl(intf, 0x0, TX_SPB_DMA_FIFO_CTRL);
 
+       bcmasp_tx_reclaim(intf);
+
        umac_enable_set(intf, UMC_CMD_TX_EN, 0);
 
        phy_stop(dev->phydev);
index 3e4fb3c3e8342ad3dbf7aa32df03fc4ce57e3cad..1be6d14030bcffc0fd149b6be0af819964284a4e 100644 (file)
@@ -2009,12 +2009,14 @@ static int b44_set_pauseparam(struct net_device *dev,
                bp->flags |= B44_FLAG_TX_PAUSE;
        else
                bp->flags &= ~B44_FLAG_TX_PAUSE;
-       if (bp->flags & B44_FLAG_PAUSE_AUTO) {
-               b44_halt(bp);
-               b44_init_rings(bp);
-               b44_init_hw(bp, B44_FULL_RESET);
-       } else {
-               __b44_set_flow_ctrl(bp, bp->flags);
+       if (netif_running(dev)) {
+               if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+                       b44_halt(bp);
+                       b44_init_rings(bp);
+                       b44_init_hw(bp, B44_FULL_RESET);
+               } else {
+                       __b44_set_flow_ctrl(bp, bp->flags);
+               }
        }
        spin_unlock_irq(&bp->lock);
 
index 57e61f9631678edf31a2ff237fe0301254a396e5..2c2ee79c4d77957761d8f3d9ca85bcecedd5fd0f 100644 (file)
@@ -1778,7 +1778,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
                if (!skb) {
                        bnxt_abort_tpa(cpr, idx, agg_bufs);
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
+                       cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
        } else {
@@ -1788,7 +1788,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
                if (!new_data) {
                        bnxt_abort_tpa(cpr, idx, agg_bufs);
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
+                       cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
 
@@ -1804,7 +1804,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                if (!skb) {
                        skb_free_frag(data);
                        bnxt_abort_tpa(cpr, idx, agg_bufs);
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
+                       cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
                skb_reserve(skb, bp->rx_offset);
@@ -1815,7 +1815,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
                skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
                if (!skb) {
                        /* Page reuse already handled by bnxt_rx_pages(). */
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
+                       cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
                        return NULL;
                }
        }
@@ -2094,11 +2094,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                        u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
                                                             cp_cons, agg_bufs,
                                                             false);
-                       if (!frag_len) {
-                               cpr->sw_stats.rx.rx_oom_discards += 1;
-                               rc = -ENOMEM;
-                               goto next_rx;
-                       }
+                       if (!frag_len)
+                               goto oom_next_rx;
                }
                xdp_active = true;
        }
@@ -2121,9 +2118,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                else
                                        bnxt_xdp_buff_frags_free(rxr, &xdp);
                        }
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
-                       rc = -ENOMEM;
-                       goto next_rx;
+                       goto oom_next_rx;
                }
        } else {
                u32 payload;
@@ -2134,29 +2129,21 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                        payload = 0;
                skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
                                      payload | len);
-               if (!skb) {
-                       cpr->sw_stats.rx.rx_oom_discards += 1;
-                       rc = -ENOMEM;
-                       goto next_rx;
-               }
+               if (!skb)
+                       goto oom_next_rx;
        }
 
        if (agg_bufs) {
                if (!xdp_active) {
                        skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
-                       if (!skb) {
-                               cpr->sw_stats.rx.rx_oom_discards += 1;
-                               rc = -ENOMEM;
-                               goto next_rx;
-                       }
+                       if (!skb)
+                               goto oom_next_rx;
                } else {
                        skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
                        if (!skb) {
                                /* we should be able to free the old skb here */
                                bnxt_xdp_buff_frags_free(rxr, &xdp);
-                               cpr->sw_stats.rx.rx_oom_discards += 1;
-                               rc = -ENOMEM;
-                               goto next_rx;
+                               goto oom_next_rx;
                        }
                }
        }
@@ -2234,6 +2221,11 @@ next_rx_no_prod_no_len:
        *raw_cons = tmp_raw_cons;
 
        return rc;
+
+oom_next_rx:
+       cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+       rc = -ENOMEM;
+       goto next_rx;
 }
 
 /* In netpoll mode, if we are using a combined completion ring, we need to
@@ -2280,7 +2272,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
        }
        rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
        if (rc && rc != -EBUSY)
-               cpr->sw_stats.rx.rx_netpoll_discards += 1;
+               cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
        return rc;
 }
 
@@ -9089,7 +9081,7 @@ static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
                                             BNXT_FW_HEALTH_WIN_BASE +
                                             BNXT_GRC_REG_CHIP_NUM);
                }
-               if (!BNXT_CHIP_P5(bp))
+               if (!BNXT_CHIP_P5_PLUS(bp))
                        return;
 
                status_loc = BNXT_GRC_REG_STATUS_P5 |
@@ -13037,6 +13029,16 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
        bnxt_rtnl_unlock_sp(bp);
 }
 
+static void bnxt_fw_fatal_close(struct bnxt *bp)
+{
+       bnxt_tx_disable(bp);
+       bnxt_disable_napi(bp);
+       bnxt_disable_int_sync(bp);
+       bnxt_free_irq(bp);
+       bnxt_clear_int_mode(bp);
+       pci_disable_device(bp->pdev);
+}
+
 static void bnxt_fw_reset_close(struct bnxt *bp)
 {
        bnxt_ulp_stop(bp);
@@ -13050,12 +13052,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
                pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
                if (val == 0xffff)
                        bp->fw_reset_min_dsecs = 0;
-               bnxt_tx_disable(bp);
-               bnxt_disable_napi(bp);
-               bnxt_disable_int_sync(bp);
-               bnxt_free_irq(bp);
-               bnxt_clear_int_mode(bp);
-               pci_disable_device(bp->pdev);
+               bnxt_fw_fatal_close(bp);
        }
        __bnxt_close_nic(bp, true, false);
        bnxt_vf_reps_free(bp);
@@ -15373,6 +15370,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct bnxt *bp = netdev_priv(netdev);
+       bool abort = false;
 
        netdev_info(netdev, "PCI I/O error detected\n");
 
@@ -15381,16 +15379,27 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 
        bnxt_ulp_stop(bp);
 
-       if (state == pci_channel_io_perm_failure) {
+       if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+               netdev_err(bp->dev, "Firmware reset already in progress\n");
+               abort = true;
+       }
+
+       if (abort || state == pci_channel_io_perm_failure) {
                rtnl_unlock();
                return PCI_ERS_RESULT_DISCONNECT;
        }
 
-       if (state == pci_channel_io_frozen)
+       /* Link is not reliable anymore if state is pci_channel_io_frozen
+        * so we disable bus master to prevent any potential bad DMAs before
+        * freeing kernel memory.
+        */
+       if (state == pci_channel_io_frozen) {
                set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+               bnxt_fw_fatal_close(bp);
+       }
 
        if (netif_running(netdev))
-               bnxt_close(netdev);
+               __bnxt_close_nic(bp, true, true);
 
        if (pci_is_enabled(pdev))
                pci_disable_device(pdev);
@@ -15474,6 +15483,7 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
        }
 
 reset_exit:
+       clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
        bnxt_clear_reservations(bp, true);
        rtnl_unlock();
 
index 48b9ddb2b1b38b385527f137124ce86a36ac036d..ffb9f9f15c5232e2aeb4a45c1209b6a0763062d7 100644 (file)
@@ -16107,8 +16107,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        val = FIELD_GET(I40E_PRTGL_SAH_MFS_MASK,
                        rd32(&pf->hw, I40E_PRTGL_SAH));
        if (val < MAX_FRAME_SIZE_DEFAULT)
-               dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
-                        pf->hw.port, val);
+               dev_warn(&pdev->dev, "MFS for port %x (%d) has been set below the default (%d)\n",
+                        pf->hw.port, val, MAX_FRAME_SIZE_DEFAULT);
 
        /* Add a filter to drop all Flow control frames from any VSI from being
         * transmitted. By doing so we stop a malicious VF from sending out
@@ -16650,7 +16650,7 @@ static int __init i40e_init_module(void)
         * since we need to be able to guarantee forward progress even under
         * memory pressure.
         */
-       i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
+       i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
        if (!i40e_wq) {
                pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
                return -ENOMEM;
index ef2440f3abf8b6aed9f47e5948f67a5dec52b51e..166832a4213a289f17198d748f4abea1d5e4efc4 100644 (file)
@@ -3502,6 +3502,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
        spin_unlock_bh(&adapter->cloud_filter_list_lock);
 }
 
+/**
+ * iavf_is_tc_config_same - Compare the mqprio TC config with the
+ * TC config already configured on this adapter.
+ * @adapter: board private structure
+ * @mqprio_qopt: TC config received from kernel.
+ *
+ * This function compares the TC config received from the kernel
+ * with the config already configured on the adapter.
+ *
+ * Return: True if configuration is same, false otherwise.
+ **/
+static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
+                                  struct tc_mqprio_qopt *mqprio_qopt)
+{
+       struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
+       int i;
+
+       if (adapter->num_tc != mqprio_qopt->num_tc)
+               return false;
+
+       for (i = 0; i < adapter->num_tc; i++) {
+               if (ch[i].count != mqprio_qopt->count[i] ||
+                   ch[i].offset != mqprio_qopt->offset[i])
+                       return false;
+       }
+       return true;
+}
+
 /**
  * __iavf_setup_tc - configure multiple traffic classes
  * @netdev: network interface device structure
@@ -3559,7 +3587,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
                if (ret)
                        return ret;
                /* Return if same TC config is requested */
-               if (adapter->num_tc == num_tc)
+               if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
                        return 0;
                adapter->num_tc = num_tc;
 
index 21d26e19338a69acb265a279bfc633bbfea0cad2..d10a4be965b591027c357be2c5ede2ab3269a7da 100644 (file)
@@ -856,6 +856,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
                return 0;
        }
 
+       if (flags & ICE_VF_RESET_LOCK)
+               mutex_lock(&vf->cfg_lock);
+       else
+               lockdep_assert_held(&vf->cfg_lock);
+
        lag = pf->lag;
        mutex_lock(&pf->lag_mutex);
        if (lag && lag->bonded && lag->primary) {
@@ -867,11 +872,6 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
                        act_prt = ICE_LAG_INVALID_PORT;
        }
 
-       if (flags & ICE_VF_RESET_LOCK)
-               mutex_lock(&vf->cfg_lock);
-       else
-               lockdep_assert_held(&vf->cfg_lock);
-
        if (ice_is_vf_disabled(vf)) {
                vsi = ice_get_vf_vsi(vf);
                if (!vsi) {
@@ -956,14 +956,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
        ice_mbx_clear_malvf(&vf->mbx_info);
 
 out_unlock:
-       if (flags & ICE_VF_RESET_LOCK)
-               mutex_unlock(&vf->cfg_lock);
-
        if (lag && lag->bonded && lag->primary &&
            act_prt != ICE_LAG_INVALID_PORT)
                ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
        mutex_unlock(&pf->lag_mutex);
 
+       if (flags & ICE_VF_RESET_LOCK)
+               mutex_unlock(&vf->cfg_lock);
+
        return err;
 }
 
index 90316dc58630874d98aeb4095dc991c259a593f4..6bc56c7c181e4882d3ec99ea86e01c8188682175 100644 (file)
@@ -298,6 +298,7 @@ struct igc_adapter {
 
        /* LEDs */
        struct mutex led_mutex;
+       struct igc_led_classdev *leds;
 };
 
 void igc_up(struct igc_adapter *adapter);
@@ -723,6 +724,7 @@ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
 void igc_ptp_tx_tstamp_event(struct igc_adapter *adapter);
 
 int igc_led_setup(struct igc_adapter *adapter);
+void igc_led_free(struct igc_adapter *adapter);
 
 #define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
 
index bf240c5daf8657b3f9dd89f58fda05f87e57e5ae..3929b25b6ae6eb55335a5a6eb7b6494c4cc21982 100644 (file)
@@ -236,8 +236,8 @@ static void igc_led_get_name(struct igc_adapter *adapter, int index, char *buf,
                 pci_dev_id(adapter->pdev), index);
 }
 
-static void igc_setup_ldev(struct igc_led_classdev *ldev,
-                          struct net_device *netdev, int index)
+static int igc_setup_ldev(struct igc_led_classdev *ldev,
+                         struct net_device *netdev, int index)
 {
        struct igc_adapter *adapter = netdev_priv(netdev);
        struct led_classdev *led_cdev = &ldev->led;
@@ -257,24 +257,46 @@ static void igc_setup_ldev(struct igc_led_classdev *ldev,
        led_cdev->hw_control_get = igc_led_hw_control_get;
        led_cdev->hw_control_get_device = igc_led_hw_control_get_device;
 
-       devm_led_classdev_register(&netdev->dev, led_cdev);
+       return led_classdev_register(&netdev->dev, led_cdev);
 }
 
 int igc_led_setup(struct igc_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       struct device *dev = &netdev->dev;
        struct igc_led_classdev *leds;
-       int i;
+       int i, err;
 
        mutex_init(&adapter->led_mutex);
 
-       leds = devm_kcalloc(dev, IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
+       leds = kcalloc(IGC_NUM_LEDS, sizeof(*leds), GFP_KERNEL);
        if (!leds)
                return -ENOMEM;
 
-       for (i = 0; i < IGC_NUM_LEDS; i++)
-               igc_setup_ldev(leds + i, netdev, i);
+       for (i = 0; i < IGC_NUM_LEDS; i++) {
+               err = igc_setup_ldev(leds + i, netdev, i);
+               if (err)
+                       goto err;
+       }
+
+       adapter->leds = leds;
 
        return 0;
+
+err:
+       for (i--; i >= 0; i--)
+               led_classdev_unregister(&((leds + i)->led));
+
+       kfree(leds);
+       return err;
+}
+
+void igc_led_free(struct igc_adapter *adapter)
+{
+       struct igc_led_classdev *leds = adapter->leds;
+       int i;
+
+       for (i = 0; i < IGC_NUM_LEDS; i++)
+               led_classdev_unregister(&((leds + i)->led));
+
+       kfree(leds);
 }
index 35ad40a803cb64a66b983b3a2103a8f15b808df5..4d975d620a8e4b925fe3798a792f2a365877c5a8 100644 (file)
@@ -7021,6 +7021,9 @@ static void igc_remove(struct pci_dev *pdev)
        cancel_work_sync(&adapter->watchdog_task);
        hrtimer_cancel(&adapter->hrtimer);
 
+       if (IS_ENABLED(CONFIG_IGC_LEDS))
+               igc_led_free(adapter);
+
        /* Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
         */
index be709f83f3318cd8766ca6a2e6b51b88fa8a30e3..e8b73b9d75e3118f56ee42a322d05491b0c325f0 100644 (file)
@@ -2181,7 +2181,6 @@ void rvu_npc_freemem(struct rvu *rvu)
 
        kfree(pkind->rsrc.bmap);
        npc_mcam_rsrcs_deinit(rvu);
-       kfree(mcam->counters.bmap);
        if (rvu->kpu_prfl_addr)
                iounmap(rvu->kpu_prfl_addr);
        else
index b2cabd6ab86cb9044f8d0dc404fa8a052d31938c..cc9bcc420032428eee5188e692991d50aaffc684 100644 (file)
@@ -1640,6 +1640,7 @@ static const struct macsec_ops macsec_offload_ops = {
        .mdo_add_secy = mlx5e_macsec_add_secy,
        .mdo_upd_secy = mlx5e_macsec_upd_secy,
        .mdo_del_secy = mlx5e_macsec_del_secy,
+       .rx_uses_md_dst = true,
 };
 
 bool mlx5e_macsec_handle_tx_skb(struct mlx5e_macsec *macsec, struct sk_buff *skb)
index e4d7739bd7c888fc774aeb82a5ece0ed894497d8..4a79c0d7e7ad8546aae5e51f7c23ce7ee8642674 100644 (file)
@@ -849,7 +849,7 @@ free_skb:
 
 static const struct mlxsw_listener mlxsw_emad_rx_listener =
        MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
-                 EMAD, DISCARD);
+                 EMAD, FORWARD);
 
 static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
 {
index 53b150b7ae4e708080cf152c1260983f4cbfc54e..6c06b0592760845f19311fdf4f91672f9209daef 100644 (file)
@@ -1357,24 +1357,20 @@ static struct mlxsw_linecards_event_ops mlxsw_env_event_ops = {
        .got_inactive = mlxsw_env_got_inactive,
 };
 
-static int mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
+static void mlxsw_env_max_module_eeprom_len_query(struct mlxsw_env *mlxsw_env)
 {
        char mcam_pl[MLXSW_REG_MCAM_LEN];
-       bool mcia_128b_supported;
+       bool mcia_128b_supported = false;
        int err;
 
        mlxsw_reg_mcam_pack(mcam_pl,
                            MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
        err = mlxsw_reg_query(mlxsw_env->core, MLXSW_REG(mcam), mcam_pl);
-       if (err)
-               return err;
-
-       mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
-                             &mcia_128b_supported);
+       if (!err)
+               mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_MCIA_128B,
+                                     &mcia_128b_supported);
 
        mlxsw_env->max_eeprom_len = mcia_128b_supported ? 128 : 48;
-
-       return 0;
 }
 
 int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
@@ -1445,15 +1441,11 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core,
        if (err)
                goto err_type_set;
 
-       err = mlxsw_env_max_module_eeprom_len_query(env);
-       if (err)
-               goto err_eeprom_len_query;
-
+       mlxsw_env_max_module_eeprom_len_query(env);
        env->line_cards[0]->active = true;
 
        return 0;
 
-err_eeprom_len_query:
 err_type_set:
        mlxsw_env_module_event_disable(env, 0);
 err_mlxsw_env_module_event_enable:
index af99bf17eb36de0793b008c063c2a93c6c5b22c8..f42a1b1c9368733d2623643ac6abdd8ffe9b9aff 100644 (file)
@@ -1530,7 +1530,7 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
 {
        struct pci_dev *pdev = mlxsw_pci->pdev;
        char mcam_pl[MLXSW_REG_MCAM_LEN];
-       bool pci_reset_supported;
+       bool pci_reset_supported = false;
        u32 sys_status;
        int err;
 
@@ -1548,11 +1548,9 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
        mlxsw_reg_mcam_pack(mcam_pl,
                            MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
        err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
-       if (err)
-               return err;
-
-       mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
-                             &pci_reset_supported);
+       if (!err)
+               mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
+                                     &pci_reset_supported);
 
        if (pci_reset_supported) {
                pci_dbg(pdev, "Starting PCI reset flow\n");
index f20052776b3f2e9a3ac7181921a12c3858abea4b..92a406f02eae746b4244ca56619fb75c1a2f9d93 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
+#include <linux/idr.h>
 #include <net/devlink.h>
 #include <trace/events/mlxsw.h>
 
@@ -58,41 +59,43 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
 static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
 {
-       u16 id;
+       int id;
 
-       id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
-       if (id < tcam->max_regions) {
-               __set_bit(id, tcam->used_regions);
-               *p_id = id;
-               return 0;
-       }
-       return -ENOBUFS;
+       id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
+                          GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       *p_id = id;
+
+       return 0;
 }
 
 static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 id)
 {
-       __clear_bit(id, tcam->used_regions);
+       ida_free(&tcam->used_regions, id);
 }
 
 static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                          u16 *p_id)
 {
-       u16 id;
+       int id;
 
-       id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
-       if (id < tcam->max_groups) {
-               __set_bit(id, tcam->used_groups);
-               *p_id = id;
-               return 0;
-       }
-       return -ENOBUFS;
+       id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
+                          GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       *p_id = id;
+
+       return 0;
 }
 
 static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 id)
 {
-       __clear_bit(id, tcam->used_groups);
+       ida_free(&tcam->used_groups, id);
 }
 
 struct mlxsw_sp_acl_tcam_pattern {
@@ -715,7 +718,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
                             rehash.dw.work);
        int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
 
+       mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
+       mutex_unlock(&vregion->lock);
        if (credits < 0)
                /* Rehash gone out of credits so it was interrupted.
                 * Schedule the work as soon as possible to continue.
@@ -725,6 +730,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+       /* The entry markers are relative to the current chunk and therefore
+        * need to be reset together with the chunk marker.
+        */
+       ctx->current_vchunk = NULL;
+       ctx->start_ventry = NULL;
+       ctx->stop_ventry = NULL;
+}
+
 static void
 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
 {
@@ -747,7 +763,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
         * the current chunk pointer to make sure all chunks
         * are properly migrated.
         */
-       vregion->rehash.ctx.current_vchunk = NULL;
+       mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
 }
 
 static struct mlxsw_sp_acl_tcam_vregion *
@@ -820,10 +836,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
 
        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
+               struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
+
                mutex_lock(&tcam->lock);
                list_del(&vregion->tlist);
                mutex_unlock(&tcam->lock);
-               cancel_delayed_work_sync(&vregion->rehash.dw);
+               if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
+                   ctx->hints_priv)
+                       ops->region_rehash_hints_put(ctx->hints_priv);
        }
        mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
        if (vregion->region2)
@@ -1154,8 +1174,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_ventry *ventry,
                                      bool *activity)
 {
-       return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
-                                                   ventry->entry, activity);
+       struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
+       int err;
+
+       mutex_lock(&vregion->lock);
+       err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
+                                                  activity);
+       mutex_unlock(&vregion->lock);
+       return err;
 }
 
 static int
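The hunks above move vregion->lock out of the migration helper: it is now taken around the whole rehash work item and around the activity read, so the two cannot interleave. A minimal sketch of that serialization pattern, with hypothetical names (struct foo, foo_rehash_work, foo_activity_get):

    struct foo {
            struct mutex lock;              /* serializes rehash vs. readers */
            struct delayed_work rehash_dw;
    };

    static void foo_rehash_work(struct work_struct *work)
    {
            struct foo *f = container_of(work, struct foo, rehash_dw.work);

            mutex_lock(&f->lock);
            /* migrate entries in bounded steps ("credits") */
            mutex_unlock(&f->lock);
    }

    static int foo_activity_get(struct foo *f, bool *activity)
    {
            int err;

            mutex_lock(&f->lock);
            err = 0;                /* query hardware while migration is paused */
            *activity = false;
            mutex_unlock(&f->lock);
            return err;
    }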
@@ -1189,6 +1215,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
 {
        struct mlxsw_sp_acl_tcam_chunk *new_chunk;
 
+       WARN_ON(vchunk->chunk2);
+
        new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
        if (IS_ERR(new_chunk))
                return PTR_ERR(new_chunk);
@@ -1207,7 +1235,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
 {
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
        vchunk->chunk2 = NULL;
-       ctx->current_vchunk = NULL;
+       mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 }
 
 static int
@@ -1230,6 +1258,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                return 0;
        }
 
+       if (list_empty(&vchunk->ventry_list))
+               goto out;
+
        /* If the migration got interrupted, we have the ventry to start from
         * stored in context.
         */
@@ -1239,6 +1270,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                ventry = list_first_entry(&vchunk->ventry_list,
                                          typeof(*ventry), list);
 
+       WARN_ON(ventry->vchunk != vchunk);
+
        list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
                /* During rollback, once we reach the ventry that failed
                 * to migrate, we are done.
@@ -1279,6 +1312,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                }
        }
 
+out:
        mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
        return 0;
 }
@@ -1292,6 +1326,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        int err;
 
+       if (list_empty(&vregion->vchunk_list))
+               return 0;
+
        /* If the migration got interrupted, we have the vchunk
         * we are working on stored in context.
         */
@@ -1320,16 +1357,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
        int err, err2;
 
        trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
-       mutex_lock(&vregion->lock);
        err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                   ctx, credits);
        if (err) {
+               if (ctx->this_is_rollback)
+                       return err;
                /* In case migration was not successful, we need to swap
                 * so the original region pointer is assigned again
                 * to vregion->region.
                 */
                swap(vregion->region, vregion->region2);
-               ctx->current_vchunk = NULL;
+               mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
                ctx->this_is_rollback = true;
                err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
                                                            ctx, credits);
@@ -1340,7 +1378,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
                        /* Let the rollback to be continued later on. */
                }
        }
-       mutex_unlock(&vregion->lock);
        trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
        return err;
 }
@@ -1389,6 +1426,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
 
        ctx->hints_priv = hints_priv;
        ctx->this_is_rollback = false;
+       mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
 
        return 0;
 
@@ -1441,7 +1479,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
        err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
                                                ctx, credits);
        if (err) {
-               dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+               dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
+               return;
        }
 
        if (*credits >= 0)
@@ -1549,19 +1588,11 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
        if (max_tcam_regions < max_regions)
                max_regions = max_tcam_regions;
 
-       tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
-       if (!tcam->used_regions) {
-               err = -ENOMEM;
-               goto err_alloc_used_regions;
-       }
+       ida_init(&tcam->used_regions);
        tcam->max_regions = max_regions;
 
        max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
-       tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
-       if (!tcam->used_groups) {
-               err = -ENOMEM;
-               goto err_alloc_used_groups;
-       }
+       ida_init(&tcam->used_groups);
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                  ACL_MAX_GROUP_SIZE);
@@ -1575,10 +1606,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
        return 0;
 
 err_tcam_init:
-       bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
-       bitmap_free(tcam->used_regions);
-err_alloc_used_regions:
+       ida_destroy(&tcam->used_groups);
+       ida_destroy(&tcam->used_regions);
        mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
 err_rehash_params_register:
        mutex_destroy(&tcam->lock);
@@ -1591,8 +1620,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
 
        ops->fini(mlxsw_sp, tcam->priv);
-       bitmap_free(tcam->used_groups);
-       bitmap_free(tcam->used_regions);
+       ida_destroy(&tcam->used_groups);
+       ida_destroy(&tcam->used_regions);
        mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
        mutex_destroy(&tcam->lock);
 }
index 462bf448497d33b74618c1f78001c0924c666dd5..79a1d8606512531730c2da490e01f47ac0fc6399 100644 (file)
@@ -6,15 +6,16 @@
 
 #include <linux/list.h>
 #include <linux/parman.h>
+#include <linux/idr.h>
 
 #include "reg.h"
 #include "spectrum.h"
 #include "core_acl_flex_keys.h"
 
 struct mlxsw_sp_acl_tcam {
-       unsigned long *used_regions; /* bit array */
+       struct ida used_regions;
        unsigned int max_regions;
-       unsigned long *used_groups;  /* bit array */
+       struct ida used_groups;
        unsigned int max_groups;
        unsigned int max_group_size;
        struct mutex lock; /* guards vregion list */
index fcb756d77681cbaf2a17d3e21ddffa5543bc9a84..9b1f639f64a10cfe89255996bb43d68771f38e5e 100644 (file)
@@ -2722,19 +2722,18 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
        struct platform_device *pdev = priv->pdev;
        struct net_device *ndev = priv->ndev;
        struct device *dev = &pdev->dev;
-       const char *dev_name;
+       const char *devname = dev_name(dev);
        unsigned long flags;
        int error, irq_num;
 
        if (irq_name) {
-               dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
-               if (!dev_name)
+               devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
+               if (!devname)
                        return -ENOMEM;
 
                irq_num = platform_get_irq_byname(pdev, irq_name);
                flags = 0;
        } else {
-               dev_name = ndev->name;
                irq_num = platform_get_irq(pdev, 0);
                flags = IRQF_SHARED;
        }
@@ -2744,9 +2743,9 @@ static int ravb_setup_irq(struct ravb_private *priv, const char *irq_name,
        if (irq)
                *irq = irq_num;
 
-       error = devm_request_irq(dev, irq_num, handler, flags, dev_name, ndev);
+       error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);
        if (error)
-               netdev_err(ndev, "cannot request IRQ %s\n", dev_name);
+               netdev_err(ndev, "cannot request IRQ %s\n", devname);
 
        return error;
 }
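The ravb hunk above switches the requested IRQ names from the netdev name to the platform device name. A plausible reading is that dev_name(dev) is stable at probe time, whereas the netdev name may still be the unformatted template or may later be renamed, leaving stale entries in /proc/interrupts; the old local variable also shadowed the dev_name() helper. A minimal sketch of the devm-managed pattern, under those assumptions:

    const char *devname = dev_name(dev);    /* e.g. "e6800000.ethernet" */

    if (irq_name) {
            /* per-channel IRQ named "<device>:<channel>", freed with the device */
            devname = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", devname, ch);
            if (!devname)
                    return -ENOMEM;
    }

    error = devm_request_irq(dev, irq_num, handler, flags, devname, ndev);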
index c66618d91c28fe2bdf9886b3c476bdb014cd5fe5..f89716b1cfb640577d7ca009adfb99e9b9f00c54 100644 (file)
@@ -784,6 +784,11 @@ static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
                struct am65_cpts_skb_cb_data *skb_cb =
                                        (struct am65_cpts_skb_cb_data *)skb->cb;
 
+               if ((ptp_classify_raw(skb) & PTP_CLASS_V1) &&
+                   ((mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK) ==
+                    (skb_cb->skb_mtype_seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK)))
+                       mtype_seqid = skb_cb->skb_mtype_seqid;
+
                if (mtype_seqid == skb_cb->skb_mtype_seqid) {
                        u64 ns = event->timestamp;
 
index cf7b73f8f450728930587dc0646a0bbaa1d2b476..b69af69a1ccd3614ab0051b3c7675ab6a42a7872 100644 (file)
@@ -421,12 +421,14 @@ static int prueth_init_rx_chns(struct prueth_emac *emac,
                if (!i)
                        fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
                                                                     i);
-               rx_chn->irq[i] = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
-               if (rx_chn->irq[i] <= 0) {
-                       ret = rx_chn->irq[i];
+               ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+               if (ret <= 0) {
+                       if (!ret)
+                               ret = -ENXIO;
                        netdev_err(ndev, "Failed to get rx dma irq");
                        goto fail;
                }
+               rx_chn->irq[i] = ret;
        }
 
        return 0;
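The hunk above follows the usual kernel convention for IRQ lookup helpers: a negative value is an errno and zero means "no IRQ", which must not be treated as success. Storing the value only after validation also keeps the error path from returning 0. A minimal sketch with a hypothetical get_irq() helper:

    ret = get_irq();        /* hypothetical helper returning <= 0 on failure */
    if (ret <= 0)
            return ret ? ret : -ENXIO;      /* map "no IRQ" (0) to a real errno */
    rx_irq = ret;           /* only store it once it is known to be valid */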
index 6dff2c85682d8bcdd97ca614447e205919d5decd..6fae161cbcb822614a8f36935559f364ee0a7dc8 100644 (file)
@@ -1598,7 +1598,7 @@ static void wx_set_num_queues(struct wx *wx)
  */
 static int wx_acquire_msix_vectors(struct wx *wx)
 {
-       struct irq_affinity affd = {0, };
+       struct irq_affinity affd = { .pre_vectors = 1 };
        int nvecs, i;
 
        /* We start by asking for one vector per queue pair */
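The hunk above replaces the positional '{0, }' initializer with a designated one that reserves the first vector for non-queue use. A minimal sketch of how struct irq_affinity feeds into the PCI MSI-X allocation, assuming pdev and want_vectors come from the surrounding probe code:

    /* reserve vector 0 for misc interrupts; spread the rest across queues */
    struct irq_affinity affd = { .pre_vectors = 1 };
    int nvecs;

    nvecs = pci_alloc_irq_vectors_affinity(pdev, 2, want_vectors,
                                           PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                           &affd);
    if (nvecs < 0)
            return nvecs;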
index 2fa511227eac8490314c09821c7b6f1e1fdfed43..93295916b1d2b80751637dee4a0006688958c428 100644 (file)
@@ -20,8 +20,6 @@
 #include "txgbe_phy.h"
 #include "txgbe_hw.h"
 
-#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw"
-
 static int txgbe_swnodes_register(struct txgbe *txgbe)
 {
        struct txgbe_nodes *nodes = &txgbe->nodes;
@@ -573,8 +571,8 @@ static int txgbe_clock_register(struct txgbe *txgbe)
        char clk_name[32];
        struct clk *clk;
 
-       snprintf(clk_name, sizeof(clk_name), "%s.%d",
-                TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev));
+       snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
+                pci_dev_id(pdev));
 
        clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
        if (IS_ERR(clk))
@@ -636,7 +634,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe)
 
        info.parent = &pdev->dev;
        info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]);
-       info.name = TXGBE_I2C_CLK_DEV_NAME;
+       info.name = "i2c_designware";
        info.id = pci_dev_id(pdev);
 
        info.res = &DEFINE_RES_IRQ(pdev->irq);
index ba4704c2c640b860c55655607f8dd5cc044cfa3e..e62d6cbdf9bc6458064c7c0852fc23db095ae642 100644 (file)
@@ -1098,11 +1098,12 @@ out_hashtable:
 static void gtp_dellink(struct net_device *dev, struct list_head *head)
 {
        struct gtp_dev *gtp = netdev_priv(dev);
+       struct hlist_node *next;
        struct pdp_ctx *pctx;
        int i;
 
        for (i = 0; i < gtp->hash_size; i++)
-               hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
+               hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
                        pdp_context_delete(pctx);
 
        list_del_rcu(&gtp->list);
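The gtp hunk above switches to the _safe hlist iterator because pdp_context_delete() removes (and eventually frees) the entry being visited; the plain iterator would then follow a dangling next pointer. A minimal sketch of the pattern, with a hypothetical list head:

    struct pdp_ctx *pctx;
    struct hlist_node *next;

    /* 'next' caches the follower before the current entry can go away */
    hlist_for_each_entry_safe(pctx, next, head, hlist_tid)
            pdp_context_delete(pctx);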
index 0206b84284ab5e0c95981ff86fa4b2a2a14c5c51..ff016c11b4a0383b6653a37ef4bc6344fff3f703 100644 (file)
@@ -999,10 +999,12 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
        struct metadata_dst *md_dst;
        struct macsec_rxh_data *rxd;
        struct macsec_dev *macsec;
+       bool is_macsec_md_dst;
 
        rcu_read_lock();
        rxd = macsec_data_rcu(skb->dev);
        md_dst = skb_metadata_dst(skb);
+       is_macsec_md_dst = md_dst && md_dst->type == METADATA_MACSEC;
 
        list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
                struct sk_buff *nskb;
@@ -1013,14 +1015,42 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
                 * the SecTAG, so we have to deduce which port to deliver to.
                 */
                if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
-                       struct macsec_rx_sc *rx_sc = NULL;
+                       const struct macsec_ops *ops;
 
-                       if (md_dst && md_dst->type == METADATA_MACSEC)
-                               rx_sc = find_rx_sc(&macsec->secy, md_dst->u.macsec_info.sci);
+                       ops = macsec_get_ops(macsec, NULL);
 
-                       if (md_dst && md_dst->type == METADATA_MACSEC && !rx_sc)
+                       if (ops->rx_uses_md_dst && !is_macsec_md_dst)
                                continue;
 
+                       if (is_macsec_md_dst) {
+                               struct macsec_rx_sc *rx_sc;
+
+                               /* All drivers that implement MACsec offload
+                                * support using skb metadata destinations must
+                                * indicate that they do so.
+                                */
+                               DEBUG_NET_WARN_ON_ONCE(!ops->rx_uses_md_dst);
+                               rx_sc = find_rx_sc(&macsec->secy,
+                                                  md_dst->u.macsec_info.sci);
+                               if (!rx_sc)
+                                       continue;
+                               /* device indicated macsec offload occurred */
+                               skb->dev = ndev;
+                               skb->pkt_type = PACKET_HOST;
+                               eth_skb_pkt_type(skb, ndev);
+                               ret = RX_HANDLER_ANOTHER;
+                               goto out;
+                       }
+
+                       /* This datapath is insecure because it is unable to
+                        * enforce isolation of broadcast/multicast traffic and
+                        * unicast traffic with promiscuous mode on the macsec
+                        * netdev. Since the core stack has no mechanism to
+                        * check that the hardware did indeed receive MACsec
+                        * traffic, it is possible that the response handling
+                        * done by the MACsec port was to a plaintext packet.
+                        * This violates the MACsec protocol standard.
+                        */
                        if (ether_addr_equal_64bits(hdr->h_dest,
                                                    ndev->dev_addr)) {
                                /* exact match, divert skb to this port */
@@ -1036,14 +1066,10 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
                                        break;
 
                                nskb->dev = ndev;
-                               if (ether_addr_equal_64bits(hdr->h_dest,
-                                                           ndev->broadcast))
-                                       nskb->pkt_type = PACKET_BROADCAST;
-                               else
-                                       nskb->pkt_type = PACKET_MULTICAST;
+                               eth_skb_pkt_type(nskb, ndev);
 
                                __netif_rx(nskb);
-                       } else if (rx_sc || ndev->flags & IFF_PROMISC) {
+                       } else if (ndev->flags & IFF_PROMISC) {
                                skb->dev = ndev;
                                skb->pkt_type = PACKET_HOST;
                                ret = RX_HANDLER_ANOTHER;
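The macsec hunk above asks the offloading driver, via its macsec_ops, whether received skbs carry a MACsec metadata destination; only then is md_dst trusted for the SCI lookup, otherwise the (documented as insecure) address-matching fallback is used. A sketch of how a driver might advertise that capability; the ops instance name and the elided callbacks are illustrative:

    static const struct macsec_ops example_macsec_ops = {
            /* ... offload add/del callbacks ... */
            .rx_uses_md_dst = true,     /* RX skbs carry a METADATA_MACSEC dst */
    };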
index fa8c6fdcf30181067c7c3f164e997171908a70b8..d7aaefb5226b62ad5de3c3783c189b8f45b1a7c2 100644 (file)
@@ -695,7 +695,8 @@ static int dp83869_configure_mode(struct phy_device *phydev,
        phy_ctrl_val = dp83869->mode;
        if (phydev->interface == PHY_INTERFACE_MODE_MII) {
                if (dp83869->mode == DP83869_100M_MEDIA_CONVERT ||
-                   dp83869->mode == DP83869_RGMII_100_BASE) {
+                   dp83869->mode == DP83869_RGMII_100_BASE ||
+                   dp83869->mode == DP83869_RGMII_COPPER_ETHERNET) {
                        phy_ctrl_val |= DP83869_OP_MODE_MII;
                } else {
                        phydev_err(phydev, "selected op-mode is not valid with MII mode\n");
index 0f3a1538a8b8ee045953a3c5ff308dc824ea7c0a..f4f9412d0cd7e256f4b2e962dd18974e1120c0fb 100644 (file)
 #define   MTK_PHY_LED_ON_LINK1000              BIT(0)
 #define   MTK_PHY_LED_ON_LINK100               BIT(1)
 #define   MTK_PHY_LED_ON_LINK10                        BIT(2)
+#define   MTK_PHY_LED_ON_LINK                  (MTK_PHY_LED_ON_LINK10 |\
+                                                MTK_PHY_LED_ON_LINK100 |\
+                                                MTK_PHY_LED_ON_LINK1000)
 #define   MTK_PHY_LED_ON_LINKDOWN              BIT(3)
 #define   MTK_PHY_LED_ON_FDX                   BIT(4) /* Full duplex */
 #define   MTK_PHY_LED_ON_HDX                   BIT(5) /* Half duplex */
 #define   MTK_PHY_LED_BLINK_100RX              BIT(3)
 #define   MTK_PHY_LED_BLINK_10TX               BIT(4)
 #define   MTK_PHY_LED_BLINK_10RX               BIT(5)
+#define   MTK_PHY_LED_BLINK_RX                 (MTK_PHY_LED_BLINK_10RX |\
+                                                MTK_PHY_LED_BLINK_100RX |\
+                                                MTK_PHY_LED_BLINK_1000RX)
+#define   MTK_PHY_LED_BLINK_TX                 (MTK_PHY_LED_BLINK_10TX |\
+                                                MTK_PHY_LED_BLINK_100TX |\
+                                                MTK_PHY_LED_BLINK_1000TX)
 #define   MTK_PHY_LED_BLINK_COLLISION          BIT(6)
 #define   MTK_PHY_LED_BLINK_RX_CRC_ERR         BIT(7)
 #define   MTK_PHY_LED_BLINK_RX_IDLE_ERR                BIT(8)
@@ -1247,11 +1256,9 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
        if (blink < 0)
                return -EIO;
 
-       if ((on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 |
-                  MTK_PHY_LED_ON_LINK10)) ||
-           (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX |
-                     MTK_PHY_LED_BLINK_10RX | MTK_PHY_LED_BLINK_1000TX |
-                     MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX)))
+       if ((on & (MTK_PHY_LED_ON_LINK | MTK_PHY_LED_ON_FDX | MTK_PHY_LED_ON_HDX |
+                  MTK_PHY_LED_ON_LINKDOWN)) ||
+           (blink & (MTK_PHY_LED_BLINK_RX | MTK_PHY_LED_BLINK_TX)))
                set_bit(bit_netdev, &priv->led_state);
        else
                clear_bit(bit_netdev, &priv->led_state);
@@ -1269,7 +1276,7 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
        if (!rules)
                return 0;
 
-       if (on & (MTK_PHY_LED_ON_LINK1000 | MTK_PHY_LED_ON_LINK100 | MTK_PHY_LED_ON_LINK10))
+       if (on & MTK_PHY_LED_ON_LINK)
                *rules |= BIT(TRIGGER_NETDEV_LINK);
 
        if (on & MTK_PHY_LED_ON_LINK10)
@@ -1287,10 +1294,10 @@ static int mt798x_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
        if (on & MTK_PHY_LED_ON_HDX)
                *rules |= BIT(TRIGGER_NETDEV_HALF_DUPLEX);
 
-       if (blink & (MTK_PHY_LED_BLINK_1000RX | MTK_PHY_LED_BLINK_100RX | MTK_PHY_LED_BLINK_10RX))
+       if (blink & MTK_PHY_LED_BLINK_RX)
                *rules |= BIT(TRIGGER_NETDEV_RX);
 
-       if (blink & (MTK_PHY_LED_BLINK_1000TX | MTK_PHY_LED_BLINK_100TX | MTK_PHY_LED_BLINK_10TX))
+       if (blink & MTK_PHY_LED_BLINK_TX)
                *rules |= BIT(TRIGGER_NETDEV_TX);
 
        return 0;
@@ -1323,15 +1330,19 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
                on |= MTK_PHY_LED_ON_LINK1000;
 
        if (rules & BIT(TRIGGER_NETDEV_RX)) {
-               blink |= MTK_PHY_LED_BLINK_10RX  |
-                        MTK_PHY_LED_BLINK_100RX |
-                        MTK_PHY_LED_BLINK_1000RX;
+               blink |= (on & MTK_PHY_LED_ON_LINK) ?
+                         (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10RX : 0) |
+                          ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100RX : 0) |
+                          ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000RX : 0)) :
+                         MTK_PHY_LED_BLINK_RX;
        }
 
        if (rules & BIT(TRIGGER_NETDEV_TX)) {
-               blink |= MTK_PHY_LED_BLINK_10TX  |
-                        MTK_PHY_LED_BLINK_100TX |
-                        MTK_PHY_LED_BLINK_1000TX;
+               blink |= (on & MTK_PHY_LED_ON_LINK) ?
+                         (((on & MTK_PHY_LED_ON_LINK10) ? MTK_PHY_LED_BLINK_10TX : 0) |
+                          ((on & MTK_PHY_LED_ON_LINK100) ? MTK_PHY_LED_BLINK_100TX : 0) |
+                          ((on & MTK_PHY_LED_ON_LINK1000) ? MTK_PHY_LED_BLINK_1000TX : 0)) :
+                         MTK_PHY_LED_BLINK_TX;
        }
 
        if (blink || on)
@@ -1344,9 +1355,7 @@ static int mt798x_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
                                MTK_PHY_LED0_ON_CTRL,
                             MTK_PHY_LED_ON_FDX     |
                             MTK_PHY_LED_ON_HDX     |
-                            MTK_PHY_LED_ON_LINK10  |
-                            MTK_PHY_LED_ON_LINK100 |
-                            MTK_PHY_LED_ON_LINK1000,
+                            MTK_PHY_LED_ON_LINK,
                             on);
 
        if (ret)
index 752f821a19901f313a1aca51fe332539ce82385b..df9d767cb524241848c744504d6e2999efc42ed5 100644 (file)
@@ -1456,21 +1456,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                        /* Skip IP alignment pseudo header */
                        skb_pull(skb, 2);
 
-                       skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
                        ax88179_rx_checksum(skb, pkt_hdr);
                        return 1;
                }
 
-               ax_skb = skb_clone(skb, GFP_ATOMIC);
+               ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
                if (!ax_skb)
                        return 0;
-               skb_trim(ax_skb, pkt_len);
+               skb_put(ax_skb, pkt_len);
+               memcpy(ax_skb->data, skb->data + 2, pkt_len);
 
-               /* Skip IP alignment pseudo header */
-               skb_pull(ax_skb, 2);
-
-               skb->truesize = pkt_len_plus_padd +
-                               SKB_DATA_ALIGN(sizeof(struct sk_buff));
                ax88179_rx_checksum(ax_skb, pkt_hdr);
                usbnet_skb_return(dev, ax_skb);
 
index edc34402e787f9ff84a345ecb892cc7b720ef312..a5469cf5cf6706de2c3ded24f675f504c794227c 100644 (file)
@@ -1368,6 +1368,9 @@ static const struct usb_device_id products[] = {
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
index 3495591a5c29b2aa929fcf307c2970c661ec2ea6..ba319fc219571975597bf7aad3d913e77dac6898 100644 (file)
@@ -1615,6 +1615,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
        if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
                return false;
 
+       /* Ignore packets from invalid src-address */
+       if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
+               return false;
+
        /* Get address from the outer IP header */
        if (vxlan_get_sk_family(vs) == AF_INET) {
                saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
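The vxlan hunk above drops frames whose inner source MAC is invalid before it can be learned into the FDB. is_valid_ether_addr() rejects exactly two classes of address; a minimal sketch of the equivalent check:

    /* equivalent open-coded form of is_valid_ether_addr() */
    static inline bool example_valid_src(const u8 *addr)
    {
            return !is_multicast_ether_addr(addr) &&    /* also covers broadcast */
                   !is_zero_ether_addr(addr);
    }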
index a6a37d67a50ad552b4185af654289d635c2cfd70..9f4bf41a3d41e4e1395aa77fb6874130e5783d96 100644 (file)
@@ -9020,6 +9020,7 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
        offload = &arvif->arp_ns_offload;
        count = 0;
 
+       /* Note: read_lock_bh() calls rcu_read_lock() */
        read_lock_bh(&idev->lock);
 
        memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9050,7 +9051,8 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
        }
 
        /* get anycast address */
-       for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+       for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+            ifaca6 = rcu_dereference(ifaca6->aca_next)) {
                if (count >= ATH11K_IPV6_MAX_COUNT)
                        goto generate;
 
index 072b0a5827d19f801d9f0da38933aef21044b7fc..eca1457caa0cad9eb13edce501beba6d5540fe57 100644 (file)
@@ -10,7 +10,7 @@
 #include "fw/api/txq.h"
 
 /* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX   90
+#define IWL_BZ_UCODE_API_MAX   89
 
 /* Lowest firmware API version supported */
 #define IWL_BZ_UCODE_API_MIN   80
index 9b79279fd76cad94170d28614b7eee0729ac02f8..dbbcb2d0968c0992d15f652409cf651dc625428c 100644 (file)
@@ -10,7 +10,7 @@
 #include "fw/api/txq.h"
 
 /* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX   90
+#define IWL_SC_UCODE_API_MAX   89
 
 /* Lowest firmware API version supported */
 #define IWL_SC_UCODE_API_MIN   82
index 4863a3c746406ed70a398d8939e080e72636fa0b..d84d7e955bb021fc1994d1b5fa9e21183e01027a 100644 (file)
@@ -53,6 +53,8 @@ int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        if (!pasn)
                return -ENOBUFS;
 
+       iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
+
        pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
 
        switch (pasn->cipher) {
index 9f69e04594e49cb59f3102071c905bd75b93f825..fe5bba8561d0c69abeef30221c5cc040f64c7b96 100644 (file)
@@ -279,6 +279,7 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
                         NULL);
+       iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
        return 0;
 }
 
@@ -296,7 +297,6 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                return 0;
 
        cmd.link_id = cpu_to_le32(link_info->fw_link_id);
-       iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
        link_info->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
        cmd.spec_link_id = link_conf->link_id;
        cmd.phy_id = cpu_to_le32(FW_CTXT_INVALID);
index f3e3986b4c72f2aabfd9ff67edf8c11a1bebc03c..11559563ae38162e8c138dddc79e0cc89f3b349d 100644 (file)
@@ -2813,7 +2813,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
                if (ver_handler->version != scan_ver)
                        continue;
 
-               return ver_handler->handler(mvm, vif, params, type, uid);
+               err = ver_handler->handler(mvm, vif, params, type, uid);
+               return err ? : uid;
        }
 
        err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
index b55fe320633c74b42618d37be6bc40a951f0c898..59e1fc0018df3f32d60e3e52341350c10a44d0f0 100644 (file)
@@ -3899,7 +3899,7 @@ static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info)
        }
 
        nla_for_each_nested(peer, peers, rem) {
-               struct cfg80211_pmsr_result result;
+               struct cfg80211_pmsr_result result = {};
 
                err = mac80211_hwsim_parse_pmsr_result(peer, &result, info);
                if (err)
index 7eb17f46a8153d88705603e421f350b0fd091c2b..9e1a34e23af26e0a2bd7e18b4e3fe5f6726f560d 100644 (file)
@@ -424,7 +424,8 @@ struct trf7970a {
        enum trf7970a_state             state;
        struct device                   *dev;
        struct spi_device               *spi;
-       struct regulator                *regulator;
+       struct regulator                *vin_regulator;
+       struct regulator                *vddio_regulator;
        struct nfc_digital_dev          *ddev;
        u32                             quirks;
        bool                            is_initiator;
@@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
        if (trf->state != TRF7970A_ST_PWR_OFF)
                return 0;
 
-       ret = regulator_enable(trf->regulator);
+       ret = regulator_enable(trf->vin_regulator);
        if (ret) {
                dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
                return ret;
@@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
        if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
                gpiod_set_value_cansleep(trf->en2_gpiod, 0);
 
-       ret = regulator_disable(trf->regulator);
+       ret = regulator_disable(trf->vin_regulator);
        if (ret)
                dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
                        ret);
@@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
        mutex_init(&trf->lock);
        INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
 
-       trf->regulator = devm_regulator_get(&spi->dev, "vin");
-       if (IS_ERR(trf->regulator)) {
-               ret = PTR_ERR(trf->regulator);
+       trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
+       if (IS_ERR(trf->vin_regulator)) {
+               ret = PTR_ERR(trf->vin_regulator);
                dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
                goto err_destroy_lock;
        }
 
-       ret = regulator_enable(trf->regulator);
+       ret = regulator_enable(trf->vin_regulator);
        if (ret) {
                dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
                goto err_destroy_lock;
        }
 
-       uvolts = regulator_get_voltage(trf->regulator);
+       uvolts = regulator_get_voltage(trf->vin_regulator);
        if (uvolts > 4000000)
                trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
 
-       trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
-       if (IS_ERR(trf->regulator)) {
-               ret = PTR_ERR(trf->regulator);
+       trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
+       if (IS_ERR(trf->vddio_regulator)) {
+               ret = PTR_ERR(trf->vddio_regulator);
                dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
-               goto err_destroy_lock;
+               goto err_disable_vin_regulator;
        }
 
-       ret = regulator_enable(trf->regulator);
+       ret = regulator_enable(trf->vddio_regulator);
        if (ret) {
                dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
-               goto err_destroy_lock;
+               goto err_disable_vin_regulator;
        }
 
-       if (regulator_get_voltage(trf->regulator) == 1800000) {
+       if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
                trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
                dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
        }
@@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
        if (!trf->ddev) {
                dev_err(trf->dev, "Can't allocate NFC digital device\n");
                ret = -ENOMEM;
-               goto err_disable_regulator;
+               goto err_disable_vddio_regulator;
        }
 
        nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@@ -2137,8 +2138,10 @@ err_shutdown:
        trf7970a_shutdown(trf);
 err_free_ddev:
        nfc_digital_free_device(trf->ddev);
-err_disable_regulator:
-       regulator_disable(trf->regulator);
+err_disable_vddio_regulator:
+       regulator_disable(trf->vddio_regulator);
+err_disable_vin_regulator:
+       regulator_disable(trf->vin_regulator);
 err_destroy_lock:
        mutex_destroy(&trf->lock);
        return ret;
@@ -2157,7 +2160,8 @@ static void trf7970a_remove(struct spi_device *spi)
        nfc_digital_unregister_device(trf->ddev);
        nfc_digital_free_device(trf->ddev);
 
-       regulator_disable(trf->regulator);
+       regulator_disable(trf->vddio_regulator);
+       regulator_disable(trf->vin_regulator);
 
        mutex_destroy(&trf->lock);
 }
index b246067e074bc0718ad49ad582b3ad9e382c9af0..6cb96a1e8b7df453c00ac0eaf9a568908f743c9f 100644 (file)
@@ -967,7 +967,7 @@ vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
 
        val_u32 = __virtio32_to_cpu(true, config->size_max);
 
-       return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+       return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX, val_u32);
 }
 
 /* fill the block size*/
@@ -1089,7 +1089,7 @@ static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
        u8 ro;
 
        ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
-       if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+       if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_READ_ONLY, ro))
                return -EMSGSIZE;
 
        return 0;
@@ -1100,7 +1100,7 @@ static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
        u8 flush;
 
        flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
-       if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+       if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_FLUSH, flush))
                return -EMSGSIZE;
 
        return 0;
index dae96c9f61cf8766cfe1e781f83c81e962691673..806ecd32219b691327d9620e41316404624a73ae 100644 (file)
@@ -196,7 +196,7 @@ err_mutex_unlock:
  */
 static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
 {
-       unsigned long offset = vmf->address - vmf->vma->vm_start;
+       unsigned long offset = vmf->pgoff << PAGE_SHIFT;
        struct page *page = vmf->page;
 
        file_update_time(vmf->vma->vm_file);
index 9defa12208f98a715e5b894119f044fca50b3dc5..1775fcc7f0e8efa8b22f4c14e6886d85a22faeb5 100644 (file)
@@ -179,13 +179,14 @@ extern int v9fs_vfs_rename(struct mnt_idmap *idmap,
                           struct inode *old_dir, struct dentry *old_dentry,
                           struct inode *new_dir, struct dentry *new_dentry,
                           unsigned int flags);
-extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid);
+extern struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid,
+                                               bool new);
 extern const struct inode_operations v9fs_dir_inode_operations_dotl;
 extern const struct inode_operations v9fs_file_inode_operations_dotl;
 extern const struct inode_operations v9fs_symlink_inode_operations_dotl;
 extern const struct netfs_request_ops v9fs_req_ops;
 extern struct inode *v9fs_fid_iget_dotl(struct super_block *sb,
-                                       struct p9_fid *fid);
+                                               struct p9_fid *fid, bool new);
 
 /* other default globals */
 #define V9FS_PORT      564
@@ -224,12 +225,12 @@ static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses)
  */
 static inline struct inode *
 v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid,
-                       struct super_block *sb)
+                       struct super_block *sb, bool new)
 {
        if (v9fs_proto_dotl(v9ses))
-               return v9fs_fid_iget_dotl(sb, fid);
+               return v9fs_fid_iget_dotl(sb, fid, new);
        else
-               return v9fs_fid_iget(sb, fid);
+               return v9fs_fid_iget(sb, fid, new);
 }
 
 #endif
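The 9p hunks below thread a "new" flag through v9fs_fid_iget*() so that create paths can detect an inode-number collision in the icache and rehash a fresh inode. A minimal sketch of the underlying iget_locked()/I_NEW contract the code relies on:

    inode = iget_locked(sb, ino);
    if (!inode)
            return ERR_PTR(-ENOMEM);

    if (!(inode->i_state & I_NEW))
            return inode;           /* already cached and fully initialized */

    /* brand-new inode: stat the fid, fill the inode in, then publish it */
    unlock_new_inode(inode);
    return inode;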
index 47bd77199e20c83a3c7554d5dc592697080b7885..7a3308d776060e2e2565af09d358f2cf33416b6b 100644 (file)
@@ -364,7 +364,8 @@ void v9fs_evict_inode(struct inode *inode)
                clear_inode(inode);
 }
 
-struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
+struct inode *
+v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid, bool new)
 {
        dev_t rdev;
        int retval;
@@ -376,8 +377,18 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
        inode = iget_locked(sb, QID2INO(&fid->qid));
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
-       if (!(inode->i_state & I_NEW))
-               return inode;
+       if (!(inode->i_state & I_NEW)) {
+               if (!new) {
+                       goto done;
+               } else {
+                       p9_debug(P9_DEBUG_VFS, "WARNING: Inode collision %ld\n",
+                                               inode->i_ino);
+                       iput(inode);
+                       remove_inode_hash(inode);
+                       inode = iget_locked(sb, QID2INO(&fid->qid));
+                       WARN_ON(!(inode->i_state & I_NEW));
+               }
+       }
 
        /*
         * initialize the inode with the stat info
@@ -401,11 +412,11 @@ struct inode *v9fs_fid_iget(struct super_block *sb, struct p9_fid *fid)
        v9fs_set_netfs_context(inode);
        v9fs_cache_inode_get_cookie(inode);
        unlock_new_inode(inode);
+done:
        return inode;
 error:
        iget_failed(inode);
        return ERR_PTR(retval);
-
 }
 
 /**
@@ -437,8 +448,15 @@ static int v9fs_at_to_dotl_flags(int flags)
  */
 static void v9fs_dec_count(struct inode *inode)
 {
-       if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2)
-               drop_nlink(inode);
+       if (!S_ISDIR(inode->i_mode) || inode->i_nlink > 2) {
+               if (inode->i_nlink) {
+                       drop_nlink(inode);
+               } else {
+                       p9_debug(P9_DEBUG_VFS,
+                                               "WARNING: unexpected i_nlink zero %d inode %ld\n",
+                                               inode->i_nlink, inode->i_ino);
+               }
+       }
 }
 
 /**
@@ -489,6 +507,9 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
                } else
                        v9fs_dec_count(inode);
 
+               if (inode->i_nlink <= 0)        /* no more refs unhash it */
+                       remove_inode_hash(inode);
+
                v9fs_invalidate_inode_attr(inode);
                v9fs_invalidate_inode_attr(dir);
 
@@ -554,7 +575,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
                /*
                 * instantiate inode and assign the unopened fid to the dentry
                 */
-               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, true);
                if (IS_ERR(inode)) {
                        err = PTR_ERR(inode);
                        p9_debug(P9_DEBUG_VFS,
@@ -683,7 +704,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
        else if (IS_ERR(fid))
                inode = ERR_CAST(fid);
        else
-               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+               inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb, false);
        /*
         * If we had a rename on the server and a parallel lookup
         * for the new name, then make sure we instantiate with
index 55dde186041a38d98c997e2ea728e2004c2a9606..c61b97bd13b9a7875b14f9af6e109b85143c79da 100644 (file)
@@ -52,7 +52,10 @@ static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
        return current_fsgid();
 }
 
-struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
+
+
+struct inode *
+v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid, bool new)
 {
        int retval;
        struct inode *inode;
@@ -62,8 +65,18 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
        inode = iget_locked(sb, QID2INO(&fid->qid));
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
-       if (!(inode->i_state & I_NEW))
-               return inode;
+       if (!(inode->i_state & I_NEW)) {
+               if (!new) {
+                       goto done;
+               } else { /* deal with race condition in inode number reuse */
+                       p9_debug(P9_DEBUG_ERROR, "WARNING: Inode collision %lx\n",
+                                               inode->i_ino);
+                       iput(inode);
+                       remove_inode_hash(inode);
+                       inode = iget_locked(sb, QID2INO(&fid->qid));
+                       WARN_ON(!(inode->i_state & I_NEW));
+               }
+       }
 
        /*
         * initialize the inode with the stat info
@@ -90,12 +103,11 @@ struct inode *v9fs_fid_iget_dotl(struct super_block *sb, struct p9_fid *fid)
                goto error;
 
        unlock_new_inode(inode);
-
+done:
        return inode;
 error:
        iget_failed(inode);
        return ERR_PTR(retval);
-
 }
 
 struct dotl_openflag_map {
@@ -247,7 +259,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
                p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
                goto out;
        }
-       inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+       inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err);
@@ -340,7 +352,7 @@ static int v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
        }
 
        /* instantiate inode and assign the unopened fid to the dentry */
-       inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+       inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
@@ -776,7 +788,7 @@ v9fs_vfs_mknod_dotl(struct mnt_idmap *idmap, struct inode *dir,
                         err);
                goto error;
        }
-       inode = v9fs_fid_iget_dotl(dir->i_sb, fid);
+       inode = v9fs_fid_iget_dotl(dir->i_sb, fid, true);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n",
index 55e67e36ae682bcf33a66c2f5e7d936e975e8f6f..f52fdf42945cf15d21fe55f01da21967714ed07c 100644 (file)
@@ -139,7 +139,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
        else
                sb->s_d_op = &v9fs_dentry_operations;
 
-       inode = v9fs_get_inode_from_fid(v9ses, fid, sb);
+       inode = v9fs_get_inode_from_fid(v9ses, fid, sb, true);
        if (IS_ERR(inode)) {
                retval = PTR_ERR(inode);
                goto release_sb;
index fadb1078903d291ce6ce3c7928d79c71c1eb18f8..a200442010025a0d8ee7b421d12edb32ff5c5a01 100644 (file)
@@ -470,7 +470,7 @@ found:
                goto err;
        }
 
-       bio = bio_alloc(ca->disk_sb.bdev, 1, REQ_OP_READ, GFP_KERNEL);
+       bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL);
        bio->bi_iter.bi_sector = p.ptr.offset;
        bch2_bio_map(bio, data_buf, bytes);
        ret = submit_bio_wait(bio);
index 085987435a5ea3cfc7354db7ee5392ce62241f05..f7fbfccd2b1e4d7e6bafabd839a6917de906edf9 100644 (file)
@@ -1504,7 +1504,8 @@ enum btree_id_flags {
          BIT_ULL(KEY_TYPE_stripe))                                             \
        x(reflink,              7,      BTREE_ID_EXTENTS|BTREE_ID_DATA,         \
          BIT_ULL(KEY_TYPE_reflink_v)|                                          \
-         BIT_ULL(KEY_TYPE_indirect_inline_data))                               \
+         BIT_ULL(KEY_TYPE_indirect_inline_data)|                               \
+         BIT_ULL(KEY_TYPE_error))                                              \
        x(subvolumes,           8,      0,                                      \
          BIT_ULL(KEY_TYPE_subvolume))                                          \
        x(snapshots,            9,      0,                                      \
index ecbd9598f69fd00e86efbe7537a134d6d4c4db06..791470b0c654553e2fbb9216fb60df1eb8c5fec2 100644 (file)
@@ -1587,7 +1587,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
                struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
                ret = PTR_ERR_OR_ZERO(new);
                if (ret)
-                       return ret;
+                       goto out;
 
                if (!r->refcount)
                        new->k.type = KEY_TYPE_deleted;
@@ -1595,6 +1595,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
                        *bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
                ret = bch2_trans_update(trans, iter, new, 0);
        }
+out:
 fsck_err:
        printbuf_exit(&buf);
        return ret;
index 9678b2375bedde868e7a168435c9a17fc74eb26a..debb0edc3455afa661c0104e6e29fe232bcd18b8 100644 (file)
@@ -888,7 +888,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_bad_u64s,
-                                "bad k->u64s %u (min %u max %lu)", k->u64s,
+                                "bad k->u64s %u (min %u max %zu)", k->u64s,
                                 bkeyp_key_u64s(&b->format, k),
                                 U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
                        goto drop_this_key;
index 88a3582a32757e34a28eb37143f9ff78a88a4085..e8c1c530cd95f5bb1c34cb39f848cd842b0a88c6 100644 (file)
@@ -842,8 +842,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
         * Newest freed entries are at the end of the list - once we hit one
         * that's too new to be freed, we can bail out:
         */
-       scanned += bc->nr_freed_nonpcpu;
-
        list_for_each_entry_safe(ck, t, &bc->freed_nonpcpu, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
@@ -857,11 +855,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                bc->nr_freed_nonpcpu--;
        }
 
-       if (scanned >= nr)
-               goto out;
-
-       scanned += bc->nr_freed_pcpu;
-
        list_for_each_entry_safe(ck, t, &bc->freed_pcpu, list) {
                if (!poll_state_synchronize_srcu(&c->btree_trans_barrier,
                                                 ck->btree_trans_barrier_seq))
@@ -875,9 +868,6 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                bc->nr_freed_pcpu--;
        }
 
-       if (scanned >= nr)
-               goto out;
-
        rcu_read_lock();
        tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
        if (bc->shrink_iter >= tbl->size)
@@ -893,12 +883,12 @@ static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
                        next = rht_dereference_bucket_rcu(pos->next, tbl, bc->shrink_iter);
                        ck = container_of(pos, struct bkey_cached, hash);
 
-                       if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
+                       if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
                                goto next;
-
-                       if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
+                       } else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
                                clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
-                       else if (bkey_cached_lock_for_evict(ck)) {
+                               goto next;
+                       } else if (bkey_cached_lock_for_evict(ck)) {
                                bkey_cached_evict(bc, ck);
                                bkey_cached_free(bc, ck);
                        }
@@ -916,7 +906,6 @@ next:
        } while (scanned < nr && bc->shrink_iter != start);
 
        rcu_read_unlock();
-out:
        memalloc_nofs_restore(flags);
        srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
        mutex_unlock(&bc->lock);
index 866bd278439f8bb72a0b1e31e672953ff9b3f839..c60794264da2898b4d8d64eb2cf6f94ac90afb1b 100644 (file)
@@ -302,6 +302,8 @@ again:
 
                        start->max_key = bpos_predecessor(n->min_key);
                        start->range_updated = true;
+               } else if (n->level) {
+                       n->overwritten = true;
                } else {
                        struct printbuf buf = PRINTBUF;
 
index e0c982a4195c764ab8a415b5f7f80cbff88c1935..c69b233c41bb3d07a02ca296c03429360ff2294e 100644 (file)
@@ -321,9 +321,9 @@ struct bkey_cached {
        struct btree_bkey_cached_common c;
 
        unsigned long           flags;
+       unsigned long           btree_trans_barrier_seq;
        u16                     u64s;
        bool                    valid;
-       u32                     btree_trans_barrier_seq;
        struct bkey_cached_key  key;
 
        struct rhash_head       hash;
index 6030c396754f6f494c3c137abd313f6bf80c2ffb..b4efd8cc4d1a2bfc62a9742d6f4df3e9a0f0aac5 100644 (file)
@@ -1960,7 +1960,11 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
        if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
                return 0;
 
-       flags &= ~BCH_WATERMARK_MASK;
+       if ((flags & BCH_WATERMARK_MASK) <= BCH_WATERMARK_reclaim) {
+               flags &= ~BCH_WATERMARK_MASK;
+               flags |= BCH_WATERMARK_btree;
+               flags |= BCH_TRANS_COMMIT_journal_reclaim;
+       }
 
        b = trans->paths[path].l[level].b;
 
index 72781aad6ba70ccc774b688c6a9d50b2dc21f133..4d14f19f51850e9d024ee69bd1f68d5a3743a2b0 100644 (file)
@@ -232,13 +232,15 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
        /* We need request_key() to be called before we punt to kthread: */
        opt_set(thr->opts, nostart, true);
 
+       bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);
+
        thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);
 
        if (!IS_ERR(thr->c) &&
            thr->c->opts.errors == BCH_ON_ERROR_panic)
                thr->c->opts.errors = BCH_ON_ERROR_ro;
 
-       ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_offline_fsck_ops);
+       ret = __bch2_run_thread_with_stdio(&thr->thr);
 out:
        darray_for_each(devs, i)
                kfree(*i);
index b5ea9fa1259d1462e9033318466fc914911672ce..fce690007edfce089f054d81cc862c04c132c2b3 100644 (file)
@@ -188,7 +188,8 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
        BUG_ON(!old);
 
        if (unlikely(old != inode)) {
-               discard_new_inode(&inode->v);
+               __destroy_inode(&inode->v);
+               kmem_cache_free(bch2_inode_cache, inode);
                inode = old;
        } else {
                mutex_lock(&c->vfs_inodes_lock);
@@ -225,8 +226,10 @@ static struct bch_inode_info *bch2_new_inode(struct btree_trans *trans)
 
        if (unlikely(!inode)) {
                int ret = drop_locks_do(trans, (inode = to_bch_ei(new_inode(c->vfs_sb))) ? 0 : -ENOMEM);
-               if (ret && inode)
-                       discard_new_inode(&inode->v);
+               if (ret && inode) {
+                       __destroy_inode(&inode->v);
+                       kmem_cache_free(bch2_inode_cache, inode);
+               }
                if (ret)
                        return ERR_PTR(ret);
        }
index 9aa28b52ab926c567f49e0bb68b9c6791fb326e5..eb1f9d6f5a196e55aebf5b74f485bfa634169da5 100644 (file)
@@ -1723,7 +1723,7 @@ static void journal_write_endio(struct bio *bio)
        percpu_ref_put(&ca->io_ref);
 }
 
-static CLOSURE_CALLBACK(do_journal_write)
+static CLOSURE_CALLBACK(journal_write_submit)
 {
        closure_type(w, struct journal_buf, io);
        struct journal *j = container_of(w, struct journal, buf[w->idx]);
@@ -1768,6 +1768,44 @@ static CLOSURE_CALLBACK(do_journal_write)
        continue_at(cl, journal_write_done, j->wq);
 }
 
+static CLOSURE_CALLBACK(journal_write_preflush)
+{
+       closure_type(w, struct journal_buf, io);
+       struct journal *j = container_of(w, struct journal, buf[w->idx]);
+       struct bch_fs *c = container_of(j, struct bch_fs, journal);
+
+       if (j->seq_ondisk + 1 != le64_to_cpu(w->data->seq)) {
+               spin_lock(&j->lock);
+               closure_wait(&j->async_wait, cl);
+               spin_unlock(&j->lock);
+
+               continue_at(cl, journal_write_preflush, j->wq);
+               return;
+       }
+
+       if (w->separate_flush) {
+               for_each_rw_member(c, ca) {
+                       percpu_ref_get(&ca->io_ref);
+
+                       struct journal_device *ja = &ca->journal;
+                       struct bio *bio = &ja->bio[w->idx]->bio;
+                       bio_reset(bio, ca->disk_sb.bdev,
+                                 REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
+                       bio->bi_end_io          = journal_write_endio;
+                       bio->bi_private         = ca;
+                       closure_bio_submit(bio, cl);
+               }
+
+               continue_at(cl, journal_write_submit, j->wq);
+       } else {
+               /*
+                * no need to punt to another work item if we're not waiting on
+                * preflushes
+                */
+               journal_write_submit(&cl->work);
+       }
+}
+
 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
@@ -2033,23 +2071,9 @@ CLOSURE_CALLBACK(bch2_journal_write)
                goto err;
 
        if (!JSET_NO_FLUSH(w->data))
-               closure_wait_event(&j->async_wait, j->seq_ondisk + 1 == le64_to_cpu(w->data->seq));
-
-       if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
-               for_each_rw_member(c, ca) {
-                       percpu_ref_get(&ca->io_ref);
-
-                       struct journal_device *ja = &ca->journal;
-                       struct bio *bio = &ja->bio[w->idx]->bio;
-                       bio_reset(bio, ca->disk_sb.bdev,
-                                 REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
-                       bio->bi_end_io          = journal_write_endio;
-                       bio->bi_private         = ca;
-                       closure_bio_submit(bio, cl);
-               }
-       }
-
-       continue_at(cl, do_journal_write, j->wq);
+               continue_at(cl, journal_write_preflush, j->wq);
+       else
+               continue_at(cl, journal_write_submit, j->wq);
        return;
 no_io:
        continue_at(cl, journal_write_done, j->wq);
index 0f328aba9760ba0e89fd015ee757239b6d8bd8c4..be5b47619327001ac8191c026575f6ac18d74d16 100644 (file)
@@ -249,7 +249,10 @@ int bch2_journal_replay(struct bch_fs *c)
 
                struct journal_key *k = *kp;
 
-               replay_now_at(j, k->journal_seq);
+               if (k->journal_seq)
+                       replay_now_at(j, k->journal_seq);
+               else
+                       replay_now_at(j, j->replay_journal_seq_end);
 
                ret = commit_do(trans, NULL, NULL,
                                BCH_TRANS_COMMIT_no_enospc|
index 5980ba2563fe9fa159ba9d87fe08ab2dc53a78fb..35ca3f138de6fad2428f347c704c03992e2bc05a 100644 (file)
@@ -29,6 +29,14 @@ int bch2_sb_clean_validate_late(struct bch_fs *c, struct bch_sb_field_clean *cle
        for (entry = clean->start;
             entry < (struct jset_entry *) vstruct_end(&clean->field);
             entry = vstruct_next(entry)) {
+               if (vstruct_end(entry) > vstruct_end(&clean->field)) {
+                       bch_err(c, "journal entry (u64s %u) overran end of superblock clean section (u64s %u) by %zu",
+                               le16_to_cpu(entry->u64s), le32_to_cpu(clean->field.u64s),
+                               (u64 *) vstruct_end(entry) - (u64 *) vstruct_end(&clean->field));
+                       bch2_sb_error_count(c, BCH_FSCK_ERR_sb_clean_entry_overrun);
+                       return -BCH_ERR_fsck_repair_unimplemented;
+               }
+
                ret = bch2_journal_entry_validate(c, NULL, entry,
                                                  le16_to_cpu(c->disk_sb.sb->version),
                                                  BCH_SB_BIG_ENDIAN(c->disk_sb.sb),
index 4ca6e7b0d8aaed2c4b95fff82c2ed964c6a102ad..06c7a644f4a44279f587a3cffb39473982b3392e 100644 (file)
        x(btree_root_unreadable_and_scan_found_nothing,         263)    \
        x(snapshot_node_missing,                                264)    \
        x(dup_backpointer_to_bad_csum_extent,                   265)    \
-       x(btree_bitmap_not_marked,                              266)
+       x(btree_bitmap_not_marked,                              266)    \
+       x(sb_clean_entry_overrun,                               267)
 
 enum bch_sb_error_id {
 #define x(t, n) BCH_FSCK_ERR_##t = n,
index 522a969345e5289ac87cf53b5f5a735e3b5f8d67..5b8e621ac5eb5780bb0c2e241196737bc0076e2e 100644 (file)
@@ -463,8 +463,8 @@ static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, uns
                m->btree_bitmap_shift += resize;
        }
 
-       for (unsigned bit = sectors >> m->btree_bitmap_shift;
-            bit << m->btree_bitmap_shift < end;
+       for (unsigned bit = start >> m->btree_bitmap_shift;
+            (u64) bit << m->btree_bitmap_shift < end;
             bit++)
                bitmap |= BIT_ULL(bit);
 
index b27c3e4467cf288d67587143e5343d57d5aa41c9..5efa64eca5f85af5637faa12aa85b83fbcde6ad3 100644 (file)
@@ -235,11 +235,11 @@ static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64
 {
        u64 end = start + sectors;
 
-       if (end > 64 << ca->mi.btree_bitmap_shift)
+       if (end > 64ULL << ca->mi.btree_bitmap_shift)
                return false;
 
-       for (unsigned bit = sectors >> ca->mi.btree_bitmap_shift;
-            bit << ca->mi.btree_bitmap_shift < end;
+       for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
+            (u64) bit << ca->mi.btree_bitmap_shift < end;
             bit++)
                if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
                        return false;
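
Both bitmap hunks above fix the same class of bug: the loop bound was computed as bit << btree_bitmap_shift in 32-bit arithmetic (and 64 << shift in plain int), which silently wraps once the offset no longer fits in 32 bits, and the loop also started from sectors rather than start. The (u64)/ULL widening is what makes the comparison correct; a tiny userspace demonstration with hypothetical values (not bcachefs code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned bit = 40;	/* hypothetical bitmap bit index */
	unsigned shift = 28;	/* hypothetical btree_bitmap_shift */

	/* Evaluated in 32-bit unsigned arithmetic: the result wraps. */
	uint64_t wrong = bit << shift;

	/* Widened to 64 bits before shifting: the full offset survives. */
	uint64_t right = (uint64_t)bit << shift;

	printf("without cast: %llu\n", (unsigned long long)wrong);
	printf("with cast:    %llu\n", (unsigned long long)right);
	return 0;
}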
index 8daf80a38d60c6e4fa97b97345d3d4ecb80e7e88..88e214c609bb2b6beab65b604acf1053596f3a8a 100644 (file)
@@ -544,6 +544,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 
        bch2_find_btree_nodes_exit(&c->found_btree_nodes);
        bch2_free_pending_node_rewrites(c);
+       bch2_fs_allocator_background_exit(c);
        bch2_fs_sb_errors_exit(c);
        bch2_fs_counters_exit(c);
        bch2_fs_snapshots_exit(c);
index 940db15d6a939bf93281627e9759904e4a6531f3..b1af7ac430f662aa2b827d0c6550ef6187089352 100644 (file)
@@ -294,16 +294,27 @@ static int thread_with_stdio_fn(void *arg)
        return 0;
 }
 
-int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
-                              const struct thread_with_stdio_ops *ops)
+void bch2_thread_with_stdio_init(struct thread_with_stdio *thr,
+                                const struct thread_with_stdio_ops *ops)
 {
        stdio_buf_init(&thr->stdio.input);
        stdio_buf_init(&thr->stdio.output);
        thr->ops = ops;
+}
 
+int __bch2_run_thread_with_stdio(struct thread_with_stdio *thr)
+{
        return bch2_run_thread_with_file(&thr->thr, &thread_with_stdio_fops, thread_with_stdio_fn);
 }
 
+int bch2_run_thread_with_stdio(struct thread_with_stdio *thr,
+                              const struct thread_with_stdio_ops *ops)
+{
+       bch2_thread_with_stdio_init(thr, ops);
+
+       return __bch2_run_thread_with_stdio(thr);
+}
+
 int bch2_run_thread_with_stdout(struct thread_with_stdio *thr,
                                const struct thread_with_stdio_ops *ops)
 {
index af54ea8f5b0ff85871c915e275187c29b7b0c6f1..1d63d14d7dcae811a21e49a0cc509407daf7584c 100644 (file)
@@ -63,6 +63,9 @@ struct thread_with_stdio {
        const struct thread_with_stdio_ops      *ops;
 };
 
+void bch2_thread_with_stdio_init(struct thread_with_stdio *,
+                                const struct thread_with_stdio_ops *);
+int __bch2_run_thread_with_stdio(struct thread_with_stdio *);
 int bch2_run_thread_with_stdio(struct thread_with_stdio *,
                               const struct thread_with_stdio_ops *);
 int bch2_run_thread_with_stdout(struct thread_with_stdio *,
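
The thread_with_stdio change above is a pure interface split: bch2_thread_with_stdio_init() sets up the stdio buffers and ops, __bch2_run_thread_with_stdio() starts the thread, and the old bch2_run_thread_with_stdio() becomes a thin wrapper around the two. A hedged sketch of a caller that benefits from the split (hypothetical caller, not from this merge; it assumes only the declarations shown in the hunks above):

/* Hypothetical example, for illustration only. */
static int example_start(struct thread_with_stdio *thr,
			 const struct thread_with_stdio_ops *ops)
{
	bch2_thread_with_stdio_init(thr, ops);

	/* Anything that needs thr->stdio.input/output to exist, but must
	 * happen before the kthread runs, can go here.
	 */

	return __bch2_run_thread_with_stdio(thr);
}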
index c1e6a5bbeeaffe16b93846d5c8c0e90d4cc37659..58110c96866736ad9bf74b0e40c42e3bd9de81a3 100644 (file)
@@ -2776,20 +2776,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
        size_t alloc_bytes;
 
        alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
-       data = kvmalloc(alloc_bytes, GFP_KERNEL);
+       data = kvzalloc(alloc_bytes, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);
 
-       if (total_bytes >= sizeof(*data)) {
+       if (total_bytes >= sizeof(*data))
                data->bytes_left = total_bytes - sizeof(*data);
-               data->bytes_missing = 0;
-       } else {
+       else
                data->bytes_missing = sizeof(*data) - total_bytes;
-               data->bytes_left = 0;
-       }
-
-       data->elem_cnt = 0;
-       data->elem_missed = 0;
 
        return data;
 }
index 445f7716f1e2f70b3f780e7c1f03d020f0eb6346..24a048210b15719db5ae76f09ac114e227ff0ac0 100644 (file)
@@ -817,7 +817,7 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
                                        split->block_len = em->block_len;
                                        split->orig_start = em->orig_start;
                                } else {
-                                       const u64 diff = start + len - em->start;
+                                       const u64 diff = end - em->start;
 
                                        split->block_len = split->len;
                                        split->block_start += diff;
index c65fe5de40220d3b51003bb73b3e6414eaefba08..7fed887e700c4e8e07b6ff7932434a384790b8da 100644 (file)
@@ -1145,13 +1145,13 @@ static void submit_one_async_extent(struct async_chunk *async_chunk,
                                   0, *alloc_hint, &ins, 1, 1);
        if (ret) {
                /*
-                * Here we used to try again by going back to non-compressed
-                * path for ENOSPC.  But we can't reserve space even for
-                * compressed size, how could it work for uncompressed size
-                * which requires larger size?  So here we directly go error
-                * path.
+                * We can't reserve contiguous space for the compressed size.
+                * Unlikely, but it's possible that we could have enough
+                * non-contiguous space for the uncompressed size instead.  So
+                * fall back to uncompressed.
                 */
-               goto out_free;
+               submit_uncompressed_range(inode, async_extent, locked_page);
+               goto done;
        }
 
        /* Here we're doing allocation and writeback of the compressed pages */
@@ -1203,7 +1203,6 @@ done:
 out_free_reserve:
        btrfs_dec_block_group_reservations(fs_info, ins.objectid);
        btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
-out_free:
        mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
        extent_clear_unlock_delalloc(inode, start, end,
                                     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
index c96dd66fd0f7224c9a413c4d61d51420c5a9060b..210d9c82e2ae05976fc75325562f25cd7f9dc0b2 100644 (file)
@@ -7,7 +7,7 @@
 
 #ifdef CONFIG_PRINTK
 
-#define STATE_STRING_PREFACE   ": state "
+#define STATE_STRING_PREFACE   " state "
 #define STATE_STRING_BUF_LEN   (sizeof(STATE_STRING_PREFACE) + BTRFS_FS_STATE_COUNT + 1)
 
 /*
index fa25004ab04e7b28d73dee024303c0dab4077db6..4b22cfe9a98cb0244288d0a961fc7f0e1c7daf4e 100644 (file)
@@ -1012,6 +1012,7 @@ static void scrub_stripe_read_repair_worker(struct work_struct *work)
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
                                          stripe->bg->length);
+       unsigned long repaired;
        int mirror;
        int i;
 
@@ -1078,16 +1079,15 @@ out:
         * Submit the repaired sectors.  For zoned case, we cannot do repair
         * in-place, but queue the bg to be relocated.
         */
-       if (btrfs_is_zoned(fs_info)) {
-               if (!bitmap_empty(&stripe->error_bitmap, stripe->nr_sectors))
+       bitmap_andnot(&repaired, &stripe->init_error_bitmap, &stripe->error_bitmap,
+                     stripe->nr_sectors);
+       if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) {
+               if (btrfs_is_zoned(fs_info)) {
                        btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
-       } else if (!sctx->readonly) {
-               unsigned long repaired;
-
-               bitmap_andnot(&repaired, &stripe->init_error_bitmap,
-                             &stripe->error_bitmap, stripe->nr_sectors);
-               scrub_write_sectors(sctx, stripe, repaired, false);
-               wait_scrub_stripe_io(stripe);
+               } else {
+                       scrub_write_sectors(sctx, stripe, repaired, false);
+                       wait_scrub_stripe_io(stripe);
+               }
        }
 
        scrub_stripe_report_errors(sctx, stripe);
index 253cce7ffecfe5acaa81d9b574f8398f5c134300..47b5d301038eed0f040958771b8846ecbd672f65 100644 (file)
@@ -847,6 +847,11 @@ static int test_case_7(struct btrfs_fs_info *fs_info)
                goto out;
        }
 
+       if (em->block_start != SZ_32K + SZ_4K) {
+               test_err("em->block_start is %llu, expected 36K", em->block_start);
+               goto out;
+       }
+
        free_extent_map(em);
 
        read_lock(&em_tree->lock);
index 1d5abfdf0f22a626560b9ae6bb95309f8c146be5..fb0628e680c40f16fbee3b1b38b8bfcd70d1c980 100644 (file)
@@ -769,7 +769,7 @@ static int ioctl_getfsuuid(struct file *file, void __user *argp)
        struct fsuuid2 u = { .len = sb->s_uuid_len, };
 
        if (!sb->s_uuid_len)
-               return -ENOIOCTLCMD;
+               return -ENOTTY;
 
        memcpy(&u.uuid[0], &sb->s_uuid, sb->s_uuid_len);
 
@@ -781,7 +781,7 @@ static int ioctl_get_fs_sysfs_path(struct file *file, void __user *argp)
        struct super_block *sb = file_inode(file)->i_sb;
 
        if (!strlen(sb->s_sysfs_name))
-               return -ENOIOCTLCMD;
+               return -ENOTTY;
 
        struct fs_sysfs_path u = {};
 
index 9a0d32e4b422ad09518a6c6143638d0c68fb8b84..267b622d923b1fc63507300831c3163ba38d8a19 100644 (file)
@@ -164,7 +164,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
        enum netfs_how_to_modify howto;
        enum netfs_folio_trace trace;
        unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
-       ssize_t written = 0, ret;
+       ssize_t written = 0, ret, ret2;
        loff_t i_size, pos = iocb->ki_pos, from, to;
        size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
        bool maybe_trouble = false;
@@ -172,15 +172,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
        if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
                     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
            ) {
-               if (pos < i_size_read(inode)) {
-                       ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
-                       if (ret < 0) {
-                               goto out;
-                       }
-               }
-
                wbc_attach_fdatawrite_inode(&wbc, mapping->host);
 
+               ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
+               if (ret < 0) {
+                       wbc_detach_inode(&wbc);
+                       goto out;
+               }
+
                wreq = netfs_begin_writethrough(iocb, iter->count);
                if (IS_ERR(wreq)) {
                        wbc_detach_inode(&wbc);
@@ -395,10 +394,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 
 out:
        if (unlikely(wreq)) {
-               ret = netfs_end_writethrough(wreq, iocb);
+               ret2 = netfs_end_writethrough(wreq, iocb);
                wbc_detach_inode(&wbc);
-               if (ret == -EIOCBQUEUED)
-                       return ret;
+               if (ret2 == -EIOCBQUEUED)
+                       return ret2;
+               if (ret == 0)
+                       ret = ret2;
        }
 
        iocb->ki_pos += written;
index 87c9547989f69ec8cb38b73da6868f52a84ff673..e88aca0c6e8ef17a613800f0a321a4c403d9e8e2 100644 (file)
@@ -983,15 +983,7 @@ static struct workqueue_struct *callback_wq;
 static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
 {
        trace_nfsd_cb_queue(cb->cb_clp, cb);
-       return queue_delayed_work(callback_wq, &cb->cb_work, 0);
-}
-
-static void nfsd4_queue_cb_delayed(struct nfsd4_callback *cb,
-                                  unsigned long msecs)
-{
-       trace_nfsd_cb_queue(cb->cb_clp, cb);
-       queue_delayed_work(callback_wq, &cb->cb_work,
-                          msecs_to_jiffies(msecs));
+       return queue_work(callback_wq, &cb->cb_work);
 }
 
 static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
@@ -1490,7 +1482,7 @@ static void
 nfsd4_run_cb_work(struct work_struct *work)
 {
        struct nfsd4_callback *cb =
-               container_of(work, struct nfsd4_callback, cb_work.work);
+               container_of(work, struct nfsd4_callback, cb_work);
        struct nfs4_client *clp = cb->cb_clp;
        struct rpc_clnt *clnt;
        int flags;
@@ -1502,16 +1494,8 @@ nfsd4_run_cb_work(struct work_struct *work)
 
        clnt = clp->cl_cb_client;
        if (!clnt) {
-               if (test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags))
-                       nfsd41_destroy_cb(cb);
-               else {
-                       /*
-                        * XXX: Ideally, we could wait for the client to
-                        *      reconnect, but I haven't figured out how
-                        *      to do that yet.
-                        */
-                       nfsd4_queue_cb_delayed(cb, 25);
-               }
+               /* Callback channel broken, or client killed; give up: */
+               nfsd41_destroy_cb(cb);
                return;
        }
 
@@ -1544,7 +1528,7 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
        cb->cb_msg.rpc_argp = cb;
        cb->cb_msg.rpc_resp = cb;
        cb->cb_ops = ops;
-       INIT_DELAYED_WORK(&cb->cb_work, nfsd4_run_cb_work);
+       INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
        cb->cb_status = 0;
        cb->cb_need_restart = false;
        cb->cb_holds_slot = false;
index 01c6f344564693dd0987413c0ed116458782a677..2ed0fcf879fd17be57d1d963ad74e864d73315f5 100644 (file)
@@ -68,7 +68,7 @@ struct nfsd4_callback {
        struct nfs4_client *cb_clp;
        struct rpc_message cb_msg;
        const struct nfsd4_callback_ops *cb_ops;
-       struct delayed_work cb_work;
+       struct work_struct cb_work;
        int cb_seq_status;
        int cb_status;
        bool cb_need_restart;
index cdfdf51e55d797e0e5fecc349e2f157f09fea7d8..7bc31d69f680dd996f8503c9d06f0a0b96522e80 100644 (file)
@@ -46,3 +46,12 @@ config NTFS3_FS_POSIX_ACL
           NOTE: this is a Linux-only feature. Windows will ignore these ACLs.
 
          If you don't know what Access Control Lists are, say N.
+
+config NTFS_FS
+       tristate "NTFS file system support"
+       select NTFS3_FS
+       select BUFFER_HEAD
+       select NLS
+       help
+         This config option is here only for backward compatibility. NTFS
+         filesystem is now handled by the NTFS3 driver.
index 5cf3d9decf646b1935517e8b564d807626e60e0f..263635199b60d38a23b98c10f31d2105d832eda4 100644 (file)
@@ -616,4 +616,11 @@ const struct file_operations ntfs_dir_operations = {
        .compat_ioctl   = ntfs_compat_ioctl,
 #endif
 };
+
+const struct file_operations ntfs_legacy_dir_operations = {
+       .llseek         = generic_file_llseek,
+       .read           = generic_read_dir,
+       .iterate_shared = ntfs_readdir,
+       .open           = ntfs_file_open,
+};
 // clang-format on
index 5418662c80d8878afe72a8b8e8ffc43cc834b176..b73969e05052ae8bcd49740057405e3e71c0852a 100644 (file)
@@ -1236,4 +1236,12 @@ const struct file_operations ntfs_file_operations = {
        .fallocate      = ntfs_fallocate,
        .release        = ntfs_file_release,
 };
+
+const struct file_operations ntfs_legacy_file_operations = {
+       .llseek         = generic_file_llseek,
+       .read_iter      = ntfs_file_read_iter,
+       .splice_read    = ntfs_file_splice_read,
+       .open           = ntfs_file_open,
+       .release        = ntfs_file_release,
+};
 // clang-format on
index eb7a8c9fba0183f40096d673473be4dffaa7c4c8..d273eda1cf45d68e90cc56866fe689a629be43b5 100644 (file)
@@ -440,7 +440,10 @@ end_enum:
                 * Usually a hard links to directories are disabled.
                 */
                inode->i_op = &ntfs_dir_inode_operations;
-               inode->i_fop = &ntfs_dir_operations;
+               if (is_legacy_ntfs(inode->i_sb))
+                       inode->i_fop = &ntfs_legacy_dir_operations;
+               else
+                       inode->i_fop = &ntfs_dir_operations;
                ni->i_valid = 0;
        } else if (S_ISLNK(mode)) {
                ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
@@ -450,7 +453,10 @@ end_enum:
        } else if (S_ISREG(mode)) {
                ni->std_fa &= ~FILE_ATTRIBUTE_DIRECTORY;
                inode->i_op = &ntfs_file_inode_operations;
-               inode->i_fop = &ntfs_file_operations;
+               if (is_legacy_ntfs(inode->i_sb))
+                       inode->i_fop = &ntfs_legacy_file_operations;
+               else
+                       inode->i_fop = &ntfs_file_operations;
                inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
                                                              &ntfs_aops;
                if (ino != MFT_REC_MFT)
@@ -1614,7 +1620,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
 
        if (S_ISDIR(mode)) {
                inode->i_op = &ntfs_dir_inode_operations;
-               inode->i_fop = &ntfs_dir_operations;
+               if (is_legacy_ntfs(inode->i_sb))
+                       inode->i_fop = &ntfs_legacy_dir_operations;
+               else
+                       inode->i_fop = &ntfs_dir_operations;
        } else if (S_ISLNK(mode)) {
                inode->i_op = &ntfs_link_inode_operations;
                inode->i_fop = NULL;
@@ -1623,7 +1632,10 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
                inode_nohighmem(inode);
        } else if (S_ISREG(mode)) {
                inode->i_op = &ntfs_file_inode_operations;
-               inode->i_fop = &ntfs_file_operations;
+               if (is_legacy_ntfs(inode->i_sb))
+                       inode->i_fop = &ntfs_legacy_file_operations;
+               else
+                       inode->i_fop = &ntfs_file_operations;
                inode->i_mapping->a_ops = is_compressed(ni) ? &ntfs_aops_cmpr :
                                                              &ntfs_aops;
                init_rwsem(&ni->file.run_lock);
index 79356fd29a14141de34ed006517b153fd9e4872b..5f4d288c6adfb955fb21bc513cae69f80cb61b20 100644 (file)
@@ -493,6 +493,7 @@ struct inode *dir_search_u(struct inode *dir, const struct cpu_str *uni,
                           struct ntfs_fnd *fnd);
 bool dir_is_empty(struct inode *dir);
 extern const struct file_operations ntfs_dir_operations;
+extern const struct file_operations ntfs_legacy_dir_operations;
 
 /* Globals from file.c */
 int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
@@ -507,6 +508,7 @@ long ntfs_compat_ioctl(struct file *filp, u32 cmd, unsigned long arg);
 extern const struct inode_operations ntfs_special_inode_operations;
 extern const struct inode_operations ntfs_file_inode_operations;
 extern const struct file_operations ntfs_file_operations;
+extern const struct file_operations ntfs_legacy_file_operations;
 
 /* Globals from frecord.c */
 void ni_remove_mi(struct ntfs_inode *ni, struct mft_inode *mi);
@@ -1154,4 +1156,6 @@ static inline void le64_sub_cpu(__le64 *var, u64 val)
        *var = cpu_to_le64(le64_to_cpu(*var) - val);
 }
 
+bool is_legacy_ntfs(struct super_block *sb);
+
 #endif /* _LINUX_NTFS3_NTFS_FS_H */
index 9df7c20d066f6125dda2406a0481e4eab300bb80..b26d95a8d3274d061fc2f0dc2ba7f19cd385db8a 100644 (file)
@@ -408,6 +408,12 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
        struct ntfs_mount_options *new_opts = fc->fs_private;
        int ro_rw;
 
+       /* If ntfs3 is used as legacy ntfs enforce read-only mode. */
+       if (is_legacy_ntfs(sb)) {
+               fc->sb_flags |= SB_RDONLY;
+               goto out;
+       }
+
        ro_rw = sb_rdonly(sb) && !(fc->sb_flags & SB_RDONLY);
        if (ro_rw && (sbi->flags & NTFS_FLAGS_NEED_REPLAY)) {
                errorf(fc,
@@ -427,8 +433,6 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
                        fc,
                        "ntfs3: Cannot use different iocharset when remounting!");
 
-       sync_filesystem(sb);
-
        if (ro_rw && (sbi->volume.flags & VOLUME_FLAG_DIRTY) &&
            !new_opts->force) {
                errorf(fc,
@@ -436,6 +440,8 @@ static int ntfs_fs_reconfigure(struct fs_context *fc)
                return -EINVAL;
        }
 
+out:
+       sync_filesystem(sb);
        swap(sbi->options, fc->fs_private);
 
        return 0;
@@ -1613,6 +1619,8 @@ load_root:
        }
 #endif
 
+       if (is_legacy_ntfs(sb))
+               sb->s_flags |= SB_RDONLY;
        return 0;
 
 put_inode_out:
@@ -1730,7 +1738,7 @@ static const struct fs_context_operations ntfs_context_ops = {
  * This will be called on mount/remount. We will first initialize
  * options so that on remount we can just use those.
  */
-static int ntfs_init_fs_context(struct fs_context *fc)
+static int __ntfs_init_fs_context(struct fs_context *fc)
 {
        struct ntfs_mount_options *opts;
        struct ntfs_sb_info *sbi;
@@ -1778,6 +1786,11 @@ free_opts:
        return -ENOMEM;
 }
 
+static int ntfs_init_fs_context(struct fs_context *fc)
+{
+       return __ntfs_init_fs_context(fc);
+}
+
 static void ntfs3_kill_sb(struct super_block *sb)
 {
        struct ntfs_sb_info *sbi = sb->s_fs_info;
@@ -1798,6 +1811,50 @@ static struct file_system_type ntfs_fs_type = {
        .kill_sb                = ntfs3_kill_sb,
        .fs_flags               = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
 };
+
+#if IS_ENABLED(CONFIG_NTFS_FS)
+static int ntfs_legacy_init_fs_context(struct fs_context *fc)
+{
+       int ret;
+
+       ret = __ntfs_init_fs_context(fc);
+       /* If ntfs3 is used as legacy ntfs enforce read-only mode. */
+       fc->sb_flags |= SB_RDONLY;
+       return ret;
+}
+
+static struct file_system_type ntfs_legacy_fs_type = {
+       .owner                  = THIS_MODULE,
+       .name                   = "ntfs",
+       .init_fs_context        = ntfs_legacy_init_fs_context,
+       .parameters             = ntfs_fs_parameters,
+       .kill_sb                = ntfs3_kill_sb,
+       .fs_flags               = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
+};
+MODULE_ALIAS_FS("ntfs");
+
+static inline void register_as_ntfs_legacy(void)
+{
+       int err = register_filesystem(&ntfs_legacy_fs_type);
+       if (err)
+               pr_warn("ntfs3: Failed to register legacy ntfs filesystem driver: %d\n", err);
+}
+
+static inline void unregister_as_ntfs_legacy(void)
+{
+       unregister_filesystem(&ntfs_legacy_fs_type);
+}
+bool is_legacy_ntfs(struct super_block *sb)
+{
+       return sb->s_type == &ntfs_legacy_fs_type;
+}
+#else
+static inline void register_as_ntfs_legacy(void) {}
+static inline void unregister_as_ntfs_legacy(void) {}
+bool is_legacy_ntfs(struct super_block *sb) { return false; }
+#endif
+
+
 // clang-format on
 
 static int __init init_ntfs_fs(void)
@@ -1832,6 +1889,7 @@ static int __init init_ntfs_fs(void)
                goto out1;
        }
 
+       register_as_ntfs_legacy();
        err = register_filesystem(&ntfs_fs_type);
        if (err)
                goto out;
@@ -1849,6 +1907,7 @@ static void __exit exit_ntfs_fs(void)
        rcu_barrier();
        kmem_cache_destroy(ntfs_inode_cachep);
        unregister_filesystem(&ntfs_fs_type);
+       unregister_as_ntfs_legacy();
        ntfs3_exit_bitmap();
 
 #ifdef CONFIG_PROC_FS
index d41eedbff674abb0e62e52ae6cc585aaa5d83d77..39277c37185cac3327c0f002849b1f5fc621cd05 100644 (file)
@@ -389,6 +389,7 @@ cifs_alloc_inode(struct super_block *sb)
         * server, can not assume caching of file data or metadata.
         */
        cifs_set_oplock_level(cifs_inode, 0);
+       cifs_inode->lease_granted = false;
        cifs_inode->flags = 0;
        spin_lock_init(&cifs_inode->writers_lock);
        cifs_inode->writers = 0;
@@ -739,6 +740,8 @@ static void cifs_umount_begin(struct super_block *sb)
 
        spin_lock(&cifs_tcp_ses_lock);
        spin_lock(&tcon->tc_lock);
+       trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                           netfs_trace_tcon_ref_see_umount);
        if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
                /* we have other mounts to same share or we have
                   already tried to umount this and woken up
index d6669ce4ae87f07415b150eaffcbf429c4fe74bd..6ff35570db813a533ef9221fad6a7aea99e6d1d2 100644 (file)
@@ -1190,6 +1190,7 @@ struct cifs_fattr {
  */
 struct cifs_tcon {
        struct list_head tcon_list;
+       int debug_id;           /* Debugging for tracing */
        int tc_count;
        struct list_head rlist; /* reconnect list */
        spinlock_t tc_lock;  /* protect anything here that is not protected */
@@ -1276,7 +1277,9 @@ struct cifs_tcon {
        __u32 max_cached_dirs;
 #ifdef CONFIG_CIFS_FSCACHE
        u64 resource_id;                /* server resource id */
+       bool fscache_acquired;          /* T if we've tried acquiring a cookie */
        struct fscache_volume *fscache; /* cookie for share */
+       struct mutex fscache_lock;      /* Prevent regetting a cookie */
 #endif
        struct list_head pending_opens; /* list of incomplete opens */
        struct cached_fids *cfids;
index 8e0a348f1f660ebc14498c7fd7d342693411c106..fbc358c09da3b1d7ffc495d0c461e32509f95c1c 100644 (file)
@@ -303,7 +303,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
                     struct TCP_Server_Info *primary_server);
 extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
                                 int from_reconnect);
-extern void cifs_put_tcon(struct cifs_tcon *tcon);
+extern void cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
 
 extern void cifs_release_automount_timer(void);
 
@@ -530,8 +530,9 @@ extern int CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses);
 
 extern struct cifs_ses *sesInfoAlloc(void);
 extern void sesInfoFree(struct cifs_ses *);
-extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled);
-extern void tconInfoFree(struct cifs_tcon *);
+extern struct cifs_tcon *tcon_info_alloc(bool dir_leases_enabled,
+                                        enum smb3_tcon_ref_trace trace);
+extern void tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace);
 
 extern int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
                   __u32 *pexpected_response_sequence_number);
@@ -721,8 +722,6 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
                return options;
 }
 
-struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
-void cifs_put_tcon_super(struct super_block *sb);
 int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry);
 
 /* Put references of @ses and its children */
index 4e35970681bf052dc343c23935549600f5ce8859..7a16e12f5da879bbbb8ace98a4ec4f30aafec33e 100644 (file)
@@ -1943,7 +1943,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        }
 
        /* no need to setup directory caching on IPC share, so pass in false */
-       tcon = tcon_info_alloc(false);
+       tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_ipc);
        if (tcon == NULL)
                return -ENOMEM;
 
@@ -1960,7 +1960,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 
        if (rc) {
                cifs_server_dbg(VFS, "failed to connect to IPC (rc=%d)\n", rc);
-               tconInfoFree(tcon);
+               tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc_fail);
                goto out;
        }
 
@@ -2043,7 +2043,7 @@ void __cifs_put_smb_ses(struct cifs_ses *ses)
         * files on session close, as specified in MS-SMB2 3.3.5.6 Receiving an
         * SMB2 LOGOFF Request.
         */
-       tconInfoFree(tcon);
+       tconInfoFree(tcon, netfs_trace_tcon_ref_free_ipc);
        if (do_logoff) {
                xid = get_xid();
                rc = server->ops->logoff(xid, ses);
@@ -2432,6 +2432,8 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
                        continue;
                }
                ++tcon->tc_count;
+               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                   netfs_trace_tcon_ref_get_find);
                spin_unlock(&tcon->tc_lock);
                spin_unlock(&cifs_tcp_ses_lock);
                return tcon;
@@ -2441,7 +2443,7 @@ cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
 }
 
 void
-cifs_put_tcon(struct cifs_tcon *tcon)
+cifs_put_tcon(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
 {
        unsigned int xid;
        struct cifs_ses *ses;
@@ -2457,6 +2459,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
        spin_lock(&cifs_tcp_ses_lock);
        spin_lock(&tcon->tc_lock);
+       trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count - 1, trace);
        if (--tcon->tc_count > 0) {
                spin_unlock(&tcon->tc_lock);
                spin_unlock(&cifs_tcp_ses_lock);
@@ -2493,7 +2496,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
        _free_xid(xid);
 
        cifs_fscache_release_super_cookie(tcon);
-       tconInfoFree(tcon);
+       tconInfoFree(tcon, netfs_trace_tcon_ref_free);
        cifs_put_smb_ses(ses);
 }
 
@@ -2547,7 +2550,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
                nohandlecache = ctx->nohandlecache;
        else
                nohandlecache = true;
-       tcon = tcon_info_alloc(!nohandlecache);
+       tcon = tcon_info_alloc(!nohandlecache, netfs_trace_tcon_ref_new);
        if (tcon == NULL) {
                rc = -ENOMEM;
                goto out_fail;
@@ -2737,7 +2740,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        return tcon;
 
 out_fail:
-       tconInfoFree(tcon);
+       tconInfoFree(tcon, netfs_trace_tcon_ref_free_fail);
        return ERR_PTR(rc);
 }
 
@@ -2754,7 +2757,7 @@ cifs_put_tlink(struct tcon_link *tlink)
        }
 
        if (!IS_ERR(tlink_tcon(tlink)))
-               cifs_put_tcon(tlink_tcon(tlink));
+               cifs_put_tcon(tlink_tcon(tlink), netfs_trace_tcon_ref_put_tlink);
        kfree(tlink);
 }
 
@@ -3319,7 +3322,7 @@ void cifs_mount_put_conns(struct cifs_mount_ctx *mnt_ctx)
        int rc = 0;
 
        if (mnt_ctx->tcon)
-               cifs_put_tcon(mnt_ctx->tcon);
+               cifs_put_tcon(mnt_ctx->tcon, netfs_trace_tcon_ref_put_mnt_ctx);
        else if (mnt_ctx->ses)
                cifs_put_smb_ses(mnt_ctx->ses);
        else if (mnt_ctx->server)
index 6c727d8c31e870ddd0f809db12b21aae76ac80cd..3bbac925d0766b8c456d731355e71b1594b94af3 100644 (file)
@@ -748,6 +748,16 @@ static int smb3_fs_context_validate(struct fs_context *fc)
        /* set the port that we got earlier */
        cifs_set_port((struct sockaddr *)&ctx->dstaddr, ctx->port);
 
+       if (ctx->uid_specified && !ctx->forceuid_specified) {
+               ctx->override_uid = 1;
+               pr_notice("enabling forceuid mount option implicitly because uid= option is specified\n");
+       }
+
+       if (ctx->gid_specified && !ctx->forcegid_specified) {
+               ctx->override_gid = 1;
+               pr_notice("enabling forcegid mount option implicitly because gid= option is specified\n");
+       }
+
        if (ctx->override_uid && !ctx->uid_specified) {
                ctx->override_uid = 0;
                pr_notice("ignoring forceuid mount option specified with no uid= option\n");
@@ -1019,12 +1029,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                        ctx->override_uid = 0;
                else
                        ctx->override_uid = 1;
+               ctx->forceuid_specified = true;
                break;
        case Opt_forcegid:
                if (result.negated)
                        ctx->override_gid = 0;
                else
                        ctx->override_gid = 1;
+               ctx->forcegid_specified = true;
                break;
        case Opt_perm:
                if (result.negated)
index a947bddeba273ea850b3502f07555a19316266a6..cf577ec0dd0ac4a8f5a3131e8ed0c3ce9574a4d7 100644 (file)
@@ -165,6 +165,8 @@ enum cifs_param {
 };
 
 struct smb3_fs_context {
+       bool forceuid_specified;
+       bool forcegid_specified;
        bool uid_specified;
        bool cruid_specified;
        bool gid_specified;
index 340efce8f052951a308329b70b5846d9e57477f5..1a895e6243ee9aaf21fc8405893ce52ed14303a2 100644 (file)
@@ -43,12 +43,23 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
        char *key;
        int ret = -ENOMEM;
 
+       if (tcon->fscache_acquired)
+               return 0;
+
+       mutex_lock(&tcon->fscache_lock);
+       if (tcon->fscache_acquired) {
+               mutex_unlock(&tcon->fscache_lock);
+               return 0;
+       }
+       tcon->fscache_acquired = true;
+
        tcon->fscache = NULL;
        switch (sa->sa_family) {
        case AF_INET:
        case AF_INET6:
                break;
        default:
+               mutex_unlock(&tcon->fscache_lock);
                cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
                return -EINVAL;
        }
@@ -57,6 +68,7 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
 
        sharename = extract_sharename(tcon->tree_name);
        if (IS_ERR(sharename)) {
+               mutex_unlock(&tcon->fscache_lock);
                cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
                return PTR_ERR(sharename);
        }
@@ -82,6 +94,11 @@ int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
                }
                pr_err("Cache volume key already in use (%s)\n", key);
                vcookie = NULL;
+               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                   netfs_trace_tcon_ref_see_fscache_collision);
+       } else {
+               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                   netfs_trace_tcon_ref_see_fscache_okay);
        }
 
        tcon->fscache = vcookie;
@@ -90,6 +107,7 @@ out_2:
        kfree(key);
 out:
        kfree(sharename);
+       mutex_unlock(&tcon->fscache_lock);
        return ret;
 }
 
@@ -102,6 +120,8 @@ void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
        cifs_fscache_fill_volume_coherency(tcon, &cd);
        fscache_relinquish_volume(tcon->fscache, &cd, false);
        tcon->fscache = NULL;
+       trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                           netfs_trace_tcon_ref_see_fscache_relinq);
 }
 
 void cifs_fscache_get_inode_cookie(struct inode *inode)
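
The fscache hunks above wrap the one-time volume cookie acquisition in a check/lock/re-check pattern: an unlocked fast path returns early once fscache_acquired is set, the new mutex serializes the slow path, and the flag is set before the actual work so a failed attempt is not retried on every mount. A rough userspace analogue of that acquire-once shape (hypothetical names, pthreads, and glossing over memory-ordering details):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool acquired;
static int cookie;			/* stands in for the fscache volume cookie */

static int get_cookie_once(void)
{
	if (acquired)			/* unlocked fast path */
		return 0;

	pthread_mutex_lock(&init_lock);
	if (acquired) {			/* raced with another caller */
		pthread_mutex_unlock(&init_lock);
		return 0;
	}
	acquired = true;		/* set before the work: try only once */

	cookie = 42;			/* expensive one-time setup goes here */

	pthread_mutex_unlock(&init_lock);
	return 0;
}

int main(void)
{
	get_cookie_once();
	get_cookie_once();		/* second call takes the fast path */
	printf("cookie=%d\n", cookie);
	return 0;
}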
index 7d15a1969b818439515b5188e8662a3b8f1276ce..07c468ddb88a89d65f8a48433a055759deb3da26 100644 (file)
@@ -111,9 +111,10 @@ sesInfoFree(struct cifs_ses *buf_to_free)
 }
 
 struct cifs_tcon *
-tcon_info_alloc(bool dir_leases_enabled)
+tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
 {
        struct cifs_tcon *ret_buf;
+       static atomic_t tcon_debug_id;
 
        ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
        if (!ret_buf)
@@ -130,7 +131,8 @@ tcon_info_alloc(bool dir_leases_enabled)
 
        atomic_inc(&tconInfoAllocCount);
        ret_buf->status = TID_NEW;
-       ++ret_buf->tc_count;
+       ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
+       ret_buf->tc_count = 1;
        spin_lock_init(&ret_buf->tc_lock);
        INIT_LIST_HEAD(&ret_buf->openFileList);
        INIT_LIST_HEAD(&ret_buf->tcon_list);
@@ -139,17 +141,22 @@ tcon_info_alloc(bool dir_leases_enabled)
        atomic_set(&ret_buf->num_local_opens, 0);
        atomic_set(&ret_buf->num_remote_opens, 0);
        ret_buf->stats_from_time = ktime_get_real_seconds();
+#ifdef CONFIG_CIFS_FSCACHE
+       mutex_init(&ret_buf->fscache_lock);
+#endif
+       trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
 
        return ret_buf;
 }
 
 void
-tconInfoFree(struct cifs_tcon *tcon)
+tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
 {
        if (tcon == NULL) {
                cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
                return;
        }
+       trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
        free_cached_dirs(tcon->cfids);
        atomic_dec(&tconInfoAllocCount);
        kfree(tcon->nativeFileSystem);
index cc72be5a93a933b09c45256c2a7d7615478f54c0..677ef6f99a5be407fb9c73baba7918cf5e28244e 100644 (file)
@@ -767,7 +767,7 @@ smb2_cancelled_close_fid(struct work_struct *work)
        if (rc)
                cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);
 
-       cifs_put_tcon(tcon);
+       cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close_fid);
        kfree(cancelled);
 }
 
@@ -811,6 +811,8 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
        if (tcon->tc_count <= 0) {
                struct TCP_Server_Info *server = NULL;
 
+               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                   netfs_trace_tcon_ref_see_cancelled_close);
                WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
                spin_unlock(&cifs_tcp_ses_lock);
 
@@ -823,12 +825,14 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
                return 0;
        }
        tcon->tc_count++;
+       trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                           netfs_trace_tcon_ref_get_cancelled_close);
        spin_unlock(&cifs_tcp_ses_lock);
 
        rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
                                         persistent_fid, volatile_fid);
        if (rc)
-               cifs_put_tcon(tcon);
+               cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_close);
 
        return rc;
 }
@@ -856,7 +860,7 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
                                         rsp->PersistentFileId,
                                         rsp->VolatileFileId);
        if (rc)
-               cifs_put_tcon(tcon);
+               cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cancelled_mid);
 
        return rc;
 }
index 78c94d0350fe9970fab31564aeba6870d71859bd..28f0b7d19d534b18bff680bb739247889ac7675b 100644 (file)
@@ -2915,8 +2915,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                tcon = list_first_entry_or_null(&ses->tcon_list,
                                                struct cifs_tcon,
                                                tcon_list);
-               if (tcon)
+               if (tcon) {
                        tcon->tc_count++;
+                       trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                           netfs_trace_tcon_ref_get_dfs_refer);
+               }
                spin_unlock(&cifs_tcp_ses_lock);
        }
 
@@ -2980,6 +2983,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
                /* ipc tcons are not refcounted */
                spin_lock(&cifs_tcp_ses_lock);
                tcon->tc_count--;
+               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                   netfs_trace_tcon_ref_dec_dfs_refer);
                /* tc_count can never go negative */
                WARN_ON(tcon->tc_count < 0);
                spin_unlock(&cifs_tcp_ses_lock);
index 86c647a947ccd1065a8edb0712e113351839b96f..a5efce03cb58e2d995862f8c3b0cc081f8beed5b 100644 (file)
@@ -4138,6 +4138,8 @@ void smb2_reconnect_server(struct work_struct *work)
                list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
                        if (tcon->need_reconnect || tcon->need_reopen_files) {
                                tcon->tc_count++;
+                               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                                   netfs_trace_tcon_ref_get_reconnect_server);
                                list_add_tail(&tcon->rlist, &tmp_list);
                                tcon_selected = true;
                        }
@@ -4176,14 +4178,14 @@ void smb2_reconnect_server(struct work_struct *work)
                if (tcon->ipc)
                        cifs_put_smb_ses(tcon->ses);
                else
-                       cifs_put_tcon(tcon);
+                       cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_reconnect_server);
        }
 
        if (!ses_exist)
                goto done;
 
        /* allocate a dummy tcon struct used for reconnect */
-       tcon = tcon_info_alloc(false);
+       tcon = tcon_info_alloc(false, netfs_trace_tcon_ref_new_reconnect_server);
        if (!tcon) {
                resched = true;
                list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
@@ -4206,7 +4208,7 @@ void smb2_reconnect_server(struct work_struct *work)
                list_del_init(&ses->rlist);
                cifs_put_smb_ses(ses);
        }
-       tconInfoFree(tcon);
+       tconInfoFree(tcon, netfs_trace_tcon_ref_free_reconnect_server);
 
 done:
        cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
index 1d6e54f7879e6a5e8034a90d30471fecc02d2d1b..02135a6053051ee6848f8df90be30fc2c805af6d 100644 (file)
@@ -189,6 +189,8 @@ smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32  tid)
                if (tcon->tid != tid)
                        continue;
                ++tcon->tc_count;
+               trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
+                                   netfs_trace_tcon_ref_get_find_sess_tcon);
                return tcon;
        }
 
index 5e83cb9da9028e0d15383e6e19413ea1da31e553..604e52876cd2d98e9a86941b0c527dbe6d8abe6a 100644 (file)
@@ -3,6 +3,9 @@
  *   Copyright (C) 2018, Microsoft Corporation.
  *
  *   Author(s): Steve French <stfrench@microsoft.com>
+ *
+ * Please use this 3-part article as a reference for writing new tracepoints:
+ * https://lwn.net/Articles/379903/
  */
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM cifs
 #include <linux/inet.h>
 
 /*
- * Please use this 3-part article as a reference for writing new tracepoints:
- * https://lwn.net/Articles/379903/
+ * Specify enums for tracing information.
+ */
+#define smb3_tcon_ref_traces                                         \
+       EM(netfs_trace_tcon_ref_dec_dfs_refer,          "DEC DfsRef") \
+       EM(netfs_trace_tcon_ref_free,                   "FRE       ") \
+       EM(netfs_trace_tcon_ref_free_fail,              "FRE Fail  ") \
+       EM(netfs_trace_tcon_ref_free_ipc,               "FRE Ipc   ") \
+       EM(netfs_trace_tcon_ref_free_ipc_fail,          "FRE Ipc-F ") \
+       EM(netfs_trace_tcon_ref_free_reconnect_server,  "FRE Reconn") \
+       EM(netfs_trace_tcon_ref_get_cancelled_close,    "GET Cn-Cls") \
+       EM(netfs_trace_tcon_ref_get_dfs_refer,          "GET DfsRef") \
+       EM(netfs_trace_tcon_ref_get_find,               "GET Find  ") \
+       EM(netfs_trace_tcon_ref_get_find_sess_tcon,     "GET FndSes") \
+       EM(netfs_trace_tcon_ref_get_reconnect_server,   "GET Reconn") \
+       EM(netfs_trace_tcon_ref_new,                    "NEW       ") \
+       EM(netfs_trace_tcon_ref_new_ipc,                "NEW Ipc   ") \
+       EM(netfs_trace_tcon_ref_new_reconnect_server,   "NEW Reconn") \
+       EM(netfs_trace_tcon_ref_put_cancelled_close,    "PUT Cn-Cls") \
+       EM(netfs_trace_tcon_ref_put_cancelled_close_fid, "PUT Cn-Fid") \
+       EM(netfs_trace_tcon_ref_put_cancelled_mid,      "PUT Cn-Mid") \
+       EM(netfs_trace_tcon_ref_put_mnt_ctx,            "PUT MntCtx") \
+       EM(netfs_trace_tcon_ref_put_reconnect_server,   "PUT Reconn") \
+       EM(netfs_trace_tcon_ref_put_tlink,              "PUT Tlink ") \
+       EM(netfs_trace_tcon_ref_see_cancelled_close,    "SEE Cn-Cls") \
+       EM(netfs_trace_tcon_ref_see_fscache_collision,  "SEE FV-CO!") \
+       EM(netfs_trace_tcon_ref_see_fscache_okay,       "SEE FV-Ok ") \
+       EM(netfs_trace_tcon_ref_see_fscache_relinq,     "SEE FV-Rlq") \
+       E_(netfs_trace_tcon_ref_see_umount,             "SEE Umount")
+
+#undef EM
+#undef E_
+
+/*
+ * Define those tracing enums.
+ */
+#ifndef __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __SMB3_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum smb3_tcon_ref_trace { smb3_tcon_ref_traces } __mode(byte);
+
+#undef EM
+#undef E_
+#endif
+
+/*
+ * Export enum symbols via userspace.
+ */
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+smb3_tcon_ref_traces;
+
+#undef EM
+#undef E_
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
  */
+#define EM(a, b)       { a, b },
+#define E_(a, b)       { a, b }
 
 /* For logging errors in read or write */
 DECLARE_EVENT_CLASS(smb3_rw_err_class,
@@ -1125,6 +1189,30 @@ DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
 DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
 DEFINE_SMB3_CREDIT_EVENT(set_credits);
 
+
+TRACE_EVENT(smb3_tcon_ref,
+           TP_PROTO(unsigned int tcon_debug_id, int ref,
+                    enum smb3_tcon_ref_trace trace),
+           TP_ARGS(tcon_debug_id, ref, trace),
+           TP_STRUCT__entry(
+                   __field(unsigned int,               tcon)
+                   __field(int,                        ref)
+                   __field(enum smb3_tcon_ref_trace,   trace)
+                            ),
+           TP_fast_assign(
+                   __entry->tcon       = tcon_debug_id;
+                   __entry->ref        = ref;
+                   __entry->trace      = trace;
+                          ),
+           TP_printk("TC=%08x %s r=%u",
+                     __entry->tcon,
+                     __print_symbolic(__entry->trace, smb3_tcon_ref_traces),
+                     __entry->ref)
+           );
+
+
+#undef EM
+#undef E_
 #endif /* _CIFS_TRACE_H */
 
 #undef TRACE_INCLUDE_PATH
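
The new smb3_tcon_ref tracepoint above relies on the kernel's usual EM()/E_() "X-macro" idiom: the list of trace reasons is written once, and the two macros are then redefined for each consumer — once to declare the enum, once to export the symbols with TRACE_DEFINE_ENUM(), and once to build the string table consumed by __print_symbolic(). A stripped-down userspace version of the same technique (hypothetical names, no tracing infrastructure involved):

#include <stdio.h>

/* Write the list once; every consumer below reuses it. */
#define TCON_REF_TRACES				\
	EM(ref_get_find,  "GET Find")		\
	EM(ref_put_tlink, "PUT Tlink")		\
	E_(ref_free,      "FRE")

/* Pass 1: declare the enum constants. */
#define EM(a, b) a,
#define E_(a, b) a
enum tcon_ref_trace { TCON_REF_TRACES };
#undef EM
#undef E_

/* Pass 2: build a table mapping constants to display strings. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int id; const char *name; } trace_names[] = { TCON_REF_TRACES };
#undef EM
#undef E_

int main(void)
{
	for (unsigned i = 0; i < sizeof(trace_names) / sizeof(trace_names[0]); i++)
		printf("%d -> %s\n", trace_names[i].id, trace_names[i].name);
	return 0;
}

Keeping the list in one place means a new reference-tracking site needs only one new EM() line; the enum value and the label printed in the trace output stay in sync automatically.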
index 1b594307c9d5a01e0b9d62b47b331dbd31c6dfaf..202ff912815604bd7b22700fc5ff598dd0a769ac 100644 (file)
@@ -711,7 +711,7 @@ struct smb2_close_rsp {
        __le16 StructureSize; /* 60 */
        __le16 Flags;
        __le32 Reserved;
-       struct_group(network_open_info,
+       struct_group_attr(network_open_info, __packed,
                __le64 CreationTime;
                __le64 LastAccessTime;
                __le64 LastWriteTime;
index 686b321c5a8bb5f0a1189023a3311e85aaf84c9e..f4e55199938d58023672fcd3006f7e97502fa6a5 100644 (file)
@@ -340,23 +340,24 @@ enum KSMBD_TREE_CONN_STATUS {
 /*
  * Share config flags.
  */
-#define KSMBD_SHARE_FLAG_INVALID               (0)
-#define KSMBD_SHARE_FLAG_AVAILABLE             BIT(0)
-#define KSMBD_SHARE_FLAG_BROWSEABLE            BIT(1)
-#define KSMBD_SHARE_FLAG_WRITEABLE             BIT(2)
-#define KSMBD_SHARE_FLAG_READONLY              BIT(3)
-#define KSMBD_SHARE_FLAG_GUEST_OK              BIT(4)
-#define KSMBD_SHARE_FLAG_GUEST_ONLY            BIT(5)
-#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS       BIT(6)
-#define KSMBD_SHARE_FLAG_OPLOCKS               BIT(7)
-#define KSMBD_SHARE_FLAG_PIPE                  BIT(8)
-#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES                BIT(9)
-#define KSMBD_SHARE_FLAG_INHERIT_OWNER         BIT(10)
-#define KSMBD_SHARE_FLAG_STREAMS               BIT(11)
-#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS       BIT(12)
-#define KSMBD_SHARE_FLAG_ACL_XATTR             BIT(13)
-#define KSMBD_SHARE_FLAG_UPDATE                        BIT(14)
-#define KSMBD_SHARE_FLAG_CROSSMNT              BIT(15)
+#define KSMBD_SHARE_FLAG_INVALID                       (0)
+#define KSMBD_SHARE_FLAG_AVAILABLE                     BIT(0)
+#define KSMBD_SHARE_FLAG_BROWSEABLE                    BIT(1)
+#define KSMBD_SHARE_FLAG_WRITEABLE                     BIT(2)
+#define KSMBD_SHARE_FLAG_READONLY                      BIT(3)
+#define KSMBD_SHARE_FLAG_GUEST_OK                      BIT(4)
+#define KSMBD_SHARE_FLAG_GUEST_ONLY                    BIT(5)
+#define KSMBD_SHARE_FLAG_STORE_DOS_ATTRS               BIT(6)
+#define KSMBD_SHARE_FLAG_OPLOCKS                       BIT(7)
+#define KSMBD_SHARE_FLAG_PIPE                          BIT(8)
+#define KSMBD_SHARE_FLAG_HIDE_DOT_FILES                        BIT(9)
+#define KSMBD_SHARE_FLAG_INHERIT_OWNER                 BIT(10)
+#define KSMBD_SHARE_FLAG_STREAMS                       BIT(11)
+#define KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS               BIT(12)
+#define KSMBD_SHARE_FLAG_ACL_XATTR                     BIT(13)
+#define KSMBD_SHARE_FLAG_UPDATE                                BIT(14)
+#define KSMBD_SHARE_FLAG_CROSSMNT                      BIT(15)
+#define KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY       BIT(16)
 
 /*
  * Tree connect request flags.
index c0788188aa82fa39211f0be694a13f925408580c..c67fbc8d6683ef957b2b39601031c0aa25d22ac7 100644 (file)
@@ -167,20 +167,17 @@ static void __handle_ksmbd_work(struct ksmbd_work *work,
        int rc;
        bool is_chained = false;
 
-       if (conn->ops->allocate_rsp_buf(work))
-               return;
-
        if (conn->ops->is_transform_hdr &&
            conn->ops->is_transform_hdr(work->request_buf)) {
                rc = conn->ops->decrypt_req(work);
-               if (rc < 0) {
-                       conn->ops->set_rsp_status(work, STATUS_DATA_ERROR);
-                       goto send;
-               }
-
+               if (rc < 0)
+                       return;
                work->encrypted = true;
        }
 
+       if (conn->ops->allocate_rsp_buf(work))
+               return;
+
        rc = conn->ops->init_rsp_hdr(work);
        if (rc) {
                /* either uid or tid is not correct */
index 5723bbf372d7cc93c9e1b2dbdd5082c2824f85f8..355824151c2d88194b7013c2ffeabc074fbe6b87 100644 (file)
@@ -535,6 +535,10 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
        if (cmd == SMB2_QUERY_INFO_HE) {
                struct smb2_query_info_req *req;
 
+               if (get_rfc1002_len(work->request_buf) <
+                   offsetof(struct smb2_query_info_req, OutputBufferLength))
+                       return -EINVAL;
+
                req = smb2_get_msg(work->request_buf);
                if ((req->InfoType == SMB2_O_INFO_FILE &&
                     (req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
@@ -1984,7 +1988,12 @@ int smb2_tree_connect(struct ksmbd_work *work)
        write_unlock(&sess->tree_conns_lock);
        rsp->StructureSize = cpu_to_le16(16);
 out_err1:
-       rsp->Capabilities = 0;
+       if (server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE &&
+           test_share_config_flag(share,
+                                  KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
+               rsp->Capabilities = SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY;
+       else
+               rsp->Capabilities = 0;
        rsp->Reserved = 0;
        /* default manual caching */
        rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
@@ -3498,7 +3507,9 @@ int smb2_open(struct ksmbd_work *work)
        memcpy(fp->client_guid, conn->ClientGUID, SMB2_CLIENT_GUID_SIZE);
 
        if (dh_info.type == DURABLE_REQ_V2 || dh_info.type == DURABLE_REQ) {
-               if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent)
+               if (dh_info.type == DURABLE_REQ_V2 && dh_info.persistent &&
+                   test_share_config_flag(work->tcon->share_conf,
+                                          KSMBD_SHARE_FLAG_CONTINUOUS_AVAILABILITY))
                        fp->is_persistent = true;
                else
                        fp->is_durable = true;
index 22f0f3db3ac92df2447e6b62646447d5ca1895a0..51b1b0bed616eea98a19e5e470f6aa929c8884b5 100644 (file)
@@ -754,10 +754,15 @@ retry:
                goto out4;
        }
 
+       /*
+        * explicitly handle the file overwrite case, for compatibility with
+        * filesystems that may not support rename flags (e.g. fuse)
+        */
        if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) {
                err = -EEXIST;
                goto out4;
        }
+       flags &= ~(RENAME_NOREPLACE);
 
        if (old_child == trap) {
                err = -EINVAL;
index 224645f17c333b2311573197a28b41701eb35f92..297231854ada51ebeb5a8976db22e0e4702e5adb 100644 (file)
@@ -607,6 +607,31 @@ static inline void eth_hw_addr_gen(struct net_device *dev, const u8 *base_addr,
        eth_hw_addr_set(dev, addr);
 }
 
+/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Assigned a packet type if address does not match @dev address
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+                                   const struct net_device *dev)
+{
+       const struct ethhdr *eth = eth_hdr(skb);
+
+       if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+               if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+                       if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+                               skb->pkt_type = PACKET_BROADCAST;
+                       else
+                               skb->pkt_type = PACKET_MULTICAST;
+               } else {
+                       skb->pkt_type = PACKET_OTHERHOST;
+               }
+       }
+}
+
 /**
  * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame
  * @skb: Buffer to pad
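
The new eth_skb_pkt_type() inline above mirrors the destination-address classification that eth_type_trans() performs, so code paths that assemble or re-parse the Ethernet header themselves can set skb->pkt_type without going through the full helper. A hedged sketch of a hypothetical caller (not taken from this merge; it assumes only the inline added above and standard networking headers):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hypothetical receive path, for illustration only. */
static void example_classify_rx(struct sk_buff *skb, struct net_device *dev)
{
	skb_reset_mac_header(skb);	/* eth_hdr() inside the helper needs this */
	skb->dev = dev;

	/* Marks broadcast, multicast and other-host frames; leaves pkt_type
	 * alone (normally PACKET_HOST) when the destination is our address.
	 */
	eth_skb_pkt_type(skb, dev);
}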
index 24cd199dd6f3a972f44344eb3994334201145e63..d33bab33099ab0fe4db1a889117307510e1035fa 100644 (file)
@@ -210,7 +210,6 @@ struct svc_rdma_recv_ctxt {
  */
 struct svc_rdma_write_info {
        struct svcxprt_rdma     *wi_rdma;
-       struct list_head        wi_list;
 
        const struct svc_rdma_chunk     *wi_chunk;
 
@@ -239,10 +238,7 @@ struct svc_rdma_send_ctxt {
        struct ib_cqe           sc_cqe;
        struct xdr_buf          sc_hdrbuf;
        struct xdr_stream       sc_stream;
-
-       struct list_head        sc_write_info_list;
        struct svc_rdma_write_info sc_reply_info;
-
        void                    *sc_xprt_buf;
        int                     sc_page_count;
        int                     sc_cur_sge_no;
@@ -274,14 +270,11 @@ extern void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
 extern void svc_rdma_cc_release(struct svcxprt_rdma *rdma,
                                struct svc_rdma_chunk_ctxt *cc,
                                enum dma_data_direction dir);
-extern void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
-                                        struct svc_rdma_send_ctxt *ctxt);
 extern void svc_rdma_reply_chunk_release(struct svcxprt_rdma *rdma,
                                         struct svc_rdma_send_ctxt *ctxt);
-extern int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
-                                      const struct svc_rdma_pcl *write_pcl,
-                                      struct svc_rdma_send_ctxt *sctxt,
-                                      const struct xdr_buf *xdr);
+extern int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+                                   const struct svc_rdma_recv_ctxt *rctxt,
+                                   const struct xdr_buf *xdr);
 extern int svc_rdma_prepare_reply_chunk(struct svcxprt_rdma *rdma,
                                        const struct svc_rdma_pcl *write_pcl,
                                        const struct svc_rdma_pcl *reply_pcl,
index 627ea8e2d915984091944cf114c4d46d4631f6f4..3dee0b2721aa402ce020fcbda72250980afc52ad 100644 (file)
@@ -85,6 +85,9 @@ enum unix_socket_lock_class {
        U_LOCK_NORMAL,
        U_LOCK_SECOND,  /* for double locking, see unix_state_double_lock(). */
        U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+       U_LOCK_GC_LISTENER, /* used for a listening socket while determining
+                            * gc candidates, to close a small race window.
+                            */
 };
 
 static inline void unix_state_lock_nested(struct sock *sk,
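The new U_LOCK_GC_LISTENER value is passed to unix_state_lock_nested() as a lockdep subclass. A generic, stand-alone illustration of that mechanism (struct node and lock_pair() are invented for this sketch, not part of the patch):

#include <linux/spinlock.h>

struct node {
        spinlock_t lock;
};

static void lock_pair(struct node *parent, struct node *child)
{
        /*
         * Both locks belong to the same lock class; annotating the inner
         * acquisition with a subclass tells lockdep the nesting is
         * intentional rather than a potential deadlock.
         */
        spin_lock(&parent->lock);
        spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

        /* ... work on both nodes ... */

        spin_unlock(&child->lock);
        spin_unlock(&parent->lock);
}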
index 56fb42df44a3331f3665499f5e81cdcdf2c2f64d..e8f581f3f3ce6d4fe3d0d6db8c8a9688daf4addb 100644 (file)
@@ -738,6 +738,8 @@ struct hci_conn {
        __u8            le_per_adv_data[HCI_MAX_PER_AD_TOT_LEN];
        __u16           le_per_adv_data_len;
        __u16           le_per_adv_data_offset;
+       __u8            le_adv_phy;
+       __u8            le_adv_sec_phy;
        __u8            le_tx_phy;
        __u8            le_rx_phy;
        __s8            rssi;
@@ -1512,7 +1514,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
                                     enum conn_reasons conn_reason);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 dst_type, bool dst_resolved, u8 sec_level,
-                               u16 conn_timeout, u8 role);
+                               u16 conn_timeout, u8 role, u8 phy, u8 sec_phy);
 void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status);
 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                                 u8 sec_level, u8 auth_type,
@@ -1905,6 +1907,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
 #define privacy_mode_capable(dev) (use_ll_privacy(dev) && \
                                   (hdev->commands[39] & 0x04))
 
+#define read_key_size_capable(dev) \
+       ((dev)->commands[20] & 0x10 && \
+        !test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks))
+
 /* Use enhanced synchronous connection if command is supported and its quirk
  * has not been set.
  */
index 353488ab94a294fd1fb2a985d015f9c8b59d421d..2d7f87bc5324b4823a8f70b960f0545bab8549c4 100644 (file)
@@ -953,6 +953,8 @@ enum mac80211_tx_info_flags {
  *     of their QoS TID or other priority field values.
  * @IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX: first MLO TX, used mostly internally
  *     for sequence number assignment
+ * @IEEE80211_TX_CTRL_SCAN_TX: Indicates that this frame is transmitted
+ *     due to scanning, not in normal operation on the interface.
  * @IEEE80211_TX_CTRL_MLO_LINK: If not @IEEE80211_LINK_UNSPECIFIED, this
  *     frame should be transmitted on the specific link. This really is
  *     only relevant for frames that do not have data present, and is
@@ -973,6 +975,7 @@ enum mac80211_tx_control_flags {
        IEEE80211_TX_CTRL_NO_SEQNO              = BIT(7),
        IEEE80211_TX_CTRL_DONT_REORDER          = BIT(8),
        IEEE80211_TX_CTRL_MCAST_MLO_FIRST_TX    = BIT(9),
+       IEEE80211_TX_CTRL_SCAN_TX               = BIT(10),
        IEEE80211_TX_CTRL_MLO_LINK              = 0xf0000000,
 };
 
index dbd22180cc5c3418cfa47261639d3ab7bbf55bee..de216cbc6b059fba9f795f83c6be43f5169c8649 100644 (file)
@@ -321,6 +321,7 @@ struct macsec_context {
  *     for the TX tag
  * @needed_tailroom: number of bytes reserved at the end of the sk_buff for the
  *     TX tag
+ * @rx_uses_md_dst: whether MACsec device offload supports sk_buff md_dst
  */
 struct macsec_ops {
        /* Device wide */
@@ -352,6 +353,7 @@ struct macsec_ops {
                                 struct sk_buff *skb);
        unsigned int needed_headroom;
        unsigned int needed_tailroom;
+       bool rx_uses_md_dst;
 };
 
 void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
index f57bfd8a2ad2deaedf3f351325ab9336ae040504..b4b553df7870c0290ae632c51828ad7161ba332d 100644 (file)
@@ -1410,32 +1410,34 @@ sk_memory_allocated(const struct sock *sk)
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
 extern int sysctl_mem_pcpu_rsv;
 
+static inline void proto_memory_pcpu_drain(struct proto *proto)
+{
+       int val = this_cpu_xchg(*proto->per_cpu_fw_alloc, 0);
+
+       if (val)
+               atomic_long_add(val, proto->memory_allocated);
+}
+
 static inline void
-sk_memory_allocated_add(struct sock *sk, int amt)
+sk_memory_allocated_add(const struct sock *sk, int val)
 {
-       int local_reserve;
+       struct proto *proto = sk->sk_prot;
 
-       preempt_disable();
-       local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-       if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
-               __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-               atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-       }
-       preempt_enable();
+       val = this_cpu_add_return(*proto->per_cpu_fw_alloc, val);
+
+       if (unlikely(val >= READ_ONCE(sysctl_mem_pcpu_rsv)))
+               proto_memory_pcpu_drain(proto);
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt)
+sk_memory_allocated_sub(const struct sock *sk, int val)
 {
-       int local_reserve;
+       struct proto *proto = sk->sk_prot;
 
-       preempt_disable();
-       local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-       if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
-               __this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
-               atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
-       }
-       preempt_enable();
+       val = this_cpu_sub_return(*proto->per_cpu_fw_alloc, val);
+
+       if (unlikely(val <= -READ_ONCE(sysctl_mem_pcpu_rsv)))
+               proto_memory_pcpu_drain(proto);
 }
 
 #define SK_ALLOC_PERCPU_COUNTER_BATCH 16
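A simplified, single-threaded user-space model of the batching scheme above (all names are invented stand-ins; the kernel uses this_cpu ops rather than thread-local storage): small charges accumulate in a local cache and are folded into the shared counter only once they cross the reserve threshold in either direction.

#include <stdatomic.h>
#include <stdio.h>

#define PCPU_RESERVE 4096                  /* stand-in for sysctl_mem_pcpu_rsv */

static atomic_long memory_allocated;       /* shared protocol-wide total */
static _Thread_local long pcpu_fw_alloc;   /* per-"CPU" forward-alloc cache */

static void pcpu_drain(void)
{
        long val = pcpu_fw_alloc;

        pcpu_fw_alloc = 0;
        if (val)
                atomic_fetch_add(&memory_allocated, val);
}

static void memory_allocated_add(long val)
{
        pcpu_fw_alloc += val;
        if (pcpu_fw_alloc >= PCPU_RESERVE)
                pcpu_drain();
}

static void memory_allocated_sub(long val)
{
        pcpu_fw_alloc -= val;
        if (pcpu_fw_alloc <= -PCPU_RESERVE)
                pcpu_drain();
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                memory_allocated_add(1024);     /* drains after the 4th and 8th charge */
        memory_allocated_sub(6 * 1024);         /* drains the negative excess */
        printf("shared=%ld cached=%ld\n",
               atomic_load(&memory_allocated), pcpu_fw_alloc);
        return 0;
}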
index 340ad43971e4711d8091a6397bb5cf3c3c4ef0fd..33f657d3c0510a0bd9e9899bc2530f9d035ce366 100644 (file)
@@ -111,7 +111,8 @@ struct tls_strparser {
        u32 stopped : 1;
        u32 copy_mode : 1;
        u32 mixed_decrypted : 1;
-       u32 msg_ready : 1;
+
+       bool msg_ready;
 
        struct strp_msg stm;
 
index d87410a8443aaeadd86966e86a8bec59c8fd9f87..af024d90453ddc5376892b13efd8e6c51043e97e 100644 (file)
@@ -77,11 +77,6 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_PRODUCT_ID                0x1c
 #define ETNAVIV_PARAM_GPU_CUSTOMER_ID               0x1d
 #define ETNAVIV_PARAM_GPU_ECO_ID                    0x1e
-#define ETNAVIV_PARAM_GPU_NN_CORE_COUNT             0x1f
-#define ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE           0x20
-#define ETNAVIV_PARAM_GPU_TP_CORE_COUNT             0x21
-#define ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE         0x22
-#define ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE             0x23
 
 #define ETNA_MAX_PIPES 4
 
index 43c51698195ceb0619e5b2787428be675b007a7f..842bf1201ac4142813d2ea3a90e8f34e20ea5fd5 100644 (file)
@@ -57,7 +57,7 @@ enum vdpa_attr {
        VDPA_ATTR_DEV_FEATURES,                 /* u64 */
 
        VDPA_ATTR_DEV_BLK_CFG_CAPACITY,         /* u64 */
-       VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE,         /* u32 */
+       VDPA_ATTR_DEV_BLK_CFG_SIZE_MAX,         /* u32 */
        VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE,         /* u32 */
        VDPA_ATTR_DEV_BLK_CFG_SEG_MAX,          /* u32 */
        VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES,       /* u16 */
@@ -70,8 +70,8 @@ enum vdpa_attr {
        VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN,/* u32 */
        VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC,     /* u32 */
        VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG,     /* u32 */
-       VDPA_ATTR_DEV_BLK_CFG_READ_ONLY,                /* u8 */
-       VDPA_ATTR_DEV_BLK_CFG_FLUSH,            /* u8 */
+       VDPA_ATTR_DEV_BLK_READ_ONLY,            /* u8 */
+       VDPA_ATTR_DEV_BLK_FLUSH,                /* u8 */
 
        /* new attributes must be added above here */
        VDPA_ATTR_MAX,
index 558e158c98d01075b7614b754a256124c3700a84..9169efb2f43aa9151131410496d3de24af1f1ccd 100644 (file)
@@ -103,7 +103,7 @@ again:
                        s->ax25_dev = NULL;
                        if (sk->sk_socket) {
                                netdev_put(ax25_dev->dev,
-                                          &ax25_dev->dev_tracker);
+                                          &s->dev_tracker);
                                ax25_dev_put(ax25_dev);
                        }
                        ax25_cb_del(s);
index 3ad74f76983b2426ffda03ac038daea0ea34662a..05346250f7195be3e01b11a0a671193f30316c5e 100644 (file)
@@ -1263,7 +1263,7 @@ u8 hci_conn_set_handle(struct hci_conn *conn, u16 handle)
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                u8 dst_type, bool dst_resolved, u8 sec_level,
-                               u16 conn_timeout, u8 role)
+                               u16 conn_timeout, u8 role, u8 phy, u8 sec_phy)
 {
        struct hci_conn *conn;
        struct smp_irk *irk;
@@ -1326,6 +1326,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
        conn->dst_type = dst_type;
        conn->sec_level = BT_SECURITY_LOW;
        conn->conn_timeout = conn_timeout;
+       conn->le_adv_phy = phy;
+       conn->le_adv_sec_phy = sec_phy;
 
        err = hci_connect_le_sync(hdev, conn);
        if (err) {
@@ -2273,7 +2275,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
                le = hci_connect_le(hdev, dst, dst_type, false,
                                    BT_SECURITY_LOW,
                                    HCI_LE_CONN_TIMEOUT,
-                                   HCI_ROLE_SLAVE);
+                                   HCI_ROLE_SLAVE, 0, 0);
        else
                le = hci_connect_le_scan(hdev, dst, dst_type,
                                         BT_SECURITY_LOW,
index a8b8cfebe0180cce2fb661e8e5f21a79bf7a7656..4a27e4a17a67449ffd8a37cb057357e20881667c 100644 (file)
@@ -3218,7 +3218,7 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, void *data,
                        if (key) {
                                set_bit(HCI_CONN_ENCRYPT, &conn->flags);
 
-                               if (!(hdev->commands[20] & 0x10)) {
+                               if (!read_key_size_capable(hdev)) {
                                        conn->enc_key_size = HCI_LINK_KEY_SIZE;
                                } else {
                                        cp.handle = cpu_to_le16(conn->handle);
@@ -3666,8 +3666,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,
                 * controller really supports it. If it doesn't, assume
                 * the default size (16).
                 */
-               if (!(hdev->commands[20] & 0x10) ||
-                   test_bit(HCI_QUIRK_BROKEN_READ_ENC_KEY_SIZE, &hdev->quirks)) {
+               if (!read_key_size_capable(hdev)) {
                        conn->enc_key_size = HCI_LINK_KEY_SIZE;
                        goto notify;
                }
@@ -6038,7 +6037,7 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data,
 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
                                              bdaddr_t *addr,
                                              u8 addr_type, bool addr_resolved,
-                                             u8 adv_type)
+                                             u8 adv_type, u8 phy, u8 sec_phy)
 {
        struct hci_conn *conn;
        struct hci_conn_params *params;
@@ -6093,7 +6092,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
 
        conn = hci_connect_le(hdev, addr, addr_type, addr_resolved,
                              BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout,
-                             HCI_ROLE_MASTER);
+                             HCI_ROLE_MASTER, phy, sec_phy);
        if (!IS_ERR(conn)) {
                /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
                 * by higher layer that tried to connect, if no then
@@ -6128,8 +6127,9 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
 
 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
                               u8 bdaddr_type, bdaddr_t *direct_addr,
-                              u8 direct_addr_type, s8 rssi, u8 *data, u8 len,
-                              bool ext_adv, bool ctl_time, u64 instant)
+                              u8 direct_addr_type, u8 phy, u8 sec_phy, s8 rssi,
+                              u8 *data, u8 len, bool ext_adv, bool ctl_time,
+                              u64 instant)
 {
        struct discovery_state *d = &hdev->discovery;
        struct smp_irk *irk;
@@ -6217,7 +6217,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
         * for advertising reports) and is already verified to be RPA above.
         */
        conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved,
-                                    type);
+                                    type, phy, sec_phy);
        if (!ext_adv && conn && type == LE_ADV_IND &&
            len <= max_adv_len(hdev)) {
                /* Store report for later inclusion by
@@ -6363,7 +6363,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,
                if (info->length <= max_adv_len(hdev)) {
                        rssi = info->data[info->length];
                        process_adv_report(hdev, info->type, &info->bdaddr,
-                                          info->bdaddr_type, NULL, 0, rssi,
+                                          info->bdaddr_type, NULL, 0,
+                                          HCI_ADV_PHY_1M, 0, rssi,
                                           info->data, info->length, false,
                                           false, instant);
                } else {
@@ -6448,6 +6449,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,
                if (legacy_evt_type != LE_ADV_INVALID) {
                        process_adv_report(hdev, legacy_evt_type, &info->bdaddr,
                                           info->bdaddr_type, NULL, 0,
+                                          info->primary_phy,
+                                          info->secondary_phy,
                                           info->rssi, info->data, info->length,
                                           !(evt_type & LE_EXT_ADV_LEGACY_PDU),
                                           false, instant);
@@ -6730,8 +6733,8 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,
 
                process_adv_report(hdev, info->type, &info->bdaddr,
                                   info->bdaddr_type, &info->direct_addr,
-                                  info->direct_addr_type, info->rssi, NULL, 0,
-                                  false, false, instant);
+                                  info->direct_addr_type, HCI_ADV_PHY_1M, 0,
+                                  info->rssi, NULL, 0, false, false, instant);
        }
 
        hci_dev_unlock(hdev);
index c5d8799046ccffbf798e6f47ffaef3dddcb364ca..4c707eb64e6f63d8e2ea85a7ac12a31060dcf7e4 100644 (file)
@@ -6346,7 +6346,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
 
        plen = sizeof(*cp);
 
-       if (scan_1m(hdev)) {
+       if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
+                             conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
                cp->phys |= LE_SCAN_PHY_1M;
                set_ext_conn_params(conn, p);
 
@@ -6354,7 +6355,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
                plen += sizeof(*p);
        }
 
-       if (scan_2m(hdev)) {
+       if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
+                             conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
                cp->phys |= LE_SCAN_PHY_2M;
                set_ext_conn_params(conn, p);
 
@@ -6362,7 +6364,8 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
                plen += sizeof(*p);
        }
 
-       if (scan_coded(hdev)) {
+       if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
+                                conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
                cp->phys |= LE_SCAN_PHY_CODED;
                set_ext_conn_params(conn, p);
 
index dc089740879363dd0d6d973dcdb4fc05cfc7070a..84fc70862d78aeef25d6ca9e6df7fb468338852e 100644 (file)
@@ -7018,7 +7018,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
                        hcon = hci_connect_le(hdev, dst, dst_type, false,
                                              chan->sec_level, timeout,
-                                             HCI_ROLE_SLAVE);
+                                             HCI_ROLE_SLAVE, 0, 0);
                else
                        hcon = hci_connect_le_scan(hdev, dst, dst_type,
                                                   chan->sec_level, timeout,
index e7d810b23082f5ffd8ea4b506366b2684f2e1ece..5cc83f906c123ffa7349d26a41c310005920aca5 100644 (file)
@@ -439,7 +439,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
        struct l2cap_chan *chan = l2cap_pi(sk)->chan;
        struct l2cap_options opts;
        struct l2cap_conninfo cinfo;
-       int len, err = 0;
+       int err = 0;
+       size_t len;
        u32 opt;
 
        BT_DBG("sk %p", sk);
@@ -486,7 +487,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 
                BT_DBG("mode 0x%2.2x", chan->mode);
 
-               len = min_t(unsigned int, len, sizeof(opts));
+               len = min(len, sizeof(opts));
                if (copy_to_user(optval, (char *) &opts, len))
                        err = -EFAULT;
 
@@ -536,7 +537,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
                cinfo.hci_handle = chan->conn->hcon->handle;
                memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
 
-               len = min_t(unsigned int, len, sizeof(cinfo));
+               len = min(len, sizeof(cinfo));
                if (copy_to_user(optval, (char *) &cinfo, len))
                        err = -EFAULT;
 
index 32ed6e9245a307483e69ccb1cb1dd8c30c023130..965f621ef865adb607a6ccf71f6b2e7429a10a99 100644 (file)
@@ -2623,7 +2623,11 @@ static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
                goto failed;
        }
 
-       err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
+       /* MGMT_OP_ADD_UUID doesn't require the adapter to be up/running, so use
+        * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+        */
+       err = hci_cmd_sync_submit(hdev, add_uuid_sync, cmd,
+                                 mgmt_class_complete);
        if (err < 0) {
                mgmt_pending_free(cmd);
                goto failed;
@@ -2717,8 +2721,11 @@ update_class:
                goto unlock;
        }
 
-       err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
-                                mgmt_class_complete);
+       /* MGMT_OP_REMOVE_UUID doesn't require the adapter to be up/running, so use
+        * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+        */
+       err = hci_cmd_sync_submit(hdev, remove_uuid_sync, cmd,
+                                 mgmt_class_complete);
        if (err < 0)
                mgmt_pending_free(cmd);
 
@@ -2784,8 +2791,11 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
                goto unlock;
        }
 
-       err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
-                                mgmt_class_complete);
+       /* MGMT_OP_SET_DEV_CLASS doesn't require the adapter to be up/running, so use
+        * hci_cmd_sync_submit instead of hci_cmd_sync_queue.
+        */
+       err = hci_cmd_sync_submit(hdev, set_class_sync, cmd,
+                                 mgmt_class_complete);
        if (err < 0)
                mgmt_pending_free(cmd);
 
@@ -5475,8 +5485,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
                goto unlock;
        }
 
-       err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd,
-                                mgmt_remove_adv_monitor_complete);
+       err = hci_cmd_sync_submit(hdev, mgmt_remove_adv_monitor_sync, cmd,
+                                 mgmt_remove_adv_monitor_complete);
 
        if (err) {
                mgmt_pending_remove(cmd);
index 368e026f4d15ca4711737af941ad30c7b48b827f..5d03c5440b06f843e654ddb0e3d3f83d4dd0cfd9 100644 (file)
@@ -964,7 +964,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
        struct sock *sk = sock->sk;
        struct sco_options opts;
        struct sco_conninfo cinfo;
-       int len, err = 0;
+       int err = 0;
+       size_t len;
 
        BT_DBG("sk %p", sk);
 
@@ -986,7 +987,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 
                BT_DBG("mtu %u", opts.mtu);
 
-               len = min_t(unsigned int, len, sizeof(opts));
+               len = min(len, sizeof(opts));
                if (copy_to_user(optval, (char *)&opts, len))
                        err = -EFAULT;
 
@@ -1004,7 +1005,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
                cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
                memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
 
-               len = min_t(unsigned int, len, sizeof(cinfo));
+               len = min(len, sizeof(cinfo));
                if (copy_to_user(optval, (char *)&cinfo, len))
                        err = -EFAULT;
 
index 2cf4fc756263992eefe6a3580410766fea0c2c1f..f17dbac7d82843091f9131acc68a5a9132fa2eda 100644 (file)
@@ -667,7 +667,7 @@ void br_ifinfo_notify(int event, const struct net_bridge *br,
 {
        u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;
 
-       return br_info_notify(event, br, port, filter);
+       br_info_notify(event, br, port, filter);
 }
 
 /*
index 2edc8b796a4e7326aa44128a0618e15b9aa817de..049c3adeb85044ac78e5adf7dcfb389d21e75652 100644 (file)
@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
        eth = (struct ethhdr *)skb->data;
        skb_pull_inline(skb, ETH_HLEN);
 
-       if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-                                             dev->dev_addr))) {
-               if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
-                       if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
-                               skb->pkt_type = PACKET_BROADCAST;
-                       else
-                               skb->pkt_type = PACKET_MULTICAST;
-               } else {
-                       skb->pkt_type = PACKET_OTHERHOST;
-               }
-       }
+       eth_skb_pkt_type(skb, dev);
 
        /*
         * Some variants of DSA tagging don't have an ethertype field
index e63a3bf99617627e17669f9b3aaee1cbbf178ebf..437e782b9663bb59acb900c0558137ddd401cd02 100644 (file)
@@ -92,6 +92,7 @@
 #include <net/inet_common.h>
 #include <net/ip_fib.h>
 #include <net/l3mdev.h>
+#include <net/addrconf.h>
 
 /*
  *     Build xmit assembly blocks
@@ -1032,6 +1033,8 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
        struct icmp_ext_hdr *ext_hdr, _ext_hdr;
        struct icmp_ext_echo_iio *iio, _iio;
        struct net *net = dev_net(skb->dev);
+       struct inet6_dev *in6_dev;
+       struct in_device *in_dev;
        struct net_device *dev;
        char buff[IFNAMSIZ];
        u16 ident_len;
@@ -1115,10 +1118,15 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
        /* Fill bits in reply message */
        if (dev->flags & IFF_UP)
                status |= ICMP_EXT_ECHOREPLY_ACTIVE;
-       if (__in_dev_get_rcu(dev) && __in_dev_get_rcu(dev)->ifa_list)
+
+       in_dev = __in_dev_get_rcu(dev);
+       if (in_dev && rcu_access_pointer(in_dev->ifa_list))
                status |= ICMP_EXT_ECHOREPLY_IPV4;
-       if (!list_empty(&rcu_dereference(dev->ip6_ptr)->addr_list))
+
+       in6_dev = __in6_dev_get(dev);
+       if (in6_dev && !list_empty(&in6_dev->addr_list))
                status |= ICMP_EXT_ECHOREPLY_IPV6;
+
        dev_put(dev);
        icmphdr->un.echo.sequence |= htons(status);
        return true;
index d36ace160d426f6224f8e692f3b438ae863bb9b9..b814fdab19f710d066d323970be6ce57a3b583c5 100644 (file)
@@ -2166,6 +2166,9 @@ int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        int err = -EINVAL;
        u32 tag = 0;
 
+       if (!in_dev)
+               return -EINVAL;
+
        if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
                goto martian_source;
 
index 3afeeb68e8a7e2a30ce9c4d92dcc8b150314b669..781b67a525719a42f21b713eb424427670d7afb2 100644 (file)
@@ -1068,6 +1068,7 @@ void tcp_ao_connect_init(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_ao_info *ao_info;
+       struct hlist_node *next;
        union tcp_ao_addr *addr;
        struct tcp_ao_key *key;
        int family, l3index;
@@ -1090,7 +1091,7 @@ void tcp_ao_connect_init(struct sock *sk)
        l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
                                                 sk->sk_bound_dev_if);
 
-       hlist_for_each_entry_rcu(key, &ao_info->head, node) {
+       hlist_for_each_entry_safe(key, next, &ao_info->head, node) {
                if (!tcp_ao_key_cmp(key, l3index, addr, key->prefixlen, family, -1, -1))
                        continue;
 
index c02bf011d4a6f487b2c69e48e5032068eed3debc..420905be5f30c944ff360b349ae29d66104e0286 100644 (file)
@@ -1123,16 +1123,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        if (msg->msg_controllen) {
                err = udp_cmsg_send(sk, msg, &ipc.gso_size);
-               if (err > 0)
+               if (err > 0) {
                        err = ip_cmsg_send(sk, msg, &ipc,
                                           sk->sk_family == AF_INET6);
+                       connected = 0;
+               }
                if (unlikely(err < 0)) {
                        kfree(ipc.opt);
                        return err;
                }
                if (ipc.opt)
                        free = 1;
-               connected = 0;
        }
        if (!ipc.opt) {
                struct ip_options_rcu *inet_opt;
index 8b1dd7f512491d806e4d0a9fc5297a255dafd5a4..1a4cccdd40c9ca44675cea5f5c2a08724ccb2d75 100644 (file)
@@ -1474,9 +1474,11 @@ do_udp_sendmsg:
                ipc6.opt = opt;
 
                err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
-               if (err > 0)
+               if (err > 0) {
                        err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, fl6,
                                                    &ipc6);
+                       connected = false;
+               }
                if (err < 0) {
                        fl6_sock_release(flowlabel);
                        return err;
@@ -1488,7 +1490,6 @@ do_udp_sendmsg:
                }
                if (!(opt->opt_nflen|opt->opt_flen))
                        opt = NULL;
-               connected = false;
        }
        if (!opt) {
                opt = txopt_get(np);
index 80e4b9784131d149c6acf27f79be0b0c16edec85..ccacaed32817aed59240034f85753c47a353501a 100644 (file)
@@ -797,6 +797,7 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_chanctx_conf *conf;
        struct ieee80211_chanctx *curr_ctx = NULL;
+       bool new_idle;
        int ret = 0;
 
        if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_NAN))
@@ -829,8 +830,6 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
 out:
        rcu_assign_pointer(link->conf->chanctx_conf, conf);
 
-       sdata->vif.cfg.idle = !conf;
-
        if (curr_ctx && ieee80211_chanctx_num_assigned(local, curr_ctx) > 0) {
                ieee80211_recalc_chanctx_chantype(local, curr_ctx);
                ieee80211_recalc_smps_chanctx(local, curr_ctx);
@@ -843,9 +842,27 @@ out:
                ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
        }
 
-       if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
-           sdata->vif.type != NL80211_IFTYPE_MONITOR)
-               ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
+       if (conf) {
+               new_idle = false;
+       } else {
+               struct ieee80211_link_data *tmp;
+
+               new_idle = true;
+               for_each_sdata_link(local, tmp) {
+                       if (rcu_access_pointer(tmp->conf->chanctx_conf)) {
+                               new_idle = false;
+                               break;
+                       }
+               }
+       }
+
+       if (new_idle != sdata->vif.cfg.idle) {
+               sdata->vif.cfg.idle = new_idle;
+
+               if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+                   sdata->vif.type != NL80211_IFTYPE_MONITOR)
+                       ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_IDLE);
+       }
 
        ieee80211_check_fast_xmit_iface(sdata);
 
index 32475da98d739cbe66d200f6bd8e8b0542f3cb04..cbc9b5e40cb35e81fb80dd55016c3afc8c31deb7 100644 (file)
@@ -747,6 +747,9 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
                              struct sk_buff *skb, u32 ctrl_flags)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+       struct ieee80211_mesh_fast_tx_key key = {
+               .type = MESH_FAST_TX_TYPE_LOCAL
+       };
        struct ieee80211_mesh_fast_tx *entry;
        struct ieee80211s_hdr *meshhdr;
        u8 sa[ETH_ALEN] __aligned(2);
@@ -782,7 +785,10 @@ bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
                        return false;
        }
 
-       entry = mesh_fast_tx_get(sdata, skb->data);
+       ether_addr_copy(key.addr, skb->data);
+       if (!ether_addr_equal(skb->data + ETH_ALEN, sdata->vif.addr))
+               key.type = MESH_FAST_TX_TYPE_PROXIED;
+       entry = mesh_fast_tx_get(sdata, &key);
        if (!entry)
                return false;
 
index d913ce7ba72ef897fb6857f55df3fb20bc871b5e..3f9664e4e00c6c2e51faaa43d1a23860a6d22e4c 100644 (file)
@@ -134,10 +134,39 @@ struct mesh_path {
 #define MESH_FAST_TX_CACHE_THRESHOLD_SIZE      384
 #define MESH_FAST_TX_CACHE_TIMEOUT             8000 /* msecs */
 
+/**
+ * enum ieee80211_mesh_fast_tx_type - cached mesh fast tx entry type
+ *
+ * @MESH_FAST_TX_TYPE_LOCAL: tx from the local vif address as SA
+ * @MESH_FAST_TX_TYPE_PROXIED: local tx with a different SA (e.g. bridged)
+ * @MESH_FAST_TX_TYPE_FORWARDED: forwarded from a different mesh point
+ * @NUM_MESH_FAST_TX_TYPE: number of entry types
+ */
+enum ieee80211_mesh_fast_tx_type {
+       MESH_FAST_TX_TYPE_LOCAL,
+       MESH_FAST_TX_TYPE_PROXIED,
+       MESH_FAST_TX_TYPE_FORWARDED,
+
+       /* must be last */
+       NUM_MESH_FAST_TX_TYPE
+};
+
+
+/**
+ * struct ieee80211_mesh_fast_tx_key - cached mesh fast tx entry key
+ *
+ * @addr: The Ethernet DA for this entry
+ * @type: cache entry type
+ */
+struct ieee80211_mesh_fast_tx_key {
+       u8 addr[ETH_ALEN] __aligned(2);
+       u16 type;
+};
+
 /**
  * struct ieee80211_mesh_fast_tx - cached mesh fast tx entry
  * @rhash: rhashtable pointer
- * @addr_key: The Ethernet DA which is the key for this entry
+ * @key: the lookup key for this cache entry
  * @fast_tx: base fast_tx data
  * @hdr: cached mesh and rfc1042 headers
  * @hdrlen: length of mesh + rfc1042
@@ -148,7 +177,7 @@ struct mesh_path {
  */
 struct ieee80211_mesh_fast_tx {
        struct rhash_head rhash;
-       u8 addr_key[ETH_ALEN] __aligned(2);
+       struct ieee80211_mesh_fast_tx_key key;
 
        struct ieee80211_fast_tx fast_tx;
        u8 hdr[sizeof(struct ieee80211s_hdr) + sizeof(rfc1042_header)];
@@ -334,7 +363,8 @@ void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
 
 bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
 struct ieee80211_mesh_fast_tx *
-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr);
+mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
+                struct ieee80211_mesh_fast_tx_key *key);
 bool ieee80211_mesh_xmit_fast(struct ieee80211_sub_if_data *sdata,
                              struct sk_buff *skb, u32 ctrl_flags);
 void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
index 91b55d6a68b9739f9d22786bf403c801fc34d864..a6b62169f08483c5aa481f4f8f59f67fa56a4ef7 100644 (file)
@@ -37,8 +37,8 @@ static const struct rhashtable_params mesh_rht_params = {
 static const struct rhashtable_params fast_tx_rht_params = {
        .nelem_hint = 10,
        .automatic_shrinking = true,
-       .key_len = ETH_ALEN,
-       .key_offset = offsetof(struct ieee80211_mesh_fast_tx, addr_key),
+       .key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
+       .key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
        .head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
        .hashfn = mesh_table_hash,
 };
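For reference, a compact sketch (the example_* names are invented, not from this patch) of the rhashtable pattern being extended here: with key_len covering the whole key struct, the table hashes and memcmp-compares the complete { addr, type } pair, so the same address can appear once per entry type and a flush has to probe every type, as the mesh_fast_tx_flush_addr() change below does.

#include <linux/if_ether.h>
#include <linux/rhashtable.h>

struct example_key {
        u8  addr[ETH_ALEN] __aligned(2);
        u16 type;                       /* no padding between the fields */
};

struct example_entry {
        struct rhash_head rhash;
        struct example_key key;
        /* ... payload ... */
};

static const struct rhashtable_params example_rht_params = {
        .key_len     = sizeof_field(struct example_entry, key),
        .key_offset  = offsetof(struct example_entry, key),
        .head_offset = offsetof(struct example_entry, rhash),
        .automatic_shrinking = true,
};

/* lookup by value: the caller fills in both addr and type */
static struct example_entry *example_lookup(struct rhashtable *ht,
                                            const struct example_key *key)
{
        return rhashtable_lookup_fast(ht, key, example_rht_params);
}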
@@ -431,20 +431,21 @@ static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
 }
 
 struct ieee80211_mesh_fast_tx *
-mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
+                struct ieee80211_mesh_fast_tx_key *key)
 {
        struct ieee80211_mesh_fast_tx *entry;
        struct mesh_tx_cache *cache;
 
        cache = &sdata->u.mesh.tx_cache;
-       entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+       entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
        if (!entry)
                return NULL;
 
        if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
            mpath_expired(entry->mpath)) {
                spin_lock_bh(&cache->walk_lock);
-               entry = rhashtable_lookup(&cache->rht, addr, fast_tx_rht_params);
+               entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
                if (entry)
                    mesh_fast_tx_entry_free(cache, entry);
                spin_unlock_bh(&cache->walk_lock);
@@ -489,18 +490,24 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
        if (!sta)
                return;
 
+       build.key.type = MESH_FAST_TX_TYPE_LOCAL;
        if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
                /* This is required to keep the mppath alive */
                mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
                if (!mppath)
                        return;
                build.mppath = mppath;
+               if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
+                       build.key.type = MESH_FAST_TX_TYPE_PROXIED;
        } else if (ieee80211_has_a4(hdr->frame_control)) {
                mppath = mpath;
        } else {
                return;
        }
 
+       if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
+               build.key.type = MESH_FAST_TX_TYPE_FORWARDED;
+
        /* rate limit, in case fast xmit can't be enabled */
        if (mppath->fast_tx_check == jiffies)
                return;
@@ -547,7 +554,7 @@ void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       memcpy(build.addr_key, mppath->dst, ETH_ALEN);
+       memcpy(build.key.addr, mppath->dst, ETH_ALEN);
        build.timestamp = jiffies;
        build.fast_tx.band = info->band;
        build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
@@ -646,12 +653,18 @@ void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
                             const u8 *addr)
 {
        struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
+       struct ieee80211_mesh_fast_tx_key key = {};
        struct ieee80211_mesh_fast_tx *entry;
+       int i;
 
+       ether_addr_copy(key.addr, addr);
        spin_lock_bh(&cache->walk_lock);
-       entry = rhashtable_lookup_fast(&cache->rht, addr, fast_tx_rht_params);
-       if (entry)
-               mesh_fast_tx_entry_free(cache, entry);
+       for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
+               key.type = i;
+               entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
+               if (entry)
+                       mesh_fast_tx_entry_free(cache, entry);
+       }
        spin_unlock_bh(&cache->walk_lock);
 }
 
index 96b70006b7fc0b11b12f423fb74ec32a030d91af..3bbb216a0fc8ce58138420d13008b2240e260a77 100644 (file)
@@ -616,7 +616,6 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
                .from_ap = true,
                .start = ies->data,
                .len = ies->len,
-               .mode = conn->mode,
        };
        struct ieee802_11_elems *elems;
        struct ieee80211_supported_band *sband;
@@ -625,6 +624,7 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata,
        int ret;
 
 again:
+       parse_params.mode = conn->mode;
        elems = ieee802_11_parse_elems_full(&parse_params);
        if (!elems)
                return ERR_PTR(-ENOMEM);
@@ -632,15 +632,21 @@ again:
        ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info,
                                              elems, false, conn, &ap_chandef);
 
-       mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
-                        cbss->bssid, ieee80211_conn_mode_str(ap_mode));
-
        /* this should be impossible since parsing depends on our mode */
        if (WARN_ON(ap_mode > conn->mode)) {
                ret = -EINVAL;
                goto free;
        }
 
+       if (conn->mode != ap_mode) {
+               conn->mode = ap_mode;
+               kfree(elems);
+               goto again;
+       }
+
+       mlme_link_id_dbg(sdata, link_id, "determined AP %pM to be %s\n",
+                        cbss->bssid, ieee80211_conn_mode_str(ap_mode));
+
        sband = sdata->local->hw.wiphy->bands[channel->band];
 
        switch (channel->band) {
@@ -691,7 +697,6 @@ again:
                break;
        }
 
-       conn->mode = ap_mode;
        chanreq->oper = ap_chandef;
 
        /* wider-bandwidth OFDMA is only done in EHT */
@@ -753,8 +758,10 @@ again:
        }
 
        /* the mode can only decrease, so this must terminate */
-       if (ap_mode != conn->mode)
+       if (ap_mode != conn->mode) {
+               kfree(elems);
                goto again;
+       }
 
        mlme_link_id_dbg(sdata, link_id,
                         "connecting with %s mode, max bandwidth %d MHz\n",
@@ -5812,7 +5819,7 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
                 */
                if (control &
                    IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT)
-                       link_removal_timeout[link_id] = le16_to_cpu(*(__le16 *)pos);
+                       link_removal_timeout[link_id] = get_unaligned_le16(pos);
        }
 
        removed_links &= sdata->vif.valid_links;
@@ -5837,8 +5844,11 @@ static void ieee80211_ml_reconfiguration(struct ieee80211_sub_if_data *sdata,
                        continue;
                }
 
-               link_delay = link_conf->beacon_int *
-                       link_removal_timeout[link_id];
+               if (link_removal_timeout[link_id] < 1)
+                       link_delay = 0;
+               else
+                       link_delay = link_conf->beacon_int *
+                               (link_removal_timeout[link_id] - 1);
 
                if (!delay)
                        delay = link_delay;
@@ -6193,7 +6203,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link,
                        link->u.mgd.dtim_period = elems->dtim_period;
                link->u.mgd.have_beacon = true;
                ifmgd->assoc_data->need_beacon = false;
-               if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
+               if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
+                   !ieee80211_is_s1g_beacon(hdr->frame_control)) {
                        link->conf->sync_tsf =
                                le64_to_cpu(mgmt->u.beacon.timestamp);
                        link->conf->sync_device_ts =
index 23404b275457a74868cd935653bc1a6192ec4cb5..4dc1def695486567b486fdada893557752f8df43 100644 (file)
@@ -877,6 +877,7 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
        struct ieee80211_sub_if_data *sdata;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_supported_band *sband;
+       u32 mask = ~0;
 
        rate_control_fill_sta_table(sta, info, dest, max_rates);
 
@@ -889,9 +890,12 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
        if (ieee80211_is_tx_data(skb))
                rate_control_apply_mask(sdata, sta, sband, dest, max_rates);
 
+       if (!(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX))
+               mask = sdata->rc_rateidx_mask[info->band];
+
        if (dest[0].idx < 0)
                __rate_control_send_low(&sdata->local->hw, sband, sta, info,
-                                       sdata->rc_rateidx_mask[info->band]);
+                                       mask);
 
        if (sta)
                rate_fixup_ratelist(vif, sband, info, dest, max_rates);
index c1f8501384056d3ebbb9ba7ddbd5c659c579d582..6e24864f9a40ba1b8d689263cf905cc0ff5d3d69 100644 (file)
@@ -2763,7 +2763,10 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
                               struct sk_buff *skb, int hdrlen)
 {
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-       struct ieee80211_mesh_fast_tx *entry = NULL;
+       struct ieee80211_mesh_fast_tx_key key = {
+               .type = MESH_FAST_TX_TYPE_FORWARDED
+       };
+       struct ieee80211_mesh_fast_tx *entry;
        struct ieee80211s_hdr *mesh_hdr;
        struct tid_ampdu_tx *tid_tx;
        struct sta_info *sta;
@@ -2772,9 +2775,13 @@ ieee80211_rx_mesh_fast_forward(struct ieee80211_sub_if_data *sdata,
 
        mesh_hdr = (struct ieee80211s_hdr *)(skb->data + sizeof(eth));
        if ((mesh_hdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6)
-               entry = mesh_fast_tx_get(sdata, mesh_hdr->eaddr1);
+               ether_addr_copy(key.addr, mesh_hdr->eaddr1);
        else if (!(mesh_hdr->flags & MESH_FLAGS_AE))
-               entry = mesh_fast_tx_get(sdata, skb->data);
+               ether_addr_copy(key.addr, skb->data);
+       else
+               return false;
+
+       entry = mesh_fast_tx_get(sdata, &key);
        if (!entry)
                return false;
 
@@ -3780,6 +3787,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                }
                break;
        case WLAN_CATEGORY_PROTECTED_EHT:
+               if (len < offsetofend(typeof(*mgmt),
+                                     u.action.u.ttlm_req.action_code))
+                       break;
+
                switch (mgmt->u.action.u.ttlm_req.action_code) {
                case WLAN_PROTECTED_EHT_ACTION_TTLM_REQ:
                        if (sdata->vif.type != NL80211_IFTYPE_STATION)
index 0429e59ba387c931f42ae1e74255a7deb5ebf5d6..73850312580f7054c60550dd5a2486583ab48f0a 100644 (file)
@@ -648,6 +648,7 @@ static void ieee80211_send_scan_probe_req(struct ieee80211_sub_if_data *sdata,
                                cpu_to_le16(IEEE80211_SN_TO_SEQ(sn));
                }
                IEEE80211_SKB_CB(skb)->flags |= tx_flags;
+               IEEE80211_SKB_CB(skb)->control.flags |= IEEE80211_TX_CTRL_SCAN_TX;
                ieee80211_tx_skb_tid_band(sdata, skb, 7, channel->band);
        }
 }
index 6bf223e6cd1a54aa432ef0bd41da48cd9316ffc5..cfd0a62d0152bd28f32fbb022cdbd094defefca4 100644 (file)
@@ -698,11 +698,16 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
        txrc.bss_conf = &tx->sdata->vif.bss_conf;
        txrc.skb = tx->skb;
        txrc.reported_rate.idx = -1;
-       txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
 
-       if (tx->sdata->rc_has_mcs_mask[info->band])
-               txrc.rate_idx_mcs_mask =
-                       tx->sdata->rc_rateidx_mcs_mask[info->band];
+       if (unlikely(info->control.flags & IEEE80211_TX_CTRL_SCAN_TX)) {
+               txrc.rate_idx_mask = ~0;
+       } else {
+               txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band];
+
+               if (tx->sdata->rc_has_mcs_mask[info->band])
+                       txrc.rate_idx_mcs_mask =
+                               tx->sdata->rc_rateidx_mcs_mask[info->band];
+       }
 
        txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP ||
                    tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
index a0921adc31a9ffe7db09d18e7ae54213a79a8fd7..1e689c71412716e04f417cdb62d9fb56b730a1ab 100644 (file)
@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
        if (sctph->source != cp->vport || payload_csum ||
            skb->ip_summed == CHECKSUM_PARTIAL) {
                sctph->source = cp->vport;
-               sctp_nat_csum(skb, sctph, sctphoff);
+               if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+                       sctp_nat_csum(skb, sctph, sctphoff);
        } else {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
            (skb->ip_summed == CHECKSUM_PARTIAL &&
             !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
                sctph->dest = cp->dport;
-               sctp_nat_csum(skb, sctph, sctphoff);
+               if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+                       sctp_nat_csum(skb, sctph, sctphoff);
        } else if (skb->ip_summed != CHECKSUM_PARTIAL) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
index 274b6f7e6bb57e4f270262ef923ebf8d7f1cf02c..d170758a1eb5d08929cc4cd8e8acd350e793524e 100644 (file)
@@ -338,7 +338,9 @@ static void nft_netdev_event(unsigned long event, struct net_device *dev,
                return;
 
        if (n > 1) {
-               nf_unregister_net_hook(ctx->net, &found->ops);
+               if (!(ctx->chain->table->flags & NFT_TABLE_F_DORMANT))
+                       nf_unregister_net_hook(ctx->net, &found->ops);
+
                list_del_rcu(&found->list);
                kfree_rcu(found, rcu);
                return;
index 74b63cdb59923a95dd03a9c2c540af702564873a..2928c142a2ddb3a0d9b1937fdb4ddd37322b3f45 100644 (file)
@@ -1593,9 +1593,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
        for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
                struct hlist_head *head = &info->limits[i];
                struct ovs_ct_limit *ct_limit;
+               struct hlist_node *next;
 
-               hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
-                                        lockdep_ovsl_is_held())
+               hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
                        kfree_rcu(ct_limit, rcu);
        }
        kfree(info->limits);
index f2a100c4c81f12e8ed91d0400938b53e3dd0dc46..40797114d50a49a4e10cb30c182f094fc8e7313d 100644 (file)
@@ -230,28 +230,6 @@ static void svc_rdma_write_info_free(struct svc_rdma_write_info *info)
        queue_work(svcrdma_wq, &info->wi_work);
 }
 
-/**
- * svc_rdma_write_chunk_release - Release Write chunk I/O resources
- * @rdma: controlling transport
- * @ctxt: Send context that is being released
- */
-void svc_rdma_write_chunk_release(struct svcxprt_rdma *rdma,
-                                 struct svc_rdma_send_ctxt *ctxt)
-{
-       struct svc_rdma_write_info *info;
-       struct svc_rdma_chunk_ctxt *cc;
-
-       while (!list_empty(&ctxt->sc_write_info_list)) {
-               info = list_first_entry(&ctxt->sc_write_info_list,
-                                       struct svc_rdma_write_info, wi_list);
-               list_del(&info->wi_list);
-
-               cc = &info->wi_cc;
-               svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
-               svc_rdma_write_info_free(info);
-       }
-}
-
 /**
  * svc_rdma_reply_chunk_release - Release Reply chunk I/O resources
  * @rdma: controlling transport
@@ -308,11 +286,13 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        struct ib_cqe *cqe = wc->wr_cqe;
        struct svc_rdma_chunk_ctxt *cc =
                        container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
+       struct svc_rdma_write_info *info =
+                       container_of(cc, struct svc_rdma_write_info, wi_cc);
 
        switch (wc->status) {
        case IB_WC_SUCCESS:
                trace_svcrdma_wc_write(&cc->cc_cid);
-               return;
+               break;
        case IB_WC_WR_FLUSH_ERR:
                trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
                break;
@@ -320,11 +300,12 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
                trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
        }
 
-       /* The RDMA Write has flushed, so the client won't get
-        * some of the outgoing RPC message. Signal the loss
-        * to the client by closing the connection.
-        */
-       svc_xprt_deferred_close(&rdma->sc_xprt);
+       svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
+
+       if (unlikely(wc->status != IB_WC_SUCCESS))
+               svc_xprt_deferred_close(&rdma->sc_xprt);
+
+       svc_rdma_write_info_free(info);
 }
 
 /**
@@ -620,19 +601,13 @@ static int svc_rdma_xb_write(const struct xdr_buf *xdr, void *data)
        return xdr->len;
 }
 
-/* Link Write WRs for @chunk onto @sctxt's WR chain.
- */
-static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
-                                       struct svc_rdma_send_ctxt *sctxt,
-                                       const struct svc_rdma_chunk *chunk,
-                                       const struct xdr_buf *xdr)
+static int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
+                                    const struct svc_rdma_chunk *chunk,
+                                    const struct xdr_buf *xdr)
 {
        struct svc_rdma_write_info *info;
        struct svc_rdma_chunk_ctxt *cc;
-       struct ib_send_wr *first_wr;
        struct xdr_buf payload;
-       struct list_head *pos;
-       struct ib_cqe *cqe;
        int ret;
 
        if (xdr_buf_subsegment(xdr, &payload, chunk->ch_position,
@@ -648,25 +623,10 @@ static int svc_rdma_prepare_write_chunk(struct svcxprt_rdma *rdma,
        if (ret != payload.len)
                goto out_err;
 
-       ret = -EINVAL;
-       if (unlikely(cc->cc_sqecount > rdma->sc_sq_depth))
-               goto out_err;
-
-       first_wr = sctxt->sc_wr_chain;
-       cqe = &cc->cc_cqe;
-       list_for_each(pos, &cc->cc_rwctxts) {
-               struct svc_rdma_rw_ctxt *rwc;
-
-               rwc = list_entry(pos, struct svc_rdma_rw_ctxt, rw_list);
-               first_wr = rdma_rw_ctx_wrs(&rwc->rw_ctx, rdma->sc_qp,
-                                          rdma->sc_port_num, cqe, first_wr);
-               cqe = NULL;
-       }
-       sctxt->sc_wr_chain = first_wr;
-       sctxt->sc_sqecount += cc->cc_sqecount;
-       list_add(&info->wi_list, &sctxt->sc_write_info_list);
-
        trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
+       ret = svc_rdma_post_chunk_ctxt(rdma, cc);
+       if (ret < 0)
+               goto out_err;
        return 0;
 
 out_err:
@@ -675,27 +635,25 @@ out_err:
 }
 
 /**
- * svc_rdma_prepare_write_list - Construct WR chain for sending Write list
+ * svc_rdma_send_write_list - Send all chunks on the Write list
  * @rdma: controlling RDMA transport
- * @write_pcl: Write list provisioned by the client
- * @sctxt: Send WR resources
+ * @rctxt: Write list provisioned by the client
  * @xdr: xdr_buf containing an RPC Reply message
  *
  * Returns zero on success, or a negative errno if one or more
  * Write chunks could not be sent.
  */
-int svc_rdma_prepare_write_list(struct svcxprt_rdma *rdma,
-                               const struct svc_rdma_pcl *write_pcl,
-                               struct svc_rdma_send_ctxt *sctxt,
-                               const struct xdr_buf *xdr)
+int svc_rdma_send_write_list(struct svcxprt_rdma *rdma,
+                            const struct svc_rdma_recv_ctxt *rctxt,
+                            const struct xdr_buf *xdr)
 {
        struct svc_rdma_chunk *chunk;
        int ret;
 
-       pcl_for_each_chunk(chunk, write_pcl) {
+       pcl_for_each_chunk(chunk, &rctxt->rc_write_pcl) {
                if (!chunk->ch_payload_length)
                        break;
-               ret = svc_rdma_prepare_write_chunk(rdma, sctxt, chunk, xdr);
+               ret = svc_rdma_send_write_chunk(rdma, chunk, xdr);
                if (ret < 0)
                        return ret;
        }
index dfca39abd16c8860ade9a8f3fc0be4bc023361cd..bb5436b719e05126e250596b61b39230204620c3 100644 (file)
@@ -142,7 +142,6 @@ svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
        ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
        ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
        ctxt->sc_cqe.done = svc_rdma_wc_send;
-       INIT_LIST_HEAD(&ctxt->sc_write_info_list);
        ctxt->sc_xprt_buf = buffer;
        xdr_buf_init(&ctxt->sc_hdrbuf, ctxt->sc_xprt_buf,
                     rdma->sc_max_req_size);
@@ -228,7 +227,6 @@ static void svc_rdma_send_ctxt_release(struct svcxprt_rdma *rdma,
        struct ib_device *device = rdma->sc_cm_id->device;
        unsigned int i;
 
-       svc_rdma_write_chunk_release(rdma, ctxt);
        svc_rdma_reply_chunk_release(rdma, ctxt);
 
        if (ctxt->sc_page_count)
@@ -1015,8 +1013,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
        if (!p)
                goto put_ctxt;
 
-       ret = svc_rdma_prepare_write_list(rdma, &rctxt->rc_write_pcl, sctxt,
-                                         &rqstp->rq_res);
+       ret = svc_rdma_send_write_list(rdma, rctxt, &rqstp->rq_res);
        if (ret < 0)
                goto put_ctxt;
 
index 762f424ff2d59c51ba176bc9dff81b499542fb5c..e5e47452308ab713032d58eedbff68bee9dc3a8d 100644 (file)
@@ -215,7 +215,7 @@ static inline struct sk_buff *tls_strp_msg(struct tls_sw_context_rx *ctx)
 
 static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
 {
-       return ctx->strp.msg_ready;
+       return READ_ONCE(ctx->strp.msg_ready);
 }
 
 static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
index ca1e0e198ceb452fac72dd48f22ea0a1dcddee6b..5df08d848b5c9c9cf36e21a78c7bc59f38a8f22b 100644 (file)
@@ -360,7 +360,7 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
        if (strp->stm.full_len && strp->stm.full_len == skb->len) {
                desc->count = 0;
 
-               strp->msg_ready = 1;
+               WRITE_ONCE(strp->msg_ready, 1);
                tls_rx_msg_ready(strp);
        }
 
@@ -528,7 +528,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
        if (!tls_strp_check_queue_ok(strp))
                return tls_strp_read_copy(strp, false);
 
-       strp->msg_ready = 1;
+       WRITE_ONCE(strp->msg_ready, 1);
        tls_rx_msg_ready(strp);
 
        return 0;
@@ -580,7 +580,7 @@ void tls_strp_msg_done(struct tls_strparser *strp)
        else
                tls_strp_flush_anchor_copy(strp);
 
-       strp->msg_ready = 0;
+       WRITE_ONCE(strp->msg_ready, 0);
        memset(&strp->stm, 0, sizeof(strp->stm));
 
        tls_strp_check_rcv(strp);
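A stand-alone user-space model of why msg_ready moved out of the bitfield (strp_model and the accessor macros below are stand-ins, not kernel code): READ_ONCE()/WRITE_ONCE() need an addressable object, which a 1-bit bitfield is not, and adjacent bitfields share a memory location, so a lockless reader could observe them mid-update. A separate bool has its own location and can be flagged and polled locklessly:

#include <stdbool.h>
#include <stdio.h>

/* user-space stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() */
#define WRITE_ONCE(x, val)      (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)            (*(volatile const __typeof__(x) *)&(x))

struct strp_model {
        unsigned int stopped : 1;
        unsigned int copy_mode : 1;
        /*
         * msg_ready used to be a third 1-bit field here; as a bitfield it
         * shares a memory location with its neighbours and cannot be
         * accessed independently of them.
         */
        bool msg_ready;                 /* own location: safe to flag lockless */
};

int main(void)
{
        struct strp_model s = { 0 };

        WRITE_ONCE(s.msg_ready, true);
        printf("msg_ready=%d\n", READ_ONCE(s.msg_ready));
        return 0;
}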
index 6433a414acf8624a1d98727f4e309b7c040710b9..0104be9d4704563791b1c1558fcbf166649fee25 100644 (file)
@@ -299,7 +299,7 @@ static void __unix_gc(struct work_struct *work)
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
 
                        if (sk->sk_state == TCP_LISTEN) {
-                               unix_state_lock(sk);
+                               unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
                                unix_state_unlock(sk);
                        }
                }
index b4edba6b0b7ba0bf3200c7ce966a3e286a6fdf24..30ff9a47081348d1b14d1db520aeedf9c9ffdd09 100644 (file)
@@ -14030,6 +14030,8 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
 error:
        for (i = 0; i < new_coalesce.n_rules; i++) {
                tmp_rule = &new_coalesce.rules[i];
+               if (!tmp_rule)
+                       continue;
                for (j = 0; j < tmp_rule->n_patterns; j++)
                        kfree(tmp_rule->patterns[j].mask);
                kfree(tmp_rule->patterns);
index cbbf347c6b2e099802b135266ca7fae59bd467f9..df013c98b80dfb0e06a86a78415e51d79cd76693 100644 (file)
@@ -1758,7 +1758,7 @@ TRACE_EVENT(rdev_return_void_tx_rx,
 
 DECLARE_EVENT_CLASS(tx_rx_evt,
        TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
-       TP_ARGS(wiphy, rx, tx),
+       TP_ARGS(wiphy, tx, rx),
        TP_STRUCT__entry(
                WIPHY_ENTRY
                __field(u32, tx)
@@ -1775,7 +1775,7 @@ DECLARE_EVENT_CLASS(tx_rx_evt,
 
 DEFINE_EVENT(tx_rx_evt, rdev_set_antenna,
        TP_PROTO(struct wiphy *wiphy, u32 tx, u32 rx),
-       TP_ARGS(wiphy, rx, tx)
+       TP_ARGS(wiphy, tx, rx)
 );
 
 DECLARE_EVENT_CLASS(wiphy_netdev_id_evt,
index 5fa7957f6e0f56646249c278b7434f300754df77..25810e18b0a73272f42ff3a8d9335ee79699b6a0 100644 (file)
@@ -182,6 +182,7 @@ class NlMsg:
             self.done = 1
             extack_off = 20
         elif self.nl_type == Netlink.NLMSG_DONE:
+            self.error = struct.unpack("i", self.raw[0:4])[0]
             self.done = 1
             extack_off = 4