Merge tag 'net-6.9-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 18 Apr 2024 18:40:54 +0000 (11:40 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 18 Apr 2024 18:40:54 +0000 (11:40 -0700)
Pull networking fixes from Jakub Kicinski:
 "A little calmer than usual, probably just the timing of sub-tree PRs.

  Including fixes from netfilter.

  Current release - regressions:

   - inet: bring NLM_DONE out to a separate recv() again, fix user space
     that assumes multiple recv()s will happen and otherwise blocks
     forever (see the netlink sketch after this list)

   - drv: mlx5:
       - restore mistakenly dropped parts in register devlink flow
       - use channel mdev reference instead of global mdev instance for
         coalescing
       - acquire RTNL lock before RQs/SQs activation/deactivation
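
   A minimal user-space sketch of the dump pattern the inet fix restores
   (illustrative only, not the patched kernel code): NLMSG_DONE may
   arrive in a recv() of its own, so the reader must keep calling recv()
   until it actually sees NLMSG_DONE rather than assuming the dump ends
   with the last data message:

     #include <sys/socket.h>
     #include <linux/netlink.h>
     #include <linux/rtnetlink.h>
     #include <unistd.h>

     int main(void)
     {
         int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
         struct { struct nlmsghdr nh; struct rtgenmsg g; } req = {
             .nh = { .nlmsg_len   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
                     .nlmsg_type  = RTM_GETLINK,
                     .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP },
             .g  = { .rtgen_family = AF_UNSPEC },
         };
         char buf[16384];

         send(fd, &req, req.nh.nlmsg_len, 0);
         for (;;) {                      /* keep reading ... */
             int len = recv(fd, buf, sizeof(buf), 0);
             struct nlmsghdr *nh;

             if (len <= 0)
                 break;
             for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
                  nh = NLMSG_NEXT(nh, len))
                 if (nh->nlmsg_type == NLMSG_DONE)
                     goto done;          /* ... until NLMSG_DONE */
         }
     done:
         close(fd);
         return 0;
     }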

  Previous releases - regressions:

   - net: raise the maximum number of UDP GSO segments to 128, fixing
     virtio compatibility with Windows peers (see the example after this
     list)

   - usb: ax88179_178a: avoid writing the mac address before first
     reading
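
   For reference, a hedged sketch of the UDP GSO API the new 128-segment
   cap applies to (UDP_SEGMENT and its value 103 come from linux/udp.h;
   the payload and segment sizes here are made up for illustration):

     #include <netinet/in.h>
     #include <sys/socket.h>

     #ifndef UDP_SEGMENT
     #define UDP_SEGMENT 103            /* from linux/udp.h */
     #endif

     /* One 64000-byte payload as 512-byte segments: 125 segments,
      * rejected under the old 64-segment cap, accepted under 128. */
     static int send_gso(int fd, const struct sockaddr_in *dst)
     {
         static char payload[64000];
         int gso_size = 512;

         if (setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                        &gso_size, sizeof(gso_size)) < 0)
             return -1;
         return sendto(fd, payload, sizeof(payload), 0,
                       (const struct sockaddr *)dst, sizeof(*dst)) < 0
                ? -1 : 0;
     }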

  Previous releases - always broken:

   - sched: fix mirred deadlock on device recursion

   - netfilter:
       - br_netfilter: skip conntrack input hook for promisc packets
       - fix removal of duplicate elements in the pipapo set backend
       - various fixes for abort paths and error handling

   - af_unix: don't peek OOB data without MSG_OOB (see the socketpair
     sketch after this list)

   - drv: flower: fix fragment flags handling in multiple drivers

   - drv: ravb: fix jumbo frames and packet stats accounting
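
   The fixed af_unix semantics, as a socketpair sketch (assumed
   demonstration code, not the kernel patch): a byte sent with MSG_OOB
   must only be visible to a recv() that also passes MSG_OOB:

     #include <stdio.h>
     #include <sys/socket.h>

     int main(void)
     {
         int sv[2];
         char c = 0;

         socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
         send(sv[0], "a", 1, 0);
         send(sv[0], "b", 1, MSG_OOB);   /* mark 'b' out-of-band */

         recv(sv[1], &c, 1, 0);          /* in-band 'a' */
         /* With the fix, peeking without MSG_OOB must NOT return 'b': */
         if (recv(sv[1], &c, 1, MSG_PEEK | MSG_DONTWAIT) < 0)
             perror("peek without MSG_OOB (EAGAIN expected)");
         recv(sv[1], &c, 1, MSG_OOB);    /* 'b', only via MSG_OOB */
         printf("oob byte: %c\n", c);
         return 0;
     }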

  Misc:

   - kselftest_harness: fix Clang warning about zero-length format

   - tun: limit the printing rate when an illegal packet is received by
     a tun device (sketch below)"
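
A sketch of the general rate-limit pattern the tun fix applies (this is
the stock net_ratelimit() idiom, not the literal tun patch):

  #include <linux/net.h>          /* net_ratelimit() */
  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  /* Gate a per-packet diagnostic behind net_ratelimit() so a flood of
   * bad packets cannot spam the kernel log. */
  static void drop_illegal_packet(struct net_device *dev,
                                  struct sk_buff *skb)
  {
      if (net_ratelimit())
          netdev_err(dev, "dropping illegal packet, len %u\n", skb->len);
      dev->stats.rx_dropped++;
      dev_kfree_skb(skb);
  }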

* tag 'net-6.9-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (46 commits)
  net: ethernet: ti: am65-cpsw-nuss: cleanup DMA Channels before using them
  net: usb: ax88179_178a: avoid writing the mac address before first reading
  net: ravb: Fix RX byte accounting for jumbo packets
  net: ravb: Fix GbEth jumbo packet RX checksum handling
  net: ravb: Allow RX loop to move past DMA mapping errors
  net: ravb: Count packets instead of descriptors in R-Car RX path
  net: ethernet: mtk_eth_soc: fix WED + wifi reset
  net:usb:qmi_wwan: support Rolling modules
  selftests: kselftest_harness: fix Clang warning about zero-length format
  net/sched: Fix mirred deadlock on device recursion
  netfilter: nf_tables: fix memleak in map from abort path
  netfilter: nf_tables: restore set elements when delete set fails
  netfilter: nf_tables: missing iterator type in lookup walk
  s390/ism: Properly fix receive message buffer allocation
  net: dsa: mt7530: fix port mirroring for MT7988 SoC switch
  net: dsa: mt7530: fix mirroring frames received on local port
  tun: limit printing rate when illegal packet received by tun dev
  ice: Fix checking for unsupported keys on non-tunnel device
  ice: tc: allow zero flags in parsing tc flower
  ice: tc: check src_vsi in case of traffic from VF
  ...

246 files changed:
CREDITS
Documentation/admin-guide/hw-vuln/spectre.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/devicetree/bindings/display/msm/qcom,sm8150-mdss.yaml
Documentation/devicetree/bindings/pwm/mediatek,pwm-disp.yaml
Documentation/driver-api/virtio/writing_virtio_drivers.rst
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/boot/dts/nxp/imx/imx7-mba7.dtsi
arch/arm/boot/dts/nxp/imx/imx7s-warp.dts
arch/arm/mach-omap2/board-n8x0.c
arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
arch/arm64/boot/dts/freescale/imx8-ss-dma.dtsi
arch/arm64/boot/dts/freescale/imx8-ss-lsio.dtsi
arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
arch/arm64/boot/dts/freescale/imx8qm-ss-dma.dtsi
arch/arm64/include/asm/tlbflush.h
arch/mips/include/asm/ptrace.h
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-n64.S
arch/mips/kernel/scall64-o32.S
arch/x86/Kconfig
arch/x86/events/core.c
arch/x86/hyperv/hv_apic.c
arch/x86/hyperv/hv_proc.c
arch/x86/include/asm/apic.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/cpu/topology_amd.c
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-iocost.c
block/blk-settings.c
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_hw.h
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/accel/ivpu/ivpu_hw_40xx.c
drivers/accel/ivpu/ivpu_ipc.c
drivers/accel/ivpu/ivpu_mmu.c
drivers/accel/ivpu/ivpu_pm.c
drivers/acpi/scan.c
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/cache/sifive_ccache.c
drivers/char/random.c
drivers/cxl/acpi.c
drivers/cxl/core/cdat.c
drivers/cxl/core/mbox.c
drivers/cxl/core/port.c
drivers/cxl/core/regs.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/powercap.c
drivers/firmware/arm_scmi/raw_mode.c
drivers/gpio/gpio-crystalcove.c
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpio-wcove.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_state.c
drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
drivers/gpu/drm/ast/ast_dp.c
drivers/gpu/drm/drm_client_modeset.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_cdclk.h
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_hdcp.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_vrr.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_kms.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/xe/display/xe_display.c
drivers/gpu/drm/xe/regs/xe_engine_regs.h
drivers/gpu/drm/xe/xe_hwmon.c
drivers/gpu/drm/xe/xe_lrc.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/host1x/bus.c
drivers/hv/channel.c
drivers/hv/connection.c
drivers/hv/vmbus_drv.c
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/perfmon.c
drivers/iommu/intel/svm.c
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu_v1.c
drivers/irqchip/irq-gic-v3-its.c
drivers/md/raid1.c
drivers/mmc/host/omap.c
drivers/net/hyperv/netvsc.c
drivers/pci/quirks.c
drivers/platform/x86/amd/pmc/pmc-quirks.c
drivers/platform/x86/amd/pmf/Makefile
drivers/platform/x86/amd/pmf/acpi.c
drivers/platform/x86/amd/pmf/core.c
drivers/platform/x86/amd/pmf/pmf-quirks.c [new file with mode: 0644]
drivers/platform/x86/amd/pmf/pmf.h
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-tpmi.c
drivers/pwm/pwm-dwc-core.c
drivers/pwm/pwm-dwc.c
drivers/pwm/pwm-dwc.h
drivers/uio/uio_hv_generic.c
drivers/vhost/vhost.c
drivers/virt/vmgenid.c
drivers/virtio/virtio.c
fs/bcachefs/backpointers.c
fs/bcachefs/backpointers.h
fs/bcachefs/bcachefs.h
fs/bcachefs/bcachefs_format.h
fs/bcachefs/bkey.h
fs/bcachefs/bkey_methods.c
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_node_scan.c
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_write_buffer.c
fs/bcachefs/buckets.h
fs/bcachefs/checksum.c
fs/bcachefs/checksum.h
fs/bcachefs/compress.h
fs/bcachefs/ec.c
fs/bcachefs/ec.h
fs/bcachefs/extents.c
fs/bcachefs/fs-io-direct.c
fs/bcachefs/fs-io.c
fs/bcachefs/journal_io.c
fs/bcachefs/opts.c
fs/bcachefs/opts.h
fs/bcachefs/recovery_passes.c
fs/bcachefs/sb-downgrade.c
fs/bcachefs/sb-errors_types.h
fs/bcachefs/sb-members.c
fs/bcachefs/sb-members.h
fs/bcachefs/super-io.c
fs/bcachefs/super_types.h
fs/bcachefs/sysfs.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/kernfs/file.c
fs/nfsd/nfs4xdr.c
fs/smb/client/cached_dir.c
fs/smb/client/cifsglob.h
fs/smb/client/connect.c
fs/smb/client/fs_context.c
fs/smb/client/fs_context.h
fs/smb/client/inode.c
fs/smb/client/misc.c
fs/smb/client/smb2ops.c
fs/smb/client/smb2pdu.c
fs/tracefs/event_inode.c
fs/zonefs/super.c
include/acpi/acpi_bus.h
include/asm-generic/bug.h
include/asm-generic/hyperv-tlfs.h
include/asm-generic/mshyperv.h
include/linux/dma-fence.h
include/linux/gpio/property.h
include/linux/hyperv.h
include/linux/io_uring_types.h
include/linux/irqflags.h
include/linux/rwbase_rt.h
include/linux/rwsem.h
include/linux/virtio.h
include/trace/events/rpcgss.h
include/uapi/linux/vhost.h
io_uring/io_uring.c
io_uring/net.c
kernel/cpu.c
kernel/dma/swiotlb.c
kernel/power/suspend.c
kernel/time/tick-common.c
kernel/time/tick-sched.c
kernel/trace/Kconfig
kernel/trace/ring_buffer.c
kernel/trace/trace_events.c
tools/hv/hv_kvp_daemon.c
tools/testing/cxl/test/cxl.c
tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc
tools/testing/selftests/kselftest.h
tools/testing/selftests/kselftest_harness.h
tools/testing/selftests/timers/posix_timers.c
tools/testing/selftests/timers/valid-adjtimex.c

diff --git a/CREDITS b/CREDITS
index c55c5a0ee4ff65e244eb3a9de9aeb35515bc2381..0107047f807bfc01a0c5e7ad380e15a5ddc95776 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3146,6 +3146,10 @@ S: Triftstraße 55
 S: 13353 Berlin
 S: Germany
 
+N: Gustavo Pimentel
+E: gustavo.pimentel@synopsys.com
+D: PCI driver for Synopsys DesignWare
+
 N: Emanuel Pirker
 E: epirker@edu.uni-klu.ac.at
 D: AIC5800 IEEE 1394, RAW I/O on 1394
index b70b1d8bd8e6572374ae10632f46757269f2fa7e..25a04cda4c2c054864fa1792d98d9f095ea56a17 100644 (file)
@@ -439,12 +439,12 @@ The possible values in this file are:
    - System is protected by retpoline
  * - BHI: BHI_DIS_S
    - System is protected by BHI_DIS_S
- * - BHI: SW loop; KVM SW loop
+ * - BHI: SW loop, KVM SW loop
    - System is protected by software clearing sequence
- * - BHI: Syscall hardening
-   - Syscalls are hardened against BHI
- * - BHI: Syscall hardening; KVM: SW loop
-   - System is protected from userspace attacks by syscall hardening; KVM is protected by software clearing sequence
+ * - BHI: Vulnerable
+   - System is vulnerable to BHI
+ * - BHI: Vulnerable, KVM: SW loop
+   - System is vulnerable; KVM is protected by software clearing sequence
 
 Full mitigation might require a microcode update from the CPU
 vendor. When the necessary microcode is not available, the kernel will
@@ -661,18 +661,14 @@ kernel command line.
        spectre_bhi=
 
                [X86] Control mitigation of Branch History Injection
-               (BHI) vulnerability. Syscalls are hardened against BHI
-               regardless of this setting. This setting affects the deployment
+               (BHI) vulnerability.  This setting affects the deployment
                of the HW BHI control and the SW BHB clearing sequence.
 
                on
-                       unconditionally enable.
+                       (default) Enable the HW or SW mitigation as
+                       needed.
                off
-                       unconditionally disable.
-               auto
-                       enable if hardware mitigation
-                       control(BHI_DIS_S) is available, otherwise
-                       enable alternate mitigation in KVM.
+                       Disable the mitigation.
 
 For spectre_v2_user see Documentation/admin-guide/kernel-parameters.txt
 
index 70046a019d42d80b1f56d1e80577dfd084fdd5f8..902ecd92a29fbe83df18d32d1a8fe652c8277132 100644 (file)
                                               retbleed=off [X86]
                                               spec_rstack_overflow=off [X86]
                                               spec_store_bypass_disable=off [X86,PPC]
+                                              spectre_bhi=off [X86]
                                               spectre_v2_user=off [X86]
                                               srbds=off [X86,INTEL]
                                               ssbd=force-off [ARM64]
                        See Documentation/admin-guide/laptops/sonypi.rst
 
        spectre_bhi=    [X86] Control mitigation of Branch History Injection
-                       (BHI) vulnerability. Syscalls are hardened against BHI
-                       reglardless of this setting. This setting affects the
+                       (BHI) vulnerability.  This setting affects the
                        deployment of the HW BHI control and the SW BHB
                        clearing sequence.
 
-                       on   - unconditionally enable.
-                       off  - unconditionally disable.
-                       auto - (default) enable hardware mitigation
-                              (BHI_DIS_S) if available, otherwise enable
-                              alternate mitigation in KVM.
+                       on   - (default) Enable the HW or SW mitigation
+                              as needed.
+                       off  - Disable the mitigation.
 
        spectre_v2=     [X86,EARLY] Control mitigation of Spectre variant 2
                        (indirect branch speculation) vulnerability.
index c0d6a4fdff97e37f31ecc763347497aea9450780..e6dc5494baee29a7171c11ac074159e6a08f8627 100644 (file)
@@ -53,6 +53,15 @@ patternProperties:
       compatible:
         const: qcom,sm8150-dpu
 
+  "^displayport-controller@[0-9a-f]+$":
+    type: object
+    additionalProperties: true
+
+    properties:
+      compatible:
+        contains:
+          const: qcom,sm8150-dp
+
   "^dsi@[0-9a-f]+$":
     type: object
     additionalProperties: true
index afcdeed4e88af625ea4f0f371cc11ffdbe824859..bc813fe74faba5ae50bc81ecb2f75f9e1d8803c9 100644 (file)
@@ -52,6 +52,9 @@ properties:
       - const: main
       - const: mm
 
+  power-domains:
+    maxItems: 1
+
 required:
   - compatible
   - reg
index e14c58796d250116107041b1be3e40aafa564656..e5de6f5d061a7c2162bc6fac628e542389602be3 100644 (file)
@@ -97,7 +97,6 @@ like this::
 
        static struct virtio_driver virtio_dummy_driver = {
                .driver.name =  KBUILD_MODNAME,
-               .driver.owner = THIS_MODULE,
                .id_table =     id_table,
                .probe =        virtio_dummy_probe,
                .remove =       virtio_dummy_remove,
index b5b89687680b98eace9cb453ec2690bad735a6f2..c23fda1aa1f092302910e962e7ce8c107030a3ca 100644 (file)
@@ -2707,7 +2707,7 @@ F:        sound/soc/rockchip/
 N:     rockchip
 
 ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org
@@ -4869,7 +4869,6 @@ F:        drivers/power/supply/cw2015_battery.c
 CEPH COMMON CODE (LIBCEPH)
 M:     Ilya Dryomov <idryomov@gmail.com>
 M:     Xiubo Li <xiubli@redhat.com>
-R:     Jeff Layton <jlayton@kernel.org>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
@@ -4881,7 +4880,6 @@ F:        net/ceph/
 CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
 M:     Xiubo Li <xiubli@redhat.com>
 M:     Ilya Dryomov <idryomov@gmail.com>
-R:     Jeff Layton <jlayton@kernel.org>
 L:     ceph-devel@vger.kernel.org
 S:     Supported
 W:     http://ceph.com/
@@ -5557,7 +5555,7 @@ F:        drivers/cpuidle/cpuidle-big_little.c
 CPUIDLE DRIVER - ARM EXYNOS
 M:     Daniel Lezcano <daniel.lezcano@linaro.org>
 M:     Kukjin Kim <kgene@kernel.org>
-R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -8996,7 +8994,7 @@ F:        drivers/i2c/muxes/i2c-mux-gpio.c
 F:     include/linux/platform_data/i2c-mux-gpio.h
 
 GENERIC GPIO RESET DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 S:     Maintained
 F:     drivers/reset/reset-gpio.c
 
@@ -13291,7 +13289,7 @@ F:      drivers/iio/adc/max11205.c
 
 MAXIM MAX17040 FAMILY FUEL GAUGE DRIVERS
 R:     Iskren Chernev <iskren.chernev@gmail.com>
-R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R:     Krzysztof Kozlowski <krzk@kernel.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Matheus Castello <matheus@castello.eng.br>
 L:     linux-pm@vger.kernel.org
@@ -13301,7 +13299,7 @@ F:      drivers/power/supply/max17040_battery.c
 
 MAXIM MAX17042 FAMILY FUEL GAUGE DRIVERS
 R:     Hans de Goede <hdegoede@redhat.com>
-R:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+R:     Krzysztof Kozlowski <krzk@kernel.org>
 R:     Marek Szyprowski <m.szyprowski@samsung.com>
 R:     Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
 R:     Purism Kernel Team <kernel@puri.sm>
@@ -13359,7 +13357,7 @@ F:      Documentation/devicetree/bindings/power/supply/maxim,max77976.yaml
 F:     drivers/power/supply/max77976_charger.c
 
 MAXIM MUIC CHARGER DRIVERS FOR EXYNOS BASED BOARDS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 B:     mailto:linux-samsung-soc@vger.kernel.org
@@ -13370,7 +13368,7 @@ F:      drivers/power/supply/max77693_charger.c
 
 MAXIM PMIC AND MUIC DRIVERS FOR EXYNOS BASED BOARDS
 M:     Chanwoo Choi <cw00.choi@samsung.com>
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 B:     mailto:linux-samsung-soc@vger.kernel.org
@@ -14154,7 +14152,7 @@ F:      mm/mm_init.c
 F:     tools/testing/memblock/
 
 MEMORY CONTROLLER DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 B:     mailto:krzysztof.kozlowski@linaro.org
@@ -15535,7 +15533,7 @@ F:      include/uapi/linux/nexthop.h
 F:     net/ipv4/nexthop.c
 
 NFC SUBSYSTEM
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/
@@ -15912,7 +15910,7 @@ F:      Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
 F:     drivers/regulator/pf8x00-regulator.c
 
 NXP PTN5150A CC LOGIC AND EXTCON DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/extcon/extcon-ptn5150.yaml
@@ -16523,7 +16521,7 @@ K:      of_overlay_remove
 
 OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
 M:     Rob Herring <robh@kernel.org>
-M:     Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>
+M:     Krzysztof Kozlowski <krzk+dt@kernel.org>
 M:     Conor Dooley <conor+dt@kernel.org>
 L:     devicetree@vger.kernel.org
 S:     Maintained
@@ -16970,7 +16968,6 @@ F:      drivers/pci/controller/dwc/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSYS DESIGNWARE
 M:     Jingoo Han <jingoohan1@gmail.com>
-M:     Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 M:     Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
@@ -17481,7 +17478,7 @@ F:      Documentation/devicetree/bindings/pinctrl/renesas,*
 F:     drivers/pinctrl/renesas/
 
 PIN CONTROLLER - SAMSUNG
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -19449,7 +19446,7 @@ F:      Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
 
 SAMSUNG EXYNOS PSEUDO RANDOM NUMBER GENERATOR (RNG) DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -19484,7 +19481,7 @@ S:      Maintained
 F:     drivers/platform/x86/samsung-laptop.c
 
 SAMSUNG MULTIFUNCTION PMIC DEVICE DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-kernel@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -19510,7 +19507,7 @@ F:      drivers/media/platform/samsung/s3c-camif/
 F:     include/media/drv-intf/s3c_camif.h
 
 SAMSUNG S3FWRN5 NFC DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
 F:     drivers/nfc/s3fwrn5
@@ -19531,7 +19528,7 @@ S:      Supported
 F:     drivers/media/i2c/s5k5baf.c
 
 SAMSUNG S5P Security SubSystem (SSS) DRIVER
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Vladimir Zapolskiy <vz@mleia.com>
 L:     linux-crypto@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -19553,7 +19550,7 @@ F:      Documentation/devicetree/bindings/media/samsung,fimc.yaml
 F:     drivers/media/platform/samsung/exynos4-is/
 
 SAMSUNG SOC CLOCK DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 M:     Chanwoo Choi <cw00.choi@samsung.com>
 R:     Alim Akhtar <alim.akhtar@samsung.com>
@@ -19585,7 +19582,7 @@ F:      drivers/net/ethernet/samsung/sxgbe/
 
 SAMSUNG THERMAL DRIVER
 M:     Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 L:     linux-pm@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
 S:     Maintained
@@ -23782,7 +23779,7 @@ S:      Orphan
 F:     drivers/mmc/host/vub300.c
 
 W1 DALLAS'S 1-WIRE BUS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
+M:     Krzysztof Kozlowski <krzk@kernel.org>
 S:     Maintained
 F:     Documentation/devicetree/bindings/w1/
 F:     Documentation/w1/
index e1bf12891cb0e4a7471d60cf4b2eb0050d600d2f..59d8a7f95d0a863e4308853f9e01baa9fb8fed84 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 9
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 9f066785bb71d93ca5da01a22d15ed2effba5901..65afb1de48b36e843bd665a91df6b33badfd23d1 100644 (file)
@@ -1172,12 +1172,12 @@ config PAGE_SIZE_LESS_THAN_256KB
 
 config PAGE_SHIFT
        int
-       default 12 if PAGE_SIZE_4KB
-       default 13 if PAGE_SIZE_8KB
-       default 14 if PAGE_SIZE_16KB
-       default 15 if PAGE_SIZE_32KB
-       default 16 if PAGE_SIZE_64KB
-       default 18 if PAGE_SIZE_256KB
+       default 12 if PAGE_SIZE_4KB
+       default 13 if PAGE_SIZE_8KB
+       default 14 if PAGE_SIZE_16KB
+       default 15 if PAGE_SIZE_32KB
+       default 16 if PAGE_SIZE_64KB
+       default 18 if PAGE_SIZE_256KB
 
 # This allows to use a set of generic functions to determine mmap base
 # address by giving priority to top-down scheme only if the process
index 1235a71c6abe96564059010e214f87304d7d4e8c..52869e68f833c4d8f7cefdcefeadba9b8b78f87a 100644 (file)
        bus-width = <4>;
        no-1-8-v;
        no-sdio;
-       no-emmc;
+       no-mmc;
        status = "okay";
 };
 
index ba7231b364bb8c76296e953bbfa450bc49c1293a..7bab113ca6da79ed3941e7d6550fecfd31687f25 100644 (file)
                                remote-endpoint = <&mipi_from_sensor>;
                                clock-lanes = <0>;
                                data-lanes = <1>;
+                               link-frequencies = /bits/ 64 <330000000>;
                        };
                };
        };
index 31755a378c7364b5b5a055fa59b8796600a898a9..ff2a4a4d822047168008446e6c1835bd7358e789 100644 (file)
@@ -79,10 +79,8 @@ static struct musb_hdrc_platform_data tusb_data = {
 static struct gpiod_lookup_table tusb_gpio_table = {
        .dev_id = "musb-tusb",
        .table = {
-               GPIO_LOOKUP("gpio-0-15", 0, "enable",
-                           GPIO_ACTIVE_HIGH),
-               GPIO_LOOKUP("gpio-48-63", 10, "int",
-                           GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-0-31", 0, "enable", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-32-63", 26, "int", GPIO_ACTIVE_HIGH),
                { }
        },
 };
@@ -140,12 +138,11 @@ static int slot1_cover_open;
 static int slot2_cover_open;
 static struct device *mmc_device;
 
-static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
+static struct gpiod_lookup_table nokia800_mmc_gpio_table = {
        .dev_id = "mmci-omap.0",
        .table = {
                /* Slot switch, GPIO 96 */
-               GPIO_LOOKUP("gpio-80-111", 16,
-                           "switch", GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
                { }
        },
 };
@@ -153,12 +150,12 @@ static struct gpiod_lookup_table nokia8xx_mmc_gpio_table = {
 static struct gpiod_lookup_table nokia810_mmc_gpio_table = {
        .dev_id = "mmci-omap.0",
        .table = {
+               /* Slot switch, GPIO 96 */
+               GPIO_LOOKUP("gpio-96-127", 0, "switch", GPIO_ACTIVE_HIGH),
                /* Slot index 1, VSD power, GPIO 23 */
-               GPIO_LOOKUP_IDX("gpio-16-31", 7,
-                               "vsd", 1, GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP_IDX("gpio-0-31", 23, "vsd", 1, GPIO_ACTIVE_HIGH),
                /* Slot index 1, VIO power, GPIO 9 */
-               GPIO_LOOKUP_IDX("gpio-0-15", 9,
-                               "vio", 1, GPIO_ACTIVE_HIGH),
+               GPIO_LOOKUP_IDX("gpio-0-31", 9, "vio", 1, GPIO_ACTIVE_HIGH),
                { }
        },
 };
@@ -415,8 +412,6 @@ static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
 
 static void __init n8x0_mmc_init(void)
 {
-       gpiod_add_lookup_table(&nokia8xx_mmc_gpio_table);
-
        if (board_is_n810()) {
                mmc1_data.slots[0].name = "external";
 
@@ -429,6 +424,8 @@ static void __init n8x0_mmc_init(void)
                mmc1_data.slots[1].name = "internal";
                mmc1_data.slots[1].ban_openended = 1;
                gpiod_add_lookup_table(&nokia810_mmc_gpio_table);
+       } else {
+               gpiod_add_lookup_table(&nokia800_mmc_gpio_table);
        }
 
        mmc1_data.nr_slots = 2;
index 3c42240e78e245fe54ab5c637d9fa071dc2c0b34..4aaf5a0c1ed8af6f7f845be079c9297f35d2d72b 100644 (file)
@@ -41,7 +41,7 @@ conn_subsys: bus@5b000000 {
                interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
                fsl,usbphy = <&usbphy1>;
                fsl,usbmisc = <&usbmisc1 0>;
-               clocks = <&usb2_lpcg 0>;
+               clocks = <&usb2_lpcg IMX_LPCG_CLK_6>;
                ahb-burst-config = <0x0>;
                tx-burst-size-dword = <0x10>;
                rx-burst-size-dword = <0x10>;
@@ -58,7 +58,7 @@ conn_subsys: bus@5b000000 {
        usbphy1: usbphy@5b100000 {
                compatible = "fsl,imx7ulp-usbphy";
                reg = <0x5b100000 0x1000>;
-               clocks = <&usb2_lpcg 1>;
+               clocks = <&usb2_lpcg IMX_LPCG_CLK_7>;
                power-domains = <&pd IMX_SC_R_USB_0_PHY>;
                status = "disabled";
        };
@@ -67,8 +67,8 @@ conn_subsys: bus@5b000000 {
                interrupts = <GIC_SPI 232 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0x5b010000 0x10000>;
                clocks = <&sdhc0_lpcg IMX_LPCG_CLK_4>,
-                        <&sdhc0_lpcg IMX_LPCG_CLK_0>,
-                        <&sdhc0_lpcg IMX_LPCG_CLK_5>;
+                        <&sdhc0_lpcg IMX_LPCG_CLK_5>,
+                        <&sdhc0_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "ahb", "per";
                power-domains = <&pd IMX_SC_R_SDHC_0>;
                status = "disabled";
@@ -78,8 +78,8 @@ conn_subsys: bus@5b000000 {
                interrupts = <GIC_SPI 233 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0x5b020000 0x10000>;
                clocks = <&sdhc1_lpcg IMX_LPCG_CLK_4>,
-                        <&sdhc1_lpcg IMX_LPCG_CLK_0>,
-                        <&sdhc1_lpcg IMX_LPCG_CLK_5>;
+                        <&sdhc1_lpcg IMX_LPCG_CLK_5>,
+                        <&sdhc1_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "ahb", "per";
                power-domains = <&pd IMX_SC_R_SDHC_1>;
                fsl,tuning-start-tap = <20>;
@@ -91,8 +91,8 @@ conn_subsys: bus@5b000000 {
                interrupts = <GIC_SPI 234 IRQ_TYPE_LEVEL_HIGH>;
                reg = <0x5b030000 0x10000>;
                clocks = <&sdhc2_lpcg IMX_LPCG_CLK_4>,
-                        <&sdhc2_lpcg IMX_LPCG_CLK_0>,
-                        <&sdhc2_lpcg IMX_LPCG_CLK_5>;
+                        <&sdhc2_lpcg IMX_LPCG_CLK_5>,
+                        <&sdhc2_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "ahb", "per";
                power-domains = <&pd IMX_SC_R_SDHC_2>;
                status = "disabled";
index cab3468b1875ee885f32a842f92d56cc0b744998..f7a91d43a0ffe10e85e2b1e71ff6751c314b6ef7 100644 (file)
@@ -28,8 +28,8 @@ dma_subsys: bus@5a000000 {
                #size-cells = <0>;
                interrupts = <GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&spi0_lpcg 0>,
-                        <&spi0_lpcg 1>;
+               clocks = <&spi0_lpcg IMX_LPCG_CLK_0>,
+                        <&spi0_lpcg IMX_LPCG_CLK_4>;
                clock-names = "per", "ipg";
                assigned-clocks = <&clk IMX_SC_R_SPI_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <60000000>;
@@ -44,8 +44,8 @@ dma_subsys: bus@5a000000 {
                #size-cells = <0>;
                interrupts = <GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&spi1_lpcg 0>,
-                        <&spi1_lpcg 1>;
+               clocks = <&spi1_lpcg IMX_LPCG_CLK_0>,
+                        <&spi1_lpcg IMX_LPCG_CLK_4>;
                clock-names = "per", "ipg";
                assigned-clocks = <&clk IMX_SC_R_SPI_1 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <60000000>;
@@ -60,8 +60,8 @@ dma_subsys: bus@5a000000 {
                #size-cells = <0>;
                interrupts = <GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&spi2_lpcg 0>,
-                        <&spi2_lpcg 1>;
+               clocks = <&spi2_lpcg IMX_LPCG_CLK_0>,
+                        <&spi2_lpcg IMX_LPCG_CLK_4>;
                clock-names = "per", "ipg";
                assigned-clocks = <&clk IMX_SC_R_SPI_2 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <60000000>;
@@ -76,8 +76,8 @@ dma_subsys: bus@5a000000 {
                #size-cells = <0>;
                interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&spi3_lpcg 0>,
-                        <&spi3_lpcg 1>;
+               clocks = <&spi3_lpcg IMX_LPCG_CLK_0>,
+                        <&spi3_lpcg IMX_LPCG_CLK_4>;
                clock-names = "per", "ipg";
                assigned-clocks = <&clk IMX_SC_R_SPI_3 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <60000000>;
@@ -145,8 +145,8 @@ dma_subsys: bus@5a000000 {
                compatible = "fsl,imx8qxp-pwm", "fsl,imx27-pwm";
                reg = <0x5a190000 0x1000>;
                interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&adma_pwm_lpcg 1>,
-                        <&adma_pwm_lpcg 0>;
+               clocks = <&adma_pwm_lpcg IMX_LPCG_CLK_4>,
+                        <&adma_pwm_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "per";
                assigned-clocks = <&clk IMX_SC_R_LCD_0_PWM_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
@@ -355,8 +355,8 @@ dma_subsys: bus@5a000000 {
                reg = <0x5a880000 0x10000>;
                interrupts = <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&adc0_lpcg 0>,
-                        <&adc0_lpcg 1>;
+               clocks = <&adc0_lpcg IMX_LPCG_CLK_0>,
+                        <&adc0_lpcg IMX_LPCG_CLK_4>;
                clock-names = "per", "ipg";
                assigned-clocks = <&clk IMX_SC_R_ADC_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
@@ -370,8 +370,8 @@ dma_subsys: bus@5a000000 {
                reg = <0x5a890000 0x10000>;
                interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&adc1_lpcg 0>,
-                        <&adc1_lpcg 1>;
+               clocks = <&adc1_lpcg IMX_LPCG_CLK_0>,
+                        <&adc1_lpcg IMX_LPCG_CLK_4>;
                clock-names = "per", "ipg";
                assigned-clocks = <&clk IMX_SC_R_ADC_1 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
@@ -384,8 +384,8 @@ dma_subsys: bus@5a000000 {
                reg = <0x5a8d0000 0x10000>;
                interrupts = <GIC_SPI 235 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-parent = <&gic>;
-               clocks = <&can0_lpcg 1>,
-                        <&can0_lpcg 0>;
+               clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+                        <&can0_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "per";
                assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <40000000>;
@@ -405,8 +405,8 @@ dma_subsys: bus@5a000000 {
                 * CAN1 shares CAN0's clock and to enable CAN0's clock it
                 * has to be powered on.
                 */
-               clocks = <&can0_lpcg 1>,
-                        <&can0_lpcg 0>;
+               clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+                        <&can0_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "per";
                assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <40000000>;
@@ -426,8 +426,8 @@ dma_subsys: bus@5a000000 {
                 * CAN2 shares CAN0's clock and to enable CAN0's clock it
                 * has to be powered on.
                 */
-               clocks = <&can0_lpcg 1>,
-                        <&can0_lpcg 0>;
+               clocks = <&can0_lpcg IMX_LPCG_CLK_4>,
+                        <&can0_lpcg IMX_LPCG_CLK_0>;
                clock-names = "ipg", "per";
                assigned-clocks = <&clk IMX_SC_R_CAN_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <40000000>;
index 7e510b21bbac555b38cede99f97b4edc177bf520..764c1a08e3b118841299d99a5cecb29a095e2f66 100644 (file)
@@ -25,8 +25,8 @@ lsio_subsys: bus@5d000000 {
                compatible = "fsl,imx27-pwm";
                reg = <0x5d000000 0x10000>;
                clock-names = "ipg", "per";
-               clocks = <&pwm0_lpcg 4>,
-                        <&pwm0_lpcg 1>;
+               clocks = <&pwm0_lpcg IMX_LPCG_CLK_6>,
+                        <&pwm0_lpcg IMX_LPCG_CLK_1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_0 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
                #pwm-cells = <3>;
@@ -38,8 +38,8 @@ lsio_subsys: bus@5d000000 {
                compatible = "fsl,imx27-pwm";
                reg = <0x5d010000 0x10000>;
                clock-names = "ipg", "per";
-               clocks = <&pwm1_lpcg 4>,
-                        <&pwm1_lpcg 1>;
+               clocks = <&pwm1_lpcg IMX_LPCG_CLK_6>,
+                        <&pwm1_lpcg IMX_LPCG_CLK_1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_1 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
                #pwm-cells = <3>;
@@ -51,8 +51,8 @@ lsio_subsys: bus@5d000000 {
                compatible = "fsl,imx27-pwm";
                reg = <0x5d020000 0x10000>;
                clock-names = "ipg", "per";
-               clocks = <&pwm2_lpcg 4>,
-                        <&pwm2_lpcg 1>;
+               clocks = <&pwm2_lpcg IMX_LPCG_CLK_6>,
+                        <&pwm2_lpcg IMX_LPCG_CLK_1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_2 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
                #pwm-cells = <3>;
@@ -64,8 +64,8 @@ lsio_subsys: bus@5d000000 {
                compatible = "fsl,imx27-pwm";
                reg = <0x5d030000 0x10000>;
                clock-names = "ipg", "per";
-               clocks = <&pwm3_lpcg 4>,
-                        <&pwm3_lpcg 1>;
+               clocks = <&pwm3_lpcg IMX_LPCG_CLK_6>,
+                        <&pwm3_lpcg IMX_LPCG_CLK_1>;
                assigned-clocks = <&clk IMX_SC_R_PWM_3 IMX_SC_PM_CLK_PER>;
                assigned-clock-rates = <24000000>;
                #pwm-cells = <3>;
index 41c79d2ebdd6201dc10278204c064a4c01c71709..f24b14744799e16bb1145738bfb18fd8343c00ee 100644 (file)
@@ -14,6 +14,7 @@
                pinctrl-0 = <&pinctrl_usbcon1>;
                type = "micro";
                label = "otg";
+               vbus-supply = <&reg_usb1_vbus>;
                id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 
                port {
 };
 
 &usb3_phy0 {
-       vbus-supply = <&reg_usb1_vbus>;
        status = "okay";
 };
 
index d5c400b355af564123497cd1805e0b0ad56ded21..f5491a608b2f3793ca410871fda7e5005db661e1 100644 (file)
@@ -14,6 +14,7 @@
                pinctrl-0 = <&pinctrl_usbcon1>;
                type = "micro";
                label = "otg";
+               vbus-supply = <&reg_usb1_vbus>;
                id-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
 
                port {
 };
 
 &usb3_phy0 {
-       vbus-supply = <&reg_usb1_vbus>;
        status = "okay";
 };
 
index 11626fae5f97f3a9b2c94528d1957fdc73f9aac8..aa9f28c4431d0249cce852026eda7a9a7cad3ff0 100644 (file)
 };
 
 &flexcan2 {
-       clocks = <&can1_lpcg 1>,
-                <&can1_lpcg 0>;
+       clocks = <&can1_lpcg IMX_LPCG_CLK_4>,
+                <&can1_lpcg IMX_LPCG_CLK_0>;
        assigned-clocks = <&clk IMX_SC_R_CAN_1 IMX_SC_PM_CLK_PER>;
        fsl,clk-source = /bits/ 8 <1>;
 };
 
 &flexcan3 {
-       clocks = <&can2_lpcg 1>,
-                <&can2_lpcg 0>;
+       clocks = <&can2_lpcg IMX_LPCG_CLK_4>,
+                <&can2_lpcg IMX_LPCG_CLK_0>;
        assigned-clocks = <&clk IMX_SC_R_CAN_2 IMX_SC_PM_CLK_PER>;
        fsl,clk-source = /bits/ 8 <1>;
 };
index 3b0e8248e1a41a1ead90bdbf4fea82054d9fcd90..a75de2665d844510a69d4af337ad1b5827b012c8 100644 (file)
@@ -161,12 +161,18 @@ static inline unsigned long get_trans_granule(void)
 #define MAX_TLBI_RANGE_PAGES           __TLBI_RANGE_PAGES(31, 3)
 
 /*
- * Generate 'num' values from -1 to 30 with -1 rejected by the
- * __flush_tlb_range() loop below.
+ * Generate 'num' values from -1 to 31 with -1 rejected by the
+ * __flush_tlb_range() loop below. Its return value is only
+ * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
+ * 'pages' is more than that, you must iterate over the overall
+ * range.
  */
-#define TLBI_RANGE_MASK                        GENMASK_ULL(4, 0)
-#define __TLBI_RANGE_NUM(pages, scale) \
-       ((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
+#define __TLBI_RANGE_NUM(pages, scale)                                 \
+       ({                                                              \
+               int __pages = min((pages),                              \
+                                 __TLBI_RANGE_PAGES(31, (scale)));     \
+               (__pages >> (5 * (scale) + 1)) - 1;                     \
+       })
 
 /*
  *     TLB Invalidation
@@ -379,10 +385,6 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
  * 3. If there is 1 page remaining, flush it through non-range operations. Range
  *    operations can only span an even number of pages. We save this for last to
  *    ensure 64KB start alignment is maintained for the LPA2 case.
- *
- * Note that certain ranges can be represented by either num = 31 and
- * scale or num = 0 and scale + 1. The loop below favours the latter
- * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
  */
 #define __flush_tlb_range_op(op, start, pages, stride,                 \
                                asid, tlb_level, tlbi_user, lpa2)       \
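
To make the new __TLBI_RANGE_NUM() clamp above concrete, a user-space
rendering of the arithmetic (a sketch assuming the kernel's
__TLBI_RANGE_PAGES(num, scale) is ((num + 1) << (5 * scale + 1))):

  #include <stdio.h>

  #define RANGE_PAGES(num, scale) \
      ((unsigned long)((num) + 1) << (5 * (scale) + 1))

  /* Mirrors __TLBI_RANGE_NUM(): clamp 'pages' so num never exceeds 31;
   * a result of -1 means "no range op at this scale". */
  static int range_num(unsigned long pages, int scale)
  {
      unsigned long max = RANGE_PAGES(31, scale);
      unsigned long p = pages < max ? pages : max;

      return (int)(p >> (5 * scale + 1)) - 1;
  }

  int main(void)
  {
      /* 512 pages: scale 2 gives 512 >> 11 = 0, num = -1 (rejected);
       * scale 1 gives 512 >> 6 = 8, num = 7, covering (7+1) << 6 = 512. */
      printf("scale 2: %d, scale 1: %d\n",
             range_num(512, 2), range_num(512, 1));
      return 0;
  }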
index d14d0e37ad02ddf10b42cfed590c65f97f8de424..4a2b40ce39e0911d74806b2db54d69a9735d33ef 100644 (file)
@@ -159,7 +159,7 @@ extern unsigned long exception_ip(struct pt_regs *regs);
 #define exception_ip(regs) exception_ip(regs)
 #define profile_pc(regs) instruction_pointer(regs)
 
-extern asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall);
+extern asmlinkage long syscall_trace_enter(struct pt_regs *regs);
 extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
 
 extern void die(const char *, struct pt_regs *) __noreturn;
index d1b11f66f748f06483edbc08e48d1b4e5e684156..cb1045ebab0621ad2c8c59eaebe96b13d47e4514 100644 (file)
@@ -101,6 +101,7 @@ void output_thread_info_defines(void)
        OFFSET(TI_CPU, thread_info, cpu);
        OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
        OFFSET(TI_REGS, thread_info, regs);
+       OFFSET(TI_SYSCALL, thread_info, syscall);
        DEFINE(_THREAD_SIZE, THREAD_SIZE);
        DEFINE(_THREAD_MASK, THREAD_MASK);
        DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
index 59288c13b581b89ccb46214c7be02126a017dab2..61503a36067e9ef15c2ff7598256c6fd1de6ac8d 100644 (file)
@@ -1317,16 +1317,13 @@ long arch_ptrace(struct task_struct *child, long request,
  * Notification of system call entry/exit
  * - triggered by current->work.syscall_trace
  */
-asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+asmlinkage long syscall_trace_enter(struct pt_regs *regs)
 {
        user_exit();
 
-       current_thread_info()->syscall = syscall;
-
        if (test_thread_flag(TIF_SYSCALL_TRACE)) {
                if (ptrace_report_syscall_entry(regs))
                        return -1;
-               syscall = current_thread_info()->syscall;
        }
 
 #ifdef CONFIG_SECCOMP
@@ -1335,7 +1332,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
                struct seccomp_data sd;
                unsigned long args[6];
 
-               sd.nr = syscall;
+               sd.nr = current_thread_info()->syscall;
                sd.arch = syscall_get_arch(current);
                syscall_get_arguments(current, regs, args);
                for (i = 0; i < 6; i++)
@@ -1345,23 +1342,23 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
                ret = __secure_computing(&sd);
                if (ret == -1)
                        return ret;
-               syscall = current_thread_info()->syscall;
        }
 #endif
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[2]);
 
-       audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
+       audit_syscall_entry(current_thread_info()->syscall,
+                           regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
 
        /*
         * Negative syscall numbers are mistaken for rejected syscalls, but
         * won't have had the return value set appropriately, so we do so now.
         */
-       if (syscall < 0)
+       if (current_thread_info()->syscall < 0)
                syscall_set_return_value(current, regs, -ENOSYS, 0);
-       return syscall;
+       return current_thread_info()->syscall;
 }
 
 /*
index 18dc9b34505614d2bc84767479a3e9972c1ba8ad..2c604717e63080b1c1949a080bfadf1cab94acd6 100644 (file)
@@ -77,6 +77,18 @@ loads_done:
        PTR_WD  load_a7, bad_stack_a7
        .previous
 
+       /*
+        * syscall number is in v0 unless we called syscall(__NR_###)
+        * where the real syscall number is in a0
+        */
+       subu    t2, v0,  __NR_O32_Linux
+       bnez    t2, 1f /* __NR_syscall at offset 0 */
+       LONG_S  a0, TI_SYSCALL($28)     # Save a0 as syscall number
+       b       2f
+1:
+       LONG_S  v0, TI_SYSCALL($28)     # Save v0 as syscall number
+2:
+
        lw      t0, TI_FLAGS($28)       # syscall tracing enabled?
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        and     t0, t1
@@ -114,16 +126,7 @@ syscall_trace_entry:
        SAVE_STATIC
        move    a0, sp
 
-       /*
-        * syscall number is in v0 unless we called syscall(__NR_###)
-        * where the real syscall number is in a0
-        */
-       move    a1, v0
-       subu    t2, v0,  __NR_O32_Linux
-       bnez    t2, 1f /* __NR_syscall at offset 0 */
-       lw      a1, PT_R4(sp)
-
-1:     jal     syscall_trace_enter
+       jal     syscall_trace_enter
 
        bltz    v0, 1f                  # seccomp failed? Skip syscall
 
index 97456b2ca7dc32f13cac9a5843a3adea89735318..97788859238c344a64d1f75f2fdd6c2a4bc58006 100644 (file)
@@ -44,6 +44,8 @@ NESTED(handle_sysn32, PT_SIZE, sp)
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
+       LONG_S  v0, TI_SYSCALL($28)     # Store syscall number
+
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
        and     t0, t1, t0
@@ -72,7 +74,6 @@ syscall_common:
 n32_syscall_trace_entry:
        SAVE_STATIC
        move    a0, sp
-       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 1f                  # seccomp failed? Skip syscall
index e6264aa62e457f02b8a50df8b266a58b8361717d..be11ea5cc67e043c8a20fe0fecb4a0414b589ee9 100644 (file)
@@ -46,6 +46,8 @@ NESTED(handle_sys64, PT_SIZE, sp)
 
        sd      a3, PT_R26(sp)          # save a3 for syscall restarting
 
+       LONG_S  v0, TI_SYSCALL($28)     # Store syscall number
+
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
        and     t0, t1, t0
@@ -82,7 +84,6 @@ n64_syscall_exit:
 syscall_trace_entry:
        SAVE_STATIC
        move    a0, sp
-       move    a1, v0
        jal     syscall_trace_enter
 
        bltz    v0, 1f                  # seccomp failed? Skip syscall
index d3c2616cba22690bffd63b4521dc0f0ea7216315..7a5abb73e53127876af7e9d5f13dae2f8b08c3e8 100644 (file)
@@ -79,6 +79,22 @@ loads_done:
        PTR_WD  load_a7, bad_stack_a7
        .previous
 
+       /*
+        * absolute syscall number is in v0 unless we called syscall(__NR_###)
+        * where the real syscall number is in a0
+        * note: NR_syscall is the first O32 syscall but the macro is
+        * only defined when compiling with -mabi=32 (CONFIG_32BIT)
+        * therefore __NR_O32_Linux is used (4000)
+        */
+
+       subu    t2, v0,  __NR_O32_Linux
+       bnez    t2, 1f /* __NR_syscall at offset 0 */
+       LONG_S  a0, TI_SYSCALL($28)     # Save a0 as syscall number
+       b       2f
+1:
+       LONG_S  v0, TI_SYSCALL($28)     # Save v0 as syscall number
+2:
+
        li      t1, _TIF_WORK_SYSCALL_ENTRY
        LONG_L  t0, TI_FLAGS($28)       # syscall tracing enabled?
        and     t0, t1, t0
@@ -113,22 +129,7 @@ trace_a_syscall:
        sd      a7, PT_R11(sp)          # For indirect syscalls
 
        move    a0, sp
-       /*
-        * absolute syscall number is in v0 unless we called syscall(__NR_###)
-        * where the real syscall number is in a0
-        * note: NR_syscall is the first O32 syscall but the macro is
-        * only defined when compiling with -mabi=32 (CONFIG_32BIT)
-        * therefore __NR_O32_Linux is used (4000)
-        */
-       .set    push
-       .set    reorder
-       subu    t1, v0,  __NR_O32_Linux
-       move    a1, v0
-       bnez    t1, 1f /* __NR_syscall at offset 0 */
-       ld      a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
-       .set    pop
-
-1:     jal     syscall_trace_enter
+       jal     syscall_trace_enter
 
        bltz    v0, 1f                  # seccomp failed? Skip syscall
 
index 10a6251f58f3e0789cf5507322b92d92a87bc2eb..4474bf32d0a4970daec7fad3f12f8aa4a9e43871 100644 (file)
@@ -2633,32 +2633,16 @@ config MITIGATION_RFDS
          stored in floating point, vector and integer registers.
          See also <file:Documentation/admin-guide/hw-vuln/reg-file-data-sampling.rst>
 
-choice
-       prompt "Clear branch history"
+config MITIGATION_SPECTRE_BHI
+       bool "Mitigate Spectre-BHB (Branch History Injection)"
        depends on CPU_SUP_INTEL
-       default SPECTRE_BHI_ON
+       default y
        help
          Enable BHI mitigations. BHI attacks are a form of Spectre V2 attacks
          where the branch history buffer is poisoned to speculatively steer
          indirect branches.
          See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
 
-config SPECTRE_BHI_ON
-       bool "on"
-       help
-         Equivalent to setting spectre_bhi=on command line parameter.
-config SPECTRE_BHI_OFF
-       bool "off"
-       help
-         Equivalent to setting spectre_bhi=off command line parameter.
-config SPECTRE_BHI_AUTO
-       bool "auto"
-       depends on BROKEN
-       help
-         Equivalent to setting spectre_bhi=auto command line parameter.
-
-endchoice
-
 endif
 
 config ARCH_HAS_ADD_PAGES
index 09050641ce5d3c02ad099d8faabbe5e98fe57570..5b0dd07b1ef19e915c1553eb13ca1c20ef1814ff 100644 (file)
@@ -1644,6 +1644,7 @@ static void x86_pmu_del(struct perf_event *event, int flags)
        while (++i < cpuc->n_events) {
                cpuc->event_list[i-1] = cpuc->event_list[i];
                cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
+               cpuc->assign[i-1] = cpuc->assign[i];
        }
        cpuc->event_constraint[i-1] = NULL;
        --cpuc->n_events;
index 5fc45543e95502cf16607e69e891c6e282136b30..0569f579338b516b22fe447248ae1ae4e4880a03 100644 (file)
@@ -105,7 +105,7 @@ static bool cpu_is_self(int cpu)
  * IPI implementation on Hyper-V.
  */
 static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
-               bool exclude_self)
+                              bool exclude_self)
 {
        struct hv_send_ipi_ex *ipi_arg;
        unsigned long flags;
@@ -132,8 +132,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
        if (!cpumask_equal(mask, cpu_present_mask) || exclude_self) {
                ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
 
-               nr_bank = cpumask_to_vpset_skip(&(ipi_arg->vp_set), mask,
-                               exclude_self ? cpu_is_self : NULL);
+               nr_bank = cpumask_to_vpset_skip(&ipi_arg->vp_set, mask,
+                                               exclude_self ? cpu_is_self : NULL);
 
                /*
                 * 'nr_bank <= 0' means some CPUs in cpumask can't be
@@ -147,7 +147,7 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector,
        }
 
        status = hv_do_rep_hypercall(HVCALL_SEND_IPI_EX, 0, nr_bank,
-                             ipi_arg, NULL);
+                                    ipi_arg, NULL);
 
 ipi_mask_ex_done:
        local_irq_restore(flags);
@@ -155,7 +155,7 @@ ipi_mask_ex_done:
 }
 
 static bool __send_ipi_mask(const struct cpumask *mask, int vector,
-               bool exclude_self)
+                           bool exclude_self)
 {
        int cur_cpu, vcpu, this_cpu = smp_processor_id();
        struct hv_send_ipi ipi_arg;
@@ -181,7 +181,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
                        return false;
        }
 
-       if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+       if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
                return false;
 
        /*
@@ -218,7 +218,7 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector,
        }
 
        status = hv_do_fast_hypercall16(HVCALL_SEND_IPI, ipi_arg.vector,
-                                    ipi_arg.cpu_mask);
+                                       ipi_arg.cpu_mask);
        return hv_result_success(status);
 
 do_ex_hypercall:
@@ -241,7 +241,7 @@ static bool __send_ipi_one(int cpu, int vector)
                        return false;
        }
 
-       if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+       if (vector < HV_IPI_LOW_VECTOR || vector > HV_IPI_HIGH_VECTOR)
                return false;
 
        if (vp >= 64)
index 68a0843d4750f765b50dd303c82bc445f442646e..3fa1f2ee7b0d0630df03675bddfdad0c40ad411d 100644 (file)
@@ -3,7 +3,6 @@
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/clockchips.h>
-#include <linux/acpi.h>
 #include <linux/hyperv.h>
 #include <linux/slab.h>
 #include <linux/cpuhotplug.h>
@@ -116,12 +115,11 @@ free_buf:
 
 int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
 {
-       struct hv_add_logical_processor_in *input;
-       struct hv_add_logical_processor_out *output;
+       struct hv_input_add_logical_processor *input;
+       struct hv_output_add_logical_processor *output;
        u64 status;
        unsigned long flags;
        int ret = HV_STATUS_SUCCESS;
-       int pxm = node_to_pxm(node);
 
        /*
         * When adding a logical processor, the hypervisor may return
@@ -137,11 +135,7 @@ int hv_call_add_logical_proc(int node, u32 lp_index, u32 apic_id)
 
                input->lp_index = lp_index;
                input->apic_id = apic_id;
-               input->flags = 0;
-               input->proximity_domain_info.domain_id = pxm;
-               input->proximity_domain_info.flags.reserved = 0;
-               input->proximity_domain_info.flags.proximity_info_valid = 1;
-               input->proximity_domain_info.flags.proximity_preferred = 1;
+               input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
                status = hv_do_hypercall(HVCALL_ADD_LOGICAL_PROCESSOR,
                                         input, output);
                local_irq_restore(flags);
@@ -166,7 +160,6 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
        u64 status;
        unsigned long irq_flags;
        int ret = HV_STATUS_SUCCESS;
-       int pxm = node_to_pxm(node);
 
        /* Root VPs don't seem to need pages deposited */
        if (partition_id != hv_current_partition_id) {
@@ -185,14 +178,7 @@ int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
                input->vp_index = vp_index;
                input->flags = flags;
                input->subnode_type = HvSubnodeAny;
-               if (node != NUMA_NO_NODE) {
-                       input->proximity_domain_info.domain_id = pxm;
-                       input->proximity_domain_info.flags.reserved = 0;
-                       input->proximity_domain_info.flags.proximity_info_valid = 1;
-                       input->proximity_domain_info.flags.proximity_preferred = 1;
-               } else {
-                       input->proximity_domain_info.as_uint64 = 0;
-               }
+               input->proximity_domain_info = hv_numa_node_to_pxm_info(node);
                status = hv_do_hypercall(HVCALL_CREATE_VP, input, NULL);
                local_irq_restore(irq_flags);
 
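Both hunks above replace the same open-coded proximity-domain setup with a single helper. A minimal sketch of what hv_numa_node_to_pxm_info() must do, reconstructed purely from the deleted lines (the exact definition lives in the Hyper-V headers; treat this as an illustration, not the authoritative implementation):

    static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
    {
            struct hv_proximity_domain_info pxm_info = {};

            if (node != NUMA_NO_NODE) {
                    pxm_info.domain_id = node_to_pxm(node);
                    pxm_info.flags.proximity_info_valid = 1;
                    pxm_info.flags.proximity_preferred = 1;
            }

            /* NUMA_NO_NODE: return the zeroed structure (covers the old
             * as_uint64 = 0 branch and the flags.reserved = 0 writes). */
            return pxm_info;
    }

Note this also centralizes the node_to_pxm() call, which the old hv_call_add_logical_proc() performed unconditionally even though it never checked for NUMA_NO_NODE.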
index 94ce0f7c9d3a26cd2b766a60042a0b941b3fe0d2..e6ab0cf15ed573b3acfd5fce79bc20cfce7c493a 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/mpspec.h>
 #include <asm/msr.h>
 #include <asm/hardirq.h>
+#include <asm/io.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3     1
 
@@ -98,7 +99,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
 
 static inline u32 native_apic_mem_read(u32 reg)
 {
-       return *((volatile u32 *)(APIC_BASE + reg));
+       return readl((void __iomem *)(APIC_BASE + reg));
 }
 
 static inline void native_apic_mem_eoi(void)
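A small-looking change with real semantics: a volatile dereference only guarantees that a 32-bit access happens, not which instruction the compiler selects for it, while readl() goes through the kernel's MMIO accessor and compiles to a simple fixed-form load (and the __iomem cast now matches how the address is used). Side by side, for illustration only:

    /* before: instruction selection left to the compiler */
    u32 val = *((volatile u32 *)(APIC_BASE + reg));

    /* after: fixed-form 32-bit MMIO load via the io.h accessor */
    u32 val = readl((void __iomem *)(APIC_BASE + reg));

That predictability matters to anything that has to decode or emulate the access (hypervisors, instruction emulators), though the hunk itself only shows the mechanical switch.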
index a42d8a6f7149588bc74213268733003bf7ccf470..c342c4aa9c6848c607238dad1ff07105737d5873 100644 (file)
@@ -1687,11 +1687,11 @@ static int x2apic_state;
 
 static bool x2apic_hw_locked(void)
 {
-       u64 ia32_cap;
+       u64 x86_arch_cap_msr;
        u64 msr;
 
-       ia32_cap = x86_read_arch_cap_msr();
-       if (ia32_cap & ARCH_CAP_XAPIC_DISABLE) {
+       x86_arch_cap_msr = x86_read_arch_cap_msr();
+       if (x86_arch_cap_msr & ARCH_CAP_XAPIC_DISABLE) {
                rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
                return (msr & LEGACY_XAPIC_DISABLED);
        }
index 9bf17c9c29dad2e3f3c38c07253accc667cadea3..cb9eece55904d049edc600960bdaa0db58765459 100644 (file)
@@ -535,7 +535,6 @@ clear_sev:
 
 static void early_init_amd(struct cpuinfo_x86 *c)
 {
-       u64 value;
        u32 dummy;
 
        if (c->x86 >= 0xf)
@@ -603,20 +602,6 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
        early_detect_mem_encrypt(c);
 
-       /* Re-enable TopologyExtensions if switched off by BIOS */
-       if (c->x86 == 0x15 &&
-           (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
-           !cpu_has(c, X86_FEATURE_TOPOEXT)) {
-
-               if (msr_set_bit(0xc0011005, 54) > 0) {
-                       rdmsrl(0xc0011005, value);
-                       if (value & BIT_64(54)) {
-                               set_cpu_cap(c, X86_FEATURE_TOPOEXT);
-                               pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
-                       }
-               }
-       }
-
        if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_IBPB_BRTYPE)) {
                if (c->x86 == 0x17 && boot_cpu_has(X86_FEATURE_AMD_IBPB))
                        setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
index 295463707e68181cb536f8f4bd763bf045936202..ca295b0c1eeee05b812c27bb88bd814dba3c1f00 100644 (file)
@@ -61,6 +61,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
 EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
+static u64 __ro_after_init x86_arch_cap_msr;
+
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
@@ -144,6 +146,8 @@ void __init cpu_select_mitigations(void)
                x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
        }
 
+       x86_arch_cap_msr = x86_read_arch_cap_msr();
+
        /* Select the proper CPU mitigations before patching alternatives: */
        spectre_v1_select_mitigation();
        spectre_v2_select_mitigation();
@@ -301,8 +305,6 @@ static const char * const taa_strings[] = {
 
 static void __init taa_select_mitigation(void)
 {
-       u64 ia32_cap;
-
        if (!boot_cpu_has_bug(X86_BUG_TAA)) {
                taa_mitigation = TAA_MITIGATION_OFF;
                return;
@@ -341,9 +343,8 @@ static void __init taa_select_mitigation(void)
         * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
         * update is required.
         */
-       ia32_cap = x86_read_arch_cap_msr();
-       if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
-           !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
+       if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
+           !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))

                taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
 
        /*
@@ -401,8 +402,6 @@ static const char * const mmio_strings[] = {
 
 static void __init mmio_select_mitigation(void)
 {
-       u64 ia32_cap;
-
        if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
             boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
             cpu_mitigations_off()) {
@@ -413,8 +412,6 @@ static void __init mmio_select_mitigation(void)
        if (mmio_mitigation == MMIO_MITIGATION_OFF)
                return;
 
-       ia32_cap = x86_read_arch_cap_msr();
-
        /*
         * Enable CPU buffer clear mitigation for host and VMM, if also affected
         * by MDS or TAA. Otherwise, enable mitigation for VMM only.
@@ -437,7 +434,7 @@ static void __init mmio_select_mitigation(void)
         * be propagated to uncore buffers, clearing the Fill buffers on idle
         * is required irrespective of SMT state.
         */
-       if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
+       if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
                static_branch_enable(&mds_idle_clear);
 
        /*
@@ -447,10 +444,10 @@ static void __init mmio_select_mitigation(void)
         * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
         * affected systems.
         */
-       if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
+       if ((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
            (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
             boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
-            !(ia32_cap & ARCH_CAP_MDS_NO)))
+            !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)))
                mmio_mitigation = MMIO_MITIGATION_VERW;
        else
                mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
@@ -508,7 +505,7 @@ static void __init rfds_select_mitigation(void)
        if (rfds_mitigation == RFDS_MITIGATION_OFF)
                return;
 
-       if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
                setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
        else
                rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
@@ -659,8 +656,6 @@ void update_srbds_msr(void)
 
 static void __init srbds_select_mitigation(void)
 {
-       u64 ia32_cap;
-
        if (!boot_cpu_has_bug(X86_BUG_SRBDS))
                return;
 
@@ -669,8 +664,7 @@ static void __init srbds_select_mitigation(void)
         * are only exposed to SRBDS when TSX is enabled or when CPU is affected
         * by Processor MMIO Stale Data vulnerability.
         */
-       ia32_cap = x86_read_arch_cap_msr();
-       if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
+       if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
            !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
                srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
        else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -813,7 +807,7 @@ static void __init gds_select_mitigation(void)
        /* Will verify below that mitigation _can_ be disabled */
 
        /* No microcode */
-       if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
+       if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
                if (gds_mitigation == GDS_MITIGATION_FORCE) {
                        /*
                         * This only needs to be done on the boot CPU so do it
@@ -1544,20 +1538,25 @@ static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
        return SPECTRE_V2_RETPOLINE;
 }
 
+static bool __ro_after_init rrsba_disabled;
+
 /* Disable in-kernel use of non-RSB RET predictors */
 static void __init spec_ctrl_disable_kernel_rrsba(void)
 {
-       u64 ia32_cap;
+       if (rrsba_disabled)
+               return;
 
-       if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+       if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
+               rrsba_disabled = true;
                return;
+       }
 
-       ia32_cap = x86_read_arch_cap_msr();
+       if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
+               return;
 
-       if (ia32_cap & ARCH_CAP_RRSBA) {
-               x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
-               update_spec_ctrl(x86_spec_ctrl_base);
-       }
+       x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
+       update_spec_ctrl(x86_spec_ctrl_base);
+       rrsba_disabled = true;
 }
 
 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
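The new rrsba_disabled flag does two jobs: it makes spec_ctrl_disable_kernel_rrsba() idempotent (it gains a second caller in bhi_select_mitigation() below), and it caches the answer "RRSBA is no longer a concern" for later consumers. Note the asymmetry in the rewritten function: the flag becomes true when RRSBA behavior is absent or has been disabled via RRSBA_DIS_S, but stays false when the CPU has RRSBA behavior and no RRSBA_CTRL to turn it off, which is exactly the case where a retpoline alone does not cover BHI. In outline:

    if (rrsba_disabled)                     /* already resolved */
            return;
    if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) {
            rrsba_disabled = true;          /* nothing to disable */
            return;
    }
    if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
            return;                         /* can't disable: flag stays false */
    /* disable RRSBA for kernel RETs, then: */
    rrsba_disabled = true;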
@@ -1626,13 +1625,10 @@ static bool __init spec_ctrl_bhi_dis(void)
 enum bhi_mitigations {
        BHI_MITIGATION_OFF,
        BHI_MITIGATION_ON,
-       BHI_MITIGATION_AUTO,
 };
 
 static enum bhi_mitigations bhi_mitigation __ro_after_init =
-       IS_ENABLED(CONFIG_SPECTRE_BHI_ON)  ? BHI_MITIGATION_ON  :
-       IS_ENABLED(CONFIG_SPECTRE_BHI_OFF) ? BHI_MITIGATION_OFF :
-                                            BHI_MITIGATION_AUTO;
+       IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_ON : BHI_MITIGATION_OFF;
 
 static int __init spectre_bhi_parse_cmdline(char *str)
 {
@@ -1643,8 +1639,6 @@ static int __init spectre_bhi_parse_cmdline(char *str)
                bhi_mitigation = BHI_MITIGATION_OFF;
        else if (!strcmp(str, "on"))
                bhi_mitigation = BHI_MITIGATION_ON;
-       else if (!strcmp(str, "auto"))
-               bhi_mitigation = BHI_MITIGATION_AUTO;
        else
                pr_err("Ignoring unknown spectre_bhi option (%s)", str);
 
@@ -1658,9 +1652,11 @@ static void __init bhi_select_mitigation(void)
                return;
 
        /* Retpoline mitigates against BHI unless the CPU has RRSBA behavior */
-       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE) &&
-           !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
-               return;
+       if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
+               spec_ctrl_disable_kernel_rrsba();
+               if (rrsba_disabled)
+                       return;
+       }
 
        if (spec_ctrl_bhi_dis())
                return;
@@ -1672,9 +1668,6 @@ static void __init bhi_select_mitigation(void)
        setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
        pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
 
-       if (bhi_mitigation == BHI_MITIGATION_AUTO)
-               return;
-
        /* Mitigate syscalls when the mitigation is forced =on */
        setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
        pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
@@ -1908,8 +1901,6 @@ static void update_indir_branch_cond(void)
 /* Update the static key controlling the MDS CPU buffer clear in idle */
 static void update_mds_branch_idle(void)
 {
-       u64 ia32_cap = x86_read_arch_cap_msr();
-
        /*
         * Enable the idle clearing if SMT is active on CPUs which are
         * affected only by MSBDS and not any other MDS variant.
@@ -1924,7 +1915,7 @@ static void update_mds_branch_idle(void)
        if (sched_smt_active()) {
                static_branch_enable(&mds_idle_clear);
        } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
-                  (ia32_cap & ARCH_CAP_FBSDP_NO)) {
+                  (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
                static_branch_disable(&mds_idle_clear);
        }
 }
@@ -2809,7 +2800,7 @@ static char *pbrsb_eibrs_state(void)
        }
 }
 
-static const char * const spectre_bhi_state(void)
+static const char *spectre_bhi_state(void)
 {
        if (!boot_cpu_has_bug(X86_BUG_BHI))
                return "; BHI: Not affected";
@@ -2817,13 +2808,12 @@ static const char * const spectre_bhi_state(void)
                return "; BHI: BHI_DIS_S";
        else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP))
                return "; BHI: SW loop, KVM: SW loop";
-       else if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
-                !(x86_read_arch_cap_msr() & ARCH_CAP_RRSBA))
+       else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && rrsba_disabled)
                return "; BHI: Retpoline";
-       else if  (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
-               return "; BHI: Syscall hardening, KVM: SW loop";
+       else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT))
+               return "; BHI: Vulnerable, KVM: SW loop";
 
-       return "; BHI: Vulnerable (Syscall hardening enabled)";
+       return "; BHI: Vulnerable";
 }
 
 static ssize_t spectre_v2_show_state(char *buf)
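The running theme of the bugs.c hunks: MSR_IA32_ARCH_CAPABILITIES is now read exactly once, into the file-scope x86_arch_cap_msr snapshot taken at the top of cpu_select_mitigations(), and every *_select_mitigation() helper tests that cached copy instead of issuing its own x86_read_arch_cap_msr(). Reduced to a sketch (some_select_mitigation and ARCH_CAP_SOME_BIT are placeholders):

    static u64 __ro_after_init x86_arch_cap_msr;

    void __init cpu_select_mitigations(void)
    {
            x86_arch_cap_msr = x86_read_arch_cap_msr();     /* one RDMSR */
            /* ... then the individual selection helpers run ... */
    }

    static void __init some_select_mitigation(void)
    {
            if (x86_arch_cap_msr & ARCH_CAP_SOME_BIT)       /* cached copy */
                    setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
    }

Besides saving RDMSRs, every mitigation decision is now made against the same snapshot of the capability bits.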
index 754d91857d634a2c6055ed50afa659ac45749086..605c26c009c8ac61c8560231ea6b35d2381ff2aa 100644 (file)
@@ -1284,25 +1284,25 @@ static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long whi
 
 u64 x86_read_arch_cap_msr(void)
 {
-       u64 ia32_cap = 0;
+       u64 x86_arch_cap_msr = 0;
 
        if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+               rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
 
-       return ia32_cap;
+       return x86_arch_cap_msr;
 }
 
-static bool arch_cap_mmio_immune(u64 ia32_cap)
+static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
 {
-       return (ia32_cap & ARCH_CAP_FBSDP_NO &&
-               ia32_cap & ARCH_CAP_PSDP_NO &&
-               ia32_cap & ARCH_CAP_SBDR_SSDP_NO);
+       return (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO &&
+               x86_arch_cap_msr & ARCH_CAP_PSDP_NO &&
+               x86_arch_cap_msr & ARCH_CAP_SBDR_SSDP_NO);
 }
 
-static bool __init vulnerable_to_rfds(u64 ia32_cap)
+static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 {
        /* The "immunity" bit trumps everything else: */
-       if (ia32_cap & ARCH_CAP_RFDS_NO)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO)
                return false;
 
        /*
@@ -1310,7 +1310,7 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
         * indicate that mitigation is needed because guest is running on a
         * vulnerable hardware or may migrate to such hardware:
         */
-       if (ia32_cap & ARCH_CAP_RFDS_CLEAR)
+       if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR)
                return true;
 
        /* Only consult the blacklist when there is no enumeration: */
@@ -1319,11 +1319,11 @@ static bool __init vulnerable_to_rfds(u64 ia32_cap)
 
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
-       u64 ia32_cap = x86_read_arch_cap_msr();
+       u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
 
        /* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
        if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
-           !(ia32_cap & ARCH_CAP_PSCHANGE_MC_NO))
+           !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
                setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);
 
        if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
@@ -1335,7 +1335,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
 
        if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
-           !(ia32_cap & ARCH_CAP_SSB_NO) &&
+           !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
           !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
                setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
 
@@ -1346,17 +1346,17 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * Don't use AutoIBRS when SNP is enabled because it degrades host
         * userspace indirect branch performance.
         */
-       if ((ia32_cap & ARCH_CAP_IBRS_ALL) ||
+       if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
            (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
             !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
                setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
                if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
-                   !(ia32_cap & ARCH_CAP_PBRSB_NO))
+                   !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
                        setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
        }
 
        if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
-           !(ia32_cap & ARCH_CAP_MDS_NO)) {
+           !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
                setup_force_cpu_bug(X86_BUG_MDS);
                if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
                        setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
@@ -1375,9 +1375,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * TSX_CTRL check alone is not sufficient for cases when the microcode
         * update is not present or running as guest that don't get TSX_CTRL.
         */
-       if (!(ia32_cap & ARCH_CAP_TAA_NO) &&
+       if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
            (cpu_has(c, X86_FEATURE_RTM) ||
-            (ia32_cap & ARCH_CAP_TSX_CTRL_MSR)))
+            (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
                setup_force_cpu_bug(X86_BUG_TAA);
 
        /*
@@ -1403,7 +1403,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
         * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
         */
-       if (!arch_cap_mmio_immune(ia32_cap)) {
+       if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
                if (cpu_matches(cpu_vuln_blacklist, MMIO))
                        setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
                else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
@@ -1411,7 +1411,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
        }
 
        if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
-               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (ia32_cap & ARCH_CAP_RSBA))
+               if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
                        setup_force_cpu_bug(X86_BUG_RETBLEED);
        }
 
@@ -1429,15 +1429,15 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
         * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
         * which means that AVX will be disabled.
         */
-       if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) &&
+       if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
            boot_cpu_has(X86_FEATURE_AVX))
                setup_force_cpu_bug(X86_BUG_GDS);
 
-       if (vulnerable_to_rfds(ia32_cap))
+       if (vulnerable_to_rfds(x86_arch_cap_msr))
                setup_force_cpu_bug(X86_BUG_RFDS);
 
        /* When virtualized, eIBRS could be hidden, assume vulnerable */
-       if (!(ia32_cap & ARCH_CAP_BHI_NO) &&
+       if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
            !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
            (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
             boot_cpu_has(X86_FEATURE_HYPERVISOR)))
@@ -1447,7 +1447,7 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
                return;
 
        /* Rogue Data Cache Load? No! */
-       if (ia32_cap & ARCH_CAP_RDCL_NO)
+       if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
                return;
 
        setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
index aaca8d235dc2bbee08ab6de2bdb91b231963a7f6..d17c9b71eb4a253eac42acee5a49f3811c83aaff 100644 (file)
@@ -123,7 +123,6 @@ static void topo_set_cpuids(unsigned int cpu, u32 apic_id, u32 acpi_id)
        early_per_cpu(x86_cpu_to_apicid, cpu) = apic_id;
        early_per_cpu(x86_cpu_to_acpiid, cpu) = acpi_id;
 #endif
-       set_cpu_possible(cpu, true);
        set_cpu_present(cpu, true);
 }
 
@@ -210,7 +209,11 @@ static __init void topo_register_apic(u32 apic_id, u32 acpi_id, bool present)
                topo_info.nr_disabled_cpus++;
        }
 
-       /* Register present and possible CPUs in the domain maps */
+       /*
+        * Register present and possible CPUs in the domain
+        * maps. cpu_possible_map will be updated in
+        * topology_init_possible_cpus() after enumeration is done.
+        */
        for (dom = TOPO_SMT_DOMAIN; dom < TOPO_MAX_DOMAIN; dom++)
                set_bit(topo_apicid(apic_id, dom), apic_maps[dom].map);
 }
index 1a8b3ad493afef8eeeea65fe5dba8673517f1240..a7aa6eff4ae5ba26206208479f7530721eebda2d 100644 (file)
@@ -29,11 +29,21 @@ static bool parse_8000_0008(struct topo_scan *tscan)
        if (!sft)
                sft = get_count_order(ecx.cpu_nthreads + 1);
 
-       topology_set_dom(tscan, TOPO_SMT_DOMAIN, sft, ecx.cpu_nthreads + 1);
+       /*
+        * cpu_nthreads describes the number of threads in the package;
+        * sft is the number of APIC ID bits per package.
+        *
+        * As the number of actual threads per core is not described in
+        * this leaf, just set the CORE domain shift and let the later
+        * parsers set the SMT shift. Assume one thread per core by default,
+        * which is correct if there are no other CPUID leaves to parse.
+        */
+       topology_update_dom(tscan, TOPO_SMT_DOMAIN, 0, 1);
+       topology_set_dom(tscan, TOPO_CORE_DOMAIN, sft, ecx.cpu_nthreads + 1);
        return true;
 }
 
-static void store_node(struct topo_scan *tscan, unsigned int nr_nodes, u16 node_id)
+static void store_node(struct topo_scan *tscan, u16 nr_nodes, u16 node_id)
 {
        /*
         * Starting with Fam 17h the DIE domain could probably be used to
@@ -73,12 +83,14 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
        tscan->c->topo.initial_apicid = leaf.ext_apic_id;
 
        /*
-        * If leaf 0xb is available, then SMT shift is set already. If not
-        * take it from ecx.threads_per_core and use topo_update_dom() -
-        * topology_set_dom() would propagate and overwrite the already
-        * propagated CORE level.
+        * If leaf 0xb is available, then the domain shifts are already
+        * set and there is nothing to do here.
         */
        if (!has_0xb) {
+               /*
+                * Leaf 0x80000008 set the CORE domain shift already.
+                * Update the SMT domain, but do not propagate it.
+                */
                unsigned int nthreads = leaf.core_nthreads + 1;
 
                topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
@@ -109,13 +121,13 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_0xb)
 
 static bool parse_fam10h_node_id(struct topo_scan *tscan)
 {
-       struct {
-               union {
+       union {
+               struct {
                        u64     node_id         :  3,
                                nodes_per_pkg   :  3,
                                unused          : 58;
-                       u64     msr;
                };
+               u64             msr;
        } nid;
 
        if (!boot_cpu_has(X86_FEATURE_NODEID_MSR))
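The struct/union swap in parse_fam10h_node_id() is a genuine layout fix, not churn: in a union every member starts at offset 0, so the old declaration made node_id, nodes_per_pkg and unused three aliasing members that all decoded the same low bits. Wrapping the bitfields in a struct inside the union lays them out sequentially while keeping the u64 view for the MSR read. A self-contained illustration:

    #include <stdint.h>

    union nodeid_msr {
            struct {                         /* sequential bitfields */
                    uint64_t node_id       : 3;
                    uint64_t nodes_per_pkg : 3;
                    uint64_t unused        : 58;
            };
            uint64_t msr;                    /* whole-register view */
    };
    /* nid.msr = <raw MSR value>; nid.nodes_per_pkg now reads bits 5:3,
     * where the old union-of-bitfields read bits 2:0 for every field. */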
@@ -135,6 +147,26 @@ static void legacy_set_llc(struct topo_scan *tscan)
        tscan->c->topo.llc_id = apicid >> tscan->dom_shifts[TOPO_CORE_DOMAIN];
 }
 
+static void topoext_fixup(struct topo_scan *tscan)
+{
+       struct cpuinfo_x86 *c = tscan->c;
+       u64 msrval;
+
+       /* Try to re-enable TopologyExtensions if switched off by BIOS */
+       if (cpu_has(c, X86_FEATURE_TOPOEXT) || c->x86_vendor != X86_VENDOR_AMD ||
+           c->x86 != 0x15 || c->x86_model < 0x10 || c->x86_model > 0x6f)
+               return;
+
+       if (msr_set_bit(0xc0011005, 54) <= 0)
+               return;
+
+       rdmsrl(0xc0011005, msrval);
+       if (msrval & BIT_64(54)) {
+               set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+               pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+       }
+}
+
 static void parse_topology_amd(struct topo_scan *tscan)
 {
        bool has_0xb = false;
@@ -164,6 +196,7 @@ static void parse_topology_amd(struct topo_scan *tscan)
 void cpu_parse_topology_amd(struct topo_scan *tscan)
 {
        tscan->amd_nodes_per_pkg = 1;
+       topoext_fixup(tscan);
        parse_topology_amd(tscan);
 
        if (tscan->amd_nodes_per_pkg > 1)
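topoext_fixup() is the re-enable logic deleted from early_init_amd() earlier in this series, now living with the rest of the AMD topology parsing. Its control flow leans on msr_set_bit()'s return convention, which to my reading is: negative on error, 0 if the bit was already set (nothing written), positive if the MSR was actually written. Annotated:

    if (msr_set_bit(0xc0011005, 54) <= 0)
            return;                 /* error, or TOPOEXT already enabled */

    rdmsrl(0xc0011005, msrval);     /* read back: did BIOS let it stick? */
    if (msrval & BIT_64(54))
            set_cpu_cap(c, X86_FEATURE_TOPOEXT);

Only a genuine write is followed by the read-back verification.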
index bdbb557feb5a0ec949e7ac8cde0e87b6d4055f5b..059467086b13123b26630c1e84942980f3001216 100644 (file)
@@ -1409,6 +1409,12 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
        return 0;
 }
 
+void blkg_init_queue(struct request_queue *q)
+{
+       INIT_LIST_HEAD(&q->blkg_list);
+       mutex_init(&q->blkcg_mutex);
+}
+
 int blkcg_init_disk(struct gendisk *disk)
 {
        struct request_queue *q = disk->queue;
@@ -1416,9 +1422,6 @@ int blkcg_init_disk(struct gendisk *disk)
        bool preloaded;
        int ret;
 
-       INIT_LIST_HEAD(&q->blkg_list);
-       mutex_init(&q->blkcg_mutex);
-
        new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;
index 78b74106bf10c5cbadd655e2da6b2f21416c0622..90b3959d88cfa4a13026b7262001dd1cb030dcf5 100644 (file)
@@ -189,6 +189,7 @@ struct blkcg_policy {
 extern struct blkcg blkcg_root;
 extern bool blkcg_debug_stats;
 
+void blkg_init_queue(struct request_queue *q);
 int blkcg_init_disk(struct gendisk *disk);
 void blkcg_exit_disk(struct gendisk *disk);
 
@@ -482,6 +483,7 @@ struct blkcg {
 };
 
 static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline void blkg_init_queue(struct request_queue *q) { }
 static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
 static inline void blkcg_exit_disk(struct gendisk *disk) { }
 static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
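blkg_init_queue() is wired up with the usual config-gated header pattern: a real prototype when block cgroups are compiled in, an empty static inline otherwise, so blk_alloc_queue() can call it unconditionally. In sketch form (this header gates on CONFIG_BLK_CGROUP):

    #ifdef CONFIG_BLK_CGROUP
    void blkg_init_queue(struct request_queue *q);
    #else
    static inline void blkg_init_queue(struct request_queue *q) { }
    #endif

Moving the blkg_list/blkcg_mutex setup from blkcg_init_disk() into queue allocation means those fields are valid for the queue's whole lifetime, not only once a disk is attached.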
index a16b5abdbbf56f44611d34fd238c0ee3a00d72f5..b795ac177281ad7adec63528d53def2fff1139a5 100644 (file)
@@ -442,6 +442,8 @@ struct request_queue *blk_alloc_queue(struct queue_limits *lim, int node_id)
        init_waitqueue_head(&q->mq_freeze_wq);
        mutex_init(&q->mq_freeze_lock);
 
+       blkg_init_queue(q);
+
        /*
         * Init percpu_ref in atomic mode so that it's faster to shutdown.
         * See blk_register_queue() for details.
@@ -1195,6 +1197,7 @@ void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
        if (unlikely(!rq_list_empty(plug->cached_rq)))
                blk_mq_free_plug_rqs(plug);
 
+       plug->cur_ktime = 0;
        current->flags &= ~PF_BLOCK_TS;
 }
 
index 9a85bfbbc45a018e941cd0b778ab612a54cdea09..baa20c85799d54a86df05aa412c2e38849a800b4 100644 (file)
@@ -1347,7 +1347,7 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 {
        struct ioc *ioc = iocg->ioc;
        struct blkcg_gq *blkg = iocg_to_blkg(iocg);
-       u64 tdelta, delay, new_delay;
+       u64 tdelta, delay, new_delay, shift;
        s64 vover, vover_pct;
        u32 hwa;
 
@@ -1362,8 +1362,9 @@ static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
 
        /* calculate the current delay in effect - 1/2 every second */
        tdelta = now->now - iocg->delay_at;
-       if (iocg->delay)
-               delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
+       shift = div64_u64(tdelta, USEC_PER_SEC);
+       if (iocg->delay && shift < BITS_PER_LONG)
+               delay = iocg->delay >> shift;
        else
                delay = 0;
 
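The new bound on the shift count is the actual bug fix here: iocg->delay decays by half for every elapsed second, but shifting a 64-bit value by 64 or more is undefined behavior in C (on x86 the hardware masks the count to 6 bits, so a long-idle iocg could resurface with a large stale delay instead of zero). A standalone sketch of the corrected computation:

    #include <stdint.h>

    /* Halve `delay` once per elapsed second, saturating at zero. */
    static uint64_t decayed_delay(uint64_t delay, uint64_t elapsed_sec)
    {
            if (!delay || elapsed_sec >= 64)    /* BITS_PER_LONG on 64-bit */
                    return 0;
            return delay >> elapsed_sec;
    }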
index cdbaef159c4bc3e2f713ac8541a36450271678e7..d2731843f2fccb481eda94e1a1dc980051d2486a 100644 (file)
@@ -182,17 +182,13 @@ static int blk_validate_limits(struct queue_limits *lim)
                return -EINVAL;
 
        /*
-        * Devices that require a virtual boundary do not support scatter/gather
-        * I/O natively, but instead require a descriptor list entry for each
-        * page (which might not be identical to the Linux PAGE_SIZE).  Because
-        * of that they are not limited by our notion of "segment size".
+        * A stacking device may have both a virtual boundary and a max
+        * segment size limit, so allow this combination for now. Long-term
+        * the two might need to move out of the stacking limits, since we
+        * have immutable bvecs and lower-layer bio splitting is supposed
+        * to handle both correctly.
         */
-       if (lim->virt_boundary_mask) {
-               if (WARN_ON_ONCE(lim->max_segment_size &&
-                                lim->max_segment_size != UINT_MAX))
-                       return -EINVAL;
-               lim->max_segment_size = UINT_MAX;
-       } else {
+       if (!lim->virt_boundary_mask) {
                /*
                 * The maximum segment size has an odd historic 64k default that
                 * drivers probably should override.  Just like the I/O size we
index 39f6d1b98fd6a50d5d9df2defe305a23b36f9bcf..51d3f1a55d024cf5600ebd833bdf8ef5ee0627c1 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/firmware.h>
@@ -131,22 +131,6 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
        return 0;
 }
 
-static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
-{
-       int ret;
-
-       ret = ivpu_rpm_get_if_active(vdev);
-       if (ret < 0)
-               return ret;
-
-       *clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;
-
-       if (ret)
-               ivpu_rpm_put(vdev);
-
-       return 0;
-}
-
 static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
        struct ivpu_file_priv *file_priv = file->driver_priv;
@@ -170,7 +154,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
                args->value = vdev->platform;
                break;
        case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
-               ret = ivpu_get_core_clock_rate(vdev, &args->value);
+               args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
                break;
        case DRM_IVPU_PARAM_NUM_CONTEXTS:
                args->value = ivpu_get_context_count(vdev);
@@ -387,12 +371,15 @@ int ivpu_shutdown(struct ivpu_device *vdev)
 {
        int ret;
 
-       ivpu_prepare_for_reset(vdev);
+       /* Save PCI state before powering down as it sometimes gets corrupted if the NPU hangs */
+       pci_save_state(to_pci_dev(vdev->drm.dev));
 
        ret = ivpu_hw_power_down(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
 
+       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
        return ret;
 }
 
@@ -530,7 +517,7 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
        vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
        atomic64_set(&vdev->unique_id_counter, 0);
-       xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+       xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
        xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
        xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
@@ -560,11 +547,11 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        /* Power up early so the rest of init code can access VPU registers */
        ret = ivpu_hw_power_up(vdev);
        if (ret)
-               goto err_power_down;
+               goto err_shutdown;
 
        ret = ivpu_mmu_global_context_init(vdev);
        if (ret)
-               goto err_power_down;
+               goto err_shutdown;
 
        ret = ivpu_mmu_init(vdev);
        if (ret)
@@ -601,10 +588,8 @@ err_mmu_rctx_fini:
        ivpu_mmu_reserved_context_fini(vdev);
 err_mmu_gctx_fini:
        ivpu_mmu_global_context_fini(vdev);
-err_power_down:
-       ivpu_hw_power_down(vdev);
-       if (IVPU_WA(d3hot_after_power_off))
-               pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+err_shutdown:
+       ivpu_shutdown(vdev);
 err_xa_destroy:
        xa_destroy(&vdev->db_xa);
        xa_destroy(&vdev->submitted_jobs_xa);
@@ -628,9 +613,8 @@ static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
        ivpu_pm_disable(vdev);
+       ivpu_prepare_for_reset(vdev);
        ivpu_shutdown(vdev);
-       if (IVPU_WA(d3hot_after_power_off))
-               pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 
        ivpu_jobs_abort_all(vdev);
        ivpu_job_done_consumer_fini(vdev);
index 7be0500d9bb8919574b02066b8389c56c6c83f05..bb4374d0eaecc9a25d2f6b28056aa5d8d762bd15 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #ifndef __IVPU_DRV_H__
@@ -90,7 +90,6 @@
 struct ivpu_wa_table {
        bool punit_disabled;
        bool clear_runtime_mem;
-       bool d3hot_after_power_off;
        bool interrupt_clear_with_0;
        bool disable_clock_relinquish;
        bool disable_d0i3_msg;
index b2909168a0a6902b4fb061910796ac19d5caf6e1..094c659d2800b127bf1c616e34973673c1f55061 100644 (file)
@@ -21,6 +21,7 @@ struct ivpu_hw_ops {
        u32 (*profiling_freq_get)(struct ivpu_device *vdev);
        void (*profiling_freq_drive)(struct ivpu_device *vdev, bool enable);
        u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+       u32 (*ratio_to_freq)(struct ivpu_device *vdev, u32 ratio);
        u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
        u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
        u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
@@ -130,6 +131,11 @@ static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
        return vdev->hw->ops->reg_pll_freq_get(vdev);
 };
 
+static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+       return vdev->hw->ops->ratio_to_freq(vdev, ratio);
+}
+
 static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
 {
        return vdev->hw->ops->reg_telemetry_offset_get(vdev);
index 9a0c9498baba293cece13e9584f21f7b2067c681..bd25e2d9fb0f45a35d9ef9ca7ca16f14aa151521 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include "ivpu_drv.h"
@@ -75,7 +75,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 {
        vdev->wa.punit_disabled = false;
        vdev->wa.clear_runtime_mem = false;
-       vdev->wa.d3hot_after_power_off = true;
 
        REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, BUTTRESS_ALL_IRQ_MASK);
        if (REGB_RD32(VPU_37XX_BUTTRESS_INTERRUPT_STAT) == BUTTRESS_ALL_IRQ_MASK) {
@@ -86,7 +85,6 @@ static void ivpu_hw_wa_init(struct ivpu_device *vdev)
 
        IVPU_PRINT_WA(punit_disabled);
        IVPU_PRINT_WA(clear_runtime_mem);
-       IVPU_PRINT_WA(d3hot_after_power_off);
        IVPU_PRINT_WA(interrupt_clear_with_0);
 }
 
@@ -805,12 +803,12 @@ static void ivpu_hw_37xx_profiling_freq_drive(struct ivpu_device *vdev, bool ena
        /* Profiling freq - is a debug feature. Unavailable on VPU 37XX. */
 }
 
-static u32 ivpu_hw_37xx_pll_to_freq(u32 ratio, u32 config)
+static u32 ivpu_hw_37xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
 {
        u32 pll_clock = PLL_REF_CLK_FREQ * ratio;
        u32 cpu_clock;
 
-       if ((config & 0xff) == PLL_RATIO_4_3)
+       if ((vdev->hw->config & 0xff) == PLL_RATIO_4_3)
                cpu_clock = pll_clock * 2 / 4;
        else
                cpu_clock = pll_clock * 2 / 5;
@@ -829,7 +827,7 @@ static u32 ivpu_hw_37xx_reg_pll_freq_get(struct ivpu_device *vdev)
        if (!ivpu_is_silicon(vdev))
                return PLL_SIMULATION_FREQ;
 
-       return ivpu_hw_37xx_pll_to_freq(pll_curr_ratio, vdev->hw->config);
+       return ivpu_hw_37xx_ratio_to_freq(vdev, pll_curr_ratio);
 }
 
 static u32 ivpu_hw_37xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
@@ -1052,6 +1050,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
        .profiling_freq_get = ivpu_hw_37xx_profiling_freq_get,
        .profiling_freq_drive = ivpu_hw_37xx_profiling_freq_drive,
        .reg_pll_freq_get = ivpu_hw_37xx_reg_pll_freq_get,
+       .ratio_to_freq = ivpu_hw_37xx_ratio_to_freq,
        .reg_telemetry_offset_get = ivpu_hw_37xx_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_37xx_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_37xx_reg_telemetry_enable_get,
index e4eddbf5d11c250bb8ddd2a27843242166896217..b0b88d4c89264a0a95f18edc9b140d720c89279d 100644 (file)
@@ -980,6 +980,11 @@ static u32 ivpu_hw_40xx_reg_pll_freq_get(struct ivpu_device *vdev)
        return PLL_RATIO_TO_FREQ(pll_curr_ratio);
 }
 
+static u32 ivpu_hw_40xx_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
+{
+       return PLL_RATIO_TO_FREQ(ratio);
+}
+
 static u32 ivpu_hw_40xx_reg_telemetry_offset_get(struct ivpu_device *vdev)
 {
        return REGB_RD32(VPU_40XX_BUTTRESS_VPU_TELEMETRY_OFFSET);
@@ -1230,6 +1235,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
        .profiling_freq_get = ivpu_hw_40xx_profiling_freq_get,
        .profiling_freq_drive = ivpu_hw_40xx_profiling_freq_drive,
        .reg_pll_freq_get = ivpu_hw_40xx_reg_pll_freq_get,
+       .ratio_to_freq = ivpu_hw_40xx_ratio_to_freq,
        .reg_telemetry_offset_get = ivpu_hw_40xx_reg_telemetry_offset_get,
        .reg_telemetry_size_get = ivpu_hw_40xx_reg_telemetry_size_get,
        .reg_telemetry_enable_get = ivpu_hw_40xx_reg_telemetry_enable_get,
index 04ac4b9840fbe56341e1552c2783715a83b58e7c..56ff067f63e29559d2e0605645c97bb1a0391142 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/genalloc.h>
@@ -501,7 +501,11 @@ int ivpu_ipc_init(struct ivpu_device *vdev)
        spin_lock_init(&ipc->cons_lock);
        INIT_LIST_HEAD(&ipc->cons_list);
        INIT_LIST_HEAD(&ipc->cb_msg_list);
-       drmm_mutex_init(&vdev->drm, &ipc->lock);
+       ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
+       if (ret) {
+               ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
+               goto err_free_rx;
+       }
        ivpu_ipc_reset(vdev);
        return 0;
 
index 91bd640655ab363b51df17a25cb9589293adc804..2e46b322c4505ea5f18997d0ef969f43239f72c8 100644 (file)
@@ -278,7 +278,7 @@ static const char *ivpu_mmu_event_to_str(u32 cmd)
        case IVPU_MMU_EVT_F_VMS_FETCH:
                return "Fetch of VMS caused external abort";
        default:
-               return "Unknown CMDQ command";
+               return "Unknown event";
        }
 }
 
@@ -286,15 +286,15 @@ static const char *ivpu_mmu_cmdq_err_to_str(u32 err)
 {
        switch (err) {
        case IVPU_MMU_CERROR_NONE:
-               return "No CMDQ Error";
+               return "No error";
        case IVPU_MMU_CERROR_ILL:
                return "Illegal command";
        case IVPU_MMU_CERROR_ABT:
-               return "External abort on CMDQ read";
+               return "External abort on command queue read";
        case IVPU_MMU_CERROR_ATC_INV_SYNC:
                return "Sync failed to complete ATS invalidation";
        default:
-               return "Unknown CMDQ Error";
+               return "Unknown error";
        }
 }
 
index 7cce1c928a7f4e8386344fd81d58e7893f72c050..4f5ea466731ffe6b5b2ea178ae907274f26f5b62 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/highmem.h>
@@ -58,14 +58,11 @@ static int ivpu_suspend(struct ivpu_device *vdev)
 {
        int ret;
 
-       /* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
-       pci_save_state(to_pci_dev(vdev->drm.dev));
+       ivpu_prepare_for_reset(vdev);
 
        ret = ivpu_shutdown(vdev);
        if (ret)
-               ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
-
-       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+               ivpu_err(vdev, "Failed to shutdown NPU: %d\n", ret);
 
        return ret;
 }
@@ -74,10 +71,10 @@ static int ivpu_resume(struct ivpu_device *vdev)
 {
        int ret;
 
-       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
+retry:
        pci_restore_state(to_pci_dev(vdev->drm.dev));
+       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D0);
 
-retry:
        ret = ivpu_hw_power_up(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
@@ -100,6 +97,7 @@ err_mmu_disable:
        ivpu_mmu_disable(vdev);
 err_power_down:
        ivpu_hw_power_down(vdev);
+       pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
 
        if (!ivpu_fw_is_cold_boot(vdev)) {
                ivpu_pm_prepare_cold_boot(vdev);
index 7c157bf926956be5cabd6db7c708ff87759c7879..d1464324de9519cdb96e026f3733170788bb786d 100644 (file)
@@ -1843,7 +1843,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
                        if (dep->honor_dep)
                                adev->flags.honor_deps = 1;
 
-                       adev->dep_unmet++;
+                       if (!dep->met)
+                               adev->dep_unmet++;
                }
        }
 }
index 562302e2e57ce5a2651575ad1620b1725d654f6a..6548f10e61d9c72ca89180e011f8e495058302a1 100644 (file)
@@ -666,6 +666,87 @@ static int mobile_lpm_policy = -1;
 module_param(mobile_lpm_policy, int, 0644);
 MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 
+static char *ahci_mask_port_map;
+module_param_named(mask_port_map, ahci_mask_port_map, charp, 0444);
+MODULE_PARM_DESC(mask_port_map,
+                "32-bit port map masks to ignore controller ports. "
+                "Valid values are: "
+                "\"<mask>\" to apply the same mask to all AHCI controller "
+                "devices, and \"<pci_dev>=<mask>,<pci_dev>=<mask>,...\" to "
+                "specify different masks for the controllers specified, "
+                "where <pci_dev> is the PCI ID of an AHCI controller in the "
+                "form \"domain:bus:dev.func\"");
+
+static void ahci_apply_port_map_mask(struct device *dev,
+                                    struct ahci_host_priv *hpriv, char *mask_s)
+{
+       unsigned int mask;
+
+       if (kstrtouint(mask_s, 0, &mask)) {
+               dev_err(dev, "Invalid port map mask\n");
+               return;
+       }
+
+       hpriv->mask_port_map = mask;
+}
+
+static void ahci_get_port_map_mask(struct device *dev,
+                                  struct ahci_host_priv *hpriv)
+{
+       char *param, *end, *str, *mask_s;
+       char *name;
+
+       if (!strlen(ahci_mask_port_map))
+               return;
+
+       str = kstrdup(ahci_mask_port_map, GFP_KERNEL);
+       if (!str)
+               return;
+
+       /* Handle single mask case */
+       if (!strchr(str, '=')) {
+               ahci_apply_port_map_mask(dev, hpriv, str);
+               goto free;
+       }
+
+       /*
+        * Mask list case: parse the parameter to apply the mask only if
+        * the device name matches.
+        */
+       param = str;
+       end = param + strlen(param);
+       while (param && param < end && *param) {
+               name = param;
+               param = strchr(name, '=');
+               if (!param)
+                       break;
+
+               *param = '\0';
+               param++;
+               if (param >= end)
+                       break;
+
+               if (strcmp(dev_name(dev), name) != 0) {
+                       param = strchr(param, ',');
+                       if (param)
+                               param++;
+                       continue;
+               }
+
+               mask_s = param;
+               param = strchr(mask_s, ',');
+               if (param) {
+                       *param = '\0';
+                       param++;
+               }
+
+               ahci_apply_port_map_mask(dev, hpriv, mask_s);
+       }
+
+free:
+       kfree(str);
+}
+
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
 {
@@ -688,6 +769,10 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                          "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
        }
 
+       /* Handle port map masks passed as module parameter. */
+       if (ahci_mask_port_map)
+               ahci_get_port_map_mask(&pdev->dev, hpriv);
+
        ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
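Hypothetical invocations of the new parameter, following the format documented in the MODULE_PARM_DESC above (the PCI addresses and mask values are made up for illustration):

    # one mask applied to every AHCI controller in the system
    modprobe ahci mask_port_map=0x1

    # per-controller masks, keyed by domain:bus:dev.func
    modprobe ahci mask_port_map=0000:00:17.0=0x1,0000:01:00.0=0x3

The parser handles the two shapes separately: a bare "<mask>" (no '=') is applied to every controller, while the list form is scanned entry by entry and applied only when an entry's name matches dev_name() of the controller being probed.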
index be3412cdb22e78a1d663337698f07b07c66727e4..c449d60d9bb962c80ac7e196d08dd722d2c6950b 100644 (file)
@@ -2539,7 +2539,7 @@ static void ata_dev_config_cdl(struct ata_device *dev)
        bool cdl_enabled;
        u64 val;
 
-       if (ata_id_major_version(dev->id) < 12)
+       if (ata_id_major_version(dev->id) < 11)
                goto not_supported;
 
        if (!ata_log_supported(dev, ATA_LOG_IDENTIFY_DEVICE) ||
index 2f4c58837641077f3ad91974cd11affbe6dcd1e8..e954976891a9f502930a3a7ffc5f31df113d2326 100644 (file)
@@ -4745,7 +4745,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
                         * bail out.
                         */
                        if (ap->pflags & ATA_PFLAG_SUSPENDED)
-                               goto unlock;
+                               goto unlock_ap;
 
                        if (!sdev)
                                continue;
@@ -4758,7 +4758,7 @@ void ata_scsi_dev_rescan(struct work_struct *work)
                        if (do_resume) {
                                ret = scsi_resume_device(sdev);
                                if (ret == -EWOULDBLOCK)
-                                       goto unlock;
+                                       goto unlock_scan;
                                dev->flags &= ~ATA_DFLAG_RESUMING;
                        }
                        ret = scsi_rescan_device(sdev);
@@ -4766,12 +4766,13 @@ void ata_scsi_dev_rescan(struct work_struct *work)
                        spin_lock_irqsave(ap->lock, flags);
 
                        if (ret)
-                               goto unlock;
+                               goto unlock_ap;
                }
        }
 
-unlock:
+unlock_ap:
        spin_unlock_irqrestore(ap->lock, flags);
+unlock_scan:
        mutex_unlock(&ap->scsi_scan_mutex);
 
        /* Reschedule with a delay if scsi_rescan_device() returned an error */
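The label split fixes a lock imbalance rather than renaming for taste: ap->lock is not held around the scsi_resume_device() call, so the old single unlock label, which dropped the spinlock and then the mutex, would have released an unheld spinlock on the -EWOULDBLOCK path. With two targets, each goto releases exactly what is held at its jump site:

    unlock_ap:
            spin_unlock_irqrestore(ap->lock, flags);        /* spinlock held */
    unlock_scan:
            mutex_unlock(&ap->scsi_scan_mutex);             /* mutex only */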
index 89ed6cd6b059ebb0af77dcc0d2b83a72fe995dc4..e9cc8b4786fbfb9eba5d3c1d8c06c3d08477a132 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/of_address.h>
 #include <linux/device.h>
 #include <linux/bitfield.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <asm/cacheflush.h>
 #include <asm/cacheinfo.h>
 #include <asm/dma-noncoherent.h>
@@ -247,13 +249,49 @@ static irqreturn_t ccache_int_handler(int irq, void *device)
        return IRQ_HANDLED;
 }
 
+static int sifive_ccache_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       unsigned long quirks;
+       int intr_num, rc;
+
+       quirks = (unsigned long)device_get_match_data(dev);
+
+       intr_num = platform_irq_count(pdev);
+       if (!intr_num)
+               return dev_err_probe(dev, -ENODEV, "No interrupts property\n");
+
+       for (int i = 0; i < intr_num; i++) {
+               if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
+                       continue;
+
+               g_irq[i] = platform_get_irq(pdev, i);
+               if (g_irq[i] < 0)
+                       return g_irq[i];
+
+               rc = devm_request_irq(dev, g_irq[i], ccache_int_handler, 0, "ccache_ecc", NULL);
+               if (rc)
+                       return dev_err_probe(dev, rc, "Could not request IRQ %d\n", g_irq[i]);
+       }
+
+       return 0;
+}
+
+static struct platform_driver sifive_ccache_driver = {
+       .probe  = sifive_ccache_probe,
+       .driver = {
+               .name           = "sifive_ccache",
+               .of_match_table = sifive_ccache_ids,
+       },
+};
+
 static int __init sifive_ccache_init(void)
 {
        struct device_node *np;
        struct resource res;
-       int i, rc, intr_num;
        const struct of_device_id *match;
        unsigned long quirks;
+       int rc;
 
        np = of_find_matching_node_and_match(NULL, sifive_ccache_ids, &match);
        if (!np)
@@ -277,28 +315,6 @@ static int __init sifive_ccache_init(void)
                goto err_unmap;
        }
 
-       intr_num = of_property_count_u32_elems(np, "interrupts");
-       if (!intr_num) {
-               pr_err("No interrupts property\n");
-               rc = -ENODEV;
-               goto err_unmap;
-       }
-
-       for (i = 0; i < intr_num; i++) {
-               g_irq[i] = irq_of_parse_and_map(np, i);
-
-               if (i == DATA_UNCORR && (quirks & QUIRK_BROKEN_DATA_UNCORR))
-                       continue;
-
-               rc = request_irq(g_irq[i], ccache_int_handler, 0, "ccache_ecc",
-                                NULL);
-               if (rc) {
-                       pr_err("Could not request IRQ %d\n", g_irq[i]);
-                       goto err_free_irq;
-               }
-       }
-       of_node_put(np);
-
 #ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
        if (quirks & QUIRK_NONSTANDARD_CACHE_OPS) {
                riscv_cbom_block_size = SIFIVE_CCACHE_LINE_SIZE;
@@ -315,11 +331,15 @@ static int __init sifive_ccache_init(void)
 #ifdef CONFIG_DEBUG_FS
        setup_sifive_debug();
 #endif
+
+       rc = platform_driver_register(&sifive_ccache_driver);
+       if (rc)
+               goto err_unmap;
+
+       of_node_put(np);
+
        return 0;
 
-err_free_irq:
-       while (--i >= 0)
-               free_irq(g_irq[i], NULL);
 err_unmap:
        iounmap(ccache_base);
 err_node_put:
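Two things fall out of moving the IRQ setup into a platform-driver probe(). First, devm_request_irq() ties each IRQ's lifetime to the device, so the hand-rolled err_free_irq unwind loop deleted above becomes unnecessary: devm releases the IRQs automatically on probe failure or unbind. Second, dev_err_probe() logs and returns the error in one expression. The resulting idiom, in isolation:

    rc = devm_request_irq(dev, irq, ccache_int_handler, 0, "ccache_ecc", NULL);
    if (rc)
            return dev_err_probe(dev, rc, "Could not request IRQ %d\n", irq);
    /* no matching free_irq(): devm handles teardown */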
index 456be28ba67cb476846c83c532e7bd04e521463f..2597cb43f43871dc0dc629c13b0b0ee3acf1398a 100644 (file)
@@ -702,7 +702,7 @@ static void extract_entropy(void *buf, size_t len)
 
 static void __cold _credit_init_bits(size_t bits)
 {
-       static struct execute_work set_ready;
+       static DECLARE_WORK(set_ready, crng_set_ready);
        unsigned int new, orig, add;
        unsigned long flags;
 
@@ -718,8 +718,8 @@ static void __cold _credit_init_bits(size_t bits)
 
        if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
                crng_reseed(NULL); /* Sets crng_init to CRNG_READY under base_crng.lock. */
-               if (static_key_initialized)
-                       execute_in_process_context(crng_set_ready, &set_ready);
+               if (static_key_initialized && system_unbound_wq)
+                       queue_work(system_unbound_wq, &set_ready);
                atomic_notifier_call_chain(&random_ready_notifier, 0, NULL);
                wake_up_interruptible(&crng_init_wait);
                kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -890,8 +890,8 @@ void __init random_init(void)
 
        /*
         * If we were initialized by the cpu or bootloader before jump labels
-        * are initialized, then we should enable the static branch here, where
-        * it's guaranteed that jump labels have been initialized.
+        * or workqueues are initialized, then we should enable the static
+        * branch here, where it's guaranteed that these have been initialized.
         */
        if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
                crng_set_ready(NULL);
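Replacing execute_in_process_context() with a statically declared work item is what makes the system_unbound_wq check above sufficient: DECLARE_WORK binds the handler at compile time, so nothing needs runtime initialization before the work can be queued, even from atomic context. A minimal sketch of the pattern (names are placeholders):

    #include <linux/workqueue.h>

    static void my_ready_fn(struct work_struct *work)
    {
            /* runs later, in process context */
    }
    static DECLARE_WORK(my_ready_work, my_ready_fn);

    /* from any context, once workqueues exist: */
    queue_work(system_unbound_wq, &my_ready_work);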
index af5cb818f84d6bf566e6c0a84763d8239d64700f..cb8c155a2c9b3dbdcbf00f198c5783b9559f8a89 100644 (file)
@@ -525,22 +525,11 @@ static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
 {
        struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
        u32 uid;
-       int rc;
 
        if (kstrtou32(acpi_device_uid(hb), 0, &uid))
                return -EINVAL;
 
-       rc = acpi_get_genport_coordinates(uid, dport->hb_coord);
-       if (rc < 0)
-               return rc;
-
-       /* Adjust back to picoseconds from nanoseconds */
-       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
-               dport->hb_coord[i].read_latency *= 1000;
-               dport->hb_coord[i].write_latency *= 1000;
-       }
-
-       return 0;
+       return acpi_get_genport_coordinates(uid, dport->coord);
 }
 
 static int add_host_bridge_dport(struct device *match, void *arg)
index eddbbe21450ca9dca5e71bf6ec14866cde0935d3..bb83867d9fec985634bb9b03652f1eaa34fc8a22 100644 (file)
 struct dsmas_entry {
        struct range dpa_range;
        u8 handle;
-       struct access_coordinate coord;
+       struct access_coordinate coord[ACCESS_COORDINATE_MAX];
 
        int entries;
        int qos_class;
 };
 
+static u32 cdat_normalize(u16 entry, u64 base, u8 type)
+{
+       u32 value;
+
+       /*
+        * Check for invalid and overflow values
+        */
+       if (entry == 0xffff || !entry)
+               return 0;
+       else if (base > (UINT_MAX / (entry)))
+               return 0;
+
+       /*
+        * CDAT fields follow the format of HMAT fields. See table 5 Device
+        * Scoped Latency and Bandwidth Information Structure in Coherent Device
+        * Attribute Table (CDAT) Specification v1.01.
+        */
+       value = entry * base;
+       switch (type) {
+       case ACPI_HMAT_ACCESS_LATENCY:
+       case ACPI_HMAT_READ_LATENCY:
+       case ACPI_HMAT_WRITE_LATENCY:
+               value = DIV_ROUND_UP(value, 1000);
+               break;
+       default:
+               break;
+       }
+       return value;
+}
+
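A worked example of cdat_normalize(), with made-up numbers. CDAT entries, like HMAT's, encode latency in picoseconds as entry * entry_base_unit, while the kernel's access_coordinate wants nanoseconds; hence the DIV_ROUND_UP by 1000 for the latency types (and note the cxl/acpi.c hunk above drops the old nanoseconds-to-picoseconds re-adjustment to match):

    cdat_normalize(100, 100, ACPI_HMAT_READ_LATENCY);    /* 10000 ps -> 10 ns */
    cdat_normalize(0xffff, 100, ACPI_HMAT_READ_LATENCY); /* invalid -> 0      */
    cdat_normalize(200, 100, ACPI_HMAT_READ_BANDWIDTH);  /* 20000, no scaling */

Zero entries, the 0xffff "invalid" marker, and products that would overflow 32 bits all normalize to 0.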
 static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
                              const unsigned long end)
 {
@@ -58,8 +88,8 @@ static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
        return 0;
 }
 
-static void cxl_access_coordinate_set(struct access_coordinate *coord,
-                                     int access, unsigned int val)
+static void __cxl_access_coordinate_set(struct access_coordinate *coord,
+                                       int access, unsigned int val)
 {
        switch (access) {
        case ACPI_HMAT_ACCESS_LATENCY:
@@ -85,6 +115,13 @@ static void cxl_access_coordinate_set(struct access_coordinate *coord,
        }
 }
 
+static void cxl_access_coordinate_set(struct access_coordinate *coord,
+                                     int access, unsigned int val)
+{
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+               __cxl_access_coordinate_set(&coord[i], access, val);
+}
+
 static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
                               const unsigned long end)
 {
@@ -97,7 +134,6 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
        __le16 le_val;
        u64 val;
        u16 len;
-       int rc;
 
        len = le16_to_cpu((__force __le16)hdr->length);
        if (len != size || (unsigned long)hdr + len > end) {
@@ -124,12 +160,10 @@ static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
 
        le_base = (__force __le64)dslbis->entry_base_unit;
        le_val = (__force __le16)dslbis->entry[0];
-       rc = check_mul_overflow(le64_to_cpu(le_base),
-                               le16_to_cpu(le_val), &val);
-       if (rc)
-               pr_warn("DSLBIS value overflowed.\n");
+       val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+                            dslbis->data_type);
 
-       cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
+       cxl_access_coordinate_set(dent->coord, dslbis->data_type, val);
 
        return 0;
 }
@@ -163,25 +197,18 @@ static int cxl_cdat_endpoint_process(struct cxl_port *port,
 static int cxl_port_perf_data_calculate(struct cxl_port *port,
                                        struct xarray *dsmas_xa)
 {
-       struct access_coordinate ep_c;
-       struct access_coordinate coord[ACCESS_COORDINATE_MAX];
+       struct access_coordinate ep_c[ACCESS_COORDINATE_MAX];
        struct dsmas_entry *dent;
        int valid_entries = 0;
        unsigned long index;
        int rc;
 
-       rc = cxl_endpoint_get_perf_coordinates(port, &ep_c);
+       rc = cxl_endpoint_get_perf_coordinates(port, ep_c);
        if (rc) {
                dev_dbg(&port->dev, "Failed to retrieve ep perf coordinates.\n");
                return rc;
        }
 
-       rc = cxl_hb_get_perf_coordinates(port, coord);
-       if (rc)  {
-               dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
-               return rc;
-       }
-
        struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
 
        if (!cxl_root)
@@ -193,18 +220,10 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
        xa_for_each(dsmas_xa, index, dent) {
                int qos_class;
 
-               cxl_coordinates_combine(&dent->coord, &dent->coord, &ep_c);
-               /*
-                * Keeping the host bridge coordinates separate from the dsmas
-                * coordinates in order to allow calculation of access class
-                * 0 and 1 for region later.
-                */
-               cxl_coordinates_combine(&coord[ACCESS_COORDINATE_CPU],
-                                       &coord[ACCESS_COORDINATE_CPU],
-                                       &dent->coord);
+               cxl_coordinates_combine(dent->coord, dent->coord, ep_c);
                dent->entries = 1;
                rc = cxl_root->ops->qos_class(cxl_root,
-                                             &coord[ACCESS_COORDINATE_CPU],
+                                             &dent->coord[ACCESS_COORDINATE_CPU],
                                              1, &qos_class);
                if (rc != 1)
                        continue;
@@ -222,14 +241,17 @@ static int cxl_port_perf_data_calculate(struct cxl_port *port,
 static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
                              struct cxl_dpa_perf *dpa_perf)
 {
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+               dpa_perf->coord[i] = dent->coord[i];
        dpa_perf->dpa_range = dent->dpa_range;
-       dpa_perf->coord = dent->coord;
        dpa_perf->qos_class = dent->qos_class;
        dev_dbg(dev,
                "DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
                dent->dpa_range.start, dpa_perf->qos_class,
-               dent->coord.read_bandwidth, dent->coord.write_bandwidth,
-               dent->coord.read_latency, dent->coord.write_latency);
+               dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
+               dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
+               dent->coord[ACCESS_COORDINATE_CPU].read_latency,
+               dent->coord[ACCESS_COORDINATE_CPU].write_latency);
 }
 
 static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
@@ -461,17 +483,16 @@ static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
 
                le_base = (__force __le64)tbl->sslbis_header.entry_base_unit;
                le_val = (__force __le16)tbl->entries[i].latency_or_bandwidth;
-
-               if (check_mul_overflow(le64_to_cpu(le_base),
-                                      le16_to_cpu(le_val), &val))
-                       dev_warn(dev, "SSLBIS value overflowed!\n");
+               val = cdat_normalize(le16_to_cpu(le_val), le64_to_cpu(le_base),
+                                    sslbis->data_type);
 
                xa_for_each(&port->dports, index, dport) {
                        if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
-                           dsp_id == dport->port_id)
-                               cxl_access_coordinate_set(&dport->sw_coord,
+                           dsp_id == dport->port_id) {
+                               cxl_access_coordinate_set(dport->coord,
                                                          sslbis->data_type,
                                                          val);
+                       }
                }
        }
 
@@ -493,6 +514,21 @@ void cxl_switch_parse_cdat(struct cxl_port *port)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
 
+static void __cxl_coordinates_combine(struct access_coordinate *out,
+                                     struct access_coordinate *c1,
+                                     struct access_coordinate *c2)
+{
+               if (c1->write_bandwidth && c2->write_bandwidth)
+                       out->write_bandwidth = min(c1->write_bandwidth,
+                                                  c2->write_bandwidth);
+               out->write_latency = c1->write_latency + c2->write_latency;
+
+               if (c1->read_bandwidth && c2->read_bandwidth)
+                       out->read_bandwidth = min(c1->read_bandwidth,
+                                                 c2->read_bandwidth);
+               out->read_latency = c1->read_latency + c2->read_latency;
+}
+
 /**
  * cxl_coordinates_combine - Combine the two input coordinates
  *
@@ -504,15 +540,8 @@ void cxl_coordinates_combine(struct access_coordinate *out,
                             struct access_coordinate *c1,
                             struct access_coordinate *c2)
 {
-               if (c1->write_bandwidth && c2->write_bandwidth)
-                       out->write_bandwidth = min(c1->write_bandwidth,
-                                                  c2->write_bandwidth);
-               out->write_latency = c1->write_latency + c2->write_latency;
-
-               if (c1->read_bandwidth && c2->read_bandwidth)
-                       out->read_bandwidth = min(c1->read_bandwidth,
-                                                 c2->read_bandwidth);
-               out->read_latency = c1->read_latency + c2->read_latency;
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+               __cxl_coordinates_combine(&out[i], &c1[i], &c2[i]);
 }
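
For a concrete feel of the combine rule (values hypothetical): c1 = {read_bandwidth 8000, read_latency 200} and c2 = {read_bandwidth 16000, read_latency 300} combine to read_bandwidth min(8000, 16000) = 8000 and read_latency 200 + 300 = 500. A zero bandwidth on either side leaves the output bandwidth untouched, which is why the helper guards each min() with the non-zero checks.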
 
 MODULE_IMPORT_NS(CXL);
@@ -521,17 +550,13 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
                                    struct cxl_endpoint_decoder *cxled)
 {
        struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
-       struct cxl_port *port = cxlmd->endpoint;
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-       struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
-       struct access_coordinate coord;
        struct range dpa = {
                        .start = cxled->dpa_res->start,
                        .end = cxled->dpa_res->end,
        };
        struct cxl_dpa_perf *perf;
-       int rc;
 
        switch (cxlr->mode) {
        case CXL_DECODER_RAM:
@@ -549,35 +574,16 @@ void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
        if (!range_contains(&perf->dpa_range, &dpa))
                return;
 
-       rc = cxl_hb_get_perf_coordinates(port, hb_coord);
-       if (rc)  {
-               dev_dbg(&port->dev, "Failed to retrieve hb perf coordinates.\n");
-               return;
-       }
-
        for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
-               /* Pickup the host bridge coords */
-               cxl_coordinates_combine(&coord, &hb_coord[i], &perf->coord);
-
                /* Get total bandwidth and the worst latency for the cxl region */
                cxlr->coord[i].read_latency = max_t(unsigned int,
                                                    cxlr->coord[i].read_latency,
-                                                   coord.read_latency);
+                                                   perf->coord[i].read_latency);
                cxlr->coord[i].write_latency = max_t(unsigned int,
                                                     cxlr->coord[i].write_latency,
-                                                    coord.write_latency);
-               cxlr->coord[i].read_bandwidth += coord.read_bandwidth;
-               cxlr->coord[i].write_bandwidth += coord.write_bandwidth;
-
-               /*
-                * Convert latency to nanosec from picosec to be consistent
-                * with the resulting latency coordinates computed by the
-                * HMAT_REPORTING code.
-                */
-               cxlr->coord[i].read_latency =
-                       DIV_ROUND_UP(cxlr->coord[i].read_latency, 1000);
-               cxlr->coord[i].write_latency =
-                       DIV_ROUND_UP(cxlr->coord[i].write_latency, 1000);
+                                                    perf->coord[i].write_latency);
+               cxlr->coord[i].read_bandwidth += perf->coord[i].read_bandwidth;
+               cxlr->coord[i].write_bandwidth += perf->coord[i].write_bandwidth;
        }
 }
 
index 9adda4795eb786b8658b573dd1e79befbad52255..f0f54aeccc872b50311a14958ddf874860af7982 100644 (file)
@@ -915,7 +915,7 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
 
                payload->handles[i++] = gen->hdr.handle;
                dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
-                       le16_to_cpu(payload->handles[i]));
+                       le16_to_cpu(payload->handles[i - 1]));
 
                if (i == max_handles) {
                        payload->nr_recs = i;
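
The first hunk above fixes a classic post-increment off-by-one: after handles[i++] = h, index i already points one past the stored element, so logging handles[i] reads a slot that has not been written yet. A minimal standalone sketch (hypothetical, outside the patch):

#include <stdio.h>

int main(void)
{
	int handles[4] = { 0 };
	int i = 0;

	handles[i++] = 42;                      /* store at slot 0; i is now 1 */
	printf("buggy: %d\n", handles[i]);      /* slot 1: never written */
	printf("fixed: %d\n", handles[i - 1]);  /* slot 0: the stored 42 */
	return 0;
}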
@@ -958,13 +958,14 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
                .payload_in = &log_type,
                .size_in = sizeof(log_type),
                .payload_out = payload,
-               .size_out = mds->payload_size,
                .min_out = struct_size(payload, records, 0),
        };
 
        do {
                int rc, i;
 
+               mbox_cmd.size_out = mds->payload_size;
+
                rc = cxl_internal_send_cmd(mds, &mbox_cmd);
                if (rc) {
                        dev_err_ratelimited(dev,
index 2b0cab556072f560420f7f7bf4d0bcddd0a01b4a..762783bb091afc8a40883c9ab2ee9c0f39e37219 100644 (file)
@@ -2133,36 +2133,44 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
 }
 EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
 
-/**
- * cxl_hb_get_perf_coordinates - Retrieve performance numbers between initiator
- *                              and host bridge
- *
- * @port: endpoint cxl_port
- * @coord: output access coordinates
- *
- * Return: errno on failure, 0 on success.
- */
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
-                               struct access_coordinate *coord)
+static void add_latency(struct access_coordinate *c, long latency)
 {
-       struct cxl_port *iter = port;
-       struct cxl_dport *dport;
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+               c[i].write_latency += latency;
+               c[i].read_latency += latency;
+       }
+}
 
-       if (!is_cxl_endpoint(port))
-               return -EINVAL;
+static bool coordinates_valid(struct access_coordinate *c)
+{
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+               if (c[i].read_bandwidth && c[i].write_bandwidth &&
+                   c[i].read_latency && c[i].write_latency)
+                       continue;
+               return false;
+       }
 
-       dport = iter->parent_dport;
-       while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
-               iter = to_cxl_port(iter->dev.parent);
-               dport = iter->parent_dport;
+       return true;
+}
+
+static void set_min_bandwidth(struct access_coordinate *c, unsigned int bw)
+{
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+               c[i].write_bandwidth = min(c[i].write_bandwidth, bw);
+               c[i].read_bandwidth = min(c[i].read_bandwidth, bw);
        }
+}
 
-       coord[ACCESS_COORDINATE_LOCAL] =
-               dport->hb_coord[ACCESS_COORDINATE_LOCAL];
-       coord[ACCESS_COORDINATE_CPU] =
-               dport->hb_coord[ACCESS_COORDINATE_CPU];
+static void set_access_coordinates(struct access_coordinate *out,
+                                  struct access_coordinate *in)
+{
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++)
+               out[i] = in[i];
+}
 
-       return 0;
+static bool parent_port_is_cxl_root(struct cxl_port *port)
+{
+       return is_cxl_root(to_cxl_port(port->dev.parent));
 }
 
 /**
@@ -2176,35 +2184,53 @@ int cxl_hb_get_perf_coordinates(struct cxl_port *port,
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
                                      struct access_coordinate *coord)
 {
-       struct access_coordinate c = {
-               .read_bandwidth = UINT_MAX,
-               .write_bandwidth = UINT_MAX,
+       struct access_coordinate c[] = {
+               {
+                       .read_bandwidth = UINT_MAX,
+                       .write_bandwidth = UINT_MAX,
+               },
+               {
+                       .read_bandwidth = UINT_MAX,
+                       .write_bandwidth = UINT_MAX,
+               },
        };
        struct cxl_port *iter = port;
        struct cxl_dport *dport;
        struct pci_dev *pdev;
        unsigned int bw;
+       bool is_cxl_root;
 
        if (!is_cxl_endpoint(port))
                return -EINVAL;
 
-       dport = iter->parent_dport;
-
        /*
-        * Exit the loop when the parent port of the current port is cxl root.
-        * The iterative loop starts at the endpoint and gathers the
-        * latency of the CXL link from the current iter to the next downstream
-        * port each iteration. If the parent is cxl root then there is
-        * nothing to gather.
+        * Exit the loop when the parent port of the current iter port is cxl
+        * root. The iterative loop starts at the endpoint and gathers the
+        * latency of the CXL link from the current device/port to the connected
+        * downstream port each iteration.
         */
-       while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
-               cxl_coordinates_combine(&c, &c, &dport->sw_coord);
-               c.write_latency += dport->link_latency;
-               c.read_latency += dport->link_latency;
-
-               iter = to_cxl_port(iter->dev.parent);
+       do {
                dport = iter->parent_dport;
-       }
+               iter = to_cxl_port(iter->dev.parent);
+               is_cxl_root = parent_port_is_cxl_root(iter);
+
+               /*
+                * There's no valid access_coordinate for a root port since RPs do not
+                * have CDAT and therefore need to be skipped.
+                */
+               if (!is_cxl_root) {
+                       if (!coordinates_valid(dport->coord))
+                               return -EINVAL;
+                       cxl_coordinates_combine(c, c, dport->coord);
+               }
+               add_latency(c, dport->link_latency);
+       } while (!is_cxl_root);
+
+       dport = iter->parent_dport;
+       /* Retrieve HB coords */
+       if (!coordinates_valid(dport->coord))
+               return -EINVAL;
+       cxl_coordinates_combine(c, c, dport->coord);
 
        /* Get the calculated PCI paths bandwidth */
        pdev = to_pci_dev(port->uport_dev->parent);
@@ -2213,10 +2239,8 @@ int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
                return -ENXIO;
        bw /= BITS_PER_BYTE;
 
-       c.write_bandwidth = min(c.write_bandwidth, bw);
-       c.read_bandwidth = min(c.read_bandwidth, bw);
-
-       *coord = c;
+       set_min_bandwidth(c, bw);
+       set_access_coordinates(coord, c);
 
        return 0;
 }
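
For scale (numbers approximate and illustrative): the bw value comes from a PCIe available-bandwidth query reported in Mb/s, hence the BITS_PER_BYTE division down to MB/s. A hypothetical 16 GT/s x8 segment with 128b/130b encoding yields roughly 126,000 Mb/s, so set_min_bandwidth() would cap both access classes at about 15,750 MB/s.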
index 372786f809555f66509186c3e3476af2fad0d7f8..3c42f984eeafaa54af79ac280cd24c0df62f944f 100644 (file)
@@ -271,6 +271,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
 static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
                                struct cxl_register_map *map)
 {
+       u8 reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
        int bar = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BIR_MASK, reg_lo);
        u64 offset = ((u64)reg_hi << 32) |
                     (reg_lo & CXL_DVSEC_REG_LOCATOR_BLOCK_OFF_LOW_MASK);
@@ -278,11 +279,11 @@ static bool cxl_decode_regblock(struct pci_dev *pdev, u32 reg_lo, u32 reg_hi,
        if (offset > pci_resource_len(pdev, bar)) {
                dev_warn(&pdev->dev,
                         "BAR%d: %pr: too small (offset: %pa, type: %d)\n", bar,
-                        &pdev->resource[bar], &offset, map->reg_type);
+                        &pdev->resource[bar], &offset, reg_type);
                return false;
        }
 
-       map->reg_type = FIELD_GET(CXL_DVSEC_REG_LOCATOR_BLOCK_ID_MASK, reg_lo);
+       map->reg_type = reg_type;
        map->resource = pci_resource_start(pdev, bar) + offset;
        map->max_size = pci_resource_len(pdev, bar) - offset;
        return true;
index 534e25e2f0a48197a0588abd8a46d996bb333ed8..036d17db68e0068752277adf0e5b56c7b526e566 100644 (file)
@@ -663,8 +663,7 @@ struct cxl_rcrb_info {
  * @rch: Indicate whether this dport was enumerated in RCH or VH mode
  * @port: reference to cxl_port that contains this downstream port
  * @regs: Dport parsed register blocks
- * @sw_coord: access coordinates (performance) for switch from CDAT
- * @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
+ * @coord: access coordinates (bandwidth and latency performance attributes)
  * @link_latency: calculated PCIe downstream latency
  */
 struct cxl_dport {
@@ -675,8 +674,7 @@ struct cxl_dport {
        bool rch;
        struct cxl_port *port;
        struct cxl_regs regs;
-       struct access_coordinate sw_coord;
-       struct access_coordinate hb_coord[ACCESS_COORDINATE_MAX];
+       struct access_coordinate coord[ACCESS_COORDINATE_MAX];
        long link_latency;
 };
 
@@ -884,8 +882,6 @@ void cxl_switch_parse_cdat(struct cxl_port *port);
 
 int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
                                      struct access_coordinate *coord);
-int cxl_hb_get_perf_coordinates(struct cxl_port *port,
-                               struct access_coordinate *coord);
 void cxl_region_perf_data_calculate(struct cxl_region *cxlr,
                                    struct cxl_endpoint_decoder *cxled);
 
index 20fb3b35e89e0473ee8ad42dcd17407086fb8cdb..36cee9c30cebd20488ec5afd216187ef82497e54 100644 (file)
@@ -401,7 +401,7 @@ enum cxl_devtype {
  */
 struct cxl_dpa_perf {
        struct range dpa_range;
-       struct access_coordinate coord;
+       struct access_coordinate coord[ACCESS_COORDINATE_MAX];
        int qos_class;
 };
 
index f2556a8e940156bc4f9d34ae5dc92aac837b688a..9bc2e10381afd9cc6f97d6dd50510c8daa092b5b 100644 (file)
@@ -790,7 +790,7 @@ static void ffa_notification_info_get(void)
 
                        part_id = packed_id_list[ids_processed++];
 
-                       if (!ids_count[list]) { /* Global Notification */
+                       if (ids_count[list] == 1) { /* Global Notification */
                                __do_sched_recv_cb(part_id, 0, false);
                                continue;
                        }
index ea9201e7044cbdbfea4d12bb5ac2390330c5d911..1fa79bba492e880fea5af80a038eddf4cce7c003 100644 (file)
@@ -736,7 +736,7 @@ static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
        ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
                                   POWERCAP_PAI_GET, 4, domain,
                                   &fc[POWERCAP_FC_PAI].get_addr, NULL,
-                                  &fc[POWERCAP_PAI_GET].rate_limit);
+                                  &fc[POWERCAP_FC_PAI].rate_limit);
 
        *p_fc = fc;
 }
index 350573518503355f6abaa4d24cbcac6368e8930c..130d13e9cd6beb93498469fae489b05e5ba1dfab 100644 (file)
@@ -921,7 +921,7 @@ static int scmi_dbg_raw_mode_open(struct inode *inode, struct file *filp)
        rd->raw = raw;
        filp->private_data = rd;
 
-       return 0;
+       return nonseekable_open(inode, filp);
 }
 
 static int scmi_dbg_raw_mode_release(struct inode *inode, struct file *filp)
@@ -950,6 +950,7 @@ static const struct file_operations scmi_dbg_raw_mode_reset_fops = {
        .open = scmi_dbg_raw_mode_open,
        .release = scmi_dbg_raw_mode_release,
        .write = scmi_dbg_raw_mode_reset_write,
+       .llseek = no_llseek,
        .owner = THIS_MODULE,
 };
 
@@ -959,6 +960,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_fops = {
        .read = scmi_dbg_raw_mode_message_read,
        .write = scmi_dbg_raw_mode_message_write,
        .poll = scmi_dbg_raw_mode_message_poll,
+       .llseek = no_llseek,
        .owner = THIS_MODULE,
 };
 
@@ -975,6 +977,7 @@ static const struct file_operations scmi_dbg_raw_mode_message_async_fops = {
        .read = scmi_dbg_raw_mode_message_read,
        .write = scmi_dbg_raw_mode_message_async_write,
        .poll = scmi_dbg_raw_mode_message_poll,
+       .llseek = no_llseek,
        .owner = THIS_MODULE,
 };
 
@@ -998,6 +1001,7 @@ static const struct file_operations scmi_dbg_raw_mode_notification_fops = {
        .release = scmi_dbg_raw_mode_release,
        .read = scmi_test_dbg_raw_mode_notif_read,
        .poll = scmi_test_dbg_raw_mode_notif_poll,
+       .llseek = no_llseek,
        .owner = THIS_MODULE,
 };
 
@@ -1021,6 +1025,7 @@ static const struct file_operations scmi_dbg_raw_mode_errors_fops = {
        .release = scmi_dbg_raw_mode_release,
        .read = scmi_test_dbg_raw_mode_errors_read,
        .poll = scmi_test_dbg_raw_mode_errors_poll,
+       .llseek = no_llseek,
        .owner = THIS_MODULE,
 };
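
These hunks apply one pattern across all of the raw-mode debugfs files: mark the struct file nonseekable when it is opened and reject llseek() outright, so the entries behave as pure message streams. A condensed sketch of the pattern (driver names hypothetical):

#include <linux/fs.h>
#include <linux/module.h>

static int raw_open(struct inode *inode, struct file *filp)
{
	/* ... driver-specific setup ... */
	return nonseekable_open(inode, filp);	/* drops FMODE_LSEEK/PREAD/PWRITE */
}

static const struct file_operations raw_fops = {
	.open	= raw_open,
	.llseek	= no_llseek,	/* lseek() now fails with -ESPIPE */
	.owner	= THIS_MODULE,
};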
 
index 1ee62cd58582b6496f0536fa7c45e2dc0305797f..25db014494a4de9bb8c44d0b2bd39d8786c3bb59 100644 (file)
@@ -92,7 +92,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type)
                case 0x5e:
                        return GPIOPANELCTL;
                default:
-                       return -EOPNOTSUPP;
+                       return -ENOTSUPP;
                }
        }
 
index 5ef8af8249806aa6c1b226ed4ab9219cca91d936..c097e310c9e841044a3ef214444170721d116537 100644 (file)
@@ -529,6 +529,7 @@ static const struct of_device_id lpc32xx_gpio_of_match[] = {
        { .compatible = "nxp,lpc3220-gpio", },
        { },
 };
+MODULE_DEVICE_TABLE(of, lpc32xx_gpio_of_match);
 
 static struct platform_driver lpc32xx_gpio_driver = {
        .driver         = {
index c18b6b47384f1b8b9a3a26c3ac7c5f125e82d365..94ca9d03c0949453abf3ad82e013698a7a97ffda 100644 (file)
@@ -104,7 +104,7 @@ static inline int to_reg(int gpio, enum ctrl_register type)
        unsigned int reg = type == CTRL_IN ? GPIO_IN_CTRL_BASE : GPIO_OUT_CTRL_BASE;
 
        if (gpio >= WCOVE_GPIO_NUM)
-               return -EOPNOTSUPP;
+               return -ENOTSUPP;
 
        return reg + gpio;
 }
index 9c62552bec344e370996a028d809934e4a6f4420..b3b84647207ed47463e004e2c72745c6120857d1 100644 (file)
@@ -210,6 +210,7 @@ extern int amdgpu_async_gfx_ring;
 extern int amdgpu_mcbp;
 extern int amdgpu_discovery;
 extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
 extern int amdgpu_mes_kiq;
 extern int amdgpu_noretry;
 extern int amdgpu_force_asic_type;
index aa16d51dd8421b38a0a34fcf89263ffbf08af4fd..7753a2e64d4114a280afc99beb341f4af8f4ffac 100644 (file)
@@ -4135,18 +4135,22 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                                        adev->ip_blocks[i].status.hw = true;
                                }
                        }
+               } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+                                  !amdgpu_device_has_display_hardware(adev)) {
+                                       r = psp_gpu_reset(adev);
                } else {
-                       tmp = amdgpu_reset_method;
-                       /* It should do a default reset when loading or reloading the driver,
-                        * regardless of the module parameter reset_method.
-                        */
-                       amdgpu_reset_method = AMD_RESET_METHOD_NONE;
-                       r = amdgpu_asic_reset(adev);
-                       amdgpu_reset_method = tmp;
-                       if (r) {
-                               dev_err(adev->dev, "asic reset on init failed\n");
-                               goto failed;
-                       }
+                               tmp = amdgpu_reset_method;
+                               /* It should do a default reset when loading or reloading the driver,
+                                * regardless of the module parameter reset_method.
+                                */
+                               amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+                               r = amdgpu_asic_reset(adev);
+                               amdgpu_reset_method = tmp;
+               }
+
+               if (r) {
+                       dev_err(adev->dev, "asic reset on init failed\n");
+                       goto failed;
                }
        }
 
index fdd36fb027ab6aa04b31c790af80596bb7da0427..ac5bf01fe8d2a9e9741d00981683b0e32b02f4eb 100644 (file)
@@ -1896,6 +1896,7 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
                break;
        case IP_VERSION(14, 0, 0):
+       case IP_VERSION(14, 0, 1):
                amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
                break;
        default:
index 80b9642f2bc4f25c69e9f30c70138f073e0c6cd2..e4277298cf1aad3518025b898162ea6224e874de 100644 (file)
@@ -195,6 +195,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = -1;
 int amdgpu_discovery = -1;
 int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
 int amdgpu_mes_kiq;
 int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
@@ -667,6 +668,15 @@ MODULE_PARM_DESC(mes,
        "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
 module_param_named(mes, amdgpu_mes, int, 0444);
 
+/**
+ * DOC: mes_log_enable (int)
+ * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+       "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
 /**
  * DOC: mes_kiq (int)
  * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
index 4b3000c21ef2c59cba09ca39e3dc5421208049d5..e4742b65032d1dce16db69ea086c86dd4895e610 100644 (file)
@@ -304,12 +304,15 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
                dma_fence_set_error(finished, -ECANCELED);
 
        if (finished->error < 0) {
-               DRM_INFO("Skip scheduling IBs!\n");
+               dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+                       ring->name);
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
-                       DRM_ERROR("Error scheduling IBs (%d)\n", r);
+                       dev_err(adev->dev,
+                               "Error scheduling IBs (%d) in ring(%s)", r,
+                               ring->name);
        }
 
        job->job_run_counter++;
index a98e03e0a51f1f741895d253f896e76de29f9aec..a00cf4756ad0e2f371742e760183882773a80243 100644 (file)
@@ -102,7 +102,10 @@ static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
 {
        int r;
 
-       r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+       if (!amdgpu_mes_log_enable)
+               return 0;
+
+       r = amdgpu_bo_create_kernel(adev, AMDGPU_MES_LOG_BUFFER_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT,
                                    &adev->mes.event_log_gpu_obj,
                                    &adev->mes.event_log_gpu_addr,
@@ -1549,12 +1552,11 @@ static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
        uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
 
        seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
-                    mem, PAGE_SIZE, false);
+                    mem, AMDGPU_MES_LOG_BUFFER_SIZE, false);
 
        return 0;
 }
 
-
 DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
 
 #endif
@@ -1565,7 +1567,7 @@ void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
        struct dentry *root = minor->debugfs_root;
-       if (adev->enable_mes)
+       if (adev->enable_mes && amdgpu_mes_log_enable)
                debugfs_create_file("amdgpu_mes_event_log", 0444, root,
                                    adev, &amdgpu_debugfs_mes_event_log_fops);
 
index 7d4f93fea937ae1d82ebd95af9cad8dc71586034..4c8fc3117ef8948627ef6a83cb7f603de2991662 100644 (file)
@@ -52,6 +52,7 @@ enum amdgpu_mes_priority_level {
 
 #define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
 #define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
+#define AMDGPU_MES_LOG_BUFFER_SIZE 0x4000 /* Maximum log buffer size for MES */
 
 struct amdgpu_mes_funcs;
 
index d6f808acfb17b79d98664d0fedaa95d8e29a4270..fbb43ae7624f44ebd13ddbe5a78865ea2dba10ab 100644 (file)
@@ -62,6 +62,11 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
        adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
 }
 
+static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
+{
+       return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
+}
+
 static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
                             uint32_t inst_idx, struct amdgpu_ring *ring)
 {
@@ -87,7 +92,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
        case AMDGPU_RING_TYPE_VCN_ENC:
        case AMDGPU_RING_TYPE_VCN_JPEG:
                ip_blk = AMDGPU_XCP_VCN;
-               if (adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+               if (aqua_vanjaram_xcp_vcn_shared(adev))
                        inst_mask = 1 << (inst_idx * 2);
                break;
        default:
@@ -140,10 +145,12 @@ static int aqua_vanjaram_xcp_sched_list_update(
 
                aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
 
-               /* VCN is shared by two partitions under CPX MODE */
+               /* VCN may be shared by two partitions under CPX MODE in certain
+                * configs.
+                */
                if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
-                       ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
-                       adev->xcp_mgr->mode == AMDGPU_CPX_PARTITION_MODE)
+                    ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+                   aqua_vanjaram_xcp_vcn_shared(adev))
                        aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
        }
 
index 1770e496c1b7ce21198fdb80d3051c4c961e9b5f..f7325b02a191f726196d4ad0ac6fa3d090ab9977 100644 (file)
@@ -1635,7 +1635,7 @@ static void gfx_v11_0_setup_rb(struct amdgpu_device *adev)
                        active_rb_bitmap |= (0x3 << (i * rb_bitmap_width_per_sa));
        }
 
-       active_rb_bitmap |= global_active_rb_bitmap;
+       active_rb_bitmap &= global_active_rb_bitmap;
        adev->gfx.config.backend_enable_mask = active_rb_bitmap;
        adev->gfx.config.num_rbs = hweight32(active_rb_bitmap);
 }
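
A worked example (bit values hypothetical) of why the operator matters: if the per-SA scan computes active_rb_bitmap = 0b0111 while the harvesting fuses report global_active_rb_bitmap = 0b0011, OR-ing yields 0b0111 and counts a harvested render backend as enabled; AND-ing yields the correct 0b0011, so backend_enable_mask and num_rbs reflect only RBs that are both locally active and globally present.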
@@ -5465,6 +5465,7 @@ static void gfx_v11_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
        /* Make sure that we can't skip the SET_Q_MODE packets when the VM
         * changed in any way.
         */
+       ring->set_q_mode_offs = 0;
        ring->set_q_mode_ptr = NULL;
 }
 
index 072c478665ade1a838f810bfadc10b32bf44a5eb..63f281a9984d986961d70511c83b6e65272979b7 100644 (file)
@@ -411,8 +411,11 @@ static int mes_v11_0_set_hw_resources(struct amdgpu_mes *mes)
        mes_set_hw_res_pkt.enable_reg_active_poll = 1;
        mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
        mes_set_hw_res_pkt.oversubscription_timer = 50;
-       mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
-       mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+       if (amdgpu_mes_log_enable) {
+               mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+               mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr =
+                                       mes->event_log_gpu_addr;
+       }
 
        return mes_v11_0_submit_pkt_and_poll_completion(mes,
                        &mes_set_hw_res_pkt, sizeof(mes_set_hw_res_pkt),
index 34237a1b1f2e45c40989c2070bdc0ae071ee0c4b..82eab49be82bb99807e5caabf5079b32dfd4cb26 100644 (file)
@@ -1602,19 +1602,9 @@ static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
        u32 sdma_cntl;
 
        sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
-       switch (state) {
-       case AMDGPU_IRQ_STATE_DISABLE:
-               sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL,
-                                         DRAM_ECC_INT_ENABLE, 0);
-               WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
-               break;
-       /* sdma ecc interrupt is enabled by default
-        * driver doesn't need to do anything to
-        * enable the interrupt */
-       case AMDGPU_IRQ_STATE_ENABLE:
-       default:
-               break;
-       }
+       sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
+                                       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+       WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
 
        return 0;
 }
index 581a3bd11481cc8d44a4f22188551d5a2803cff5..43ca63fe85ac3b0f9236a27766f3a78f42c2fbfb 100644 (file)
@@ -457,10 +457,8 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
 {
        switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
        case IP_VERSION(11, 0, 0):
-               return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
        case IP_VERSION(11, 0, 2):
        case IP_VERSION(11, 0, 3):
-               return false;
        default:
                return true;
        }
@@ -722,7 +720,10 @@ static int soc21_common_early_init(void *handle)
                        AMD_PG_SUPPORT_VCN |
                        AMD_PG_SUPPORT_JPEG |
                        AMD_PG_SUPPORT_GFX_PG;
-               adev->external_rev_id = adev->rev_id + 0x1;
+               if (adev->rev_id == 0)
+                       adev->external_rev_id = 0x1;
+               else
+                       adev->external_rev_id = adev->rev_id + 0x10;
                break;
        case IP_VERSION(11, 5, 1):
                adev->cg_flags =
@@ -869,10 +870,35 @@ static int soc21_common_suspend(void *handle)
        return soc21_common_hw_fini(adev);
 }
 
+static bool soc21_need_reset_on_resume(struct amdgpu_device *adev)
+{
+       u32 sol_reg1, sol_reg2;
+
+       /* Reset on resume for the following suspend-abort cases:
+        * 1) dGPU side only (not an APU).
+        * 2) S3 suspend was aborted and the TOS is still active.
+        */
+       if (!(adev->flags & AMD_IS_APU) && adev->in_s3 &&
+           !adev->suspend_complete) {
+               sol_reg1 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+               msleep(100);
+               sol_reg2 = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+               return (sol_reg1 != sol_reg2);
+       }
+
+       return false;
+}
+
 static int soc21_common_resume(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       if (soc21_need_reset_on_resume(adev)) {
+               dev_info(adev->dev, "S3 suspend aborted, resetting...");
+               soc21_asic_reset(adev);
+       }
+
        return soc21_common_hw_init(adev);
 }
 
index 84368cf1e17535c16c031ce6677f53769f9e8f94..bd57896ab85d565770bd75484b5443de9891d601 100644 (file)
@@ -225,6 +225,8 @@ static int umsch_mm_v4_0_ring_start(struct amdgpu_umsch_mm *umsch)
 
        WREG32_SOC15(VCN, 0, regVCN_UMSCH_RB_SIZE, ring->ring_size);
 
+       ring->wptr = 0;
+
        data = RREG32_SOC15(VCN, 0, regVCN_RB_ENABLE);
        data &= ~(VCN_RB_ENABLE__AUDIO_RB_EN_MASK);
        WREG32_SOC15(VCN, 0, regVCN_RB_ENABLE, data);
index f9631f4b1a02ca5121d7b382fe128c47c7718ec5..55aa74cbc5325e23451aa255dd5ce016e0aa4df8 100644 (file)
@@ -779,8 +779,8 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
         * nodes, but not more than args->num_of_nodes as that is
         * the amount of memory allocated by user
         */
-       pa = kzalloc((sizeof(struct kfd_process_device_apertures) *
-                               args->num_of_nodes), GFP_KERNEL);
+       pa = kcalloc(args->num_of_nodes, sizeof(struct kfd_process_device_apertures),
+                    GFP_KERNEL);
        if (!pa)
                return -ENOMEM;
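
The point of the hunk above is overflow safety rather than style: kcalloc() fails cleanly when num_of_nodes * sizeof(...) would wrap, while an open-coded kzalloc(a * b) multiply can silently truncate and under-allocate. A minimal userspace analogue (hypothetical, not from the patch):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Overflow-checked array allocation, mirroring kcalloc() semantics. */
static void *checked_calloc(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;	/* n * size would wrap; refuse */
	return calloc(n, size);
}

int main(void)
{
	void *p = checked_calloc(SIZE_MAX / 2, 16);
	printf("%s\n", p ? "allocated" : "refused: would overflow");
	free(p);
	return 0;
}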
 
index 041ec3de55e72f24a6cce44e8a75682bf3381531..719d6d365e15016abca596bb7d9d1994b6e54996 100644 (file)
@@ -960,7 +960,6 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
        struct kfd_node *node;
        int i;
-       int count;
 
        if (!kfd->init_complete)
                return;
@@ -968,12 +967,10 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
        /* for runtime suspend, skip locking kfd */
        if (!run_pm) {
                mutex_lock(&kfd_processes_mutex);
-               count = ++kfd_locked;
-               mutex_unlock(&kfd_processes_mutex);
-
                /* For first KFD device suspend all the KFD processes */
-               if (count == 1)
+               if (++kfd_locked == 1)
                        kfd_suspend_all_processes();
+               mutex_unlock(&kfd_processes_mutex);
        }
 
        for (i = 0; i < kfd->num_nodes; i++) {
@@ -984,7 +981,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
-       int ret, count, i;
+       int ret, i;
 
        if (!kfd->init_complete)
                return 0;
@@ -998,12 +995,10 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
        /* for runtime resume, skip unlocking kfd */
        if (!run_pm) {
                mutex_lock(&kfd_processes_mutex);
-               count = --kfd_locked;
-               mutex_unlock(&kfd_processes_mutex);
-
-               WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
-               if (count == 0)
+               if (--kfd_locked == 0)
                        ret = kfd_resume_all_processes();
+               WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error");
+               mutex_unlock(&kfd_processes_mutex);
        }
 
        return ret;
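
Both hunks shrink the same race window: the counter update and the decision taken on it now happen under a single mutex hold, so no other caller can move kfd_locked between the unlock and the check. A minimal sketch of the pattern (pthreads stand-in, names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount;

/* The first taker runs the callback while still holding the lock. */
static void take_ref(void (*first_cb)(void))
{
	pthread_mutex_lock(&lock);
	if (++refcount == 1)
		first_cb();
	pthread_mutex_unlock(&lock);
}

/* The last dropper runs the callback while still holding the lock. */
static void drop_ref(void (*last_cb)(void))
{
	pthread_mutex_lock(&lock);
	if (--refcount == 0)
		last_cb();
	pthread_mutex_unlock(&lock);
}

static void suspend_all(void) { puts("suspend all processes"); }
static void resume_all(void) { puts("resume all processes"); }

int main(void)
{
	take_ref(suspend_all);
	drop_ref(resume_all);
	return 0;
}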
index f4d395e38683db7c85f3a7f5fc922e93b1222f88..0b655555e1678643fb84fa8b3e1640cd35a9a74e 100644 (file)
@@ -2001,6 +2001,7 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
                dev_err(dev, "HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
                while (halt_if_hws_hang)
                        schedule();
+               kfd_hws_hang(dqm);
                return -ETIME;
        }
 
index 71d2d44681b218fc5146f3354464ba0f9c08610e..6d2f60c61decc36711953fa5b0dd67888c652a32 100644 (file)
@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
 
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
 /* Number of bytes in PSP header for firmware. */
 #define PSP_HEADER_BYTES 0x100
 
@@ -3044,6 +3047,10 @@ static int dm_resume(void *handle)
        /* Do mst topology probing after resuming cached state*/
        drm_connector_list_iter_begin(ddev, &iter);
        drm_for_each_connector_iter(connector, &iter) {
+
+               if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+                       continue;
+
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
                    aconnector->mst_root)
@@ -4820,9 +4827,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
                fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
                break;
        case IP_VERSION(3, 5, 0):
-       case IP_VERSION(3, 5, 1):
                fw_name_dmub = FIRMWARE_DCN_35_DMUB;
                break;
+       case IP_VERSION(3, 5, 1):
+               fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+               break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
@@ -5921,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
                &aconnector->base.probed_modes :
                &aconnector->base.modes;
 
+       if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+               return NULL;
+
        if (aconnector->freesync_vid_base.clock != 0)
                return &aconnector->freesync_vid_base;
 
@@ -6306,19 +6318,16 @@ create_stream_for_sink(struct drm_connector *connector,
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
                mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
 
-       if (stream->link->psr_settings.psr_feature_enabled || stream->link->replay_settings.replay_feature_enabled) {
+       if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+           stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+           stream->signal == SIGNAL_TYPE_EDP) {
                //
                // should decide stream support vsc sdp colorimetry capability
                // before building vsc info packet
                //
-               stream->use_vsc_sdp_for_colorimetry = false;
-               if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
-                       stream->use_vsc_sdp_for_colorimetry =
-                               aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
-               } else {
-                       if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
-                               stream->use_vsc_sdp_for_colorimetry = true;
-               }
+               stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+                                                     stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
                if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
                        tf = TRANSFER_FUNC_GAMMA_22;
                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
@@ -8762,10 +8771,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
                if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
                        continue;
 
+notify:
                if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
                        continue;
 
-notify:
                aconnector = to_amdgpu_dm_connector(connector);
 
                mutex_lock(&adev->dm.audio_lock);
index 16e72d623630caa74e22bcb6052b162c3ff8f6c6..08c494a7a21bad10929eb3f367a465349bc9ae5c 100644 (file)
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
 
 static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
 {
-       struct drm_device *dev = connector->dev;
-
-       return drm_add_modes_noedid(connector, dev->mode_config.max_width,
-                                   dev->mode_config.max_height);
+       /* Maximum resolution supported by DWB */
+       return drm_add_modes_noedid(connector, 3840, 2160);
 }
 
 static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
index 12f3e8aa46d8dfae21b5dd1e9f4ef167ee314f2d..6ad4f4efec5dd3e684428a0fb5b3c7b4a5234075 100644 (file)
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+               bool safe_to_lower, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
 
        for (i = 0; i < dc->res_pool->pipe_count; ++i) {
-               struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe = safe_to_lower
+                       ? &context->res_ctx.pipe_ctx[i]
+                       : &dc->current_state->res_ctx.pipe_ctx[i];
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
-               if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
-                                    dc_is_virtual_signal(pipe->stream->signal))) {
+               if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+                                    !pipe->stream->link_enc)) {
                        if (disable) {
-                               pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+                               if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+                                       pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
                                reset_sync_context_for_pipe(dc, context, i);
                        } else
                                pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn316_disable_otg_wa(clk_mgr_base, context, true);
+               dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn316_disable_otg_wa(clk_mgr_base, context, false);
+               dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
                update_dispclk = true;
        }
index 101fe96287cb480bf9ee142ceb998a84ab1027f8..d9c5692c86c21ac15b85af1ba0cae92f4274a255 100644 (file)
 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK            0x00000007L
 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK            0x000F0000L
 
+#define regCLK5_0_CLK5_spll_field_8                            0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX   0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT   0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK             0x00002000L
+
 #define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
 
 #define REG(reg_name) \
@@ -411,6 +417,17 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
 {
 }
 
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+       struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+       struct dc_context *ctx = clk_mgr->base.ctx;
+       uint32_t ssc_enable;
+
+       REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+       return ssc_enable == 1;
+}
+
 static void init_clk_states(struct clk_mgr *clk_mgr)
 {
        struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
@@ -428,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
 
 void dcn35_init_clocks(struct clk_mgr *clk_mgr)
 {
+       struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
        init_clk_states(clk_mgr);
+
+       // Adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk.
+       if (dcn35_is_spll_ssc_enabled(clk_mgr))
+               clk_mgr->dp_dto_source_clock_in_khz =
+                       dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+       else
+               clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
 }
 static struct clk_bw_params dcn35_bw_params = {
        .vram_type = Ddr4MemType,
@@ -517,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
 
 static struct dcn35_watermarks dummy_wms = { 0 };
 
+static struct dcn35_ss_info_table ss_info_table = {
+       .ss_divider = 1000,
+       .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+       struct dc_context *ctx = clk_mgr->base.ctx;
+       uint32_t clock_source;
+
+       REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+       // If it's DFS mode, clock_source is 0.
+       if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+               clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+               if (clk_mgr->dprefclk_ss_percentage != 0) {
+                       clk_mgr->ss_on_dprefclk = true;
+                       clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+               }
+       }
+}
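
One reading of the lookup above (interpretation, not stated in the patch): clock_source selects an entry in ss_percentage, and a value of 375 against ss_divider 1000 encodes a 0.375% downspread, which the dce_adjust_dp_ref_freq_for_ss() call in dcn35_init_clocks() then folds into the DP DTO reference clock.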
+
 static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
 {
        int i, num_valid_sets;
@@ -1061,6 +1109,8 @@ void dcn35_clk_mgr_construct(
        dce_clock_read_ss_info(&clk_mgr->base);
        /*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
 
+       dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
        clk_mgr->base.base.bw_params = &dcn35_bw_params;
 
        if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
index 5cc7f8da209c599f7585e8f10e499ef2118f34ff..61986e5cb491967643b61832c8e35dd7a4818d41 100644 (file)
@@ -436,6 +436,15 @@ bool dc_state_add_plane(
                goto out;
        }
 
+       if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+               /* ODM combine could prevent us from supporting more planes, so
+                * reset the ODM slice count back to 1 once all planes have been
+                * removed; this maximizes the number of planes supported when
+                * new planes are added.
+                */
+               resource_update_pipes_for_stream_with_slice_count(
+                               state, dc->current_state, dc->res_pool, stream, 1);
+
        otg_master_pipe = resource_get_otg_master_for_stream(
                        &state->res_ctx, stream);
        if (otg_master_pipe)
index 970644b695cd4f1d96f166cc1786987b460cdafd..b5e0289d2fe82aed149fab851ebc1b73213406ac 100644 (file)
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
        struct bp_pixel_clock_parameters bp_pc_params = {0};
        enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
 
-       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+       // Apply the spread-spectrum (SSC) adjusted dpref clock for eDP only.
+       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+               && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+               && encoding == DP_8b_10b_ENCODING)
                dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
        // For these signal types Driver to program DP_DTO without calling VBIOS Command table
        if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
        unsigned int modulo_hz = 0;
        unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
 
-       if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
-               dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
        if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
                clock_hz = REG_READ(PHASE[inst]);
 
index f07a4c7e48bc23ed0d2351aef46ef38907ee265f..52eab8fccb7f16e9b1f02d541c030f5736b79ace 100644 (file)
@@ -267,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
                                OTG_V_TOTAL_MAX_SEL, 1,
                                OTG_FORCE_LOCK_ON_EVENT, 0,
                                OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
-               // Setup manual flow control for EOF via TRIG_A
-               optc->funcs->setup_manual_trigger(optc);
        }
 }
 
index 246b211b1e85f74d362efac0e38384a2cafb59fc..65333141b1c1b05645f9ba374896dc3ee5a682e5 100644 (file)
@@ -735,7 +735,7 @@ static int smu_early_init(void *handle)
        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
-       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+       smu->smu_baco.state = SMU_BACO_STATE_NONE;
        smu->smu_baco.platform_support = false;
        smu->user_dpm_profile.fan_mode = -1;
 
@@ -1966,10 +1966,25 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        return 0;
 }
 
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+
+       if ((!adev->in_runpm) && (!adev->in_suspend) &&
+               (!amdgpu_in_reset(adev)) && amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+                                                                       IP_VERSION(13, 0, 10) &&
+               !amdgpu_device_has_display_hardware(adev))
+               ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+
+       return ret;
+}
+
 static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -1987,7 +2002,15 @@ static int smu_hw_fini(void *handle)
 
        adev->pm.dpm_enabled = false;
 
-       return smu_smc_hw_cleanup(smu);
+       ret = smu_smc_hw_cleanup(smu);
+       if (ret)
+               return ret;
+
+       ret = smu_reset_mp1_state(smu);
+       if (ret)
+               return ret;
+
+       return 0;
 }
 
 static void smu_late_fini(void *handle)
index a870bdd49a4e3cd4741e1fe852c7a337117451fb..1fa81575788c545a39178275ab036cbe6dcdb0c9 100644 (file)
@@ -424,6 +424,7 @@ enum smu_reset_mode {
 enum smu_baco_state {
        SMU_BACO_STATE_ENTER = 0,
        SMU_BACO_STATE_EXIT,
+       SMU_BACO_STATE_NONE,
 };
 
 struct smu_baco_context {
index 5bb7a63c0602b79012017bb9cfc7705fb581b38d..97522c0852589d63a84009a518b0af4719021ba5 100644 (file)
@@ -144,6 +144,37 @@ typedef struct {
   uint32_t MaxGfxClk;
 } DpmClocks_t;
 
+//Freq in MHz
+//Voltage in millivolts with 2 fractional bits
+typedef struct {
+  uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+  uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+  uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+  uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+  uint32_t VClocks0[NUM_VCN_DPM_LEVELS];
+  uint32_t VClocks1[NUM_VCN_DPM_LEVELS];
+  uint32_t DClocks0[NUM_VCN_DPM_LEVELS];
+  uint32_t DClocks1[NUM_VCN_DPM_LEVELS];
+  uint32_t VPEClocks[NUM_VPE_DPM_LEVELS];
+  uint32_t FclkClocks_Freq[NUM_FCLK_DPM_LEVELS];
+  uint32_t FclkClocks_Voltage[NUM_FCLK_DPM_LEVELS];
+  uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+  MemPstateTable_t MemPstateTable[NUM_MEM_PSTATE_LEVELS];
+
+  uint8_t  NumDcfClkLevelsEnabled;
+  uint8_t  NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+  uint8_t  NumSocClkLevelsEnabled;
+  uint8_t  Vcn0ClkLevelsEnabled;     //Applies to both Vclk0 and Dclk0
+  uint8_t  Vcn1ClkLevelsEnabled;     //Applies to both Vclk1 and Dclk1
+  uint8_t  VpeClkLevelsEnabled;
+  uint8_t  NumMemPstatesEnabled;
+  uint8_t  NumFclkLevelsEnabled;
+  uint8_t  spare;
+
+  uint32_t MinGfxClk;
+  uint32_t MaxGfxClk;
+} DpmClocks_t_v14_0_1;
+
 typedef struct {
   uint16_t CoreFrequency[16];          //Target core frequency [MHz]
   uint16_t CorePower[16];              //CAC calculated core power [mW]
@@ -224,7 +255,7 @@ typedef enum {
 #define TABLE_CUSTOM_DPM            2 // Called by Driver
 #define TABLE_BIOS_GPIO_CONFIG      3 // Called by BIOS
 #define TABLE_DPMCLOCKS             4 // Called by Driver and VBIOS
-#define TABLE_SPARE0                5 // Unused
+#define TABLE_MOMENTARY_PM          5 // Called by Tools
 #define TABLE_MODERN_STDBY          6 // Called by Tools for Modern Standby Log
 #define TABLE_SMU_METRICS           7 // Called by Driver and SMF/PMF
 #define TABLE_COUNT                 8
index 356e0f57a426ffa051fb40611947d9b50355ad87..ddb62586008319ba7c95758e562ca4118ddb5f48 100644 (file)
@@ -42,7 +42,7 @@
 #define FEATURE_EDC_BIT                      7
 #define FEATURE_PLL_POWER_DOWN_BIT           8
 #define FEATURE_VDDOFF_BIT                   9
-#define FEATURE_VCN_DPM_BIT                 10
+#define FEATURE_VCN_DPM_BIT                 10   /* this is for both VCN0 and VCN1 */
 #define FEATURE_DS_MPM_BIT                  11
 #define FEATURE_FCLK_DPM_BIT                12
 #define FEATURE_SOCCLK_DPM_BIT              13
@@ -56,9 +56,9 @@
 #define FEATURE_DS_GFXCLK_BIT               21
 #define FEATURE_DS_SOCCLK_BIT               22
 #define FEATURE_DS_LCLK_BIT                 23
-#define FEATURE_LOW_POWER_DCNCLKS_BIT       24  // for all DISP clks
+#define FEATURE_LOW_POWER_DCNCLKS_BIT       24
 #define FEATURE_DS_SHUBCLK_BIT              25
-#define FEATURE_SPARE0_BIT                  26  //SPARE
+#define FEATURE_RESERVED0_BIT               26
 #define FEATURE_ZSTATES_BIT                 27
 #define FEATURE_IOMMUL2_PG_BIT              28
 #define FEATURE_DS_FCLK_BIT                 29
@@ -66,8 +66,8 @@
 #define FEATURE_DS_MP1CLK_BIT               31
 #define FEATURE_WHISPER_MODE_BIT            32
 #define FEATURE_SMU_LOW_POWER_BIT           33
-#define FEATURE_SMART_L3_RINSER_BIT         34
-#define FEATURE_SPARE1_BIT                  35  //SPARE
+#define FEATURE_RESERVED1_BIT               34  /* v14_0_0 SMART_L3_RINSER; v14_0_1 RESERVED1 */
+#define FEATURE_GFX_DEM_BIT                 35  /* v14_0_0 SPARE; v14_0_1 GFX_DEM */
 #define FEATURE_PSI_BIT                     36
 #define FEATURE_PROCHOT_BIT                 37
 #define FEATURE_CPUOFF_BIT                  38
 #define FEATURE_PERF_LIMIT_BIT              42
 #define FEATURE_CORE_DLDO_BIT               43
 #define FEATURE_DVO_BIT                     44
-#define FEATURE_DS_VCN_BIT                  45
+#define FEATURE_DS_VCN_BIT                  45  /* v14_0_1 this is for both VCN0 and VCN1 */
 #define FEATURE_CPPC_BIT                    46
 #define FEATURE_CPPC_PREFERRED_CORES        47
 #define FEATURE_DF_CSTATES_BIT              48
-#define FEATURE_SPARE2_BIT                  49  //SPARE
+#define FEATURE_FAST_PSTATE_CLDO_BIT        49  /* v14_0_0 SPARE */
 #define FEATURE_ATHUB_PG_BIT                50
 #define FEATURE_VDDOFF_ECO_BIT              51
 #define FEATURE_ZSTATES_ECO_BIT             52
@@ -93,8 +93,8 @@
 #define FEATURE_DS_IPUCLK_BIT               58
 #define FEATURE_DS_VPECLK_BIT               59
 #define FEATURE_VPE_DPM_BIT                 60
-#define FEATURE_SPARE_61                    61
-#define FEATURE_FP_DIDT                     62
+#define FEATURE_SMART_L3_RINSER_BIT         61  /* v14_0_0 SPARE */
+#define FEATURE_PCC_BIT                     62  /* v14_0_0 FP_DIDT; v14_0_1 PCC */
 #define NUM_FEATURES                        63
 
 // Firmware Header/Footer
@@ -151,6 +151,43 @@ typedef struct {
   // MP1_EXT_SCRATCH7 = RTOS Current Job
 } FwStatus_t;
 
+typedef struct {
+  // MP1_EXT_SCRATCH0
+  uint32_t DpmHandlerID         : 8;
+  uint32_t ActivityMonitorID    : 8;
+  uint32_t DpmTimerID           : 8;
+  uint32_t DpmHubID             : 4;
+  uint32_t DpmHubTask           : 4;
+  // MP1_EXT_SCRATCH1
+  uint32_t CclkSyncStatus       : 8;
+  uint32_t ZstateStatus         : 4;
+  uint32_t Cpu1VddOff           : 4;
+  uint32_t DstateFun            : 4;
+  uint32_t DstateDev            : 4;
+  uint32_t GfxOffStatus         : 2;
+  uint32_t Cpu0Off              : 2;
+  uint32_t Cpu1Off              : 2;
+  uint32_t Cpu0VddOff           : 2;
+  // MP1_EXT_SCRATCH2
+  uint32_t P2JobHandler         :32;
+  // MP1_EXT_SCRATCH3
+  uint32_t PostCode             :32;
+  // MP1_EXT_SCRATCH4
+  uint32_t MsgPortBusy          :15;
+  uint32_t RsmuPmiP1Pending     : 1;
+  uint32_t RsmuPmiP2PendingCnt  : 8;
+  uint32_t DfCstateExitPending  : 1;
+  uint32_t Pc6EntryPending      : 1;
+  uint32_t Pc6ExitPending       : 1;
+  uint32_t WarmResetPending     : 1;
+  uint32_t Mp0ClkPending        : 1;
+  uint32_t InWhisperMode        : 1;
+  uint32_t spare2               : 2;
+  // MP1_EXT_SCRATCH5
+  uint32_t IdleMask             :32;
+  // MP1_EXT_SCRATCH6 = RTOS threads' status
+  // MP1_EXT_SCRATCH7 = RTOS Current Job
+} FwStatus_t_v14_0_1;
 
 #pragma pack(pop)
 
index ca7ce4251482dbdf22b5ea39a5e6ca55e763896d..c4dc5881d8df0953054cf6972d88f212e0c6872c 100644 (file)
 #define PPSMC_MSG_SetHardMinSocclkByFreq        0x13 ///< Set hard min for SOC CLK
 #define PPSMC_MSG_SetSoftMinFclk                0x14 ///< Set hard min for FCLK
 #define PPSMC_MSG_SetSoftMinVcn0                0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
-
 #define PPSMC_MSG_EnableGfxImu                  0x16 ///< Enable GFX IMU
-
-#define PPSMC_MSG_spare_0x17                    0x17
-#define PPSMC_MSG_spare_0x18                    0x18
+#define PPSMC_MSG_spare_0x17                    0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18                    0x18 ///< Get FCLK frequency
 #define PPSMC_MSG_AllowGfxOff                   0x19 ///< Inform PMFW of allowing GFXOFF entry
 #define PPSMC_MSG_DisallowGfxOff                0x1A ///< Inform PMFW of disallowing GFXOFF entry
 #define PPSMC_MSG_SetSoftMaxGfxClk              0x1B ///< Set soft max for GFX CLK
 #define PPSMC_MSG_SetHardMinGfxClk              0x1C ///< Set hard min for GFX CLK
-
 #define PPSMC_MSG_SetSoftMaxSocclkByFreq        0x1D ///< Set soft max for SOC CLK
 #define PPSMC_MSG_SetSoftMaxFclkByFreq          0x1E ///< Set soft max for FCLK
 #define PPSMC_MSG_SetSoftMaxVcn0                0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
-#define PPSMC_MSG_spare_0x20                    0x20
+#define PPSMC_MSG_spare_0x20                    0x20 ///< Set power limit percentage
 #define PPSMC_MSG_PowerDownJpeg0                0x21 ///< Power down Jpeg of VCN0
 #define PPSMC_MSG_PowerUpJpeg0                  0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
-
 #define PPSMC_MSG_SetHardMinFclkByFreq          0x23 ///< Set hard min for FCLK
 #define PPSMC_MSG_SetSoftMinSocclkByFreq        0x24 ///< Set soft min for SOC CLK
 #define PPSMC_MSG_AllowZstates                  0x25 ///< Inform PMFM of allowing Zstate entry, i.e. no Miracast activity
@@ -99,8 +95,8 @@
 #define PPSMC_MSG_PowerUpIspByTile              0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
 #define PPSMC_MSG_SetHardMinIspiclkByFreq       0x2B ///< Set HardMin by frequency for ISPICLK
 #define PPSMC_MSG_SetHardMinIspxclkByFreq       0x2C ///< Set HardMin by frequency for ISPXCLK
-#define PPSMC_MSG_PowerDownUmsch                0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler
-#define PPSMC_MSG_PowerUpUmsch                  0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerDownUmsch                0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch                  0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
 #define PPSMC_Message_IspStutterOn_MmhubPgDis   0x2F ///< ISP StutterOn mmHub PgDis
 #define PPSMC_Message_IspStutterOff_MmhubPgEn   0x30 ///< ISP StutterOff mmHub PgEn
 #define PPSMC_MSG_PowerUpVpe                    0x31 ///< Power up VPE
 #define PPSMC_MSG_DisableLSdma                  0x35 ///< Disable LSDMA
 #define PPSMC_MSG_SetSoftMaxVpe                 0x36 ///<
 #define PPSMC_MSG_SetSoftMinVpe                 0x37 ///<
-#define PPSMC_Message_Count                     0x38 ///< Total number of PPSMC messages
+#define PPSMC_MSG_AllocMALLCache                0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache              0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count                     0x3A ///< Total number of PPSMC messages
 /** @}*/
 
 /**
index 3f7463c1c1a91948588ae8ece2fd6c4cbffb1406..4af1985ae44668edf74b40c4f26dbd1bcd83c376 100644 (file)
@@ -27,6 +27,7 @@
 
 #define SMU14_DRIVER_IF_VERSION_INV 0xFFFFFFFF
 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_0 0x7
+#define SMU14_DRIVER_IF_VERSION_SMU_V14_0_1 0x6
 #define SMU14_DRIVER_IF_VERSION_SMU_V14_0_2 0x1
 
 #define FEATURE_MASK(feature) (1ULL << feature)
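FEATURE_MASK() turns a FEATURE_*_BIT index into a 64-bit mask for testing against an enabled-features word. A small usage sketch (how the features word is obtained is left out and assumed):

static bool vcn_dpm_enabled(uint64_t enabled_features)
{
	/* FEATURE_VCN_DPM_BIT covers both VCN0 and VCN1 on v14_0_1 */
	return !!(enabled_features & FEATURE_MASK(FEATURE_VCN_DPM_BIT));
}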
index 9c03296f92cdd41c868406dfd861bf56a77c2e81..67117ced7c6ae65405fb3a5338743d31270e8cd3 100644 (file)
@@ -2751,7 +2751,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
 
        switch (mp1_state) {
        case PP_MP1_STATE_UNLOAD:
-               ret = smu_cmn_set_mp1_state(smu, mp1_state);
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                     SMU_MSG_PrepareMp1ForUnload,
+                                                     0x55, NULL);
+
+               if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+                       ret = smu_v13_0_disable_pmfw_state(smu);
+
                break;
        default:
                /* Ignore others */
index bb98156b2fa1d5fff3d71bcea59b2b63f9265b9e..949131bd1ecb215c960b7aabb9ad690da715d90c 100644 (file)
@@ -226,8 +226,18 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
 
-       if (!en && !adev->in_s0ix)
+       if (!en && !adev->in_s0ix) {
+               /* Add a GFX reset as a workaround just before sending the
+                * MP1_UNLOAD message, to prevent GC/RLC/PMFW from entering
+                * an invalid state.
+                */
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
+                                                     SMU_RESET_MODE_2, NULL);
+               if (ret)
+                       return ret;
+
                ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+       }
 
        return ret;
 }
index 9e39f99154f94df84495dbce069e2651f2b7f104..07a65e005785d6d0fceddd2564d63e84d08e755e 100644 (file)
@@ -234,7 +234,7 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
                smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
                break;
        case IP_VERSION(14, 0, 1):
-               smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
+               smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
                break;
 
        default:
index d6de6d97286c6990e24c79b318f533168c967bd0..63399c00cc28ffaa88725068496f35625b9807cc 100644 (file)
@@ -161,7 +161,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
 
        SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-       SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+       SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)),
                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -171,7 +171,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
                goto err0_out;
        smu_table->metrics_time = 0;
 
-       smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+       smu_table->clocks_table = kzalloc(max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)), GFP_KERNEL);
        if (!smu_table->clocks_table)
                goto err1_out;
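An alternative to sizing the buffer with max(sizeof(DpmClocks_t), sizeof(DpmClocks_t_v14_0_1)) is a union (an assumption, not what the patch does), which makes the shared-buffer intent explicit:

union smu14_dpm_clocks {
	DpmClocks_t		v14_0_0;
	DpmClocks_t_v14_0_1	v14_0_1;
};

/* kzalloc(sizeof(union smu14_dpm_clocks), GFP_KERNEL) then yields
 * the larger of the two layouts automatically. */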
 
@@ -593,6 +593,60 @@ static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
        return ret;
 }
 
+static int smu_v14_0_1_get_dpm_freq_by_index(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t dpm_level,
+                                               uint32_t *freq)
+{
+       DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+       if (!clk_table || clk_type >= SMU_CLK_COUNT)
+               return -EINVAL;
+
+       switch (clk_type) {
+       case SMU_SOCCLK:
+               if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+                       return -EINVAL;
+               *freq = clk_table->SocClocks[dpm_level];
+               break;
+       case SMU_VCLK:
+               if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+                       return -EINVAL;
+               *freq = clk_table->VClocks0[dpm_level];
+               break;
+       case SMU_DCLK:
+               if (dpm_level >= clk_table->Vcn0ClkLevelsEnabled)
+                       return -EINVAL;
+               *freq = clk_table->DClocks0[dpm_level];
+               break;
+       case SMU_VCLK1:
+               if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+                       return -EINVAL;
+               *freq = clk_table->VClocks1[dpm_level];
+               break;
+       case SMU_DCLK1:
+               if (dpm_level >= clk_table->Vcn1ClkLevelsEnabled)
+                       return -EINVAL;
+               *freq = clk_table->DClocks1[dpm_level];
+               break;
+       case SMU_UCLK:
+       case SMU_MCLK:
+               if (dpm_level >= clk_table->NumMemPstatesEnabled)
+                       return -EINVAL;
+               *freq = clk_table->MemPstateTable[dpm_level].MemClk;
+               break;
+       case SMU_FCLK:
+               if (dpm_level >= clk_table->NumFclkLevelsEnabled)
+                       return -EINVAL;
+               *freq = clk_table->FclkClocks_Freq[dpm_level];
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
                                                enum smu_clk_type clk_type,
                                                uint32_t dpm_level,
@@ -637,6 +691,19 @@ static int smu_v14_0_0_get_dpm_freq_by_index(struct smu_context *smu,
        return 0;
 }
 
+static int smu_v14_0_common_get_dpm_freq_by_index(struct smu_context *smu,
+                                               enum smu_clk_type clk_type,
+                                               uint32_t dpm_level,
+                                               uint32_t *freq)
+{
+       /* propagate the per-version result so callers see lookup errors */
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+               return smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+       else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+               return smu_v14_0_1_get_dpm_freq_by_index(smu, clk_type, dpm_level, freq);
+
+       return 0;
+}
+
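As more per-IP-version variants accumulate, the repeated amdgpu_ip_version() checks in each smu_v14_0_common_*() wrapper could be folded into an ops table resolved once at init time. A design sketch, hypothetical and not part of the patch:

struct smu_v14_0_clk_ops {
	int (*get_dpm_freq_by_index)(struct smu_context *smu,
				     enum smu_clk_type clk_type,
				     uint32_t dpm_level, uint32_t *freq);
};

static const struct smu_v14_0_clk_ops smu_v14_0_0_clk_ops = {
	.get_dpm_freq_by_index = smu_v14_0_0_get_dpm_freq_by_index,
};

static const struct smu_v14_0_clk_ops smu_v14_0_1_clk_ops = {
	.get_dpm_freq_by_index = smu_v14_0_1_get_dpm_freq_by_index,
};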
 static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
                                                enum smu_clk_type clk_type)
 {
@@ -657,6 +724,8 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
                break;
        case SMU_VCLK:
        case SMU_DCLK:
+       case SMU_VCLK1:
+       case SMU_DCLK1:
                feature_id = SMU_FEATURE_VCN_DPM_BIT;
                break;
        default:
@@ -666,6 +735,126 @@ static bool smu_v14_0_0_clk_dpm_is_enabled(struct smu_context *smu,
        return smu_cmn_feature_is_enabled(smu, feature_id);
 }
 
+static int smu_v14_0_1_get_dpm_ultimate_freq(struct smu_context *smu,
+                                                       enum smu_clk_type clk_type,
+                                                       uint32_t *min,
+                                                       uint32_t *max)
+{
+       DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+       uint32_t clock_limit;
+       uint32_t max_dpm_level, min_dpm_level;
+       int ret = 0;
+
+       if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) {
+               switch (clk_type) {
+               case SMU_MCLK:
+               case SMU_UCLK:
+                       clock_limit = smu->smu_table.boot_values.uclk;
+                       break;
+               case SMU_FCLK:
+                       clock_limit = smu->smu_table.boot_values.fclk;
+                       break;
+               case SMU_GFXCLK:
+               case SMU_SCLK:
+                       clock_limit = smu->smu_table.boot_values.gfxclk;
+                       break;
+               case SMU_SOCCLK:
+                       clock_limit = smu->smu_table.boot_values.socclk;
+                       break;
+               case SMU_VCLK:
+               case SMU_VCLK1:
+                       clock_limit = smu->smu_table.boot_values.vclk;
+                       break;
+               case SMU_DCLK:
+               case SMU_DCLK1:
+                       clock_limit = smu->smu_table.boot_values.dclk;
+                       break;
+               default:
+                       clock_limit = 0;
+                       break;
+               }
+
+               /* clock in MHz units */
+               if (min)
+                       *min = clock_limit / 100;
+               if (max)
+                       *max = clock_limit / 100;
+
+               return 0;
+       }
+
+       if (max) {
+               switch (clk_type) {
+               case SMU_GFXCLK:
+               case SMU_SCLK:
+                       *max = clk_table->MaxGfxClk;
+                       break;
+               case SMU_MCLK:
+               case SMU_UCLK:
+               case SMU_FCLK:
+                       max_dpm_level = 0;
+                       break;
+               case SMU_SOCCLK:
+                       max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+                       break;
+               case SMU_VCLK:
+               case SMU_DCLK:
+                       max_dpm_level = clk_table->Vcn0ClkLevelsEnabled - 1;
+                       break;
+               case SMU_VCLK1:
+               case SMU_DCLK1:
+                       max_dpm_level = clk_table->Vcn1ClkLevelsEnabled - 1;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       goto failed;
+               }
+
+               if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+                       ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+                       if (ret)
+                               goto failed;
+               }
+       }
+
+       if (min) {
+               switch (clk_type) {
+               case SMU_GFXCLK:
+               case SMU_SCLK:
+                       *min = clk_table->MinGfxClk;
+                       break;
+               case SMU_MCLK:
+               case SMU_UCLK:
+                       min_dpm_level = clk_table->NumMemPstatesEnabled - 1;
+                       break;
+               case SMU_FCLK:
+                       min_dpm_level = clk_table->NumFclkLevelsEnabled - 1;
+                       break;
+               case SMU_SOCCLK:
+                       min_dpm_level = 0;
+                       break;
+               case SMU_VCLK:
+               case SMU_DCLK:
+               case SMU_VCLK1:
+               case SMU_DCLK1:
+                       min_dpm_level = 0;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       goto failed;
+               }
+
+               if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+                       ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+                       if (ret)
+                               goto failed;
+               }
+       }
+
+failed:
+       return ret;
+}
+
 static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
                                                        enum smu_clk_type clk_type,
                                                        uint32_t *min,
@@ -736,7 +925,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
                }
 
                if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
-                       ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+                       ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
                        if (ret)
                                goto failed;
                }
@@ -768,7 +957,7 @@ static int smu_v14_0_0_get_dpm_ultimate_freq(struct smu_context *smu,
                }
 
                if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
-                       ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+                       ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
                        if (ret)
                                goto failed;
                }
@@ -778,6 +967,19 @@ failed:
        return ret;
 }
 
+static int smu_v14_0_common_get_dpm_ultimate_freq(struct smu_context *smu,
+                                                       enum smu_clk_type clk_type,
+                                                       uint32_t *min,
+                                                       uint32_t *max)
+{
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+               return smu_v14_0_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
+       else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+               return smu_v14_0_1_get_dpm_ultimate_freq(smu, clk_type, min, max);
+
+       return 0;
+}
+
 static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
                                            enum smu_clk_type clk_type,
                                            uint32_t *value)
@@ -811,6 +1013,37 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
        return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
 }
 
+static int smu_v14_0_1_get_dpm_level_count(struct smu_context *smu,
+                                          enum smu_clk_type clk_type,
+                                          uint32_t *count)
+{
+       DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+       switch (clk_type) {
+       case SMU_SOCCLK:
+               *count = clk_table->NumSocClkLevelsEnabled;
+               break;
+       case SMU_VCLK:
+       case SMU_DCLK:
+               *count = clk_table->Vcn0ClkLevelsEnabled;
+               break;
+       case SMU_VCLK1:
+       case SMU_DCLK1:
+               *count = clk_table->Vcn1ClkLevelsEnabled;
+               break;
+       case SMU_MCLK:
+               *count = clk_table->NumMemPstatesEnabled;
+               break;
+       case SMU_FCLK:
+               *count = clk_table->NumFclkLevelsEnabled;
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
 static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
                                           enum smu_clk_type clk_type,
                                           uint32_t *count)
@@ -840,6 +1073,18 @@ static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
        return 0;
 }
 
+static int smu_v14_0_common_get_dpm_level_count(struct smu_context *smu,
+                                          enum smu_clk_type clk_type,
+                                          uint32_t *count)
+{
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+               return smu_v14_0_0_get_dpm_level_count(smu, clk_type, count);
+       else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+               return smu_v14_0_1_get_dpm_level_count(smu, clk_type, count);
+
+       return 0;
+}
+
 static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
                                        enum smu_clk_type clk_type, char *buf)
 {
@@ -866,18 +1111,20 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu,
        case SMU_SOCCLK:
        case SMU_VCLK:
        case SMU_DCLK:
+       case SMU_VCLK1:
+       case SMU_DCLK1:
        case SMU_MCLK:
        case SMU_FCLK:
                ret = smu_v14_0_0_get_current_clk_freq(smu, clk_type, &cur_value);
                if (ret)
                        break;
 
-               ret = smu_v14_0_0_get_dpm_level_count(smu, clk_type, &count);
+               ret = smu_v14_0_common_get_dpm_level_count(smu, clk_type, &count);
                if (ret)
                        break;
 
                for (i = 0; i < count; i++) {
-                       ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, i, &value);
+                       ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, i, &value);
                        if (ret)
                                break;
 
@@ -940,8 +1187,13 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu,
                break;
        case SMU_VCLK:
        case SMU_DCLK:
-               msg_set_min = SMU_MSG_SetHardMinVcn;
-               msg_set_max = SMU_MSG_SetSoftMaxVcn;
+               msg_set_min = SMU_MSG_SetHardMinVcn0;
+               msg_set_max = SMU_MSG_SetSoftMaxVcn0;
+               break;
+       case SMU_VCLK1:
+       case SMU_DCLK1:
+               msg_set_min = SMU_MSG_SetHardMinVcn1;
+               msg_set_max = SMU_MSG_SetSoftMaxVcn1;
                break;
        default:
                return -EINVAL;
@@ -971,11 +1223,11 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu,
        case SMU_FCLK:
        case SMU_VCLK:
        case SMU_DCLK:
-               ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+               ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
                if (ret)
                        break;
 
-               ret = smu_v14_0_0_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+               ret = smu_v14_0_common_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
                if (ret)
                        break;
 
@@ -1000,25 +1252,25 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
 
        switch (level) {
        case AMD_DPM_FORCED_LEVEL_HIGH:
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
                sclk_min = sclk_max;
                fclk_min = fclk_max;
                socclk_min = socclk_max;
                break;
        case AMD_DPM_FORCED_LEVEL_LOW:
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
                sclk_max = sclk_min;
                fclk_max = fclk_min;
                socclk_max = socclk_min;
                break;
        case AMD_DPM_FORCED_LEVEL_AUTO:
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
-               smu_v14_0_0_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
+               smu_v14_0_common_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
                break;
        case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
        case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -1067,6 +1319,18 @@ static int smu_v14_0_0_set_performance_level(struct smu_context *smu,
        return ret;
 }
 
+static int smu_v14_0_1_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+       DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+
+       smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+       smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+       smu->gfx_actual_hard_min_freq = 0;
+       smu->gfx_actual_soft_max_freq = 0;
+
+       return 0;
+}
+
 static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 {
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1079,6 +1343,16 @@ static int smu_v14_0_0_set_fine_grain_gfx_freq_parameters(struct smu_context *sm
        return 0;
 }
 
+static int smu_v14_0_common_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+               return smu_v14_0_0_set_fine_grain_gfx_freq_parameters(smu);
+       else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+               return smu_v14_0_1_set_fine_grain_gfx_freq_parameters(smu);
+
+       return 0;
+}
+
 static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu,
                                      bool enable)
 {
@@ -1095,6 +1369,25 @@ static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu,
                                               0, NULL);
 }
 
+static int smu_14_0_1_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+       DpmClocks_t_v14_0_1 *clk_table = smu->smu_table.clocks_table;
+       uint8_t idx;
+
+       /* Only the SOCCLK and VPECLK clock information is copied, to
+        * provide the VPE DPM settings for use.
+        */
+       for (idx = 0; idx < NUM_SOCCLK_DPM_LEVELS; idx++) {
+               clock_table->SocClocks[idx].Freq = (idx < clk_table->NumSocClkLevelsEnabled) ? clk_table->SocClocks[idx] : 0;
+               clock_table->SocClocks[idx].Vol = 0;
+       }
+
+       for (idx = 0; idx < NUM_VPE_DPM_LEVELS; idx++) {
+               clock_table->VPEClocks[idx].Freq = (idx < clk_table->VpeClkLevelsEnabled) ? clk_table->VPEClocks[idx] : 0;
+               clock_table->VPEClocks[idx].Vol = 0;
+       }
+
+       return 0;
+}
+
 static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
 {
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
@@ -1114,6 +1407,16 @@ static int smu_14_0_0_get_dpm_table(struct smu_context *smu, struct dpm_clocks *
        return 0;
 }
 
+static int smu_v14_0_common_get_dpm_table(struct smu_context *smu, struct dpm_clocks *clock_table)
+{
+       if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0))
+               return smu_14_0_0_get_dpm_table(smu, clock_table);
+       else if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
+               return smu_14_0_1_get_dpm_table(smu, clock_table);
+
+       return 0;
+}
+
 static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
        .check_fw_status = smu_v14_0_check_fw_status,
        .check_fw_version = smu_v14_0_check_fw_version,
@@ -1135,16 +1438,16 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
        .set_driver_table_location = smu_v14_0_set_driver_table_location,
        .gfx_off_control = smu_v14_0_gfx_off_control,
        .mode2_reset = smu_v14_0_0_mode2_reset,
-       .get_dpm_ultimate_freq = smu_v14_0_0_get_dpm_ultimate_freq,
+       .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq,
        .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table,
        .print_clk_levels = smu_v14_0_0_print_clk_levels,
        .force_clk_levels = smu_v14_0_0_force_clk_levels,
        .set_performance_level = smu_v14_0_0_set_performance_level,
-       .set_fine_grain_gfx_freq_parameters = smu_v14_0_0_set_fine_grain_gfx_freq_parameters,
+       .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters,
        .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu,
        .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable,
        .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable,
-       .get_dpm_clock_table = smu_14_0_0_get_dpm_table,
+       .get_dpm_clock_table = smu_v14_0_common_get_dpm_table,
 };
 
 static void smu_v14_0_0_set_smu_mailbox_registers(struct smu_context *smu)
index ebb6d8ebd44eb6f70480b9655e6f253e41c77c04..1e9259416980ec49cce1b7fc080f562f002e29c5 100644 (file)
@@ -180,6 +180,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
 {
        struct ast_device *ast = to_ast_device(dev);
        u8 video_on_off = on;
+       u32 i = 0;
 
        // Video On/Off
        ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xE3, (u8) ~AST_DP_VIDEO_ENABLE, on);
@@ -192,6 +193,8 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
                                                ASTDP_MIRROR_VIDEO_ENABLE) != video_on_off) {
                        // wait 1 ms
                        mdelay(1);
+                       if (++i > 200)
+                               break;
                }
        }
 }
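The bounded wait above caps an open-coded 1 ms poll at roughly 200 ms. An alternative sketch uses the kernel's read_poll_timeout() helper from <linux/iopoll.h>; ast_read_video_status() is a hypothetical wrapper around the VGACRDF indexed-register read:

static int ast_dp_wait_video_status(struct ast_device *ast, u8 expected)
{
	u8 val;

	/* poll every 1 ms, give up after 200 ms */
	return read_poll_timeout(ast_read_video_status, val, val == expected,
				 1000, 200 * 1000, false, ast);
}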
index 871e4e2129d6daac8dadcb3262227451c59296c8..0683a129b36285cc96c25d57d3115cb111fc2003 100644 (file)
@@ -777,6 +777,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
        unsigned int total_modes_count = 0;
        struct drm_client_offset *offsets;
        unsigned int connector_count = 0;
+       /* points to modes protected by mode_config.mutex */
        struct drm_display_mode **modes;
        struct drm_crtc **crtcs;
        int i, ret = 0;
@@ -845,7 +846,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
                drm_client_pick_crtcs(client, connectors, connector_count,
                                      crtcs, modes, 0, width, height);
        }
-       mutex_unlock(&dev->mode_config.mutex);
 
        drm_client_modeset_release(client);
 
@@ -875,6 +875,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
                        modeset->y = offset->y;
                }
        }
+       mutex_unlock(&dev->mode_config.mutex);
 
        mutex_unlock(&client->modeset_mutex);
 out:
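The fix above extends the mode_config.mutex critical section so the modes[] pointers remain valid while they are read. On kernels with <linux/cleanup.h>, a scoped guard would tie the lock to a block and make this class of early-unlock bug harder to reintroduce; a sketch, not the actual patch:

	scoped_guard(mutex, &dev->mode_config.mutex) {
		/* probe connectors, pick CRTCs, copy data out of modes[] */
	}
	/* modes[] contents must no longer be dereferenced here */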
index ed89b86ea625aaa408064916b982ffa92f9ef4b5..f672bfd70d455156aed1a17c2fb4929c7771962f 100644 (file)
@@ -2534,7 +2534,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
                intel_atomic_get_old_cdclk_state(state);
        const struct intel_cdclk_state *new_cdclk_state =
                intel_atomic_get_new_cdclk_state(state);
-       enum pipe pipe = new_cdclk_state->pipe;
+       struct intel_cdclk_config cdclk_config;
+       enum pipe pipe;
 
        if (!intel_cdclk_changed(&old_cdclk_state->actual,
                                 &new_cdclk_state->actual))
@@ -2543,12 +2544,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
        if (IS_DG2(i915))
                intel_cdclk_pcode_pre_notify(state);
 
-       if (pipe == INVALID_PIPE ||
-           old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
-               drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+       if (new_cdclk_state->disable_pipes) {
+               cdclk_config = new_cdclk_state->actual;
+               pipe = INVALID_PIPE;
+       } else {
+               if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+                       cdclk_config = new_cdclk_state->actual;
+                       pipe = new_cdclk_state->pipe;
+               } else {
+                       cdclk_config = old_cdclk_state->actual;
+                       pipe = INVALID_PIPE;
+               }
 
-               intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+               cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+                                                old_cdclk_state->actual.voltage_level);
        }
+
+       drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+       intel_set_cdclk(i915, &cdclk_config, pipe);
 }
 
 /**
@@ -2566,7 +2580,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
                intel_atomic_get_old_cdclk_state(state);
        const struct intel_cdclk_state *new_cdclk_state =
                intel_atomic_get_new_cdclk_state(state);
-       enum pipe pipe = new_cdclk_state->pipe;
+       enum pipe pipe;
 
        if (!intel_cdclk_changed(&old_cdclk_state->actual,
                                 &new_cdclk_state->actual))
@@ -2575,12 +2589,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
        if (IS_DG2(i915))
                intel_cdclk_pcode_post_notify(state);
 
-       if (pipe != INVALID_PIPE &&
-           old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
-               drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+       if (!new_cdclk_state->disable_pipes &&
+           new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+               pipe = new_cdclk_state->pipe;
+       else
+               pipe = INVALID_PIPE;
+
+       drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
 
-               intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
-       }
+       intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
 }
 
 static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -3058,6 +3075,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
                return NULL;
 
        cdclk_state->pipe = INVALID_PIPE;
+       cdclk_state->disable_pipes = false;
 
        return &cdclk_state->base;
 }
@@ -3236,6 +3254,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
                if (ret)
                        return ret;
 
+               new_cdclk_state->disable_pipes = true;
+
                drm_dbg_kms(&dev_priv->drm,
                            "Modeset required for cdclk change\n");
        }
index 48fd7d39e0cd9c4f6d57970f35531b91aa6ab055..71bc032bfef16efd359757373f256dfc88bd86fc 100644 (file)
@@ -51,6 +51,9 @@ struct intel_cdclk_state {
 
        /* bitmask of active pipes */
        u8 active_pipes;
+
+       /* update cdclk with pipes disabled */
+       bool disable_pipes;
 };
 
 int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
index c587a8efeafcf5e561429d925c2893208908e03f..c17462b4c2ac1930a085eff2256f8642b9ce8830 100644 (file)
@@ -4256,7 +4256,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
 static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
                                       const struct intel_crtc_state *crtc_state2)
 {
+       /*
+        * FIXME the modeset sequence is currently wrong and
+        * can't deal with bigjoiner + port sync at the same time.
+        */
        return crtc_state1->hw.active && crtc_state2->hw.active &&
+               !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
                crtc_state1->output_types == crtc_state2->output_types &&
                crtc_state1->output_format == crtc_state2->output_format &&
                crtc_state1->lane_count == crtc_state2->lane_count &&
index abd62bebc46d0e58d5bc78d8f4500ddcbc6098f1..e583515f9b25a33da4825d10cf42a9f73fa17990 100644 (file)
@@ -2725,7 +2725,11 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
                intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
        int pixel_clock;
 
-       if (has_seamless_m_n(connector))
+       /*
+        * FIXME all joined pipes share the same transcoder.
+        * Need to account for that when updating M/N live.
+        */
+       if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
                pipe_config->update_m_n = true;
 
        if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
index b98a87883fefb016be68ceb72a408258868b55ec..9db43bd81ce2fabe51963e129f135d3e8dd71fa7 100644 (file)
@@ -691,12 +691,15 @@ int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
        u8 bcaps;
        int ret;
 
+       *hdcp_capable = false;
+       *hdcp2_capable = false;
        if (!intel_encoder_is_mst(connector->encoder))
                return -EINVAL;
 
        ret =  _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
        if (ret)
-               return ret;
+               drm_dbg_kms(&i915->drm,
+                           "HDCP2 DPCD capability read failed err: %d\n", ret);
 
        ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
        if (ret)
index b6e539f1342c29ad97f5f46de8b51d9a358375bb..aabd018bd73743ff354353506b2ce007268a88c5 100644 (file)
@@ -1422,6 +1422,17 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
                return;
        }
 
+       /*
+        * FIXME figure out what is wrong with PSR+bigjoiner and
+        * fix it. Presumably something related to the fact that
+        * PSR is a transcoder level feature.
+        */
+       if (crtc_state->bigjoiner_pipes) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR disabled due to bigjoiner\n");
+               return;
+       }
+
        if (CAN_PANEL_REPLAY(intel_dp))
                crtc_state->has_panel_replay = true;
        else
index eb5bd0743902065d9b4bcac060d1fd340d448069..f542ee1db1d97047eedfffff76c04cbbaf3435ea 100644 (file)
@@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
        const struct drm_display_info *info = &connector->base.display_info;
        int vmin, vmax;
 
+       /*
+        * FIXME all joined pipes share the same transcoder.
+        * Need to account for that during VRR toggle/push/etc.
+        */
+       if (crtc_state->bigjoiner_pipes)
+               return;
+
        if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
                return;
 
index f3dcae4b9d455ed37d3cc3fd1635760cd9e264af..0f83c6d4376ffba646279586479f1710161d6633 100644 (file)
@@ -1403,14 +1403,17 @@ static void guc_cancel_busyness_worker(struct intel_guc *guc)
         * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through
         * every possible call stack is unfeasible. It would be too intrusive to many
         * areas that really don't care about the GuC backend. However, there is the
-        * 'reset_in_progress' flag available, so just use that.
+        * I915_RESET_BACKOFF flag, and the gt->reset.mutex can be tested with
+        * mutex_is_locked(). So just use those. Note that testing both is
+        * required due to the hideously complex nature of the i915 driver's
+        * reset code paths.
         *
         * And note that in the case of a reset occurring during driver unload
-        * (wedge_on_fini), skipping the cancel in _prepare (when the reset flag is set
-        * is fine because there is another cancel in _finish (when the reset flag is
-        * not).
+        * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the
+        * reset flag/mutex are set) is fine because there is another explicit cancel in
+        * intel_guc_submission_fini (when the reset flag/mutex are not).
         */
-       if (guc_to_gt(guc)->uc.reset_in_progress)
+       if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
+           test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
                cancel_delayed_work(&guc->timestamp.work);
        else
                cancel_delayed_work_sync(&guc->timestamp.work);
@@ -1424,8 +1427,6 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
        unsigned long flags;
        ktime_t unused;
 
-       guc_cancel_busyness_worker(guc);
-
        spin_lock_irqsave(&guc->timestamp.lock, flags);
 
        guc_update_pm_timestamp(guc, &unused);
@@ -2004,13 +2005,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
 
 void intel_guc_submission_reset_finish(struct intel_guc *guc)
 {
-       /*
-        * Ensure the busyness worker gets cancelled even on a fatal wedge.
-        * Note that reset_prepare is not allowed to because it confuses lockdep.
-        */
-       if (guc_submission_initialized(guc))
-               guc_cancel_busyness_worker(guc);
-
        /* Reset called during driver load or during wedge? */
        if (unlikely(!guc_submission_initialized(guc) ||
                     !intel_guc_is_fw_running(guc) ||
@@ -2136,6 +2130,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
        if (!guc->submission_initialized)
                return;
 
+       guc_fini_engine_stats(guc);
        guc_flush_destroyed_contexts(guc);
        guc_lrc_desc_pool_destroy_v69(guc);
        i915_sched_engine_put(guc->sched_engine);
index 6dfe5d9456c69e06987be23367c243bb1f8f908e..399bc319180b042cdcf78e2415b16ef52d980c61 100644 (file)
@@ -637,6 +637,10 @@ void intel_uc_reset_finish(struct intel_uc *uc)
 {
        struct intel_guc *guc = &uc->guc;
 
+       /*
+        * NB: The wedge code path results in prepare -> prepare -> finish -> finish.
+        * So this function is sometimes called with the in-progress flag not set.
+        */
        uc->reset_in_progress = false;
 
        /* Firmware expected to be running when this function is called */
index 0674aca0f8a3f593bad4dbe929be4260f5a6219a..cf0b1de1c07124d2fe45d2f7f220f5cebed71227 100644 (file)
@@ -1377,6 +1377,10 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
        if (adreno_is_a618(gpu))
                gpu->ubwc_config.highest_bank_bit = 14;
 
+       if (adreno_is_a619(gpu))
+               /* TODO: Should be 14 but causes corruption at e.g. 1920x1200 on DP */
+               gpu->ubwc_config.highest_bank_bit = 13;
+
        if (adreno_is_a619_holi(gpu))
                gpu->ubwc_config.highest_bank_bit = 13;
 
index 1f5245fc2cdc6ca6ffd109fa6844eda84f79cd32..a847a0f7a73c9f61fde92fcf75f36a4f37dadf07 100644 (file)
@@ -852,7 +852,7 @@ static void a6xx_get_shader_block(struct msm_gpu *gpu,
                        (block->type << 8) | i);
 
                in += CRASHDUMP_READ(in, REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE,
-                       block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
+                       block->size, out);
 
                out += block->size * sizeof(u32);
        }
index 9a9f7092c526a630c8cb8099e7ae0921d6b1d3a1..a3e60ac70689e7f8af8813d978626cd7d4c9fb3e 100644 (file)
@@ -324,6 +324,7 @@ static const struct dpu_wb_cfg x1e80100_wb[] = {
        },
 };
 
+/* TODO: INTF 3, 7 and 8 are used for DP MST; marked as INTF_NONE for now */
 static const struct dpu_intf_cfg x1e80100_intf[] = {
        {
                .name = "intf_0", .id = INTF_0,
@@ -358,8 +359,8 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
                .name = "intf_3", .id = INTF_3,
                .base = 0x37000, .len = 0x280,
                .features = INTF_SC7280_MASK,
-               .type = INTF_DP,
-               .controller_id = MSM_DP_CONTROLLER_1,
+               .type = INTF_NONE,
+               .controller_id = MSM_DP_CONTROLLER_0,   /* pair with intf_0 for DP MST */
                .prog_fetch_lines_worst_case = 24,
                .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 30),
                .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 31),
@@ -368,7 +369,7 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
                .base = 0x38000, .len = 0x280,
                .features = INTF_SC7280_MASK,
                .type = INTF_DP,
-               .controller_id = MSM_DP_CONTROLLER_2,
+               .controller_id = MSM_DP_CONTROLLER_1,
                .prog_fetch_lines_worst_case = 24,
                .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 20),
                .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 21),
@@ -381,6 +382,33 @@ static const struct dpu_intf_cfg x1e80100_intf[] = {
                .prog_fetch_lines_worst_case = 24,
                .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 22),
                .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 23),
+       }, {
+               .name = "intf_6", .id = INTF_6,
+               .base = 0x3A000, .len = 0x280,
+               .features = INTF_SC7280_MASK,
+               .type = INTF_DP,
+               .controller_id = MSM_DP_CONTROLLER_2,
+               .prog_fetch_lines_worst_case = 24,
+               .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
+               .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+       }, {
+               .name = "intf_7", .id = INTF_7,
+               .base = 0x3b000, .len = 0x280,
+               .features = INTF_SC7280_MASK,
+               .type = INTF_NONE,
+               .controller_id = MSM_DP_CONTROLLER_2,   /* pair with intf_6 for DP MST */
+               .prog_fetch_lines_worst_case = 24,
+               .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 18),
+               .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 19),
+       }, {
+               .name = "intf_8", .id = INTF_8,
+               .base = 0x3c000, .len = 0x280,
+               .features = INTF_SC7280_MASK,
+               .type = INTF_NONE,
+               .controller_id = MSM_DP_CONTROLLER_1,   /* pair with intf_4 for DP MST */
+               .prog_fetch_lines_worst_case = 24,
+               .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12),
+               .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13),
        },
 };
 
index ef871239adb2a37e11c6d364d85f7384403459ee..68fae048a9a837410eb6051f9af52a6e0c399585 100644 (file)
@@ -459,15 +459,15 @@ int dpu_core_perf_debugfs_init(struct dpu_kms *dpu_kms, struct dentry *parent)
                        &perf->core_clk_rate);
        debugfs_create_u32("enable_bw_release", 0600, entry,
                        (u32 *)&perf->enable_bw_release);
-       debugfs_create_u32("threshold_low", 0600, entry,
+       debugfs_create_u32("threshold_low", 0400, entry,
                        (u32 *)&perf->perf_cfg->max_bw_low);
-       debugfs_create_u32("threshold_high", 0600, entry,
+       debugfs_create_u32("threshold_high", 0400, entry,
                        (u32 *)&perf->perf_cfg->max_bw_high);
-       debugfs_create_u32("min_core_ib", 0600, entry,
+       debugfs_create_u32("min_core_ib", 0400, entry,
                        (u32 *)&perf->perf_cfg->min_core_ib);
-       debugfs_create_u32("min_llcc_ib", 0600, entry,
+       debugfs_create_u32("min_llcc_ib", 0400, entry,
                        (u32 *)&perf->perf_cfg->min_llcc_ib);
-       debugfs_create_u32("min_dram_ib", 0600, entry,
+       debugfs_create_u32("min_dram_ib", 0400, entry,
                        (u32 *)&perf->perf_cfg->min_dram_ib);
        debugfs_create_file("perf_mode", 0600, entry,
                        (u32 *)perf, &dpu_core_perf_mode_fops);
index 946dd0135dffcf7dcd2b7f6445c62c048a044e8d..6a0a74832fb64d95adc6b0524ba15bd1faaa0bb1 100644 (file)
@@ -525,14 +525,14 @@ int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms,
        int ret;
 
        if (!irq_cb) {
-               DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
-                         DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
+               DPU_ERROR("IRQ=[%d, %d] NULL callback\n",
+                         DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
                return -EINVAL;
        }
 
        if (!dpu_core_irq_is_valid(irq_idx)) {
-               DPU_ERROR("invalid IRQ=[%d, %d]\n",
-                         DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx));
+               DPU_ERROR("invalid IRQ=[%d, %d] irq_cb:%ps\n",
+                         DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx), irq_cb);
                return -EINVAL;
        }
 
index c4cb82af5c2f2f77ae7c9804f4fd6a12c42d42c0..ffbfde9225898619c11b6fd3d59062ed1a65b719 100644 (file)
@@ -484,7 +484,7 @@ static void dp_display_handle_video_request(struct dp_display_private *dp)
        }
 }
 
-static int dp_display_handle_port_ststus_changed(struct dp_display_private *dp)
+static int dp_display_handle_port_status_changed(struct dp_display_private *dp)
 {
        int rc = 0;
 
@@ -541,7 +541,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
                drm_dbg_dp(dp->drm_dev, "hpd_state=%d sink_request=%d\n",
                                        dp->hpd_state, sink_request);
                if (sink_request & DS_PORT_STATUS_CHANGED)
-                       rc = dp_display_handle_port_ststus_changed(dp);
+                       rc = dp_display_handle_port_status_changed(dp);
                else
                        rc = dp_display_handle_irq_hpd(dp);
        }
@@ -588,6 +588,7 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
        ret = dp_display_usbpd_configure_cb(&pdev->dev);
        if (ret) {      /* link train failed */
                dp->hpd_state = ST_DISCONNECTED;
+               pm_runtime_put_sync(&pdev->dev);
        } else {
                dp->hpd_state = ST_MAINLINK_READY;
        }
@@ -645,6 +646,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
                dp_display_host_phy_exit(dp);
                dp->hpd_state = ST_DISCONNECTED;
                dp_display_notify_disconnect(&dp->dp_display.pdev->dev);
+               pm_runtime_put_sync(&pdev->dev);
                mutex_unlock(&dp->event_mutex);
                return 0;
        }
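Both teardown paths above now drop the runtime-PM reference taken when the HPD event was raised. A hedged sketch of the invariant being restored (the matching get is assumed to happen where the plug event is queued):

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure */
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}
	/* ... bring up the link; on any later failure or on unplug: */
	pm_runtime_put_sync(&pdev->dev);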
index e3f61c39df69b4c31ffae28ea7f2ecab500f8863..80166f702a0dbab3a36a489c3c853e35533b4fe2 100644 (file)
@@ -89,7 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 
        for (i = 0; i < n; i++) {
                ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
-               drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)",
+               drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
                              fb->base.id, i, msm_fb->iova[i], ret);
                if (ret)
                        return ret;
@@ -176,7 +176,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
        const struct msm_format *format;
        int ret, i, n;
 
-       drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)",
+       drm_dbg_state(dev, "create framebuffer: mode_cmd=%p (%dx%d@%4.4s)\n",
                        mode_cmd, mode_cmd->width, mode_cmd->height,
                        (char *)&mode_cmd->pixel_format);
 
@@ -232,7 +232,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 
        refcount_set(&msm_fb->dirtyfb, 1);
 
-       drm_dbg_state(dev, "create: FB ID: %d (%p)", fb->base.id, fb);
+       drm_dbg_state(dev, "create: FB ID: %d (%p)\n", fb->base.id, fb);
 
        return fb;
 
index 84c21ec2ceeae08d8506688f73acf530ef40012b..af6a6fcb11736f6dc7637805647b9c717e684a09 100644 (file)
@@ -149,7 +149,7 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc)
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return -ENXIO;
-       drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+       drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
        return vblank_ctrl_queue_work(priv, crtc, true);
 }
 
@@ -160,7 +160,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc)
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return;
-       drm_dbg_vbl(dev, "crtc=%u", crtc->base.id);
+       drm_dbg_vbl(dev, "crtc=%u\n", crtc->base.id);
        vblank_ctrl_queue_work(priv, crtc, false);
 }
 
index 4bf486b57101367708bba2b6fe4bdd1d985f1d19..cb05f7f48a98bb53fc3e03b57166466c675acd7c 100644 (file)
@@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
        return ERR_PTR(-EINVAL);
 }
 
+static void of_fini(void *p)
+{
+       kfree(p);
+}
+
 const struct nvbios_source
 nvbios_of = {
        .name = "OpenFirmware",
        .init = of_init,
-       .fini = (void(*)(void *))kfree,
+       .fini = of_fini,
        .read = of_read,
        .size = of_size,
        .rw = false,
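The of_fini() wrapper replaces a cast of kfree() to void (*)(void *): calling through a function pointer whose type does not match the callee's prototype (kfree() takes const void *) is undefined behaviour and traps under kernel CFI (CONFIG_CFI_CLANG). The same idiom applies anywhere a kfree-style callback is needed, for example:

/* a wrapper whose prototype exactly matches the callback type,
 * calling kfree() directly instead of through a cast pointer */
static void example_fini(void *p)
{
	kfree(p);
}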
index 9994cbd6f1c40c0c798498687f4f5d7168e883c5..9858c1438aa7feda7d84ff5442f611b23f101b2d 100644 (file)
@@ -1112,7 +1112,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
        rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
 
        str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
-       strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+       strings = (char *)rpc + str_offset;
        for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
                int name_len = strlen(r535_registry_entries[i].name) + 1;
 
index f38385fe76bbb45d92bf75cf078faec1f8be52ff..b91019cd5acb191a560b7217ff792cf4222004fa 100644 (file)
@@ -502,11 +502,18 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        mapping_set_unevictable(mapping);
 
        for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+               /* This can happen if the last fault only partially filled
+                * this section of the pages array before failing. In that
+                * case we skip the pages that were already filled.
+                */
+               if (pages[i])
+                       continue;
+
                pages[i] = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(pages[i])) {
                        ret = PTR_ERR(pages[i]);
                        pages[i] = NULL;
-                       goto err_pages;
+                       goto err_unlock;
                }
        }
 
@@ -514,7 +521,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
        if (ret)
-               goto err_pages;
+               goto err_unlock;
 
        ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
@@ -537,8 +544,6 @@ out:
 
 err_map:
        sg_free_table(sgt);
-err_pages:
-       drm_gem_shmem_put_pages(&bo->base);
 err_unlock:
        dma_resv_unlock(obj->resv);
 err_bo:
index 368d26da0d6a233467cdc8ef5820ebf4b7ddb964..9febc8b73f09efaaaac9d6fb8d2776f2148aed89 100644 (file)
@@ -58,16 +58,56 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
                           signed long timeout)
 {
        struct qxl_device *qdev;
+       struct qxl_release *release;
+       int count = 0, sc = 0;
+       bool have_drawable_releases;
        unsigned long cur, end = jiffies + timeout;
 
        qdev = container_of(fence->lock, struct qxl_device, release_lock);
+       release = container_of(fence, struct qxl_release, base);
+       have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
 
-       if (!wait_event_timeout(qdev->release_event,
-                               (dma_fence_is_signaled(fence) ||
-                                (qxl_io_notify_oom(qdev), 0)),
-                               timeout))
-               return 0;
+retry:
+       sc++;
+
+       if (dma_fence_is_signaled(fence))
+               goto signaled;
+
+       qxl_io_notify_oom(qdev);
+
+       for (count = 0; count < 11; count++) {
+               if (!qxl_queue_garbage_collect(qdev, true))
+                       break;
+
+               if (dma_fence_is_signaled(fence))
+                       goto signaled;
+       }
+
+       if (dma_fence_is_signaled(fence))
+               goto signaled;
+
+       if (have_drawable_releases || sc < 4) {
+               if (sc > 2)
+                       /* back off */
+                       usleep_range(500, 1000);
+
+               if (time_after(jiffies, end))
+                       return 0;
+
+               if (have_drawable_releases && sc > 300) {
+                       DMA_FENCE_WARN(fence,
+                                      "failed to wait on release %llu after spincount %d\n",
+                                      fence->context & ~0xf0000000, sc);
+                       goto signaled;
+               }
+               goto retry;
+       }
+       /*
+        * yeah, original sync_obj_wait gave up after 3 spins when
+        * have_drawable_releases is not set.
+        */
 
+signaled:
        cur = jiffies;
        if (time_after(cur, end))
                return 0;
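
The rewritten qxl_fence_wait() trades a single wait_event_timeout() for
an explicit poll loop: test the fence, nudge the device (OOM notify plus
garbage collection), start sleeping only after a few spins, and stay
bounded by the caller's timeout throughout. The general shape, sketched
with hypothetical done()/kick_device() helpers:

    unsigned long end = jiffies + timeout;
    int spins = 0;

    while (!done()) {                       /* hypothetical predicate */
            kick_device();                  /* hypothetical nudge */
            if (++spins > 2)
                    usleep_range(500, 1000);        /* 0.5-1 ms backoff */
            if (time_after(jiffies, end))
                    return 0;               /* timed out */
    }
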
index c7d90f96d16a67beddf9395cf1ad611cb6f1cf34..0a304706e01322a6372727a27eb3fd0330471b31 100644 (file)
@@ -666,11 +666,12 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
                [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
 
-       /* TTM currently doesn't fully support SEV encryption. */
-       if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
-               return -EINVAL;
-
-       if (vmw_force_coherent)
+       /*
+        * When running with SEV we always want dma mappings, because
+        * otherwise ttm tt pool pages will bounce through swiotlb running
+        * out of available space.
+        */
+       if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
                dev_priv->map_mode = vmw_dma_alloc_coherent;
        else if (vmw_restrict_iommu)
                dev_priv->map_mode = vmw_dma_map_bind;
index e4db069f0db3f1fd27ed80eb84fc4544ea0831df..6ec375c1c4b6c05aed07ba8432214b3de270c56e 100644 (file)
@@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
        xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
 
        drmm_mutex_init(&xe->drm, &xe->sb_lock);
-       drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
-       drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
-       drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
-       drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
-       drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
        xe->enabled_irq_mask = ~0;
 
        err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
index 0b1266c88a6af39cba103e3447697c0540c0cc0d..deddc8be48c0af2133969c7452d12cd2e104f291 100644 (file)
 #define RING_EXECLIST_STATUS_LO(base)          XE_REG((base) + 0x234)
 #define RING_EXECLIST_STATUS_HI(base)          XE_REG((base) + 0x234 + 4)
 
-#define RING_CONTEXT_CONTROL(base)             XE_REG((base) + 0x244)
+#define RING_CONTEXT_CONTROL(base)             XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
 #define          CTX_CTRL_INHIBIT_SYN_CTX_SWITCH       REG_BIT(3)
 #define          CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT   REG_BIT(0)
 
index b82233a4160624d2d3dad941327bf7ecff5a3382..9ac7fbe201b3c22fa25959f98af87d453a962a17 100644 (file)
@@ -290,7 +290,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
         * As y can be < 2, we compute tau4 = (4 | x) << y
         * and then add 2 when doing the final right shift to account for units
         */
-       tau4 = ((1 << x_w) | x) << y;
+       tau4 = (u64)((1 << x_w) | x) << y;
 
        /* val in hwmon interface units (millisec) */
        out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@@ -330,7 +330,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
        r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
        x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
        y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
-       tau4 = ((1 << x_w) | x) << y;
+       tau4 = (u64)((1 << x_w) | x) << y;
        max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 
        if (val > max_win)
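
Both tau4 computations gain a u64 cast because ((1 << x_w) | x) is
evaluated as a 32-bit int, so the following << y can overflow before the
result is ever widened to u64. The cast has to land before the shift:

    u32 x = 2, x_w = 2, y = 30;

    u64 bad  = ((1 << x_w) | x) << y;       /* shifts in 32-bit int: overflows */
    u64 good = (u64)((1 << x_w) | x) << y;  /* widen first, then shift */

The same class of fix appears in the xe_migrate.c hunks below, where
index arithmetic is cast to u64 (or size_t) before being scaled by
XE_PAGE_SIZE.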
index 1426febe86eb676305772d7ee444b70af8254848..57066faf575eec7edebf335da434b6c1615d935f 100644 (file)
@@ -525,9 +525,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
 
 static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
 {
-       regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
-                                   _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
-                                   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+       regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+                                                      CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
        /* TODO: Timestamp */
 }
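
RING_CONTEXT_CONTROL is now declared XE_REG_OPTION_MASKED, and
set_context_control() writes both bits through a single
_MASKED_BIT_ENABLE(). Masked registers carry a write-enable mask in bits
31:16: a bit in 15:0 only changes when its companion mask bit is set in
the same write, so writers never need a read-modify-write cycle. A
sketch of the macro shape (mirroring the i915/xe helpers):

    /* bits 31:16 select which of bits 15:0 this write may change */
    #define MASKED_BIT_ENABLE(b)    (((b) << 16) | (b))
    #define MASKED_BIT_DISABLE(b)   ((b) << 16)

    /* set BIT(3) and BIT(0) in one write, leaving other bits untouched */
    u32 val = MASKED_BIT_ENABLE(BIT(3) | BIT(0));
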
index ee1bb938c493487415445cd41c8b771080464522..2ba4fb9511f63fa894796dec90c89963a3dae1b0 100644 (file)
@@ -227,7 +227,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
                if (vm->flags & XE_VM_FLAG_64K && level == 1)
                        flags = XE_PDE_64K;
 
-               entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+               entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
                                                  XE_PAGE_SIZE, pat_index);
                xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
                          entry | flags);
@@ -235,7 +235,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 
        /* Write PDE's that point to our BO. */
        for (i = 0; i < num_entries - num_level; i++) {
-               entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+               entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
                                                  pat_index);
 
                xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
@@ -291,7 +291,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 #define VM_SA_UPDATE_UNIT_SIZE         (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
 #define NUM_VMUSA_WRITES_PER_UNIT      (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
        drm_suballoc_manager_init(&m->vm_update_sa,
-                                 (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+                                 (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
                                  NUM_VMUSA_UNIT_PER_PAGE, 0);
 
        m->pt_bo = bo;
@@ -490,7 +490,7 @@ static void emit_pte(struct xe_migrate *m,
        struct xe_vm *vm = m->q->vm;
        u16 pat_index;
        u32 ptes;
-       u64 ofs = at_pt * XE_PAGE_SIZE;
+       u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
        u64 cur_ofs;
 
        /* Indirect access needs compression enabled uncached PAT index */
index 783975d1384fc4d8e780cb2cdf450b5bab8b55de..7c52757a89db9abde6fb211178b9cedb4b1c7740 100644 (file)
@@ -351,11 +351,6 @@ static int host1x_device_uevent(const struct device *dev,
        return 0;
 }
 
-static int host1x_dma_configure(struct device *dev)
-{
-       return of_dma_configure(dev, dev->of_node, true);
-}
-
 static const struct dev_pm_ops host1x_device_pm_ops = {
        .suspend = pm_generic_suspend,
        .resume = pm_generic_resume,
@@ -369,7 +364,6 @@ const struct bus_type host1x_bus_type = {
        .name = "host1x",
        .match = host1x_device_match,
        .uevent = host1x_device_uevent,
-       .dma_configure = host1x_dma_configure,
        .pm = &host1x_device_pm_ops,
 };
 
@@ -458,8 +452,6 @@ static int host1x_device_add(struct host1x *host1x,
        device->dev.bus = &host1x_bus_type;
        device->dev.parent = host1x->dev;
 
-       of_dma_configure(&device->dev, host1x->dev->of_node, true);
-
        device->dev.dma_parms = &device->dma_parms;
        dma_set_max_seg_size(&device->dev, UINT_MAX);
 
index adbf674355b2b8a472c03bd60092960cb0c742cf..fb8cd8469328ee094619c91eb227a04e24bf66cb 100644 (file)
@@ -153,7 +153,9 @@ void vmbus_free_ring(struct vmbus_channel *channel)
        hv_ringbuffer_cleanup(&channel->inbound);
 
        if (channel->ringbuffer_page) {
-               __free_pages(channel->ringbuffer_page,
+               /* In a CoCo VM leak the memory if it didn't get re-encrypted */
+               if (!channel->ringbuffer_gpadlhandle.decrypted)
+                       __free_pages(channel->ringbuffer_page,
                             get_order(channel->ringbuffer_pagecount
                                       << PAGE_SHIFT));
                channel->ringbuffer_page = NULL;
@@ -436,9 +438,18 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
                (atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);
 
        ret = create_gpadl_header(type, kbuffer, size, send_offset, &msginfo);
-       if (ret)
+       if (ret) {
+               gpadl->decrypted = false;
                return ret;
+       }
 
+       /*
+        * Set the "decrypted" flag to true for the set_memory_decrypted()
+        * success case. In the failure case, the encryption state of the
+        * memory is unknown. Leave "decrypted" as true to ensure the
+        * memory will be leaked instead of going back on the free list.
+        */
+       gpadl->decrypted = true;
        ret = set_memory_decrypted((unsigned long)kbuffer,
                                   PFN_UP(size));
        if (ret) {
@@ -527,9 +538,15 @@ cleanup:
 
        kfree(msginfo);
 
-       if (ret)
-               set_memory_encrypted((unsigned long)kbuffer,
-                                    PFN_UP(size));
+       if (ret) {
+               /*
+                * If set_memory_encrypted() fails, the decrypted flag is
+                * left as true so the memory is leaked instead of being
+                * put back on the free list.
+                */
+               if (!set_memory_encrypted((unsigned long)kbuffer, PFN_UP(size)))
+                       gpadl->decrypted = false;
+       }
 
        return ret;
 }
@@ -850,6 +867,8 @@ post_msg_err:
        if (ret)
                pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
 
+       gpadl->decrypted = ret;
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
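
The decrypted flag threaded through these GPADL paths encodes one rule
for confidential (CoCo) VMs: when set_memory_decrypted() or
set_memory_encrypted() fails, the buffer's encryption state is unknown,
and the memory must be leaked rather than freed, otherwise a still
host-visible page could land back on the allocator's free list. The
teardown-side check reduces to:

    /* free only what is provably re-encrypted; otherwise leak it */
    if (!set_memory_encrypted((unsigned long)buf, nr_pages))
            vfree(buf);
    else
            pr_warn("leaking %d pages with unknown encryption state\n",
                    nr_pages);

The netvsc, hv_uio, and vmbus connection hunks elsewhere in this pull
apply the same rule at their respective free sites.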
index 3cabeeabb1cacf0627b02110d6f4fc17abc7e4a0..f001ae880e1dbefc6243e6d902e529db43291987 100644 (file)
@@ -237,8 +237,17 @@ int vmbus_connect(void)
                                vmbus_connection.monitor_pages[0], 1);
        ret |= set_memory_decrypted((unsigned long)
                                vmbus_connection.monitor_pages[1], 1);
-       if (ret)
+       if (ret) {
+               /*
+                * If set_memory_decrypted() fails, the encryption state
+                * of the memory is unknown. So leak the memory instead
+                * of risking returning decrypted memory to the free list.
+                * For simplicity, always handle both pages the same.
+                */
+               vmbus_connection.monitor_pages[0] = NULL;
+               vmbus_connection.monitor_pages[1] = NULL;
                goto cleanup;
+       }
 
        /*
         * Set_memory_decrypted() will change the memory contents if
@@ -337,13 +346,19 @@ void vmbus_disconnect(void)
                vmbus_connection.int_page = NULL;
        }
 
-       set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
-       set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[1], 1);
+       if (vmbus_connection.monitor_pages[0]) {
+               if (!set_memory_encrypted(
+                       (unsigned long)vmbus_connection.monitor_pages[0], 1))
+                       hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
+               vmbus_connection.monitor_pages[0] = NULL;
+       }
 
-       hv_free_hyperv_page(vmbus_connection.monitor_pages[0]);
-       hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
-       vmbus_connection.monitor_pages[0] = NULL;
-       vmbus_connection.monitor_pages[1] = NULL;
+       if (vmbus_connection.monitor_pages[1]) {
+               if (!set_memory_encrypted(
+                       (unsigned long)vmbus_connection.monitor_pages[1], 1))
+                       hv_free_hyperv_page(vmbus_connection.monitor_pages[1]);
+               vmbus_connection.monitor_pages[1] = NULL;
+       }
 }
 
 /*
index 4cb17603a8289b259e64dc6a5be215cb1e1a8a57..12a707ab73f85cf363e6503346741a85bc9b82df 100644 (file)
@@ -131,7 +131,7 @@ static ssize_t id_show(struct device *dev, struct device_attribute *dev_attr,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
+       return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.child_relid);
 }
 static DEVICE_ATTR_RO(id);
 
@@ -142,7 +142,7 @@ static ssize_t state_show(struct device *dev, struct device_attribute *dev_attr,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n", hv_dev->channel->state);
+       return sysfs_emit(buf, "%d\n", hv_dev->channel->state);
 }
 static DEVICE_ATTR_RO(state);
 
@@ -153,7 +153,7 @@ static ssize_t monitor_id_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
+       return sysfs_emit(buf, "%d\n", hv_dev->channel->offermsg.monitorid);
 }
 static DEVICE_ATTR_RO(monitor_id);
 
@@ -164,8 +164,8 @@ static ssize_t class_id_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "{%pUl}\n",
-                      &hv_dev->channel->offermsg.offer.if_type);
+       return sysfs_emit(buf, "{%pUl}\n",
+                         &hv_dev->channel->offermsg.offer.if_type);
 }
 static DEVICE_ATTR_RO(class_id);
 
@@ -176,8 +176,8 @@ static ssize_t device_id_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "{%pUl}\n",
-                      &hv_dev->channel->offermsg.offer.if_instance);
+       return sysfs_emit(buf, "{%pUl}\n",
+                         &hv_dev->channel->offermsg.offer.if_instance);
 }
 static DEVICE_ATTR_RO(device_id);
 
@@ -186,7 +186,7 @@ static ssize_t modalias_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
 
-       return sprintf(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
+       return sysfs_emit(buf, "vmbus:%*phN\n", UUID_SIZE, &hv_dev->dev_type);
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -199,7 +199,7 @@ static ssize_t numa_node_show(struct device *dev,
        if (!hv_dev->channel)
                return -ENODEV;
 
-       return sprintf(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
+       return sysfs_emit(buf, "%d\n", cpu_to_node(hv_dev->channel->target_cpu));
 }
 static DEVICE_ATTR_RO(numa_node);
 #endif
@@ -212,9 +212,8 @@ static ssize_t server_monitor_pending_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n",
-                      channel_pending(hv_dev->channel,
-                                      vmbus_connection.monitor_pages[0]));
+       return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+                         vmbus_connection.monitor_pages[0]));
 }
 static DEVICE_ATTR_RO(server_monitor_pending);
 
@@ -226,9 +225,8 @@ static ssize_t client_monitor_pending_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n",
-                      channel_pending(hv_dev->channel,
-                                      vmbus_connection.monitor_pages[1]));
+       return sysfs_emit(buf, "%d\n", channel_pending(hv_dev->channel,
+                         vmbus_connection.monitor_pages[1]));
 }
 static DEVICE_ATTR_RO(client_monitor_pending);
 
@@ -240,9 +238,8 @@ static ssize_t server_monitor_latency_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n",
-                      channel_latency(hv_dev->channel,
-                                      vmbus_connection.monitor_pages[0]));
+       return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+                         vmbus_connection.monitor_pages[0]));
 }
 static DEVICE_ATTR_RO(server_monitor_latency);
 
@@ -254,9 +251,8 @@ static ssize_t client_monitor_latency_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n",
-                      channel_latency(hv_dev->channel,
-                                      vmbus_connection.monitor_pages[1]));
+       return sysfs_emit(buf, "%d\n", channel_latency(hv_dev->channel,
+                         vmbus_connection.monitor_pages[1]));
 }
 static DEVICE_ATTR_RO(client_monitor_latency);
 
@@ -268,9 +264,8 @@ static ssize_t server_monitor_conn_id_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n",
-                      channel_conn_id(hv_dev->channel,
-                                      vmbus_connection.monitor_pages[0]));
+       return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+                         vmbus_connection.monitor_pages[0]));
 }
 static DEVICE_ATTR_RO(server_monitor_conn_id);
 
@@ -282,9 +277,8 @@ static ssize_t client_monitor_conn_id_show(struct device *dev,
 
        if (!hv_dev->channel)
                return -ENODEV;
-       return sprintf(buf, "%d\n",
-                      channel_conn_id(hv_dev->channel,
-                                      vmbus_connection.monitor_pages[1]));
+       return sysfs_emit(buf, "%d\n", channel_conn_id(hv_dev->channel,
+                         vmbus_connection.monitor_pages[1]));
 }
 static DEVICE_ATTR_RO(client_monitor_conn_id);
 
@@ -303,7 +297,7 @@ static ssize_t out_intr_mask_show(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
+       return sysfs_emit(buf, "%d\n", outbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(out_intr_mask);
 
@@ -321,7 +315,7 @@ static ssize_t out_read_index_show(struct device *dev,
                                          &outbound);
        if (ret < 0)
                return ret;
-       return sprintf(buf, "%d\n", outbound.current_read_index);
+       return sysfs_emit(buf, "%d\n", outbound.current_read_index);
 }
 static DEVICE_ATTR_RO(out_read_index);
 
@@ -340,7 +334,7 @@ static ssize_t out_write_index_show(struct device *dev,
                                          &outbound);
        if (ret < 0)
                return ret;
-       return sprintf(buf, "%d\n", outbound.current_write_index);
+       return sysfs_emit(buf, "%d\n", outbound.current_write_index);
 }
 static DEVICE_ATTR_RO(out_write_index);
 
@@ -359,7 +353,7 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
                                          &outbound);
        if (ret < 0)
                return ret;
-       return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
+       return sysfs_emit(buf, "%d\n", outbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(out_read_bytes_avail);
 
@@ -378,7 +372,7 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
                                          &outbound);
        if (ret < 0)
                return ret;
-       return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
+       return sysfs_emit(buf, "%d\n", outbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(out_write_bytes_avail);
 
@@ -396,7 +390,7 @@ static ssize_t in_intr_mask_show(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
+       return sysfs_emit(buf, "%d\n", inbound.current_interrupt_mask);
 }
 static DEVICE_ATTR_RO(in_intr_mask);
 
@@ -414,7 +408,7 @@ static ssize_t in_read_index_show(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", inbound.current_read_index);
+       return sysfs_emit(buf, "%d\n", inbound.current_read_index);
 }
 static DEVICE_ATTR_RO(in_read_index);
 
@@ -432,7 +426,7 @@ static ssize_t in_write_index_show(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", inbound.current_write_index);
+       return sysfs_emit(buf, "%d\n", inbound.current_write_index);
 }
 static DEVICE_ATTR_RO(in_write_index);
 
@@ -451,7 +445,7 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
+       return sysfs_emit(buf, "%d\n", inbound.bytes_avail_toread);
 }
 static DEVICE_ATTR_RO(in_read_bytes_avail);
 
@@ -470,7 +464,7 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
+       return sysfs_emit(buf, "%d\n", inbound.bytes_avail_towrite);
 }
 static DEVICE_ATTR_RO(in_write_bytes_avail);
 
@@ -480,7 +474,7 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
        struct vmbus_channel *channel = hv_dev->channel, *cur_sc;
-       int buf_size = PAGE_SIZE, n_written, tot_written;
+       int n_written;
        struct list_head *cur;
 
        if (!channel)
@@ -488,25 +482,21 @@ static ssize_t channel_vp_mapping_show(struct device *dev,
 
        mutex_lock(&vmbus_connection.channel_mutex);
 
-       tot_written = snprintf(buf, buf_size, "%u:%u\n",
-               channel->offermsg.child_relid, channel->target_cpu);
+       n_written = sysfs_emit(buf, "%u:%u\n",
+                              channel->offermsg.child_relid,
+                              channel->target_cpu);
 
        list_for_each(cur, &channel->sc_list) {
-               if (tot_written >= buf_size - 1)
-                       break;
 
                cur_sc = list_entry(cur, struct vmbus_channel, sc_list);
-               n_written = scnprintf(buf + tot_written,
-                                    buf_size - tot_written,
-                                    "%u:%u\n",
-                                    cur_sc->offermsg.child_relid,
-                                    cur_sc->target_cpu);
-               tot_written += n_written;
+               n_written += sysfs_emit_at(buf, n_written, "%u:%u\n",
+                                         cur_sc->offermsg.child_relid,
+                                         cur_sc->target_cpu);
        }
 
        mutex_unlock(&vmbus_connection.channel_mutex);
 
-       return tot_written;
+       return n_written;
 }
 static DEVICE_ATTR_RO(channel_vp_mapping);
 
@@ -516,7 +506,7 @@ static ssize_t vendor_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
 
-       return sprintf(buf, "0x%x\n", hv_dev->vendor_id);
+       return sysfs_emit(buf, "0x%x\n", hv_dev->vendor_id);
 }
 static DEVICE_ATTR_RO(vendor);
 
@@ -526,7 +516,7 @@ static ssize_t device_show(struct device *dev,
 {
        struct hv_device *hv_dev = device_to_hv_device(dev);
 
-       return sprintf(buf, "0x%x\n", hv_dev->device_id);
+       return sysfs_emit(buf, "0x%x\n", hv_dev->device_id);
 }
 static DEVICE_ATTR_RO(device);
 
@@ -551,7 +541,7 @@ static ssize_t driver_override_show(struct device *dev,
        ssize_t len;
 
        device_lock(dev);
-       len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+       len = sysfs_emit(buf, "%s\n", hv_dev->driver_override);
        device_unlock(dev);
 
        return len;
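
Every sprintf()/snprintf() in this sysfs file becomes
sysfs_emit()/sysfs_emit_at(). sysfs_emit() knows the buffer is the
single page that sysfs allocates, warns if the pointer is not
page-aligned, and clamps output to PAGE_SIZE, so a show() callback can
no longer silently overrun the buffer. The canonical form, for a
hypothetical attribute:

    static ssize_t foo_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d\n", 42);
    }
    static DEVICE_ATTR_RO(foo);
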
index 33228c1c8980f32a5e8af323587601a4783b5b7f..ac6754a85f3507ee88bd3847359a53292a14dc9f 100644 (file)
@@ -3232,28 +3232,29 @@ static void iommu_snp_enable(void)
                return;
        /*
         * The SNP support requires that IOMMU must be enabled, and is
-        * not configured in the passthrough mode.
+        * configured with V1 page table (DTE[Mode] = 0 is not supported).
         */
        if (no_iommu || iommu_default_passthrough()) {
-               pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
-               cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
-               return;
+               pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+               goto disable_snp;
+       }
+
+       if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+               pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+               goto disable_snp;
        }
 
        amd_iommu_snp_en = check_feature(FEATURE_SNP);
        if (!amd_iommu_snp_en) {
-               pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
-               cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
-               return;
+               pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+               goto disable_snp;
        }
 
        pr_info("IOMMU SNP support enabled.\n");
+       return;
 
-       /* Enforce IOMMU v1 pagetable when SNP is enabled. */
-       if (amd_iommu_pgtable != AMD_IOMMU_V1) {
-               pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
-               amd_iommu_pgtable = AMD_IOMMU_V1;
-       }
+disable_snp:
+       cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 #endif
 }
 
index d35c1b8c8e65ce5a9c6f6ae3aae555d91eb4d3d0..e692217fcb28011478139d7dc146d74dcd9456e8 100644 (file)
@@ -1692,26 +1692,29 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
 
 static u16 domain_id_alloc(void)
 {
+       unsigned long flags;
        int id;
 
-       spin_lock(&pd_bitmap_lock);
+       spin_lock_irqsave(&pd_bitmap_lock, flags);
        id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
        BUG_ON(id == 0);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __set_bit(id, amd_iommu_pd_alloc_bitmap);
        else
                id = 0;
-       spin_unlock(&pd_bitmap_lock);
+       spin_unlock_irqrestore(&pd_bitmap_lock, flags);
 
        return id;
 }
 
 static void domain_id_free(int id)
 {
-       spin_lock(&pd_bitmap_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pd_bitmap_lock, flags);
        if (id > 0 && id < MAX_DOMAIN_ID)
                __clear_bit(id, amd_iommu_pd_alloc_bitmap);
-       spin_unlock(&pd_bitmap_lock);
+       spin_unlock_irqrestore(&pd_bitmap_lock, flags);
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
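
domain_id_alloc() and domain_id_free() move to the irqsave lock variants
because pd_bitmap_lock can now be taken from contexts that run with
interrupts disabled; mixing plain spin_lock() with such users risks a
deadlock when an interrupt arrives on the CPU already holding the lock.
The resulting pattern for an IRQ-safe ID bitmap, as a sketch:

    #define MAX_IDS 64                      /* hypothetical bound */

    static DEFINE_SPINLOCK(id_lock);
    static unsigned long id_bitmap[BITS_TO_LONGS(MAX_IDS)];

    static int id_alloc(void)
    {
            unsigned long flags;
            int id;

            spin_lock_irqsave(&id_lock, flags);
            id = find_first_zero_bit(id_bitmap, MAX_IDS);
            if (id < MAX_IDS)
                    __set_bit(id, id_bitmap);
            else
                    id = -1;
            spin_unlock_irqrestore(&id_lock, flags);
            return id;
    }
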
index 50eb9aed47cc585e1307b3d0f47252b2edcdaeb0..a7ecd90303dc42f9fbe120e75f2053b1390c5445 100644 (file)
@@ -4299,9 +4299,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
        }
 
        dev_iommu_priv_set(dev, info);
-       ret = device_rbtree_insert(iommu, info);
-       if (ret)
-               goto free;
+       if (pdev && pci_ats_supported(pdev)) {
+               ret = device_rbtree_insert(iommu, info);
+               if (ret)
+                       goto free;
+       }
 
        if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
                ret = intel_pasid_alloc_table(dev);
@@ -4336,7 +4338,8 @@ static void intel_iommu_release_device(struct device *dev)
        struct intel_iommu *iommu = info->iommu;
 
        mutex_lock(&iommu->iopf_lock);
-       device_rbtree_remove(info);
+       if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+               device_rbtree_remove(info);
        mutex_unlock(&iommu->iopf_lock);
 
        if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
index cf43e798eca49936e79a20ea5397a6b0e9f1cc82..44083d01852dbf997f8cc4001f3b278ea5d7fa07 100644 (file)
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
        iommu_pmu_set_filter(domain, event->attr.config1,
                             IOMMU_PMU_FILTER_DOMAIN, idx,
                             event->attr.config1);
-       iommu_pmu_set_filter(pasid, event->attr.config1,
+       iommu_pmu_set_filter(pasid, event->attr.config2,
                             IOMMU_PMU_FILTER_PASID, idx,
                             event->attr.config1);
        iommu_pmu_set_filter(ats, event->attr.config2,
index c1bed89b102614adf6f71070080aa513729f4409..ee3b469e2da1551889ba0e200f386e010bc6f68f 100644 (file)
@@ -66,7 +66,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
        struct page *pages;
        int irq, ret;
 
-       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+       pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
        if (!pages) {
                pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
                        iommu->name);
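
intel_svm_enable_prq() now allocates the page request queue with
alloc_pages_node() on the IOMMU's own NUMA node instead of whichever
node the probing CPU happens to be on; the queue is DMA-written by that
IOMMU, so node-local memory avoids cross-node traffic on every page
request. The generic form of the call:

    /* prefer the device's node; the allocator falls back if exhausted */
    struct page *pg = alloc_pages_node(dev_to_node(dev),
                                       GFP_KERNEL | __GFP_ZERO, order);
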
index b8c47f18bc2612407cf58bb80bc041e27967d139..6a2707fe7a78c09d04f84a78d0b498d7a960d73d 100644 (file)
@@ -1790,6 +1790,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
        {}
 };
+MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
 
 static struct platform_driver mtk_iommu_driver = {
        .probe  = mtk_iommu_probe,
index a9fa2a54dc9b39a981ccc4e66f72eff5329de49e..d6e4002200bd33d6219ed09f1c90ccac0e3404e4 100644 (file)
@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
        { .compatible = "mediatek,mt2701-m4u", },
        {}
 };
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
 
 static const struct component_master_ops mtk_iommu_v1_com_ops = {
        .bind           = mtk_iommu_v1_bind,
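
Both mtk_iommu hunks add a MODULE_DEVICE_TABLE(of, ...) for an existing
of_device_id table. The macro exports the compatible strings into the
module's alias section, which is what lets udev/modprobe autoload the
driver when a matching device-tree node shows up; without it the table
only matches for built-in or manually loaded modules. Usage shape, with
hypothetical names:

    static const struct of_device_id demo_of_ids[] = {
            { .compatible = "vendor,demo-device" },
            { /* sentinel */ }
    };
    MODULE_DEVICE_TABLE(of, demo_of_ids);
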
index fca888b36680df813c952d8d29e1cf74cd81e167..2a537cbfcb077246c0aee43a5b9f1885a3e0b5f2 100644 (file)
@@ -786,6 +786,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
                                           struct its_cmd_block *cmd,
                                           struct its_cmd_desc *desc)
 {
+       struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
        unsigned long vpt_addr, vconf_addr;
        u64 target;
        bool alloc;
@@ -798,6 +799,11 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
                if (is_v4_1(its)) {
                        alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
                        its_encode_alloc(cmd, alloc);
+                       /*
+                        * Unmapping a VPE is self-synchronizing on GICv4.1,
+                        * no need to issue a VSYNC.
+                        */
+                       vpe = NULL;
                }
 
                goto out;
@@ -832,7 +838,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 out:
        its_fixup_cmd(cmd);
 
-       return valid_vpe(its, desc->its_vmapp_cmd.vpe);
+       return vpe;
 }
 
 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
index be8ac24f50b6ad651fd107f9af9a448bb1f7780a..7b8a71ca66dde0f4f6f3c2728107cb48cfcaa706 100644 (file)
@@ -1558,7 +1558,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
                for (j = 0; j < i; j++)
                        if (r1_bio->bios[j])
                                rdev_dec_pending(conf->mirrors[j].rdev, mddev);
-               free_r1bio(r1_bio);
+               mempool_free(r1_bio, &conf->r1bio_pool);
                allow_barrier(conf, bio->bi_iter.bi_sector);
 
                if (bio->bi_opf & REQ_NOWAIT) {
index 088f8ed4fdc4640d706a98d317e79668a6942748..a8ee0df471482393214c379169b3c7a340282296 100644 (file)
@@ -1114,10 +1114,25 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,
 
        host = slot->host;
 
-       if (slot->vsd)
-               gpiod_set_value(slot->vsd, power_on);
-       if (slot->vio)
-               gpiod_set_value(slot->vio, power_on);
+       if (power_on) {
+               if (slot->vsd) {
+                       gpiod_set_value(slot->vsd, power_on);
+                       msleep(1);
+               }
+               if (slot->vio) {
+                       gpiod_set_value(slot->vio, power_on);
+                       msleep(1);
+               }
+       } else {
+               if (slot->vio) {
+                       gpiod_set_value(slot->vio, power_on);
+                       msleep(50);
+               }
+               if (slot->vsd) {
+                       gpiod_set_value(slot->vsd, power_on);
+                       msleep(50);
+               }
+       }
 
        if (slot->pdata->set_power != NULL)
                slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,
@@ -1254,18 +1269,18 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
        slot->pdata = &host->pdata->slots[id];
 
        /* Check for some optional GPIO controls */
-       slot->vsd = gpiod_get_index_optional(host->dev, "vsd",
-                                            id, GPIOD_OUT_LOW);
+       slot->vsd = devm_gpiod_get_index_optional(host->dev, "vsd",
+                                                 id, GPIOD_OUT_LOW);
        if (IS_ERR(slot->vsd))
                return dev_err_probe(host->dev, PTR_ERR(slot->vsd),
                                     "error looking up VSD GPIO\n");
-       slot->vio = gpiod_get_index_optional(host->dev, "vio",
-                                            id, GPIOD_OUT_LOW);
+       slot->vio = devm_gpiod_get_index_optional(host->dev, "vio",
+                                                 id, GPIOD_OUT_LOW);
        if (IS_ERR(slot->vio))
                return dev_err_probe(host->dev, PTR_ERR(slot->vio),
                                     "error looking up VIO GPIO\n");
-       slot->cover = gpiod_get_index_optional(host->dev, "cover",
-                                               id, GPIOD_IN);
+       slot->cover = devm_gpiod_get_index_optional(host->dev, "cover",
+                                                   id, GPIOD_IN);
        if (IS_ERR(slot->cover))
                return dev_err_probe(host->dev, PTR_ERR(slot->cover),
                                     "error looking up cover switch GPIO\n");
@@ -1379,13 +1394,6 @@ static int mmc_omap_probe(struct platform_device *pdev)
        if (IS_ERR(host->virt_base))
                return PTR_ERR(host->virt_base);
 
-       host->slot_switch = gpiod_get_optional(host->dev, "switch",
-                                              GPIOD_OUT_LOW);
-       if (IS_ERR(host->slot_switch))
-               return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
-                                    "error looking up slot switch GPIO\n");
-
-
        INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);
        INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work);
 
@@ -1404,6 +1412,12 @@ static int mmc_omap_probe(struct platform_device *pdev)
        host->dev = &pdev->dev;
        platform_set_drvdata(pdev, host);
 
+       host->slot_switch = devm_gpiod_get_optional(host->dev, "switch",
+                                                   GPIOD_OUT_LOW);
+       if (IS_ERR(host->slot_switch))
+               return dev_err_probe(host->dev, PTR_ERR(host->slot_switch),
+                                    "error looking up slot switch GPIO\n");
+
        host->id = pdev->id;
        host->irq = irq;
        host->phys_base = res->start;
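
Two things change in the OMAP MMC driver: power sequencing (VSD is
raised before VIO with short settle delays, and the order reverses with
longer delays on the way down) and the conversion of every
gpiod_get_*() lookup to its devm_ counterpart. The devm form ties each
descriptor's lifetime to the device, so early error exits such as the
dev_err_probe() returns above no longer leak GPIOs. A sketch of the
probe-side pattern:

    static int demo_probe(struct platform_device *pdev) /* hypothetical */
    {
            struct gpio_desc *en;

            /* released automatically on probe failure or unbind */
            en = devm_gpiod_get_optional(&pdev->dev, "enable",
                                         GPIOD_OUT_LOW);
            if (IS_ERR(en))
                    return dev_err_probe(&pdev->dev, PTR_ERR(en),
                                         "error looking up enable GPIO\n");
            return 0;
    }
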
index a6fcbda64ecc60e5beccf20f2043ab00870cbd5d..2b6ec979a62f2160a7187e024a4b0dc6bf9e08da 100644 (file)
@@ -154,8 +154,11 @@ static void free_netvsc_device(struct rcu_head *head)
        int i;
 
        kfree(nvdev->extension);
-       vfree(nvdev->recv_buf);
-       vfree(nvdev->send_buf);
+
+       if (!nvdev->recv_buf_gpadl_handle.decrypted)
+               vfree(nvdev->recv_buf);
+       if (!nvdev->send_buf_gpadl_handle.decrypted)
+               vfree(nvdev->send_buf);
        bitmap_free(nvdev->send_section_map);
 
        for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
index bf4833221816d492d4adca02d508d33a74879a92..eff7f5df08e27fb25909999a89c6742785038b75 100644 (file)
@@ -3765,14 +3765,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset);
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
 
-/*
- * Apparently the LSI / Agere FW643 can't recover after a Secondary Bus
- * Reset and requires a power-off or suspend/resume and rescan.  Prevent
- * use of that reset.
- */
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5900, quirk_no_bus_reset);
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATT, 0x5901, quirk_no_bus_reset);
-
 /*
  * Some TI KeyStone C667X devices do not support bus/hot reset.  The PCIESS
  * automatically disables LTSSM when Secondary Bus Reset is received and
index b456370166b6bb2158ca0916e0eb9e106f9fd9d7..b4f49720c87f62aa6e8349af12797382f740c2b7 100644 (file)
@@ -208,6 +208,15 @@ static const struct dmi_system_id fwbug_list[] = {
                        DMI_MATCH(DMI_BIOS_VERSION, "03.03"),
                }
        },
+       {
+               .ident = "Framework Laptop 13 (Phoenix)",
+               .driver_data = &quirk_spurious_8042,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Framework"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 13 (AMD Ryzen 7040Series)"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "03.05"),
+               }
+       },
        {}
 };
 
index 6b26e48ce8ad2a5f4de6e78751ffec8941610336..7d6079b02589cbacbb203bdc42cfac4e42dd601c 100644 (file)
@@ -7,4 +7,4 @@
 obj-$(CONFIG_AMD_PMF) += amd-pmf.o
 amd-pmf-objs := core.o acpi.o sps.o \
                auto-mode.o cnqf.o \
-               tee-if.o spc.o
+               tee-if.o spc.o pmf-quirks.o
index d0cf46e2fc8e8a073149c61c52b27e9cc9051da6..1157ec148880b54ec145a7ed9353a656e36f0b33 100644 (file)
@@ -343,7 +343,10 @@ static int apmf_if_verify_interface(struct amd_pmf_dev *pdev)
        if (err)
                return err;
 
-       pdev->supported_func = output.supported_functions;
+       /* only set if not already set by a quirk */
+       if (!pdev->supported_func)
+               pdev->supported_func = output.supported_functions;
+
        dev_dbg(pdev->dev, "supported functions:0x%x notifications:0x%x version:%u\n",
                output.supported_functions, output.notification_mask, output.version);
 
@@ -437,7 +440,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev)
 
        status = acpi_walk_resources(ahandle, METHOD_NAME__CRS, apmf_walk_resources, pmf_dev);
        if (ACPI_FAILURE(status)) {
-               dev_err(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
+               dev_dbg(pmf_dev->dev, "acpi_walk_resources failed :%d\n", status);
                return -EINVAL;
        }
 
index 5d4f80698a8b8824bdb59b4e5632ca5f05982c48..64e6e34a2a9acd954f4ce9a916f77673193aba06 100644 (file)
@@ -445,6 +445,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
        mutex_init(&dev->lock);
        mutex_init(&dev->update_mutex);
 
+       amd_pmf_quirks_init(dev);
        apmf_acpi_init(dev);
        platform_set_drvdata(pdev, dev);
        amd_pmf_dbgfs_register(dev);
diff --git a/drivers/platform/x86/amd/pmf/pmf-quirks.c b/drivers/platform/x86/amd/pmf/pmf-quirks.c
new file mode 100644 (file)
index 0000000..0b2eb0a
--- /dev/null
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * AMD Platform Management Framework Driver Quirks
+ *
+ * Copyright (c) 2024, Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Author: Mario Limonciello <mario.limonciello@amd.com>
+ */
+
+#include <linux/dmi.h>
+
+#include "pmf.h"
+
+struct quirk_entry {
+       u32 supported_func;
+};
+
+static struct quirk_entry quirk_no_sps_bug = {
+       .supported_func = 0x4003,
+};
+
+static const struct dmi_system_id fwbug_list[] = {
+       {
+               .ident = "ROG Zephyrus G14",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "GA403UV"),
+               },
+               .driver_data = &quirk_no_sps_bug,
+       },
+       {}
+};
+
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev)
+{
+       const struct dmi_system_id *dmi_id;
+       struct quirk_entry *quirks;
+
+       dmi_id = dmi_first_match(fwbug_list);
+       if (!dmi_id)
+               return;
+
+       quirks = dmi_id->driver_data;
+       if (quirks->supported_func) {
+               dev->supported_func = quirks->supported_func;
+               pr_info("Using supported funcs quirk to avoid %s platform firmware bug\n",
+                       dmi_id->ident);
+       }
+}
+
index 8c4df5753f40d48fefc05c6373a64d0a00469149..eeedd0c0395a89704ce360a6aff9f827566b17b2 100644 (file)
@@ -720,4 +720,7 @@ int apmf_check_smart_pc(struct amd_pmf_dev *pmf_dev);
 void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
 void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in);
 
+/* Quirk infrastructure */
+void amd_pmf_quirks_init(struct amd_pmf_dev *dev);
+
 #endif /* PMF_H */
index 08df9494603c5e2acf152aacfe13fce81a18dc2c..30951f7131cd98bfdaffb70b2aa30ee3ceb7dbdd 100644 (file)
@@ -719,6 +719,7 @@ static struct miscdevice isst_if_char_driver = {
 };
 
 static const struct x86_cpu_id hpm_cpu_ids[] = {
+       X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,     NULL),
        X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,     NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X,    NULL),
        {}
index bd75d61ff8a66196d620b5ca2824d8bb16332237..ef730200a04bd94682c781be092a43f15f88190e 100644 (file)
@@ -29,7 +29,7 @@
 #include "uncore-frequency-common.h"
 
 #define        UNCORE_MAJOR_VERSION            0
-#define        UNCORE_MINOR_VERSION            1
+#define        UNCORE_MINOR_VERSION            2
 #define UNCORE_HEADER_INDEX            0
 #define UNCORE_FABRIC_CLUSTER_OFFSET   8
 
@@ -329,7 +329,7 @@ static int uncore_probe(struct auxiliary_device *auxdev, const struct auxiliary_
                        goto remove_clusters;
                }
 
-               if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) != UNCORE_MINOR_VERSION)
+               if (TPMI_MINOR_VERSION(pd_info->ufs_header_ver) > UNCORE_MINOR_VERSION)
                        dev_info(&auxdev->dev, "Uncore: Ignore: Unsupported minor version:%lx\n",
                                 TPMI_MINOR_VERSION(pd_info->ufs_header_ver));
 
index 043736972cb9216c59a7cb3bc6682e056cdb2373..c8425493b95d855a7562406501b7c803ef481b22 100644 (file)
@@ -172,7 +172,6 @@ struct pwm_chip *dwc_pwm_alloc(struct device *dev)
        dwc->clk_ns = 10;
        chip->ops = &dwc_pwm_ops;
 
-       dev_set_drvdata(dev, chip);
        return chip;
 }
 EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
index 676eaf8d7a53f76672527c1871a306cbcdb9b7ba..fb3eadf6fbc464773b17c30235c51f5a4ff6917f 100644 (file)
@@ -31,26 +31,34 @@ static const struct dwc_pwm_info ehl_pwm_info = {
        .size = 0x1000,
 };
 
-static int dwc_pwm_init_one(struct device *dev, void __iomem *base, unsigned int offset)
+static int dwc_pwm_init_one(struct device *dev, struct dwc_pwm_drvdata *ddata, unsigned int idx)
 {
        struct pwm_chip *chip;
        struct dwc_pwm *dwc;
+       int ret;
 
        chip = dwc_pwm_alloc(dev);
        if (IS_ERR(chip))
                return PTR_ERR(chip);
 
        dwc = to_dwc_pwm(chip);
-       dwc->base = base + offset;
+       dwc->base = ddata->io_base + (ddata->info->size * idx);
 
-       return devm_pwmchip_add(dev, chip);
+       ret = devm_pwmchip_add(dev, chip);
+       if (ret)
+               return ret;
+
+       ddata->chips[idx] = chip;
+       return 0;
 }
 
 static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
        const struct dwc_pwm_info *info;
        struct device *dev = &pci->dev;
-       int i, ret;
+       struct dwc_pwm_drvdata *ddata;
+       unsigned int idx;
+       int ret;
 
        ret = pcim_enable_device(pci);
        if (ret)
@@ -63,17 +71,25 @@ static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
                return dev_err_probe(dev, ret, "Failed to iomap PCI BAR\n");
 
        info = (const struct dwc_pwm_info *)id->driver_data;
-
-       for (i = 0; i < info->nr; i++) {
-               /*
-                * No need to check for pcim_iomap_table() failure,
-                * pcim_iomap_regions() already does it for us.
-                */
-               ret = dwc_pwm_init_one(dev, pcim_iomap_table(pci)[0], i * info->size);
+       ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr), GFP_KERNEL);
+       if (!ddata)
+               return -ENOMEM;
+
+       /*
+        * No need to check for pcim_iomap_table() failure,
+        * pcim_iomap_regions() already does it for us.
+        */
+       ddata->io_base = pcim_iomap_table(pci)[0];
+       ddata->info = info;
+
+       for (idx = 0; idx < ddata->info->nr; idx++) {
+               ret = dwc_pwm_init_one(dev, ddata, idx);
                if (ret)
                        return ret;
        }
 
+       dev_set_drvdata(dev, ddata);
+
        pm_runtime_put(dev);
        pm_runtime_allow(dev);
 
@@ -88,19 +104,24 @@ static void dwc_pwm_remove(struct pci_dev *pci)
 
 static int dwc_pwm_suspend(struct device *dev)
 {
-       struct pwm_chip *chip = dev_get_drvdata(dev);
-       struct dwc_pwm *dwc = to_dwc_pwm(chip);
-       int i;
-
-       for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
-               if (chip->pwms[i].state.enabled) {
-                       dev_err(dev, "PWM %u in use by consumer (%s)\n",
-                               i, chip->pwms[i].label);
-                       return -EBUSY;
+       struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+       unsigned int idx;
+
+       for (idx = 0; idx < ddata->info->nr; idx++) {
+               struct pwm_chip *chip = ddata->chips[idx];
+               struct dwc_pwm *dwc = to_dwc_pwm(chip);
+               unsigned int i;
+
+               for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+                       if (chip->pwms[i].state.enabled) {
+                               dev_err(dev, "PWM %u in use by consumer (%s)\n",
+                                       i, chip->pwms[i].label);
+                               return -EBUSY;
+                       }
+                       dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
+                       dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
+                       dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
                }
-               dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
-               dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
-               dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
        }
 
        return 0;
@@ -108,14 +129,19 @@ static int dwc_pwm_suspend(struct device *dev)
 
 static int dwc_pwm_resume(struct device *dev)
 {
-       struct pwm_chip *chip = dev_get_drvdata(dev);
-       struct dwc_pwm *dwc = to_dwc_pwm(chip);
-       int i;
-
-       for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
-               dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
-               dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
-               dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+       struct dwc_pwm_drvdata *ddata = dev_get_drvdata(dev);
+       unsigned int idx;
+
+       for (idx = 0; idx < ddata->info->nr; idx++) {
+               struct pwm_chip *chip = ddata->chips[idx];
+               struct dwc_pwm *dwc = to_dwc_pwm(chip);
+               unsigned int i;
+
+               for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+                       dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
+                       dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
+                       dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+               }
        }
 
        return 0;
index a8b074841ae8054a5a3737127442a1d0e9979e02..c6e2df5a61227131c50fc3c6351326217371c3a3 100644 (file)
@@ -38,6 +38,12 @@ struct dwc_pwm_info {
        unsigned int size;
 };
 
+struct dwc_pwm_drvdata {
+       const struct dwc_pwm_info *info;
+       void __iomem *io_base;
+       struct pwm_chip *chips[];
+};
+
 struct dwc_pwm_ctx {
        u32 cnt;
        u32 cnt2;
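
The new dwc_pwm_drvdata ends in a flexible array of per-chip pointers,
sized at allocation time with struct_size(). The helper computes
sizeof(*ddata) plus nr trailing elements with overflow checking, the
canonical way to allocate a struct with a trailing flexible array
member:

    struct dwc_pwm_drvdata *ddata;

    /* sizeof(*ddata) + info->nr * sizeof(ddata->chips[0]), checked */
    ddata = devm_kzalloc(dev, struct_size(ddata, chips, info->nr),
                         GFP_KERNEL);
    if (!ddata)
            return -ENOMEM;

With the drvdata now holding every chip, suspend/resume iterate all
info->nr chips instead of assuming a single one.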
index 20d9762331bd767aa88c7b04d3f4c2e84ff72648..6be3462b109ff29c0d5448a7d3b8f31e068f6adb 100644 (file)
@@ -181,12 +181,14 @@ hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
 {
        if (pdata->send_gpadl.gpadl_handle) {
                vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
-               vfree(pdata->send_buf);
+               if (!pdata->send_gpadl.decrypted)
+                       vfree(pdata->send_buf);
        }
 
        if (pdata->recv_gpadl.gpadl_handle) {
                vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
-               vfree(pdata->recv_buf);
+               if (!pdata->recv_gpadl.decrypted)
+                       vfree(pdata->recv_buf);
        }
 }
 
@@ -295,7 +297,8 @@ hv_uio_probe(struct hv_device *dev,
        ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
                                    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
        if (ret) {
-               vfree(pdata->recv_buf);
+               if (!pdata->recv_gpadl.decrypted)
+                       vfree(pdata->recv_buf);
                goto fail_close;
        }
 
@@ -317,7 +320,8 @@ hv_uio_probe(struct hv_device *dev,
        ret = vmbus_establish_gpadl(channel, pdata->send_buf,
                                    SEND_BUFFER_SIZE, &pdata->send_gpadl);
        if (ret) {
-               vfree(pdata->send_buf);
+               if (!pdata->send_gpadl.decrypted)
+                       vfree(pdata->send_buf);
                goto fail_close;
        }
 
index 045f666b4f12a2a6416c93dafc2189af03662668..8995730ce0bfc82d193bd7128e51817fba43de76 100644 (file)
@@ -2515,7 +2515,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
                vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
                if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
-                       vq_err(vq, "Guest moved used index from %u to %u",
+                       vq_err(vq, "Guest moved avail index from %u to %u",
                                last_avail_idx, vq->avail_idx);
                        return -EFAULT;
                }
@@ -2799,9 +2799,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
        r = vhost_get_avail_idx(vq, &avail_idx);
        if (unlikely(r))
                return false;
+
        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+       if (vq->avail_idx != vq->last_avail_idx) {
+               /* Since we have updated avail_idx, the following
+                * call to vhost_get_vq_desc() will read available
+                * ring entries. Make sure that read happens after
+                * the avail_idx read.
+                */
+               smp_rmb();
+               return false;
+       }
 
-       return vq->avail_idx == vq->last_avail_idx;
+       return true;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
 
@@ -2838,9 +2848,19 @@ bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
                       &vq->avail->idx, r);
                return false;
        }
+
        vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+       if (vq->avail_idx != vq->last_avail_idx) {
+               /* Since we have updated avail_idx, the following
+                * call to vhost_get_vq_desc() will read available
+                * ring entries. Make sure that read happens after
+                * the avail_idx read.
+                */
+               smp_rmb();
+               return true;
+       }
 
-       return vq->avail_idx != vq->last_avail_idx;
+       return false;
 }
 EXPORT_SYMBOL_GPL(vhost_enable_notify);
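
The smp_rmb() added to both helpers pairs with the guest's write
barrier: the guest fills the ring entries, issues a write barrier, then
publishes avail->idx. Once the host observes the new index, its
subsequent reads of the ring entries must not be reordered before that
index load, or it can consume a stale descriptor. The pairing, sketched
as a comment:

    /*
     * guest (producer)                 host (consumer)
     *   ring[idx] = desc;                head = READ_ONCE(avail->idx);
     *   smp_wmb();                       smp_rmb();
     *   avail->idx = idx + 1;            desc = ring[head - 1];
     *
     * without smp_rmb() the consumer can see the new index yet still
     * read the ring slot's old contents
     */
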
 
index b67a28da47026d0299b8a1f8c22a40fc36b1c4a2..a1c467a0e9f719665fc02fa559d5c94545e5725f 100644 (file)
@@ -68,7 +68,6 @@ out:
 static void vmgenid_notify(struct acpi_device *device, u32 event)
 {
        struct vmgenid_state *state = acpi_driver_data(device);
-       char *envp[] = { "NEW_VMGENID=1", NULL };
        u8 old_id[VMGENID_SIZE];
 
        memcpy(old_id, state->this_id, sizeof(old_id));
@@ -76,7 +75,6 @@ static void vmgenid_notify(struct acpi_device *device, u32 event)
        if (!memcmp(old_id, state->this_id, sizeof(old_id)))
                return;
        add_vmfork_randomness(state->this_id, sizeof(state->this_id));
-       kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp);
 }
 
 static const struct acpi_device_id vmgenid_ids[] = {
index f173587893cb34cadbfb4c6e548c158522c7749d..9510c551dce864d1e7df97f47a0c24fbcb8b8478 100644 (file)
@@ -362,14 +362,16 @@ static const struct bus_type virtio_bus = {
        .remove = virtio_dev_remove,
 };
 
-int register_virtio_driver(struct virtio_driver *driver)
+int __register_virtio_driver(struct virtio_driver *driver, struct module *owner)
 {
        /* Catch this early. */
        BUG_ON(driver->feature_table_size && !driver->feature_table);
        driver->driver.bus = &virtio_bus;
+       driver->driver.owner = owner;
+
        return driver_register(&driver->driver);
 }
-EXPORT_SYMBOL_GPL(register_virtio_driver);
+EXPORT_SYMBOL_GPL(__register_virtio_driver);
 
 void unregister_virtio_driver(struct virtio_driver *driver)
 {
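
register_virtio_driver() becomes __register_virtio_driver(driver, owner)
so the bus core can record which module owns each driver and pin it
while devices are bound. The customary companion (presumably in the
header, which is not shown here) is a macro that supplies THIS_MODULE
from the caller's own translation unit:

    /* expands in the *caller's* module, so owner is set correctly */
    #define register_virtio_driver(drv) \
            __register_virtio_driver(drv, THIS_MODULE)
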
index 114328acde7202ed201fc8e776ed9cd73176d765..fadb1078903d291ce6ce3c7928d79c71c1eb18f8 100644 (file)
@@ -49,13 +49,15 @@ int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
        if (!bch2_dev_exists2(c, bp.k->p.inode))
                return 0;
 
+       struct bch_dev *ca = bch_dev_bkey_exists(c, bp.k->p.inode);
        struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
        int ret = 0;
 
-       bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
+       bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
+                        !bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
                         c, err,
-                        backpointer_pos_wrong,
-                        "backpointer at wrong pos");
+                        backpointer_bucket_offset_wrong,
+                        "backpointer bucket_offset wrong");
 fsck_err:
        return ret;
 }
index da012ca7daee5501fe04be48bc875c918abbb33a..85949b9fd880ce2fcce508ba4018350a5dfac9ca 100644 (file)
@@ -53,14 +53,11 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
                                           u64 bucket_offset)
 {
        struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
-       struct bpos ret;
-
-       ret = POS(bucket.inode,
-                 (bucket_to_sector(ca, bucket.offset) <<
-                  MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
+       struct bpos ret = POS(bucket.inode,
+                             (bucket_to_sector(ca, bucket.offset) <<
+                              MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
 
        EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
-
        return ret;
 }
 
index a31a5f706929eb2006e4867a38123b9526639cee..91c3c1fef233d118fb083dae3a60a5e779e3cdaf 100644 (file)
@@ -709,6 +709,8 @@ struct btree_trans_buf {
        x(stripe_delete)                                                \
        x(reflink)                                                      \
        x(fallocate)                                                    \
+       x(fsync)                                                        \
+       x(dio_write)                                                    \
        x(discard)                                                      \
        x(discard_fast)                                                 \
        x(invalidate)                                                   \
index 364ae42022af1750f9887d3f16c566a676b30776..085987435a5ea3cfc7354db7ee5392ce62241f05 100644 (file)
@@ -578,7 +578,8 @@ struct bch_member {
        __le64                  nbuckets;       /* device size */
        __le16                  first_bucket;   /* index of first bucket used */
        __le16                  bucket_size;    /* sectors */
-       __le32                  pad;
+       __u8                    btree_bitmap_shift;
+       __u8                    pad[3];
        __le64                  last_mount;     /* time_t */
 
        __le64                  flags;
@@ -587,6 +588,7 @@ struct bch_member {
        __le64                  errors_at_reset[BCH_MEMBER_ERROR_NR];
        __le64                  errors_reset_time;
        __le64                  seq;
+       __le64                  btree_allocated_bitmap;
 };
 
 #define BCH_MEMBER_V1_BYTES    56
@@ -876,7 +878,8 @@ struct bch_sb_field_downgrade {
        x(rebalance_work,               BCH_VERSION(1,  3))             \
        x(member_seq,                   BCH_VERSION(1,  4))             \
        x(subvolume_fs_parent,          BCH_VERSION(1,  5))             \
-       x(btree_subvolume_children,     BCH_VERSION(1,  6))
+       x(btree_subvolume_children,     BCH_VERSION(1,  6))             \
+       x(mi_btree_bitmap,              BCH_VERSION(1,  7))
 
 enum bcachefs_metadata_version {
        bcachefs_metadata_version_min = 9,
@@ -1314,7 +1317,7 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
        x(write_buffer_keys,    11)             \
        x(datetime,             12)
 
-enum {
+enum bch_jset_entry_type {
 #define x(f, nr)       BCH_JSET_ENTRY_##f      = nr,
        BCH_JSET_ENTRY_TYPES()
 #undef x
@@ -1360,7 +1363,7 @@ struct jset_entry_blacklist_v2 {
        x(inodes,               1)              \
        x(key_version,          2)
 
-enum {
+enum bch_fs_usage_type {
 #define x(f, nr)       BCH_FS_USAGE_##f        = nr,
        BCH_FS_USAGE_TYPES()
 #undef x
index cf23ff47bed8be588593a7fb193ee21ca8298c65..3a45d128f608db86d060d43573219d60762e3038 100644 (file)
@@ -314,6 +314,12 @@ static inline unsigned bkeyp_key_u64s(const struct bkey_format *format,
        return bkey_packed(k) ? format->key_u64s : BKEY_U64s;
 }
 
+static inline bool bkeyp_u64s_valid(const struct bkey_format *f,
+                                   const struct bkey_packed *k)
+{
+       return ((unsigned) k->u64s - bkeyp_key_u64s(f, k) <= U8_MAX - BKEY_U64s);
+}
+
 static inline unsigned bkeyp_key_bytes(const struct bkey_format *format,
                                       const struct bkey_packed *k)
 {
index 5e52684764eb14de4d8433abd5954a829648440b..db336a43fc083a79615e81ce9da37ff4877005f9 100644 (file)
@@ -171,11 +171,15 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
        if (type >= BKEY_TYPE_NR)
                return 0;
 
-       bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) &&
+       bkey_fsck_err_on((type == BKEY_TYPE_btree ||
+                         (flags & BKEY_INVALID_COMMIT)) &&
                         !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
                         bkey_invalid_type_for_btree,
                         "invalid key type for btree %s (%s)",
-                        bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]);
+                        bch2_btree_node_type_str(type),
+                        k.k->type < KEY_TYPE_MAX
+                        ? bch2_bkey_types[k.k->type]
+                        : "(unknown)");
 
        if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
                bkey_fsck_err_on(k.k->size == 0, c, err,
index 84474324dba9b508141f0e886bafbd8a95d47537..02c70e813face0ce975f1f700e55a34743d286ea 100644 (file)
@@ -709,9 +709,31 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
-       u32 seq;
 
-       BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
+       if (unlikely(level >= BTREE_MAX_DEPTH)) {
+               int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
+                                                level, BTREE_MAX_DEPTH);
+               return ERR_PTR(ret);
+       }
+
+       if (unlikely(!bkey_is_btree_ptr(&k->k))) {
+               struct printbuf buf = PRINTBUF;
+               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+               int ret = bch2_fs_topology_error(c, "attempting to get btree node with non-btree key %s", buf.buf);
+               printbuf_exit(&buf);
+               return ERR_PTR(ret);
+       }
+
+       if (unlikely(k->k.u64s > BKEY_BTREE_PTR_U64s_MAX)) {
+               struct printbuf buf = PRINTBUF;
+               bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
+
+               int ret = bch2_fs_topology_error(c, "attempting to get btree node with too big key %s", buf.buf);
+               printbuf_exit(&buf);
+               return ERR_PTR(ret);
+       }
+
        /*
         * Parent node must be locked, else we could read in a btree node that's
         * been freed:
@@ -752,34 +774,26 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
        }
 
        set_btree_node_read_in_flight(b);
-
        six_unlock_write(&b->c.lock);
-       seq = six_lock_seq(&b->c.lock);
-       six_unlock_intent(&b->c.lock);
 
-       /* Unlock before doing IO: */
-       if (path && sync)
-               bch2_trans_unlock_noassert(trans);
-
-       bch2_btree_node_read(trans, b, sync);
+       if (path) {
+               u32 seq = six_lock_seq(&b->c.lock);
 
-       if (!sync)
-               return NULL;
+               /* Unlock before doing IO: */
+               six_unlock_intent(&b->c.lock);
+               bch2_trans_unlock_noassert(trans);
 
-       if (path) {
-               int ret = bch2_trans_relock(trans) ?:
-                       bch2_btree_path_relock_intent(trans, path);
-               if (ret) {
-                       BUG_ON(!trans->restarted);
-                       return ERR_PTR(ret);
-               }
-       }
+               bch2_btree_node_read(trans, b, sync);
 
-       if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-               BUG_ON(!path);
+               if (!sync)
+                       return NULL;
 
-               trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
-               return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
+               if (!six_relock_type(&b->c.lock, lock_type, seq))
+                       b = NULL;
+       } else {
+               bch2_btree_node_read(trans, b, sync);
+               if (lock_type == SIX_LOCK_read)
+                       six_lock_downgrade(&b->c.lock);
        }
 
        return b;
@@ -1112,18 +1126,19 @@ int bch2_btree_node_prefetch(struct btree_trans *trans,
 {
        struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
-       struct btree *b;
 
        BUG_ON(path && !btree_node_locked(path, level + 1));
        BUG_ON(level >= BTREE_MAX_DEPTH);
 
-       b = btree_cache_find(bc, k);
+       struct btree *b = btree_cache_find(bc, k);
        if (b)
                return 0;
 
        b = bch2_btree_node_fill(trans, path, k, btree_id,
                                 level, SIX_LOCK_read, false);
-       return PTR_ERR_OR_ZERO(b);
+       if (!IS_ERR_OR_NULL(b))
+               six_unlock_read(&b->c.lock);
+       return bch2_trans_relock(trans) ?: PTR_ERR_OR_ZERO(b);
 }
 
 void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
@@ -1148,6 +1163,8 @@ wait_on_io:
 
        btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
        btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
+       if (unlikely(b->hash_val != btree_ptr_hash_val(k)))
+               goto out;
 
        if (btree_node_dirty(b)) {
                __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
@@ -1162,7 +1179,7 @@ wait_on_io:
        btree_node_data_free(c, b);
        bch2_btree_node_hash_remove(bc, b);
        mutex_unlock(&bc->lock);
-
+out:
        six_unlock_write(&b->c.lock);
        six_unlock_intent(&b->c.lock);
 }
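
The new hash_val comparison above is the classic recheck-after-relock:
between the unlocked hash-table lookup and acquiring the node's locks, the
node may have been evicted and reused for a different key, so its identity
must be verified again under the lock. The pattern in miniature (illustrative
names, not the bcachefs API):

	obj = lookup(table, key);		/* unlocked lookup */
	lock(obj);
	if (obj->key != key) {			/* recycled meanwhile? */
		unlock(obj);
		goto retry;
	}
	/* obj is locked and still the one we wanted */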
index d2555da55c6da3750af9fab2538e3653118a48d9..ecbd9598f69fd00e86efbe7537a134d6d4c4db06 100644 (file)
@@ -828,6 +828,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
        struct bch_fs *c = trans->c;
        struct bkey deleted = KEY(0, 0, 0);
        struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
+       struct printbuf buf = PRINTBUF;
        int ret = 0;
 
        deleted.p = k->k->p;
@@ -848,11 +849,23 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
        if (ret)
                goto err;
 
+       if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, *k),
+                               c, btree_bitmap_not_marked,
+                               "btree ptr not marked in member info btree allocated bitmap\n  %s",
+                               (bch2_bkey_val_to_text(&buf, c, *k),
+                                buf.buf))) {
+               mutex_lock(&c->sb_lock);
+               bch2_dev_btree_bitmap_mark(c, *k);
+               bch2_write_super(c);
+               mutex_unlock(&c->sb_lock);
+       }
+
        ret = commit_do(trans, NULL, NULL, 0,
                        bch2_key_trigger(trans, btree_id, level, old,
                                         unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
 fsck_err:
 err:
+       printbuf_exit(&buf);
        bch_err_fn(c, ret);
        return ret;
 }
index d7de82ac389354f9a0d5eef0a66c8694f1752b94..9678b2375bedde868e7a168435c9a17fc74eb26a 100644 (file)
@@ -831,7 +831,7 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
                (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
 }
 
-static bool __bkey_valid(struct bch_fs *c, struct btree *b,
+static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
                         struct bset *i, struct bkey_packed *k)
 {
        if (bkey_p_next(k) > vstruct_last(i))
@@ -840,7 +840,7 @@ static bool __bkey_valid(struct bch_fs *c, struct btree *b,
        if (k->format > KEY_FORMAT_CURRENT)
                return false;
 
-       if (k->u64s < bkeyp_key_u64s(&b->format, k))
+       if (!bkeyp_u64s_valid(&b->format, k))
                return false;
 
        struct printbuf buf = PRINTBUF;
@@ -884,11 +884,13 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                                 "invalid bkey format %u", k->format))
                        goto drop_this_key;
 
-               if (btree_err_on(k->u64s < bkeyp_key_u64s(&b->format, k),
+               if (btree_err_on(!bkeyp_u64s_valid(&b->format, k),
                                 -BCH_ERR_btree_node_read_err_fixable,
                                 c, NULL, b, i,
                                 btree_node_bkey_bad_u64s,
-                                "k->u64s too small (%u < %u)", k->u64s, bkeyp_key_u64s(&b->format, k)))
+                                "bad k->u64s %u (min %u max %lu)", k->u64s,
+                                bkeyp_key_u64s(&b->format, k),
+                                U8_MAX - BKEY_U64s + bkeyp_key_u64s(&b->format, k)))
                        goto drop_this_key;
 
                if (!write)
@@ -947,13 +949,12 @@ drop_this_key:
                         * do
                         */
 
-                       if (!__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
+                       if (!bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key))) {
                                for (next_good_key = 1;
                                     next_good_key < (u64 *) vstruct_last(i) - (u64 *) k;
                                     next_good_key++)
-                                       if (__bkey_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
+                                       if (bkey_packed_valid(c, b, i, (void *) ((u64 *) k + next_good_key)))
                                                goto got_good_key;
-
                        }
 
                        /*
@@ -1339,7 +1340,9 @@ start:
                               rb->start_time);
        bio_put(&rb->bio);
 
-       if (saw_error && !btree_node_read_error(b)) {
+       if (saw_error &&
+           !btree_node_read_error(b) &&
+           c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
                printbuf_reset(&buf);
                bch2_bpos_to_text(&buf, b->key.k.p);
                bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
index 1d58d447b386cdf74ecce2609f8aa23851ae4057..1c70836dd7cce4988ef8cf166ee0797fd8f8269e 100644 (file)
@@ -498,8 +498,13 @@ static inline void set_btree_iter_dontneed(struct btree_iter *iter)
 {
        struct btree_trans *trans = iter->trans;
 
-       if (!trans->restarted)
-               btree_iter_path(trans, iter)->preserve = false;
+       if (!iter->path || trans->restarted)
+               return;
+
+       struct btree_path *path = btree_iter_path(trans, iter);
+       path->preserve          = false;
+       if (path->ref == 1)
+               path->should_be_locked  = false;
 }
 
 void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
index 556f76f5c84e1613c332e7443e6bb8b1602dd359..866bd278439f8bb72a0b1e31e672953ff9b3f839 100644 (file)
@@ -133,9 +133,19 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
        if (le64_to_cpu(bn->magic) != bset_magic(c))
                return;
 
+       if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
+               struct nonce nonce = btree_nonce(&bn->keys, 0);
+               unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
+
+               bch2_encrypt(c, BSET_CSUM_TYPE(&bn->keys), nonce, &bn->flags, bytes);
+       }
+
        if (btree_id_is_alloc(BTREE_NODE_ID(bn)))
                return;
 
+       if (BTREE_NODE_LEVEL(bn) >= BTREE_MAX_DEPTH)
+               return;
+
        rcu_read_lock();
        struct found_btree_node n = {
                .btree_id       = BTREE_NODE_ID(bn),
@@ -195,8 +205,13 @@ static int read_btree_nodes_worker(void *p)
                                last_print = jiffies;
                        }
 
-                       try_read_btree_node(w->f, ca, bio, buf,
-                                           bucket * ca->mi.bucket_size + bucket_offset);
+                       u64 sector = bucket * ca->mi.bucket_size + bucket_offset;
+
+                       if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_mi_btree_bitmap &&
+                           !bch2_dev_btree_bitmap_marked_sectors(ca, sector, btree_sectors(c)))
+                               continue;
+
+                       try_read_btree_node(w->f, ca, bio, buf, sector);
                }
 err:
        bio_put(bio);
index aa9da49707404015a558c9c6e9339b733d0c98c3..bbec91e8e6506fa32611b340dc1a3a4a104aeed6 100644 (file)
@@ -397,12 +397,13 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
        struct bkey_cached *ck = (void *) path->l[0].b;
        unsigned new_u64s;
        struct bkey_i *new_k;
+       unsigned watermark = flags & BCH_WATERMARK_MASK;
 
        EBUG_ON(path->level);
 
-       if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
-           bch2_btree_key_cache_must_wait(c) &&
-           !(flags & BCH_TRANS_COMMIT_journal_reclaim))
+       if (watermark < BCH_WATERMARK_reclaim &&
+           !test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
+           bch2_btree_key_cache_must_wait(c))
                return -BCH_ERR_btree_insert_need_journal_reclaim;
 
        /*
@@ -499,9 +500,8 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
 }
 
 static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
-                             struct btree_insert_entry *btree_id_start)
+                             unsigned btree_id_start)
 {
-       struct btree_insert_entry *i;
        bool trans_trigger_run;
        int ret, overwrite;
 
@@ -514,13 +514,13 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
                do {
                        trans_trigger_run = false;
 
-                       for (i = btree_id_start;
-                            i < trans->updates + trans->nr_updates && i->btree_id <= btree_id;
+                       for (unsigned i = btree_id_start;
+                            i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
                             i++) {
-                               if (i->btree_id != btree_id)
+                               if (trans->updates[i].btree_id != btree_id)
                                        continue;
 
-                               ret = run_one_trans_trigger(trans, i, overwrite);
+                               ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
                                if (ret < 0)
                                        return ret;
                                if (ret)
@@ -534,8 +534,7 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
 
 static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
 {
-       struct btree_insert_entry *btree_id_start = trans->updates;
-       unsigned btree_id = 0;
+       unsigned btree_id = 0, btree_id_start = 0;
        int ret = 0;
 
        /*
@@ -549,8 +548,8 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
                if (btree_id == BTREE_ID_alloc)
                        continue;
 
-               while (btree_id_start < trans->updates + trans->nr_updates &&
-                      btree_id_start->btree_id < btree_id)
+               while (btree_id_start < trans->nr_updates &&
+                      trans->updates[btree_id_start].btree_id < btree_id)
                        btree_id_start++;
 
                ret = run_btree_triggers(trans, btree_id, btree_id_start);
@@ -558,11 +557,13 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
                        return ret;
        }
 
-       trans_for_each_update(trans, i) {
+       for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
+               struct btree_insert_entry *i = trans->updates + idx;
+
                if (i->btree_id > BTREE_ID_alloc)
                        break;
                if (i->btree_id == BTREE_ID_alloc) {
-                       ret = run_btree_triggers(trans, BTREE_ID_alloc, i);
+                       ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
                        if (ret)
                                return ret;
                        break;
@@ -826,7 +827,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
        struct bch_fs *c = trans->c;
        int ret = 0, u64s_delta = 0;
 
-       trans_for_each_update(trans, i) {
+       for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
+               struct btree_insert_entry *i = trans->updates + idx;
                if (i->cached)
                        continue;
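
Iterating trans->updates by index rather than by btree_insert_entry pointer
matters here presumably because running triggers can append further updates,
and growing the array may move it; an index stays valid across a move where a
saved pointer would dangle. In miniature:

	/* unsafe if f() may grow/move arr:
	 *	for (struct elem *e = arr; e < arr + n; e++) f(e);
	 * the index form refetches the base pointer every iteration: */
	for (unsigned i = 0; i < n; i++)
		f(arr + i);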
 
index c4a5e83a56a436548263445e3b7a329644757cc7..6030c396754f6f494c3c137abd313f6bf80c2ffb 100644 (file)
@@ -21,6 +21,7 @@
 #include "keylist.h"
 #include "recovery_passes.h"
 #include "replicas.h"
+#include "sb-members.h"
 #include "super-io.h"
 #include "trace.h"
 
@@ -605,6 +606,26 @@ static void btree_update_add_key(struct btree_update *as,
        bch2_keylist_push(keys);
 }
 
+static bool btree_update_new_nodes_marked_sb(struct btree_update *as)
+{
+       for_each_keylist_key(&as->new_keys, k)
+               if (!bch2_dev_btree_bitmap_marked(as->c, bkey_i_to_s_c(k)))
+                       return false;
+       return true;
+}
+
+static void btree_update_new_nodes_mark_sb(struct btree_update *as)
+{
+       struct bch_fs *c = as->c;
+
+       mutex_lock(&c->sb_lock);
+       for_each_keylist_key(&as->new_keys, k)
+               bch2_dev_btree_bitmap_mark(c, bkey_i_to_s_c(k));
+
+       bch2_write_super(c);
+       mutex_unlock(&c->sb_lock);
+}
+
 /*
  * The transactional part of an interior btree node update, where we journal the
  * update we did to the interior node and update alloc info:
@@ -662,6 +683,9 @@ static void btree_update_nodes_written(struct btree_update *as)
        if (ret)
                goto err;
 
+       if (!btree_update_new_nodes_marked_sb(as))
+               btree_update_new_nodes_mark_sb(as);
+
        /*
         * Wait for any in flight writes to finish before we free the old nodes
         * on disk:
@@ -1280,23 +1304,29 @@ static void bch2_btree_set_root_inmem(struct bch_fs *c, struct btree *b)
        bch2_recalc_btree_reserve(c);
 }
 
-static void bch2_btree_set_root(struct btree_update *as,
-                               struct btree_trans *trans,
-                               struct btree_path *path,
-                               struct btree *b)
+static int bch2_btree_set_root(struct btree_update *as,
+                              struct btree_trans *trans,
+                              struct btree_path *path,
+                              struct btree *b,
+                              bool nofail)
 {
        struct bch_fs *c = as->c;
-       struct btree *old;
 
        trace_and_count(c, btree_node_set_root, trans, b);
 
-       old = btree_node_root(c, b);
+       struct btree *old = btree_node_root(c, b);
 
        /*
         * Ensure no one is using the old root while we switch to the
         * new root:
         */
-       bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+       if (nofail) {
+               bch2_btree_node_lock_write_nofail(trans, path, &old->c);
+       } else {
+               int ret = bch2_btree_node_lock_write(trans, path, &old->c);
+               if (ret)
+                       return ret;
+       }
 
        bch2_btree_set_root_inmem(c, b);
 
@@ -1310,6 +1340,7 @@ static void bch2_btree_set_root(struct btree_update *as,
         * depend on the new root would have to update the new root.
         */
        bch2_btree_node_unlock_write(trans, path, old);
+       return 0;
 }
 
 /* Interior node updates: */
@@ -1652,15 +1683,16 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
        if (parent) {
                /* Split a non root node */
                ret = bch2_btree_insert_node(as, trans, path, parent, &as->parent_keys);
-               if (ret)
-                       goto err;
        } else if (n3) {
-               bch2_btree_set_root(as, trans, trans->paths + path, n3);
+               ret = bch2_btree_set_root(as, trans, trans->paths + path, n3, false);
        } else {
                /* Root filled up but didn't need to be split */
-               bch2_btree_set_root(as, trans, trans->paths + path, n1);
+               ret = bch2_btree_set_root(as, trans, trans->paths + path, n1, false);
        }
 
+       if (ret)
+               goto err;
+
        if (n3) {
                bch2_btree_update_get_open_buckets(as, n3);
                bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
@@ -1863,7 +1895,9 @@ static void __btree_increase_depth(struct btree_update *as, struct btree_trans *
        bch2_keylist_add(&as->parent_keys, &b->key);
        btree_split_insert_keys(as, trans, path_idx, n, &as->parent_keys);
 
-       bch2_btree_set_root(as, trans, path, n);
+       int ret = bch2_btree_set_root(as, trans, path, n, true);
+       BUG_ON(ret);
+
        bch2_btree_update_get_open_buckets(as, n);
        bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
        bch2_trans_node_add(trans, path, n);
@@ -1916,6 +1950,18 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
        BUG_ON(!trans->paths[path].should_be_locked);
        BUG_ON(!btree_node_locked(&trans->paths[path], level));
 
+       /*
+        * Work around a deadlock caused by the btree write buffer not doing
+        * merges and leaving tons of merges for us to do - we really don't need
+        * to be doing merges at all from the interior update path, and if the
+        * interior update path is generating too many new interior updates we
+        * deadlock:
+        */
+       if ((flags & BCH_WATERMARK_MASK) == BCH_WATERMARK_interior_updates)
+               return 0;
+
+       flags &= ~BCH_WATERMARK_MASK;
+
        b = trans->paths[path].l[level].b;
 
        if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
@@ -2061,6 +2107,10 @@ err:
                bch2_path_put(trans, new_path, true);
        bch2_path_put(trans, sib_path, true);
        bch2_trans_verify_locks(trans);
+       if (ret == -BCH_ERR_journal_reclaim_would_deadlock)
+               ret = 0;
+       if (!ret)
+               ret = bch2_trans_relock(trans);
        return ret;
 err_free_update:
        bch2_btree_node_free_never_used(as, trans, n);
@@ -2106,12 +2156,13 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
        if (parent) {
                bch2_keylist_add(&as->parent_keys, &n->key);
                ret = bch2_btree_insert_node(as, trans, iter->path, parent, &as->parent_keys);
-               if (ret)
-                       goto err;
        } else {
-               bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n);
+               ret = bch2_btree_set_root(as, trans, btree_iter_path(trans, iter), n, false);
        }
 
+       if (ret)
+               goto err;
+
        bch2_btree_update_get_open_buckets(as, n);
        bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
 
index baf63e2fddb64cd8f4c745d0cc80c864c86ffaa6..36a6f42aba5e6fc5a36418c1d7565e07e8f90420 100644 (file)
@@ -316,6 +316,16 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
                            bpos_gt(k->k.k.p, path->l[0].b->key.k.p)) {
                                bch2_btree_node_unlock_write(trans, path, path->l[0].b);
                                write_locked = false;
+
+                               ret = lockrestart_do(trans,
+                                       bch2_btree_iter_traverse(&iter) ?:
+                                       bch2_foreground_maybe_merge(trans, iter.path, 0,
+                                                       BCH_WATERMARK_reclaim|
+                                                       BCH_TRANS_COMMIT_journal_reclaim|
+                                                       BCH_TRANS_COMMIT_no_check_rw|
+                                                       BCH_TRANS_COMMIT_no_enospc));
+                               if (ret)
+                                       goto err;
                        }
                }
 
@@ -382,10 +392,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 
                        ret = commit_do(trans, NULL, NULL,
                                        BCH_WATERMARK_reclaim|
+                                       BCH_TRANS_COMMIT_journal_reclaim|
                                        BCH_TRANS_COMMIT_no_check_rw|
                                        BCH_TRANS_COMMIT_no_enospc|
-                                       BCH_TRANS_COMMIT_no_journal_res|
-                                       BCH_TRANS_COMMIT_journal_reclaim,
+                                       BCH_TRANS_COMMIT_no_journal_res,
                                        btree_write_buffered_insert(trans, i));
                        if (ret)
                                goto err;
index 00aaf4bb513974a6b9c0353ea9445f92671c32eb..f9af5adabe83638eea7ffd15ea2f730085f81cc1 100644 (file)
@@ -395,14 +395,6 @@ static inline const char *bch2_data_type_str(enum bch_data_type type)
                : "(invalid data type)";
 }
 
-static inline void bch2_prt_data_type(struct printbuf *out, enum bch_data_type type)
-{
-       if (type < BCH_DATA_NR)
-               prt_str(out, __bch2_data_types[type]);
-       else
-               prt_printf(out, "(invalid data type %u)", type);
-}
-
 /* disk reservations: */
 
 static inline void bch2_disk_reservation_put(struct bch_fs *c,
index 4701457f6381ca820e17a12707009c272ed5b4ac..7ed779b411f61e4e3f05a703ce9e091474237939 100644 (file)
@@ -429,15 +429,20 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
                                extent_nonce(version, crc_old), bio);
 
        if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
-               bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
-                       "expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
-                       __func__,
-                       crc_old.csum.hi,
-                       crc_old.csum.lo,
-                       merged.hi,
-                       merged.lo,
-                       bch2_csum_types[crc_old.csum_type],
-                       bch2_csum_types[new_csum_type]);
+               struct printbuf buf = PRINTBUF;
+               prt_printf(&buf, "checksum error in %s() (memory corruption or bug?)\n"
+                          "expected %0llx:%0llx got %0llx:%0llx (old type ",
+                          __func__,
+                          crc_old.csum.hi,
+                          crc_old.csum.lo,
+                          merged.hi,
+                          merged.lo);
+               bch2_prt_csum_type(&buf, crc_old.csum_type);
+               prt_str(&buf, " new type ");
+               bch2_prt_csum_type(&buf, new_csum_type);
+               prt_str(&buf, ")");
+               bch_err(c, "%s", buf.buf);
+               printbuf_exit(&buf);
                return -EIO;
        }
 
index 1b8c2c1016dc6347ce12ef3161d4723835dfa56e..e40499fde9a4019fc75d62f825e9e5583caf803b 100644 (file)
@@ -61,11 +61,12 @@ static inline void bch2_csum_err_msg(struct printbuf *out,
                                     struct bch_csum expected,
                                     struct bch_csum got)
 {
-       prt_printf(out, "checksum error: got ");
+       prt_str(out, "checksum error, type ");
+       bch2_prt_csum_type(out, type);
+       prt_str(out, ": got ");
        bch2_csum_to_text(out, type, got);
        prt_str(out, " should be ");
        bch2_csum_to_text(out, type, expected);
-       prt_printf(out, " type %s", bch2_csum_types[type]);
 }
 
 int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
index 58c2eb45570ff022764720f9beb10ecfa2926367..607fd5e232c902dbb39f3dac84ea2e214e6b106c 100644 (file)
@@ -47,14 +47,6 @@ static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
        return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
 }
 
-static inline void bch2_prt_compression_type(struct printbuf *out, enum bch_compression_type type)
-{
-       if (type < BCH_COMPRESSION_TYPE_NR)
-               prt_str(out, __bch2_compression_types[type]);
-       else
-               prt_printf(out, "(invalid compression type %u)", type);
-}
-
 int bch2_bio_uncompress_inplace(struct bch_fs *, struct bio *,
                                struct bch_extent_crc_unpacked *);
 int bch2_bio_uncompress(struct bch_fs *, struct bio *, struct bio *,
index 082075244e16aedc824249b239ecec6efb1a07fa..556a217108d32ef35890da0463751afc688186f3 100644 (file)
@@ -131,29 +131,33 @@ fsck_err:
 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
                         struct bkey_s_c k)
 {
-       const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
-       unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
+       const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
+       struct bch_stripe s = {};
+
+       memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));
+
+       unsigned nr_data = s.nr_blocks - s.nr_redundant;
+
+       prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
+                  s.algorithm,
+                  le16_to_cpu(s.sectors),
+                  nr_data,
+                  s.nr_redundant);
+       bch2_prt_csum_type(out, s.csum_type);
+       prt_printf(out, " gran %u", 1U << s.csum_granularity_bits);
+
+       for (unsigned i = 0; i < s.nr_blocks; i++) {
+               const struct bch_extent_ptr *ptr = sp->ptrs + i;
+
+               if ((void *) ptr >= bkey_val_end(k))
+                       break;
+
+               bch2_extent_ptr_to_text(out, c, ptr);
 
-       prt_printf(out, "algo %u sectors %u blocks %u:%u csum %u gran %u",
-              s->algorithm,
-              le16_to_cpu(s->sectors),
-              nr_data,
-              s->nr_redundant,
-              s->csum_type,
-              1U << s->csum_granularity_bits);
-
-       for (i = 0; i < s->nr_blocks; i++) {
-               const struct bch_extent_ptr *ptr = s->ptrs + i;
-               struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-               u32 offset;
-               u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
-
-               prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
-               if (i < nr_data)
-                       prt_printf(out, "#%u", stripe_blockcount_get(s, i));
-               prt_printf(out, " gen %u", ptr->gen);
-               if (ptr_stale(ca, ptr))
-                       prt_printf(out, " stale");
+               if (s.csum_type < BCH_CSUM_NR &&
+                   i < nr_data &&
+                   stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
+                       prt_printf(out, "#%u", stripe_blockcount_get(sp, i));
        }
 }
 
@@ -607,10 +611,8 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
                                struct printbuf err = PRINTBUF;
                                struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
 
-                               prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
-                                          want.hi, want.lo,
-                                          got.hi, got.lo,
-                                          bch2_csum_types[v->csum_type]);
+                               prt_str(&err, "stripe ");
+                               bch2_csum_err_msg(&err, v->csum_type, want, got);
                                prt_printf(&err, "  for %ps at %u of\n  ", (void *) _RET_IP_, i);
                                bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
                                bch_err_ratelimited(ca, "%s", err.buf);
index f4369b02e805f0a24572a8cf87d18867c3d3301a..f042616888b0a1d47d7797e987c912c58d0945b3 100644 (file)
@@ -32,6 +32,8 @@ static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
 static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
                                          unsigned dev, unsigned csum_idx)
 {
+       EBUG_ON(s->csum_type >= BCH_CSUM_NR);
+
        unsigned csum_bytes = bch_crc_bytes[s->csum_type];
 
        return sizeof(struct bch_stripe) +
index 0e3ca99fbd2de1522c5e8dea8ac313232f60f7f3..1a331e539204852d4db9e7620df0282abe262f1e 100644 (file)
@@ -998,7 +998,9 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
                        prt_str(out, " cached");
                if (ptr->unwritten)
                        prt_str(out, " unwritten");
-               if (ca && ptr_stale(ca, ptr))
+               if (b >= ca->mi.first_bucket &&
+                   b <  ca->mi.nbuckets &&
+                   ptr_stale(ca, ptr))
                        prt_printf(out, " stale");
        }
 }
@@ -1028,11 +1030,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                        struct bch_extent_crc_unpacked crc =
                                bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 
-                       prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress ",
+                       prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum ",
                               crc.compressed_size,
                               crc.uncompressed_size,
-                              crc.offset, crc.nonce,
-                              bch2_csum_types[crc.csum_type]);
+                              crc.offset, crc.nonce);
+                       bch2_prt_csum_type(out, crc.csum_type);
+                       prt_str(out, " compress ");
                        bch2_prt_compression_type(out, crc.compression_type);
                        break;
                }
index f49e6c0f0f6835968202ab2f1fa194933945554a..b889370a5088113a2417787bdbb4b98a16597063 100644 (file)
@@ -387,6 +387,8 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
        ret = dio->op.error ?: ((long) dio->written << 9);
        bio_put(&dio->op.wbio.bio);
 
+       bch2_write_ref_put(dio->op.c, BCH_WRITE_REF_dio_write);
+
        /* inode->i_dio_count is our ref on inode and thus bch_fs */
        inode_dio_end(&inode->v);
 
@@ -590,22 +592,25 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
        prefetch(&inode->ei_inode);
        prefetch((void *) &inode->ei_inode + 64);
 
+       if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write))
+               return -EROFS;
+
        inode_lock(&inode->v);
 
        ret = generic_write_checks(req, iter);
        if (unlikely(ret <= 0))
-               goto err;
+               goto err_put_write_ref;
 
        ret = file_remove_privs(file);
        if (unlikely(ret))
-               goto err;
+               goto err_put_write_ref;
 
        ret = file_update_time(file);
        if (unlikely(ret))
-               goto err;
+               goto err_put_write_ref;
 
        if (unlikely((req->ki_pos|iter->count) & (block_bytes(c) - 1)))
-               goto err;
+               goto err_put_write_ref;
 
        inode_dio_begin(&inode->v);
        bch2_pagecache_block_get(inode);
@@ -645,7 +650,7 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
        }
 
        ret = bch2_dio_write_loop(dio);
-err:
+out:
        if (locked)
                inode_unlock(&inode->v);
        return ret;
@@ -653,7 +658,9 @@ err_put_bio:
        bch2_pagecache_block_put(inode);
        bio_put(bio);
        inode_dio_end(&inode->v);
-       goto err;
+err_put_write_ref:
+       bch2_write_ref_put(c, BCH_WRITE_REF_dio_write);
+       goto out;
 }
 
 void bch2_fs_fs_io_direct_exit(struct bch_fs *c)
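
Note the write-ref lifetime: bch2_direct_write() takes
BCH_WRITE_REF_dio_write before submitting, and the ref is only dropped from
bch2_dio_write_done(), so the filesystem cannot finish going read-only while
an async DIO write is still in flight. Shape of the pattern:

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_dio_write))
		return -EROFS;
	submit(op);	/* completion path calls
			   bch2_write_ref_put(c, BCH_WRITE_REF_dio_write) */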
index 8c70123b6a0c809b6d50040593281c2e9c115828..20b40477425f49449499b11d63930d92e10ed3ba 100644 (file)
@@ -174,18 +174,18 @@ void __bch2_i_sectors_acct(struct bch_fs *c, struct bch_inode_info *inode,
 static int bch2_flush_inode(struct bch_fs *c,
                            struct bch_inode_info *inode)
 {
-       struct bch_inode_unpacked u;
-       int ret;
-
        if (c->opts.journal_flush_disabled)
                return 0;
 
-       ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
-       if (ret)
-               return ret;
+       if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync))
+               return -EROFS;
 
-       return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
-               bch2_inode_flush_nocow_writes(c, inode);
+       struct bch_inode_unpacked u;
+       int ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u) ?:
+                 bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+                 bch2_inode_flush_nocow_writes(c, inode);
+       bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
+       return ret;
 }
 
 int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
index 725fcf46f6312c267c2a7c05f1eaa6aed5fb83e7..9aa28b52ab926c567f49e0bb68b9c6791fb326e5 100644 (file)
@@ -247,7 +247,7 @@ static void journal_entry_err_msg(struct printbuf *out,
 
        if (entry) {
                prt_str(out, " type=");
-               prt_str(out, bch2_jset_entry_types[entry->type]);
+               bch2_prt_jset_entry_type(out, entry->type);
        }
 
        if (!jset) {
@@ -403,7 +403,8 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
        jset_entry_for_each_key(entry, k) {
                if (!first) {
                        prt_newline(out);
-                       prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+                       bch2_prt_jset_entry_type(out, entry->type);
+                       prt_str(out, ": ");
                }
                prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
                bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
@@ -563,9 +564,9 @@ static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);
 
-       prt_printf(out, "type=%s v=%llu",
-              bch2_fs_usage_types[u->entry.btree_id],
-              le64_to_cpu(u->v));
+       prt_str(out, "type=");
+       bch2_prt_fs_usage_type(out, u->entry.btree_id);
+       prt_printf(out, " v=%llu", le64_to_cpu(u->v));
 }
 
 static int journal_entry_data_usage_validate(struct bch_fs *c,
@@ -827,11 +828,11 @@ int bch2_journal_entry_validate(struct bch_fs *c,
 void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
                                struct jset_entry *entry)
 {
+       bch2_prt_jset_entry_type(out, entry->type);
+
        if (entry->type < BCH_JSET_ENTRY_NR) {
-               prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+               prt_str(out, ": ");
                bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
-       } else {
-               prt_printf(out, "(unknown type %u)", entry->type);
        }
 }
 
index e1800c4119b5fbaf8ebbfcdaef996e1dd9c35ca8..bb068fd724656cf8307d14022ca537f918b65747 100644 (file)
@@ -43,7 +43,7 @@ const char * const __bch2_btree_ids[] = {
        NULL
 };
 
-const char * const bch2_csum_types[] = {
+static const char * const __bch2_csum_types[] = {
        BCH_CSUM_TYPES()
        NULL
 };
@@ -53,7 +53,7 @@ const char * const bch2_csum_opts[] = {
        NULL
 };
 
-const char * const __bch2_compression_types[] = {
+static const char * const __bch2_compression_types[] = {
        BCH_COMPRESSION_TYPES()
        NULL
 };
@@ -83,18 +83,39 @@ const char * const bch2_member_states[] = {
        NULL
 };
 
-const char * const bch2_jset_entry_types[] = {
+static const char * const __bch2_jset_entry_types[] = {
        BCH_JSET_ENTRY_TYPES()
        NULL
 };
 
-const char * const bch2_fs_usage_types[] = {
+static const char * const __bch2_fs_usage_types[] = {
        BCH_FS_USAGE_TYPES()
        NULL
 };
 
 #undef x
 
+static void prt_str_opt_boundscheck(struct printbuf *out, const char * const opts[],
+                                   unsigned nr, const char *type, unsigned idx)
+{
+       if (idx < nr)
+               prt_str(out, opts[idx]);
+       else
+               prt_printf(out, "(unknown %s %u)", type, idx);
+}
+
+#define PRT_STR_OPT_BOUNDSCHECKED(name, type)                                  \
+void bch2_prt_##name(struct printbuf *out, type t)                             \
+{                                                                              \
+       prt_str_opt_boundscheck(out, __bch2_##name##s, ARRAY_SIZE(__bch2_##name##s) - 1, #name, t);\
+}
+
+PRT_STR_OPT_BOUNDSCHECKED(jset_entry_type,     enum bch_jset_entry_type);
+PRT_STR_OPT_BOUNDSCHECKED(fs_usage_type,       enum bch_fs_usage_type);
+PRT_STR_OPT_BOUNDSCHECKED(data_type,           enum bch_data_type);
+PRT_STR_OPT_BOUNDSCHECKED(csum_type,           enum bch_csum_type);
+PRT_STR_OPT_BOUNDSCHECKED(compression_type,    enum bch_compression_type);
+
 static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
                                     struct printbuf *err)
 {
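
For reference, each PRT_STR_OPT_BOUNDSCHECKED() instantiation above expands
to a small bounds-checked printer; expanding the csum_type instance by hand:

	void bch2_prt_csum_type(struct printbuf *out, enum bch_csum_type t)
	{
		prt_str_opt_boundscheck(out, __bch2_csum_types,
					ARRAY_SIZE(__bch2_csum_types) - 1,
					"csum_type", t);
	}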
index 1ac4135cca1c3dccc71a75a0d062ee30df33111c..84e452835a17d84d36c4d0f3906501578bf702d3 100644 (file)
@@ -16,18 +16,20 @@ extern const char * const bch2_version_upgrade_opts[];
 extern const char * const bch2_sb_features[];
 extern const char * const bch2_sb_compat[];
 extern const char * const __bch2_btree_ids[];
-extern const char * const bch2_csum_types[];
 extern const char * const bch2_csum_opts[];
-extern const char * const __bch2_compression_types[];
 extern const char * const bch2_compression_opts[];
 extern const char * const bch2_str_hash_types[];
 extern const char * const bch2_str_hash_opts[];
 extern const char * const __bch2_data_types[];
 extern const char * const bch2_member_states[];
-extern const char * const bch2_jset_entry_types[];
-extern const char * const bch2_fs_usage_types[];
 extern const char * const bch2_d_types[];
 
+void bch2_prt_jset_entry_type(struct printbuf *,       enum bch_jset_entry_type);
+void bch2_prt_fs_usage_type(struct printbuf *,         enum bch_fs_usage_type);
+void bch2_prt_data_type(struct printbuf *,             enum bch_data_type);
+void bch2_prt_csum_type(struct printbuf *,             enum bch_csum_type);
+void bch2_prt_compression_type(struct printbuf *,      enum bch_compression_type);
+
 static inline const char *bch2_d_type_str(unsigned d_type)
 {
        return (d_type < BCH_DT_MAX ? bch2_d_types[d_type] : NULL) ?: "(bad d_type)";
index cb501460d6152b31a4ae57d9dea6db0792c47c0f..0cec0f7d9703520a3cf24bcc2ca2ce7f86285ebc 100644 (file)
@@ -44,7 +44,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c)
 
        set_bit(BCH_FS_may_go_rw, &c->flags);
 
-       if (keys->nr || c->opts.fsck || !c->sb.clean)
+       if (keys->nr || c->opts.fsck || !c->sb.clean || c->recovery_passes_explicit)
                return bch2_fs_read_write_early(c);
        return 0;
 }
index d6f81179c3a29b6e884f92c94628d512626c6b45..a98ef940b7a3280bd0da0474ef4f387fdcd0cc18 100644 (file)
          BCH_FSCK_ERR_subvol_fs_path_parent_wrong)             \
        x(btree_subvolume_children,                             \
          BIT_ULL(BCH_RECOVERY_PASS_check_subvols),             \
-         BCH_FSCK_ERR_subvol_children_not_set)
+         BCH_FSCK_ERR_subvol_children_not_set)                 \
+       x(mi_btree_bitmap,                                      \
+         BIT_ULL(BCH_RECOVERY_PASS_check_allocations),         \
+         BCH_FSCK_ERR_btree_bitmap_not_marked)
 
 #define DOWNGRADE_TABLE()
 
index d7d609131030a817c5fa2867fc3cee5796fb898c..4ca6e7b0d8aaed2c4b95fff82c2ed964c6a102ad 100644 (file)
        x(bucket_gens_nonzero_for_invalid_buckets,              122)    \
        x(need_discard_freespace_key_to_invalid_dev_bucket,     123)    \
        x(need_discard_freespace_key_bad,                       124)    \
-       x(backpointer_pos_wrong,                                125)    \
+       x(backpointer_bucket_offset_wrong,                      125)    \
        x(backpointer_to_missing_device,                        126)    \
        x(backpointer_to_missing_alloc,                         127)    \
        x(backpointer_to_missing_ptr,                           128)    \
        x(btree_ptr_v2_min_key_bad,                             262)    \
        x(btree_root_unreadable_and_scan_found_nothing,         263)    \
        x(snapshot_node_missing,                                264)    \
-       x(dup_backpointer_to_bad_csum_extent,                   265)
+       x(dup_backpointer_to_bad_csum_extent,                   265)    \
+       x(btree_bitmap_not_marked,                              266)
 
 enum bch_sb_error_id {
 #define x(t, n) BCH_FSCK_ERR_##t = n,
index eff5ce18c69c0600047c1fef688a5980af33c678..522a969345e5289ac87cf53b5f5a735e3b5f8d67 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "btree_cache.h"
 #include "disk_groups.h"
 #include "opts.h"
 #include "replicas.h"
@@ -426,3 +427,55 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);
 }
+
+/*
+ * Per member "range has btree nodes" bitmap:
+ *
+ * This is so that if we ever have to run the btree node scan to repair we don't
+ * have to scan full devices:
+ */
+
+bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
+{
+       bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
+               if (!bch2_dev_btree_bitmap_marked_sectors(bch_dev_bkey_exists(c, ptr->dev),
+                                                         ptr->offset, btree_sectors(c)))
+                       return false;
+       return true;
+}
+
+static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, unsigned dev,
+                               u64 start, unsigned sectors)
+{
+       struct bch_member *m = __bch2_members_v2_get_mut(mi, dev);
+       u64 bitmap = le64_to_cpu(m->btree_allocated_bitmap);
+
+       u64 end = start + sectors;
+
+       int resize = ilog2(roundup_pow_of_two(end)) - (m->btree_bitmap_shift + 6);
+       if (resize > 0) {
+               u64 new_bitmap = 0;
+
+               for (unsigned i = 0; i < 64; i++)
+                       if (bitmap & BIT_ULL(i))
+                               new_bitmap |= BIT_ULL(i >> resize);
+               bitmap = new_bitmap;
+               m->btree_bitmap_shift += resize;
+       }
+
+       for (unsigned bit = start >> m->btree_bitmap_shift;
+            (u64) bit << m->btree_bitmap_shift < end;
+            bit++)
+               bitmap |= BIT_ULL(bit);
+
+       m->btree_allocated_bitmap = cpu_to_le64(bitmap);
+}
+
+void bch2_dev_btree_bitmap_mark(struct bch_fs *c, struct bkey_s_c k)
+{
+       lockdep_assert_held(&c->sb_lock);
+
+       struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+       bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
+               __bch2_dev_btree_bitmap_mark(mi, ptr->dev, ptr->offset, btree_sectors(c));
+}
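
Each of the 64 bitmap bits covers 2^btree_bitmap_shift sectors, so a member's
bitmap spans 2^(shift + 6) sectors in total; marking past the end grows the
shift and folds existing bits down. Worked numbers, assuming shift = 10:

	/* coverage = 64 << 10 = 65536 sectors; marking end = 200000:
	 *	resize = ilog2(roundup_pow_of_two(200000)) - (10 + 6)
	 *	       = 18 - 16 = 2
	 * shift becomes 12, old bit i folds into bit i >> 2, and the
	 * bitmap now covers 64 << 12 = 262144 sectors. */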
index be0a941832715a32634b8c3dea60bbf1685a672f..b27c3e4467cf288d67587143e5343d57d5aa41c9 100644 (file)
@@ -3,6 +3,7 @@
 #define _BCACHEFS_SB_MEMBERS_H
 
 #include "darray.h"
+#include "bkey_types.h"
 
 extern char * const bch2_member_error_strs[];
 
@@ -220,6 +221,8 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
                        : 1,
                .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
                .valid          = bch2_member_exists(mi),
+               .btree_bitmap_shift     = mi->btree_bitmap_shift,
+               .btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
        };
 }
 
@@ -228,4 +231,22 @@ void bch2_sb_members_from_cpu(struct bch_fs *);
 void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
 void bch2_dev_errors_reset(struct bch_dev *);
 
+static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
+{
+       u64 end = start + sectors;
+
+       if (end > 64ULL << ca->mi.btree_bitmap_shift)
+               return false;
+
+       for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
+            (u64) bit << ca->mi.btree_bitmap_shift < end;
+            bit++)
+               if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
+                       return false;
+       return true;
+}
+
+bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
+void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);
+
 #endif /* _BCACHEFS_SB_MEMBERS_H */
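
A worked check against bch2_dev_btree_bitmap_marked_sectors() above, again
assuming shift = 10:

	/* start = 4096, sectors = 512 => end = 4608:
	 * first bit = start >> 10 = 4; the loop stops at bit 5 since
	 * (5 << 10) = 5120 >= 4608, so only bit 4 is tested.
	 * Bit 4 clear => [4096, 4608) was never allocated to btree
	 * nodes, and read_btree_nodes_worker() may skip it. */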
index 5eee055ee2721a3967fb31ca38adcbc5672521d6..08ea3dbbbe97ce11833fe79baa5fd87935919339 100644 (file)
@@ -700,8 +700,11 @@ retry:
                return -ENOMEM;
 
        sb->sb_name = kstrdup(path, GFP_KERNEL);
-       if (!sb->sb_name)
-               return -ENOMEM;
+       if (!sb->sb_name) {
+               ret = -ENOMEM;
+               prt_printf(&err, "error allocating memory for sb_name");
+               goto err;
+       }
 
 #ifndef __KERNEL__
        if (opt_get(*opts, direct_io) == false)
index ec784d975f6655a378207692644975e53271ddca..11bcef170c2c22644108e9fbec9b24eaf478059c 100644 (file)
@@ -37,6 +37,8 @@ struct bch_member_cpu {
        u8                      durability;
        u8                      freespace_initialized;
        u8                      valid;
+       u8                      btree_bitmap_shift;
+       u64                     btree_allocated_bitmap;
 };
 
 #endif /* _BCACHEFS_SUPER_TYPES_H */
index b18b0cc81b594ad6144b43599418418a1caf5e95..5be92fe3f4ea4e115512f0b7a31482919406a507 100644 (file)
@@ -25,6 +25,7 @@
 #include "ec.h"
 #include "inode.h"
 #include "journal.h"
+#include "journal_reclaim.h"
 #include "keylist.h"
 #include "move.h"
 #include "movinggc.h"
@@ -138,6 +139,7 @@ do {                                                                        \
 write_attribute(trigger_gc);
 write_attribute(trigger_discards);
 write_attribute(trigger_invalidates);
+write_attribute(trigger_journal_flush);
 write_attribute(prune_cache);
 write_attribute(btree_wakeup);
 rw_attribute(btree_gc_periodic);
@@ -500,7 +502,7 @@ STORE(bch2_fs)
 
        /* Debugging: */
 
-       if (!test_bit(BCH_FS_rw, &c->flags))
+       if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
                return -EROFS;
 
        if (attr == &sysfs_prune_cache) {
@@ -533,6 +535,11 @@ STORE(bch2_fs)
        if (attr == &sysfs_trigger_invalidates)
                bch2_do_invalidates(c);
 
+       if (attr == &sysfs_trigger_journal_flush) {
+               bch2_journal_flush_all_pins(&c->journal);
+               bch2_journal_meta(&c->journal);
+       }
+
 #ifdef CONFIG_BCACHEFS_TESTS
        if (attr == &sysfs_perf_test) {
                char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
@@ -553,6 +560,7 @@ STORE(bch2_fs)
                        size = ret;
        }
 #endif
+       bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
        return size;
 }
 SYSFS_OPS(bch2_fs);
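
Replacing the bare test_bit(BCH_FS_rw, ...) with a tryget/put pair turns a
point-in-time check into a guarantee held for the whole store: while the
sysfs ref is held, the filesystem cannot complete its transition to
read-only. The guard shape:

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
		return -EROFS;		/* already (going) read-only */
	/* ... perform the write-side operation ... */
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);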
@@ -651,6 +659,7 @@ struct attribute *bch2_fs_internal_files[] = {
        &sysfs_trigger_gc,
        &sysfs_trigger_discards,
        &sysfs_trigger_invalidates,
+       &sysfs_trigger_journal_flush,
        &sysfs_prune_cache,
        &sysfs_btree_wakeup,
 
index beedd6ed64d39bd7f53ad814c22df36b80b96235..257d044bca9158c95e205ff22ff0d662d0d7f074 100644 (file)
@@ -3464,6 +3464,14 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
        if (root_id != BTRFS_TREE_LOG_OBJECTID) {
                struct btrfs_ref generic_ref = { 0 };
 
+               /*
+                * Assert that the extent buffer was not zeroed out due to
+                * EXTENT_BUFFER_ZONED_ZEROOUT. Refer to
+                * btrfs_clear_buffer_dirty() and btree_csum_one_bio() for
+                * details.
+                */
+               ASSERT(btrfs_header_bytenr(buf) != 0);
+
                btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
                                       buf->start, buf->len, parent,
                                       btrfs_header_owner(buf));
index 61594eaf1f8969fc3ba04604e3470a8932450767..2776112dbdf8d471a7cb4d515fdd443e6fadbac5 100644 (file)
@@ -681,31 +681,21 @@ static void end_bbio_data_read(struct btrfs_bio *bbio)
 int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array,
                           gfp_t extra_gfp)
 {
+       const gfp_t gfp = GFP_NOFS | extra_gfp;
        unsigned int allocated;
 
        for (allocated = 0; allocated < nr_pages;) {
                unsigned int last = allocated;
 
-               allocated = alloc_pages_bulk_array(GFP_NOFS | extra_gfp,
-                                                  nr_pages, page_array);
-
-               if (allocated == nr_pages)
-                       return 0;
-
-               /*
-                * During this iteration, no page could be allocated, even
-                * though alloc_pages_bulk_array() falls back to alloc_page()
-                * if  it could not bulk-allocate. So we must be out of memory.
-                */
-               if (allocated == last) {
+               allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array);
+               if (unlikely(allocated == last)) {
+                       /* No progress, fail and do cleanup. */
                        for (int i = 0; i < allocated; i++) {
                                __free_page(page_array[i]);
                                page_array[i] = NULL;
                        }
                        return -ENOMEM;
                }
-
-               memalloc_retry_wait(GFP_NOFS);
        }
        return 0;
 }
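The rewritten loop depends on the alloc_pages_bulk_array() contract: the allocator fills only the NULL entries of the array and returns the new total of populated slots, so a pass that returns an unchanged count made no progress and genuinely indicates memory exhaustion. A minimal kernel-style sketch of that contract (cleanup of the partially filled array elided):

	unsigned int filled = 0;

	while (filled < nr_pages) {
		unsigned int before = filled;

		/* Only NULL entries are filled; returns the new total. */
		filled = alloc_pages_bulk_array(GFP_NOFS, nr_pages, page_array);
		if (filled == before)
			return -ENOMEM;	/* no page gained this pass */
	}

This is also why the explicit memalloc_retry_wait() could go: as the removed comment notes, alloc_pages_bulk_array() already falls back to alloc_page() internally, so a stalled count is a reliable out-of-memory signal rather than a transient hiccup.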
@@ -4154,7 +4144,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
         * The actual zeroout of the buffer will happen later in
         * btree_csum_one_bio.
         */
-       if (btrfs_is_zoned(fs_info)) {
+       if (btrfs_is_zoned(fs_info) && test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
                set_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags);
                return;
        }
@@ -4193,6 +4183,7 @@ void set_extent_buffer_dirty(struct extent_buffer *eb)
        num_folios = num_extent_folios(eb);
        WARN_ON(atomic_read(&eb->refs) == 0);
        WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
+       WARN_ON(test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags));
 
        if (!was_dirty) {
                bool subpage = eb->fs_info->nodesize < PAGE_SIZE;
index 1340d77124ae4db09c3b96548acdf1cd8a6c3fb0..ee9caf7916fb95931e08e41467cc97ddba950c0b 100644 (file)
@@ -795,8 +795,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
        ihold(inode);
 
        if (wbc->sync_mode == WB_SYNC_NONE &&
-           ceph_inode_to_fs_client(inode)->write_congested)
+           ceph_inode_to_fs_client(inode)->write_congested) {
+               redirty_page_for_writepage(wbc, page);
                return AOP_WRITEPAGE_ACTIVATE;
+       }
 
        wait_on_page_fscache(page);
 
index 55051ad09c19197e9b12d5d17068d20b04d6d3e6..c4941ba245ac3d0d3ae4e0f2598838b4ceb69ca9 100644 (file)
@@ -4783,13 +4783,13 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
 
                        doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
                              ceph_vinop(inode));
-                       spin_lock(&mdsc->cap_unlink_delay_lock);
+                       spin_lock(&mdsc->cap_delay_lock);
                        ci->i_ceph_flags |= CEPH_I_FLUSH;
                        if (!list_empty(&ci->i_cap_delay_list))
                                list_del_init(&ci->i_cap_delay_list);
                        list_add_tail(&ci->i_cap_delay_list,
                                      &mdsc->cap_unlink_delay_list);
-                       spin_unlock(&mdsc->cap_unlink_delay_lock);
+                       spin_unlock(&mdsc->cap_delay_lock);
 
                        /*
                         * Fire the work immediately, because the MDS maybe
index 3ab9c268a8bb398b779cc93d3da98f3d13df8fe3..360b686c3c67cfd1f256c656642957f6ca278427 100644 (file)
@@ -2504,7 +2504,7 @@ static void ceph_cap_unlink_work(struct work_struct *work)
        struct ceph_client *cl = mdsc->fsc->client;
 
        doutc(cl, "begin\n");
-       spin_lock(&mdsc->cap_unlink_delay_lock);
+       spin_lock(&mdsc->cap_delay_lock);
        while (!list_empty(&mdsc->cap_unlink_delay_list)) {
                struct ceph_inode_info *ci;
                struct inode *inode;
@@ -2516,15 +2516,15 @@ static void ceph_cap_unlink_work(struct work_struct *work)
 
                inode = igrab(&ci->netfs.inode);
                if (inode) {
-                       spin_unlock(&mdsc->cap_unlink_delay_lock);
+                       spin_unlock(&mdsc->cap_delay_lock);
                        doutc(cl, "on %p %llx.%llx\n", inode,
                              ceph_vinop(inode));
                        ceph_check_caps(ci, CHECK_CAPS_FLUSH);
                        iput(inode);
-                       spin_lock(&mdsc->cap_unlink_delay_lock);
+                       spin_lock(&mdsc->cap_delay_lock);
                }
        }
-       spin_unlock(&mdsc->cap_unlink_delay_lock);
+       spin_unlock(&mdsc->cap_delay_lock);
        doutc(cl, "done\n");
 }
 
@@ -5404,7 +5404,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        INIT_LIST_HEAD(&mdsc->cap_wait_list);
        spin_lock_init(&mdsc->cap_delay_lock);
        INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
-       spin_lock_init(&mdsc->cap_unlink_delay_lock);
        INIT_LIST_HEAD(&mdsc->snap_flush_list);
        spin_lock_init(&mdsc->snap_flush_lock);
        mdsc->last_cap_flush_tid = 1;
index 03f8ff00874f727adff8b88cc8d538fc989692d8..b88e804152241281e5d1cd5ca90057d9deff9240 100644 (file)
@@ -461,9 +461,8 @@ struct ceph_mds_client {
        struct delayed_work    delayed_work;  /* delayed work */
        unsigned long    last_renew_caps;  /* last time we renewed our caps */
        struct list_head cap_delay_list;   /* caps with delayed release */
-       spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
        struct list_head cap_unlink_delay_list;  /* caps with delayed release for unlink */
-       spinlock_t       cap_unlink_delay_lock;  /* protects cap_unlink_delay_list */
+       spinlock_t       cap_delay_lock;   /* protects cap_delay_list and cap_unlink_delay_list */
        struct list_head snap_flush_list;  /* cap_snaps ready to flush */
        spinlock_t       snap_flush_lock;
 
index e9df2f87072c687073abe9625e66886934497a02..8502ef68459b9842d090a4ac338591778d1b3b24 100644 (file)
@@ -636,11 +636,18 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
         * each file a separate locking class.  Let's differentiate on
         * whether the file has mmap or not for now.
         *
-        * Both paths of the branch look the same.  They're supposed to
+        * For similar reasons, writable and readonly files are given different
+        * lockdep keys, because the writable file /sys/power/resume may call vfs
+        * lookup helpers for arbitrary paths, and readonly files can be read by
+        * overlayfs from vfs helpers when sysfs is a lower layer of overlayfs.
+        *
+        * All three cases look the same.  They're supposed to
         * look that way and give @of->mutex different static lockdep keys.
         */
        if (has_mmap)
                mutex_init(&of->mutex);
+       else if (file->f_mode & FMODE_WRITE)
+               mutex_init(&of->mutex);
        else
                mutex_init(&of->mutex);
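The three textually identical branches are deliberate: mutex_init() is a macro that declares a distinct static lock_class_key at each expansion site, so every branch puts @of->mutex in its own lockdep class. The in-tree definition is essentially:

	#define mutex_init(mutex)					\
	do {								\
		static struct lock_class_key __key;			\
									\
		__mutex_init((mutex), #mutex, &__key);			\
	} while (0)

With the new branch, mmap-capable, writable, and readonly files get three separate keys even though the three calls read identically.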
 
index fac938f563ad022ce79cdc5f67321bc7f529cc1c..1955481832e03796170ea8f80361bc25cc452ca6 100644 (file)
@@ -3490,11 +3490,13 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                    struct dentry *dentry, const u32 *bmval,
                    int ignore_crossmnt)
 {
+       DECLARE_BITMAP(attr_bitmap, ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
        struct nfsd4_fattr_args args;
        struct svc_fh *tempfh = NULL;
        int starting_len = xdr->buf->len;
        __be32 *attrlen_p, status;
        int attrlen_offset;
+       u32 attrmask[3];
        int err;
        struct nfsd4_compoundres *resp = rqstp->rq_resp;
        u32 minorversion = resp->cstate.minorversion;
@@ -3502,10 +3504,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                .mnt    = exp->ex_path.mnt,
                .dentry = dentry,
        };
-       union {
-               u32             attrmask[3];
-               unsigned long   mask[2];
-       } u;
        unsigned long bit;
        bool file_modified = false;
        u64 size = 0;
@@ -3521,20 +3519,19 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
        /*
         * Make a local copy of the attribute bitmap that can be modified.
         */
-       memset(&u, 0, sizeof(u));
-       u.attrmask[0] = bmval[0];
-       u.attrmask[1] = bmval[1];
-       u.attrmask[2] = bmval[2];
+       attrmask[0] = bmval[0];
+       attrmask[1] = bmval[1];
+       attrmask[2] = bmval[2];
 
        args.rdattr_err = 0;
        if (exp->ex_fslocs.migrated) {
-               status = fattr_handle_absent_fs(&u.attrmask[0], &u.attrmask[1],
-                                               &u.attrmask[2], &args.rdattr_err);
+               status = fattr_handle_absent_fs(&attrmask[0], &attrmask[1],
+                                               &attrmask[2], &args.rdattr_err);
                if (status)
                        goto out;
        }
        args.size = 0;
-       if (u.attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
+       if (attrmask[0] & (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE)) {
                status = nfsd4_deleg_getattr_conflict(rqstp, d_inode(dentry),
                                        &file_modified, &size);
                if (status)
@@ -3553,16 +3550,16 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 
        if (!(args.stat.result_mask & STATX_BTIME))
                /* underlying FS does not offer btime so we can't share it */
-               u.attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
-       if ((u.attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
+               attrmask[1] &= ~FATTR4_WORD1_TIME_CREATE;
+       if ((attrmask[0] & (FATTR4_WORD0_FILES_AVAIL | FATTR4_WORD0_FILES_FREE |
                        FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_MAXNAME)) ||
-           (u.attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
+           (attrmask[1] & (FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE |
                       FATTR4_WORD1_SPACE_TOTAL))) {
                err = vfs_statfs(&path, &args.statfs);
                if (err)
                        goto out_nfserr;
        }
-       if ((u.attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
+       if ((attrmask[0] & (FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FSID)) &&
            !fhp) {
                tempfh = kmalloc(sizeof(struct svc_fh), GFP_KERNEL);
                status = nfserr_jukebox;
@@ -3577,10 +3574,10 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
                args.fhp = fhp;
 
        args.acl = NULL;
-       if (u.attrmask[0] & FATTR4_WORD0_ACL) {
+       if (attrmask[0] & FATTR4_WORD0_ACL) {
                err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
                if (err == -EOPNOTSUPP)
-                       u.attrmask[0] &= ~FATTR4_WORD0_ACL;
+                       attrmask[0] &= ~FATTR4_WORD0_ACL;
                else if (err == -EINVAL) {
                        status = nfserr_attrnotsupp;
                        goto out;
@@ -3592,17 +3589,17 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
        args.context = NULL;
-       if ((u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
-            u.attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
+       if ((attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) ||
+            attrmask[0] & FATTR4_WORD0_SUPPORTED_ATTRS) {
                if (exp->ex_flags & NFSEXP_SECURITY_LABEL)
                        err = security_inode_getsecctx(d_inode(dentry),
                                                &args.context, &args.contextlen);
                else
                        err = -EOPNOTSUPP;
                args.contextsupport = (err == 0);
-               if (u.attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
+               if (attrmask[2] & FATTR4_WORD2_SECURITY_LABEL) {
                        if (err == -EOPNOTSUPP)
-                               u.attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
+                               attrmask[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
                        else if (err)
                                goto out_nfserr;
                }
@@ -3610,8 +3607,8 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
 #endif /* CONFIG_NFSD_V4_SECURITY_LABEL */
 
        /* attrmask */
-       status = nfsd4_encode_bitmap4(xdr, u.attrmask[0],
-                                     u.attrmask[1], u.attrmask[2]);
+       status = nfsd4_encode_bitmap4(xdr, attrmask[0], attrmask[1],
+                                     attrmask[2]);
        if (status)
                goto out;
 
@@ -3620,7 +3617,9 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
        attrlen_p = xdr_reserve_space(xdr, XDR_UNIT);
        if (!attrlen_p)
                goto out_resource;
-       for_each_set_bit(bit, (const unsigned long *)&u.mask,
+       bitmap_from_arr32(attr_bitmap, attrmask,
+                         ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops));
+       for_each_set_bit(bit, attr_bitmap,
                         ARRAY_SIZE(nfsd4_enc_fattr4_encode_ops)) {
                status = nfsd4_enc_fattr4_encode_ops[bit](xdr, &args);
                if (status != nfs_ok)
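Switching from the u32/unsigned long union overlay to bitmap_from_arr32() matters on 64-bit big-endian hosts, where stacking three u32 words onto two unsigned long words scrambles bit positions; the helper performs the width- and endian-correct conversion. A short sketch of its use (the attribute value and the 96-bit size are illustrative):

	DECLARE_BITMAP(attr_bitmap, 96);
	u32 attrmask[3] = { FATTR4_WORD0_SIZE, 0, 0 };
	unsigned long bit;

	/* Endian-safe copy of 3 x u32 into an unsigned long bitmap. */
	bitmap_from_arr32(attr_bitmap, attrmask, 96);

	for_each_set_bit(bit, attr_bitmap, 96)
		; /* each requested attribute is visited exactly once */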
index 13a9d7acf8f8ec151323d18d44a346e060bf0ce2..0ff2491c311d8a669c709fb94eb4a16a54515c68 100644 (file)
@@ -433,8 +433,8 @@ smb2_close_cached_fid(struct kref *ref)
        if (cfid->is_open) {
                rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
                           cfid->fid.volatile_fid);
-               if (rc != -EBUSY && rc != -EAGAIN)
-                       atomic_dec(&cfid->tcon->num_remote_opens);
+               if (rc) /* should we retry on -EBUSY or -EAGAIN? */
+                       cifs_dbg(VFS, "close cached dir rc %d\n", rc);
        }
 
        free_cached_dir(cfid);
index f6a302205f89c456d9fa3adb3dae238deeb97d10..d6669ce4ae87f07415b150eaffcbf429c4fe74bd 100644 (file)
@@ -1077,6 +1077,7 @@ struct cifs_ses {
                                   and after mount option parsing we fill it */
        char *domainName;
        char *password;
+       char *password2; /* When key rotation is used, the new password may be set before the old one expires */
        char workstation_name[CIFS_MAX_WORKSTATION_LEN];
        struct session_key auth_key;
        struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
index 85679ae106fd50a4e3289349e2916204ae3f94fc..4e35970681bf052dc343c23935549600f5ce8859 100644 (file)
@@ -2183,6 +2183,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
        }
 
        ++delim;
+       /* BB consider adding support for password2 (Key Rotation) for multiuser in future */
        ctx->password = kstrndup(delim, len, GFP_KERNEL);
        if (!ctx->password) {
                cifs_dbg(FYI, "Unable to allocate %zd bytes for password\n",
@@ -2206,6 +2207,7 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
                        kfree(ctx->username);
                        ctx->username = NULL;
                        kfree_sensitive(ctx->password);
+                       /* no need to free ctx->password2 since not allocated in this path */
                        ctx->password = NULL;
                        goto out_key_put;
                }
@@ -2317,6 +2319,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
                if (!ses->password)
                        goto get_ses_fail;
        }
+       /* ctx->password freed at unmount */
+       if (ctx->password2) {
+               ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
+               if (!ses->password2)
+                       goto get_ses_fail;
+       }
        if (ctx->domainname) {
                ses->domainName = kstrdup(ctx->domainname, GFP_KERNEL);
                if (!ses->domainName)
index b7bfe705b2c498b83a60131713246bb9d37abf98..6c727d8c31e870ddd0f809db12b21aae76ac80cd 100644 (file)
@@ -162,6 +162,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
        fsparam_string("username", Opt_user),
        fsparam_string("pass", Opt_pass),
        fsparam_string("password", Opt_pass),
+       fsparam_string("password2", Opt_pass2),
        fsparam_string("ip", Opt_ip),
        fsparam_string("addr", Opt_ip),
        fsparam_string("domain", Opt_domain),
@@ -345,6 +346,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
        new_ctx->nodename = NULL;
        new_ctx->username = NULL;
        new_ctx->password = NULL;
+       new_ctx->password2 = NULL;
        new_ctx->server_hostname = NULL;
        new_ctx->domainname = NULL;
        new_ctx->UNC = NULL;
@@ -357,6 +359,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
        DUP_CTX_STR(prepath);
        DUP_CTX_STR(username);
        DUP_CTX_STR(password);
+       DUP_CTX_STR(password2);
        DUP_CTX_STR(server_hostname);
        DUP_CTX_STR(UNC);
        DUP_CTX_STR(source);
@@ -905,6 +908,8 @@ static int smb3_reconfigure(struct fs_context *fc)
        else  {
                kfree_sensitive(ses->password);
                ses->password = kstrdup(ctx->password, GFP_KERNEL);
+               kfree_sensitive(ses->password2);
+               ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
        }
        STEAL_STRING(cifs_sb, ctx, domainname);
        STEAL_STRING(cifs_sb, ctx, nodename);
@@ -1305,6 +1310,18 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                        goto cifs_parse_mount_err;
                }
                break;
+       case Opt_pass2:
+               kfree_sensitive(ctx->password2);
+               ctx->password2 = NULL;
+               if (strlen(param->string) == 0)
+                       break;
+
+               ctx->password2 = kstrdup(param->string, GFP_KERNEL);
+               if (ctx->password2 == NULL) {
+                       cifs_errorf(fc, "OOM when copying password2 string\n");
+                       goto cifs_parse_mount_err;
+               }
+               break;
        case Opt_ip:
                if (strlen(param->string) == 0) {
                        ctx->got_ip = false;
@@ -1608,6 +1625,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
  cifs_parse_mount_err:
        kfree_sensitive(ctx->password);
        ctx->password = NULL;
+       kfree_sensitive(ctx->password2);
+       ctx->password2 = NULL;
        return -EINVAL;
 }
 
@@ -1713,6 +1732,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
        ctx->username = NULL;
        kfree_sensitive(ctx->password);
        ctx->password = NULL;
+       kfree_sensitive(ctx->password2);
+       ctx->password2 = NULL;
        kfree(ctx->server_hostname);
        ctx->server_hostname = NULL;
        kfree(ctx->UNC);
index 8a35645e0b65b244741da59177a2bcb0acea0256..a947bddeba273ea850b3502f07555a19316266a6 100644 (file)
@@ -145,6 +145,7 @@ enum cifs_param {
        Opt_source,
        Opt_user,
        Opt_pass,
+       Opt_pass2,
        Opt_ip,
        Opt_domain,
        Opt_srcaddr,
@@ -177,6 +178,7 @@ struct smb3_fs_context {
 
        char *username;
        char *password;
+       char *password2;
        char *domainname;
        char *source;
        char *server_hostname;
index 91b07ef9e25ca1c195bef21d06c92f5193633022..60afab5c83d410a9c5122d5f4826ade67cb93dee 100644 (file)
@@ -1105,7 +1105,8 @@ static int cifs_get_fattr(struct cifs_open_info_data *data,
                } else {
                        cifs_open_info_to_fattr(fattr, data, sb);
                }
-               if (!rc && fattr->cf_flags & CIFS_FATTR_DELETE_PENDING)
+               if (!rc && *inode &&
+                   (fattr->cf_flags & CIFS_FATTR_DELETE_PENDING))
                        cifs_mark_open_handles_for_deleted_file(*inode, full_path);
                break;
        case -EREMOTE:
index 33ac4f8f5050c416cd2004ee4516756edd3b11d8..7d15a1969b818439515b5188e8662a3b8f1276ce 100644 (file)
@@ -98,6 +98,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
        kfree(buf_to_free->serverDomain);
        kfree(buf_to_free->serverNOS);
        kfree_sensitive(buf_to_free->password);
+       kfree_sensitive(buf_to_free->password2);
        kfree(buf_to_free->user_name);
        kfree(buf_to_free->domainName);
        kfree_sensitive(buf_to_free->auth_key.response);
index b156eefa75d7cb4b13d1bf402234f08271a558ad..78c94d0350fe9970fab31564aeba6870d71859bd 100644 (file)
@@ -4964,68 +4964,84 @@ static int smb2_next_header(struct TCP_Server_Info *server, char *buf,
        return 0;
 }
 
-int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
-                      struct dentry *dentry, struct cifs_tcon *tcon,
-                      const char *full_path, umode_t mode, dev_t dev)
+static int __cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+                               struct dentry *dentry, struct cifs_tcon *tcon,
+                               const char *full_path, umode_t mode, dev_t dev)
 {
-       struct cifs_open_info_data buf = {};
        struct TCP_Server_Info *server = tcon->ses->server;
        struct cifs_open_parms oparms;
        struct cifs_io_parms io_parms = {};
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifs_fid fid;
        unsigned int bytes_written;
-       struct win_dev *pdev;
+       struct win_dev pdev = {};
        struct kvec iov[2];
        __u32 oplock = server->oplocks ? REQ_OPLOCK : 0;
        int rc;
 
-       if (!S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode))
+       switch (mode & S_IFMT) {
+       case S_IFCHR:
+               strscpy(pdev.type, "IntxCHR");
+               pdev.major = cpu_to_le64(MAJOR(dev));
+               pdev.minor = cpu_to_le64(MINOR(dev));
+               break;
+       case S_IFBLK:
+               strscpy(pdev.type, "IntxBLK");
+               pdev.major = cpu_to_le64(MAJOR(dev));
+               pdev.minor = cpu_to_le64(MINOR(dev));
+               break;
+       case S_IFIFO:
+               strscpy(pdev.type, "LnxFIFO");
+               break;
+       default:
                return -EPERM;
+       }
 
-       oparms = (struct cifs_open_parms) {
-               .tcon = tcon,
-               .cifs_sb = cifs_sb,
-               .desired_access = GENERIC_WRITE,
-               .create_options = cifs_create_options(cifs_sb, CREATE_NOT_DIR |
-                                                     CREATE_OPTION_SPECIAL),
-               .disposition = FILE_CREATE,
-               .path = full_path,
-               .fid = &fid,
-       };
+       oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, GENERIC_WRITE,
+                            FILE_CREATE, CREATE_NOT_DIR |
+                            CREATE_OPTION_SPECIAL, ACL_NO_MODE);
+       oparms.fid = &fid;
 
-       rc = server->ops->open(xid, &oparms, &oplock, &buf);
+       rc = server->ops->open(xid, &oparms, &oplock, NULL);
        if (rc)
                return rc;
 
-       /*
-        * BB Do not bother to decode buf since no local inode yet to put
-        * timestamps in, but we can reuse it safely.
-        */
-       pdev = (struct win_dev *)&buf.fi;
        io_parms.pid = current->tgid;
        io_parms.tcon = tcon;
-       io_parms.length = sizeof(*pdev);
-       iov[1].iov_base = pdev;
-       iov[1].iov_len = sizeof(*pdev);
-       if (S_ISCHR(mode)) {
-               memcpy(pdev->type, "IntxCHR", 8);
-               pdev->major = cpu_to_le64(MAJOR(dev));
-               pdev->minor = cpu_to_le64(MINOR(dev));
-       } else if (S_ISBLK(mode)) {
-               memcpy(pdev->type, "IntxBLK", 8);
-               pdev->major = cpu_to_le64(MAJOR(dev));
-               pdev->minor = cpu_to_le64(MINOR(dev));
-       } else if (S_ISFIFO(mode)) {
-               memcpy(pdev->type, "LnxFIFO", 8);
-       }
+       io_parms.length = sizeof(pdev);
+       iov[1].iov_base = &pdev;
+       iov[1].iov_len = sizeof(pdev);
 
        rc = server->ops->sync_write(xid, &fid, &io_parms,
                                     &bytes_written, iov, 1);
        server->ops->close(xid, tcon, &fid);
-       d_drop(dentry);
-       /* FIXME: add code here to set EAs */
-       cifs_free_open_info(&buf);
+       return rc;
+}
+
+int cifs_sfu_make_node(unsigned int xid, struct inode *inode,
+                      struct dentry *dentry, struct cifs_tcon *tcon,
+                      const char *full_path, umode_t mode, dev_t dev)
+{
+       struct inode *new = NULL;
+       int rc;
+
+       rc = __cifs_sfu_make_node(xid, inode, dentry, tcon,
+                                 full_path, mode, dev);
+       if (rc)
+               return rc;
+
+       if (tcon->posix_extensions) {
+               rc = smb311_posix_get_inode_info(&new, full_path, NULL,
+                                                inode->i_sb, xid);
+       } else if (tcon->unix_ext) {
+               rc = cifs_get_inode_info_unix(&new, full_path,
+                                             inode->i_sb, xid);
+       } else {
+               rc = cifs_get_inode_info(&new, full_path, NULL,
+                                        inode->i_sb, xid, NULL);
+       }
+       if (!rc)
+               d_instantiate(dentry, new);
        return rc;
 }
 
index c0c4933af5fc386911922b4e23c7869bdea8098b..86c647a947ccd1065a8edb0712e113351839b96f 100644 (file)
@@ -367,6 +367,17 @@ again:
                }
 
                rc = cifs_setup_session(0, ses, server, nls_codepage);
+               if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
+                       /*
+                        * Try the alternate password for the next reconnect (e.g.
+                        * when key rotation is enabled on the server) if one is
+                        * available and the current password has expired, but do not
+                        * swap on errors unrelated to the password, such as host down.
+                        */
+                       if (ses->password2)
+                               swap(ses->password2, ses->password);
+               }
+
                if ((rc == -EACCES) && !tcon->retry) {
                        mutex_unlock(&ses->session_mutex);
                        rc = -EHOSTDOWN;
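swap() here is the generic helper from <linux/minmax.h>; it exchanges the two lvalues in place, so the alternate password is the one tried on the next reconnect while the rejected one is retained as the new fallback (a later auth failure simply swaps them back). Its definition is essentially:

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)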
index dc067eeb638744b72be8487102498e9d010b2945..894c6ca1e5002015b378ad8d6ec598de44c25ebe 100644 (file)
@@ -336,6 +336,7 @@ static void update_inode_attr(struct dentry *dentry, struct inode *inode,
 
 /**
  * lookup_file - look up a file in the tracefs filesystem
+ * @parent_ei: Pointer to the eventfs_inode that represents the parent of the file
  * @dentry: the dentry to look up
  * @mode: the permission that the file should have.
  * @attr: saved attributes changed by user
@@ -389,6 +390,7 @@ static struct dentry *lookup_file(struct eventfs_inode *parent_ei,
 /**
  * lookup_dir_entry - look up a dir in the tracefs filesystem
  * @dentry: the directory to look up
+ * @pei: Pointer to the parent eventfs_inode if available
  * @ei: the eventfs_inode that represents the directory to create
  *
  * This function will look up a dentry for a directory represented by
@@ -478,16 +480,20 @@ void eventfs_d_release(struct dentry *dentry)
 
 /**
  * lookup_file_dentry - create a dentry for a file of an eventfs_inode
+ * @dentry: The parent dentry under which the new file's dentry will be created
  * @ei: the eventfs_inode that the file will be created under
  * @idx: the index into the entry_attrs[] of the @ei
- * @parent: The parent dentry of the created file.
- * @name: The name of the file to create
  * @mode: The mode of the file.
  * @data: The data to use to set the inode of the file with on open()
  * @fops: The fops of the file to be created.
  *
- * Create a dentry for a file of an eventfs_inode @ei and place it into the
- * address located at @e_dentry.
+ * This function creates a dentry for a file associated with an
+ * eventfs_inode @ei. It uses the entry attributes specified by @idx,
+ * if available. The file will have the specified @mode and its inode will be
+ * set up with @data upon open. The file operations will be set to @fops.
+ *
+ * Return: A pointer to the newly created file's dentry, or an error
+ * pointer.
  */
 static struct dentry *
 lookup_file_dentry(struct dentry *dentry,
index c6a124e8d565febb690377ae982f60042ba2383b..964fa7f2400335dc8eb9456c3190aa36f2c0c8ec 100644 (file)
@@ -1048,7 +1048,7 @@ static int zonefs_init_zgroup(struct super_block *sb,
        zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
                    zonefs_zgroup_name(ztype),
                    zgroup->g_nr_zones,
-                   zgroup->g_nr_zones > 1 ? "s" : "");
+                   str_plural(zgroup->g_nr_zones));
 
        return 0;
 }
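str_plural() is a helper from <linux/string_choices.h> that replaces the open-coded ternary; it is essentially:

	static inline const char *str_plural(size_t num)
	{
		return num == 1 ? "" : "s";
	}

Besides being shorter, it also corrects the zero case: the old "> 1" test printed "0 file", while str_plural(0) yields "0 files".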
index 5de954e2b18aaac5c0796466d256f6cd10e1130d..e7796f373d0dac4daa5c322a7ba82983b9a8ac81 100644 (file)
@@ -911,17 +911,19 @@ static inline bool acpi_int_uid_match(struct acpi_device *adev, u64 uid2)
  * acpi_dev_hid_uid_match - Match device by supplied HID and UID
  * @adev: ACPI device to match.
  * @hid2: Hardware ID of the device.
- * @uid2: Unique ID of the device, pass 0 or NULL to not check _UID.
+ * @uid2: Unique ID of the device, pass NULL to not check _UID.
  *
  * Matches HID and UID in @adev with given @hid2 and @uid2. Absence of @uid2
  * will be treated as a match. If the caller wants to validate @uid2, it
  * should be done before calling this function.
  *
- * Returns: %true if matches or @uid2 is 0 or NULL, %false otherwise.
+ * Returns: %true if matches or @uid2 is NULL, %false otherwise.
  */
 #define acpi_dev_hid_uid_match(adev, hid2, uid2)                       \
        (acpi_dev_hid_match(adev, hid2) &&                              \
-               (!(uid2) || acpi_dev_uid_match(adev, uid2)))
+               /* Distinguish integer 0 from NULL @uid2 */             \
+               (_Generic(uid2, ACPI_STR_TYPES(!(uid2)), default: 0) || \
+               acpi_dev_uid_match(adev, uid2)))
 
 void acpi_dev_clear_dependencies(struct acpi_device *supplier);
 bool acpi_dev_ready_for_enumeration(const struct acpi_device *device);
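The _Generic selection is what lets the macro tell a string @uid2 apart from a literal 0 at compile time: pointer types select the ACPI_STR_TYPES branches, which evaluate !(uid2), while integer arguments fall through to default: 0, so matching proceeds to acpi_dev_uid_match(). A userspace sketch of the same trick (the macro name is hypothetical):

	#include <stdio.h>

	/* Demo only, not the kernel macro: yields 1 only when the argument
	 * is a string pointer that is NULL; an integer 0 selects the
	 * default branch and is never treated as "absent". */
	#define IS_ABSENT_UID(uid)			\
		_Generic((uid),				\
			 char *:       !(uid),		\
			 const char *: !(uid),		\
			 default:      0)

	int main(void)
	{
		const char *absent = NULL;
		const char *real = "42";

		printf("%d %d %d\n",
		       IS_ABSENT_UID(absent),	/* 1: NULL string matches    */
		       IS_ABSENT_UID(real),	/* 0: UID must be compared   */
		       IS_ABSENT_UID(0));	/* 0: integer 0, not a string */
		return 0;
	}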
index 6e794420bd398c7e4848cadebdc107116cfb6af2..b7de3a4eade1c265acc4f92b53d5617d1ae3cb87 100644 (file)
@@ -156,7 +156,10 @@ extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
 
 #else /* !CONFIG_BUG */
 #ifndef HAVE_ARCH_BUG
-#define BUG() do {} while (1)
+#define BUG() do {             \
+       do {} while (1);        \
+       unreachable();          \
+} while (0)
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
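The added unreachable() makes the no-return property of the !CONFIG_BUG stub visible to the compiler and to objtool, so functions whose failure path ends in BUG() do not draw missing-return or fallthrough diagnostics. A sketch of the effect using the underlying builtin (names hypothetical):

	#define MY_BUG() do {			\
		for (;;)			\
			;			\
		__builtin_unreachable();	\
	} while (0)

	static int checked_div(int a, int b)
	{
		if (b == 0)
			MY_BUG();	/* control never continues here */
		return a / b;
	}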
index 87e3d49a4e29bf7af1de43d1da45bfd9ca3791ea..814207e7c37fcf17a65638f68ceda02e400c58fa 100644 (file)
@@ -512,13 +512,9 @@ struct hv_proximity_domain_flags {
        u32 proximity_info_valid : 1;
 } __packed;
 
-/* Not a union in windows but useful for zeroing */
-union hv_proximity_domain_info {
-       struct {
-               u32 domain_id;
-               struct hv_proximity_domain_flags flags;
-       };
-       u64 as_uint64;
+struct hv_proximity_domain_info {
+       u32 domain_id;
+       struct hv_proximity_domain_flags flags;
 } __packed;
 
 struct hv_lp_startup_status {
@@ -532,14 +528,13 @@ struct hv_lp_startup_status {
 } __packed;
 
 /* HvAddLogicalProcessor hypercall */
-struct hv_add_logical_processor_in {
+struct hv_input_add_logical_processor {
        u32 lp_index;
        u32 apic_id;
-       union hv_proximity_domain_info proximity_domain_info;
-       u64 flags;
+       struct hv_proximity_domain_info proximity_domain_info;
 } __packed;
 
-struct hv_add_logical_processor_out {
+struct hv_output_add_logical_processor {
        struct hv_lp_startup_status startup_status;
 } __packed;
 
@@ -560,7 +555,7 @@ struct hv_create_vp {
        u8 padding[3];
        u8 subnode_type;
        u64 subnode_id;
-       union hv_proximity_domain_info proximity_domain_info;
+       struct hv_proximity_domain_info proximity_domain_info;
        u64 flags;
 } __packed;
 
index 99935779682dc29180f556469c4487603b082740..8fe7aaab25990aa2fdebd81463b9ac9dedd36945 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/types.h>
 #include <linux/atomic.h>
 #include <linux/bitops.h>
+#include <acpi/acpi_numa.h>
 #include <linux/cpumask.h>
 #include <linux/nmi.h>
 #include <asm/ptrace.h>
@@ -67,6 +68,19 @@ extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
 bool hv_isolation_type_snp(void);
 bool hv_isolation_type_tdx(void);
 
+static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
+{
+       struct hv_proximity_domain_info pxm_info = {};
+
+       if (node != NUMA_NO_NODE) {
+               pxm_info.domain_id = node_to_pxm(node);
+               pxm_info.flags.proximity_info_valid = 1;
+               pxm_info.flags.proximity_preferred = 1;
+       }
+
+       return pxm_info;
+}
+
 /* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
 static inline int hv_result(u64 status)
 {
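Replacing the open-coded union fill-in with hv_numa_node_to_pxm_info() is also why the hunk pulls in <acpi/acpi_numa.h>, which supplies node_to_pxm(). A caller assembling the hypercall input might now read roughly as follows (a sketch; the surrounding variables are assumed):

	struct hv_input_add_logical_processor input = {
		.lp_index = lp_index,
		.apic_id  = apic_id,
		/* Zero-initialized, i.e. invalid, when node == NUMA_NO_NODE. */
		.proximity_domain_info = hv_numa_node_to_pxm_info(node),
	};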
index e06bad467f55ef1befdad569f0a8a37875def383..c3f9bb6602ba2135cae645bda4d730cd703a12a6 100644 (file)
@@ -682,4 +682,11 @@ static inline bool dma_fence_is_container(struct dma_fence *fence)
        return dma_fence_is_array(fence) || dma_fence_is_chain(fence);
 }
 
+#define DMA_FENCE_WARN(f, fmt, args...) \
+       do {                                                            \
+               struct dma_fence *__ff = (f);                           \
+               pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\
+                        ##args);                                       \
+       } while (0)
+
 #endif /* __LINUX_DMA_FENCE_H */
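Note the local __ff inside DMA_FENCE_WARN: it evaluates the fence argument exactly once, so a side-effecting expression is not re-run when the context and seqno are printed. A usage might look like this (message text and variables illustrative):

	DMA_FENCE_WARN(fence, "wait timed out after %d ms\n", timeout_ms);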
index 6c75c8bd44a0bb627020ba267c3d0debb2379ba4..1a14e239221f7e9aec06b510d450c9f30b34fc2c 100644 (file)
@@ -2,7 +2,6 @@
 #ifndef __LINUX_GPIO_PROPERTY_H
 #define __LINUX_GPIO_PROPERTY_H
 
-#include <dt-bindings/gpio/gpio.h> /* for GPIO_* flags */
 #include <linux/property.h>
 
 #define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \
index 6ef0557b4bff8ed5d14bc18391d356913136c23c..96ceb4095425eb39aa8145fda63cc1d859fb56f5 100644 (file)
@@ -832,6 +832,7 @@ struct vmbus_gpadl {
        u32 gpadl_handle;
        u32 size;
        void *buffer;
+       bool decrypted;
 };
 
 struct vmbus_channel {
index 05df0e399d7c0b84236198f57e0c61e90412beaa..ac333ea81d319526d5fde59bf9f64b5510f94e41 100644 (file)
@@ -13,7 +13,7 @@ enum {
         * A hint to not wake right away but delay until there are enough of
         * tw's queued to match the number of CQEs the task is waiting for.
         *
-        * Must not be used wirh requests generating more than one CQE.
+        * Must not be used with requests generating more than one CQE.
         * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
         */
        IOU_F_TWQ_LAZY_WAKE                     = 1,
index 147feebd508cabfa98a1844fb75e2235fb9b56d6..3f003d5fde5341bd789d0d1286109563624090d3 100644 (file)
@@ -114,7 +114,7 @@ do {                                                \
 # define lockdep_softirq_enter()               do { } while (0)
 # define lockdep_softirq_exit()                        do { } while (0)
 # define lockdep_hrtimer_enter(__hrtimer)      false
-# define lockdep_hrtimer_exit(__context)       do { } while (0)
+# define lockdep_hrtimer_exit(__context)       do { (void)(__context); } while (0)
 # define lockdep_posixtimer_enter()            do { } while (0)
 # define lockdep_posixtimer_exit()             do { } while (0)
 # define lockdep_irq_work_enter(__work)                do { } while (0)
index 29c4e4f243e47d580945626da4a172e2ecff0c5b..f2394a409c9d5e478844b0d6a43011f4447798a0 100644 (file)
@@ -31,9 +31,9 @@ static __always_inline bool rw_base_is_locked(const struct rwbase_rt *rwb)
        return atomic_read(&rwb->readers) != READER_BIAS;
 }
 
-static inline void rw_base_assert_held_write(const struct rwbase_rt *rwb)
+static __always_inline bool rw_base_is_write_locked(const struct rwbase_rt *rwb)
 {
-       WARN_ON(atomic_read(&rwb->readers) != WRITER_BIAS);
+       return atomic_read(&rwb->readers) == WRITER_BIAS;
 }
 
 static __always_inline bool rw_base_is_contended(const struct rwbase_rt *rwb)
index 4f1c18992f768fe67faffa139f259e8213a93f9e..c8b543d428b0a8d4662183f3342e88ec61d10189 100644 (file)
@@ -167,14 +167,14 @@ static __always_inline int rwsem_is_locked(const struct rw_semaphore *sem)
        return rw_base_is_locked(&sem->rwbase);
 }
 
-static inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
+static __always_inline void rwsem_assert_held_nolockdep(const struct rw_semaphore *sem)
 {
        WARN_ON(!rwsem_is_locked(sem));
 }
 
-static inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
+static __always_inline void rwsem_assert_held_write_nolockdep(const struct rw_semaphore *sem)
 {
-       rw_base_assert_held_write(sem);
+       WARN_ON(!rw_base_is_write_locked(&sem->rwbase));
 }
 
 static __always_inline int rwsem_is_contended(struct rw_semaphore *sem)
index b0201747a263a9526c5d60c2c2644a8e064a8439..26c4325aa3734eaa32ffbbdd862b151b93e7fdf8 100644 (file)
@@ -170,7 +170,7 @@ size_t virtio_max_dma_size(const struct virtio_device *vdev);
 
 /**
  * struct virtio_driver - operations for a virtio I/O driver
- * @driver: underlying device driver (populate name and owner).
+ * @driver: underlying device driver (populate name).
  * @id_table: the ids serviced by this driver.
  * @feature_table: an array of feature numbers supported by this driver.
  * @feature_table_size: number of entries in the feature table array.
@@ -208,7 +208,10 @@ static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
        return container_of(drv, struct virtio_driver, driver);
 }
 
-int register_virtio_driver(struct virtio_driver *drv);
+/* use a macro to avoid include chaining to get THIS_MODULE */
+#define register_virtio_driver(drv) \
+       __register_virtio_driver(drv, THIS_MODULE)
+int __register_virtio_driver(struct virtio_driver *drv, struct module *owner);
 void unregister_virtio_driver(struct virtio_driver *drv);
 
 /* module_virtio_driver() - Helper macro for drivers that don't do
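Turning register_virtio_driver() into a macro is the standard way to capture the caller's module without include chaining: the macro expands in the driver's own translation unit, so THIS_MODULE resolves to that driver rather than to the virtio core. The pattern in isolation (the my_* names are hypothetical):

	struct my_driver;

	/* Exported once by the core ... */
	int __register_my_driver(struct my_driver *drv, struct module *owner);

	/* ... while the header macro stamps each caller's module in. */
	#define register_my_driver(drv) \
		__register_my_driver(drv, THIS_MODULE)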
index ba2d96a1bc2f94703945c5f79294a66af1fe8fd4..f50fcafc69de20b8b20a53a45f29b23f4259a65e 100644 (file)
@@ -609,7 +609,7 @@ TRACE_EVENT(rpcgss_context,
                __field(unsigned int, timeout)
                __field(u32, window_size)
                __field(int, len)
-               __string(acceptor, data)
+               __string_len(acceptor, data, len)
        ),
 
        TP_fast_assign(
@@ -618,7 +618,7 @@ TRACE_EVENT(rpcgss_context,
                __entry->timeout = timeout;
                __entry->window_size = window_size;
                __entry->len = len;
-               strncpy(__get_str(acceptor), data, len);
+               __assign_str(acceptor, data);
        ),
 
        TP_printk("win_size=%u expiry=%lu now=%lu timeout=%u acceptor=%.*s",
index bea6973906134656d84299958258205932c23e04..b95dd84eef2db2311f985921064e65d96e3e2f4c 100644 (file)
 /* Get the config size */
 #define VHOST_VDPA_GET_CONFIG_SIZE     _IOR(VHOST_VIRTIO, 0x79, __u32)
 
-/* Get the count of all virtqueues */
-#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
-
-/* Get the number of virtqueue groups. */
-#define VHOST_VDPA_GET_GROUP_NUM       _IOR(VHOST_VIRTIO, 0x81, __u32)
-
 /* Get the number of address spaces. */
 #define VHOST_VDPA_GET_AS_NUM          _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
 
 #define VHOST_VDPA_GET_VRING_DESC_GROUP        _IOWR(VHOST_VIRTIO, 0x7F,       \
                                              struct vhost_vring_state)
 
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT       _IOR(VHOST_VIRTIO, 0x80, __u32)
+
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM       _IOR(VHOST_VIRTIO, 0x81, __u32)
+
 /* Get the queue size of a specific virtqueue.
  * userspace sets the vring index in vhost_vring_state.index
  * kernel sets the queue size in vhost_vring_state.num
  */
-#define VHOST_VDPA_GET_VRING_SIZE      _IOWR(VHOST_VIRTIO, 0x80,       \
+#define VHOST_VDPA_GET_VRING_SIZE      _IOWR(VHOST_VIRTIO, 0x82,       \
                                              struct vhost_vring_state)
 #endif
index 4521c2b66b98db3c3affc55c7aeb4a69b8eec0a7..c170a2b8d2cf21f06d1c5af8bf57edecb94aaa95 100644 (file)
@@ -2602,19 +2602,6 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
        if (__io_cqring_events_user(ctx) >= min_events)
                return 0;
 
-       if (sig) {
-#ifdef CONFIG_COMPAT
-               if (in_compat_syscall())
-                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
-                                                     sigsz);
-               else
-#endif
-                       ret = set_user_sigmask(sig, sigsz);
-
-               if (ret)
-                       return ret;
-       }
-
        init_waitqueue_func_entry(&iowq.wq, io_wake_function);
        iowq.wq.private = current;
        INIT_LIST_HEAD(&iowq.wq.entry);
@@ -2633,6 +2620,19 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                io_napi_adjust_timeout(ctx, &iowq, &ts);
        }
 
+       if (sig) {
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+                                                     sigsz);
+               else
+#endif
+                       ret = set_user_sigmask(sig, sigsz);
+
+               if (ret)
+                       return ret;
+       }
+
        io_napi_busy_loop(ctx, &iowq);
 
        trace_io_uring_cqring_wait(ctx, min_events);
index 1e7665ff6ef70264b26206f99c34aa5516190129..4afb475d41974b95a86a22bd84771d8c29781c08 100644 (file)
@@ -1276,6 +1276,7 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 
        if (req_has_async_data(req)) {
                kmsg = req->async_data;
+               kmsg->msg.msg_control_user = sr->msg_control;
        } else {
                ret = io_sendmsg_copy_hdr(req, &iomsg);
                if (ret)
index 8f6affd051f77564f96ca4682a58d0c131f62c56..07ad53b7f11952080e890ed91f99f3e762bf984d 100644 (file)
@@ -3207,7 +3207,8 @@ enum cpu_mitigations {
 };
 
 static enum cpu_mitigations cpu_mitigations __ro_after_init =
-       CPU_MITIGATIONS_AUTO;
+       IS_ENABLED(CONFIG_SPECULATION_MITIGATIONS) ? CPU_MITIGATIONS_AUTO :
+                                                    CPU_MITIGATIONS_OFF;
 
 static int __init mitigations_parse_cmdline(char *arg)
 {
index 86fe172b5958232ee29d481bf2f9fe60a51c5881..a5e0dfc44d24e22641e72bb0362511a33b23a1fd 100644 (file)
  * @alloc_size:        Size of the allocated buffer.
  * @list:      The free list describing the number of free entries available
  *             from each index.
+ * @pad_slots: Number of preceding padding slots. Valid only in the first
+ *             allocated non-padding slot.
  */
 struct io_tlb_slot {
        phys_addr_t orig_addr;
        size_t alloc_size;
-       unsigned int list;
+       unsigned short list;
+       unsigned short pad_slots;
 };
 
 static bool swiotlb_force_bounce;
@@ -287,6 +290,7 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
                                         mem->nslabs - i);
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
+               mem->slots[i].pad_slots = 0;
        }
 
        memset(vaddr, 0, bytes);
@@ -821,12 +825,30 @@ void swiotlb_dev_init(struct device *dev)
 #endif
 }
 
-/*
- * Return the offset into a iotlb slot required to keep the device happy.
+/**
+ * swiotlb_align_offset() - Get required offset into an IO TLB allocation.
+ * @dev:         Owning device.
+ * @align_mask:  Allocation alignment mask.
+ * @addr:        DMA address.
+ *
+ * Return the minimum offset from the start of an IO TLB allocation which is
+ * required for a given buffer address and allocation alignment to keep the
+ * device happy.
+ *
+ * First, the address bits covered by min_align_mask must be identical in the
+ * original address and the bounce buffer address. High bits are preserved by
+ * choosing a suitable IO TLB slot, but bits below IO_TLB_SHIFT require extra
+ * padding bytes before the bounce buffer.
+ *
+ * Second, @align_mask specifies which bits of the first allocated slot must
+ * be zero. This may require allocating additional padding slots, and then the
+ * offset (in bytes) from the first such padding slot is returned.
  */
-static unsigned int swiotlb_align_offset(struct device *dev, u64 addr)
+static unsigned int swiotlb_align_offset(struct device *dev,
+                                        unsigned int align_mask, u64 addr)
 {
-       return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1);
+       return addr & dma_get_min_align_mask(dev) &
+               (align_mask | (IO_TLB_SIZE - 1));
 }
 
 /*
@@ -841,27 +863,23 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
        size_t alloc_size = mem->slots[index].alloc_size;
        unsigned long pfn = PFN_DOWN(orig_addr);
        unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
-       unsigned int tlb_offset, orig_addr_offset;
+       int tlb_offset;
 
        if (orig_addr == INVALID_PHYS_ADDR)
                return;
 
-       tlb_offset = tlb_addr & (IO_TLB_SIZE - 1);
-       orig_addr_offset = swiotlb_align_offset(dev, orig_addr);
-       if (tlb_offset < orig_addr_offset) {
-               dev_WARN_ONCE(dev, 1,
-                       "Access before mapping start detected. orig offset %u, requested offset %u.\n",
-                       orig_addr_offset, tlb_offset);
-               return;
-       }
-
-       tlb_offset -= orig_addr_offset;
-       if (tlb_offset > alloc_size) {
-               dev_WARN_ONCE(dev, 1,
-                       "Buffer overflow detected. Allocation size: %zu. Mapping size: %zu+%u.\n",
-                       alloc_size, size, tlb_offset);
-               return;
-       }
+       /*
+        * It's valid for tlb_offset to be negative. This can happen when the
+        * "offset" returned by swiotlb_align_offset() is non-zero, and the
+        * tlb_addr is pointing within the first "offset" bytes of the second
+        * or subsequent slots of the allocated swiotlb area. While it's not
+        * valid for tlb_addr to be pointing within the first "offset" bytes
+        * of the first slot, there's no way to check for such an error since
+        * this function can't distinguish the first slot from the second and
+        * subsequent slots.
+        */
+       tlb_offset = (tlb_addr & (IO_TLB_SIZE - 1)) -
+                    swiotlb_align_offset(dev, 0, orig_addr);
 
        orig_addr += tlb_offset;
        alloc_size -= tlb_offset;
@@ -1005,7 +1023,7 @@ static int swiotlb_search_pool_area(struct device *dev, struct io_tlb_pool *pool
        unsigned long max_slots = get_max_slots(boundary_mask);
        unsigned int iotlb_align_mask = dma_get_min_align_mask(dev);
        unsigned int nslots = nr_slots(alloc_size), stride;
-       unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+       unsigned int offset = swiotlb_align_offset(dev, 0, orig_addr);
        unsigned int index, slots_checked, count = 0, i;
        unsigned long flags;
        unsigned int slot_base;
@@ -1328,11 +1346,12 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                unsigned long attrs)
 {
        struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
-       unsigned int offset = swiotlb_align_offset(dev, orig_addr);
+       unsigned int offset;
        struct io_tlb_pool *pool;
        unsigned int i;
        int index;
        phys_addr_t tlb_addr;
+       unsigned short pad_slots;
 
        if (!mem || !mem->nslabs) {
                dev_warn_ratelimited(dev,
@@ -1349,6 +1368,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
                return (phys_addr_t)DMA_MAPPING_ERROR;
        }
 
+       offset = swiotlb_align_offset(dev, alloc_align_mask, orig_addr);
        index = swiotlb_find_slots(dev, orig_addr,
                                   alloc_size + offset, alloc_align_mask, &pool);
        if (index == -1) {
@@ -1364,6 +1384,10 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
+       pad_slots = offset >> IO_TLB_SHIFT;
+       offset &= (IO_TLB_SIZE - 1);
+       index += pad_slots;
+       pool->slots[index].pad_slots = pad_slots;
        for (i = 0; i < nr_slots(alloc_size + offset); i++)
                pool->slots[index + i].orig_addr = slot_addr(orig_addr, i);
        tlb_addr = slot_addr(pool->start, index) + offset;
@@ -1384,13 +1408,17 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
 {
        struct io_tlb_pool *mem = swiotlb_find_pool(dev, tlb_addr);
        unsigned long flags;
-       unsigned int offset = swiotlb_align_offset(dev, tlb_addr);
-       int index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
-       int nslots = nr_slots(mem->slots[index].alloc_size + offset);
-       int aindex = index / mem->area_nslabs;
-       struct io_tlb_area *area = &mem->areas[aindex];
+       unsigned int offset = swiotlb_align_offset(dev, 0, tlb_addr);
+       int index, nslots, aindex;
+       struct io_tlb_area *area;
        int count, i;
 
+       index = (tlb_addr - offset - mem->start) >> IO_TLB_SHIFT;
+       index -= mem->slots[index].pad_slots;
+       nslots = nr_slots(mem->slots[index].alloc_size + offset);
+       aindex = index / mem->area_nslabs;
+       area = &mem->areas[aindex];
+
        /*
         * Return the buffer to the free list by setting the corresponding
         * entries to indicate the number of contiguous entries available.
@@ -1413,6 +1441,7 @@ static void swiotlb_release_slots(struct device *dev, phys_addr_t tlb_addr)
                mem->slots[i].list = ++count;
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
+               mem->slots[i].pad_slots = 0;
        }
 
        /*
@@ -1647,9 +1676,6 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_io_tlb_hiwater, io_tlb_hiwater_get,
 static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
                                         const char *dirname)
 {
-       atomic_long_set(&mem->total_used, 0);
-       atomic_long_set(&mem->used_hiwater, 0);
-
        mem->debugfs = debugfs_create_dir(dirname, io_tlb_default_mem.debugfs);
        if (!mem->nslabs)
                return;
@@ -1660,7 +1686,6 @@ static void swiotlb_create_debugfs_files(struct io_tlb_mem *mem,
        debugfs_create_file("io_tlb_used_hiwater", 0600, mem->debugfs, mem,
                        &fops_io_tlb_hiwater);
 #ifdef CONFIG_SWIOTLB_DYNAMIC
-       atomic_long_set(&mem->transient_nslabs, 0);
        debugfs_create_file("io_tlb_transient_nslabs", 0400, mem->debugfs,
                            mem, &fops_io_tlb_transient_used);
 #endif
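A worked example of the new offset split in swiotlb_tbl_map_single(), with illustrative values (2 KiB slots, so IO_TLB_SHIFT = 11; min_align_mask = 0xfff; alloc_align_mask = 0xfff; orig_addr low bits 0xa30):

	unsigned int offset = 0xa30 & 0xfff & (0xfff | 0x7ff);	/* 0xa30 = 2608 bytes   */
	unsigned short pad_slots = offset >> IO_TLB_SHIFT;	/* 1 whole padding slot */

	offset &= IO_TLB_SIZE - 1;				/* 0x230 into the slot  */

The padding-slot count is recorded in the first non-padding slot, which is what lets swiotlb_release_slots() step back from the mapped address to the true start of the allocation before freeing.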
index e3ae93bbcb9b50d487727bee16f63f67d66442ac..09f8397bae15fb9c895d060d7708c9bca5ef62f7 100644 (file)
@@ -106,6 +106,12 @@ static void s2idle_enter(void)
        swait_event_exclusive(s2idle_wait_head,
                    s2idle_state == S2IDLE_STATE_WAKE);
 
+       /*
+        * Kick all CPUs to ensure that they resume their timers and restore
+        * consistent system state.
+        */
+       wake_up_all_idle_cpus();
+
        cpus_read_unlock();
 
        raw_spin_lock_irq(&s2idle_lock);
index fb0fdec8719a13ed5fd5eb66d13027e184dce5de..d88b13076b7944e54fefb2802d914f8f7fe1abf5 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
  */
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
@@ -84,7 +85,7 @@ int tick_is_oneshot_available(void)
  */
 static void tick_periodic(int cpu)
 {
-       if (tick_do_timer_cpu == cpu) {
+       if (READ_ONCE(tick_do_timer_cpu) == cpu) {
                raw_spin_lock(&jiffies_lock);
                write_seqcount_begin(&jiffies_seq);
 
@@ -215,8 +216,8 @@ static void tick_setup_device(struct tick_device *td,
                 * If no cpu took the do_timer update, assign it to
                 * this cpu:
                 */
-               if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
-                       tick_do_timer_cpu = cpu;
+               if (READ_ONCE(tick_do_timer_cpu) == TICK_DO_TIMER_BOOT) {
+                       WRITE_ONCE(tick_do_timer_cpu, cpu);
                        tick_next_period = ktime_get();
 #ifdef CONFIG_NO_HZ_FULL
                        /*
@@ -232,7 +233,7 @@ static void tick_setup_device(struct tick_device *td,
                                                !tick_nohz_full_cpu(cpu)) {
                        tick_take_do_timer_from_boot();
                        tick_do_timer_boot_cpu = -1;
-                       WARN_ON(tick_do_timer_cpu != cpu);
+                       WARN_ON(READ_ONCE(tick_do_timer_cpu) != cpu);
 #endif
                }
 
@@ -406,10 +407,10 @@ void tick_assert_timekeeping_handover(void)
 int tick_cpu_dying(unsigned int dying_cpu)
 {
        /*
-        * If the current CPU is the timekeeper, it's the only one that
-        * can safely hand over its duty. Also all online CPUs are in
-        * stop machine, guaranteed not to be idle, therefore it's safe
-        * to pick any online successor.
+        * If the current CPU is the timekeeper, it's the only one that can
+        * safely hand over its duty. Also all online CPUs are in stop
+        * machine, guaranteed not to be idle, therefore there is no
+        * concurrency and it's safe to pick any online successor.
         */
        if (tick_do_timer_cpu == dying_cpu)
                tick_do_timer_cpu = cpumask_first(cpu_online_mask);
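The READ_ONCE()/WRITE_ONCE() conversions are data-race annotations: tick_do_timer_cpu is written by whichever CPU holds the duty but read locklessly by remote CPUs, and the marked accesses prevent load and store tearing (and satisfy KCSAN) without adding any ordering. The idiom in miniature:

	/* Handover side (a single writer at any time): */
	WRITE_ONCE(tick_do_timer_cpu, cpu);

	/* Lockless reader: snapshot once, then use the snapshot throughout. */
	int tick_cpu = READ_ONCE(tick_do_timer_cpu);

	if (tick_cpu == cpu)
		tick_do_update_jiffies64(now);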
index 1331216a9cae749cce5e13b7ff4adcf9ba5fefaa..71a792cd893620eebe73eb1a0fc0c4ff5d454344 100644 (file)
@@ -8,6 +8,7 @@
  *
  *  Started by: Thomas Gleixner and Ingo Molnar
  */
+#include <linux/compiler.h>
 #include <linux/cpu.h>
 #include <linux/err.h>
 #include <linux/hrtimer.h>
@@ -204,7 +205,7 @@ static inline void tick_sched_flag_clear(struct tick_sched *ts,
 
 static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
 {
-       int cpu = smp_processor_id();
+       int tick_cpu, cpu = smp_processor_id();
 
        /*
         * Check if the do_timer duty was dropped. We don't care about
@@ -216,16 +217,18 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
         * If nohz_full is enabled, this should not happen because the
         * 'tick_do_timer_cpu' CPU never relinquishes.
         */
-       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) &&
-           unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) {
+       tick_cpu = READ_ONCE(tick_do_timer_cpu);
+
+       if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && unlikely(tick_cpu == TICK_DO_TIMER_NONE)) {
 #ifdef CONFIG_NO_HZ_FULL
                WARN_ON_ONCE(tick_nohz_full_running);
 #endif
-               tick_do_timer_cpu = cpu;
+               WRITE_ONCE(tick_do_timer_cpu, cpu);
+               tick_cpu = cpu;
        }
 
        /* Check if jiffies need an update */
-       if (tick_do_timer_cpu == cpu)
+       if (tick_cpu == cpu)
                tick_do_update_jiffies64(now);
 
        /*
@@ -610,7 +613,7 @@ bool tick_nohz_cpu_hotpluggable(unsigned int cpu)
         * timers, workqueues, timekeeping, ...) on behalf of full dynticks
         * CPUs. It must remain online when nohz full is enabled.
         */
-       if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
+       if (tick_nohz_full_running && READ_ONCE(tick_do_timer_cpu) == cpu)
                return false;
        return true;
 }
@@ -891,6 +894,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 {
        u64 basemono, next_tick, delta, expires;
        unsigned long basejiff;
+       int tick_cpu;
 
        basemono = get_jiffies_update(&basejiff);
        ts->last_jiffies = basejiff;
@@ -947,9 +951,9 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
         * Otherwise we can sleep as long as we want.
         */
        delta = timekeeping_max_deferment();
-       if (cpu != tick_do_timer_cpu &&
-           (tick_do_timer_cpu != TICK_DO_TIMER_NONE ||
-            !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
+       tick_cpu = READ_ONCE(tick_do_timer_cpu);
+       if (tick_cpu != cpu &&
+           (tick_cpu != TICK_DO_TIMER_NONE || !tick_sched_flag_test(ts, TS_FLAG_DO_TIMER_LAST)))
                delta = KTIME_MAX;
 
        /* Calculate the next expiry time */
@@ -970,6 +974,7 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
        unsigned long basejiff = ts->last_jiffies;
        u64 basemono = ts->timer_expires_base;
        bool timer_idle = tick_sched_flag_test(ts, TS_FLAG_STOPPED);
+       int tick_cpu;
        u64 expires;
 
        /* Make sure we won't be trying to stop it twice in a row. */
@@ -1007,10 +1012,11 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
         * do_timer() never gets invoked. Keep track of the fact that it
         * was the one which had the do_timer() duty last.
         */
-       if (cpu == tick_do_timer_cpu) {
-               tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+       tick_cpu = READ_ONCE(tick_do_timer_cpu);
+       if (tick_cpu == cpu) {
+               WRITE_ONCE(tick_do_timer_cpu, TICK_DO_TIMER_NONE);
                tick_sched_flag_set(ts, TS_FLAG_DO_TIMER_LAST);
-       } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+       } else if (tick_cpu != TICK_DO_TIMER_NONE) {
                tick_sched_flag_clear(ts, TS_FLAG_DO_TIMER_LAST);
        }
 
@@ -1173,15 +1179,17 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                return false;
 
        if (tick_nohz_full_enabled()) {
+               int tick_cpu = READ_ONCE(tick_do_timer_cpu);
+
                /*
                 * Keep the tick alive to guarantee timekeeping progression
                 * if there are full dynticks CPUs around
                 */
-               if (tick_do_timer_cpu == cpu)
+               if (tick_cpu == cpu)
                        return false;
 
                /* Should not happen for nohz-full */
-               if (WARN_ON_ONCE(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+               if (WARN_ON_ONCE(tick_cpu == TICK_DO_TIMER_NONE))
                        return false;
        }
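
The conversion above follows one idiom throughout: take a single READ_ONCE() snapshot of tick_do_timer_cpu into a local, make every subsequent decision against that stable local, and pair it with WRITE_ONCE() on the store side so the compiler can neither tear the access nor re-load the shared variable between checks. A minimal kernel-style sketch of the idiom (the variable and helper names are illustrative, not from this patch):

	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */
	#include <linux/types.h>

	static int shared_duty_cpu = -1;	/* -1: nobody holds the duty */

	/* Reader: one marked load, then only the local snapshot is tested. */
	static bool duty_is_mine_or_free(int cpu)
	{
		int duty_cpu = READ_ONCE(shared_duty_cpu);

		return duty_cpu == cpu || duty_cpu == -1;
	}

	/* Writer: the marked store pairs with the readers' marked loads. */
	static void take_duty(int cpu)
	{
		WRITE_ONCE(shared_duty_cpu, cpu);
	}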
 
index 61c541c36596d9cdb532d876b56a273f44731928..47345bf1d4a9f7e850db213999c62ecb02f8fea0 100644 (file)
@@ -965,7 +965,7 @@ config FTRACE_RECORD_RECURSION
 
 config FTRACE_RECORD_RECURSION_SIZE
        int "Max number of recursed functions to record"
-       default 128
+       default 128
        depends on FTRACE_RECORD_RECURSION
        help
          This defines the limit of number of functions that can be
index 25476ead681b8411f41d713a77603cdf0653b4ad..6511dc3a00da841bc79554973636056b51c600ff 100644 (file)
@@ -1393,7 +1393,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
        old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
        old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
 
-       local_inc(&cpu_buffer->pages_touched);
        /*
         * Just make sure we have seen our old_write and synchronize
         * with any interrupts that come in.
@@ -1430,8 +1429,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
                 */
                local_set(&next_page->page->commit, 0);
 
-               /* Again, either we update tail_page or an interrupt does */
-               (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
+               /* Either we update tail_page or an interrupt does */
+               if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
+                       local_inc(&cpu_buffer->pages_touched);
        }
 }
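
Beyond dropping the (void) cast, try_cmpxchg() differs from cmpxchg() in two useful ways: it returns a boolean success flag and it refreshes its second argument with the observed value on failure. That is what lets the hunk above bump pages_touched only in the one context whose swap of tail_page actually succeeded. A userspace sketch of the same shape using C11 atomics (all names illustrative):

	#include <stdatomic.h>

	struct buffer_page;	/* opaque for this sketch */

	static _Atomic(struct buffer_page *) tail_page;
	static atomic_long pages_touched;

	/* Only the caller whose compare-and-swap wins does the accounting;
	 * a loser sees 'expected' refreshed to the current tail and moves on. */
	static void advance_tail(struct buffer_page *expected,
				 struct buffer_page *next)
	{
		if (atomic_compare_exchange_strong(&tail_page, &expected, next))
			atomic_fetch_add(&pages_touched, 1);
	}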
 
index 7c364b87352eed92e0f76137091882231f187028..52f75c36bbca4922bec786815bb70ff409f62a61 100644 (file)
@@ -1670,6 +1670,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+#ifdef CONFIG_PERF_EVENTS
 static ssize_t
 event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
@@ -1684,6 +1685,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
+#endif
 
 static ssize_t
 event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
@@ -2152,10 +2154,12 @@ static const struct file_operations ftrace_event_format_fops = {
        .release = seq_release,
 };
 
+#ifdef CONFIG_PERF_EVENTS
 static const struct file_operations ftrace_event_id_fops = {
        .read = event_id_read,
        .llseek = default_llseek,
 };
+#endif
 
 static const struct file_operations ftrace_event_filter_fops = {
        .open = tracing_open_file_tr,
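
Note that the guard has to cover both halves: event_id_read() and the ftrace_event_id_fops instance that references it are only consumed when perf is built in, and hiding just one of the two would merely trade an unused-function warning for an unused-variable one. Schematically (names illustrative):

	#ifdef CONFIG_PERF_EVENTS		/* the only consumer */
	static ssize_t demo_id_read(struct file *filp, char __user *ubuf,
				    size_t cnt, loff_t *ppos);

	static const struct file_operations demo_id_fops = {
		.read	= demo_id_read,
		.llseek	= default_llseek,
	};
	#endif	/* definition and sole user compile away together */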
index 318e2dad27e048c08fea615cef8654aa1fcb7d81..ae57bf69ad4af3aa51fd364e89676469dd87a14d 100644 (file)
@@ -76,6 +76,12 @@ enum {
        DNS
 };
 
+enum {
+       IPV4 = 1,
+       IPV6,
+       IP_TYPE_MAX
+};
+
 static int in_hand_shake;
 
 static char *os_name = "";
@@ -102,6 +108,11 @@ static struct utsname uts_buf;
 
 #define MAX_FILE_NAME 100
 #define ENTRIES_PER_BLOCK 50
+/*
+ * Change this entry if the number of addresses increases in the future

+ */
+#define MAX_IP_ENTRIES 64
+#define OUTSTR_BUF_SIZE ((INET6_ADDRSTRLEN + 1) * MAX_IP_ENTRIES)
 
 struct kvp_record {
        char key[HV_KVP_EXCHANGE_MAX_KEY_SIZE];
@@ -1171,6 +1182,18 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
        return 0;
 }
 
+int ip_version_check(const char *input_addr)
+{
+       struct in6_addr addr;
+
+       if (inet_pton(AF_INET, input_addr, &addr))
+               return IPV4;
+       else if (inet_pton(AF_INET6, input_addr, &addr))
+               return IPV6;
+
+       return -EINVAL;
+}
+
 /*
  * Only IPv4 subnet strings need to be converted to plen
  * For IPv6 the subnet is already provided in plen format
@@ -1197,14 +1220,75 @@ static int kvp_subnet_to_plen(char *subnet_addr_str)
        return plen;
 }
 
+static int process_dns_gateway_nm(FILE *f, char *ip_string, int type,
+                                 int ip_sec)
+{
+       char addr[INET6_ADDRSTRLEN], *output_str;
+       int ip_offset = 0, error = 0, ip_ver;
+       char *param_name;
+
+       if (type == DNS)
+               param_name = "dns";
+       else if (type == GATEWAY)
+               param_name = "gateway";
+       else
+               return -EINVAL;
+
+       output_str = (char *)calloc(OUTSTR_BUF_SIZE, sizeof(char));
+       if (!output_str)
+               return -ENOMEM;
+
+       while (1) {
+               memset(addr, 0, sizeof(addr));
+
+               if (!parse_ip_val_buffer(ip_string, &ip_offset, addr,
+                                        (MAX_IP_ADDR_SIZE * 2)))
+                       break;
+
+               ip_ver = ip_version_check(addr);
+               if (ip_ver < 0)
+                       continue;
+
+               if ((ip_ver == IPV4 && ip_sec == IPV4) ||
+                   (ip_ver == IPV6 && ip_sec == IPV6)) {
+                       /*
+                        * do a bounds check to avoid out-of-bounds writes
+                        */
+                       if ((OUTSTR_BUF_SIZE - strlen(output_str)) >
+                           (strlen(addr) + 1)) {
+                               strncat(output_str, addr,
+                                       OUTSTR_BUF_SIZE -
+                                       strlen(output_str) - 1);
+                               strncat(output_str, ",",
+                                       OUTSTR_BUF_SIZE -
+                                       strlen(output_str) - 1);
+                       }
+               } else {
+                       continue;
+               }
+       }
+
+       if (strlen(output_str)) {
+               /*
+                * This is to get rid of the extra comma character at
+                * the end of the string
+                */
+               output_str[strlen(output_str) - 1] = '\0';
+               error = fprintf(f, "%s=%s\n", param_name, output_str);
+       }
+
+       free(output_str);
+       return error;
+}
+
 static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
-                               int is_ipv6)
+                               int ip_sec)
 {
        char addr[INET6_ADDRSTRLEN];
        char subnet_addr[INET6_ADDRSTRLEN];
-       int error, i = 0;
+       int error = 0, i = 0;
        int ip_offset = 0, subnet_offset = 0;
-       int plen;
+       int plen, ip_ver;
 
        memset(addr, 0, sizeof(addr));
        memset(subnet_addr, 0, sizeof(subnet_addr));
@@ -1216,10 +1300,16 @@ static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
                                                       subnet_addr,
                                                       (MAX_IP_ADDR_SIZE *
                                                        2))) {
-               if (!is_ipv6)
+               ip_ver = ip_version_check(addr);
+               if (ip_ver < 0)
+                       continue;
+
+               if (ip_ver == IPV4 && ip_sec == IPV4)
                        plen = kvp_subnet_to_plen((char *)subnet_addr);
-               else
+               else if (ip_ver == IPV6 && ip_sec == IPV6)
                        plen = atoi(subnet_addr);
+               else
+                       continue;
 
                if (plen < 0)
                        return plen;
@@ -1233,17 +1323,16 @@ static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
                memset(subnet_addr, 0, sizeof(subnet_addr));
        }
 
-       return 0;
+       return error;
 }
 
 static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 {
-       int error = 0;
+       int error = 0, ip_ver;
        char if_filename[PATH_MAX];
        char nm_filename[PATH_MAX];
        FILE *ifcfg_file, *nmfile;
        char cmd[PATH_MAX];
-       int is_ipv6 = 0;
        char *mac_addr;
        int str_len;
 
@@ -1421,52 +1510,94 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
        if (error)
                goto setval_error;
 
-       if (new_val->addr_family & ADDR_FAMILY_IPV6) {
-               error = fprintf(nmfile, "\n[ipv6]\n");
-               if (error < 0)
-                       goto setval_error;
-               is_ipv6 = 1;
-       } else {
-               error = fprintf(nmfile, "\n[ipv4]\n");
-               if (error < 0)
-                       goto setval_error;
-       }
-
        /*
         * Now we populate the keyfile format
+        *
+        * The keyfile format expects the IPv6 and IPv4 configuration in
+        * different sections. Therefore we iterate through the list twice,
+        * once to populate the IPv4 section and again for the IPv6 one
         */
+       ip_ver = IPV4;
+       do {
+               if (ip_ver == IPV4) {
+                       error = fprintf(nmfile, "\n[ipv4]\n");
+                       if (error < 0)
+                               goto setval_error;
+               } else {
+                       error = fprintf(nmfile, "\n[ipv6]\n");
+                       if (error < 0)
+                               goto setval_error;
+               }
 
-       if (new_val->dhcp_enabled) {
-               error = kvp_write_file(nmfile, "method", "", "auto");
-               if (error < 0)
-                       goto setval_error;
-       } else {
-               error = kvp_write_file(nmfile, "method", "", "manual");
+               /*
+                * Write the configuration for ipaddress, netmask, gateway and
+                * name services
+                */
+               error = process_ip_string_nm(nmfile, (char *)new_val->ip_addr,
+                                            (char *)new_val->sub_net,
+                                            ip_ver);
                if (error < 0)
                        goto setval_error;
-       }
 
-       /*
-        * Write the configuration for ipaddress, netmask, gateway and
-        * name services
-        */
-       error = process_ip_string_nm(nmfile, (char *)new_val->ip_addr,
-                                    (char *)new_val->sub_net, is_ipv6);
-       if (error < 0)
-               goto setval_error;
+               /*
+                * As dhcp_enabled is only valid for ipv4, we do not set dhcp
+                * methods for ipv6 based on the dhcp_enabled flag.
+                *
+                * For ipv4, set method to manual only when dhcp_enabled is
+                * false and specific ipv4 addresses are configured. If
+                * dhcp_enabled is false and no ipv4 addresses are configured,
+                * set method to 'disabled'.
+                *
+                * For ipv6, set method to manual when we configure ipv6
+                * addresses. Otherwise set method to 'auto' so that SLAAC from
+                * RA may be used.
+                */
+               if (ip_ver == IPV4) {
+                       if (new_val->dhcp_enabled) {
+                               error = kvp_write_file(nmfile, "method", "",
+                                                      "auto");
+                               if (error < 0)
+                                       goto setval_error;
+                       } else if (error) {
+                               error = kvp_write_file(nmfile, "method", "",
+                                                      "manual");
+                               if (error < 0)
+                                       goto setval_error;
+                       } else {
+                               error = kvp_write_file(nmfile, "method", "",
+                                                      "disabled");
+                               if (error < 0)
+                                       goto setval_error;
+                       }
+               } else if (ip_ver == IPV6) {
+                       if (error) {
+                               error = kvp_write_file(nmfile, "method", "",
+                                                      "manual");
+                               if (error < 0)
+                                       goto setval_error;
+                       } else {
+                               error = kvp_write_file(nmfile, "method", "",
+                                                      "auto");
+                               if (error < 0)
+                                       goto setval_error;
+                       }
+               }
 
-       /* we do not want ipv4 addresses in ipv6 section and vice versa */
-       if (is_ipv6 != is_ipv4((char *)new_val->gate_way)) {
-               error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
+               error = process_dns_gateway_nm(nmfile,
+                                              (char *)new_val->gate_way,
+                                              GATEWAY, ip_ver);
                if (error < 0)
                        goto setval_error;
-       }
 
-       if (is_ipv6 != is_ipv4((char *)new_val->dns_addr)) {
-               error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
+               error = process_dns_gateway_nm(nmfile,
+                                              (char *)new_val->dns_addr, DNS,
+                                              ip_ver);
                if (error < 0)
                        goto setval_error;
-       }
+
+               ip_ver++;
+       } while (ip_ver < IP_TYPE_MAX);
+
        fclose(nmfile);
        fclose(ifcfg_file);
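
For reference, a sketch of the NetworkManager keyfile this loop now emits: one section per address family, filled in two passes, with ip_version_check() (inet_pton() underneath) steering every address, gateway and DNS entry into the matching section, and multiple gateway/dns values comma-joined as in process_dns_gateway_nm() above. The addressN= lines come from process_ip_string_nm(); all concrete values below are illustrative:

	[ipv4]
	address1=192.168.10.5/24
	method=manual
	gateway=192.168.10.1
	dns=192.168.10.2,192.168.10.3

	[ipv6]
	address1=2001:db8::5/64
	method=manual
	gateway=2001:db8::1
	dns=2001:db8::53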
 
index 908e0d0839369c2e41f090bddc2e9a9b9121b4c9..61c69297e7978fceed700be3ad43a7a870d20de2 100644 (file)
@@ -986,10 +986,12 @@ static void dpa_perf_setup(struct cxl_port *endpoint, struct range *range,
 {
        dpa_perf->qos_class = FAKE_QTG_ID;
        dpa_perf->dpa_range = *range;
-       dpa_perf->coord.read_latency = 500;
-       dpa_perf->coord.write_latency = 500;
-       dpa_perf->coord.read_bandwidth = 1000;
-       dpa_perf->coord.write_bandwidth = 1000;
+       for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+               dpa_perf->coord[i].read_latency = 500;
+               dpa_perf->coord[i].write_latency = 500;
+               dpa_perf->coord[i].read_bandwidth = 1000;
+               dpa_perf->coord[i].write_bandwidth = 1000;
+       }
 }
 
 static void mock_cxl_endpoint_parse_cdat(struct cxl_port *port)
index b1ede624986676a554514105936698fdd2b0a915..b7c8f29c09a978895c1176e1a39aeda8c97e8416 100644 (file)
@@ -18,7 +18,7 @@ echo 'sched:*' > set_event
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -29,7 +29,7 @@ echo 1 > events/sched/enable
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -40,7 +40,7 @@ echo 0 > events/sched/enable
 
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -ne 0 ]; then
     fail "no scheduler events should be recorded"
 fi
index 4eca3fd1292cf99be961eebc77fa4b57ec0c8721..14bbab0cce13521abbcae9bbd3772a567239c77f 100644 (file)
@@ -51,6 +51,7 @@
 #include <stdarg.h>
 #include <string.h>
 #include <stdio.h>
+#include <sys/utsname.h>
 #endif
 
 #ifndef ARRAY_SIZE
@@ -79,6 +80,9 @@
 #define KSFT_XPASS 3
 #define KSFT_SKIP  4
 
+#ifndef __noreturn
+#define __noreturn       __attribute__((__noreturn__))
+#endif
 #define __printf(a, b)   __attribute__((format(printf, a, b)))
 
 /* counters */
@@ -301,13 +305,13 @@ void ksft_test_result_code(int exit_code, const char *test_name,
        printf("\n");
 }
 
-static inline int ksft_exit_pass(void)
+static inline __noreturn int ksft_exit_pass(void)
 {
        ksft_print_cnts();
        exit(KSFT_PASS);
 }
 
-static inline int ksft_exit_fail(void)
+static inline __noreturn int ksft_exit_fail(void)
 {
        ksft_print_cnts();
        exit(KSFT_FAIL);
@@ -334,7 +338,7 @@ static inline int ksft_exit_fail(void)
                  ksft_cnt.ksft_xfail + \
                  ksft_cnt.ksft_xskip)
 
-static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
+static inline __noreturn __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
 {
        int saved_errno = errno;
        va_list args;
@@ -349,19 +353,19 @@ static inline __printf(1, 2) int ksft_exit_fail_msg(const char *msg, ...)
        exit(KSFT_FAIL);
 }
 
-static inline int ksft_exit_xfail(void)
+static inline __noreturn int ksft_exit_xfail(void)
 {
        ksft_print_cnts();
        exit(KSFT_XFAIL);
 }
 
-static inline int ksft_exit_xpass(void)
+static inline __noreturn int ksft_exit_xpass(void)
 {
        ksft_print_cnts();
        exit(KSFT_XPASS);
 }
 
-static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
+static inline __noreturn __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
 {
        int saved_errno = errno;
        va_list args;
@@ -390,4 +394,21 @@ static inline __printf(1, 2) int ksft_exit_skip(const char *msg, ...)
        exit(KSFT_SKIP);
 }
 
+static inline int ksft_min_kernel_version(unsigned int min_major,
+                                         unsigned int min_minor)
+{
+#ifdef NOLIBC
+       ksft_print_msg("NOLIBC: Can't check kernel version: Function not implemented\n");
+       return 0;
+#else
+       unsigned int major, minor;
+       struct utsname info;
+
+       if (uname(&info) || sscanf(info.release, "%u.%u.", &major, &minor) != 2)
+               ksft_exit_fail_msg("Can't parse kernel version\n");
+
+       return major > min_major || (major == min_major && minor >= min_minor);
+#endif
+}
+
 #endif /* __KSELFTEST_H */
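
A sketch of how a test consumes the new helper to distinguish a real regression from expected behaviour on older kernels, mirroring the posix_timers usage further down (run_feature_check() is a hypothetical test body):

	#include "kselftest.h"

	extern int run_feature_check(void);	/* hypothetical */

	int main(void)
	{
		ksft_print_header();
		ksft_set_plan(1);

		if (run_feature_check())
			ksft_test_result_pass("feature\n");
		else if (ksft_min_kernel_version(6, 3))
			/* kernel is new enough: a genuine failure */
			ksft_test_result_fail("feature\n");
		else
			ksft_test_result_skip("feature (kernel too old)\n");

		ksft_finished();
	}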
index adb15cae79abc7fe0fe013da6c56b28ca783ab85..ba3ddeda24bf527295acaf32159097c03ac52153 100644 (file)
                FIXTURE_DATA(fixture_name) self; \
                pid_t child = 1; \
                int status = 0; \
+               bool jmp = false; \
                memset(&self, 0, sizeof(FIXTURE_DATA(fixture_name))); \
                if (setjmp(_metadata->env) == 0) { \
                        /* Use the same _metadata. */ \
                                _metadata->exit_code = KSFT_FAIL; \
                        } \
                } \
+               else \
+                       jmp = true; \
                if (child == 0) { \
-                       if (_metadata->setup_completed && !_metadata->teardown_parent) \
+                       if (_metadata->setup_completed && !_metadata->teardown_parent && !jmp) \
                                fixture_name##_teardown(_metadata, &self, variant->data); \
                        _exit(0); \
                } \
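
The new 'jmp' flag records whether control came back to setjmp() via longjmp(), that is, an assertion fired partway through setup or the test body; in that case the child skips fixture teardown, which could otherwise operate on state that setup never finished constructing. The guard in isolation, with hypothetical helpers:

	#include <setjmp.h>
	#include <stdbool.h>

	static jmp_buf env;

	extern void test_body(void);	/* hypothetical; may longjmp(env, 1) */
	extern void teardown(void);	/* hypothetical cleanup */

	static void run_one(void)
	{
		bool jumped = false;

		if (setjmp(env) == 0)
			test_body();
		else
			jumped = true;		/* body bailed out mid-way */

		if (!jumped)
			teardown();	/* only safe after a clean return */
	}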
index d49dd3ffd0d96abeaa38cd92f3040ef747726541..c001dd79179d5d28e51d69cbad4c7e9a6a026053 100644 (file)
@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
        diff = end.tv_usec - start.tv_usec;
        diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
 
-       if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+       if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
                printf("Diff too high: %lld..", diff);
                return -1;
        }
@@ -184,80 +184,71 @@ static int check_timer_create(int which)
        return 0;
 }
 
-int remain;
-__thread int got_signal;
+static pthread_t ctd_thread;
+static volatile int ctd_count, ctd_failed;
 
-static void *distribution_thread(void *arg)
+static void ctd_sighandler(int sig)
 {
-       while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
-       return NULL;
+       if (pthread_self() != ctd_thread)
+               ctd_failed = 1;
+       ctd_count--;
 }
 
-static void distribution_handler(int nr)
+static void *ctd_thread_func(void *arg)
 {
-       if (!__atomic_exchange_n(&got_signal, 1, __ATOMIC_RELAXED))
-               __atomic_fetch_sub(&remain, 1, __ATOMIC_RELAXED);
-}
-
-/*
- * Test that all running threads _eventually_ receive CLOCK_PROCESS_CPUTIME_ID
- * timer signals. This primarily tests that the kernel does not favour any one.
- */
-static int check_timer_distribution(void)
-{
-       int err, i;
-       timer_t id;
-       const int nthreads = 10;
-       pthread_t threads[nthreads];
        struct itimerspec val = {
                .it_value.tv_sec = 0,
                .it_value.tv_nsec = 1000 * 1000,
                .it_interval.tv_sec = 0,
                .it_interval.tv_nsec = 1000 * 1000,
        };
+       timer_t id;
 
-       remain = nthreads + 1;  /* worker threads + this thread */
-       signal(SIGALRM, distribution_handler);
-       err = timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id);
-       if (err < 0) {
-               ksft_perror("Can't create timer");
-               return -1;
-       }
-       err = timer_settime(id, 0, &val, NULL);
-       if (err < 0) {
-               ksft_perror("Can't set timer");
-               return -1;
-       }
+       /* 10 ms to ensure the leader sleeps */
+       usleep(10000);
 
-       for (i = 0; i < nthreads; i++) {
-               err = pthread_create(&threads[i], NULL, distribution_thread,
-                                    NULL);
-               if (err) {
-                       ksft_print_msg("Can't create thread: %s (%d)\n",
-                                      strerror(errno), errno);
-                       return -1;
-               }
-       }
+       ctd_count = 100;
+       if (timer_create(CLOCK_PROCESS_CPUTIME_ID, NULL, &id))
+               return "Can't create timer\n";
+       if (timer_settime(id, 0, &val, NULL))
+               return "Can't set timer\n";
 
-       /* Wait for all threads to receive the signal. */
-       while (__atomic_load_n(&remain, __ATOMIC_RELAXED));
+       while (ctd_count > 0 && !ctd_failed)
+               ;
 
-       for (i = 0; i < nthreads; i++) {
-               err = pthread_join(threads[i], NULL);
-               if (err) {
-                       ksft_print_msg("Can't join thread: %s (%d)\n",
-                                      strerror(errno), errno);
-                       return -1;
-               }
-       }
+       if (timer_delete(id))
+               return "Can't delete timer\n";
 
-       if (timer_delete(id)) {
-               ksft_perror("Can't delete timer");
-               return -1;
-       }
+       return NULL;
+}
+
+/*
+ * Test that only the running thread receives the timer signal.
+ */
+static int check_timer_distribution(void)
+{
+       const char *errmsg;
 
-       ksft_test_result_pass("check_timer_distribution\n");
+       signal(SIGALRM, ctd_sighandler);
+
+       errmsg = "Can't create thread\n";
+       if (pthread_create(&ctd_thread, NULL, ctd_thread_func, NULL))
+               goto err;
+
+       errmsg = "Can't join thread\n";
+       if (pthread_join(ctd_thread, (void **)&errmsg) || errmsg)
+               goto err;
+
+       if (!ctd_failed)
+               ksft_test_result_pass("check signal distribution\n");
+       else if (ksft_min_kernel_version(6, 3))
+               ksft_test_result_fail("check signal distribution\n");
+       else
+               ksft_test_result_skip("check signal distribution (old kernel)\n");
        return 0;
+err:
+       ksft_print_msg("%s", errmsg);
+       return -1;
 }
 
 int main(int argc, char **argv)
index 48b9a803235a80413f0d94d9eb841d9f045779e8..d13ebde203221ae3fa81835fae684c8e180cf111 100644 (file)
@@ -21,9 +21,6 @@
  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *   GNU General Public License for more details.
  */
-
-
-
 #include <stdio.h>
 #include <stdlib.h>
 #include <time.h>
@@ -62,45 +59,47 @@ int clear_time_state(void)
 #define NUM_FREQ_OUTOFRANGE 4
 #define NUM_FREQ_INVALID 2
 
+#define SHIFTED_PPM (1 << 16)
+
 long valid_freq[NUM_FREQ_VALID] = {
-       -499<<16,
-       -450<<16,
-       -400<<16,
-       -350<<16,
-       -300<<16,
-       -250<<16,
-       -200<<16,
-       -150<<16,
-       -100<<16,
-       -75<<16,
-       -50<<16,
-       -25<<16,
-       -10<<16,
-       -5<<16,
-       -1<<16,
+        -499 * SHIFTED_PPM,
+        -450 * SHIFTED_PPM,
+        -400 * SHIFTED_PPM,
+        -350 * SHIFTED_PPM,
+        -300 * SHIFTED_PPM,
+        -250 * SHIFTED_PPM,
+        -200 * SHIFTED_PPM,
+        -150 * SHIFTED_PPM,
+        -100 * SHIFTED_PPM,
+         -75 * SHIFTED_PPM,
+         -50 * SHIFTED_PPM,
+         -25 * SHIFTED_PPM,
+         -10 * SHIFTED_PPM,
+          -5 * SHIFTED_PPM,
+          -1 * SHIFTED_PPM,
        -1000,
-       1<<16,
-       5<<16,
-       10<<16,
-       25<<16,
-       50<<16,
-       75<<16,
-       100<<16,
-       150<<16,
-       200<<16,
-       250<<16,
-       300<<16,
-       350<<16,
-       400<<16,
-       450<<16,
-       499<<16,
+           1 * SHIFTED_PPM,
+           5 * SHIFTED_PPM,
+          10 * SHIFTED_PPM,
+          25 * SHIFTED_PPM,
+          50 * SHIFTED_PPM,
+          75 * SHIFTED_PPM,
+         100 * SHIFTED_PPM,
+         150 * SHIFTED_PPM,
+         200 * SHIFTED_PPM,
+         250 * SHIFTED_PPM,
+         300 * SHIFTED_PPM,
+         350 * SHIFTED_PPM,
+         400 * SHIFTED_PPM,
+         450 * SHIFTED_PPM,
+         499 * SHIFTED_PPM,
 };
 
 long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
-       -1000<<16,
-       -550<<16,
-       550<<16,
-       1000<<16,
+       -1000 * SHIFTED_PPM,
+        -550 * SHIFTED_PPM,
+         550 * SHIFTED_PPM,
+        1000 * SHIFTED_PPM,
 };
 
 #define LONG_MAX (~0UL>>1)
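
In struct timex, the freq field carries parts-per-million with a 16-bit binary fraction, so multiplying whole ppm by SHIFTED_PPM (1 << 16) yields kernel units: 499 ppm becomes 499 * 65536 = 32702464, just inside the kernel's +/-500 ppm clamp, which is why 550 and 1000 ppm sit in the out-of-range table. A userspace sketch of the conversion (setting the frequency needs CAP_SYS_TIME; the value read back is in the same shifted units):

	#include <stdio.h>
	#include <sys/timex.h>

	#define SHIFTED_PPM (1 << 16)	/* timex.freq is ppm << 16 */

	int main(void)
	{
		struct timex tx = {
			.modes = ADJ_FREQUENCY,
			.freq  = 250 * SHIFTED_PPM,	/* request +250 ppm */
		};

		if (adjtimex(&tx) < 0)
			perror("adjtimex");
		else
			printf("freq is %ld (%.3f ppm)\n",
			       tx.freq, (double)tx.freq / SHIFTED_PPM);
		return 0;
	}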