Merge tag 'linux-can-fixes-for-5.18-20220429' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can
authorJakub Kicinski <kuba@kernel.org>
Fri, 29 Apr 2022 19:33:54 +0000 (12:33 -0700)
committerJakub Kicinski <kuba@kernel.org>
Fri, 29 Apr 2022 19:33:55 +0000 (12:33 -0700)
Marc Kleine-Budde says:

====================
pull-request: can 2022-04-29

The first patch is by Oliver Hartkopp and removes the ability to
re-bind already bound sockets in the ISOTP protocol. It turned out to
be not needed and brought unnecessary complexity.

The last 4 patches all target the grcan driver. Duoming Zhou's patch
fixes a potential deadlock in the grcan_close() function. Daniel
Hellstrom's patch fixes the dma_alloc_coherent() to use the correct
device. Andreas Larsson's 1st patch fixes a broken system id check,
the 2nd patch fixes the NAPI poll budget usage.

* tag 'linux-can-fixes-for-5.18-20220429' of git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can:
  can: grcan: only use the NAPI poll budget for RX
  can: grcan: grcan_probe(): fix broken system id check for errata workaround needs
  can: grcan: use ofdev->dev when allocating DMA memory
  can: grcan: grcan_close(): fix deadlock
  can: isotp: remove re-binding of bound socket
====================

Link: https://lore.kernel.org/r/20220429125612.1792561-1-mkl@pengutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
272 files changed:
Documentation/devicetree/bindings/dma/qcom,gpi.yaml
Documentation/devicetree/bindings/regulator/richtek,rt5190a-regulator.yaml
Documentation/filesystems/ext4/attributes.rst
Documentation/filesystems/f2fs.rst
Documentation/vm/page_owner.rst
MAINTAINERS
Makefile
arch/arc/boot/dts/hsdk.dts
arch/arc/include/asm/atomic-llsc.h
arch/arc/include/asm/pgtable-levels.h
arch/arc/kernel/disasm.c
arch/arc/kernel/entry.S
arch/arc/kernel/signal.c
arch/arc/kernel/smp.c
arch/arc/kernel/unaligned.c
arch/arc/mm/cache.c
arch/arm/mach-exynos/Kconfig
arch/arm/xen/enlighten.c
arch/arm64/Kconfig
arch/arm64/include/asm/pgtable.h
arch/powerpc/kernel/time.c
arch/powerpc/kvm/book3s_64_mmu_radix.c
arch/powerpc/kvm/book3s_64_vio.c
arch/powerpc/kvm/book3s_64_vio_hv.c
arch/powerpc/kvm/book3s_hv_nested.c
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/perf/Makefile
arch/powerpc/perf/power10-pmu.c
arch/powerpc/perf/power9-pmu.c
arch/riscv/Kconfig.socs
arch/riscv/include/asm/kvm_host.h
arch/riscv/kvm/vcpu.c
arch/riscv/kvm/vcpu_exit.c
arch/riscv/mm/init.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/vsie.c
arch/sparc/include/asm/cacheflush_32.h
arch/x86/events/intel/cstate.c
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/kvm.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/sev.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
arch/x86/kvm/x86.c
drivers/acpi/processor_idle.c
drivers/ata/pata_marvell.c
drivers/block/Kconfig
drivers/block/floppy.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/cpufreq/sun50i-cpufreq-nvmem.c
drivers/cpuidle/cpuidle-riscv-sbi.c
drivers/dma/at_xdmac.c
drivers/dma/dw-edma/dw-edma-v0-core.c
drivers/dma/idxd/device.c
drivers/dma/idxd/submit.c
drivers/dma/idxd/sysfs.c
drivers/dma/imx-sdma.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/edac/synopsys_edac.c
drivers/firmware/cirrus/cs_dsp.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
drivers/gpu/drm/drm_of.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
drivers/gpu/drm/radeon/radeon_sync.c
drivers/gpu/drm/vc4/Kconfig
drivers/gpu/drm/vc4/vc4_dsi.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/idle/intel_idle.c
drivers/input/keyboard/cypress-sf.c
drivers/input/keyboard/omap4-keypad.c
drivers/md/bcache/journal.c
drivers/md/bcache/request.c
drivers/mtd/nand/raw/mtk_ecc.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/nand/raw/sh_flctl.c
drivers/perf/arm_pmu.c
drivers/pinctrl/intel/pinctrl-alderlake.c
drivers/pinctrl/mediatek/Kconfig
drivers/pinctrl/pinctrl-pistachio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/qcom/pinctrl-sm6350.c
drivers/pinctrl/samsung/Kconfig
drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/pinctrl/sunplus/sppctl_sp7021.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/dell/dell-laptop.c
drivers/platform/x86/gigabyte-wmi.c
drivers/platform/x86/intel/pmc/core.h
drivers/platform/x86/intel/sdsi.c
drivers/platform/x86/intel/uncore-frequency/uncore-frequency.c
drivers/scsi/sr_ioctl.c
drivers/thermal/Kconfig
drivers/thermal/gov_user_space.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thermal/thermal_sysfs.c
drivers/video/fbdev/arkfb.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/aty/radeon_pm.c
drivers/video/fbdev/aty/radeonfb.h
drivers/video/fbdev/clps711x-fb.c
drivers/video/fbdev/controlfb.c
drivers/video/fbdev/i740fb.c
drivers/video/fbdev/imxfb.c
drivers/video/fbdev/kyro/fbdev.c
drivers/video/fbdev/matrox/matroxfb_base.h
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
drivers/video/fbdev/mmp/core.c
drivers/video/fbdev/neofb.c
drivers/video/fbdev/omap/hwa742.c
drivers/video/fbdev/omap/lcdc.c
drivers/video/fbdev/omap/sossi.c
drivers/video/fbdev/platinumfb.c
drivers/video/fbdev/pm2fb.c
drivers/video/fbdev/pxafb.c
drivers/video/fbdev/s3fb.c
drivers/video/fbdev/sh_mobile_lcdcfb.c
drivers/video/fbdev/sis/sis_main.c
drivers/video/fbdev/tridentfb.c
drivers/video/fbdev/udlfb.c
drivers/video/fbdev/valkyriefb.c
drivers/video/fbdev/vt8623fb.c
drivers/video/of_display_timing.c
drivers/xen/gntalloc.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/scrub.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.h
fs/btrfs/zoned.h
fs/cifs/connect.c
fs/cifs/dfs_cache.c
fs/cifs/smb2ops.c
fs/cifs/transport.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/inode.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/gfs2/file.c
fs/hugetlbfs/inode.c
fs/io_uring.c
fs/jbd2/commit.c
fs/ksmbd/misc.c
fs/ksmbd/misc.h
fs/ksmbd/oplock.c
fs/ksmbd/oplock.h
fs/ksmbd/smb2pdu.c
fs/ksmbd/vfs.c
fs/ksmbd/vfs_cache.c
fs/ksmbd/vfs_cache.h
fs/namespace.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_trans.h
fs/zonefs/super.c
include/linux/dma-buf-map.h [deleted file]
include/linux/hugetlb.h
include/linux/kernel.h
include/linux/kvm_host.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/mtd/mtd.h
include/linux/sched.h
include/linux/sched/mm.h
include/sound/soc-component.h
include/uapi/linux/fb.h
include/uapi/linux/input-event-codes.h
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/kcov.c
kernel/sched/fair.c
lib/hexdump.c
lib/xarray.c
mm/hugetlb.c
mm/kasan/quarantine.c
mm/memcontrol.c
mm/memory-failure.c
mm/mmap.c
mm/mmu_notifier.c
mm/nommu.c
mm/oom_kill.c
mm/page_alloc.c
mm/userfaultfd.c
mm/util.c
mm/vmalloc.c
mm/workingset.c
sound/hda/hdac_i915.c
sound/hda/intel-dsp-config.c
sound/oss/dmasound/dmasound_core.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/atmel/sam9g20_wm8731.c
sound/soc/codecs/cs35l41-lib.c
sound/soc/codecs/lpass-rx-macro.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/lpass-va-macro.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rk817_codec.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682s.c
sound/soc/codecs/rt711.c
sound/soc/codecs/wcd934x.c
sound/soc/codecs/wm8731.c
sound/soc/fsl/fsl_sai.c
sound/soc/generic/simple-card-utils.c
sound/soc/intel/boards/sof_es8336.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/intel/common/soc-acpi-intel-tgl-match.c
sound/soc/meson/aiu-acodec-ctrl.c
sound/soc/meson/aiu-codec-ctrl.c
sound/soc/meson/aiu.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/sof-pci-dev.c
sound/soc/sof/topology.c
sound/usb/midi.c
sound/usb/mixer_maps.c
tools/include/linux/slab.h
tools/perf/builtin-report.c
tools/perf/builtin-script.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/util/c++/clang.cpp
tools/power/x86/intel-speed-select/Makefile
tools/testing/radix-tree/linux.c
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/kvm_page_table_test.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/x86_64/amx_test.c
tools/testing/selftests/kvm/x86_64/emulator_error_test.c
tools/testing/selftests/kvm/x86_64/smm_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
tools/testing/selftests/vm/mremap_test.c
tools/testing/selftests/vm/run_vmtests.sh
virt/kvm/dirty_ring.c
virt/kvm/kvm_main.c
virt/kvm/kvm_mm.h

index e614fe3187bbc0376013ff69b955c4b8b94ceffa..d09d79d7406a3aa8378591c37c763f85683e197f 100644 (file)
@@ -29,6 +29,7 @@ properties:
   interrupts:
     description:
       Interrupt lines for each GPI instance
+    minItems: 1
     maxItems: 13
 
   "#dma-cells":
index 28725c5467fc8a3c8633363077ce7d6a533e81a7..edb411be039041837bedb2ecf9085b3c23d373b3 100644 (file)
@@ -58,7 +58,7 @@ properties:
         type: object
         $ref: regulator.yaml#
         description: |
-          regulator description for buck1 and buck4.
+          regulator description for buck1 to buck4, and ldo.
 
         properties:
           regulator-allowed-modes:
index 54386a010a8d7003a36d8792330715316f7f3129..871d2da7a0a91e73f0f791f5627a57855741f20a 100644 (file)
@@ -76,7 +76,7 @@ The beginning of an extended attribute block is in
      - Checksum of the extended attribute block.
    * - 0x14
      - \_\_u32
-     - h\_reserved[2]
+     - h\_reserved[3]
      - Zero.
 
 The checksum is calculated against the FS UUID, the 64-bit block number
index 4a2426f0485a994482f11244cf1b146966a5fcd0..ad8dc8c040a2766d176728fe089edcf3e0950d8a 100644 (file)
@@ -235,12 +235,6 @@ offgrpjquota                Turn off group journalled quota.
 offprjjquota            Turn off project journalled quota.
 quota                   Enable plain user disk quota accounting.
 noquota                         Disable all plain disk quota option.
-whint_mode=%s           Control which write hints are passed down to block
-                        layer. This supports "off", "user-based", and
-                        "fs-based".  In "off" mode (default), f2fs does not pass
-                        down hints. In "user-based" mode, f2fs tries to pass
-                        down hints given by users. And in "fs-based" mode, f2fs
-                        passes down hints with its policy.
 alloc_mode=%s           Adjust block allocation policy, which supports "reuse"
                         and "default".
 fsync_mode=%s           Control the policy of fsync. Currently supports "posix",
@@ -751,70 +745,6 @@ In order to identify whether the data in the victim segment are valid or not,
 F2FS manages a bitmap. Each bit represents the validity of a block, and the
 bitmap is composed of a bit stream covering whole blocks in main area.
 
-Write-hint Policy
------------------
-
-1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
-
-2) whint_mode=user-based. F2FS tries to pass down hints given by
-users.
-
-===================== ======================== ===================
-User                  F2FS                     Block
-===================== ======================== ===================
-N/A                   META                     WRITE_LIFE_NOT_SET
-N/A                   HOT_NODE                 "
-N/A                   WARM_NODE                "
-N/A                   COLD_NODE                "
-ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
-extension list        "                        "
-
--- buffered io
-WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
-WRITE_LIFE_NONE       "                        "
-WRITE_LIFE_MEDIUM     "                        "
-WRITE_LIFE_LONG       "                        "
-
--- direct io
-WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
-WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
-WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
-WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
-===================== ======================== ===================
-
-3) whint_mode=fs-based. F2FS passes down hints with its policy.
-
-===================== ======================== ===================
-User                  F2FS                     Block
-===================== ======================== ===================
-N/A                   META                     WRITE_LIFE_MEDIUM;
-N/A                   HOT_NODE                 WRITE_LIFE_NOT_SET
-N/A                   WARM_NODE                "
-N/A                   COLD_NODE                WRITE_LIFE_NONE
-ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
-extension list        "                        "
-
--- buffered io
-WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
-WRITE_LIFE_NONE       "                        "
-WRITE_LIFE_MEDIUM     "                        "
-WRITE_LIFE_LONG       "                        "
-
--- direct io
-WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
-WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
-WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
-WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
-WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
-WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
-===================== ======================== ===================
-
 Fallocate(2) Policy
 -------------------
 
index 65204d7f004f238f37b0654cd91bcc2033fdf7a1..7e0c3f574e78206a3aea7f466284c2b9760586ba 100644 (file)
@@ -110,7 +110,7 @@ Usage
    If you want to sort by the page nums of buf, use the ``-m`` parameter.
    The detailed parameters are:
 
-   fundamental function:
+   fundamental function::
 
        Sort:
                -a              Sort by memory allocation time.
@@ -122,7 +122,7 @@ Usage
                -s              Sort by stack trace.
                -t              Sort by times (default).
 
-   additional function:
+   additional function::
 
        Cull:
                --cull <rules>
@@ -153,6 +153,7 @@ Usage
 
 STANDARD FORMAT SPECIFIERS
 ==========================
+::
 
        KEY             LONG            DESCRIPTION
        p               pid             process ID
index e86a8e2ec3f6c4271563e81f2a78a477d4489dbf..2647adf30569d3fbb1d955aad7c23b2abb951974 100644 (file)
@@ -10239,8 +10239,6 @@ F:      drivers/net/ethernet/sgi/ioc3-eth.c
 IOMAP FILESYSTEM LIBRARY
 M:     Christoph Hellwig <hch@infradead.org>
 M:     Darrick J. Wong <djwong@kernel.org>
-M:     linux-xfs@vger.kernel.org
-M:     linux-fsdevel@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
 L:     linux-fsdevel@vger.kernel.org
 S:     Supported
@@ -10549,6 +10547,7 @@ M:      Andrey Ryabinin <ryabinin.a.a@gmail.com>
 R:     Alexander Potapenko <glider@google.com>
 R:     Andrey Konovalov <andreyknvl@gmail.com>
 R:     Dmitry Vyukov <dvyukov@google.com>
+R:     Vincenzo Frascino <vincenzo.frascino@arm.com>
 L:     kasan-dev@googlegroups.com
 S:     Maintained
 F:     Documentation/dev-tools/kasan.rst
@@ -21599,7 +21598,6 @@ F:      drivers/xen/*swiotlb*
 XFS FILESYSTEM
 C:     irc://irc.oftc.net/xfs
 M:     Darrick J. Wong <djwong@kernel.org>
-M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
 S:     Supported
 W:     http://xfs.org/
index fa5112a0ec1b00db2feab34075879205476b26b7..c3ec1ea423797f5d39a1b4a241e3d0b859c3aec2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Superb Owl
 
 # *DOCUMENTATION*
index dcaa44e408ace2adc3bdad5c8545ccaf046d55f9..f48ba03e9b5e7dbf440571ebed1f15a11974abac 100644 (file)
                        cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
                                   <&creg_gpio 1 GPIO_ACTIVE_LOW>;
 
-                       spi-flash@0 {
+                       flash@0 {
                                compatible = "sst26wf016b", "jedec,spi-nor";
                                reg = <0>;
                                #address-cells = <1>;
index 088d348781c1c62847ccc389377bbe940f3548db..1b0ffaeee16d0e2efb720a91b8a7b3d0eb7d815a 100644 (file)
@@ -5,7 +5,7 @@
 
 #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
 
-#define ATOMIC_OP(op, c_op, asm_op)                                    \
+#define ATOMIC_OP(op, asm_op)                                  \
 static inline void arch_atomic_##op(int i, atomic_t *v)                        \
 {                                                                      \
        unsigned int val;                                               \
@@ -21,7 +21,7 @@ static inline void arch_atomic_##op(int i, atomic_t *v)                       \
        : "cc");                                                        \
 }                                                                      \
 
-#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
+#define ATOMIC_OP_RETURN(op, asm_op)                           \
 static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)        \
 {                                                                      \
        unsigned int val;                                               \
@@ -42,7 +42,7 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)       \
 #define arch_atomic_add_return_relaxed         arch_atomic_add_return_relaxed
 #define arch_atomic_sub_return_relaxed         arch_atomic_sub_return_relaxed
 
-#define ATOMIC_FETCH_OP(op, c_op, asm_op)                              \
+#define ATOMIC_FETCH_OP(op, asm_op)                            \
 static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 {                                                                      \
        unsigned int val, orig;                                         \
@@ -69,23 +69,23 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)      \
 #define arch_atomic_fetch_or_relaxed           arch_atomic_fetch_or_relaxed
 #define arch_atomic_fetch_xor_relaxed          arch_atomic_fetch_xor_relaxed
 
-#define ATOMIC_OPS(op, c_op, asm_op)                                   \
-       ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_OP_RETURN(op, c_op, asm_op)                              \
-       ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op)                                 \
+       ATOMIC_OP(op, asm_op)                                   \
+       ATOMIC_OP_RETURN(op, asm_op)                            \
+       ATOMIC_FETCH_OP(op, asm_op)
 
-ATOMIC_OPS(add, +=, add)
-ATOMIC_OPS(sub, -=, sub)
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
 
 #undef ATOMIC_OPS
-#define ATOMIC_OPS(op, c_op, asm_op)                                   \
-       ATOMIC_OP(op, c_op, asm_op)                                     \
-       ATOMIC_FETCH_OP(op, c_op, asm_op)
+#define ATOMIC_OPS(op, asm_op)                                 \
+       ATOMIC_OP(op, asm_op)                                   \
+       ATOMIC_FETCH_OP(op, asm_op)
 
-ATOMIC_OPS(and, &=, and)
-ATOMIC_OPS(andnot, &= ~, bic)
-ATOMIC_OPS(or, |=, or)
-ATOMIC_OPS(xor, ^=, xor)
+ATOMIC_OPS(and, and)
+ATOMIC_OPS(andnot, bic)
+ATOMIC_OPS(or, or)
+ATOMIC_OPS(xor, xor)
 
 #define arch_atomic_andnot             arch_atomic_andnot
 
index 7848348719b26fbbc90b61cff8cf3b1a99ebe6cf..64ca25d199beaa28b91be8ab17f74b953ac7004f 100644 (file)
@@ -98,9 +98,6 @@
 /*
  * 1st level paging: pgd
  */
-#define pgd_index(addr)                ((addr) >> PGDIR_SHIFT)
-#define pgd_offset(mm, addr)   (((mm)->pgd) + pgd_index(addr))
-#define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 #define pgd_ERROR(e) \
        pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
index 03f8b1be0c3a82ba27b54e0ba96243a4766f8b66..1e1db51b69414362f67e62813df484a39f1690ec 100644 (file)
@@ -366,7 +366,7 @@ void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
        case op_SP:     /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
                /* note: we are ignoring possibility of:
                 * ADD_S, SUB_S, PUSH_S, POP_S as these should not
-                * cause unaliged exception anyway */
+                * cause unaligned exception anyway */
                state->write = BITS(state->words[0], 6, 6);
                state->zz = BITS(state->words[0], 5, 5);
                if (state->zz)
@@ -503,7 +503,6 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
 {
        struct disasm_state instr;
 
-       memset(&instr, 0, sizeof(struct disasm_state));
        disasm_instr(pc, &instr, 0, regs, cregs);
 
        *next_pc = pc + instr.instr_len;
index dd77a0c8f740b6c8be4c688a909b825467e3ff5d..66ba549b520fc02a5518be15c795ae466b6539ee 100644 (file)
@@ -196,6 +196,7 @@ tracesys_exit:
        st  r0, [sp, PT_r0]     ; sys call return value in pt_regs
 
        ;POST Sys Call Ptrace Hook
+       mov r0, sp              ; pt_regs needed
        bl  @syscall_trace_exit
        b   ret_from_exception ; NOT ret_from_system_call at is saves r0 which
        ; we'd done before calling post hook above
index f748483628f2c22ec98408c056d417f0fe580179..3c1590c27fae3b3e72d37d01a25c5ca691c29bf3 100644 (file)
@@ -319,7 +319,7 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
        regs->ret = (unsigned long)ksig->ka.sa.sa_handler;
 
        /*
-        * handler returns using sigreturn stub provided already by userpsace
+        * handler returns using sigreturn stub provided already by userspace
         * If not, nuke the process right away
         */
        if(!(ksig->ka.sa.sa_flags & SA_RESTORER))
index 78e6d069b1c1279eb556b143fe6413ac5857208e..d947473f1e6da5f906ba25c7841c99bb981055ad 100644 (file)
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
 
 struct plat_smp_ops  __weak plat_smp_ops;
 
-/* XXX: per cpu ? Only needed once in early seconday boot */
+/* XXX: per cpu ? Only needed once in early secondary boot */
 struct task_struct *secondary_idle_tsk;
 
 /* Called from start_kernel */
@@ -274,7 +274,7 @@ static void ipi_send_msg_one(int cpu, enum ipi_msg_type msg)
         * and read back old value
         */
        do {
-               new = old = READ_ONCE(*ipi_data_ptr);
+               new = old = *ipi_data_ptr;
                new |= 1U << msg;
        } while (cmpxchg(ipi_data_ptr, old, new) != old);
 
index d63ebd81f1c6d65bd98350aa4b3579c208a56093..99a9b92ed98d629e75783f71eef34f72d84800fd 100644 (file)
@@ -237,7 +237,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
        if (state.fault)
                goto fault;
 
-       /* clear any remanants of delay slot */
+       /* clear any remnants of delay slot */
        if (delay_mode(regs)) {
                regs->ret = regs->bta & ~1U;
                regs->status32 &= ~STATUS_DE_MASK;
index 8aa1231865d15a39b52c11e63eac1b86686d0d11..5446967ea98d3c1715cf5cbb6acaba7ecf2df108 100644 (file)
@@ -401,7 +401,7 @@ static inline void __before_dc_op(const int op)
 {
        if (op == OP_FLUSH_N_INV) {
                /* Dcache provides 2 cmd: FLUSH or INV
-                * INV inturn has sub-modes: DISCARD or FLUSH-BEFORE
+                * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * So toggle INV sub-mode depending on op request and default
                 */
index f7d993628cb700f87b62108141da7658d03c17bf..a9c1efcf7c9cfd8aa323d759f5c13f1eb309d138 100644 (file)
@@ -17,7 +17,6 @@ menuconfig ARCH_EXYNOS
        select EXYNOS_PMU
        select EXYNOS_SROM
        select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
-       select GPIOLIB
        select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
        select HAVE_ARM_SCU if SMP
        select PINCTRL
index ec5b082f3de6e35914ab6a73fe8e292fa6c08f7e..07eb69f9e7df3d2cbf77719f0c3df62f232ceac7 100644 (file)
@@ -337,12 +337,15 @@ int __init arch_xen_unpopulated_init(struct resource **res)
 
        if (!nr_reg) {
                pr_err("No extended regions are found\n");
+               of_node_put(np);
                return -EINVAL;
        }
 
        regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
-       if (!regs)
+       if (!regs) {
+               of_node_put(np);
                return -ENOMEM;
+       }
 
        /*
         * Create resource from extended regions provided by the hypervisor to be
@@ -403,8 +406,8 @@ int __init arch_xen_unpopulated_init(struct resource **res)
        *res = &xen_resource;
 
 err:
+       of_node_put(np);
        kfree(regs);
-
        return rc;
 }
 #endif
@@ -424,8 +427,10 @@ static void __init xen_dt_guest_init(void)
 
        if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
                pr_err("Xen grant table region is not found\n");
+               of_node_put(xen_node);
                return;
        }
+       of_node_put(xen_node);
        xen_grant_frames = res.start;
 }
 
index 57c4c995965f8291bf3c89303bc8c00dc0276dd4..20ea89d9ac2fa7cc1564f43ed518a6b18e52270b 100644 (file)
@@ -175,8 +175,6 @@ config ARM64
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_REGS \
-               if $(cc-option,-fpatchable-function-entry=2)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_REGS
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -228,6 +226,17 @@ config ARM64
        help
          ARM 64-bit (AArch64) Linux support.
 
+config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+       def_bool CC_IS_CLANG
+       # https://github.com/ClangBuiltLinux/linux/issues/1507
+       depends on AS_IS_GNU || (AS_IS_LLVM && (LD_IS_LLD || LD_VERSION >= 23600))
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
+config GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_REGS
+       def_bool CC_IS_GCC
+       depends on $(cc-option,-fpatchable-function-entry=2)
+       select HAVE_DYNAMIC_FTRACE_WITH_REGS
+
 config 64BIT
        def_bool y
 
@@ -678,7 +687,7 @@ config ARM64_ERRATUM_2051678
        default y
        help
          This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678.
-         Affected Coretex-A510 might not respect the ordering rules for
+         Affected Cortex-A510 might not respect the ordering rules for
          hardware update of the page table's dirty bit. The workaround
          is to not enable the feature on affected CPUs.
 
index 94e147e5456ca9998ede2aa20d580c2f4431b9fa..dff2b483ea50927249b3392152c0afdf0c190f3d 100644 (file)
@@ -535,7 +535,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                 PMD_TYPE_TABLE)
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
-#define pmd_leaf(pmd)          pmd_sect(pmd)
+#define pmd_leaf(pmd)          (pmd_present(pmd) && !pmd_table(pmd))
 #define pmd_bad(pmd)           (!pmd_table(pmd))
 
 #define pmd_leaf_size(pmd)     (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
@@ -625,7 +625,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_none(pud)          (!pud_val(pud))
 #define pud_bad(pud)           (!pud_table(pud))
 #define pud_present(pud)       pte_present(pud_pte(pud))
-#define pud_leaf(pud)          pud_sect(pud)
+#define pud_leaf(pud)          (pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
index f5cbfe5efd25fdfe2236f40194388511c7f320a4..f80cce0e38994538dcef03bd2dec3f0e03ffa716 100644 (file)
@@ -615,23 +615,22 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
                return;
        }
 
-       /* Conditionally hard-enable interrupts. */
-       if (should_hard_irq_enable()) {
-               /*
-                * Ensure a positive value is written to the decrementer, or
-                * else some CPUs will continue to take decrementer exceptions.
-                * When the PPC_WATCHDOG (decrementer based) is configured,
-                * keep this at most 31 bits, which is about 4 seconds on most
-                * systems, which gives the watchdog a chance of catching timer
-                * interrupt hard lockups.
-                */
-               if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
-                       set_dec(0x7fffffff);
-               else
-                       set_dec(decrementer_max);
+       /*
+        * Ensure a positive value is written to the decrementer, or
+        * else some CPUs will continue to take decrementer exceptions.
+        * When the PPC_WATCHDOG (decrementer based) is configured,
+        * keep this at most 31 bits, which is about 4 seconds on most
+        * systems, which gives the watchdog a chance of catching timer
+        * interrupt hard lockups.
+        */
+       if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
+               set_dec(0x7fffffff);
+       else
+               set_dec(decrementer_max);
 
+       /* Conditionally hard-enable interrupts. */
+       if (should_hard_irq_enable())
                do_hard_irq_enable();
-       }
 
 #if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
index e4ce2a35483f6fcb32f00b50c633c563375e1a1d..42851c32ff3bee0eb576b3d90c768fa7313f247c 100644 (file)
@@ -168,9 +168,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
                        return -EINVAL;
                /* Read the entry from guest memory */
                addr = base + (index * sizeof(rpte));
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+
+               kvm_vcpu_srcu_read_lock(vcpu);
                ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (ret) {
                        if (pte_ret_p)
                                *pte_ret_p = addr;
@@ -246,9 +247,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
 
        /* Read the table to find the root of the radix tree */
        ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
-       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (ret)
                return ret;
 
index d42b4b6d4a791d17fde665688a3d08e70a08c01e..85cfa6328222b326736f2309d31c12e46a80fc6b 100644 (file)
@@ -420,13 +420,19 @@ static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
        tbl[idx % TCES_PER_PAGE] = tce;
 }
 
-static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
-               unsigned long entry)
+static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
+               struct iommu_table *tbl, unsigned long entry)
 {
-       unsigned long hpa = 0;
-       enum dma_data_direction dir = DMA_NONE;
+       unsigned long i;
+       unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+       unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+       for (i = 0; i < subpages; ++i) {
+               unsigned long hpa = 0;
+               enum dma_data_direction dir = DMA_NONE;
 
-       iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
+               iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
+       }
 }
 
 static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -485,6 +491,8 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                        break;
        }
 
+       iommu_tce_kill(tbl, io_entry, subpages);
+
        return ret;
 }
 
@@ -544,6 +552,8 @@ static long kvmppc_tce_iommu_map(struct kvm *kvm,
                        break;
        }
 
+       iommu_tce_kill(tbl, io_entry, subpages);
+
        return ret;
 }
 
@@ -590,10 +600,9 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);
 
-               iommu_tce_kill(stit->tbl, entry, 1);
 
                if (ret != H_SUCCESS) {
-                       kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+                       kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
                        goto unlock_exit;
                }
        }
@@ -669,13 +678,13 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
-                       goto invalidate_exit;
+                       goto unlock_exit;
                }
                tce = be64_to_cpu(tce);
 
                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
-                       goto invalidate_exit;
+                       goto unlock_exit;
                }
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -684,19 +693,15 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                        iommu_tce_direction(tce));
 
                        if (ret != H_SUCCESS) {
-                               kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
-                                               entry);
-                               goto invalidate_exit;
+                               kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
+                                                entry + i);
+                               goto unlock_exit;
                        }
                }
 
                kvmppc_tce_put(stt, entry + i, tce);
        }
 
-invalidate_exit:
-       list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-               iommu_tce_kill(stit->tbl, entry, npages);
-
 unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
@@ -735,20 +740,16 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                                continue;
 
                        if (ret == H_TOO_HARD)
-                               goto invalidate_exit;
+                               return ret;
 
                        WARN_ON_ONCE(1);
-                       kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
+                       kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
                }
        }
 
        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
 
-invalidate_exit:
-       list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-               iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
index 870b7f0c7ea561fb323439add7e294a3007cdb6f..fdeda6a9cff449b1a13cc45a05ae1629183b13b3 100644 (file)
@@ -247,13 +247,19 @@ static void iommu_tce_kill_rm(struct iommu_table *tbl,
                tbl->it_ops->tce_kill(tbl, entry, pages, true);
 }
 
-static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
-               unsigned long entry)
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *stt,
+               struct iommu_table *tbl, unsigned long entry)
 {
-       unsigned long hpa = 0;
-       enum dma_data_direction dir = DMA_NONE;
+       unsigned long i;
+       unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
+       unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);
+
+       for (i = 0; i < subpages; ++i) {
+               unsigned long hpa = 0;
+               enum dma_data_direction dir = DMA_NONE;
 
-       iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, entry, &hpa, &dir);
+               iommu_tce_xchg_no_kill_rm(kvm->mm, tbl, io_entry + i, &hpa, &dir);
+       }
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -316,6 +322,8 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
                        break;
        }
 
+       iommu_tce_kill_rm(tbl, io_entry, subpages);
+
        return ret;
 }
 
@@ -379,6 +387,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
                        break;
        }
 
+       iommu_tce_kill_rm(tbl, io_entry, subpages);
+
        return ret;
 }
 
@@ -420,10 +430,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                        ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry, ua, dir);
 
-               iommu_tce_kill_rm(stit->tbl, entry, 1);
-
                if (ret != H_SUCCESS) {
-                       kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+                       kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry);
                        return ret;
                }
        }
@@ -561,7 +569,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                ua = 0;
                if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
                        ret = H_PARAMETER;
-                       goto invalidate_exit;
+                       goto unlock_exit;
                }
 
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -570,19 +578,15 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                        iommu_tce_direction(tce));
 
                        if (ret != H_SUCCESS) {
-                               kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
-                                               entry);
-                               goto invalidate_exit;
+                               kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl,
+                                               entry + i);
+                               goto unlock_exit;
                        }
                }
 
                kvmppc_rm_tce_put(stt, entry + i, tce);
        }
 
-invalidate_exit:
-       list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-               iommu_tce_kill_rm(stit->tbl, entry, npages);
-
 unlock_exit:
        if (!prereg)
                arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
@@ -620,20 +624,16 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                                continue;
 
                        if (ret == H_TOO_HARD)
-                               goto invalidate_exit;
+                               return ret;
 
                        WARN_ON_ONCE_RM(1);
-                       kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+                       kvmppc_rm_clear_tce(vcpu->kvm, stt, stit->tbl, entry + i);
                }
        }
 
        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value);
 
-invalidate_exit:
-       list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
-               iommu_tce_kill_rm(stit->tbl, ioba >> stt->page_shift, npages);
-
        return ret;
 }
 
index 9d373f8963ee98a9977f4a796f200c6343b5d67b..c943a051c6e700c2b58416aef857b9e310563678 100644 (file)
@@ -306,10 +306,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
        /* copy parameters in */
        hv_ptr = kvmppc_get_gpr(vcpu, 4);
        regs_ptr = kvmppc_get_gpr(vcpu, 5);
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        err = kvmhv_read_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
                                              hv_ptr, regs_ptr);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (err)
                return H_PARAMETER;
 
@@ -410,10 +410,10 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
                byteswap_hv_regs(&l2_hv);
                byteswap_pt_regs(&l2_regs);
        }
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        err = kvmhv_write_guest_state_and_regs(vcpu, &l2_hv, &l2_regs,
                                               hv_ptr, regs_ptr);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (err)
                return H_AUTHORITY;
 
@@ -600,16 +600,16 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
                        goto not_found;
 
                /* Write what was loaded into our buffer back to the L1 guest */
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (rc)
                        goto not_found;
        } else {
                /* Load the data to be stored from the L1 guest into our buf */
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (rc)
                        goto not_found;
 
index 0f847f1e5ddd0ba6590642548fe799f6335ca015..6808bda0dbc10c114a9b1d2fab1b4d63d24a6224 100644 (file)
@@ -229,9 +229,9 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         */
        args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (rc)
                goto fail;
 
index 875c30c12db046957bed254f5519a279023e2e8e..533c4232e5abfd926c859fcdfb45c7fee658682a 100644 (file)
@@ -425,9 +425,9 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                return EMULATE_DONE;
        }
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        if (rc)
                return EMULATE_DO_MMIO;
 
index 2f46e31c76129799eb13378b31498ae76cbe5611..4f53d0b97539bb6cd31fadfbc5eab06a8054ffc8 100644 (file)
@@ -3,11 +3,11 @@
 obj-y                          += callchain.o callchain_$(BITS).o perf_regs.o
 obj-$(CONFIG_COMPAT)           += callchain_32.o
 
-obj-$(CONFIG_PPC_PERF_CTRS)    += core-book3s.o bhrb.o
+obj-$(CONFIG_PPC_PERF_CTRS)    += core-book3s.o
 obj64-$(CONFIG_PPC_PERF_CTRS)  += ppc970-pmu.o power5-pmu.o \
                                   power5+-pmu.o power6-pmu.o power7-pmu.o \
                                   isa207-common.o power8-pmu.o power9-pmu.o \
-                                  generic-compat-pmu.o power10-pmu.o
+                                  generic-compat-pmu.o power10-pmu.o bhrb.o
 obj32-$(CONFIG_PPC_PERF_CTRS)  += mpc7450-pmu.o
 
 obj-$(CONFIG_PPC_POWERNV)      += imc-pmu.o
index d3398100a60fd36babe6c6c0f13126c4b164ba82..c6d51e7093cf1191d4af1527d07a8d9e9f979b42 100644 (file)
@@ -91,8 +91,8 @@ extern u64 PERF_REG_EXTENDED_MASK;
 
 /* Table of alternatives, sorted by column 0 */
 static const unsigned int power10_event_alternatives[][MAX_ALT] = {
-       { PM_CYC_ALT,                   PM_CYC },
        { PM_INST_CMPL_ALT,             PM_INST_CMPL },
+       { PM_CYC_ALT,                   PM_CYC },
 };
 
 static int power10_get_alternatives(u64 event, unsigned int flags, u64 alt[])
index c9eb5232e68b6ab7b220882b4c502c15f3bb9feb..c393e837648e2efe34287615ef6fd2155a2b59d5 100644 (file)
@@ -133,11 +133,11 @@ int p9_dd22_bl_ev[] = {
 
 /* Table of alternatives, sorted by column 0 */
 static const unsigned int power9_event_alternatives[][MAX_ALT] = {
-       { PM_INST_DISP,                 PM_INST_DISP_ALT },
-       { PM_RUN_CYC_ALT,               PM_RUN_CYC },
-       { PM_RUN_INST_CMPL_ALT,         PM_RUN_INST_CMPL },
-       { PM_LD_MISS_L1,                PM_LD_MISS_L1_ALT },
        { PM_BR_2PATH,                  PM_BR_2PATH_ALT },
+       { PM_INST_DISP,                 PM_INST_DISP_ALT },
+       { PM_RUN_CYC_ALT,               PM_RUN_CYC },
+       { PM_LD_MISS_L1,                PM_LD_MISS_L1_ALT },
+       { PM_RUN_INST_CMPL_ALT,         PM_RUN_INST_CMPL },
 };
 
 static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
index 34592d00dde8c65316b7b4f7325aa7e05fc4ff21..f6ef358d8a2cf6eb628a054ef96794a682a12d3d 100644 (file)
@@ -38,7 +38,7 @@ config SOC_VIRT
        select SIFIVE_PLIC
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
-       select RISCV_SBI_CPUIDLE if CPU_IDLE
+       select RISCV_SBI_CPUIDLE if CPU_IDLE && RISCV_SBI
        help
          This enables support for QEMU Virt Machine.
 
index 78da839657e52401f34b93acde2bc1cd31fd6cac..cd4bbcecb0fbf04c0ddf3ef1a9928576dd51c385 100644 (file)
@@ -193,9 +193,6 @@ struct kvm_vcpu_arch {
 
        /* Don't run the VCPU (blocked) */
        bool pause;
-
-       /* SRCU lock index for in-kernel run loop */
-       int srcu_idx;
 };
 
 static inline void kvm_arch_hardware_unsetup(void) {}
index 6785aef4cbd46e4d8bfd90c65fe2ea43c2cc1a5f..7461f964d20a92e579e80c2485a41be9da2b051a 100644 (file)
@@ -38,14 +38,16 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
                       sizeof(kvm_vcpu_stats_desc),
 };
 
-#define KVM_RISCV_ISA_ALLOWED  (riscv_isa_extension_mask(a) | \
-                                riscv_isa_extension_mask(c) | \
-                                riscv_isa_extension_mask(d) | \
-                                riscv_isa_extension_mask(f) | \
-                                riscv_isa_extension_mask(i) | \
-                                riscv_isa_extension_mask(m) | \
-                                riscv_isa_extension_mask(s) | \
-                                riscv_isa_extension_mask(u))
+#define KVM_RISCV_ISA_DISABLE_ALLOWED  (riscv_isa_extension_mask(d) | \
+                                       riscv_isa_extension_mask(f))
+
+#define KVM_RISCV_ISA_DISABLE_NOT_ALLOWED      (riscv_isa_extension_mask(a) | \
+                                               riscv_isa_extension_mask(c) | \
+                                               riscv_isa_extension_mask(i) | \
+                                               riscv_isa_extension_mask(m))
+
+#define KVM_RISCV_ISA_ALLOWED (KVM_RISCV_ISA_DISABLE_ALLOWED | \
+                              KVM_RISCV_ISA_DISABLE_NOT_ALLOWED)
 
 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 {
@@ -219,7 +221,8 @@ static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
        switch (reg_num) {
        case KVM_REG_RISCV_CONFIG_REG(isa):
                if (!vcpu->arch.ran_atleast_once) {
-                       vcpu->arch.isa = reg_val;
+                       /* Ignore the disable request for these extensions */
+                       vcpu->arch.isa = reg_val | KVM_RISCV_ISA_DISABLE_NOT_ALLOWED;
                        vcpu->arch.isa &= riscv_isa_extension_base(NULL);
                        vcpu->arch.isa &= KVM_RISCV_ISA_ALLOWED;
                        kvm_riscv_vcpu_fp_reset(vcpu);
@@ -724,13 +727,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        /* Mark this VCPU ran at least once */
        vcpu->arch.ran_atleast_once = true;
 
-       vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        /* Process MMIO value returned from user-space */
        if (run->exit_reason == KVM_EXIT_MMIO) {
                ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
                if (ret) {
-                       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        return ret;
                }
        }
@@ -739,13 +742,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        if (run->exit_reason == KVM_EXIT_RISCV_SBI) {
                ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
                if (ret) {
-                       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        return ret;
                }
        }
 
        if (run->immediate_exit) {
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                return -EINTR;
        }
 
@@ -784,7 +787,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                vcpu->mode = IN_GUEST_MODE;
 
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                smp_mb__after_srcu_read_unlock();
 
                /*
@@ -802,7 +805,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        local_irq_enable();
                        preempt_enable();
-                       vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+                       kvm_vcpu_srcu_read_lock(vcpu);
                        continue;
                }
 
@@ -846,7 +849,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
                preempt_enable();
 
-               vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
        }
@@ -855,7 +858,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 
        vcpu_put(vcpu);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        return ret;
 }
index aa8af129e4bb93ed5c48e988ab7e4071e6ef3273..a72c15d4b42a599a2b640b27af8eb699b4f4724e 100644 (file)
@@ -456,9 +456,9 @@ static int stage2_page_fault(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
 {
        if (!kvm_arch_vcpu_runnable(vcpu)) {
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                kvm_vcpu_halt(vcpu);
-               vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                kvm_clear_request(KVM_REQ_UNHALT, vcpu);
        }
 }
index 9535bea8688c097a2895dadd34895e93cde58a14..b0793dc0c291e973e7c59a04464ccdcf8c9caddd 100644 (file)
@@ -718,6 +718,7 @@ retry:
                if (!check_l4) {
                        disable_pgtable_l5();
                        check_l4 = true;
+                       memset(early_pg_dir, 0, PAGE_SIZE);
                        goto retry;
                }
                disable_pgtable_l4();
index 9b30beac904db866ba23fecb890784fb8e8eb4a9..af96dc0549a4b57ccce07688cad056c1e3f67fe1 100644 (file)
@@ -1334,11 +1334,11 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
        hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        kvm_vcpu_halt(vcpu);
        vcpu->valid_wakeup = false;
        __unset_cpu_idle(vcpu);
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        hrtimer_cancel(&vcpu->arch.ckc_timer);
        return 0;
index 156d1c25a3c1ec9a225669013153bf44c90d5363..da3dabda1a12623a2ee09bb7c1e7abc855793fc0 100644 (file)
@@ -4237,14 +4237,14 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;
 
-               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                /*
                 * As PF_VCPU will be used in fault handler, between
                 * guest_enter and guest_exit should be no uaccess.
@@ -4281,12 +4281,12 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                __enable_cpu_timer_accounting(vcpu);
                guest_exit_irqoff();
                local_irq_enable();
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
        return rc;
 }
 
index acda4b6fc851824d22c3b7b68715c0905a8edfd1..dada78b92691fa01f8d6a96081743a8ca2dd59fe 100644 (file)
@@ -1091,7 +1091,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 
        handle_last_fault(vcpu, vsie_page);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        /* save current guest state of bp isolation override */
        guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);
@@ -1133,7 +1133,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
        if (!guest_bp_isolation)
                clear_thread_flag(TIF_ISOLATE_BP_GUEST);
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        if (rc == -EINTR) {
                VCPU_EVENT(vcpu, 3, "%s", "machine check");
index 41c6d734a47417647b4115eaf14175f4bebfbe46..adb6991d04554cb49e5b8510823802c538aefab4 100644 (file)
@@ -35,6 +35,7 @@
 #define flush_page_for_dma(addr) \
        sparc32_cachetlb_ops->page_for_dma(addr)
 
+struct page;
 void sparc_flush_page_to_ram(struct page *page);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
index 5d7762288a243f6b8cb9db4c329d581331566074..48e5db21142c2257a29233e9037fb881583ad85d 100644 (file)
@@ -51,7 +51,7 @@
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
@@ -62,7 +62,7 @@
  *                            perf code: 0x00
  *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
  *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- *                                             RPL
+ *                                             RPL,SPR
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
@@ -74,7 +74,7 @@
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
@@ -675,6 +675,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE,             &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &icx_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,           &icx_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &icx_cstates),
 
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &icl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &icl_cstates),
index 3c368b639c0462e2cd8b0757a255e94e4d56f68b..1a6d7e3f6c32c7f88917270b1f478608d6017830 100644 (file)
@@ -118,6 +118,7 @@ KVM_X86_OP_OPTIONAL(mem_enc_register_region)
 KVM_X86_OP_OPTIONAL(mem_enc_unregister_region)
 KVM_X86_OP_OPTIONAL(vm_copy_enc_context_from)
 KVM_X86_OP_OPTIONAL(vm_move_enc_context_from)
+KVM_X86_OP_OPTIONAL(guest_memory_reclaimed)
 KVM_X86_OP(get_msr_feature)
 KVM_X86_OP(can_emulate_instruction)
 KVM_X86_OP(apic_init_signal_blocked)
index e0c0f0e1f754c11fa44374a9e4536a6cbcaeb630..4ff36610af6ab5252d4956aa57d572bf45ee287a 100644 (file)
@@ -1484,6 +1484,7 @@ struct kvm_x86_ops {
        int (*mem_enc_unregister_region)(struct kvm *kvm, struct kvm_enc_region *argp);
        int (*vm_copy_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
        int (*vm_move_enc_context_from)(struct kvm *kvm, unsigned int source_fd);
+       void (*guest_memory_reclaimed)(struct kvm *kvm);
 
        int (*get_msr_feature)(struct kvm_msr_entry *entry);
 
index a22deb58f86d2e6d2f5709af95671be9c63c39c6..8b1c45c9cda8771a446aed8b4a62849dda08e77e 100644 (file)
@@ -69,6 +69,7 @@ static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __align
 DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
 static int has_steal_clock = 0;
 
+static int has_guest_poll = 0;
 /*
  * No need for any "IO delay" on KVM
  */
@@ -706,14 +707,26 @@ static int kvm_cpu_down_prepare(unsigned int cpu)
 
 static int kvm_suspend(void)
 {
+       u64 val = 0;
+
        kvm_guest_cpu_offline(false);
 
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+       if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
+               rdmsrl(MSR_KVM_POLL_CONTROL, val);
+       has_guest_poll = !(val & 1);
+#endif
        return 0;
 }
 
 static void kvm_resume(void)
 {
        kvm_cpu_online(raw_smp_processor_id());
+
+#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
+       if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
+               wrmsrl(MSR_KVM_POLL_CONTROL, 0);
+#endif
 }
 
 static struct syscore_ops kvm_syscore_ops = {
index 9e66fba1d6a37da38caf441db963b016ca8f37c7..22992b049d380f55f36660e5d866943f8856fddc 100644 (file)
@@ -138,6 +138,15 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
        return sample_period;
 }
 
+static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
+{
+       if (!pmc->perf_event || pmc->is_paused)
+               return;
+
+       perf_event_period(pmc->perf_event,
+                         get_sample_period(pmc, pmc->counter));
+}
+
 void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
 void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
 void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);
index 24eb935b6f85c309fe0adafc600a4be098e02d16..b14860863c39417e3ab196c5e54c9b6e392eac19 100644 (file)
@@ -257,6 +257,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
+               pmc_update_sample_period(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
index 537aaddc852fc420b62bef0c4785bfb7c7b86eec..0ad70c12c7c311d605c7820c1e9f223415d9e319 100644 (file)
@@ -2226,51 +2226,47 @@ int sev_cpu_init(struct svm_cpu_data *sd)
  * Pages used by hardware to hold guest encrypted state must be flushed before
  * returning them to the system.
  */
-static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
-                                  unsigned long len)
+static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
 {
+       int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
+
        /*
-        * If hardware enforced cache coherency for encrypted mappings of the
-        * same physical page is supported, nothing to do.
+        * Note!  The address must be a kernel address, as regular page walk
+        * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
+        * address is non-deterministic and unsafe.  This function deliberately
+        * takes a pointer to deter passing in a user address.
         */
-       if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
-               return;
+       unsigned long addr = (unsigned long)va;
 
        /*
-        * If the VM Page Flush MSR is supported, use it to flush the page
-        * (using the page virtual address and the guest ASID).
+        * If CPU enforced cache coherency for encrypted mappings of the
+        * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
+        * flush is still needed in order to work properly with DMA devices.
         */
-       if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
-               struct kvm_sev_info *sev;
-               unsigned long va_start;
-               u64 start, stop;
-
-               /* Align start and stop to page boundaries. */
-               va_start = (unsigned long)va;
-               start = (u64)va_start & PAGE_MASK;
-               stop = PAGE_ALIGN((u64)va_start + len);
-
-               if (start < stop) {
-                       sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
+       if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
+               clflush_cache_range(va, PAGE_SIZE);
+               return;
+       }
 
-                       while (start < stop) {
-                               wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
-                                      start | sev->asid);
+       /*
+        * VM Page Flush takes a host virtual address and a guest ASID.  Fall
+        * back to WBINVD if this faults so as not to make any problems worse
+        * by leaving stale encrypted data in the cache.
+        */
+       if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
+               goto do_wbinvd;
 
-                               start += PAGE_SIZE;
-                       }
+       return;
 
-                       return;
-               }
+do_wbinvd:
+       wbinvd_on_all_cpus();
+}
 
-               WARN(1, "Address overflow, using WBINVD\n");
-       }
+void sev_guest_memory_reclaimed(struct kvm *kvm)
+{
+       if (!sev_guest(kvm))
+               return;
 
-       /*
-        * Hardware should always have one of the above features,
-        * but if not, use WBINVD and issue a warning.
-        */
-       WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
        wbinvd_on_all_cpus();
 }
 
@@ -2284,7 +2280,8 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
        svm = to_svm(vcpu);
 
        if (vcpu->arch.guest_state_protected)
-               sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
+               sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);
+
        __free_page(virt_to_page(svm->sev_es.vmsa));
 
        if (svm->sev_es.ghcb_sa_free)
index bd4c64b362d24a06c9a2e48de0de9803d700c47f..7e45d03cd018a5cc354936fcebc5b14d43c2cbcc 100644 (file)
@@ -4620,6 +4620,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .mem_enc_ioctl = sev_mem_enc_ioctl,
        .mem_enc_register_region = sev_mem_enc_register_region,
        .mem_enc_unregister_region = sev_mem_enc_unregister_region,
+       .guest_memory_reclaimed = sev_guest_memory_reclaimed,
 
        .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
        .vm_move_enc_context_from = sev_vm_move_enc_context_from,
index f77a7d2d39dd6dfe30b94b5cca2665435f378a00..f76deff71002cbbd3403f43faf7a773bfb14280d 100644 (file)
@@ -609,6 +609,8 @@ int sev_mem_enc_unregister_region(struct kvm *kvm,
                                  struct kvm_enc_region *range);
 int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd);
 int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd);
+void sev_guest_memory_reclaimed(struct kvm *kvm);
+
 void pre_sev_run(struct vcpu_svm *svm, int cpu);
 void __init sev_set_cpu_caps(void);
 void __init sev_hardware_setup(void);
index f18744f7ff82c9c85ecfee92ff7cc91bde33e894..856c87563883302e8ee4da0eec769fd8e0f99908 100644 (file)
@@ -4618,6 +4618,11 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
                kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
        }
 
+       if (vmx->nested.update_vmcs01_apicv_status) {
+               vmx->nested.update_vmcs01_apicv_status = false;
+               kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+       }
+
        if ((vm_exit_reason != -1) &&
            (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
                vmx->nested.need_vmcs12_to_shadow_sync = true;
index bc3f8512bb646d76339886342699d369dd827a5b..b82b6709d7a819090bb28106c0b70362fea5d7a1 100644 (file)
@@ -431,15 +431,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
                        pmc->counter += data - pmc_read_counter(pmc);
-                       if (pmc->perf_event && !pmc->is_paused)
-                               perf_event_period(pmc->perf_event,
-                                                 get_sample_period(pmc, data));
+                       pmc_update_sample_period(pmc);
                        return 0;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
                        pmc->counter += data - pmc_read_counter(pmc);
-                       if (pmc->perf_event && !pmc->is_paused)
-                               perf_event_period(pmc->perf_event,
-                                                 get_sample_period(pmc, data));
+                       pmc_update_sample_period(pmc);
                        return 0;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
                        if (data == pmc->eventsel)
index 04d170c4b61eb48688b85d60b961d5b539509cb4..d58b763df855f6dfaaa761e0d1a4a7c8ccc12d1f 100644 (file)
@@ -4174,6 +4174,11 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (is_guest_mode(vcpu)) {
+               vmx->nested.update_vmcs01_apicv_status = true;
+               return;
+       }
+
        pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
        if (cpu_has_secondary_exec_ctrls()) {
                if (kvm_vcpu_apicv_active(vcpu))
index 9c6bfcd84008be990153a2bffa327ca3e33e996a..b98c7e96697a9a4925909a418831cdf5d5138b24 100644 (file)
@@ -183,6 +183,7 @@ struct nested_vmx {
        bool change_vmcs01_virtual_apic_mode;
        bool reload_vmcs01_apic_access_page;
        bool update_vmcs01_cpu_dirty_logging;
+       bool update_vmcs01_apicv_status;
 
        /*
         * Enlightened VMCS has been enabled. It does not mean that L1 has to
index 547ba00ef64fc3d12f2e3457221b2b96685688e0..a6ab19afc638b223dcd4ed89c29d6265edbb12ca 100644 (file)
@@ -9111,7 +9111,7 @@ static void kvm_apicv_init(struct kvm *kvm)
 
        if (!enable_apicv)
                set_or_clear_apicv_inhibit(inhibits,
-                                          APICV_INHIBIT_REASON_ABSENT, true);
+                                          APICV_INHIBIT_REASON_DISABLE, true);
 }
 
 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
@@ -9889,6 +9889,11 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
 }
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+       static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
+}
+
 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
 {
        if (!lapic_in_kernel(vcpu))
@@ -10097,7 +10102,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        /* Store vcpu->apicv_active before vcpu->mode.  */
        smp_store_release(&vcpu->mode, IN_GUEST_MODE);
 
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        /*
         * 1) We should set ->mode before checking ->requests.  Please see
@@ -10128,7 +10133,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                smp_wmb();
                local_irq_enable();
                preempt_enable();
-               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
                r = 1;
                goto cancel_injection;
        }
@@ -10254,7 +10259,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        local_irq_enable();
        preempt_enable();
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
 
        /*
         * Profile KVM exit RIPs:
@@ -10284,7 +10289,7 @@ out:
 }
 
 /* Called within kvm->srcu read side.  */
-static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
+static inline int vcpu_block(struct kvm_vcpu *vcpu)
 {
        bool hv_timer;
 
@@ -10300,12 +10305,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
                if (hv_timer)
                        kvm_lapic_switch_to_sw_timer(vcpu);
 
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                        kvm_vcpu_halt(vcpu);
                else
                        kvm_vcpu_block(vcpu);
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                if (hv_timer)
                        kvm_lapic_switch_to_hv_timer(vcpu);
@@ -10347,7 +10352,6 @@ static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 static int vcpu_run(struct kvm_vcpu *vcpu)
 {
        int r;
-       struct kvm *kvm = vcpu->kvm;
 
        vcpu->arch.l1tf_flush_l1d = true;
 
@@ -10355,7 +10359,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                if (kvm_vcpu_running(vcpu)) {
                        r = vcpu_enter_guest(vcpu);
                } else {
-                       r = vcpu_block(kvm, vcpu);
+                       r = vcpu_block(vcpu);
                }
 
                if (r <= 0)
@@ -10374,9 +10378,9 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
                }
 
                if (__xfer_to_guest_mode_work_pending()) {
-                       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+                       kvm_vcpu_srcu_read_unlock(vcpu);
                        r = xfer_to_guest_mode_handle_work(vcpu);
-                       vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+                       kvm_vcpu_srcu_read_lock(vcpu);
                        if (r)
                                return r;
                }
@@ -10387,12 +10391,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 
 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
 {
-       int r;
-
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
-       r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
-       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-       return r;
+       return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
 }
 
 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
@@ -10484,7 +10483,6 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_run *kvm_run = vcpu->run;
-       struct kvm *kvm = vcpu->kvm;
        int r;
 
        vcpu_load(vcpu);
@@ -10492,7 +10490,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
        kvm_run->flags = 0;
        kvm_load_guest_fpu(vcpu);
 
-       vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+       kvm_vcpu_srcu_read_lock(vcpu);
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                if (kvm_run->immediate_exit) {
                        r = -EINTR;
@@ -10504,9 +10502,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                 */
                WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
 
-               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               kvm_vcpu_srcu_read_unlock(vcpu);
                kvm_vcpu_block(vcpu);
-               vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
+               kvm_vcpu_srcu_read_lock(vcpu);
 
                if (kvm_apic_accept_events(vcpu) < 0) {
                        r = 0;
@@ -10567,7 +10565,7 @@ out:
        if (kvm_run->kvm_valid_regs)
                store_regs(vcpu);
        post_kvm_run_save(vcpu);
-       srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+       kvm_vcpu_srcu_read_unlock(vcpu);
 
        kvm_sigset_deactivate(vcpu);
        vcpu_put(vcpu);
@@ -10985,6 +10983,9 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
+       if (!enable_apicv)
+               return;
+
        down_write(&kvm->arch.apicv_update_lock);
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -11196,8 +11197,21 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
                r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
                if (r < 0)
                        goto fail_mmu_destroy;
-               if (kvm_apicv_activated(vcpu->kvm))
+
+               /*
+                * Defer evaluating inhibits until the vCPU is first run, as
+                * this vCPU will not get notified of any changes until this
+                * vCPU is visible to other vCPUs (marked online and added to
+                * the set of vCPUs).  Opportunistically mark APICv active as
+                * VMX in particularly is highly unlikely to have inhibits.
+                * Ignore the current per-VM APICv state so that vCPU creation
+                * is guaranteed to run with a deterministic value, the request
+                * will ensure the vCPU gets the correct state before VM-Entry.
+                */
+               if (enable_apicv) {
                        vcpu->arch.apicv_active = true;
+                       kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
+               }
        } else
                static_branch_inc(&kvm_has_noapic_vcpu);
 
index 4556c86c34659e55a6ca82c8a769c37f78ec79c1..eb95e188d62bc27a764c3adf813f5a990b93dff4 100644 (file)
@@ -96,11 +96,6 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
          DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
          DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
         (void *)1},
-       /* T40 can not handle C3 idle state */
-       { set_max_cstate, "IBM ThinkPad T40", {
-         DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
-         DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
-        (void *)2},
        {},
 };
 
@@ -795,7 +790,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
                if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
                    cx->type == ACPI_STATE_C3) {
                        state->enter_dead = acpi_idle_play_dead;
-                       drv->safe_state_index = count;
+                       if (cx->type != ACPI_STATE_C3)
+                               drv->safe_state_index = count;
                }
                /*
                 * Halt-induced C1 is not good for ->enter_s2idle, because it
index 0c5a51970fbf54994b1536cb5c04dbbe9ec1b95c..014ccb0f45dc4628f8422d079f31727759c3537c 100644 (file)
@@ -77,6 +77,8 @@ static int marvell_cable_detect(struct ata_port *ap)
        switch(ap->port_no)
        {
        case 0:
+               if (!ap->ioaddr.bmdma_addr)
+                       return ATA_CBL_PATA_UNK;
                if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
                        return ATA_CBL_PATA40;
                return ATA_CBL_PATA80;
index 519b6d38d4df65859ac68abf485dd8d3dff32f44..fdb81f2794cde1dd8ed376e362258c887fae1b51 100644 (file)
@@ -33,6 +33,22 @@ config BLK_DEV_FD
          To compile this driver as a module, choose M here: the
          module will be called floppy.
 
+config BLK_DEV_FD_RAWCMD
+       bool "Support for raw floppy disk commands (DEPRECATED)"
+       depends on BLK_DEV_FD
+       help
+         If you want to use actual physical floppies and expect to do
+         special low-level hardware accesses to them (access and use
+         non-standard formats, for example), then enable this.
+
+         Note that the code enabled by this option is rarely used and
+         might be unstable or insecure, and distros should not enable it.
+
+         Note: FDRAWCMD is deprecated and will be removed from the kernel
+         in the near future.
+
+         If unsure, say N.
+
 config AMIGA_FLOPPY
        tristate "Amiga floppy support"
        depends on AMIGA
index 8c647532e3ce99fd285216792590dd95257f0f68..d5b9ff9bcbb2b8ad393fbe2a0a3dc7d70e80935f 100644 (file)
@@ -2982,6 +2982,8 @@ static const char *drive_name(int type, int drive)
                return "(null)";
 }
 
+#ifdef CONFIG_BLK_DEV_FD_RAWCMD
+
 /* raw commands */
 static void raw_cmd_done(int flag)
 {
@@ -3181,6 +3183,35 @@ static int raw_cmd_ioctl(int cmd, void __user *param)
        return ret;
 }
 
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+                               void __user *param)
+{
+       int ret;
+
+       pr_warn_once("Note: FDRAWCMD is deprecated and will be removed from the kernel in the near future.\n");
+
+       if (type)
+               return -EINVAL;
+       if (lock_fdc(drive))
+               return -EINTR;
+       set_floppy(drive);
+       ret = raw_cmd_ioctl(cmd, param);
+       if (ret == -EINTR)
+               return -EINTR;
+       process_fd_request();
+       return ret;
+}
+
+#else /* CONFIG_BLK_DEV_FD_RAWCMD */
+
+static int floppy_raw_cmd_ioctl(int type, int drive, int cmd,
+                               void __user *param)
+{
+       return -EOPNOTSUPP;
+}
+
+#endif
+
 static int invalidate_drive(struct block_device *bdev)
 {
        /* invalidate the buffer track to force a reread */
@@ -3369,7 +3400,6 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
 {
        int drive = (long)bdev->bd_disk->private_data;
        int type = ITYPE(drive_state[drive].fd_device);
-       int i;
        int ret;
        int size;
        union inparam {
@@ -3520,16 +3550,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
                outparam = &write_errors[drive];
                break;
        case FDRAWCMD:
-               if (type)
-                       return -EINVAL;
-               if (lock_fdc(drive))
-                       return -EINTR;
-               set_floppy(drive);
-               i = raw_cmd_ioctl(cmd, (void __user *)param);
-               if (i == -EINTR)
-                       return -EINTR;
-               process_fd_request();
-               return i;
+               return floppy_raw_cmd_ioctl(type, drive, cmd, (void __user *)param);
        case FDTWADDLE:
                if (lock_fdc(drive))
                        return -EINTR;
index f9d593ff4718300e0cca74e2f51623cf9134023d..0253731d6d25d20ad132eb4756bb634fda561781 100644 (file)
 #define CLK_HW_DIV                     2
 #define LUT_TURBO_IND                  1
 
+#define GT_IRQ_STATUS                  BIT(2)
+
 #define HZ_PER_KHZ                     1000
 
 struct qcom_cpufreq_soc_data {
        u32 reg_enable;
+       u32 reg_domain_state;
        u32 reg_dcvs_ctrl;
        u32 reg_freq_lut;
        u32 reg_volt_lut;
+       u32 reg_intr_clr;
        u32 reg_current_vote;
        u32 reg_perf_state;
        u8 lut_row_size;
@@ -280,37 +284,46 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
        }
 }
 
-static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
+static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
 {
-       unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
+       unsigned int lval;
+
+       if (data->soc_data->reg_current_vote)
+               lval = readl_relaxed(data->base + data->soc_data->reg_current_vote) & 0x3ff;
+       else
+               lval = readl_relaxed(data->base + data->soc_data->reg_domain_state) & 0xff;
 
-       return (val & 0x3FF) * 19200;
+       return lval * xo_rate;
 }
 
 static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
 {
        struct cpufreq_policy *policy = data->policy;
-       int cpu = cpumask_first(policy->cpus);
+       int cpu = cpumask_first(policy->related_cpus);
        struct device *dev = get_cpu_device(cpu);
        unsigned long freq_hz, throttled_freq;
        struct dev_pm_opp *opp;
-       unsigned int freq;
 
        /*
         * Get the h/w throttled frequency, normalize it using the
         * registered opp table and use it to calculate thermal pressure.
         */
-       freq = qcom_lmh_get_throttle_freq(data);
-       freq_hz = freq * HZ_PER_KHZ;
+       freq_hz = qcom_lmh_get_throttle_freq(data);
 
        opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
        if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
-               dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+               opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);
+
+       if (IS_ERR(opp)) {
+               dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
+       } else {
+               throttled_freq = freq_hz / HZ_PER_KHZ;
 
-       throttled_freq = freq_hz / HZ_PER_KHZ;
+               /* Update thermal pressure (the boost frequencies are accepted) */
+               arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
 
-       /* Update thermal pressure (the boost frequencies are accepted) */
-       arch_update_thermal_pressure(policy->related_cpus, throttled_freq);
+               dev_pm_opp_put(opp);
+       }
 
        /*
         * In the unlikely case policy is unregistered do not enable
@@ -350,6 +363,10 @@ static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
        disable_irq_nosync(c_data->throttle_irq);
        schedule_delayed_work(&c_data->throttle_work, 0);
 
+       if (c_data->soc_data->reg_intr_clr)
+               writel_relaxed(GT_IRQ_STATUS,
+                              c_data->base + c_data->soc_data->reg_intr_clr);
+
        return IRQ_HANDLED;
 }
 
@@ -365,9 +382,11 @@ static const struct qcom_cpufreq_soc_data qcom_soc_data = {
 
 static const struct qcom_cpufreq_soc_data epss_soc_data = {
        .reg_enable = 0x0,
+       .reg_domain_state = 0x20,
        .reg_dcvs_ctrl = 0xb0,
        .reg_freq_lut = 0x100,
        .reg_volt_lut = 0x200,
+       .reg_intr_clr = 0x308,
        .reg_perf_state = 0x320,
        .lut_row_size = 4,
 };
@@ -417,16 +436,39 @@ static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
        return 0;
 }
 
-static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
+{
+       struct qcom_cpufreq_data *data = policy->driver_data;
+       struct platform_device *pdev = cpufreq_get_driver_data();
+       int ret;
+
+       ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
+       if (ret)
+               dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
+                       data->irq_name, data->throttle_irq);
+
+       return ret;
+}
+
+static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
 {
+       struct qcom_cpufreq_data *data = policy->driver_data;
+
        if (data->throttle_irq <= 0)
-               return;
+               return 0;
 
        mutex_lock(&data->throttle_lock);
        data->cancel_throttle = true;
        mutex_unlock(&data->throttle_lock);
 
        cancel_delayed_work_sync(&data->throttle_work);
+       irq_set_affinity_hint(data->throttle_irq, NULL);
+
+       return 0;
+}
+
+static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
+{
        free_irq(data->throttle_irq, data);
 }
 
@@ -583,6 +625,8 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
        .get            = qcom_cpufreq_hw_get,
        .init           = qcom_cpufreq_hw_cpu_init,
        .exit           = qcom_cpufreq_hw_cpu_exit,
+       .online         = qcom_cpufreq_hw_cpu_online,
+       .offline        = qcom_cpufreq_hw_cpu_offline,
        .register_em    = cpufreq_register_em_with_opp,
        .fast_switch    = qcom_cpufreq_hw_fast_switch,
        .name           = "qcom-cpufreq-hw",
index 2deed8d8773fa6c3ff37d0ed958dca7803aab6d8..75e1bf3a08f7cff384b6e6cc0d209600ca94859b 100644 (file)
@@ -98,8 +98,10 @@ static int sun50i_cpufreq_nvmem_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        ret = sun50i_cpufreq_get_efuse(&speed);
-       if (ret)
+       if (ret) {
+               kfree(opp_tables);
                return ret;
+       }
 
        snprintf(name, MAX_NAME_LEN, "speed%d", speed);
 
index b459eda2cd375f7f49cced69f6be831ac73c3de7..5c852e6719924bd74847c75ad6ab15e4496b17f4 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/pm_runtime.h>
 #include <asm/cpuidle.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 
 #include "dt_idle_states.h"
index 1476156af74b44a0c6d3985d7c01ceff8c960d0c..def564d1e8faf714c167f25df26e109658117b29 100644 (file)
@@ -1453,7 +1453,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
-       struct at_xdmac_desc    *desc, *_desc;
+       struct at_xdmac_desc    *desc, *_desc, *iter;
        struct list_head        *descs_list;
        enum dma_status         ret;
        int                     residue, retry;
@@ -1568,11 +1568,13 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         * microblock.
         */
        descs_list = &desc->descs_list;
-       list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
-               dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
-               residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
-               if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
+       list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
+               dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
+               residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
+               if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
+                       desc = iter;
                        break;
+               }
        }
        residue += cur_ubc << dwidth;
 
index 329fc2e57b703630387a3e71d32e9ae052ef8c2c..33bc1e6c4cf2e74db9fbc54d4cfa8daadaff2ea8 100644 (file)
@@ -414,14 +414,18 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
                SET_CH_32(dw, chan->dir, chan->id, ch_control1,
                          (DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
                /* Linked list */
+
                #ifdef CONFIG_64BIT
-                       SET_CH_64(dw, chan->dir, chan->id, llp.reg,
-                                 chunk->ll_region.paddr);
+               /* llp is not aligned on 64bit -> keep 32bit accesses */
+               SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
                #else /* CONFIG_64BIT */
-                       SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
-                                 lower_32_bits(chunk->ll_region.paddr));
-                       SET_CH_32(dw, chan->dir, chan->id, llp.msb,
-                                 upper_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.lsb,
+                         lower_32_bits(chunk->ll_region.paddr));
+               SET_CH_32(dw, chan->dir, chan->id, llp.msb,
+                         upper_32_bits(chunk->ll_region.paddr));
                #endif /* CONFIG_64BIT */
        }
        /* Doorbell */
index 3061fe857d69f53870b91ded79c81bac1d5e8d58..f652da6ab47df1b29147136fd17c390d353c9a79 100644 (file)
@@ -373,7 +373,6 @@ static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
 {
        lockdep_assert_held(&wq->wq_lock);
 
-       idxd_wq_disable_cleanup(wq);
        wq->size = 0;
        wq->group = NULL;
 }
@@ -701,14 +700,17 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 
                if (wq->state == IDXD_WQ_ENABLED) {
                        idxd_wq_disable_cleanup(wq);
-                       idxd_wq_device_reset_cleanup(wq);
                        wq->state = IDXD_WQ_DISABLED;
                }
+               idxd_wq_device_reset_cleanup(wq);
        }
 }
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return;
+
        idxd_groups_clear_state(idxd);
        idxd_engines_clear_state(idxd);
        idxd_device_wqs_clear_state(idxd);
index e289fd48711adae53f3e6d8460b7a8ddcde917f2..c01db23e3333f70316ea1bfbca99930fabfdc1cf 100644 (file)
@@ -150,14 +150,15 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
  */
 int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
 {
-       int rc, retries = 0;
+       unsigned int retries = wq->enqcmds_retries;
+       int rc;
 
        do {
                rc = enqcmds(portal, desc);
                if (rc == 0)
                        break;
                cpu_relax();
-       } while (retries++ < wq->enqcmds_retries);
+       } while (retries--);
 
        return rc;
 }
index 7e19ab92b61a879bddb19759d4a61b5a36513a63..dfd549685c467ac796c76873b4dca559f33e622c 100644 (file)
@@ -905,6 +905,9 @@ static ssize_t wq_max_transfer_size_store(struct device *dev, struct device_attr
        u64 xfer_size;
        int rc;
 
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
 
@@ -939,6 +942,9 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
        u64 batch_size;
        int rc;
 
+       if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+               return -EPERM;
+
        if (wq->state != IDXD_WQ_DISABLED)
                return -EPERM;
 
index 70c0aa931ddf4a97406dcd74c7206c2f8ce0c8b2..6196a7b3956b1612ba5e15aafcbdf824038ba5aa 100644 (file)
@@ -198,12 +198,12 @@ struct sdma_script_start_addrs {
        s32 per_2_firi_addr;
        s32 mcu_2_firi_addr;
        s32 uart_2_per_addr;
-       s32 uart_2_mcu_ram_addr;
+       s32 uart_2_mcu_addr;
        s32 per_2_app_addr;
        s32 mcu_2_app_addr;
        s32 per_2_per_addr;
        s32 uartsh_2_per_addr;
-       s32 uartsh_2_mcu_ram_addr;
+       s32 uartsh_2_mcu_addr;
        s32 per_2_shp_addr;
        s32 mcu_2_shp_addr;
        s32 ata_2_mcu_addr;
@@ -232,8 +232,8 @@ struct sdma_script_start_addrs {
        s32 mcu_2_ecspi_addr;
        s32 mcu_2_sai_addr;
        s32 sai_2_mcu_addr;
-       s32 uart_2_mcu_addr;
-       s32 uartsh_2_mcu_addr;
+       s32 uart_2_mcu_rom_addr;
+       s32 uartsh_2_mcu_rom_addr;
        /* End of v3 array */
        s32 mcu_2_zqspi_addr;
        /* End of v4 array */
@@ -1796,17 +1796,17 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
                        saddr_arr[i] = addr_arr[i];
 
        /*
-        * get uart_2_mcu_addr/uartsh_2_mcu_addr rom script specially because
-        * they are now replaced by uart_2_mcu_ram_addr/uartsh_2_mcu_ram_addr
-        * to be compatible with legacy freescale/nxp sdma firmware, and they
-        * are located in the bottom part of sdma_script_start_addrs which are
-        * beyond the SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1.
+        * For compatibility with NXP internal legacy kernel before 4.19 which
+        * is based on uart ram script and mainline kernel based on uart rom
+        * script, both uart ram/rom scripts are present in newer sdma
+        * firmware. Use the rom versions if they are present (V3 or newer).
         */
-       if (addr->uart_2_mcu_addr)
-               sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_addr;
-       if (addr->uartsh_2_mcu_addr)
-               sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_addr;
-
+       if (sdma->script_number >= SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3) {
+               if (addr->uart_2_mcu_rom_addr)
+                       sdma->script_addrs->uart_2_mcu_addr = addr->uart_2_mcu_rom_addr;
+               if (addr->uartsh_2_mcu_rom_addr)
+                       sdma->script_addrs->uartsh_2_mcu_addr = addr->uartsh_2_mcu_rom_addr;
+       }
 }
 
 static void sdma_load_firmware(const struct firmware *fw, void *context)
@@ -1885,7 +1885,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
        u32 reg, val, shift, num_map, i;
        int ret = 0;
 
-       if (IS_ERR(np) || IS_ERR(gpr_np))
+       if (IS_ERR(np) || !gpr_np)
                goto out;
 
        event_remap = of_find_property(np, propname, NULL);
@@ -1933,7 +1933,7 @@ static int sdma_event_remap(struct sdma_engine *sdma)
        }
 
 out:
-       if (!IS_ERR(gpr_np))
+       if (gpr_np)
                of_node_put(gpr_np);
 
        return ret;
index 375e7e647df6b5093b156c2cd88e7f7f0e6798d3..a1517ef1f4a0185700343797ef05d8ef6810ed0a 100644 (file)
@@ -274,7 +274,7 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
        unsigned int status;
        int ret;
 
-       ret = pm_runtime_get_sync(mtkd->ddev.dev);
+       ret = pm_runtime_resume_and_get(mtkd->ddev.dev);
        if (ret < 0) {
                pm_runtime_put_noidle(chan->device->dev);
                return ret;
@@ -288,18 +288,21 @@ static int mtk_uart_apdma_alloc_chan_resources(struct dma_chan *chan)
        ret = readx_poll_timeout(readl, c->base + VFF_EN,
                          status, !status, 10, 100);
        if (ret)
-               return ret;
+               goto err_pm;
 
        ret = request_irq(c->irq, mtk_uart_apdma_irq_handler,
                          IRQF_TRIGGER_NONE, KBUILD_MODNAME, chan);
        if (ret < 0) {
                dev_err(chan->device->dev, "Can't request dma IRQ\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_pm;
        }
 
        if (mtkd->support_33bits)
                mtk_uart_apdma_write(c, VFF_4G_SUPPORT, VFF_4G_SUPPORT_CLR_B);
 
+err_pm:
+       pm_runtime_put_noidle(mtkd->ddev.dev);
        return ret;
 }
 
index f05ff02c0656ea3b785e2f70289c3fe05f63408d..40b1abeca8562e2b55dde35aa488e86beb2e9ee4 100644 (file)
 #define ECC_STAT_CECNT_SHIFT           8
 #define ECC_STAT_BITNUM_MASK           0x7F
 
+/* ECC error count register definitions */
+#define ECC_ERRCNT_UECNT_MASK          0xFFFF0000
+#define ECC_ERRCNT_UECNT_SHIFT         16
+#define ECC_ERRCNT_CECNT_MASK          0xFFFF
+
 /* DDR QOS Interrupt register definitions */
 #define DDR_QOS_IRQ_STAT_OFST          0x20200
 #define DDR_QOSUE_MASK                 0x4
@@ -423,15 +428,16 @@ static int zynqmp_get_error_info(struct synps_edac_priv *priv)
        base = priv->baseaddr;
        p = &priv->stat;
 
+       regval = readl(base + ECC_ERRCNT_OFST);
+       p->ce_cnt = regval & ECC_ERRCNT_CECNT_MASK;
+       p->ue_cnt = (regval & ECC_ERRCNT_UECNT_MASK) >> ECC_ERRCNT_UECNT_SHIFT;
+       if (!p->ce_cnt)
+               goto ue_err;
+
        regval = readl(base + ECC_STAT_OFST);
        if (!regval)
                return 1;
 
-       p->ce_cnt = (regval & ECC_STAT_CECNT_MASK) >> ECC_STAT_CECNT_SHIFT;
-       p->ue_cnt = (regval & ECC_STAT_UECNT_MASK) >> ECC_STAT_UECNT_SHIFT;
-       if (!p->ce_cnt)
-               goto ue_err;
-
        p->ceinfo.bitpos = (regval & ECC_STAT_BITNUM_MASK);
 
        regval = readl(base + ECC_CEADDR0_OFST);
index e48108e694f8dcb08f5e91fcb2d89de9a78946a7..7dad6f57d97042cc1a13b48a58c58e3938465e0f 100644 (file)
@@ -955,8 +955,7 @@ static int cs_dsp_create_control(struct cs_dsp *dsp,
        ctl->alg_region = *alg_region;
        if (subname && dsp->fw_ver >= 2) {
                ctl->subname_len = subname_len;
-               ctl->subname = kmemdup(subname,
-                                      strlen(subname) + 1, GFP_KERNEL);
+               ctl->subname = kasprintf(GFP_KERNEL, "%.*s", subname_len, subname);
                if (!ctl->subname) {
                        ret = -ENOMEM;
                        goto err_ctl;
index 085348e0898608dc8a9059219113f570bc6ded04..b7694171655cfd00870e2c078fbc7d977fdef653 100644 (file)
@@ -1601,8 +1601,6 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
        gpiochip_set_irq_hooks(gc);
 
-       acpi_gpiochip_request_interrupts(gc);
-
        /*
         * Using barrier() here to prevent compiler from reordering
         * gc->irq.initialized before initialization of above
@@ -1612,6 +1610,8 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
 
        gc->irq.initialized = true;
 
+       acpi_gpiochip_request_interrupts(gc);
+
        return 0;
 }
 
index 970b065e9a6b13929ed6f29bf1fc28e413e8b756..d0d0ea565e3df48b3cccd65425cf36968dce009c 100644 (file)
@@ -128,6 +128,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                goto free_chunk;
        }
 
+       mutex_lock(&p->ctx->lock);
+
        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
@@ -709,6 +711,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
        dma_fence_put(parser->fence);
 
        if (parser->ctx) {
+               mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
@@ -1157,6 +1160,9 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 {
        int i, r;
 
+       /* TODO: Investigate why we still need the context lock */
+       mutex_unlock(&p->ctx->lock);
+
        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;
 
@@ -1167,32 +1173,34 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_process_fence_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
                        if (r)
-                               return r;
+                               goto out;
                        break;
                }
        }
 
-       return 0;
+out:
+       mutex_lock(&p->ctx->lock);
+       return r;
 }
 
 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1368,6 +1376,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                goto out;
 
        r = amdgpu_cs_submit(&parser, cs);
+
 out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 
index 5981c7d9bd48fa616cc66b49a36bc7cc3cbac308..8f0e6d93bb9cb9ed1c340c27be931262f50400a4 100644 (file)
@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 
        kref_init(&ctx->refcount);
        spin_lock_init(&ctx->ring_lock);
+       mutex_init(&ctx->lock);
 
        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
        ctx->reset_counter_query = ctx->reset_counter;
@@ -357,6 +358,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
                drm_dev_exit(idx);
        }
 
+       mutex_destroy(&ctx->lock);
        kfree(ctx);
 }
 
index d0cbfcea90f72abed197e97575d849eb727d2249..142f2f87d44cea617f01825b44d32beee162e5bc 100644 (file)
@@ -49,6 +49,7 @@ struct amdgpu_ctx {
        bool                            preamble_presented;
        int32_t                         init_priority;
        int32_t                         override_priority;
+       struct mutex                    lock;
        atomic_t                        guilty;
        unsigned long                   ras_counter_ce;
        unsigned long                   ras_counter_ue;
index f4df344509a87245d97f701433a4de41dc702d08..9a2cfab3a177fe17618cd765a04c480e516a938e 100644 (file)
@@ -214,29 +214,6 @@ int drm_of_encoder_active_endpoint(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
 
-static int find_panel_or_bridge(struct device_node *node,
-                               struct drm_panel **panel,
-                               struct drm_bridge **bridge)
-{
-       if (panel) {
-               *panel = of_drm_find_panel(node);
-               if (!IS_ERR(*panel))
-                       return 0;
-
-               /* Clear the panel pointer in case of error. */
-               *panel = NULL;
-       }
-
-       /* No panel found yet, check for a bridge next. */
-       if (bridge) {
-               *bridge = of_drm_find_bridge(node);
-               if (*bridge)
-                       return 0;
-       }
-
-       return -EPROBE_DEFER;
-}
-
 /**
  * drm_of_find_panel_or_bridge - return connected panel or bridge device
  * @np: device tree node containing encoder output ports
@@ -259,44 +236,49 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
                                struct drm_panel **panel,
                                struct drm_bridge **bridge)
 {
-       struct device_node *node;
-       int ret;
+       int ret = -EPROBE_DEFER;
+       struct device_node *remote;
 
        if (!panel && !bridge)
                return -EINVAL;
-
        if (panel)
                *panel = NULL;
-       if (bridge)
-               *bridge = NULL;
-
-       /* Check for a graph on the device node first. */
-       if (of_graph_is_present(np)) {
-               node = of_graph_get_remote_node(np, port, endpoint);
-               if (node) {
-                       ret = find_panel_or_bridge(node, panel, bridge);
-                       of_node_put(node);
-
-                       if (!ret)
-                               return 0;
-               }
-       }
 
-       /* Otherwise check for any child node other than port/ports. */
-       for_each_available_child_of_node(np, node) {
-               if (of_node_name_eq(node, "port") ||
-                   of_node_name_eq(node, "ports"))
-                       continue;
+       /*
+        * of_graph_get_remote_node() produces a noisy error message if port
+        * node isn't found and the absence of the port is a legit case here,
+        * so at first we silently check whether graph presents in the
+        * device-tree node.
+        */
+       if (!of_graph_is_present(np))
+               return -ENODEV;
 
-               ret = find_panel_or_bridge(node, panel, bridge);
-               of_node_put(node);
+       remote = of_graph_get_remote_node(np, port, endpoint);
+       if (!remote)
+               return -ENODEV;
+
+       if (panel) {
+               *panel = of_drm_find_panel(remote);
+               if (!IS_ERR(*panel))
+                       ret = 0;
+               else
+                       *panel = NULL;
+       }
+
+       /* No panel found yet, check for a bridge next. */
+       if (bridge) {
+               if (ret) {
+                       *bridge = of_drm_find_bridge(remote);
+                       if (*bridge)
+                               ret = 0;
+               } else {
+                       *bridge = NULL;
+               }
 
-               /* Stop at the first found occurrence. */
-               if (!ret)
-                       return 0;
        }
 
-       return -EPROBE_DEFER;
+       of_node_put(remote);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(drm_of_find_panel_or_bridge);
 
index d667657e360624eeb80543cab361361891ae11eb..f868db8be02a6813d9b18d5fc39067afcff72072 100644 (file)
@@ -4383,13 +4383,20 @@ intel_dp_update_420(struct intel_dp *intel_dp)
 static void
 intel_dp_set_edid(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        struct intel_connector *connector = intel_dp->attached_connector;
        struct edid *edid;
+       bool vrr_capable;
 
        intel_dp_unset_edid(intel_dp);
        edid = intel_dp_get_edid(intel_dp);
        connector->detect_edid = edid;
 
+       vrr_capable = intel_vrr_is_capable(&connector->base);
+       drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
+                   connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
+       drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
+
        intel_dp_update_dfp(intel_dp, edid);
        intel_dp_update_420(intel_dp);
 
@@ -4422,6 +4429,9 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
 
        intel_dp->dfp.ycbcr_444_to_420 = false;
        connector->base.ycbcr_420_allowed = false;
+
+       drm_connector_set_vrr_capable_property(&connector->base,
+                                              false);
 }
 
 static int
@@ -4572,14 +4582,9 @@ static int intel_dp_get_modes(struct drm_connector *connector)
        int num_modes = 0;
 
        edid = intel_connector->detect_edid;
-       if (edid) {
+       if (edid)
                num_modes = intel_connector_update_modes(connector, edid);
 
-               if (intel_vrr_is_capable(connector))
-                       drm_connector_set_vrr_capable_property(connector,
-                                                              true);
-       }
-
        /* Also add fixed mode, which may or may not be present in EDID */
        if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
            intel_connector->panel.fixed_mode) {
index bff8c2d73cdfb858c467a116d05f90b3a6f16143..6c9e6e7f0afd05f8db056eb1e70ff8306f85987b 100644 (file)
@@ -887,6 +887,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
+       /* Wa_16011303918:adl-p */
+       if (crtc_state->vrr.enable &&
+           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+               return false;
+       }
+
+       if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+               drm_dbg_kms(&dev_priv->drm,
+                           "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+               return false;
+       }
+
        if (HAS_PSR2_SEL_FETCH(dev_priv)) {
                if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
                    !HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -900,12 +914,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
        if (!crtc_state->enable_psr2_sel_fetch &&
            IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
-               return false;
+               goto unsupported;
        }
 
        if (!psr2_granularity_check(intel_dp, crtc_state)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
-               return false;
+               goto unsupported;
        }
 
        if (!crtc_state->enable_psr2_sel_fetch &&
@@ -914,25 +928,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                            "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
                            crtc_hdisplay, crtc_vdisplay,
                            psr_max_h, psr_max_v);
-               return false;
-       }
-
-       if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
-               return false;
-       }
-
-       /* Wa_16011303918:adl-p */
-       if (crtc_state->vrr.enable &&
-           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "PSR2 not enabled, not compatible with HW stepping + VRR\n");
-               return false;
+               goto unsupported;
        }
 
        tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
        return true;
+
+unsupported:
+       crtc_state->enable_psr2_sel_fetch = false;
+       return false;
 }
 
 void intel_psr_compute_config(struct intel_dp *intel_dp,
index 9c36b505daab93fd1f4476451985ee3dffb8d3e2..affa95eb05fcda3fe114dfcb24272de6bc22d388 100644 (file)
@@ -274,7 +274,7 @@ bool msm_use_mmu(struct drm_device *dev)
        struct msm_drm_private *priv = dev->dev_private;
 
        /* a2xx comes with its own MMU */
-       return priv->is_a2xx || device_iommu_mapped(dev->dev);
+       return priv->is_a2xx || iommu_present(&platform_bus_type);
 }
 
 static int msm_init_vram(struct drm_device *dev)
index 46029c5610c80814682a7c7fc32d1e2bf08ff42b..145047e193946a5e4f852802d87c2903029edd20 100644 (file)
@@ -229,7 +229,7 @@ static void rpi_touchscreen_i2c_write(struct rpi_touchscreen *ts,
 
        ret = i2c_smbus_write_byte_data(ts->i2c, reg, val);
        if (ret)
-               dev_err(&ts->dsi->dev, "I2C write failed: %d\n", ret);
+               dev_err(&ts->i2c->dev, "I2C write failed: %d\n", ret);
 }
 
 static int rpi_touchscreen_write(struct rpi_touchscreen *ts, u16 reg, u32 val)
@@ -265,7 +265,7 @@ static int rpi_touchscreen_noop(struct drm_panel *panel)
        return 0;
 }
 
-static int rpi_touchscreen_enable(struct drm_panel *panel)
+static int rpi_touchscreen_prepare(struct drm_panel *panel)
 {
        struct rpi_touchscreen *ts = panel_to_ts(panel);
        int i;
@@ -295,6 +295,13 @@ static int rpi_touchscreen_enable(struct drm_panel *panel)
        rpi_touchscreen_write(ts, DSI_STARTDSI, 0x01);
        msleep(100);
 
+       return 0;
+}
+
+static int rpi_touchscreen_enable(struct drm_panel *panel)
+{
+       struct rpi_touchscreen *ts = panel_to_ts(panel);
+
        /* Turn on the backlight. */
        rpi_touchscreen_i2c_write(ts, REG_PWM, 255);
 
@@ -349,7 +356,7 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
 static const struct drm_panel_funcs rpi_touchscreen_funcs = {
        .disable = rpi_touchscreen_disable,
        .unprepare = rpi_touchscreen_noop,
-       .prepare = rpi_touchscreen_noop,
+       .prepare = rpi_touchscreen_prepare,
        .enable = rpi_touchscreen_enable,
        .get_modes = rpi_touchscreen_get_modes,
 };
index b991ba1bcd51308d84afff666d4f56d2d4d91941..f63efd8d5e524b42eb65745e25b310cfee2c7209 100644 (file)
@@ -96,7 +96,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
        struct dma_fence *f;
        int r = 0;
 
-       dma_resv_for_each_fence(&cursor, resv, shared, f) {
+       dma_resv_for_each_fence(&cursor, resv, !shared, f) {
                fence = to_radeon_fence(f);
                if (fence && fence->rdev == rdev)
                        radeon_sync_fence(sync, fence);
index de3424fed2fc7cc1fb88073b039eb8affcddec7b..6cf2621786e684dbd637b5376e3711a35eccc9ff 100644 (file)
@@ -2,6 +2,9 @@
 config DRM_VC4
        tristate "Broadcom VC4 Graphics"
        depends on ARCH_BCM || ARCH_BCM2835 || COMPILE_TEST
+       # Make sure not 'y' when RASPBERRYPI_FIRMWARE is 'm'. This can only
+       # happen when COMPILE_TEST=y, hence the added !RASPBERRYPI_FIRMWARE.
+       depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
        depends on DRM
        depends on SND && SND_SOC
        depends on COMMON_CLK
index 752f921735c67c9a3d4a159c07a8ec1acf8a2de6..98308a17e4ed708b3a9d38cef5b5a06beb001219 100644 (file)
@@ -846,7 +846,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
        unsigned long phy_clock;
        int ret;
 
-       ret = pm_runtime_get_sync(dev);
+       ret = pm_runtime_resume_and_get(dev);
        if (ret) {
                DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port);
                return;
index 31aecc46624b3fa27fea8e5d3bb29b145e7810e8..04c8a378aeed6c3644dbaf8003ae37d72cf903c7 100644 (file)
@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
        return container_of(bo, struct vmw_buffer_object, base);
 }
 
+/**
+ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * @bo: ttm buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+static bool bo_is_vmw(struct ttm_buffer_object *bo)
+{
+       return bo->destroy == &vmw_bo_bo_free ||
+              bo->destroy == &vmw_gem_destroy;
+}
 
 /**
  * vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 
                ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
                vmw_bo_unreference(&vbo);
-               if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
-                            ret != -EBUSY)) {
+               if (unlikely(ret != 0)) {
+                       if (ret == -ERESTARTSYS || ret == -EBUSY)
+                               return -EBUSY;
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
        /* Is @bo embedded in a struct vmw_buffer_object? */
-       if (vmw_bo_is_vmw_bo(bo))
+       if (!bo_is_vmw(bo))
                return;
 
        /* Kill any cached kernel maps before swapout */
@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
        struct vmw_buffer_object *vbo;
 
        /* Make sure @bo is embedded in a struct vmw_buffer_object? */
-       if (vmw_bo_is_vmw_bo(bo))
+       if (!bo_is_vmw(bo))
                return;
 
        vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
        if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
 }
-
-/**
- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
- * @bo: buffer object to be checked
- *
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
- */
-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
-{
-       if (bo->destroy == &vmw_bo_bo_free ||
-           bo->destroy == &vmw_gem_destroy)
-               return true;
-
-       return false;
-}
index 26eb5478394aa0b1bd923141074c8ab97af458e5..163c00793eb1c727bba3d06ab3a5ab2ec0f61108 100644 (file)
@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
                goto out_no_fman;
        }
 
-       drm_vma_offset_manager_init(&dev_priv->vma_manager,
-                                   DRM_FILE_PAGE_OFFSET_START,
-                                   DRM_FILE_PAGE_OFFSET_SIZE);
        ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
                              dev_priv->drm.dev,
                              dev_priv->drm.anon_inode->i_mapping,
-                             &dev_priv->vma_manager,
+                             dev_priv->drm.vma_offset_manager,
                              dev_priv->map_mode == vmw_dma_alloc_coherent,
                              false);
        if (unlikely(ret != 0)) {
@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
        vmw_devcaps_destroy(dev_priv);
        vmw_vram_manager_fini(dev_priv);
        ttm_device_fini(&dev_priv->bdev);
-       drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
        struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
 
        return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
-                                    &dev_priv->vma_manager);
+                                    dev_priv->drm.vma_offset_manager);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
index 00e8e27e48846206244a14c49447f0b092348d6e..ace7ca150b0362ec69c4ec22530ad19d7ccc3ae7 100644 (file)
@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
            container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;
 
+       if (base->shareable && res && res->backup)
+               drm_gem_object_put(&res->backup->base.base);
+
        *p_base = NULL;
        vmw_resource_unreference(&res);
 }
@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                        goto out_unlock;
                }
                vmw_bo_reference(res->backup);
+               drm_gem_object_get(&res->backup->base.base);
        }
 
        tmp = vmw_resource_reference(&srf->res);
@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                                                        &res->backup);
                if (ret == 0)
                        vmw_bo_reference(res->backup);
-
        }
 
        if (unlikely(ret != 0)) {
@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                        drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
                rep->buffer_size = res->backup->base.base.size;
                rep->buffer_handle = backup_handle;
+               if (user_srf->prime.base.shareable)
+                       drm_gem_object_get(&res->backup->base.base);
        } else {
                rep->buffer_map_handle = 0;
                rep->buffer_size = 0;
index b7640cfe00201bf0d37053ff93b687d1bd7720ec..47551ab73ca8a03ab60448adb8fe0d8cee9aed5d 100644 (file)
@@ -69,7 +69,12 @@ static unsigned int preferred_states_mask;
 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
 
 static unsigned long auto_demotion_disable_flags;
-static bool disable_promotion_to_c1e;
+
+static enum {
+       C1E_PROMOTION_PRESERVE,
+       C1E_PROMOTION_ENABLE,
+       C1E_PROMOTION_DISABLE
+} c1e_promotion = C1E_PROMOTION_PRESERVE;
 
 struct idle_cpu {
        struct cpuidle_state *state_table;
@@ -1398,8 +1403,6 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
 static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
 #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
 
-static void c1e_promotion_enable(void);
-
 /**
  * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
  *
@@ -1578,17 +1581,14 @@ static void __init spr_idle_state_table_update(void)
        unsigned long long msr;
 
        /* Check if user prefers C1E over C1. */
-       if (preferred_states_mask & BIT(2)) {
-               if (preferred_states_mask & BIT(1))
-                       /* Both can't be enabled, stick to the defaults. */
-                       return;
-
+       if ((preferred_states_mask & BIT(2)) &&
+           !(preferred_states_mask & BIT(1))) {
+               /* Disable C1 and enable C1E. */
                spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE;
                spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE;
 
                /* Enable C1E using the "C1E promotion" bit. */
-               c1e_promotion_enable();
-               disable_promotion_to_c1e = false;
+               c1e_promotion = C1E_PROMOTION_ENABLE;
        }
 
        /*
@@ -1754,7 +1754,9 @@ static int intel_idle_cpu_init(unsigned int cpu)
        if (auto_demotion_disable_flags)
                auto_demotion_disable();
 
-       if (disable_promotion_to_c1e)
+       if (c1e_promotion == C1E_PROMOTION_ENABLE)
+               c1e_promotion_enable();
+       else if (c1e_promotion == C1E_PROMOTION_DISABLE)
                c1e_promotion_disable();
 
        return 0;
@@ -1833,7 +1835,8 @@ static int __init intel_idle_init(void)
        if (icpu) {
                cpuidle_state_table = icpu->state_table;
                auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
-               disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
+               if (icpu->disable_promotion_to_c1e)
+                       c1e_promotion = C1E_PROMOTION_DISABLE;
                if (icpu->use_acpi || force_use_acpi)
                        intel_idle_acpi_cst_extract();
        } else if (!intel_idle_acpi_cst_extract()) {
index c28996028e8030d6e2ad844826bcb0610c1d8f70..9a23eed6a4f4106603069387f4c90e6a5055685f 100644 (file)
@@ -61,6 +61,14 @@ static irqreturn_t cypress_sf_irq_handler(int irq, void *devid)
        return IRQ_HANDLED;
 }
 
+static void cypress_sf_disable_regulators(void *arg)
+{
+       struct cypress_sf_data *touchkey = arg;
+
+       regulator_bulk_disable(ARRAY_SIZE(touchkey->regulators),
+                              touchkey->regulators);
+}
+
 static int cypress_sf_probe(struct i2c_client *client)
 {
        struct cypress_sf_data *touchkey;
@@ -121,6 +129,12 @@ static int cypress_sf_probe(struct i2c_client *client)
                return error;
        }
 
+       error = devm_add_action_or_reset(&client->dev,
+                                        cypress_sf_disable_regulators,
+                                        touchkey);
+       if (error)
+               return error;
+
        touchkey->input_dev = devm_input_allocate_device(&client->dev);
        if (!touchkey->input_dev) {
                dev_err(&client->dev, "Failed to allocate input device\n");
index 43375b38ee5926f31a653dfe1efb801b927332b3..8a7ce41b8c56e1b62bd8e8fe4f51204ca55c54c8 100644 (file)
@@ -393,7 +393,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
         * revision register.
         */
        error = pm_runtime_get_sync(dev);
-       if (error) {
+       if (error < 0) {
                dev_err(dev, "pm_runtime_get_sync() failed\n");
                pm_runtime_put_noidle(dev);
                return error;
index 7c2ca52ca3e43f96f20793d08bd1ff74f7d30522..df5347ea450b56c966d17764e0fec63e6374b9fc 100644 (file)
@@ -771,12 +771,12 @@ static void journal_write_unlocked(struct closure *cl)
 
                bio_reset(bio, ca->bdev, REQ_OP_WRITE | 
                          REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA);
-               bch_bio_map(bio, w->data);
                bio->bi_iter.bi_sector  = PTR_OFFSET(k, i);
                bio->bi_iter.bi_size = sectors << 9;
 
                bio->bi_end_io  = journal_write_endio;
                bio->bi_private = w;
+               bch_bio_map(bio, w->data);
 
                trace_bcache_journal_write(bio, w->data->keys);
                bio_list_add(&list, bio);
index fdd0194f84dd089572f34cdcad1401c8748db9b0..320fcdfef48efae88642c7728b23455160a7a2d3 100644 (file)
@@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s,
 {
        struct bio *bio = &s->bio.bio;
 
-       bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO);
+       bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO);
        /*
         * bi_end_io can be set separately somewhere else, e.g. the
         * variants in,
index e7df3dac705e24715f5077a9fbf094a7ca394ea0..49ab3448b9b12deaccddd71ae52d677bc9c024ee 100644 (file)
@@ -43,6 +43,7 @@
 
 struct mtk_ecc_caps {
        u32 err_mask;
+       u32 err_shift;
        const u8 *ecc_strength;
        const u32 *ecc_regs;
        u8 num_ecc_strength;
@@ -76,7 +77,7 @@ static const u8 ecc_strength_mt2712[] = {
 };
 
 static const u8 ecc_strength_mt7622[] = {
-       4, 6, 8, 10, 12, 14, 16
+       4, 6, 8, 10, 12
 };
 
 enum mtk_ecc_regs {
@@ -221,7 +222,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
        for (i = 0; i < sectors; i++) {
                offset = (i >> 2) << 2;
                err = readl(ecc->regs + ECC_DECENUM0 + offset);
-               err = err >> ((i % 4) * 8);
+               err = err >> ((i % 4) * ecc->caps->err_shift);
                err &= ecc->caps->err_mask;
                if (err == ecc->caps->err_mask) {
                        /* uncorrectable errors */
@@ -449,6 +450,7 @@ EXPORT_SYMBOL(mtk_ecc_get_parity_bits);
 
 static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
        .err_mask = 0x3f,
+       .err_shift = 8,
        .ecc_strength = ecc_strength_mt2701,
        .ecc_regs = mt2701_ecc_regs,
        .num_ecc_strength = 20,
@@ -459,6 +461,7 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = {
 
 static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
        .err_mask = 0x7f,
+       .err_shift = 8,
        .ecc_strength = ecc_strength_mt2712,
        .ecc_regs = mt2712_ecc_regs,
        .num_ecc_strength = 23,
@@ -468,10 +471,11 @@ static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = {
 };
 
 static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = {
-       .err_mask = 0x3f,
+       .err_mask = 0x1f,
+       .err_shift = 5,
        .ecc_strength = ecc_strength_mt7622,
        .ecc_regs = mt7622_ecc_regs,
-       .num_ecc_strength = 7,
+       .num_ecc_strength = 5,
        .ecc_mode_shift = 4,
        .parity_bits = 13,
        .pg_irq_sel = 0,
index 1a77542c6d67c6e570beea4013b33a48eb81eacd..048b255faa769c123f022028209836365f7f1743 100644 (file)
@@ -2651,10 +2651,23 @@ static int qcom_nand_attach_chip(struct nand_chip *chip)
        ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
 
        mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+       /* Free the initially allocated BAM transaction for reading the ONFI params */
+       if (nandc->props->is_bam)
+               free_bam_transaction(nandc);
 
        nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
                                     cwperpage);
 
+       /* Now allocate the BAM transaction based on updated max_cwperpage */
+       if (nandc->props->is_bam) {
+               nandc->bam_txn = alloc_bam_transaction(nandc);
+               if (!nandc->bam_txn) {
+                       dev_err(nandc->dev,
+                               "failed to allocate bam transaction\n");
+                       return -ENOMEM;
+               }
+       }
+
        /*
         * DATA_UD_BYTES varies based on whether the read/write command protects
         * spare data with ECC too. We protect spare data by default, so we set
@@ -2955,17 +2968,6 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc,
        if (ret)
                return ret;
 
-       if (nandc->props->is_bam) {
-               free_bam_transaction(nandc);
-               nandc->bam_txn = alloc_bam_transaction(nandc);
-               if (!nandc->bam_txn) {
-                       dev_err(nandc->dev,
-                               "failed to allocate bam transaction\n");
-                       nand_cleanup(chip);
-                       return -ENOMEM;
-               }
-       }
-
        ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
        if (ret)
                nand_cleanup(chip);
index b85b9c6fcc4249f58d36047c01175bfb1ab6f661..a278829469d610d837ccd8b59d335590108341f7 100644 (file)
@@ -384,7 +384,8 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
        dma_addr_t dma_addr;
        dma_cookie_t cookie;
        uint32_t reg;
-       int ret;
+       int ret = 0;
+       unsigned long time_left;
 
        if (dir == DMA_FROM_DEVICE) {
                chan = flctl->chan_fifo0_rx;
@@ -425,13 +426,14 @@ static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
                goto out;
        }
 
-       ret =
+       time_left =
        wait_for_completion_timeout(&flctl->dma_complete,
                                msecs_to_jiffies(3000));
 
-       if (ret <= 0) {
+       if (time_left == 0) {
                dmaengine_terminate_all(chan);
                dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+               ret = -ETIMEDOUT;
        }
 
 out:
@@ -441,7 +443,7 @@ out:
 
        dma_unmap_single(chan->device->dev, dma_addr, len, dir);
 
-       /* ret > 0 is success */
+       /* ret == 0 is success */
        return ret;
 }
 
@@ -465,7 +467,7 @@ static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
 
        /* initiate DMA transfer */
        if (flctl->chan_fifo0_rx && rlen >= 32 &&
-               flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE) > 0)
+               !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_FROM_DEVICE))
                        goto convert;   /* DMA success */
 
        /* do polling transfer */
@@ -524,7 +526,7 @@ static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
 
        /* initiate DMA transfer */
        if (flctl->chan_fifo0_tx && rlen >= 32 &&
-               flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE) > 0)
+               !flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_TO_DEVICE))
                        return; /* DMA success */
 
        /* do polling transfer */
index 9694370651fa8bc3bd19b91df020240f8144887e..59d3980b8ca2a201d3e1510468364104ec75da63 100644 (file)
@@ -400,6 +400,9 @@ validate_group(struct perf_event *event)
        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;
 
+       if (event == leader)
+               return 0;
+
        for_each_sibling_event(sibling, leader) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
@@ -489,12 +492,7 @@ __hw_perf_event_init(struct perf_event *event)
                local64_set(&hwc->period_left, hwc->sample_period);
        }
 
-       if (event->group_leader != event) {
-               if (validate_group(event) != 0)
-                       return -EINVAL;
-       }
-
-       return 0;
+       return validate_group(event);
 }
 
 static int armpmu_event_init(struct perf_event *event)
index 32ba50efbcebcf020a2b660fbab0c52eb2431d9f..62dbd1e67513dcbe0e28125a420f537750e1c098 100644 (file)
 
 #include "pinctrl-intel.h"
 
-#define ADL_PAD_OWN    0x0a0
-#define ADL_PADCFGLOCK 0x110
-#define ADL_HOSTSW_OWN 0x150
-#define ADL_GPI_IS     0x200
-#define ADL_GPI_IE     0x220
+#define ADL_N_PAD_OWN          0x020
+#define ADL_N_PADCFGLOCK       0x080
+#define ADL_N_HOSTSW_OWN       0x0b0
+#define ADL_N_GPI_IS           0x100
+#define ADL_N_GPI_IE           0x120
+
+#define ADL_S_PAD_OWN          0x0a0
+#define ADL_S_PADCFGLOCK       0x110
+#define ADL_S_HOSTSW_OWN       0x150
+#define ADL_S_GPI_IS           0x200
+#define ADL_S_GPI_IE           0x220
 
 #define ADL_GPP(r, s, e, g)                            \
        {                                               \
                .gpio_base = (g),                       \
        }
 
-#define ADL_COMMUNITY(b, s, e, g)                      \
+#define ADL_N_COMMUNITY(b, s, e, g)                    \
+       {                                               \
+               .barno = (b),                           \
+               .padown_offset = ADL_N_PAD_OWN,         \
+               .padcfglock_offset = ADL_N_PADCFGLOCK,  \
+               .hostown_offset = ADL_N_HOSTSW_OWN,     \
+               .is_offset = ADL_N_GPI_IS,              \
+               .ie_offset = ADL_N_GPI_IE,              \
+               .pin_base = (s),                        \
+               .npins = ((e) - (s) + 1),               \
+               .gpps = (g),                            \
+               .ngpps = ARRAY_SIZE(g),                 \
+       }
+
+#define ADL_S_COMMUNITY(b, s, e, g)                    \
        {                                               \
                .barno = (b),                           \
-               .padown_offset = ADL_PAD_OWN,           \
-               .padcfglock_offset = ADL_PADCFGLOCK,    \
-               .hostown_offset = ADL_HOSTSW_OWN,       \
-               .is_offset = ADL_GPI_IS,                \
-               .ie_offset = ADL_GPI_IE,                \
+               .padown_offset = ADL_S_PAD_OWN,         \
+               .padcfglock_offset = ADL_S_PADCFGLOCK,  \
+               .hostown_offset = ADL_S_HOSTSW_OWN,     \
+               .is_offset = ADL_S_GPI_IS,              \
+               .ie_offset = ADL_S_GPI_IE,              \
                .pin_base = (s),                        \
                .npins = ((e) - (s) + 1),               \
                .gpps = (g),                            \
@@ -342,10 +362,10 @@ static const struct intel_padgroup adln_community5_gpps[] = {
 };
 
 static const struct intel_community adln_communities[] = {
-       ADL_COMMUNITY(0, 0, 66, adln_community0_gpps),
-       ADL_COMMUNITY(1, 67, 168, adln_community1_gpps),
-       ADL_COMMUNITY(2, 169, 248, adln_community4_gpps),
-       ADL_COMMUNITY(3, 249, 256, adln_community5_gpps),
+       ADL_N_COMMUNITY(0, 0, 66, adln_community0_gpps),
+       ADL_N_COMMUNITY(1, 67, 168, adln_community1_gpps),
+       ADL_N_COMMUNITY(2, 169, 248, adln_community4_gpps),
+       ADL_N_COMMUNITY(3, 249, 256, adln_community5_gpps),
 };
 
 static const struct intel_pinctrl_soc_data adln_soc_data = {
@@ -713,11 +733,11 @@ static const struct intel_padgroup adls_community5_gpps[] = {
 };
 
 static const struct intel_community adls_communities[] = {
-       ADL_COMMUNITY(0, 0, 94, adls_community0_gpps),
-       ADL_COMMUNITY(1, 95, 150, adls_community1_gpps),
-       ADL_COMMUNITY(2, 151, 199, adls_community3_gpps),
-       ADL_COMMUNITY(3, 200, 269, adls_community4_gpps),
-       ADL_COMMUNITY(4, 270, 303, adls_community5_gpps),
+       ADL_S_COMMUNITY(0, 0, 94, adls_community0_gpps),
+       ADL_S_COMMUNITY(1, 95, 150, adls_community1_gpps),
+       ADL_S_COMMUNITY(2, 151, 199, adls_community3_gpps),
+       ADL_S_COMMUNITY(3, 200, 269, adls_community4_gpps),
+       ADL_S_COMMUNITY(4, 270, 303, adls_community5_gpps),
 };
 
 static const struct intel_pinctrl_soc_data adls_soc_data = {
index 8dca1ef04965faa59562ccc6d97b0a2f6dff6ee5..40accd110c3d8fe74fad1b01ab0cada0ed94fe8a 100644 (file)
@@ -30,6 +30,7 @@ config PINCTRL_MTK_MOORE
        select GENERIC_PINMUX_FUNCTIONS
        select GPIOLIB
        select OF_GPIO
+       select EINT_MTK
        select PINCTRL_MTK_V2
 
 config PINCTRL_MTK_PARIS
index 8d271c6b0ca4103bc325440f6a86b95ae9bb654b..5de691c630b4fc77e044eac5e874aa06ba3df402 100644 (file)
@@ -1374,10 +1374,10 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
                }
 
                irq = irq_of_parse_and_map(child, 0);
-               if (irq < 0) {
-                       dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
+               if (!irq) {
+                       dev_err(pctl->dev, "No IRQ for bank %u\n", i);
                        of_node_put(child);
-                       ret = irq;
+                       ret = -EINVAL;
                        goto err;
                }
 
index a1b598b86aa9f400133fe0649db24a7b5cfa7f82..2cb79e649fcf31e96be5d6d99fd20ec43e7fc512 100644 (file)
@@ -457,95 +457,110 @@ static  struct rockchip_mux_recalced_data rk3128_mux_recalced_data[] = {
 
 static struct rockchip_mux_recalced_data rk3308_mux_recalced_data[] = {
        {
+               /* gpio1b6_sel */
                .num = 1,
                .pin = 14,
                .reg = 0x28,
                .bit = 12,
                .mask = 0xf
        }, {
+               /* gpio1b7_sel */
                .num = 1,
                .pin = 15,
                .reg = 0x2c,
                .bit = 0,
                .mask = 0x3
        }, {
+               /* gpio1c2_sel */
                .num = 1,
                .pin = 18,
                .reg = 0x30,
                .bit = 4,
                .mask = 0xf
        }, {
+               /* gpio1c3_sel */
                .num = 1,
                .pin = 19,
                .reg = 0x30,
                .bit = 8,
                .mask = 0xf
        }, {
+               /* gpio1c4_sel */
                .num = 1,
                .pin = 20,
                .reg = 0x30,
                .bit = 12,
                .mask = 0xf
        }, {
+               /* gpio1c5_sel */
                .num = 1,
                .pin = 21,
                .reg = 0x34,
                .bit = 0,
                .mask = 0xf
        }, {
+               /* gpio1c6_sel */
                .num = 1,
                .pin = 22,
                .reg = 0x34,
                .bit = 4,
                .mask = 0xf
        }, {
+               /* gpio1c7_sel */
                .num = 1,
                .pin = 23,
                .reg = 0x34,
                .bit = 8,
                .mask = 0xf
        }, {
-               .num = 3,
-               .pin = 12,
-               .reg = 0x68,
-               .bit = 8,
-               .mask = 0xf
-       }, {
-               .num = 3,
-               .pin = 13,
-               .reg = 0x68,
-               .bit = 12,
-               .mask = 0xf
-       }, {
+               /* gpio2a2_sel */
                .num = 2,
                .pin = 2,
-               .reg = 0x608,
-               .bit = 0,
-               .mask = 0x7
+               .reg = 0x40,
+               .bit = 4,
+               .mask = 0x3
        }, {
+               /* gpio2a3_sel */
                .num = 2,
                .pin = 3,
-               .reg = 0x608,
-               .bit = 4,
-               .mask = 0x7
+               .reg = 0x40,
+               .bit = 6,
+               .mask = 0x3
        }, {
+               /* gpio2c0_sel */
                .num = 2,
                .pin = 16,
-               .reg = 0x610,
-               .bit = 8,
-               .mask = 0x7
+               .reg = 0x50,
+               .bit = 0,
+               .mask = 0x3
        }, {
+               /* gpio3b2_sel */
                .num = 3,
                .pin = 10,
-               .reg = 0x610,
-               .bit = 0,
-               .mask = 0x7
+               .reg = 0x68,
+               .bit = 4,
+               .mask = 0x3
        }, {
+               /* gpio3b3_sel */
                .num = 3,
                .pin = 11,
-               .reg = 0x610,
-               .bit = 4,
-               .mask = 0x7
+               .reg = 0x68,
+               .bit = 6,
+               .mask = 0x3
+       }, {
+               /* gpio3b4_sel */
+               .num = 3,
+               .pin = 12,
+               .reg = 0x68,
+               .bit = 8,
+               .mask = 0xf
+       }, {
+               /* gpio3b5_sel */
+               .num = 3,
+               .pin = 13,
+               .reg = 0x68,
+               .bit = 12,
+               .mask = 0xf
        },
 };
 
index 4d37b817b2328827124cfa66c66b78780228ef8b..a91a86628f2f87fec7d2cecca8e60b0967f27448 100644 (file)
@@ -264,14 +264,14 @@ static const struct pinctrl_pin_desc sm6350_pins[] = {
        PINCTRL_PIN(153, "GPIO_153"),
        PINCTRL_PIN(154, "GPIO_154"),
        PINCTRL_PIN(155, "GPIO_155"),
-       PINCTRL_PIN(156, "SDC1_RCLK"),
-       PINCTRL_PIN(157, "SDC1_CLK"),
-       PINCTRL_PIN(158, "SDC1_CMD"),
-       PINCTRL_PIN(159, "SDC1_DATA"),
-       PINCTRL_PIN(160, "SDC2_CLK"),
-       PINCTRL_PIN(161, "SDC2_CMD"),
-       PINCTRL_PIN(162, "SDC2_DATA"),
-       PINCTRL_PIN(163, "UFS_RESET"),
+       PINCTRL_PIN(156, "UFS_RESET"),
+       PINCTRL_PIN(157, "SDC1_RCLK"),
+       PINCTRL_PIN(158, "SDC1_CLK"),
+       PINCTRL_PIN(159, "SDC1_CMD"),
+       PINCTRL_PIN(160, "SDC1_DATA"),
+       PINCTRL_PIN(161, "SDC2_CLK"),
+       PINCTRL_PIN(162, "SDC2_CMD"),
+       PINCTRL_PIN(163, "SDC2_DATA"),
 };
 
 #define DECLARE_MSM_GPIO_PINS(pin) \
index dfd805e7686244c1b7faf5d4293db607503a6fbe..7b0576f71376e6ceb376030b68072a5d9c066538 100644 (file)
@@ -4,14 +4,13 @@
 #
 config PINCTRL_SAMSUNG
        bool
-       depends on OF_GPIO
+       select GPIOLIB
        select PINMUX
        select PINCONF
 
 config PINCTRL_EXYNOS
        bool "Pinctrl common driver part for Samsung Exynos SoCs"
-       depends on OF_GPIO
-       depends on ARCH_EXYNOS || ARCH_S5PV210 || COMPILE_TEST
+       depends on ARCH_EXYNOS || ARCH_S5PV210 || (COMPILE_TEST && OF)
        select PINCTRL_SAMSUNG
        select PINCTRL_EXYNOS_ARM if ARM && (ARCH_EXYNOS || ARCH_S5PV210)
        select PINCTRL_EXYNOS_ARM64 if ARM64 && ARCH_EXYNOS
@@ -26,12 +25,10 @@ config PINCTRL_EXYNOS_ARM64
 
 config PINCTRL_S3C24XX
        bool "Samsung S3C24XX SoC pinctrl driver"
-       depends on OF_GPIO
-       depends on ARCH_S3C24XX || COMPILE_TEST
+       depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
        select PINCTRL_SAMSUNG
 
 config PINCTRL_S3C64XX
        bool "Samsung S3C64XX SoC pinctrl driver"
-       depends on OF_GPIO
-       depends on ARCH_S3C64XX || COMPILE_TEST
+       depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
        select PINCTRL_SAMSUNG
index d291819c2f77c0a07114586d68e00654462a99d5..cb965cf9370575027126fc8a5120ed0e9c65e22b 100644 (file)
@@ -770,7 +770,7 @@ static const struct samsung_pin_bank_data fsd_pin_banks2[] __initconst = {
        EXYNOS850_PIN_BANK_EINTN(3, 0x00, "gpq0"),
 };
 
-const struct samsung_pin_ctrl fsd_pin_ctrl[] __initconst = {
+static const struct samsung_pin_ctrl fsd_pin_ctrl[] __initconst = {
        {
                /* pin-controller instance 0 FSYS0 data */
                .pin_banks      = fsd_pin_banks0,
index 9ed76473157076d2b3531b657e3bc4fa1043a151..f7c9459f66283b1dfdf919c5bf8a8bf8db86e31a 100644 (file)
@@ -225,6 +225,13 @@ static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
        pinctrl_gpio_free(chip->base + offset);
 }
 
+static int stm32_gpio_get_noclk(struct gpio_chip *chip, unsigned int offset)
+{
+       struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
+
+       return !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+}
+
 static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
 {
        struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
@@ -232,7 +239,7 @@ static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
 
        clk_enable(bank->clk);
 
-       ret = !!(readl_relaxed(bank->base + STM32_GPIO_IDR) & BIT(offset));
+       ret = stm32_gpio_get_noclk(chip, offset);
 
        clk_disable(bank->clk);
 
@@ -311,8 +318,12 @@ static void stm32_gpio_irq_trigger(struct irq_data *d)
        struct stm32_gpio_bank *bank = d->domain->host_data;
        int level;
 
+       /* Do not access the GPIO if this is not LEVEL triggered IRQ. */
+       if (!(bank->irq_type[d->hwirq] & IRQ_TYPE_LEVEL_MASK))
+               return;
+
        /* If level interrupt type then retrig */
-       level = stm32_gpio_get(&bank->gpio_chip, d->hwirq);
+       level = stm32_gpio_get_noclk(&bank->gpio_chip, d->hwirq);
        if ((level == 0 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_LOW) ||
            (level == 1 && bank->irq_type[d->hwirq] == IRQ_TYPE_LEVEL_HIGH))
                irq_chip_retrigger_hierarchy(d);
@@ -354,6 +365,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
 {
        struct stm32_gpio_bank *bank = irq_data->domain->host_data;
        struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
+       unsigned long flags;
        int ret;
 
        ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
@@ -367,6 +379,10 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
                return ret;
        }
 
+       flags = irqd_get_trigger_type(irq_data);
+       if (flags & IRQ_TYPE_LEVEL_MASK)
+               clk_enable(bank->clk);
+
        return 0;
 }
 
@@ -374,6 +390,9 @@ static void stm32_gpio_irq_release_resources(struct irq_data *irq_data)
 {
        struct stm32_gpio_bank *bank = irq_data->domain->host_data;
 
+       if (bank->irq_type[irq_data->hwirq] & IRQ_TYPE_LEVEL_MASK)
+               clk_disable(bank->clk);
+
        gpiochip_unlock_as_irq(&bank->gpio_chip, irq_data->hwirq);
 }
 
index 9748345b9298fcf2d150de0a93349917be11439f..cd657760a644721d52c86ca70928b8a7eb0669c8 100644 (file)
@@ -419,7 +419,15 @@ static const struct sppctl_grp sp7021grps_prbp[] = {
        EGRP("PROBE_PORT2", 2, pins_prp2),
 };
 
+/*
+ * Due to compatible reason, the first valid item should start at the third
+ * position of the array. Please keep the first two items of the table
+ * no use (dummy).
+ */
 const struct sppctl_func sppctl_list_funcs[] = {
+       FNCN("", pinmux_type_fpmx, 0x00, 0, 0),
+       FNCN("", pinmux_type_fpmx, 0x00, 0, 0),
+
        FNCN("L2SW_CLK_OUT",        pinmux_type_fpmx, 0x00, 0, 7),
        FNCN("L2SW_MAC_SMI_MDC",    pinmux_type_fpmx, 0x00, 8, 7),
        FNCN("L2SW_LED_FLASH0",     pinmux_type_fpmx, 0x01, 0, 7),
index 2104a2621e5070e1c963576b77e29c7afff06e4d..0e7fbed8a50d6ff50ab4b69e2b326182450728f8 100644 (file)
@@ -371,10 +371,14 @@ static int asus_wmi_evaluate_method_buf(u32 method_id,
 
        switch (obj->type) {
        case ACPI_TYPE_BUFFER:
-               if (obj->buffer.length > size)
+               if (obj->buffer.length > size) {
                        err = -ENOSPC;
-               if (obj->buffer.length == 0)
+                       break;
+               }
+               if (obj->buffer.length == 0) {
                        err = -ENODATA;
+                       break;
+               }
 
                memcpy(ret_buffer, obj->buffer.pointer, obj->buffer.length);
                break;
@@ -2223,9 +2227,10 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
 
        err = fan_curve_get_factory_default(asus, fan_dev);
        if (err) {
-               if (err == -ENODEV || err == -ENODATA)
-                       return 0;
-               return err;
+               pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
+                        fan_dev, err);
+               /* Don't cause probe to fail on devices without fan-curves */
+               return 0;
        }
 
        *available = true;
index 8230e7a68a5ebdf1b81f3974c44330503725e87d..1321687d923ed5e7d81738e4bf7e7baf1264f7c1 100644 (file)
@@ -80,6 +80,10 @@ static struct quirk_entry quirk_dell_inspiron_1012 = {
        .kbd_led_not_present = true,
 };
 
+static struct quirk_entry quirk_dell_latitude_7520 = {
+       .kbd_missing_ac_tag = true,
+};
+
 static struct platform_driver platform_driver = {
        .driver = {
                .name = "dell-laptop",
@@ -336,6 +340,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
                },
                .driver_data = &quirk_dell_inspiron_1012,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "Dell Latitude 7520",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 7520"),
+               },
+               .driver_data = &quirk_dell_latitude_7520,
+       },
        { }
 };
 
index 658bab4b79648b7cdbcb9455c6053e2c322b0c8f..e87a931eab1e72c1c864fd0edf038eb03077e8bd 100644 (file)
@@ -148,6 +148,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+       DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
        DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"),
index a46d3b53bf61af76325301b69c5233309b78a7d9..7a059e02c26565ec16ab3690958dc68a3cc508b4 100644 (file)
@@ -236,7 +236,7 @@ enum ppfear_regs {
 #define ADL_LPM_STATUS_LATCH_EN_OFFSET         0x1704
 #define ADL_LPM_LIVE_STATUS_OFFSET             0x1764
 
-const char *pmc_lpm_modes[] = {
+static const char *pmc_lpm_modes[] = {
        "S0i2.0",
        "S0i2.1",
        "S0i2.2",
index 11d14cc0ff0aeac9105eea7c7b6dc82748c4e672..c830e98dfa386852de5bb973722d3b98e132efbc 100644 (file)
@@ -51,6 +51,8 @@
 #define MBOX_TIMEOUT_US                        2000
 #define MBOX_TIMEOUT_ACQUIRE_US                1000
 #define MBOX_POLLING_PERIOD_US         100
+#define MBOX_ACQUIRE_NUM_RETRIES       5
+#define MBOX_ACQUIRE_RETRY_DELAY_MS    500
 #define MBOX_MAX_PACKETS               4
 
 #define MBOX_OWNER_NONE                        0x00
@@ -81,7 +83,7 @@ enum sdsi_command {
 
 struct sdsi_mbox_info {
        u64     *payload;
-       u64     *buffer;
+       void    *buffer;
        int     size;
 };
 
@@ -163,9 +165,7 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
        total = 0;
        loop = 0;
        do {
-               int offset = SDSI_SIZE_MAILBOX * loop;
-               void __iomem *addr = priv->mbox_addr + offset;
-               u64 *buf = info->buffer + offset / SDSI_SIZE_CMD;
+               void *buf = info->buffer + (SDSI_SIZE_MAILBOX * loop);
                u32 packet_size;
 
                /* Poll on ready bit */
@@ -196,7 +196,7 @@ static int sdsi_mbox_cmd_read(struct sdsi_priv *priv, struct sdsi_mbox_info *inf
                        break;
                }
 
-               sdsi_memcpy64_fromio(buf, addr, round_up(packet_size, SDSI_SIZE_CMD));
+               sdsi_memcpy64_fromio(buf, priv->mbox_addr, round_up(packet_size, SDSI_SIZE_CMD));
 
                total += packet_size;
 
@@ -243,8 +243,8 @@ static int sdsi_mbox_cmd_write(struct sdsi_priv *priv, struct sdsi_mbox_info *in
                  FIELD_PREP(CTRL_PACKET_SIZE, info->size);
        writeq(control, priv->control_addr);
 
-       /* Poll on run_busy bit */
-       ret = readq_poll_timeout(priv->control_addr, control, !(control & CTRL_RUN_BUSY),
+       /* Poll on ready bit */
+       ret = readq_poll_timeout(priv->control_addr, control, control & CTRL_READY,
                                 MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_US);
 
        if (ret)
@@ -263,7 +263,7 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
 {
        u64 control;
        u32 owner;
-       int ret;
+       int ret, retries = 0;
 
        lockdep_assert_held(&priv->mb_lock);
 
@@ -273,13 +273,29 @@ static int sdsi_mbox_acquire(struct sdsi_priv *priv, struct sdsi_mbox_info *info
        if (owner != MBOX_OWNER_NONE)
                return -EBUSY;
 
-       /* Write first qword of payload */
-       writeq(info->payload[0], priv->mbox_addr);
+       /*
+        * If there has been no recent transaction and no one owns the mailbox,
+        * we should acquire it in under 1ms. However, if we've accessed it
+        * recently it may take up to 2.1 seconds to acquire it again.
+        */
+       do {
+               /* Write first qword of payload */
+               writeq(info->payload[0], priv->mbox_addr);
+
+               /* Check for ownership */
+               ret = readq_poll_timeout(priv->control_addr, control,
+                       FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_INBAND,
+                       MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+
+               if (FIELD_GET(CTRL_OWNER, control) == MBOX_OWNER_NONE &&
+                   retries++ < MBOX_ACQUIRE_NUM_RETRIES) {
+                       msleep(MBOX_ACQUIRE_RETRY_DELAY_MS);
+                       continue;
+               }
 
-       /* Check for ownership */
-       ret = readq_poll_timeout(priv->control_addr, control,
-                                FIELD_GET(CTRL_OWNER, control) & MBOX_OWNER_INBAND,
-                                MBOX_POLLING_PERIOD_US, MBOX_TIMEOUT_ACQUIRE_US);
+               /* Either we got it or someone else did. */
+               break;
+       } while (true);
 
        return ret;
 }
index c61f804dd44e820ac65563517b5f1a1a2d02a685..8f9c571d725789ecd4f3e47388b7f86dcdfef403 100644 (file)
@@ -212,6 +212,9 @@ static int __init intel_uncore_init(void)
        const struct x86_cpu_id *id;
        int ret;
 
+       if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
+               return -ENODEV;
+
        id = x86_match_cpu(intel_uncore_cpu_ids);
        if (!id)
                return -ENODEV;
index ddd00efc488252644e732a481d29d3ee5ecc8e3c..fbdb5124d7f7d1d8a541adf463b37f920da469f9 100644 (file)
@@ -41,7 +41,7 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
        int result;
        unsigned char *buffer;
 
-       buffer = kmalloc(32, GFP_KERNEL);
+       buffer = kzalloc(32, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
@@ -55,10 +55,13 @@ static int sr_read_tochdr(struct cdrom_device_info *cdi,
        cgc.data_direction = DMA_FROM_DEVICE;
 
        result = sr_do_ioctl(cd, &cgc);
+       if (result)
+               goto err;
 
        tochdr->cdth_trk0 = buffer[2];
        tochdr->cdth_trk1 = buffer[3];
 
+err:
        kfree(buffer);
        return result;
 }
@@ -71,7 +74,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
        int result;
        unsigned char *buffer;
 
-       buffer = kmalloc(32, GFP_KERNEL);
+       buffer = kzalloc(32, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
@@ -86,6 +89,8 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
        cgc.data_direction = DMA_FROM_DEVICE;
 
        result = sr_do_ioctl(cd, &cgc);
+       if (result)
+               goto err;
 
        tocentry->cdte_ctrl = buffer[5] & 0xf;
        tocentry->cdte_adr = buffer[5] >> 4;
@@ -98,6 +103,7 @@ static int sr_read_tocentry(struct cdrom_device_info *cdi,
                tocentry->cdte_addr.lba = (((((buffer[8] << 8) + buffer[9]) << 8)
                        + buffer[10]) << 8) + buffer[11];
 
+err:
        kfree(buffer);
        return result;
 }
@@ -384,7 +390,7 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
 {
        Scsi_CD *cd = cdi->handle;
        struct packet_command cgc;
-       char *buffer = kmalloc(32, GFP_KERNEL);
+       char *buffer = kzalloc(32, GFP_KERNEL);
        int result;
 
        if (!buffer)
@@ -400,10 +406,13 @@ int sr_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
        cgc.data_direction = DMA_FROM_DEVICE;
        cgc.timeout = IOCTL_TIMEOUT;
        result = sr_do_ioctl(cd, &cgc);
+       if (result)
+               goto err;
 
        memcpy(mcn->medium_catalog_number, buffer + 9, 13);
        mcn->medium_catalog_number[13] = 0;
 
+err:
        kfree(buffer);
        return result;
 }
index e37691e0bf206605f8fa63d7ceca24a8b5f2cb0f..0e5cc948373c4bcd14d8f540a33f8fdc93a499b9 100644 (file)
@@ -113,8 +113,10 @@ config THERMAL_DEFAULT_GOV_USER_SPACE
        bool "user_space"
        select THERMAL_GOV_USER_SPACE
        help
-         Select this if you want to let the user space manage the
-         platform thermals.
+         The Userspace governor allows to get trip point crossed
+         notification from the kernel via uevents. It is recommended
+         to use the netlink interface instead which gives richer
+         information about the thermal framework events.
 
 config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR
        bool "power_allocator"
index 64a18e354a20403c41a987bac14d569ba2bd2099..a62a4e90bd3f5b8e2ce57073d4b1aa2df9892aba 100644 (file)
@@ -17,8 +17,7 @@
 
 static int user_space_bind(struct thermal_zone_device *tz)
 {
-       pr_warn_once("Userspace governor deprecated: use thermal netlink " \
-                    "notification instead\n");
+       pr_info_once("Consider using thermal netlink events interface\n");
 
        return 0;
 }
index 4954800b9850256da3eb6c1544594f2aa2d919d5..d97f496bab9be96416499092e9bc042af160c69a 100644 (file)
@@ -68,7 +68,7 @@ static int evaluate_odvp(struct int3400_thermal_priv *priv);
 struct odvp_attr {
        int odvp;
        struct int3400_thermal_priv *priv;
-       struct kobj_attribute attr;
+       struct device_attribute attr;
 };
 
 static ssize_t data_vault_read(struct file *file, struct kobject *kobj,
@@ -311,7 +311,7 @@ end:
        return result;
 }
 
-static ssize_t odvp_show(struct kobject *kobj, struct kobj_attribute *attr,
+static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        struct odvp_attr *odvp_attr;
index f154bada2906445d145c0a0c139cb95d83a0cdac..1c4aac8464a709d3fb01f667daab973a7cace8b2 100644 (file)
@@ -610,9 +610,6 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
        unsigned long state;
        int result;
 
-       dev_warn_once(&cdev->device,
-                     "Setting cooling device state is deprecated\n");
-       
        if (sscanf(buf, "%ld\n", &state) != 1)
                return -EINVAL;
 
index edf169d0816e6290f823729cad218642debb5b2b..eb3e47c58c5f7f8c7e1b4a698577d2938327e314 100644 (file)
@@ -566,6 +566,9 @@ static int arkfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
        int rv, mem, step;
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        /* Find appropriate format */
        rv = svga_match_format (arkfb_formats, var, NULL);
        if (rv < 0)
index 6ff16d3132e5f1a16af0459c1701ce8d8c18ed05..b26c81233b6b7c4ff3a570435486cb4c353611fd 100644 (file)
@@ -68,7 +68,6 @@
 #ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
-#include <asm/prom.h>
 #include "../macmodes.h"
 #endif
 
index 1aef3d6ebd8809f5134381ab2d5b9878141a4c0d..a3e6faed7745a4b2c5ab877b152ecce97a777091 100644 (file)
@@ -79,7 +79,6 @@
 
 #ifdef __powerpc__
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include "../macmodes.h"
 #endif
 #ifdef __sparc__
index b5fbd5329652865b573f85fa9b71f190490a6502..97a5972f5b1fb8a0f36228e7e1d2f56baf96d074 100644 (file)
@@ -22,7 +22,6 @@
 
 #ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
-#include <asm/prom.h>
 #include <asm/pmac_feature.h>
 #endif
 
index 93f403cbb41507963a34f293fbd564f3d5694fe8..91d81b5762313ee258b088659c91d6e288227d89 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <asm/io.h>
 
-#if defined(CONFIG_PPC) || defined(CONFIG_SPARC)
+#ifdef CONFIG_SPARC
 #include <asm/prom.h>
 #endif
 
index c5d15c6db28767d04dd7a0d3ab55fbebd78adabb..771ce1f769515554ee65604cd63e5fc66410916d 100644 (file)
@@ -268,8 +268,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
                goto out_fb_release;
        }
 
-       cfb->syscon =
-               syscon_regmap_lookup_by_compatible("cirrus,ep7209-syscon1");
+       cfb->syscon = syscon_regmap_lookup_by_phandle(np, "syscon");
        if (IS_ERR(cfb->syscon)) {
                ret = PTR_ERR(cfb->syscon);
                goto out_fb_release;
index bd59e7b11ed5305d922b42d33d0dd01c98edb09f..aba46118b208be14b26ea6b8d1b5e7c4cc26b69e 100644 (file)
@@ -47,9 +47,6 @@
 #include <linux/nvram.h>
 #include <linux/adb.h>
 #include <linux/cuda.h>
-#ifdef CONFIG_PPC_PMAC
-#include <asm/prom.h>
-#endif
 #ifdef CONFIG_BOOTX_TEXT
 #include <asm/btext.h>
 #endif
index 52cce0db8bd344a87d01ed0d9433916e955d9555..09dd85553d4f3cf95884eb81851555638162a2a0 100644 (file)
@@ -657,6 +657,9 @@ static int i740fb_decode_var(const struct fb_var_screeninfo *var,
 
 static int i740fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
+       if (!var->pixclock)
+               return -EINVAL;
+
        switch (var->bits_per_pixel) {
        case 8:
                var->red.offset = var->green.offset = var->blue.offset = 0;
@@ -740,7 +743,7 @@ static int i740fb_set_par(struct fb_info *info)
        if (i)
                return i;
 
-       memset(info->screen_base, 0, info->screen_size);
+       memset_io(info->screen_base, 0, info->screen_size);
 
        vga_protect(par);
 
index 68288756ffff376fec44c592ef0a913c58e30034..a2f644c97f28026db0bde451e300aa1bf8840001 100644 (file)
@@ -925,10 +925,12 @@ static int imxfb_probe(struct platform_device *pdev)
                                sizeof(struct imx_fb_videomode), GFP_KERNEL);
                if (!fbi->mode) {
                        ret = -ENOMEM;
+                       of_node_put(display_np);
                        goto failed_of_parse;
                }
 
                ret = imxfb_of_read_mode(&pdev->dev, display_np, fbi->mode);
+               of_node_put(display_np);
                if (ret)
                        goto failed_of_parse;
        }
index 25801e8e3f74a9ff0ba1cbd59913dcab6f76e3c4..d57772f96ad26718eb01c0ab83dd6f870364c889 100644 (file)
@@ -494,6 +494,8 @@ static int kyrofb_set_par(struct fb_info *info)
                                    info->var.hsync_len +
                                    info->var.left_margin)) / 1000;
 
+       if (!lineclock)
+               return -EINVAL;
 
        /* time for a frame in ns (precision in 32bpp) */
        frameclock = lineclock * (info->var.yres +
index 759dee996af1b0cc3ad4a94b6b174bde5c33c68a..958be6805f876deecd461a42b0d86982f151054f 100644 (file)
@@ -47,7 +47,6 @@
 #include <asm/unaligned.h>
 
 #if defined(CONFIG_PPC_PMAC)
-#include <asm/prom.h>
 #include "../macmodes.h"
 #endif
 
index 63721337a37787b74dfcb61998f42fe8686c8e1b..a7508f5be343a654e615b45b17308ac888047656 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/interrupt.h>
 #include <linux/pci.h>
 #if defined(CONFIG_OF)
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #endif
 #include "mb862xxfb.h"
index 154127256a2c17a8621469b38dbda60584320238..03707461eced633b65effb9ed108c9a8e5f17834 100644 (file)
@@ -127,19 +127,18 @@ EXPORT_SYMBOL_GPL(mmp_unregister_panel);
  */
 struct mmp_path *mmp_get_path(const char *name)
 {
-       struct mmp_path *path;
-       int found = 0;
+       struct mmp_path *path = NULL, *iter;
 
        mutex_lock(&disp_lock);
-       list_for_each_entry(path, &path_list, node) {
-               if (!strcmp(name, path->name)) {
-                       found = 1;
+       list_for_each_entry(iter, &path_list, node) {
+               if (!strcmp(name, iter->name)) {
+                       path = iter;
                        break;
                }
        }
        mutex_unlock(&disp_lock);
 
-       return found ? path : NULL;
+       return path;
 }
 EXPORT_SYMBOL_GPL(mmp_get_path);
 
index 966df2a0736068fdd3e1e786d03fb51087e6184d..28d32cbf496b5554416ed5bfef95f05cfa55e9ee 100644 (file)
@@ -585,7 +585,7 @@ neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
        DBG("neofb_check_var");
 
-       if (var->pixclock && PICOS2KHZ(var->pixclock) > par->maxClock)
+       if (!var->pixclock || PICOS2KHZ(var->pixclock) > par->maxClock)
                return -EINVAL;
 
        /* Is the mode larger than the LCD panel? */
index b191bef22d9845453b1438454087abf84daa61b9..9d9fe5c3a7a1ac41ebd8d39ab1eb7979a81894b7 100644 (file)
@@ -964,7 +964,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
        if ((r = calc_extif_timings(ext_clk, &extif_mem_div)) < 0)
                goto err3;
        hwa742.extif->set_timings(&hwa742.reg_timings);
-       clk_enable(hwa742.sys_ck);
+       clk_prepare_enable(hwa742.sys_ck);
 
        calc_hwa742_clk_rates(ext_clk, &sys_clk, &pix_clk);
        if ((r = calc_extif_timings(sys_clk, &extif_mem_div)) < 0)
@@ -1023,7 +1023,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode,
 
        return 0;
 err4:
-       clk_disable(hwa742.sys_ck);
+       clk_disable_unprepare(hwa742.sys_ck);
 err3:
        hwa742.extif->cleanup();
 err2:
@@ -1037,7 +1037,7 @@ static void hwa742_cleanup(void)
        hwa742_set_update_mode(OMAPFB_UPDATE_DISABLED);
        hwa742.extif->cleanup();
        hwa742.int_ctrl->cleanup();
-       clk_disable(hwa742.sys_ck);
+       clk_disable_unprepare(hwa742.sys_ck);
 }
 
 struct lcd_ctrl hwa742_ctrl = {
index 7317c9aad677232545e75b7dbc78ad648c674fcb..97d20dc0d1d0276ea704ad27e2454a38250ae085 100644 (file)
@@ -711,7 +711,7 @@ static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
                dev_err(fbdev->dev, "failed to adjust LCD rate\n");
                goto fail1;
        }
-       clk_enable(lcdc.lcd_ck);
+       clk_prepare_enable(lcdc.lcd_ck);
 
        r = request_irq(OMAP_LCDC_IRQ, lcdc_irq_handler, 0, MODULE_NAME, fbdev);
        if (r) {
@@ -746,7 +746,7 @@ fail4:
 fail3:
        free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
 fail2:
-       clk_disable(lcdc.lcd_ck);
+       clk_disable_unprepare(lcdc.lcd_ck);
 fail1:
        clk_put(lcdc.lcd_ck);
 fail0:
@@ -760,7 +760,7 @@ static void omap_lcdc_cleanup(void)
        free_fbmem();
        omap_free_lcd_dma();
        free_irq(OMAP_LCDC_IRQ, lcdc.fbdev);
-       clk_disable(lcdc.lcd_ck);
+       clk_disable_unprepare(lcdc.lcd_ck);
        clk_put(lcdc.lcd_ck);
 }
 
index 80ac67f27f0da11223183efb1b87d5df8d965622..b9cb8b386627650720b08d09f7edb56c2634b161 100644 (file)
@@ -598,7 +598,7 @@ static int sossi_init(struct omapfb_device *fbdev)
        l &= ~CONF_SOSSI_RESET_R;
        omap_writel(l, MOD_CONF_CTRL_1);
 
-       clk_enable(sossi.fck);
+       clk_prepare_enable(sossi.fck);
        l = omap_readl(ARM_IDLECT2);
        l &= ~(1 << 8);                 /* DMACK_REQ */
        omap_writel(l, ARM_IDLECT2);
@@ -649,7 +649,7 @@ static int sossi_init(struct omapfb_device *fbdev)
        return 0;
 
 err:
-       clk_disable(sossi.fck);
+       clk_disable_unprepare(sossi.fck);
        clk_put(sossi.fck);
        return r;
 }
@@ -657,6 +657,7 @@ err:
 static void sossi_cleanup(void)
 {
        omap_lcdc_free_dma_callback();
+       clk_unprepare(sossi.fck);
        clk_put(sossi.fck);
        iounmap(sossi.base);
 }
index ce413a9df06e4be8ca0b6b1915e5548047c616e5..5b9e26ea6449484ddccdb407760815d1a4574b4c 100644 (file)
@@ -30,9 +30,9 @@
 #include <linux/fb.h>
 #include <linux/init.h>
 #include <linux/nvram.h>
+#include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
-#include <asm/prom.h>
 
 #include "macmodes.h"
 #include "platinumfb.h"
index c68725eebee3bd0cc1c6fa502d9b694a3668f74b..d3be2c64f1c08dce2b1c10eb0be7ee397e030bff 100644 (file)
@@ -1504,9 +1504,7 @@ static const struct fb_ops pm2fb_ops = {
 
 
 /**
- * Device initialisation
- *
- * Initialise and allocate resource for PCI device.
+ * pm2fb_probe - Initialise and allocate resource for PCI device.
  *
  * @pdev:      PCI device.
  * @id:                PCI device ID.
@@ -1711,9 +1709,7 @@ static int pm2fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 /**
- * Device removal.
- *
- * Release all device resources.
+ * pm2fb_remove - Release all device resources.
  *
  * @pdev:      PCI device to clean up.
  */
index f1551e00eb12f149e94857d94d1c4747a2d0adb8..8ad91c251fe6a171bbf455d1277c8b664b033b37 100644 (file)
@@ -2256,10 +2256,10 @@ static int pxafb_probe(struct platform_device *dev)
                        goto failed;
                for (i = 0; i < inf->num_modes; i++)
                        inf->modes[i] = pdata->modes[i];
+       } else {
+               inf = of_pxafb_of_mach_info(&dev->dev);
        }
 
-       if (!pdata)
-               inf = of_pxafb_of_mach_info(&dev->dev);
        if (IS_ERR_OR_NULL(inf))
                goto failed;
 
index 5c74253e7b2c03222febf7f46ae0e4de810db6f1..b93c8eb0233692e1c5f096bfa9d7c197227c667d 100644 (file)
@@ -549,6 +549,9 @@ static int s3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
        int rv, mem, step;
        u16 m, n, r;
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        /* Find appropriate format */
        rv = svga_match_format (s3fb_formats, var, NULL);
 
index aa4ebe3192ec973512279389911c550d4a648296..9a4417430b4e948f1f82e60ea58cb733225d379f 100644 (file)
@@ -531,9 +531,6 @@ static void sh_mobile_lcdc_display_off(struct sh_mobile_lcdc_chan *ch)
                ch->tx_dev->ops->display_off(ch->tx_dev);
 }
 
-static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var,
-                                   struct fb_info *info);
-
 /* -----------------------------------------------------------------------------
  * Format helpers
  */
index 742f62986b80b9ea009b5c316ce716c469f24cbc..f28fd69d5eb75919920465ae533ce532633dd7cf 100644 (file)
@@ -4463,7 +4463,7 @@ static void sisfb_post_sis300(struct pci_dev *pdev)
                SiS_SetReg(SISCR, 0x37, 0x02);
                SiS_SetReg(SISPART2, 0x00, 0x1c);
                v4 = 0x00; v5 = 0x00; v6 = 0x10;
-               if(ivideo->SiS_Pr.UseROM) {
+               if (ivideo->SiS_Pr.UseROM && bios) {
                        v4 = bios[0xf5];
                        v5 = bios[0xf6];
                        v6 = bios[0xf7];
index 4d20cb557ff0f6e3f2bf959368828818ad2804c1..319131bd72cffa11a965f69588102012942d40fd 100644 (file)
@@ -996,6 +996,9 @@ static int tridentfb_check_var(struct fb_var_screeninfo *var,
        int ramdac = 230000; /* 230MHz for most 3D chips */
        debug("enter\n");
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        /* check color depth */
        if (bpp == 24)
                bpp = var->bits_per_pixel = 32;
index b6ec0b8e2b7252d4062e0c7a3f1727f74eb550eb..d280733f283b1250a4902caf71a3dbf8b7363561 100644 (file)
@@ -1650,8 +1650,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        const struct device_attribute *attr;
        struct dlfb_data *dlfb;
        struct fb_info *info;
-       int retval = -ENOMEM;
+       int retval;
        struct usb_device *usbdev = interface_to_usbdev(intf);
+       struct usb_endpoint_descriptor *out;
 
        /* usb initialization */
        dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1665,6 +1666,12 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        dlfb->udev = usb_get_dev(usbdev);
        usb_set_intfdata(intf, dlfb);
 
+       retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
+       if (retval) {
+               dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+               goto error;
+       }
+
        dev_dbg(&intf->dev, "console enable=%d\n", console);
        dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio);
        dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);
@@ -1674,6 +1681,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
                dev_err(&intf->dev,
                        "firmware not recognized, incompatible device?\n");
+               retval = -ENODEV;
                goto error;
        }
 
@@ -1687,8 +1695,10 @@ static int dlfb_usb_probe(struct usb_interface *intf,
 
        /* allocates framebuffer driver structure, not framebuffer memory */
        info = framebuffer_alloc(0, &dlfb->udev->dev);
-       if (!info)
+       if (!info) {
+               retval = -ENOMEM;
                goto error;
+       }
 
        dlfb->info = info;
        info->par = dlfb;
index 8425afe37d7c08890491eb72017fa32b88f71feb..a6c9d4f26669564655270315402871b25601c4ed 100644 (file)
 #include <linux/nvram.h>
 #include <linux/adb.h>
 #include <linux/cuda.h>
+#include <linux/of_address.h>
 #ifdef CONFIG_MAC
 #include <asm/macintosh.h>
-#else
-#include <asm/prom.h>
 #endif
 
 #include "macmodes.h"
index 7a959e5ba90b83b9a3cf7a3f844274f094019b6b..a92a8c670cf0f28ffda9c391578bd5d51ba69ee0 100644 (file)
@@ -321,6 +321,9 @@ static int vt8623fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf
 {
        int rv, mem, step;
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        /* Find appropriate format */
        rv = svga_match_format (vt8623fb_formats, var, NULL);
        if (rv < 0)
index f93b6abbe258193a64e2bfaada6d6554196b2e22..bebd371c6b93eaea9f93239854d653fffbedff1c 100644 (file)
@@ -199,7 +199,7 @@ struct display_timings *of_get_display_timings(const struct device_node *np)
                struct display_timing *dt;
                int r;
 
-               dt = kzalloc(sizeof(*dt), GFP_KERNEL);
+               dt = kmalloc(sizeof(*dt), GFP_KERNEL);
                if (!dt) {
                        pr_err("%pOF: could not allocate display_timing struct\n",
                                np);
index 4849f94372a45d6d5667e8dd3cef912d6ff44542..55acb32842a3a7296176c85113850124f7c9d97b 100644 (file)
@@ -178,9 +178,9 @@ static void __del_gref(struct gntalloc_gref *gref)
        unsigned long addr;
 
        if (gref->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
-               uint8_t *tmp = kmap(gref->page);
+               uint8_t *tmp = kmap_local_page(gref->page);
                tmp[gref->notify.pgoff] = 0;
-               kunmap(gref->page);
+               kunmap_local(tmp);
        }
        if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
                notify_remote_via_evtchn(gref->notify.event);
index b7631b88426e37a234ba758a37c6149698d05015..077c95e9baa50648ad027c78e4b9fb77dbe19853 100644 (file)
@@ -1060,6 +1060,7 @@ struct btrfs_fs_info {
         */
        spinlock_t relocation_bg_lock;
        u64 data_reloc_bg;
+       struct mutex zoned_data_reloc_io_lock;
 
        u64 nr_global_roots;
 
index 71fd99b482832c4e635b761400bc8cc537fc3905..f262026219894c13aed8a0fdd9a38f72aa1a93fb 100644 (file)
@@ -734,7 +734,12 @@ static int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info,
 
        btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
 
-       /* Commit dev_replace state and reserve 1 item for it. */
+       /*
+        * Commit dev_replace state and reserve 1 item for it.
+        * This is crucial to ensure we won't miss copying extents for new block
+        * groups that are allocated after we started the device replace, and
+        * must be done after setting up the device replace state.
+        */
        trans = btrfs_start_transaction(root, 1);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
index 126f244cdf883cdc6c764a30f55aaa1da00982f3..ed8e288cc3694117bef9cb6ed8dfdb3dd4abab79 100644 (file)
@@ -3157,6 +3157,7 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
        mutex_init(&fs_info->reloc_mutex);
        mutex_init(&fs_info->delalloc_root_mutex);
        mutex_init(&fs_info->zoned_meta_io_lock);
+       mutex_init(&fs_info->zoned_data_reloc_io_lock);
        seqlock_init(&fs_info->profiles_lock);
 
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
index 724e8fe06aa0bc371334d21325dbe1b4b16a3575..33c19f51d79b000e75e216ced93f984556ef730f 100644 (file)
@@ -2658,6 +2658,7 @@ int btrfs_repair_one_sector(struct inode *inode,
 
        repair_bio = btrfs_bio_alloc(1);
        repair_bbio = btrfs_bio(repair_bio);
+       repair_bbio->file_offset = start;
        repair_bio->bi_opf = REQ_OP_READ;
        repair_bio->bi_end_io = failed_bio->bi_end_io;
        repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
@@ -3333,24 +3334,37 @@ static int alloc_new_bio(struct btrfs_inode *inode,
        ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
        if (ret < 0)
                goto error;
-       if (wbc) {
-               struct block_device *bdev;
 
-               bdev = fs_info->fs_devices->latest_dev->bdev;
-               bio_set_dev(bio, bdev);
-               wbc_init_bio(wbc, bio);
-       }
-       if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
-               struct btrfs_device *device;
+       if (wbc) {
+               /*
+                * For Zone append we need the correct block_device that we are
+                * going to write to set in the bio to be able to respect the
+                * hardware limitation.  Look it up here:
+                */
+               if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+                       struct btrfs_device *dev;
+
+                       dev = btrfs_zoned_get_device(fs_info, disk_bytenr,
+                                                    fs_info->sectorsize);
+                       if (IS_ERR(dev)) {
+                               ret = PTR_ERR(dev);
+                               goto error;
+                       }
 
-               device = btrfs_zoned_get_device(fs_info, disk_bytenr,
-                                               fs_info->sectorsize);
-               if (IS_ERR(device)) {
-                       ret = PTR_ERR(device);
-                       goto error;
+                       bio_set_dev(bio, dev->bdev);
+               } else {
+                       /*
+                        * Otherwise pick the last added device to support
+                        * cgroup writeback.  For multi-device file systems this
+                        * means blk-cgroup policies have to always be set on the
+                        * last added/replaced device.  This is a bit odd but has
+                        * been like that for a long time.
+                        */
+                       bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
                }
-
-               btrfs_bio(bio)->device = device;
+               wbc_init_bio(wbc, bio);
+       } else {
+               ASSERT(bio_op(bio) != REQ_OP_ZONE_APPEND);
        }
        return 0;
 error:
index 5082b9c70f8c9dc57340e7825f7e1437e9ee006d..1c8a43ecfb9ffa6fcadde39b8af660ff1e1e687a 100644 (file)
@@ -7810,8 +7810,6 @@ static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
        const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
        struct bio_vec bvec;
        struct bvec_iter iter;
-       const u64 orig_file_offset = dip->file_offset;
-       u64 start = orig_file_offset;
        u32 bio_offset = 0;
        blk_status_t err = BLK_STS_OK;
 
@@ -7821,6 +7819,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec.bv_len);
                pgoff = bvec.bv_offset;
                for (i = 0; i < nr_sectors; i++) {
+                       u64 start = bbio->file_offset + bio_offset;
+
                        ASSERT(pgoff < PAGE_SIZE);
                        if (uptodate &&
                            (!csum || !check_data_csum(inode, bbio,
@@ -7833,17 +7833,13 @@ static blk_status_t btrfs_check_read_dio_bio(struct btrfs_dio_private *dip,
                        } else {
                                int ret;
 
-                               ASSERT((start - orig_file_offset) < UINT_MAX);
-                               ret = btrfs_repair_one_sector(inode,
-                                               &bbio->bio,
-                                               start - orig_file_offset,
-                                               bvec.bv_page, pgoff,
+                               ret = btrfs_repair_one_sector(inode, &bbio->bio,
+                                               bio_offset, bvec.bv_page, pgoff,
                                                start, bbio->mirror_num,
                                                submit_dio_repair_bio);
                                if (ret)
                                        err = errno_to_blk_status(ret);
                        }
-                       start += sectorsize;
                        ASSERT(bio_offset + sectorsize > bio_offset);
                        bio_offset += sectorsize;
                        pgoff += sectorsize;
@@ -7870,6 +7866,7 @@ static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
 static void btrfs_end_dio_bio(struct bio *bio)
 {
        struct btrfs_dio_private *dip = bio->bi_private;
+       struct btrfs_bio *bbio = btrfs_bio(bio);
        blk_status_t err = bio->bi_status;
 
        if (err)
@@ -7880,12 +7877,12 @@ static void btrfs_end_dio_bio(struct bio *bio)
                           bio->bi_iter.bi_size, err);
 
        if (bio_op(bio) == REQ_OP_READ)
-               err = btrfs_check_read_dio_bio(dip, btrfs_bio(bio), !err);
+               err = btrfs_check_read_dio_bio(dip, bbio, !err);
 
        if (err)
                dip->dio_bio->bi_status = err;
 
-       btrfs_record_physical_zoned(dip->inode, dip->file_offset, bio);
+       btrfs_record_physical_zoned(dip->inode, bbio->file_offset, bio);
 
        bio_put(bio);
        btrfs_dio_private_put(dip);
@@ -8046,6 +8043,7 @@ static void btrfs_submit_direct(const struct iomap_iter *iter,
                bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
                bio->bi_private = dip;
                bio->bi_end_io = btrfs_end_dio_bio;
+               btrfs_bio(bio)->file_offset = file_offset;
 
                if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
                        status = extract_ordered_extent(BTRFS_I(inode), bio,
index 11089568b2879e65f37de80db06c0278c0b3e1d7..8cd713d37ad2f35f1375c955f7426ec79e5f39cb 100644 (file)
@@ -3699,6 +3699,31 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                if (!cache)
                        goto skip;
 
+               ASSERT(cache->start <= chunk_offset);
+               /*
+                * We are using the commit root to search for device extents, so
+                * that means we could have found a device extent item from a
+                * block group that was deleted in the current transaction. The
+                * logical start offset of the deleted block group, stored at
+                * @chunk_offset, might be part of the logical address range of
+                * a new block group (which uses different physical extents).
+                * In this case btrfs_lookup_block_group() has returned the new
+                * block group, and its start address is less than @chunk_offset.
+                *
+                * We skip such new block groups, because it's pointless to
+                * process them, as we won't find their extents because we search
+                * for them using the commit root of the extent tree. For a device
+                * replace it's also fine to skip it, we won't miss copying them
+                * to the target device because we have the write duplication
+                * setup through the regular write path (by btrfs_map_block()),
+                * and we have committed a transaction when we started the device
+                * replace, right after setting up the device replace state.
+                */
+               if (cache->start < chunk_offset) {
+                       btrfs_put_block_group(cache);
+                       goto skip;
+               }
+
                if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
                        spin_lock(&cache->lock);
                        if (!cache->to_copy) {
@@ -3822,7 +3847,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                dev_replace->item_needs_writeback = 1;
                up_write(&dev_replace->rwsem);
 
-               ASSERT(cache->start == chunk_offset);
                ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
                                  dev_extent_len);
 
index 571dae8ad65e8cb92e9eebf4f6fa92e746c845c9..09e4f1a04e6fb5d882df1875af3f547de32ac3df 100644 (file)
@@ -3188,6 +3188,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                        ret = btrfs_alloc_log_tree_node(trans, log_root_tree);
                        if (ret) {
                                mutex_unlock(&fs_info->tree_root->log_mutex);
+                               blk_finish_plug(&plug);
                                goto out;
                        }
                }
index bd297f23d19e7f9fccb9326f64dbb7774ae7f396..f3e28f11cfb6e606ab4cdb14d8f45bb2352f85e8 100644 (file)
@@ -328,6 +328,9 @@ struct btrfs_fs_devices {
 struct btrfs_bio {
        unsigned int mirror_num;
 
+       /* for direct I/O */
+       u64 file_offset;
+
        /* @device is for stripe IO submission. */
        struct btrfs_device *device;
        u8 *csum;
index cbf016a7bb5dd393873d555a3d347b9e0fc46d18..6dee76248cb4da37c8a1efe01045e6f9ffeda8fb 100644 (file)
@@ -359,7 +359,7 @@ static inline void btrfs_zoned_data_reloc_lock(struct btrfs_inode *inode)
        struct btrfs_root *root = inode->root;
 
        if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
-               btrfs_inode_lock(&inode->vfs_inode, 0);
+               mutex_lock(&root->fs_info->zoned_data_reloc_io_lock);
 }
 
 static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
@@ -367,7 +367,7 @@ static inline void btrfs_zoned_data_reloc_unlock(struct btrfs_inode *inode)
        struct btrfs_root *root = inode->root;
 
        if (btrfs_is_data_reloc_root(root) && btrfs_is_zoned(root->fs_info))
-               btrfs_inode_unlock(&inode->vfs_inode, 0);
+               mutex_unlock(&root->fs_info->zoned_data_reloc_io_lock);
 }
 
 #endif
index 902e8c6c0f9c27890690515d3886fcb85a555026..42e14f408856d6e6d40f6ecaada3e350f893b4da 100644 (file)
@@ -534,12 +534,19 @@ int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
 {
        /* If tcp session is not an dfs connection, then reconnect to last target server */
        spin_lock(&cifs_tcp_ses_lock);
-       if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
+       if (!server->is_dfs_conn) {
                spin_unlock(&cifs_tcp_ses_lock);
                return __cifs_reconnect(server, mark_smb_session);
        }
        spin_unlock(&cifs_tcp_ses_lock);
 
+       mutex_lock(&server->refpath_lock);
+       if (!server->origin_fullpath || !server->leaf_fullpath) {
+               mutex_unlock(&server->refpath_lock);
+               return __cifs_reconnect(server, mark_smb_session);
+       }
+       mutex_unlock(&server->refpath_lock);
+
        return reconnect_dfs_server(server);
 }
 #else
@@ -3675,9 +3682,11 @@ static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
 {
        struct TCP_Server_Info *server = mnt_ctx->server;
 
+       mutex_lock(&server->refpath_lock);
        server->origin_fullpath = mnt_ctx->origin_fullpath;
        server->leaf_fullpath = mnt_ctx->leaf_fullpath;
        server->current_fullpath = mnt_ctx->leaf_fullpath;
+       mutex_unlock(&server->refpath_lock);
        mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
 }
 
index 30e040da4f096b34480a0e47a85a4f1d2f318dde..956f8e5cf3e7423438b235a99711563c9ccbb205 100644 (file)
@@ -1422,12 +1422,14 @@ static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool
        struct TCP_Server_Info *server = tcon->ses->server;
 
        mutex_lock(&server->refpath_lock);
-       if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
-               __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+       if (server->origin_fullpath) {
+               if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+                                                       server->origin_fullpath))
+                       __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+               __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+       }
        mutex_unlock(&server->refpath_lock);
 
-       __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
-
        return 0;
 }
 
@@ -1530,11 +1532,14 @@ static void refresh_mounts(struct cifs_ses **sessions)
                list_del_init(&tcon->ulist);
 
                mutex_lock(&server->refpath_lock);
-               if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
-                       __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+               if (server->origin_fullpath) {
+                       if (server->leaf_fullpath && strcasecmp(server->leaf_fullpath,
+                                                               server->origin_fullpath))
+                               __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+                       __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
+               }
                mutex_unlock(&server->refpath_lock);
 
-               __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
                cifs_put_tcon(tcon);
        }
 }
index a67df8eaf7026186b6e9efa0413731ff3ae4d167..d6aaeff4a30a530864211b79fbda744b87d6b720 100644 (file)
@@ -1858,9 +1858,17 @@ smb2_copychunk_range(const unsigned int xid,
        int chunks_copied = 0;
        bool chunk_sizes_updated = false;
        ssize_t bytes_written, total_bytes_written = 0;
+       struct inode *inode;
 
        pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
 
+       /*
+        * We need to flush all unwritten data before we can send the
+        * copychunk ioctl to the server.
+        */
+       inode = d_inode(trgtfile->dentry);
+       filemap_write_and_wait(inode->i_mapping);
+
        if (pcchunk == NULL)
                return -ENOMEM;
 
index d9d1c353bafc7daa2d5d09a87227075aef64680a..c667e6ddfe2f7c52b4673a12295875f4d83ccb25 100644 (file)
@@ -464,13 +464,12 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                return -EIO;
        }
 
-       tr_hdr = kmalloc(sizeof(*tr_hdr), GFP_NOFS);
+       tr_hdr = kzalloc(sizeof(*tr_hdr), GFP_NOFS);
        if (!tr_hdr)
                return -ENOMEM;
 
        memset(&cur_rqst[0], 0, sizeof(cur_rqst));
        memset(&iov, 0, sizeof(iov));
-       memset(tr_hdr, 0, sizeof(*tr_hdr));
 
        iov.iov_base = tr_hdr;
        iov.iov_len = sizeof(*tr_hdr);
index 3f87cca49f0ce31bd0dec08893399235b8289585..a743b1e3b89ec2d58e8f427ba345e55801a62a41 100644 (file)
@@ -2273,6 +2273,10 @@ static inline int ext4_forced_shutdown(struct ext4_sb_info *sbi)
  * Structure of a directory entry
  */
 #define EXT4_NAME_LEN 255
+/*
+ * Base length of the ext4 directory entry excluding the name length
+ */
+#define EXT4_BASE_DIR_LEN (sizeof(struct ext4_dir_entry_2) - EXT4_NAME_LEN)
 
 struct ext4_dir_entry {
        __le32  inode;                  /* Inode number */
@@ -3032,7 +3036,7 @@ extern int ext4_inode_attach_jinode(struct inode *inode);
 extern int ext4_can_truncate(struct inode *inode);
 extern int ext4_truncate(struct inode *);
 extern int ext4_break_layouts(struct inode *);
-extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
 extern void ext4_set_inode_flags(struct inode *, bool init);
 extern int ext4_alloc_da_blocks(struct inode *inode);
 extern void ext4_set_aops(struct inode *inode);
@@ -3064,6 +3068,7 @@ int ext4_fileattr_set(struct user_namespace *mnt_userns,
                      struct dentry *dentry, struct fileattr *fa);
 int ext4_fileattr_get(struct dentry *dentry, struct fileattr *fa);
 extern void ext4_reset_inode_seed(struct inode *inode);
+int ext4_update_overhead(struct super_block *sb);
 
 /* migrate.c */
 extern int ext4_ext_migrate(struct inode *);
index 0d98cf402282cbb5f361ef92a4a19598be348ade..e473fde6b64b4e80c32ed1fafbf5b6187a5ba3fa 100644 (file)
@@ -4500,9 +4500,9 @@ retry:
        return ret > 0 ? ret2 : ret;
 }
 
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len);
 
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len);
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len);
 
 static long ext4_zero_range(struct file *file, loff_t offset,
                            loff_t len, int mode)
@@ -4574,6 +4574,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
        /* Wait all existing dio workers, newcomers will block on i_rwsem */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /* Preallocate the range including the unaligned edges */
        if (partial_begin || partial_end) {
                ret = ext4_alloc_file_blocks(file,
@@ -4690,7 +4694,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE) {
-               ret = ext4_punch_hole(inode, offset, len);
+               ret = ext4_punch_hole(file, offset, len);
                goto exit;
        }
 
@@ -4699,12 +4703,12 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                goto exit;
 
        if (mode & FALLOC_FL_COLLAPSE_RANGE) {
-               ret = ext4_collapse_range(inode, offset, len);
+               ret = ext4_collapse_range(file, offset, len);
                goto exit;
        }
 
        if (mode & FALLOC_FL_INSERT_RANGE) {
-               ret = ext4_insert_range(inode, offset, len);
+               ret = ext4_insert_range(file, offset, len);
                goto exit;
        }
 
@@ -4740,6 +4744,10 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        /* Wait all existing dio workers, newcomers will block on i_rwsem */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out;
+
        ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
        if (ret)
                goto out;
@@ -5241,8 +5249,9 @@ out:
  * This implements the fallocate's collapse range functionality for ext4
  * Returns: 0 and non-zero on error.
  */
-static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct address_space *mapping = inode->i_mapping;
        ext4_lblk_t punch_start, punch_stop;
@@ -5294,6 +5303,10 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        /* Wait for existing dio to complete */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /*
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
@@ -5387,8 +5400,9 @@ out_mutex:
  * by len bytes.
  * Returns 0 on success, error otherwise.
  */
-static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
+static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
 {
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        struct address_space *mapping = inode->i_mapping;
        handle_t *handle;
@@ -5445,6 +5459,10 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
        /* Wait for existing dio to complete */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /*
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
index 13740f2d0e6109a88bae9f83c0f1ff08bad99bce..646ece9b3455ffc04007f330974e3f2284e01bc1 100644 (file)
@@ -3953,12 +3953,14 @@ int ext4_break_layouts(struct inode *inode)
  * Returns: 0 on success or negative on failure
  */
 
-int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
 {
+       struct inode *inode = file_inode(file);
        struct super_block *sb = inode->i_sb;
        ext4_lblk_t first_block, stop_block;
        struct address_space *mapping = inode->i_mapping;
-       loff_t first_block_offset, last_block_offset;
+       loff_t first_block_offset, last_block_offset, max_length;
+       struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        handle_t *handle;
        unsigned int credits;
        int ret = 0, ret2 = 0;
@@ -4001,6 +4003,14 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
                   offset;
        }
 
+       /*
+        * For punch hole the length + offset needs to be within one block
+        * before last range. Adjust the length if it goes beyond that limit.
+        */
+       max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
+       if (offset + length > max_length)
+               length = max_length - offset;
+
        if (offset & (sb->s_blocksize - 1) ||
            (offset + length) & (sb->s_blocksize - 1)) {
                /*
@@ -4016,6 +4026,10 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
        /* Wait all existing dio workers, newcomers will block on i_rwsem */
        inode_dio_wait(inode);
 
+       ret = file_modified(file);
+       if (ret)
+               goto out_mutex;
+
        /*
         * Prevent page faults from reinstantiating pages we have released from
         * page cache.
index 992229ca2d83036f9f388cd8b14874949949f086..ba44fa1be70aba8b8dfa30935ee2d9cae2af2e90 100644 (file)
@@ -1652,3 +1652,19 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
 }
 #endif
+
+static void set_overhead(struct ext4_super_block *es, const void *arg)
+{
+       es->s_overhead_clusters = cpu_to_le32(*((unsigned long *) arg));
+}
+
+int ext4_update_overhead(struct super_block *sb)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
+       if (sb_rdonly(sb) || sbi->s_overhead == 0 ||
+           sbi->s_overhead == le32_to_cpu(sbi->s_es->s_overhead_clusters))
+               return 0;
+
+       return ext4_update_superblocks_fn(sb, set_overhead, &sbi->s_overhead);
+}
index e37da8d5cd0c1ef779a5bcb92e9c93740801b743..767b4bfe39c38974fb90767ad2bbcf3d0638d345 100644 (file)
@@ -1466,10 +1466,10 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 
        de = (struct ext4_dir_entry_2 *)search_buf;
        dlimit = search_buf + buf_size;
-       while ((char *) de < dlimit) {
+       while ((char *) de < dlimit - EXT4_BASE_DIR_LEN) {
                /* this code is executed quadratically often */
                /* do minimal checking `by hand' */
-               if ((char *) de + de->name_len <= dlimit &&
+               if (de->name + de->name_len <= dlimit &&
                    ext4_match(dir, fname, de)) {
                        /* found a match - just to be sure, do
                         * a full check */
index 495ce59fb4ad7781bc44bd8ca0ca5649a63915cd..14695e2b5042ba5e6dbb594ecff5238899c8d955 100644 (file)
@@ -134,8 +134,10 @@ static void ext4_finish_bio(struct bio *bio)
                                continue;
                        }
                        clear_buffer_async_write(bh);
-                       if (bio->bi_status)
+                       if (bio->bi_status) {
+                               set_buffer_write_io_error(bh);
                                buffer_io_error(bh);
+                       }
                } while ((bh = bh->b_this_page) != head);
                spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
                if (!under_io) {
index 81749eaddf4c1212ee3650a3bcf021fbcd532826..1466fbdbc8e345974b07c48c05aee1de79f0a1de 100644 (file)
@@ -1199,20 +1199,25 @@ static void ext4_put_super(struct super_block *sb)
        int aborted = 0;
        int i, err;
 
-       ext4_unregister_li_request(sb);
-       ext4_quota_off_umount(sb);
-
-       flush_work(&sbi->s_error_work);
-       destroy_workqueue(sbi->rsv_conversion_wq);
-       ext4_release_orphan_info(sb);
-
        /*
         * Unregister sysfs before destroying jbd2 journal.
         * Since we could still access attr_journal_task attribute via sysfs
         * path which could have sbi->s_journal->j_task as NULL
+        * Unregister sysfs before flush sbi->s_error_work.
+        * Since user may read /proc/fs/ext4/xx/mb_groups during umount, If
+        * read metadata verify failed then will queue error work.
+        * flush_stashed_error_work will call start_this_handle may trigger
+        * BUG_ON.
         */
        ext4_unregister_sysfs(sb);
 
+       ext4_unregister_li_request(sb);
+       ext4_quota_off_umount(sb);
+
+       flush_work(&sbi->s_error_work);
+       destroy_workqueue(sbi->rsv_conversion_wq);
+       ext4_release_orphan_info(sb);
+
        if (sbi->s_journal) {
                aborted = is_journal_aborted(sbi->s_journal);
                err = jbd2_journal_destroy(sbi->s_journal);
@@ -4172,9 +4177,11 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp,
        ext4_fsblk_t            first_block, last_block, b;
        ext4_group_t            i, ngroups = ext4_get_groups_count(sb);
        int                     s, j, count = 0;
+       int                     has_super = ext4_bg_has_super(sb, grp);
 
        if (!ext4_has_feature_bigalloc(sb))
-               return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) +
+               return (has_super + ext4_bg_num_gdb(sb, grp) +
+                       (has_super ? le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) : 0) +
                        sbi->s_itb_per_group + 2);
 
        first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
@@ -5282,9 +5289,18 @@ no_journal:
         * Get the # of file system overhead blocks from the
         * superblock if present.
         */
-       if (es->s_overhead_clusters)
-               sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
-       else {
+       sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
+       /* ignore the precalculated value if it is ridiculous */
+       if (sbi->s_overhead > ext4_blocks_count(es))
+               sbi->s_overhead = 0;
+       /*
+        * If the bigalloc feature is not enabled recalculating the
+        * overhead doesn't take long, so we might as well just redo
+        * it to make sure we are using the correct value.
+        */
+       if (!ext4_has_feature_bigalloc(sb))
+               sbi->s_overhead = 0;
+       if (sbi->s_overhead == 0) {
                err = ext4_calculate_overhead(sb);
                if (err)
                        goto failed_mount_wq;
@@ -5602,6 +5618,8 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
                ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
                         "Quota mode: %s.", descr, ext4_quota_mode(sb));
 
+       /* Update the s_overhead_clusters if necessary */
+       ext4_update_overhead(sb);
        return 0;
 
 free_sbi:
index f5366feea82dcfce0e33653e4001109b78755b7e..909085a78f9c3f0535d00e8c0bca06cf844d35fb 100644 (file)
@@ -98,9 +98,9 @@ repeat:
        }
 
        if (unlikely(!PageUptodate(page))) {
-               if (page->index == sbi->metapage_eio_ofs &&
-                       sbi->metapage_eio_cnt++ == MAX_RETRY_META_PAGE_EIO) {
-                       set_ckpt_flags(sbi, CP_ERROR_FLAG);
+               if (page->index == sbi->metapage_eio_ofs) {
+                       if (sbi->metapage_eio_cnt++ == MAX_RETRY_META_PAGE_EIO)
+                               set_ckpt_flags(sbi, CP_ERROR_FLAG);
                } else {
                        sbi->metapage_eio_ofs = page->index;
                        sbi->metapage_eio_cnt = 0;
index 8e0c2e773c8d92f460622673a5731e66554f19dc..9a1a526f20920bb7dd24fca86724c4c4ecb19fb5 100644 (file)
@@ -388,11 +388,23 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
        return 0;
 }
 
-static void __attach_io_flag(struct f2fs_io_info *fio, unsigned int io_flag)
+static unsigned int f2fs_io_flags(struct f2fs_io_info *fio)
 {
        unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
-       unsigned int fua_flag = io_flag & temp_mask;
-       unsigned int meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
+       unsigned int fua_flag, meta_flag, io_flag;
+       unsigned int op_flags = 0;
+
+       if (fio->op != REQ_OP_WRITE)
+               return 0;
+       if (fio->type == DATA)
+               io_flag = fio->sbi->data_io_flag;
+       else if (fio->type == NODE)
+               io_flag = fio->sbi->node_io_flag;
+       else
+               return 0;
+
+       fua_flag = io_flag & temp_mask;
+       meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
 
        /*
         * data/node io flag bits per temp:
@@ -401,9 +413,10 @@ static void __attach_io_flag(struct f2fs_io_info *fio, unsigned int io_flag)
         * Cold | Warm | Hot | Cold | Warm | Hot |
         */
        if ((1 << fio->temp) & meta_flag)
-               fio->op_flags |= REQ_META;
+               op_flags |= REQ_META;
        if ((1 << fio->temp) & fua_flag)
-               fio->op_flags |= REQ_FUA;
+               op_flags |= REQ_FUA;
+       return op_flags;
 }
 
 static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
@@ -413,14 +426,10 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
        sector_t sector;
        struct bio *bio;
 
-       if (fio->type == DATA)
-               __attach_io_flag(fio, sbi->data_io_flag);
-       else if (fio->type == NODE)
-               __attach_io_flag(fio, sbi->node_io_flag);
-
        bdev = f2fs_target_device(sbi, fio->new_blkaddr, &sector);
-       bio = bio_alloc_bioset(bdev, npages, fio->op | fio->op_flags, GFP_NOIO,
-                              &f2fs_bioset);
+       bio = bio_alloc_bioset(bdev, npages,
+                               fio->op | fio->op_flags | f2fs_io_flags(fio),
+                               GFP_NOIO, &f2fs_bioset);
        bio->bi_iter.bi_sector = sector;
        if (is_read_io(fio->op)) {
                bio->bi_end_io = f2fs_read_end_io;
index cd1e65bcf0b043d6e892f04522aa97556e8c62f9..8c570de21ed5aaa0b98212cb44b9f584c68ee052 100644 (file)
@@ -154,7 +154,6 @@ struct f2fs_mount_info {
        int s_jquota_fmt;                       /* Format of quota to use */
 #endif
        /* For which write hints are passed down to block layer */
-       int whint_mode;
        int alloc_mode;                 /* segment allocation policy */
        int fsync_mode;                 /* fsync policy */
        int fs_mode;                    /* fs mode: LFS or ADAPTIVE */
@@ -1333,12 +1332,6 @@ enum {
        FS_MODE_FRAGMENT_BLK,           /* block fragmentation mode */
 };
 
-enum {
-       WHINT_MODE_OFF,         /* not pass down write hints */
-       WHINT_MODE_USER,        /* try to pass down hints given by users */
-       WHINT_MODE_FS,          /* pass down hints with F2FS policy */
-};
-
 enum {
        ALLOC_MODE_DEFAULT,     /* stay default */
        ALLOC_MODE_REUSE,       /* reuse segments as much as possible */
@@ -3657,8 +3650,6 @@ void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
 int __init f2fs_create_segment_manager_caches(void);
 void f2fs_destroy_segment_manager_caches(void);
 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
-enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
-                       enum page_type type, enum temp_type temp);
 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
                        unsigned int segno);
 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
index 71f232dcf3c20665a0dea421797f598a2d77a067..83639238a1fe9ed4225cba8642963b70d29b05ab 100644 (file)
@@ -550,7 +550,8 @@ make_now:
        }
        f2fs_set_inode_flags(inode);
 
-       if (file_should_truncate(inode)) {
+       if (file_should_truncate(inode) &&
+                       !is_sbi_flag_set(sbi, SBI_POR_DOING)) {
                ret = f2fs_truncate(inode);
                if (ret)
                        goto bad_inode;
index 22dfeb9915290120e9dbe190a231715d2bc51617..bd9731cdec565132d659dc31f4752e5c5fbcf5f2 100644 (file)
@@ -3243,101 +3243,6 @@ int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
        }
 }
 
-/* This returns write hints for each segment type. This hints will be
- * passed down to block layer. There are mapping tables which depend on
- * the mount option 'whint_mode'.
- *
- * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
- *
- * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
- *
- * User                  F2FS                     Block
- * ----                  ----                     -----
- *                       META                     WRITE_LIFE_NOT_SET
- *                       HOT_NODE                 "
- *                       WARM_NODE                "
- *                       COLD_NODE                "
- * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
- * extension list        "                        "
- *
- * -- buffered io
- * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE       "                        "
- * WRITE_LIFE_MEDIUM     "                        "
- * WRITE_LIFE_LONG       "                        "
- *
- * -- direct io
- * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
- * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
- * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
- *
- * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
- *
- * User                  F2FS                     Block
- * ----                  ----                     -----
- *                       META                     WRITE_LIFE_MEDIUM;
- *                       HOT_NODE                 WRITE_LIFE_NOT_SET
- *                       WARM_NODE                "
- *                       COLD_NODE                WRITE_LIFE_NONE
- * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
- * extension list        "                        "
- *
- * -- buffered io
- * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
- * WRITE_LIFE_NONE       "                        "
- * WRITE_LIFE_MEDIUM     "                        "
- * WRITE_LIFE_LONG       "                        "
- *
- * -- direct io
- * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
- * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
- * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
- * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
- * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
- * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
- */
-
-enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
-                               enum page_type type, enum temp_type temp)
-{
-       if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
-               if (type == DATA) {
-                       if (temp == WARM)
-                               return WRITE_LIFE_NOT_SET;
-                       else if (temp == HOT)
-                               return WRITE_LIFE_SHORT;
-                       else if (temp == COLD)
-                               return WRITE_LIFE_EXTREME;
-               } else {
-                       return WRITE_LIFE_NOT_SET;
-               }
-       } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
-               if (type == DATA) {
-                       if (temp == WARM)
-                               return WRITE_LIFE_LONG;
-                       else if (temp == HOT)
-                               return WRITE_LIFE_SHORT;
-                       else if (temp == COLD)
-                               return WRITE_LIFE_EXTREME;
-               } else if (type == NODE) {
-                       if (temp == WARM || temp == HOT)
-                               return WRITE_LIFE_NOT_SET;
-                       else if (temp == COLD)
-                               return WRITE_LIFE_NONE;
-               } else if (type == META) {
-                       return WRITE_LIFE_MEDIUM;
-               }
-       }
-       return WRITE_LIFE_NOT_SET;
-}
-
 static int __get_segment_type_2(struct f2fs_io_info *fio)
 {
        if (fio->type == DATA)
index ea939db18f88da34f01b8a79dafc0c881f202e23..4368f90571bd6194a992ba0bed1b7a109dcee597 100644 (file)
@@ -138,7 +138,6 @@ enum {
        Opt_jqfmt_vfsold,
        Opt_jqfmt_vfsv0,
        Opt_jqfmt_vfsv1,
-       Opt_whint,
        Opt_alloc,
        Opt_fsync,
        Opt_test_dummy_encryption,
@@ -214,7 +213,6 @@ static match_table_t f2fs_tokens = {
        {Opt_jqfmt_vfsold, "jqfmt=vfsold"},
        {Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
        {Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
-       {Opt_whint, "whint_mode=%s"},
        {Opt_alloc, "alloc_mode=%s"},
        {Opt_fsync, "fsync_mode=%s"},
        {Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
@@ -975,22 +973,6 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                        f2fs_info(sbi, "quota operations not supported");
                        break;
 #endif
-               case Opt_whint:
-                       name = match_strdup(&args[0]);
-                       if (!name)
-                               return -ENOMEM;
-                       if (!strcmp(name, "user-based")) {
-                               F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
-                       } else if (!strcmp(name, "off")) {
-                               F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
-                       } else if (!strcmp(name, "fs-based")) {
-                               F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
-                       } else {
-                               kfree(name);
-                               return -EINVAL;
-                       }
-                       kfree(name);
-                       break;
                case Opt_alloc:
                        name = match_strdup(&args[0]);
                        if (!name)
@@ -1328,12 +1310,6 @@ default_check:
                return -EINVAL;
        }
 
-       /* Not pass down write hints if the number of active logs is lesser
-        * than NR_CURSEG_PERSIST_TYPE.
-        */
-       if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
-               F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
-
        if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
                f2fs_err(sbi, "Allow to mount readonly mode only");
                return -EROFS;
@@ -1978,10 +1954,6 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
                seq_puts(seq, ",prjquota");
 #endif
        f2fs_show_quota_options(seq, sbi->sb);
-       if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
-               seq_printf(seq, ",whint_mode=%s", "user-based");
-       else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
-               seq_printf(seq, ",whint_mode=%s", "fs-based");
 
        fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
 
@@ -2033,7 +2005,6 @@ static void default_options(struct f2fs_sb_info *sbi)
                F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
 
        F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
-       F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
        F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
        F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
        F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
@@ -2314,8 +2285,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                need_stop_gc = true;
        }
 
-       if (*flags & SB_RDONLY ||
-               F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
+       if (*flags & SB_RDONLY) {
                sync_inodes_sb(sb);
 
                set_sbi_flag(sbi, SBI_IS_DIRTY);
index 22b41acfbbc39a008dddb61d2b66fee883cb06c2..48f01323c37c1b2d7e5bfb5b122569d64bc592d2 100644 (file)
@@ -899,10 +899,10 @@ retry:
        ret = gfs2_glock_nq(gh);
        if (ret)
                goto out_uninit;
-retry_under_glock:
        /* Silently fall back to buffered I/O when writing beyond EOF */
        if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode))
                goto out;
+retry_under_glock:
 
        from->nofault = true;
        ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
@@ -991,8 +991,6 @@ retry_under_glock:
                if (leftover != window_size) {
                        if (gfs2_holder_queued(&gh))
                                goto retry_under_glock;
-                       if (written)
-                               goto out_uninit;
                        goto retry;
                }
        }
@@ -1069,8 +1067,6 @@ retry_under_glock:
                        from->count = min(from->count, window_size - leftover);
                        if (gfs2_holder_queued(gh))
                                goto retry_under_glock;
-                       if (read && !(iocb->ki_flags & IOCB_DIRECT))
-                               goto out_uninit;
                        goto retry;
                }
        }
index 99c7477cee5c2b003934a86c4bec9d4a75fa08d9..dd3a088db11d1e9b501f087bb3e87426b666312d 100644 (file)
@@ -206,7 +206,7 @@ hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
-       info.high_limit = TASK_SIZE;
+       info.high_limit = arch_get_mmap_end(addr);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
@@ -222,7 +222,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
-       info.high_limit = current->mm->mmap_base;
+       info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);
@@ -237,7 +237,7 @@ hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = current->mm->mmap_base;
-               info.high_limit = TASK_SIZE;
+               info.high_limit = arch_get_mmap_end(addr);
                addr = vm_unmapped_area(&info);
        }
 
@@ -251,6 +251,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
+       const unsigned long mmap_end = arch_get_mmap_end(addr);
 
        if (len & ~huge_page_mask(h))
                return -EINVAL;
@@ -266,7 +267,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
-               if (TASK_SIZE - len >= addr &&
+               if (mmap_end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
index 4479013854d200c689b5864ce3b46d9170a6042a..7625b29153b923ad757bd41265876f937b6e7a38 100644 (file)
@@ -2797,11 +2797,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                /* order with io_complete_rw_iopoll(), e.g. ->result updates */
                if (!smp_load_acquire(&req->iopoll_completed))
                        break;
+               nr_events++;
                if (unlikely(req->flags & REQ_F_CQE_SKIP))
                        continue;
-
                __io_fill_cqe_req(req, req->result, io_put_kbuf(req, 0));
-               nr_events++;
        }
 
        if (unlikely(!nr_events))
@@ -3832,8 +3831,10 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_READ);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               kfree(iovec);
                return ret;
+       }
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
@@ -3958,8 +3959,10 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
                iovec = NULL;
        }
        ret = io_rw_init_file(req, FMODE_WRITE);
-       if (unlikely(ret))
+       if (unlikely(ret)) {
+               kfree(iovec);
                return ret;
+       }
        req->result = iov_iter_count(&s->iter);
 
        if (force_nonblock) {
index 5b9408e3b370d00d995e82934ad9eb96719c9b8b..ac7f067b7bddb794bc40344058602162eeda3739 100644 (file)
@@ -488,7 +488,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        jbd2_journal_wait_updates(journal);
 
        commit_transaction->t_state = T_SWITCH;
-       write_unlock(&journal->j_state_lock);
 
        J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
                        journal->j_max_transaction_buffers);
@@ -508,6 +507,8 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         * has reserved.  This is consistent with the existing behaviour
         * that multiple jbd2_journal_get_write_access() calls to the same
         * buffer are perfectly permissible.
+        * We use journal->j_state_lock here to serialize processing of
+        * t_reserved_list with eviction of buffers from journal_unmap_buffer().
         */
        while (commit_transaction->t_reserved_list) {
                jh = commit_transaction->t_reserved_list;
@@ -527,6 +528,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
                jbd2_journal_refile_buffer(journal, jh);
        }
 
+       write_unlock(&journal->j_state_lock);
        /*
         * Now try to drop any written-back buffers from the journal's
         * checkpoint lists.  We do this *before* commit because it potentially
index 60e7ac62c9172f5cac831b677f184f045a4a0245..1e2076a53bed59ee08f0d7b9cb0ccfd8c2bea56e 100644 (file)
@@ -158,19 +158,41 @@ out:
  * Return : windows path string or error
  */
 
-char *convert_to_nt_pathname(char *filename)
+char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+                            struct path *path)
 {
-       char *ab_pathname;
+       char *pathname, *ab_pathname, *nt_pathname;
+       int share_path_len = share->path_sz;
 
-       if (strlen(filename) == 0)
-               filename = "\\";
+       pathname = kmalloc(PATH_MAX, GFP_KERNEL);
+       if (!pathname)
+               return ERR_PTR(-EACCES);
 
-       ab_pathname = kstrdup(filename, GFP_KERNEL);
-       if (!ab_pathname)
-               return NULL;
+       ab_pathname = d_path(path, pathname, PATH_MAX);
+       if (IS_ERR(ab_pathname)) {
+               nt_pathname = ERR_PTR(-EACCES);
+               goto free_pathname;
+       }
+
+       if (strncmp(ab_pathname, share->path, share_path_len)) {
+               nt_pathname = ERR_PTR(-EACCES);
+               goto free_pathname;
+       }
+
+       nt_pathname = kzalloc(strlen(&ab_pathname[share_path_len]) + 2, GFP_KERNEL);
+       if (!nt_pathname) {
+               nt_pathname = ERR_PTR(-ENOMEM);
+               goto free_pathname;
+       }
+       if (ab_pathname[share_path_len] == '\0')
+               strcpy(nt_pathname, "/");
+       strcat(nt_pathname, &ab_pathname[share_path_len]);
+
+       ksmbd_conv_path_to_windows(nt_pathname);
 
-       ksmbd_conv_path_to_windows(ab_pathname);
-       return ab_pathname;
+free_pathname:
+       kfree(pathname);
+       return nt_pathname;
 }
 
 int get_nlink(struct kstat *st)
index 253366bd0951aa987d91c69143acc9a5d34efaa3..aae2a252945f871d80b1db861d711871fd05c860 100644 (file)
@@ -14,7 +14,8 @@ struct ksmbd_file;
 int match_pattern(const char *str, size_t len, const char *pattern);
 int ksmbd_validate_filename(char *filename);
 int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename);
+char *convert_to_nt_pathname(struct ksmbd_share_config *share,
+                            struct path *path);
 int get_nlink(struct kstat *st);
 void ksmbd_conv_path_to_unix(char *path);
 void ksmbd_strip_last_slash(char *path);
index 23871b18a4292275a9cf9f6ffde22307f249f6d5..8b5560574d4c59eb40b3d91c68c9e6473efd4f27 100644 (file)
@@ -1694,33 +1694,3 @@ out:
        read_unlock(&lease_list_lock);
        return ret_op;
 }
-
-int smb2_check_durable_oplock(struct ksmbd_file *fp,
-                             struct lease_ctx_info *lctx, char *name)
-{
-       struct oplock_info *opinfo = opinfo_get(fp);
-       int ret = 0;
-
-       if (opinfo && opinfo->is_lease) {
-               if (!lctx) {
-                       pr_err("open does not include lease\n");
-                       ret = -EBADF;
-                       goto out;
-               }
-               if (memcmp(opinfo->o_lease->lease_key, lctx->lease_key,
-                          SMB2_LEASE_KEY_SIZE)) {
-                       pr_err("invalid lease key\n");
-                       ret = -EBADF;
-                       goto out;
-               }
-               if (name && strcmp(fp->filename, name)) {
-                       pr_err("invalid name reconnect %s\n", name);
-                       ret = -EINVAL;
-                       goto out;
-               }
-       }
-out:
-       if (opinfo)
-               opinfo_put(opinfo);
-       return ret;
-}
index 0cf7a2b5bbc0657c44d49f52d0f92f638fb545e7..09753448f7798de7ff5af0bc1aa38096861d05b5 100644 (file)
@@ -124,6 +124,4 @@ struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
 int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
                        struct lease_ctx_info *lctx);
 void destroy_lease_table(struct ksmbd_conn *conn);
-int smb2_check_durable_oplock(struct ksmbd_file *fp,
-                             struct lease_ctx_info *lctx, char *name);
 #endif /* __KSMBD_OPLOCK_H */
index 3bf6c56c654cfbe806fd0ec634bca17f12bcb717..16c803a9d996f3f429b14f273f391d07576b2890 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/statfs.h>
 #include <linux/ethtool.h>
 #include <linux/falloc.h>
+#include <linux/mount.h>
 
 #include "glob.h"
 #include "smbfsctl.h"
@@ -2918,7 +2919,6 @@ int smb2_open(struct ksmbd_work *work)
                goto err_out;
        }
 
-       fp->filename = name;
        fp->cdoption = req->CreateDisposition;
        fp->daccess = daccess;
        fp->saccess = req->ShareAccess;
@@ -3270,14 +3270,13 @@ err_out1:
                if (!rsp->hdr.Status)
                        rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
 
-               if (!fp || !fp->filename)
-                       kfree(name);
                if (fp)
                        ksmbd_fd_put(work, fp);
                smb2_set_err_rsp(work);
                ksmbd_debug(SMB, "Error response: %x\n", rsp->hdr.Status);
        }
 
+       kfree(name);
        kfree(lc);
 
        return 0;
@@ -3895,8 +3894,6 @@ int smb2_query_dir(struct ksmbd_work *work)
                ksmbd_debug(SMB, "Search pattern is %s\n", srch_ptr);
        }
 
-       ksmbd_debug(SMB, "Directory name is %s\n", dir_fp->filename);
-
        if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
                ksmbd_debug(SMB, "Restart directory scan\n");
                generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
@@ -4390,9 +4387,9 @@ static int get_file_all_info(struct ksmbd_work *work,
                return -EACCES;
        }
 
-       filename = convert_to_nt_pathname(fp->filename);
-       if (!filename)
-               return -ENOMEM;
+       filename = convert_to_nt_pathname(work->tcon->share_conf, &fp->filp->f_path);
+       if (IS_ERR(filename))
+               return PTR_ERR(filename);
 
        inode = file_inode(fp->filp);
        generic_fillattr(file_mnt_user_ns(fp->filp), inode, &stat);
@@ -4999,15 +4996,17 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
        case FS_SECTOR_SIZE_INFORMATION:
        {
                struct smb3_fs_ss_info *info;
+               unsigned int sector_size =
+                       min_t(unsigned int, path.mnt->mnt_sb->s_blocksize, 4096);
 
                info = (struct smb3_fs_ss_info *)(rsp->Buffer);
 
-               info->LogicalBytesPerSector = cpu_to_le32(stfs.f_bsize);
+               info->LogicalBytesPerSector = cpu_to_le32(sector_size);
                info->PhysicalBytesPerSectorForAtomicity =
-                               cpu_to_le32(stfs.f_bsize);
-               info->PhysicalBytesPerSectorForPerf = cpu_to_le32(stfs.f_bsize);
+                               cpu_to_le32(sector_size);
+               info->PhysicalBytesPerSectorForPerf = cpu_to_le32(sector_size);
                info->FSEffPhysicalBytesPerSectorForAtomicity =
-                               cpu_to_le32(stfs.f_bsize);
+                               cpu_to_le32(sector_size);
                info->Flags = cpu_to_le32(SSINFO_FLAGS_ALIGNED_DEVICE |
                                    SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE);
                info->ByteOffsetForSectorAlignment = 0;
@@ -5683,8 +5682,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
                size = i_size_read(inode);
                rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
                if (rc) {
-                       pr_err("truncate failed! filename : %s, err %d\n",
-                              fp->filename, rc);
+                       pr_err("truncate failed!, err %d\n", rc);
                        return rc;
                }
                if (size < alloc_blks * 512)
@@ -5714,12 +5712,10 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
         * truncated range.
         */
        if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
-               ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
-                           fp->filename, newsize);
+               ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
                rc = ksmbd_vfs_truncate(work, fp, newsize);
                if (rc) {
-                       ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
-                                   fp->filename, rc);
+                       ksmbd_debug(SMB, "truncate failed!, err %d\n", rc);
                        if (rc != -EAGAIN)
                                rc = -EBADF;
                        return rc;
@@ -5765,8 +5761,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
        if (parent_fp) {
                if (parent_fp->daccess & FILE_DELETE_LE) {
                        pr_err("parent dir is opened with delete access\n");
+                       ksmbd_fd_put(work, parent_fp);
                        return -ESHARE;
                }
+               ksmbd_fd_put(work, parent_fp);
        }
 next:
        return smb2_rename(work, fp, user_ns, rename_info,
index 9cebb6ba555b6d69841d1ae2eb4beb3eb2d9c300..dcdd07c6efffd58250dd37868de3a7acbb2a5476 100644 (file)
@@ -398,8 +398,7 @@ int ksmbd_vfs_read(struct ksmbd_work *work, struct ksmbd_file *fp, size_t count,
 
        nbytes = kernel_read(filp, rbuf, count, pos);
        if (nbytes < 0) {
-               pr_err("smb read failed for (%s), err = %zd\n",
-                      fp->filename, nbytes);
+               pr_err("smb read failed, err = %zd\n", nbytes);
                return nbytes;
        }
 
@@ -875,8 +874,7 @@ int ksmbd_vfs_truncate(struct ksmbd_work *work,
 
        err = vfs_truncate(&filp->f_path, size);
        if (err)
-               pr_err("truncate failed for filename : %s err %d\n",
-                      fp->filename, err);
+               pr_err("truncate failed, err %d\n", err);
        return err;
 }
 
index 29c1db66bd0f73844775fe4c3c4966a19e2e9547..c4d59d2735f00fd490efe3e6fe36b635cfdebee8 100644 (file)
@@ -328,7 +328,6 @@ static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
                kfree(smb_lock);
        }
 
-       kfree(fp->filename);
        if (ksmbd_stream_fd(fp))
                kfree(fp->stream.name);
        kmem_cache_free(filp_cache, fp);
@@ -497,6 +496,7 @@ struct ksmbd_file *ksmbd_lookup_fd_inode(struct inode *inode)
        list_for_each_entry(lfp, &ci->m_fp_list, node) {
                if (inode == file_inode(lfp->filp)) {
                        atomic_dec(&ci->m_count);
+                       lfp = ksmbd_fp_get(lfp);
                        read_unlock(&ci->m_lock);
                        return lfp;
                }
index 36239ce31afd5af201befe17693f04cbb94106ac..fcb13413fa8d9366c12e5a0d506412d3e8159060 100644 (file)
@@ -62,7 +62,6 @@ struct ksmbd_inode {
 
 struct ksmbd_file {
        struct file                     *filp;
-       char                            *filename;
        u64                             persistent_id;
        u64                             volatile_id;
 
index a0a36bfa3aa0543b75da0c6784dbacb50f53d805..afe2b64b14f1fa8e8bd9fa1dc1b2726f2df87faf 100644 (file)
@@ -4058,10 +4058,22 @@ static int mount_setattr_prepare(struct mount_kattr *kattr, struct mount *mnt)
        if (err) {
                struct mount *p;
 
-               for (p = mnt; p != m; p = next_mnt(p, mnt)) {
+               /*
+                * If we had to call mnt_hold_writers() MNT_WRITE_HOLD will
+                * be set in @mnt_flags. The loop unsets MNT_WRITE_HOLD for all
+                * mounts and needs to take care to include the first mount.
+                */
+               for (p = mnt; p; p = next_mnt(p, mnt)) {
                        /* If we had to hold writers unblock them. */
                        if (p->mnt.mnt_flags & MNT_WRITE_HOLD)
                                mnt_unhold_writers(p);
+
+                       /*
+                        * We're done once the first mount we changed got
+                        * MNT_WRITE_HOLD unset.
+                        */
+                       if (p == m)
+                               break;
                }
        }
        return err;
index e1afb9e503e1679d562e0c17cc28bda04eefa45a..bf4e60871068205eb30a4909654717782d6bbe84 100644 (file)
@@ -406,7 +406,7 @@ xfs_buf_alloc_pages(
 STATIC int
 _xfs_buf_map_pages(
        struct xfs_buf          *bp,
-       uint                    flags)
+       xfs_buf_flags_t         flags)
 {
        ASSERT(bp->b_flags & _XBF_PAGES);
        if (bp->b_page_count == 1) {
@@ -868,7 +868,7 @@ xfs_buf_read_uncached(
        struct xfs_buftarg      *target,
        xfs_daddr_t             daddr,
        size_t                  numblks,
-       int                     flags,
+       xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp,
        const struct xfs_buf_ops *ops)
 {
@@ -903,7 +903,7 @@ int
 xfs_buf_get_uncached(
        struct xfs_buftarg      *target,
        size_t                  numblks,
-       int                     flags,
+       xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp)
 {
        int                     error;
index edcb6254fa6a87b3d7dbd88413670075dab0a19d..1ee3056ff9cfe9d476da9d5fe74a54e9f9d4d056 100644 (file)
@@ -22,28 +22,28 @@ struct xfs_buf;
 
 #define XFS_BUF_DADDR_NULL     ((xfs_daddr_t) (-1LL))
 
-#define XBF_READ        (1 << 0) /* buffer intended for reading from device */
-#define XBF_WRITE       (1 << 1) /* buffer intended for writing to device */
-#define XBF_READ_AHEAD  (1 << 2) /* asynchronous read-ahead */
-#define XBF_NO_IOACCT   (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
-#define XBF_ASYNC       (1 << 4) /* initiator will not wait for completion */
-#define XBF_DONE        (1 << 5) /* all pages in the buffer uptodate */
-#define XBF_STALE       (1 << 6) /* buffer has been staled, do not find it */
-#define XBF_WRITE_FAIL  (1 << 7) /* async writes have failed on this buffer */
+#define XBF_READ        (1u << 0) /* buffer intended for reading from device */
+#define XBF_WRITE       (1u << 1) /* buffer intended for writing to device */
+#define XBF_READ_AHEAD  (1u << 2) /* asynchronous read-ahead */
+#define XBF_NO_IOACCT   (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
+#define XBF_ASYNC       (1u << 4) /* initiator will not wait for completion */
+#define XBF_DONE        (1u << 5) /* all pages in the buffer uptodate */
+#define XBF_STALE       (1u << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL  (1u << 7) /* async writes have failed on this buffer */
 
 /* buffer type flags for write callbacks */
-#define _XBF_INODES     (1 << 16)/* inode buffer */
-#define _XBF_DQUOTS     (1 << 17)/* dquot buffer */
-#define _XBF_LOGRECOVERY        (1 << 18)/* log recovery buffer */
+#define _XBF_INODES     (1u << 16)/* inode buffer */
+#define _XBF_DQUOTS     (1u << 17)/* dquot buffer */
+#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */
 
 /* flags used only internally */
-#define _XBF_PAGES      (1 << 20)/* backed by refcounted pages */
-#define _XBF_KMEM       (1 << 21)/* backed by heap memory */
-#define _XBF_DELWRI_Q   (1 << 22)/* buffer on a delwri queue */
+#define _XBF_PAGES      (1u << 20)/* backed by refcounted pages */
+#define _XBF_KMEM       (1u << 21)/* backed by heap memory */
+#define _XBF_DELWRI_Q   (1u << 22)/* buffer on a delwri queue */
 
 /* flags used only as arguments to access routines */
-#define XBF_TRYLOCK     (1 << 30)/* lock requested, but do not wait */
-#define XBF_UNMAPPED    (1 << 31)/* do not map the buffer */
+#define XBF_TRYLOCK     (1u << 30)/* lock requested, but do not wait */
+#define XBF_UNMAPPED    (1u << 31)/* do not map the buffer */
 
 typedef unsigned int xfs_buf_flags_t;
 
@@ -58,7 +58,7 @@ typedef unsigned int xfs_buf_flags_t;
        { XBF_WRITE_FAIL,       "WRITE_FAIL" }, \
        { _XBF_INODES,          "INODES" }, \
        { _XBF_DQUOTS,          "DQUOTS" }, \
-       { _XBF_LOGRECOVERY,             "LOG_RECOVERY" }, \
+       { _XBF_LOGRECOVERY,     "LOG_RECOVERY" }, \
        { _XBF_PAGES,           "PAGES" }, \
        { _XBF_KMEM,            "KMEM" }, \
        { _XBF_DELWRI_Q,        "DELWRI_Q" }, \
@@ -247,11 +247,11 @@ xfs_buf_readahead(
        return xfs_buf_readahead_map(target, &map, 1, ops);
 }
 
-int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks, int flags,
-               struct xfs_buf **bpp);
+int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
+               xfs_buf_flags_t flags, struct xfs_buf **bpp);
 int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
-                         size_t numblks, int flags, struct xfs_buf **bpp,
-                         const struct xfs_buf_ops *ops);
+               size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
+               const struct xfs_buf_ops *ops);
 int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
 void xfs_buf_hold(struct xfs_buf *bp);
 
index 9de6205fe134a926838384e1b5a952cbc8ce63f2..39ae53efb3ab6c6c68ce342c2b5137f988e82d5c 100644 (file)
@@ -2594,14 +2594,13 @@ xfs_ifree_cluster(
 }
 
 /*
- * This is called to return an inode to the inode free list.
- * The inode should already be truncated to 0 length and have
- * no pages associated with it.  This routine also assumes that
- * the inode is already a part of the transaction.
+ * This is called to return an inode to the inode free list.  The inode should
+ * already be truncated to 0 length and have no pages associated with it.  This
+ * routine also assumes that the inode is already a part of the transaction.
  *
- * The on-disk copy of the inode will have been added to the list
- * of unlinked inodes in the AGI. We need to remove the inode from
- * that list atomically with respect to freeing it here.
+ * The on-disk copy of the inode will have been added to the list of unlinked
+ * inodes in the AGI. We need to remove the inode from that list atomically with
+ * respect to freeing it here.
  */
 int
 xfs_ifree(
@@ -2623,13 +2622,16 @@ xfs_ifree(
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
 
        /*
-        * Pull the on-disk inode from the AGI unlinked list.
+        * Free the inode first so that we guarantee that the AGI lock is going
+        * to be taken before we remove the inode from the unlinked list. This
+        * makes the AGI lock -> unlinked list modification order the same as
+        * used in O_TMPFILE creation.
         */
-       error = xfs_iunlink_remove(tp, pag, ip);
+       error = xfs_difree(tp, pag, ip->i_ino, &xic);
        if (error)
-               goto out;
+               return error;
 
-       error = xfs_difree(tp, pag, ip->i_ino, &xic);
+       error = xfs_iunlink_remove(tp, pag, ip);
        if (error)
                goto out;
 
index de177842b951cafd27dd22e166b1bd23a200d4de..0c82673238f4d81377fc999f187b35cd5cbb615b 100644 (file)
@@ -175,7 +175,7 @@ xfs_trans_get_buf(
        struct xfs_buftarg      *target,
        xfs_daddr_t             blkno,
        int                     numblks,
-       uint                    flags,
+       xfs_buf_flags_t         flags,
        struct xfs_buf          **bpp)
 {
        DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
index 3614c7834007dd0ea587f01778ee7c7b0460a4e6..e20e7c8414896c0fde07647c5065799a927ab03c 100644 (file)
@@ -35,6 +35,17 @@ static inline int zonefs_zone_mgmt(struct inode *inode,
 
        lockdep_assert_held(&zi->i_truncate_mutex);
 
+       /*
+        * With ZNS drives, closing an explicitly open zone that has not been
+        * written will change the zone state to "closed", that is, the zone
+        * will remain active. Since this can then cause failure of explicit
+        * open operation on other zones if the drive active zone resources
+        * are exceeded, make sure that the zone does not remain active by
+        * resetting it.
+        */
+       if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
+               op = REQ_OP_ZONE_RESET;
+
        trace_zonefs_zone_mgmt(inode, op);
        ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
                               zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
@@ -1142,6 +1153,7 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
        inode_init_once(&zi->i_vnode);
        mutex_init(&zi->i_truncate_mutex);
        zi->i_wr_refcnt = 0;
+       zi->i_flags = 0;
 
        return &zi->i_vnode;
 }
@@ -1293,12 +1305,13 @@ static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
        inc_nlink(parent);
 }
 
-static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
-                                  enum zonefs_ztype type)
+static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
+                                 enum zonefs_ztype type)
 {
        struct super_block *sb = inode->i_sb;
        struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       int ret = 0;
 
        inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
        inode->i_mode = S_IFREG | sbi->s_perm;
@@ -1323,6 +1336,22 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
        sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
        sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
        sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
+
+       /*
+        * For sequential zones, make sure that any open zone is closed first
+        * to ensure that the initial number of open zones is 0, in sync with
+        * the open zone accounting done when the mount option
+        * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+        */
+       if (type == ZONEFS_ZTYPE_SEQ &&
+           (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
+            zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
+               mutex_lock(&zi->i_truncate_mutex);
+               ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+               mutex_unlock(&zi->i_truncate_mutex);
+       }
+
+       return ret;
 }
 
 static struct dentry *zonefs_create_inode(struct dentry *parent,
@@ -1332,6 +1361,7 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
        struct inode *dir = d_inode(parent);
        struct dentry *dentry;
        struct inode *inode;
+       int ret;
 
        dentry = d_alloc_name(parent, name);
        if (!dentry)
@@ -1342,10 +1372,16 @@ static struct dentry *zonefs_create_inode(struct dentry *parent,
                goto dput;
 
        inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
-       if (zone)
-               zonefs_init_file_inode(inode, zone, type);
-       else
+       if (zone) {
+               ret = zonefs_init_file_inode(inode, zone, type);
+               if (ret) {
+                       iput(inode);
+                       goto dput;
+               }
+       } else {
                zonefs_init_dir_inode(dir, inode, type);
+       }
+
        d_add(dentry, inode);
        dir->i_size++;
 
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
deleted file mode 100644 (file)
index 19fa0b5..0000000
+++ /dev/null
@@ -1,266 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Pointer to dma-buf-mapped memory, plus helpers.
- */
-
-#ifndef __DMA_BUF_MAP_H__
-#define __DMA_BUF_MAP_H__
-
-#include <linux/io.h>
-#include <linux/string.h>
-
-/**
- * DOC: overview
- *
- * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
- * Depending on the location of the buffer, users may have to access it with
- * I/O operations or memory load/store operations. For example, copying to
- * system memory could be done with memcpy(), copying to I/O memory would be
- * done with memcpy_toio().
- *
- * .. code-block:: c
- *
- *     void *vaddr = ...; // pointer to system memory
- *     memcpy(vaddr, src, len);
- *
- *     void *vaddr_iomem = ...; // pointer to I/O memory
- *     memcpy_toio(vaddr, _iomem, src, len);
- *
- * When using dma-buf's vmap operation, the returned pointer is encoded as
- * :c:type:`struct dma_buf_map <dma_buf_map>`.
- * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
- * system or I/O memory and a flag that signals the required method of
- * accessing the buffer. Use the returned instance and the helper functions
- * to access the buffer's memory in the correct way.
- *
- * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
- * actually independent from the dma-buf infrastructure. When sharing buffers
- * among devices, drivers have to know the location of the memory to access
- * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
- * solves this problem for dma-buf and its users. If other drivers or
- * sub-systems require similar functionality, the type could be generalized
- * and moved to a more prominent header file.
- *
- * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
- * considered bad style. Rather then accessing its fields directly, use one
- * of the provided helper functions, or implement your own. For example,
- * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
- * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
- * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
- *
- * .. code-block:: c
- *
- *     struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
- *
- *     dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
- *
- * To set an address in I/O memory, use dma_buf_map_set_vaddr_iomem().
- *
- * .. code-block:: c
- *
- *     dma_buf_map_set_vaddr_iomem(&map, 0xdeadbeaf);
- *
- * Instances of struct dma_buf_map do not have to be cleaned up, but
- * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
- * always refer to system memory.
- *
- * .. code-block:: c
- *
- *     dma_buf_map_clear(&map);
- *
- * Test if a mapping is valid with either dma_buf_map_is_set() or
- * dma_buf_map_is_null().
- *
- * .. code-block:: c
- *
- *     if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
- *             // always true
- *
- * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
- * for equality with dma_buf_map_is_equal(). Mappings the point to different
- * memory spaces, system or I/O, are never equal. That's even true if both
- * spaces are located in the same address space, both mappings contain the
- * same address value, or both mappings refer to NULL.
- *
- * .. code-block:: c
- *
- *     struct dma_buf_map sys_map; // refers to system memory
- *     struct dma_buf_map io_map; // refers to I/O memory
- *
- *     if (dma_buf_map_is_equal(&sys_map, &io_map))
- *             // always false
- *
- * A set up instance of struct dma_buf_map can be used to access or manipulate
- * the buffer memory. Depending on the location of the memory, the provided
- * helpers will pick the correct operations. Data can be copied into the memory
- * with dma_buf_map_memcpy_to(). The address can be manipulated with
- * dma_buf_map_incr().
- *
- * .. code-block:: c
- *
- *     const void *src = ...; // source buffer
- *     size_t len = ...; // length of src
- *
- *     dma_buf_map_memcpy_to(&map, src, len);
- *     dma_buf_map_incr(&map, len); // go to first byte after the memcpy
- */
-
-/**
- * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
- * @vaddr_iomem:       The buffer's address if in I/O memory
- * @vaddr:             The buffer's address if in system memory
- * @is_iomem:          True if the dma-buf memory is located in I/O
- *                     memory, or false otherwise.
- */
-struct dma_buf_map {
-       union {
-               void __iomem *vaddr_iomem;
-               void *vaddr;
-       };
-       bool is_iomem;
-};
-
-/**
- * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
- * @vaddr_:    A system-memory address
- */
-#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
-       { \
-               .vaddr = (vaddr_), \
-               .is_iomem = false, \
-       }
-
-/**
- * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
- * @map:       The dma-buf mapping structure
- * @vaddr:     A system-memory address
- *
- * Sets the address and clears the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
-{
-       map->vaddr = vaddr;
-       map->is_iomem = false;
-}
-
-/**
- * dma_buf_map_set_vaddr_iomem - Sets a dma-buf mapping structure to an address in I/O memory
- * @map:               The dma-buf mapping structure
- * @vaddr_iomem:       An I/O-memory address
- *
- * Sets the address and the I/O-memory flag.
- */
-static inline void dma_buf_map_set_vaddr_iomem(struct dma_buf_map *map,
-                                              void __iomem *vaddr_iomem)
-{
-       map->vaddr_iomem = vaddr_iomem;
-       map->is_iomem = true;
-}
-
-/**
- * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
- * @lhs:       The dma-buf mapping structure
- * @rhs:       A dma-buf mapping structure to compare with
- *
- * Two dma-buf mapping structures are equal if they both refer to the same type of memory
- * and to the same address within that memory.
- *
- * Returns:
- * True is both structures are equal, or false otherwise.
- */
-static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
-                                       const struct dma_buf_map *rhs)
-{
-       if (lhs->is_iomem != rhs->is_iomem)
-               return false;
-       else if (lhs->is_iomem)
-               return lhs->vaddr_iomem == rhs->vaddr_iomem;
-       else
-               return lhs->vaddr == rhs->vaddr;
-}
-
-/**
- * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
- * @map:       The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping is NULL.
- *
- * Returns:
- * True if the mapping is NULL, or false otherwise.
- */
-static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
-{
-       if (map->is_iomem)
-               return !map->vaddr_iomem;
-       return !map->vaddr;
-}
-
-/**
- * dma_buf_map_is_set - Tests is the dma-buf mapping has been set
- * @map:       The dma-buf mapping structure
- *
- * Depending on the state of struct dma_buf_map.is_iomem, tests if the
- * mapping has been set.
- *
- * Returns:
- * True if the mapping is been set, or false otherwise.
- */
-static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
-{
-       return !dma_buf_map_is_null(map);
-}
-
-/**
- * dma_buf_map_clear - Clears a dma-buf mapping structure
- * @map:       The dma-buf mapping structure
- *
- * Clears all fields to zero; including struct dma_buf_map.is_iomem. So
- * mapping structures that were set to point to I/O memory are reset for
- * system memory. Pointers are cleared to NULL. This is the default.
- */
-static inline void dma_buf_map_clear(struct dma_buf_map *map)
-{
-       if (map->is_iomem) {
-               map->vaddr_iomem = NULL;
-               map->is_iomem = false;
-       } else {
-               map->vaddr = NULL;
-       }
-}
-
-/**
- * dma_buf_map_memcpy_to - Memcpy into dma-buf mapping
- * @dst:       The dma-buf mapping structure
- * @src:       The source buffer
- * @len:       The number of byte in src
- *
- * Copies data into a dma-buf mapping. The source buffer is in system
- * memory. Depending on the buffer's location, the helper picks the correct
- * method of accessing the memory.
- */
-static inline void dma_buf_map_memcpy_to(struct dma_buf_map *dst, const void *src, size_t len)
-{
-       if (dst->is_iomem)
-               memcpy_toio(dst->vaddr_iomem, src, len);
-       else
-               memcpy(dst->vaddr, src, len);
-}
-
-/**
- * dma_buf_map_incr - Increments the address stored in a dma-buf mapping
- * @map:       The dma-buf mapping structure
- * @incr:      The number of bytes to increment
- *
- * Increments the address stored in a dma-buf mapping. Depending on the
- * buffer's location, the correct value will be updated.
- */
-static inline void dma_buf_map_incr(struct dma_buf_map *map, size_t incr)
-{
-       if (map->is_iomem)
-               map->vaddr_iomem += incr;
-       else
-               map->vaddr += incr;
-}
-
-#endif /* __DMA_BUF_MAP_H__ */
index 53c1b6082a4cd9f42fc14e8259e4818b8c7e3e20..ac2a1d758a80eb4177b599f7780bfe4106021db0 100644 (file)
@@ -169,6 +169,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
                                                long freed);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags);
 void putback_active_hugepage(struct page *page);
 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 void free_huge_page(struct page *page);
@@ -378,6 +379,11 @@ static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
        return 0;
 }
 
+static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       return 0;
+}
+
 static inline void putback_active_hugepage(struct page *page)
 {
 }
index a890428bcc1a2351b0266c5274dbd5f329a37329..fe6efb24d151a6fa90cc817e6f99e633cca85d4c 100644 (file)
@@ -285,7 +285,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
        return buf;
 }
 
-extern int hex_to_bin(char ch);
+extern int hex_to_bin(unsigned char ch);
 extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
 extern char *bin2hex(char *dst, const void *src, size_t count);
 
index 3f9b22c4983a85704667a89e274c364326980574..34eed5f85ed607432bd40559d1f9c6356cb6f083 100644 (file)
@@ -315,7 +315,10 @@ struct kvm_vcpu {
        int cpu;
        int vcpu_id; /* id given by userspace at creation */
        int vcpu_idx; /* index in kvm->vcpus array */
-       int srcu_idx;
+       int ____srcu_idx; /* Don't use this directly.  You've been warned. */
+#ifdef CONFIG_PROVE_RCU
+       int srcu_depth;
+#endif
        int mode;
        u64 requests;
        unsigned long guest_debug;
@@ -840,6 +843,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
        unlikely(__ret);                                        \
 })
 
+static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PROVE_RCU
+       WARN_ONCE(vcpu->srcu_depth++,
+                 "KVM: Illegal vCPU srcu_idx LOCK, depth=%d", vcpu->srcu_depth - 1);
+#endif
+       vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+}
+
+static inline void kvm_vcpu_srcu_read_unlock(struct kvm_vcpu *vcpu)
+{
+       srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx);
+
+#ifdef CONFIG_PROVE_RCU
+       WARN_ONCE(--vcpu->srcu_depth,
+                 "KVM: Illegal vCPU srcu_idx UNLOCK, depth=%d", vcpu->srcu_depth);
+#endif
+}
+
 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
 {
        return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
@@ -2197,6 +2219,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start, unsigned long end);
 
+void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
+
 #ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
 int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
 #else
index a68dce3873fccf80a3308b5b7d1743bf001bd2ee..89b14729d59f941ce851162e1fa184b217260466 100644 (file)
@@ -1012,6 +1012,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
 }
 
 void mem_cgroup_flush_stats(void);
+void mem_cgroup_flush_stats_delayed(void);
 
 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
                              int val);
@@ -1455,6 +1456,10 @@ static inline void mem_cgroup_flush_stats(void)
 {
 }
 
+static inline void mem_cgroup_flush_stats_delayed(void)
+{
+}
+
 static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec,
                                            enum node_stat_item idx, int val)
 {
index e34edb77533427f63c4563a33110862fad03413e..9f44254af8ce9e6d229adfe027e3ba5955ce50e8 100644 (file)
@@ -3197,6 +3197,14 @@ extern int sysctl_memory_failure_recovery;
 extern void shake_page(struct page *p);
 extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(unsigned long pfn, int flags);
+#ifdef CONFIG_MEMORY_FAILURE
+extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags);
+#else
+static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       return 0;
+}
+#endif
 
 #ifndef arch_memory_failure
 static inline int arch_memory_failure(unsigned long pfn, int flags)
index 151607e9d64ad0d9f9a2c46849f93591f8a777b0..955aee14b0f7e5791ec6348cbe9d6d3d1c1e94d4 100644 (file)
@@ -389,10 +389,8 @@ struct mtd_info {
        /* List of partitions attached to this MTD device */
        struct list_head partitions;
 
-       union {
-               struct mtd_part part;
-               struct mtd_master master;
-       };
+       struct mtd_part part;
+       struct mtd_master master;
 };
 
 static inline struct mtd_info *mtd_get_master(struct mtd_info *mtd)
index d5e3c00b74e12546c44faad9e00f85a8501ccf98..a8911b1f35aad335ddcf580cfa39a9edffb29b5e 100644 (file)
@@ -1443,6 +1443,7 @@ struct task_struct {
        int                             pagefault_disabled;
 #ifdef CONFIG_MMU
        struct task_struct              *oom_reaper_list;
+       struct timer_list               oom_reaper_timer;
 #endif
 #ifdef CONFIG_VMAP_STACK
        struct vm_struct                *stack_vm_area;
index a80356e9dc69ab5723a41654e37cc869e045e319..1ad1f4bfa02556554db80ae5a493be3d08ce994e 100644 (file)
@@ -136,6 +136,14 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 #endif /* CONFIG_MEMCG */
 
 #ifdef CONFIG_MMU
+#ifndef arch_get_mmap_end
+#define arch_get_mmap_end(addr)        (TASK_SIZE)
+#endif
+
+#ifndef arch_get_mmap_base
+#define arch_get_mmap_base(addr, base) (base)
+#endif
+
 extern void arch_pick_mmap_layout(struct mm_struct *mm,
                                  struct rlimit *rlim_stack);
 extern unsigned long
index a52080407b98c539760f3823cc2a3c0f1765de90..766dc6f009c0b7e2bc02bfde8dcf2de5048a1c3b 100644 (file)
@@ -179,6 +179,10 @@ struct snd_soc_component_driver {
                                  struct snd_pcm_hw_params *params);
        bool use_dai_pcm_id;    /* use DAI link PCM ID as PCM device number */
        int be_pcm_base;        /* base device ID for all BE PCMs */
+
+#ifdef CONFIG_DEBUG_FS
+       const char *debugfs_prefix;
+#endif
 };
 
 struct snd_soc_component {
index 4c14e8be7267761bf3e9e3aa76a1f1ebe4c11bbc..3a49913d006c9bf6e502ea209336d24d2924effd 100644 (file)
@@ -182,7 +182,7 @@ struct fb_fix_screeninfo {
  *
  * For pseudocolor: offset and length should be the same for all color
  * components. Offset specifies the position of the least significant bit
- * of the pallette index in a pixel value. Length indicates the number
+ * of the palette index in a pixel value. Length indicates the number
  * of available palette entries (i.e. # of entries = 1 << length).
  */
 struct fb_bitfield {
index 7989d9483ea75e2bbaaf78c1fd3d3bca741678ff..dff8e7f17074851211eba754d7eeb19da806931b 100644 (file)
 /* Select an area of screen to be copied */
 #define KEY_SELECTIVE_SCREENSHOT       0x27a
 
+/* Move the focus to the next or previous user controllable element within a UI container */
+#define KEY_NEXT_ELEMENT               0x27b
+#define KEY_PREVIOUS_ELEMENT           0x27c
+
+/* Toggle Autopilot engagement */
+#define KEY_AUTOPILOT_ENGAGE_TOGGLE    0x27d
+
+/* Shortcut Keys */
+#define KEY_MARK_WAYPOINT              0x27e
+#define KEY_SOS                                0x27f
+#define KEY_NAV_CHART                  0x280
+#define KEY_FISHING_CHART              0x281
+#define KEY_SINGLE_RANGE_RADAR         0x282
+#define KEY_DUAL_RANGE_RADAR           0x283
+#define KEY_RADAR_OVERLAY              0x284
+#define KEY_TRADITIONAL_SONAR          0x285
+#define KEY_CLEARVU_SONAR              0x286
+#define KEY_SIDEVU_SONAR               0x287
+#define KEY_NAV_INFO                   0x288
+#define KEY_BRIGHTNESS_MENU            0x289
+
 /*
  * Some keyboards have keys which do not have a defined meaning, these keys
  * are intended to be programmed / bound to macros by the user. For most
index 23bb19716ad3dc032aa3e22e16904eaea6b817cf..7858bafffa9d635d75326df9b46fad015005dae0 100644 (file)
@@ -6247,7 +6247,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 again:
        mutex_lock(&event->mmap_mutex);
        if (event->rb) {
-               if (event->rb->nr_pages != nr_pages) {
+               if (data_page_nr(event->rb) != nr_pages) {
                        ret = -EINVAL;
                        goto unlock;
                }
index 082832738c8fd97c9382fd70cc5266e1a6fcf082..5150d5f84c033e5c2e7594db13e3d40f334d4eb3 100644 (file)
@@ -116,6 +116,11 @@ static inline int page_order(struct perf_buffer *rb)
 }
 #endif
 
+static inline int data_page_nr(struct perf_buffer *rb)
+{
+       return rb->nr_pages << page_order(rb);
+}
+
 static inline unsigned long perf_data_size(struct perf_buffer *rb)
 {
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
index 52868716ec358673c3c67fb130278f162c895283..fb35b926024caeda34d6678f4bc98db6b70069b2 100644 (file)
@@ -859,11 +859,6 @@ void rb_free(struct perf_buffer *rb)
 }
 
 #else
-static int data_page_nr(struct perf_buffer *rb)
-{
-       return rb->nr_pages << page_order(rb);
-}
-
 static struct page *
 __perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
 {
index 475524bd900ab73bb473d8428438a34141922c7a..b3732b210593086b6402090df6ed3cf1c4a53d1b 100644 (file)
@@ -475,8 +475,11 @@ static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
        vma->vm_flags |= VM_DONTEXPAND;
        for (off = 0; off < size; off += PAGE_SIZE) {
                page = vmalloc_to_page(kcov->area + off);
-               if (vm_insert_page(vma, vma->vm_start + off, page))
-                       WARN_ONCE(1, "vm_insert_page() failed");
+               res = vm_insert_page(vma, vma->vm_start + off, page);
+               if (res) {
+                       pr_warn_once("kcov: vm_insert_page() failed\n");
+                       return res;
+               }
        }
        return 0;
 exit:
index d4bd299d67abfe84e56cf99693adc9a12a0ae27d..a68482d66535588d0b8dd9b0738482fda17f2fb6 100644 (file)
@@ -3829,11 +3829,11 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 
        se->avg.runnable_sum = se->avg.runnable_avg * divider;
 
-       se->avg.load_sum = divider;
-       if (se_weight(se)) {
-               se->avg.load_sum =
-                       div_u64(se->avg.load_avg * se->avg.load_sum, se_weight(se));
-       }
+       se->avg.load_sum = se->avg.load_avg * divider;
+       if (se_weight(se) < se->avg.load_sum)
+               se->avg.load_sum = div_u64(se->avg.load_sum, se_weight(se));
+       else
+               se->avg.load_sum = 1;
 
        enqueue_load_avg(cfs_rq, se);
        cfs_rq->avg.util_avg += se->avg.util_avg;
index 9301578f98e8c6d4acb49df297aa5c47b590ea8b..06833d404398d747b41138fbd58e03ab5e5f688a 100644 (file)
@@ -22,15 +22,33 @@ EXPORT_SYMBOL(hex_asc_upper);
  *
  * hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
  * input.
+ *
+ * This function is used to load cryptographic keys, so it is coded in such a
+ * way that there are no conditions or memory accesses that depend on data.
+ *
+ * Explanation of the logic:
+ * (ch - '9' - 1) is negative if ch <= '9'
+ * ('0' - 1 - ch) is negative if ch >= '0'
+ * we "and" these two values, so the result is negative if ch is in the range
+ *     '0' ... '9'
+ * we are only interested in the sign, so we do a shift ">> 8"; note that right
+ *     shift of a negative value is implementation-defined, so we cast the
+ *     value to (unsigned) before the shift --- we have 0xffffff if ch is in
+ *     the range '0' ... '9', 0 otherwise
+ * we "and" this value with (ch - '0' + 1) --- we have a value 1 ... 10 if ch is
+ *     in the range '0' ... '9', 0 otherwise
+ * we add this value to -1 --- we have a value 0 ... 9 if ch is in the range '0'
+ *     ... '9', -1 otherwise
+ * the next line is similar to the previous one, but we need to decode both
+ *     uppercase and lowercase letters, so we use (ch & 0xdf), which converts
+ *     lowercase to uppercase
  */
-int hex_to_bin(char ch)
+int hex_to_bin(unsigned char ch)
 {
-       if ((ch >= '0') && (ch <= '9'))
-               return ch - '0';
-       ch = tolower(ch);
-       if ((ch >= 'a') && (ch <= 'f'))
-               return ch - 'a' + 10;
-       return -1;
+       unsigned char cu = ch & 0xdf;
+       return -1 +
+               ((ch - '0' +  1) & (unsigned)((ch - '9' - 1) & ('0' - 1 - ch)) >> 8) +
+               ((cu - 'A' + 11) & (unsigned)((cu - 'F' - 1) & ('A' - 1 - cu)) >> 8);
 }
 EXPORT_SYMBOL(hex_to_bin);
 
@@ -45,10 +63,13 @@ EXPORT_SYMBOL(hex_to_bin);
 int hex2bin(u8 *dst, const char *src, size_t count)
 {
        while (count--) {
-               int hi = hex_to_bin(*src++);
-               int lo = hex_to_bin(*src++);
+               int hi, lo;
 
-               if ((hi < 0) || (lo < 0))
+               hi = hex_to_bin(*src++);
+               if (unlikely(hi < 0))
+                       return -EINVAL;
+               lo = hex_to_bin(*src++);
+               if (unlikely(lo < 0))
                        return -EINVAL;
 
                *dst++ = (hi << 4) | lo;
index 4acc88ea7c21744e6b1faa54af6f2029545a1b32..54e646e8e6ee7ab9d46d7c6ed607ff0900eeb366 100644 (file)
@@ -207,6 +207,8 @@ static void *xas_descend(struct xa_state *xas, struct xa_node *node)
        if (xa_is_sibling(entry)) {
                offset = xa_to_sibling(entry);
                entry = xa_entry(xas->xa, node, offset);
+               if (node->shift && xa_is_node(entry))
+                       entry = XA_RETRY_ENTRY;
        }
 
        xas->xa_offset = offset;
index f8ca7cca3c1ab0f9dea7b67081f55bbdd0e2d0f0..3fc721789743e6adb95d3005067392c9d7b53dff 100644 (file)
@@ -6785,6 +6785,16 @@ int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
        return ret;
 }
 
+int get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       int ret;
+
+       spin_lock_irq(&hugetlb_lock);
+       ret = __get_huge_page_for_hwpoison(pfn, flags);
+       spin_unlock_irq(&hugetlb_lock);
+       return ret;
+}
+
 void putback_active_hugepage(struct page *page)
 {
        spin_lock_irq(&hugetlb_lock);
index 08291ed33e93af757e436b8e2efbe626d50c1f79..0a9def8ce5e8b6d68873231f418b85ac96b40bfa 100644 (file)
@@ -315,6 +315,13 @@ static void per_cpu_remove_cache(void *arg)
        struct qlist_head *q;
 
        q = this_cpu_ptr(&cpu_quarantine);
+       /*
+        * Ensure the ordering between the writing to q->offline and
+        * per_cpu_remove_cache.  Prevent cpu_quarantine from being corrupted
+        * by interrupt.
+        */
+       if (READ_ONCE(q->offline))
+               return;
        qlist_move_cache(q, &to_free, cache);
        qlist_free_all(&to_free, cache);
 }
index 725f767232207dc5c20c7630556ce087b9db3905..598fece89e2b735183103dcd66c04f060128693b 100644 (file)
@@ -587,6 +587,9 @@ static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
 static DEFINE_SPINLOCK(stats_flush_lock);
 static DEFINE_PER_CPU(unsigned int, stats_updates);
 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
+static u64 flush_next_time;
+
+#define FLUSH_TIME (2UL*HZ)
 
 /*
  * Accessors to ensure that preemption is disabled on PREEMPT_RT because it can
@@ -637,6 +640,7 @@ static void __mem_cgroup_flush_stats(void)
        if (!spin_trylock_irqsave(&stats_flush_lock, flag))
                return;
 
+       flush_next_time = jiffies_64 + 2*FLUSH_TIME;
        cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
        atomic_set(&stats_flush_threshold, 0);
        spin_unlock_irqrestore(&stats_flush_lock, flag);
@@ -648,10 +652,16 @@ void mem_cgroup_flush_stats(void)
                __mem_cgroup_flush_stats();
 }
 
+void mem_cgroup_flush_stats_delayed(void)
+{
+       if (time_after64(jiffies_64, flush_next_time))
+               mem_cgroup_flush_stats();
+}
+
 static void flush_memcg_stats_dwork(struct work_struct *w)
 {
        __mem_cgroup_flush_stats();
-       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
+       queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
 }
 
 /**
index dcb6bb9cf73152f99824cdb5db0c2e2028c3e1b4..27760c19bad754d6f3281f50885d824bdf6c184a 100644 (file)
@@ -1498,50 +1498,113 @@ static int try_to_split_thp_page(struct page *page, const char *msg)
        return 0;
 }
 
-static int memory_failure_hugetlb(unsigned long pfn, int flags)
+/*
+ * Called from hugetlb code with hugetlb_lock held.
+ *
+ * Return values:
+ *   0             - free hugepage
+ *   1             - in-use hugepage
+ *   2             - not a hugepage
+ *   -EBUSY        - the hugepage is busy (try to retry)
+ *   -EHWPOISON    - the hugepage is already hwpoisoned
+ */
+int __get_huge_page_for_hwpoison(unsigned long pfn, int flags)
+{
+       struct page *page = pfn_to_page(pfn);
+       struct page *head = compound_head(page);
+       int ret = 2;    /* fallback to normal page handling */
+       bool count_increased = false;
+
+       if (!PageHeadHuge(head))
+               goto out;
+
+       if (flags & MF_COUNT_INCREASED) {
+               ret = 1;
+               count_increased = true;
+       } else if (HPageFreed(head) || HPageMigratable(head)) {
+               ret = get_page_unless_zero(head);
+               if (ret)
+                       count_increased = true;
+       } else {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       if (TestSetPageHWPoison(head)) {
+               ret = -EHWPOISON;
+               goto out;
+       }
+
+       return ret;
+out:
+       if (count_increased)
+               put_page(head);
+       return ret;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * Taking refcount of hugetlb pages needs extra care about race conditions
+ * with basic operations like hugepage allocation/free/demotion.
+ * So some of prechecks for hwpoison (pinning, and testing/setting
+ * PageHWPoison) should be done in single hugetlb_lock range.
+ */
+static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
 {
-       struct page *p = pfn_to_page(pfn);
-       struct page *head = compound_head(p);
        int res;
+       struct page *p = pfn_to_page(pfn);
+       struct page *head;
        unsigned long page_flags;
+       bool retry = true;
 
-       if (TestSetPageHWPoison(head)) {
-               pr_err("Memory failure: %#lx: already hardware poisoned\n",
-                      pfn);
-               res = -EHWPOISON;
-               if (flags & MF_ACTION_REQUIRED)
+       *hugetlb = 1;
+retry:
+       res = get_huge_page_for_hwpoison(pfn, flags);
+       if (res == 2) { /* fallback to normal page handling */
+               *hugetlb = 0;
+               return 0;
+       } else if (res == -EHWPOISON) {
+               pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn);
+               if (flags & MF_ACTION_REQUIRED) {
+                       head = compound_head(p);
                        res = kill_accessing_process(current, page_to_pfn(head), flags);
+               }
+               return res;
+       } else if (res == -EBUSY) {
+               if (retry) {
+                       retry = false;
+                       goto retry;
+               }
+               action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
                return res;
        }
 
+       head = compound_head(p);
+       lock_page(head);
+
+       if (hwpoison_filter(p)) {
+               ClearPageHWPoison(head);
+               res = -EOPNOTSUPP;
+               goto out;
+       }
+
        num_poisoned_pages_inc();
 
-       if (!(flags & MF_COUNT_INCREASED)) {
-               res = get_hwpoison_page(p, flags);
-               if (!res) {
-                       lock_page(head);
-                       if (hwpoison_filter(p)) {
-                               if (TestClearPageHWPoison(head))
-                                       num_poisoned_pages_dec();
-                               unlock_page(head);
-                               return -EOPNOTSUPP;
-                       }
-                       unlock_page(head);
-                       res = MF_FAILED;
-                       if (__page_handle_poison(p)) {
-                               page_ref_inc(p);
-                               res = MF_RECOVERED;
-                       }
-                       action_result(pfn, MF_MSG_FREE_HUGE, res);
-                       return res == MF_RECOVERED ? 0 : -EBUSY;
-               } else if (res < 0) {
-                       action_result(pfn, MF_MSG_UNKNOWN, MF_IGNORED);
-                       return -EBUSY;
+       /*
+        * Handling free hugepage.  The possible race with hugepage allocation
+        * or demotion can be prevented by PageHWPoison flag.
+        */
+       if (res == 0) {
+               unlock_page(head);
+               res = MF_FAILED;
+               if (__page_handle_poison(p)) {
+                       page_ref_inc(p);
+                       res = MF_RECOVERED;
                }
+               action_result(pfn, MF_MSG_FREE_HUGE, res);
+               return res == MF_RECOVERED ? 0 : -EBUSY;
        }
 
-       lock_page(head);
-
        /*
         * The page could have changed compound pages due to race window.
         * If this happens just bail out.
@@ -1554,14 +1617,6 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
 
        page_flags = head->flags;
 
-       if (hwpoison_filter(p)) {
-               if (TestClearPageHWPoison(head))
-                       num_poisoned_pages_dec();
-               put_page(p);
-               res = -EOPNOTSUPP;
-               goto out;
-       }
-
        /*
         * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
         * simply disable it. In order to make it work properly, we need
@@ -1588,6 +1643,12 @@ out:
        unlock_page(head);
        return res;
 }
+#else
+static inline int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb)
+{
+       return 0;
+}
+#endif
 
 static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
                struct dev_pagemap *pgmap)
@@ -1712,6 +1773,7 @@ int memory_failure(unsigned long pfn, int flags)
        int res = 0;
        unsigned long page_flags;
        bool retry = true;
+       int hugetlb = 0;
 
        if (!sysctl_memory_failure_recovery)
                panic("Memory failure on page %lx", pfn);
@@ -1739,10 +1801,9 @@ int memory_failure(unsigned long pfn, int flags)
        }
 
 try_again:
-       if (PageHuge(p)) {
-               res = memory_failure_hugetlb(pfn, flags);
+       res = try_memory_failure_hugetlb(pfn, flags, &hugetlb);
+       if (hugetlb)
                goto unlock_mutex;
-       }
 
        if (TestSetPageHWPoison(p)) {
                pr_err("Memory failure: %#lx: already hardware poisoned\n",
@@ -1799,6 +1860,19 @@ try_again:
        }
 
        if (PageTransHuge(hpage)) {
+               /*
+                * Bail out before SetPageHasHWPoisoned() if hpage is
+                * huge_zero_page, although PG_has_hwpoisoned is not
+                * checked in set_huge_zero_page().
+                *
+                * TODO: Handle memory failure of huge_zero_page thoroughly.
+                */
+               if (is_huge_zero_page(hpage)) {
+                       action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
+                       res = -EBUSY;
+                       goto unlock_mutex;
+               }
+
                /*
                 * The flag must be set after the refcount is bumped
                 * otherwise it may race with THP split.
index 3aa839f81e63dc8645506af8c99c870b33afd6d8..313b57d55a634a3df997a3c60bfb6d8a29df41ed 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2117,14 +2117,6 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
        return addr;
 }
 
-#ifndef arch_get_mmap_end
-#define arch_get_mmap_end(addr)        (TASK_SIZE)
-#endif
-
-#ifndef arch_get_mmap_base
-#define arch_get_mmap_base(addr, base) (base)
-#endif
-
 /* Get an address range which is currently unmapped.
  * For shmat() with addr=0.
  *
index 459d195d2ff64bb71509189277c71e84d9725033..f45ff1b7626a62834073857f9529d5badd51a934 100644 (file)
@@ -1036,6 +1036,18 @@ int mmu_interval_notifier_insert_locked(
 }
 EXPORT_SYMBOL_GPL(mmu_interval_notifier_insert_locked);
 
+static bool
+mmu_interval_seq_released(struct mmu_notifier_subscriptions *subscriptions,
+                         unsigned long seq)
+{
+       bool ret;
+
+       spin_lock(&subscriptions->lock);
+       ret = subscriptions->invalidate_seq != seq;
+       spin_unlock(&subscriptions->lock);
+       return ret;
+}
+
 /**
  * mmu_interval_notifier_remove - Remove a interval notifier
  * @interval_sub: Interval subscription to unregister
@@ -1083,7 +1095,7 @@ void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub)
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
        if (seq)
                wait_event(subscriptions->wq,
-                          READ_ONCE(subscriptions->invalidate_seq) != seq);
+                          mmu_interval_seq_released(subscriptions, seq));
 
        /* pairs with mmgrab in mmu_interval_notifier_insert() */
        mmdrop(mm);
index 55a9e48a7a02681667f33e038bb209c13b1ccb3c..9d7afc2d959e4ca99e305c0439991cccdf5ba194 100644 (file)
@@ -226,6 +226,8 @@ void *vmalloc(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc);
 
+void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);
+
 /*
  *     vzalloc - allocate virtually contiguous memory with zero fill
  *
index 7ec38194f8e11c927ab5f9f3b7c89ebab33ef0e7..49d7df39b02d0faa8708e78a5b810f5514c4a7cf 100644 (file)
@@ -632,7 +632,7 @@ done:
         */
        set_bit(MMF_OOM_SKIP, &mm->flags);
 
-       /* Drop a reference taken by wake_oom_reaper */
+       /* Drop a reference taken by queue_oom_reaper */
        put_task_struct(tsk);
 }
 
@@ -644,12 +644,12 @@ static int oom_reaper(void *unused)
                struct task_struct *tsk = NULL;
 
                wait_event_freezable(oom_reaper_wait, oom_reaper_list != NULL);
-               spin_lock(&oom_reaper_lock);
+               spin_lock_irq(&oom_reaper_lock);
                if (oom_reaper_list != NULL) {
                        tsk = oom_reaper_list;
                        oom_reaper_list = tsk->oom_reaper_list;
                }
-               spin_unlock(&oom_reaper_lock);
+               spin_unlock_irq(&oom_reaper_lock);
 
                if (tsk)
                        oom_reap_task(tsk);
@@ -658,22 +658,48 @@ static int oom_reaper(void *unused)
        return 0;
 }
 
-static void wake_oom_reaper(struct task_struct *tsk)
+static void wake_oom_reaper(struct timer_list *timer)
 {
-       /* mm is already queued? */
-       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
-               return;
+       struct task_struct *tsk = container_of(timer, struct task_struct,
+                       oom_reaper_timer);
+       struct mm_struct *mm = tsk->signal->oom_mm;
+       unsigned long flags;
 
-       get_task_struct(tsk);
+       /* The victim managed to terminate on its own - see exit_mmap */
+       if (test_bit(MMF_OOM_SKIP, &mm->flags)) {
+               put_task_struct(tsk);
+               return;
+       }
 
-       spin_lock(&oom_reaper_lock);
+       spin_lock_irqsave(&oom_reaper_lock, flags);
        tsk->oom_reaper_list = oom_reaper_list;
        oom_reaper_list = tsk;
-       spin_unlock(&oom_reaper_lock);
+       spin_unlock_irqrestore(&oom_reaper_lock, flags);
        trace_wake_reaper(tsk->pid);
        wake_up(&oom_reaper_wait);
 }
 
+/*
+ * Give the OOM victim time to exit naturally before invoking the oom_reaping.
+ * The timers timeout is arbitrary... the longer it is, the longer the worst
+ * case scenario for the OOM can take. If it is too small, the oom_reaper can
+ * get in the way and release resources needed by the process exit path.
+ * e.g. The futex robust list can sit in Anon|Private memory that gets reaped
+ * before the exit path is able to wake the futex waiters.
+ */
+#define OOM_REAPER_DELAY (2*HZ)
+static void queue_oom_reaper(struct task_struct *tsk)
+{
+       /* mm is already queued? */
+       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
+               return;
+
+       get_task_struct(tsk);
+       timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
+       tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+       add_timer(&tsk->oom_reaper_timer);
+}
+
 static int __init oom_init(void)
 {
        oom_reaper_th = kthread_run(oom_reaper, NULL, "oom_reaper");
@@ -681,7 +707,7 @@ static int __init oom_init(void)
 }
 subsys_initcall(oom_init)
 #else
-static inline void wake_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk)
 {
 }
 #endif /* CONFIG_MMU */
@@ -932,7 +958,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
        rcu_read_unlock();
 
        if (can_oom_reap)
-               wake_oom_reaper(victim);
+               queue_oom_reaper(victim);
 
        mmdrop(mm);
        put_task_struct(victim);
@@ -968,7 +994,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
        task_lock(victim);
        if (task_will_free_mem(victim)) {
                mark_oom_victim(victim);
-               wake_oom_reaper(victim);
+               queue_oom_reaper(victim);
                task_unlock(victim);
                put_task_struct(victim);
                return;
@@ -1067,7 +1093,7 @@ bool out_of_memory(struct oom_control *oc)
         */
        if (task_will_free_mem(current)) {
                mark_oom_victim(current);
-               wake_oom_reaper(current);
+               queue_oom_reaper(current);
                return true;
        }
 
index 33ca8cab21e6ed6bc531af9dbba2bff5aedf8457..0e42038382c12503dac6a1fb9f370850bd41f86b 100644 (file)
@@ -8919,7 +8919,7 @@ void *__init alloc_large_system_hash(const char *tablename,
                                table = memblock_alloc_raw(size,
                                                           SMP_CACHE_BYTES);
                } else if (get_order(size) >= MAX_ORDER || hashdist) {
-                       table = __vmalloc(size, gfp_flags);
+                       table = vmalloc_huge(size, gfp_flags);
                        virt = true;
                        if (table)
                                huge = is_vm_area_hugepages(table);
index 0cb8e5ef17136743f710f0b54c4199075bb7404c..e9bb6db002aa0bdbebc981b4470ee1194d3a5f16 100644 (file)
@@ -72,12 +72,15 @@ int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
        _dst_pte = pte_mkdirty(_dst_pte);
        if (page_in_cache && !vm_shared)
                writable = false;
-       if (writable) {
-               if (wp_copy)
-                       _dst_pte = pte_mkuffd_wp(_dst_pte);
-               else
-                       _dst_pte = pte_mkwrite(_dst_pte);
-       }
+
+       /*
+        * Always mark a PTE as write-protected when needed, regardless of
+        * VM_WRITE, which the user might change.
+        */
+       if (wp_copy)
+               _dst_pte = pte_mkuffd_wp(_dst_pte);
+       else if (writable)
+               _dst_pte = pte_mkwrite(_dst_pte);
 
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
 
index 54e5e761a9a90ed383cc18a5ee5f85414ef2a923..3492a9e81aa3a28d73c6008ac601a32c1011aa40 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -592,8 +592,15 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
                return NULL;
        }
 
-       return __vmalloc_node(size, 1, flags, node,
-                       __builtin_return_address(0));
+       /*
+        * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
+        * since the callers already cannot assume anything
+        * about the resulting pointer, and cannot play
+        * protection games.
+        */
+       return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+                       flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
+                       node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kvmalloc_node);
 
index 07da85ae825b6455bc128d948092733d5043618c..cadfbb5155ea574ed3c139d4c9f8c37049da04b1 100644 (file)
@@ -2653,15 +2653,18 @@ static void __vunmap(const void *addr, int deallocate_pages)
        vm_remove_mappings(area, deallocate_pages);
 
        if (deallocate_pages) {
-               unsigned int page_order = vm_area_page_order(area);
-               int i, step = 1U << page_order;
+               int i;
 
-               for (i = 0; i < area->nr_pages; i += step) {
+               for (i = 0; i < area->nr_pages; i++) {
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       mod_memcg_page_state(page, MEMCG_VMALLOC, -step);
-                       __free_pages(page, page_order);
+                       mod_memcg_page_state(page, MEMCG_VMALLOC, -1);
+                       /*
+                        * High-order allocs for huge vmallocs are split, so
+                        * can be freed as an array of order-0 allocations
+                        */
+                       __free_pages(page, 0);
                        cond_resched();
                }
                atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
@@ -2914,12 +2917,7 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        if (nr != nr_pages_request)
                                break;
                }
-       } else
-               /*
-                * Compound pages required for remap_vmalloc_page if
-                * high-order pages.
-                */
-               gfp |= __GFP_COMP;
+       }
 
        /* High-order pages or fallback path if "bulk" fails. */
 
@@ -2933,6 +2931,15 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
                        page = alloc_pages_node(nid, gfp, order);
                if (unlikely(!page))
                        break;
+               /*
+                * Higher order allocations must be able to be treated as
+                * independent small pages by callers (as they can with
+                * small-page vmallocs). Some drivers do their own refcounting
+                * on vmalloc_to_page() pages, some use page->mapping,
+                * page->lru, etc.
+                */
+               if (order)
+                       split_page(page, order);
 
                /*
                 * Careful, we allocate and map page-order pages, but
@@ -2992,11 +2999,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
        atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
        if (gfp_mask & __GFP_ACCOUNT) {
-               int i, step = 1U << page_order;
+               int i;
 
-               for (i = 0; i < area->nr_pages; i += step)
-                       mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC,
-                                            step);
+               for (i = 0; i < area->nr_pages; i++)
+                       mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);
        }
 
        /*
index 8a3828acc0bfd9256bc10d3c8b5b939786fdac0b..592569a8974c4d41bda81095b1e51c32f6bc7444 100644 (file)
@@ -355,7 +355,7 @@ void workingset_refault(struct folio *folio, void *shadow)
 
        mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr);
 
-       mem_cgroup_flush_stats();
+       mem_cgroup_flush_stats_delayed();
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
index 48b8ed752b69c594f91c82be86c714a5deb5da46..3f35972e1cf750fe8226e6093f5ec91303759b97 100644 (file)
@@ -127,11 +127,10 @@ static int i915_gfx_present(struct pci_dev *hdac_pci)
                display_dev = pci_get_class(class, display_dev);
 
                if (display_dev && display_dev->vendor == PCI_VENDOR_ID_INTEL &&
-                   connectivity_check(display_dev, hdac_pci))
+                   connectivity_check(display_dev, hdac_pci)) {
+                       pci_dev_put(display_dev);
                        match = true;
-
-               pci_dev_put(display_dev);
-
+               }
        } while (!match && display_dev);
 
        return match;
index 8b0a16ba27d39dd641616b4ac60f5e3ba2cb221d..a8fe01764b254bd0ff15390ce2cc1b3fa21c064b 100644 (file)
@@ -424,6 +424,15 @@ static const struct config_entry config_table[] = {
                .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
                .device = 0x54c8,
        },
+       /* RaptorLake-P */
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51ca,
+       },
+       {
+               .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE,
+               .device = 0x51cb,
+       },
 #endif
 
 };
index 9c48f3a9e3d1ae88460455ae7405e6f9f6f3ba0d..164335d3c200928042f46c5d20c04614b6c61c5b 100644 (file)
@@ -1428,7 +1428,7 @@ void dmasound_deinit(void)
                unregister_sound_dsp(sq_unit);
 }
 
-static int dmasound_setup(char *str)
+static int __maybe_unused dmasound_setup(char *str)
 {
        int ints[6], size;
 
index 3e086eebf88d007b508e1aee2746ceebaf2bfd18..31fe41795571280ef6d6c29c637157edb0f321d5 100644 (file)
@@ -1395,7 +1395,7 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
 
  last_try:
        /* the last try; check the empty slots in pins */
-       for (i = 0; i < spec->num_nids; i++) {
+       for (i = 0; i < spec->pcm_used; i++) {
                if (!test_bit(i, &spec->pcm_bitmap))
                        return i;
        }
@@ -2325,7 +2325,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
         * dev_num is the device entry number in a pin
         */
 
-       if (codec->mst_no_extra_pcms)
+       if (spec->dyn_pcm_no_legacy && codec->mst_no_extra_pcms)
+               pcm_num = spec->num_cvts;
+       else if (codec->mst_no_extra_pcms)
                pcm_num = spec->num_nids;
        else
                pcm_num = spec->num_nids + spec->dev_num - 1;
@@ -4551,6 +4553,7 @@ HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI",   patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI",        patch_i915_icl_hdmi),
 HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi),
+HDA_CODEC_ENTRY(0x8086281f, "Raptorlake-P HDMI",       patch_i915_adlp_hdmi),
 HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",        patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",   patch_i915_byt_hdmi),
index 62fbf3772b4123db475cea1a53797eb05bd87ff5..4c0c593f3c0a9cf51e633cff630fa5fea1e55d66 100644 (file)
@@ -7006,6 +7006,7 @@ enum {
        ALC285_FIXUP_LEGION_Y9000X_AUTOMUTE,
        ALC287_FIXUP_LEGION_16ACHG6,
        ALC287_FIXUP_CS35L41_I2C_2,
+       ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_2,
        ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED,
        ALC245_FIXUP_CS35L41_SPI_4,
@@ -8769,6 +8770,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_i2c_two,
        },
+       [ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_i2c_two,
+               .chained = true,
+               .chain_id = ALC285_FIXUP_HP_MUTE_LED,
+       },
        [ALC245_FIXUP_CS35L41_SPI_2] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs35l41_fixup_spi_two,
@@ -9025,9 +9032,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8981, "HP Elite Dragonfly G3", ALC245_FIXUP_CS35L41_SPI_4),
        SND_PCI_QUIRK(0x103c, 0x898e, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x103c, 0x898f, "HP EliteBook 835 G9", ALC287_FIXUP_CS35L41_I2C_2),
-       SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8991, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8992, "HP EliteBook 845 G9", ALC287_FIXUP_CS35L41_I2C_2),
-       SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8994, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8995, "HP EliteBook 855 G9", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x103c, 0x89a4, "HP ProBook 440 G9", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89a6, "HP ProBook 450 G9", ALC236_FIXUP_HP_GPIO_LED),
@@ -9163,6 +9170,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x8562, "Clevo NH[57][0-9]RZ[Q]", ALC269_FIXUP_DMIC),
        SND_PCI_QUIRK(0x1558, 0x8668, "Clevo NP50B[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x866d, "Clevo NP5[05]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x867c, "Clevo NP7[01]PNP", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x867d, "Clevo NP7[01]PN[HJK]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8680, "Clevo NJ50LU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8686, "Clevo NH50[CZ]U", ALC256_FIXUP_MIC_NO_PRESENCE_AND_RESUME),
index 33e43013ff770c7b782f23e00cd2b9c215e263bf..0d639a33ad969a6b920a47b8523e0a6c084a5e47 100644 (file)
  */
 #undef ENABLE_MIC_INPUT
 
-static struct clk *mclk;
-
-static int at91sam9g20ek_set_bias_level(struct snd_soc_card *card,
-                                       struct snd_soc_dapm_context *dapm,
-                                       enum snd_soc_bias_level level)
-{
-       static int mclk_on;
-       int ret = 0;
-
-       switch (level) {
-       case SND_SOC_BIAS_ON:
-       case SND_SOC_BIAS_PREPARE:
-               if (!mclk_on)
-                       ret = clk_enable(mclk);
-               if (ret == 0)
-                       mclk_on = 1;
-               break;
-
-       case SND_SOC_BIAS_OFF:
-       case SND_SOC_BIAS_STANDBY:
-               if (mclk_on)
-                       clk_disable(mclk);
-               mclk_on = 0;
-               break;
-       }
-
-       return ret;
-}
-
 static const struct snd_soc_dapm_widget at91sam9g20ek_dapm_widgets[] = {
        SND_SOC_DAPM_MIC("Int Mic", NULL),
        SND_SOC_DAPM_SPK("Ext Spk", NULL),
@@ -135,7 +106,6 @@ static struct snd_soc_card snd_soc_at91sam9g20ek = {
        .owner = THIS_MODULE,
        .dai_link = &at91sam9g20ek_dai,
        .num_links = 1,
-       .set_bias_level = at91sam9g20ek_set_bias_level,
 
        .dapm_widgets = at91sam9g20ek_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(at91sam9g20ek_dapm_widgets),
@@ -148,7 +118,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct device_node *codec_np, *cpu_np;
-       struct clk *pllb;
        struct snd_soc_card *card = &snd_soc_at91sam9g20ek;
        int ret;
 
@@ -162,31 +131,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       /*
-        * Codec MCLK is supplied by PCK0 - set it up.
-        */
-       mclk = clk_get(NULL, "pck0");
-       if (IS_ERR(mclk)) {
-               dev_err(&pdev->dev, "Failed to get MCLK\n");
-               ret = PTR_ERR(mclk);
-               goto err;
-       }
-
-       pllb = clk_get(NULL, "pllb");
-       if (IS_ERR(pllb)) {
-               dev_err(&pdev->dev, "Failed to get PLLB\n");
-               ret = PTR_ERR(pllb);
-               goto err_mclk;
-       }
-       ret = clk_set_parent(mclk, pllb);
-       clk_put(pllb);
-       if (ret != 0) {
-               dev_err(&pdev->dev, "Failed to set MCLK parent\n");
-               goto err_mclk;
-       }
-
-       clk_set_rate(mclk, MCLK_RATE);
-
        card->dev = &pdev->dev;
 
        /* Parse device node info */
@@ -230,9 +174,6 @@ static int at91sam9g20ek_audio_probe(struct platform_device *pdev)
 
        return ret;
 
-err_mclk:
-       clk_put(mclk);
-       mclk = NULL;
 err:
        atmel_ssc_put_audio(0);
        return ret;
@@ -242,8 +183,6 @@ static int at91sam9g20ek_audio_remove(struct platform_device *pdev)
 {
        struct snd_soc_card *card = platform_get_drvdata(pdev);
 
-       clk_disable(mclk);
-       mclk = NULL;
        snd_soc_unregister_card(card);
        atmel_ssc_put_audio(0);
 
index e5a56bcbb223db92b5be11d95d3286723f8b3004..aa6823fbd1a4df01b9cc739e40eb8b0d82f9c5e2 100644 (file)
@@ -822,8 +822,8 @@ int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap)
        word_offset = otp_map_match->word_offset;
 
        for (i = 0; i < otp_map_match->num_elements; i++) {
-               dev_dbg(dev, "bitoffset= %d, word_offset=%d, bit_sum mod 32=%d\n",
-                       bit_offset, word_offset, bit_sum % 32);
+               dev_dbg(dev, "bitoffset= %d, word_offset=%d, bit_sum mod 32=%d otp_map[i].size = %d\n",
+                       bit_offset, word_offset, bit_sum % 32, otp_map[i].size);
                if (bit_offset + otp_map[i].size - 1 >= 32) {
                        otp_val = (otp_mem[word_offset] &
                                        GENMASK(31, bit_offset)) >> bit_offset;
@@ -831,12 +831,14 @@ int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap)
                                        GENMASK(bit_offset + otp_map[i].size - 33, 0)) <<
                                        (32 - bit_offset);
                        bit_offset += otp_map[i].size - 32;
-               } else {
+               } else if (bit_offset + otp_map[i].size - 1 >= 0) {
                        otp_val = (otp_mem[word_offset] &
                                   GENMASK(bit_offset + otp_map[i].size - 1, bit_offset)
                                  ) >> bit_offset;
                        bit_offset += otp_map[i].size;
-               }
+               } else /* both bit_offset and otp_map[i].size are 0 */
+                       otp_val = 0;
+
                bit_sum += otp_map[i].size;
 
                if (bit_offset == 32) {
index 6884ae505e33c150d5b1d8f161f98d4f6cb4c409..3143f9cd7277e4c81294ffe3014c95c866e1d32f 100644 (file)
@@ -3566,12 +3566,16 @@ static int rx_macro_probe(struct platform_device *pdev)
                return PTR_ERR(rx->pds);
 
        base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+       if (IS_ERR(base)) {
+               ret = PTR_ERR(base);
+               goto err;
+       }
 
        rx->regmap = devm_regmap_init_mmio(dev, base, &rx_regmap_config);
-       if (IS_ERR(rx->regmap))
-               return PTR_ERR(rx->regmap);
+       if (IS_ERR(rx->regmap)) {
+               ret = PTR_ERR(rx->regmap);
+               goto err;
+       }
 
        dev_set_drvdata(dev, rx);
 
@@ -3632,6 +3636,8 @@ err_mclk:
 err_dcodec:
        clk_disable_unprepare(rx->macro);
 err:
+       lpass_macro_pds_exit(rx->pds);
+
        return ret;
 }
 
index 714a411d53379efff549329b3c5ca7a7666dd971..55503ba480bb6350fb482f12e24c77581d0bb16a 100644 (file)
@@ -1828,8 +1828,10 @@ static int tx_macro_probe(struct platform_device *pdev)
                return PTR_ERR(tx->pds);
 
        base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
+       if (IS_ERR(base)) {
+               ret = PTR_ERR(base);
+               goto err;
+       }
 
        /* Update defaults for lpass sc7280 */
        if (of_device_is_compatible(np, "qcom,sc7280-lpass-tx-macro")) {
@@ -1846,8 +1848,10 @@ static int tx_macro_probe(struct platform_device *pdev)
        }
 
        tx->regmap = devm_regmap_init_mmio(dev, base, &tx_regmap_config);
-       if (IS_ERR(tx->regmap))
-               return PTR_ERR(tx->regmap);
+       if (IS_ERR(tx->regmap)) {
+               ret = PTR_ERR(tx->regmap);
+               goto err;
+       }
 
        dev_set_drvdata(dev, tx);
 
@@ -1907,6 +1911,8 @@ err_mclk:
 err_dcodec:
        clk_disable_unprepare(tx->macro);
 err:
+       lpass_macro_pds_exit(tx->pds);
+
        return ret;
 }
 
index f3cb596058e02ab680e5aafcc23710b71b0ea894..d18b56e6043305a3ae0e7670286727f6a97eb761 100644 (file)
@@ -1434,8 +1434,10 @@ static int va_macro_probe(struct platform_device *pdev)
                va->dmic_clk_div = VA_MACRO_CLK_DIV_2;
        } else {
                ret = va_macro_validate_dmic_sample_rate(sample_rate, va);
-               if (!ret)
-                       return -EINVAL;
+               if (!ret) {
+                       ret = -EINVAL;
+                       goto err;
+               }
        }
 
        base = devm_platform_ioremap_resource(pdev, 0);
@@ -1492,6 +1494,8 @@ err_mclk:
 err_dcodec:
        clk_disable_unprepare(va->macro);
 err:
+       lpass_macro_pds_exit(va->pds);
+
        return ret;
 }
 
index 9ad7fc0baf072678b40063b96fa8450738b06d70..20a07c92b2fc29d5749c21453f04d0020bc823ff 100644 (file)
@@ -1206,9 +1206,16 @@ static int msm8916_wcd_digital_probe(struct platform_device *pdev)
 
        dev_set_drvdata(dev, priv);
 
-       return devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
+       ret = devm_snd_soc_register_component(dev, &msm8916_wcd_digital,
                                      msm8916_wcd_digital_dai,
                                      ARRAY_SIZE(msm8916_wcd_digital_dai));
+       if (ret)
+               goto err_mclk;
+
+       return 0;
+
+err_mclk:
+       clk_disable_unprepare(priv->mclk);
 err_clk:
        clk_disable_unprepare(priv->ahbclk);
        return ret;
index 8fffe378618d0ec3f946e3d3985c3d2887638aae..cce6f4e7992f552f31c1bec749596047c7b79258 100644 (file)
@@ -489,7 +489,7 @@ static int rk817_platform_probe(struct platform_device *pdev)
 
        rk817_codec_parse_dt_property(&pdev->dev, rk817_codec_data);
 
-       rk817_codec_data->mclk = clk_get(pdev->dev.parent, "mclk");
+       rk817_codec_data->mclk = devm_clk_get(pdev->dev.parent, "mclk");
        if (IS_ERR(rk817_codec_data->mclk)) {
                dev_dbg(&pdev->dev, "Unable to get mclk\n");
                ret = -ENXIO;
index be68d573a4906406ac3ebebef450ede8a21eef63..2b6c6d6b9771e09b8e776f0c54db33890b57e543 100644 (file)
@@ -1100,6 +1100,15 @@ void rt5682_jack_detect_handler(struct work_struct *work)
                return;
        }
 
+       if (rt5682->is_sdw) {
+               if (pm_runtime_status_suspended(rt5682->slave->dev.parent)) {
+                       dev_dbg(&rt5682->slave->dev,
+                               "%s: parent device is pm_runtime_status_suspended, skipping jack detection\n",
+                               __func__);
+                       return;
+               }
+       }
+
        dapm = snd_soc_component_get_dapm(rt5682->component);
 
        snd_soc_dapm_mutex_lock(dapm);
@@ -2822,14 +2831,11 @@ static int rt5682_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
 
        for_each_component_dais(component, dai)
                if (dai->id == RT5682_AIF1)
-                       break;
-       if (!dai) {
-               dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
-                       RT5682_AIF1);
-               return -ENODEV;
-       }
+                       return rt5682_set_bclk1_ratio(dai, factor);
 
-       return rt5682_set_bclk1_ratio(dai, factor);
+       dev_err(rt5682->i2c_dev, "dai %d not found in component\n",
+               RT5682_AIF1);
+       return -ENODEV;
 }
 
 static const struct clk_ops rt5682_dai_clk_ops[RT5682_DAI_NUM_CLKS] = {
index 1cba8ec7cedb915e47ea8ec2d0acc7085fa0dd99..b55f3ac3a2673e9467cf83cbaf3651780757df6d 100644 (file)
@@ -2687,14 +2687,11 @@ static int rt5682s_bclk_set_rate(struct clk_hw *hw, unsigned long rate,
 
        for_each_component_dais(component, dai)
                if (dai->id == RT5682S_AIF1)
-                       break;
-       if (!dai) {
-               dev_err(component->dev, "dai %d not found in component\n",
-                       RT5682S_AIF1);
-               return -ENODEV;
-       }
+                       return rt5682s_set_bclk1_ratio(dai, factor);
 
-       return rt5682s_set_bclk1_ratio(dai, factor);
+       dev_err(component->dev, "dai %d not found in component\n",
+               RT5682S_AIF1);
+       return -ENODEV;
 }
 
 static const struct clk_ops rt5682s_dai_clk_ops[RT5682S_DAI_NUM_CLKS] = {
index 6770825d037a8a73a7cd43e80805acb834f6c8a4..ea25fd58d43a987cc9fa264497c7fd76bb6005d6 100644 (file)
@@ -245,6 +245,13 @@ static void rt711_jack_detect_handler(struct work_struct *work)
        if (!rt711->component->card->instantiated)
                return;
 
+       if (pm_runtime_status_suspended(rt711->slave->dev.parent)) {
+               dev_dbg(&rt711->slave->dev,
+                       "%s: parent device is pm_runtime_status_suspended, skipping jack detection\n",
+                       __func__);
+               return;
+       }
+
        reg = RT711_VERB_GET_PIN_SENSE | RT711_HP_OUT;
        ret = regmap_read(rt711->regmap, reg, &jack_status);
        if (ret < 0)
index 1e75e93cf28f285657a84f55bb5d9104ceba512a..6298ebe96e941a2bf31d7e0cbb1e861a0c6d3293 100644 (file)
@@ -1274,29 +1274,7 @@ static int wcd934x_set_sido_input_src(struct wcd934x_codec *wcd, int sido_src)
        if (sido_src == wcd->sido_input_src)
                return 0;
 
-       if (sido_src == SIDO_SOURCE_INTERNAL) {
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_HI_ACCU_EN_MASK, 0);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK, 0x0);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
-                                  WCD934X_ANA_RCO_BG_EN_MASK, 0);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_PRE_EN1_MASK,
-                                  WCD934X_ANA_BUCK_PRE_EN1_ENABLE);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_PRE_EN2_MASK,
-                                  WCD934X_ANA_BUCK_PRE_EN2_ENABLE);
-               usleep_range(100, 110);
-               regmap_update_bits(wcd->regmap, WCD934X_ANA_BUCK_CTL,
-                                  WCD934X_ANA_BUCK_HI_ACCU_EN_MASK,
-                                  WCD934X_ANA_BUCK_HI_ACCU_ENABLE);
-               usleep_range(100, 110);
-       } else if (sido_src == SIDO_SOURCE_RCO_BG) {
+       if (sido_src == SIDO_SOURCE_RCO_BG) {
                regmap_update_bits(wcd->regmap, WCD934X_ANA_RCO,
                                   WCD934X_ANA_RCO_BG_EN_MASK,
                                   WCD934X_ANA_RCO_BG_ENABLE);
@@ -1382,8 +1360,6 @@ static int wcd934x_disable_ana_bias_and_syclk(struct wcd934x_codec *wcd)
        regmap_update_bits(wcd->regmap, WCD934X_CLK_SYS_MCLK_PRG,
                           WCD934X_EXT_CLK_BUF_EN_MASK |
                           WCD934X_MCLK_EN_MASK, 0x0);
-       wcd934x_set_sido_input_src(wcd, SIDO_SOURCE_INTERNAL);
-
        regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
                           WCD934X_ANA_BIAS_EN_MASK, 0);
        regmap_update_bits(wcd->regmap, WCD934X_ANA_BIAS,
index 5d4949c2ec9baba2fe218138a7a2bc921d5cf8a5..b14c6d104e6d99ca73a84779aabdd6d260c01999 100644 (file)
@@ -602,7 +602,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
        ret = wm8731_reset(wm8731->regmap);
        if (ret < 0) {
                dev_err(dev, "Failed to issue reset: %d\n", ret);
-               goto err_regulator_enable;
+               goto err;
        }
 
        /* Clear POWEROFF, keep everything else disabled */
@@ -619,10 +619,7 @@ static int wm8731_hw_init(struct device *dev, struct wm8731_priv *wm8731)
 
        regcache_mark_dirty(wm8731->regmap);
 
-err_regulator_enable:
-       /* Regulators will be enabled by bias management */
-       regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
-
+err:
        return ret;
 }
 
@@ -760,21 +757,27 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
                ret = PTR_ERR(wm8731->regmap);
                dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
                        ret);
-               return ret;
+               goto err_regulator_enable;
        }
 
        ret = wm8731_hw_init(&i2c->dev, wm8731);
        if (ret != 0)
-               return ret;
+               goto err_regulator_enable;
 
        ret = devm_snd_soc_register_component(&i2c->dev,
                        &soc_component_dev_wm8731, &wm8731_dai, 1);
        if (ret != 0) {
                dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
-               return ret;
+               goto err_regulator_enable;
        }
 
        return 0;
+
+err_regulator_enable:
+       /* Regulators will be enabled by bias management */
+       regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies), wm8731->supplies);
+
+       return ret;
 }
 
 static const struct i2c_device_id wm8731_i2c_id[] = {
index 4650a6931a946cc89576e9c7833ad52a6391d7f0..ffc24afb5a7acc2a9ded4c5730421c9044b5030d 100644 (file)
@@ -372,7 +372,7 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
                        continue;
                if (ratio == 1 && !support_1_1_ratio)
                        continue;
-               else if (ratio & 1)
+               if ((ratio & 1) && ratio > 1)
                        continue;
 
                diff = abs((long)clk_rate - ratio * freq);
index 8e037835bc583f4d7e0a83119ad935bf2d9d242a..f2157944247f783cf8f4809d762201213919d9fe 100644 (file)
@@ -364,13 +364,15 @@ static int asoc_simple_set_tdm(struct snd_soc_dai *dai,
                                struct snd_pcm_hw_params *params)
 {
        int sample_bits = params_width(params);
-       int slot_width = simple_dai->slot_width;
-       int slot_count = simple_dai->slots;
+       int slot_width, slot_count;
        int i, ret;
 
        if (!simple_dai || !simple_dai->tdm_width_map)
                return 0;
 
+       slot_width = simple_dai->slot_width;
+       slot_count = simple_dai->slots;
+
        if (slot_width == 0)
                slot_width = sample_bits;
 
index 5e0529aa4f1d2880a4058ddd9b267402dccbdab8..9d617831dd206e2a6c60e07c93734d03ae3821a5 100644 (file)
 #define SOF_ES8336_SSP_CODEC(quirk)            ((quirk) & GENMASK(3, 0))
 #define SOF_ES8336_SSP_CODEC_MASK              (GENMASK(3, 0))
 
-#define SOF_ES8336_TGL_GPIO_QUIRK              BIT(4)
+#define SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK     BIT(4)
 #define SOF_ES8336_ENABLE_DMIC                 BIT(5)
 #define SOF_ES8336_JD_INVERTED                 BIT(6)
+#define SOF_ES8336_HEADPHONE_GPIO              BIT(7)
+#define SOC_ES8336_HEADSET_MIC1                        BIT(8)
 
 static unsigned long quirk;
 
@@ -39,7 +41,7 @@ MODULE_PARM_DESC(quirk, "Board-specific quirk override");
 
 struct sof_es8336_private {
        struct device *codec_dev;
-       struct gpio_desc *gpio_pa;
+       struct gpio_desc *gpio_speakers, *gpio_headphone;
        struct snd_soc_jack jack;
        struct list_head hdmi_pcm_list;
        bool speaker_en;
@@ -51,19 +53,31 @@ struct sof_hdmi_pcm {
        int device;
 };
 
-static const struct acpi_gpio_params pa_enable_gpio = { 0, 0, true };
-static const struct acpi_gpio_mapping acpi_es8336_gpios[] = {
-       { "pa-enable-gpios", &pa_enable_gpio, 1 },
+static const struct acpi_gpio_params enable_gpio0 = { 0, 0, true };
+static const struct acpi_gpio_params enable_gpio1 = { 1, 0, true };
+
+static const struct acpi_gpio_mapping acpi_speakers_enable_gpio0[] = {
+       { "speakers-enable-gpios", &enable_gpio0, 1 },
        { }
 };
 
-static const struct acpi_gpio_params quirk_pa_enable_gpio = { 1, 0, true };
-static const struct acpi_gpio_mapping quirk_acpi_es8336_gpios[] = {
-       { "pa-enable-gpios", &quirk_pa_enable_gpio, 1 },
+static const struct acpi_gpio_mapping acpi_speakers_enable_gpio1[] = {
+       { "speakers-enable-gpios", &enable_gpio1, 1 },
+};
+
+static const struct acpi_gpio_mapping acpi_enable_both_gpios[] = {
+       { "speakers-enable-gpios", &enable_gpio0, 1 },
+       { "headphone-enable-gpios", &enable_gpio1, 1 },
        { }
 };
 
-static const struct acpi_gpio_mapping *gpio_mapping = acpi_es8336_gpios;
+static const struct acpi_gpio_mapping acpi_enable_both_gpios_rev_order[] = {
+       { "speakers-enable-gpios", &enable_gpio1, 1 },
+       { "headphone-enable-gpios", &enable_gpio0, 1 },
+       { }
+};
+
+static const struct acpi_gpio_mapping *gpio_mapping = acpi_speakers_enable_gpio0;
 
 static void log_quirks(struct device *dev)
 {
@@ -71,10 +85,14 @@ static void log_quirks(struct device *dev)
        dev_info(dev, "quirk SSP%ld\n",  SOF_ES8336_SSP_CODEC(quirk));
        if (quirk & SOF_ES8336_ENABLE_DMIC)
                dev_info(dev, "quirk DMIC enabled\n");
-       if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
-               dev_info(dev, "quirk TGL GPIO enabled\n");
+       if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+               dev_info(dev, "Speakers GPIO1 quirk enabled\n");
+       if (quirk & SOF_ES8336_HEADPHONE_GPIO)
+               dev_info(dev, "quirk headphone GPIO enabled\n");
        if (quirk & SOF_ES8336_JD_INVERTED)
                dev_info(dev, "quirk JD inverted enabled\n");
+       if (quirk & SOC_ES8336_HEADSET_MIC1)
+               dev_info(dev, "quirk headset at mic1 port enabled\n");
 }
 
 static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
@@ -83,12 +101,23 @@ static int sof_es8316_speaker_power_event(struct snd_soc_dapm_widget *w,
        struct snd_soc_card *card = w->dapm->card;
        struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
 
+       if (priv->speaker_en == !SND_SOC_DAPM_EVENT_ON(event))
+               return 0;
+
+       priv->speaker_en = !SND_SOC_DAPM_EVENT_ON(event);
+
+       if (SND_SOC_DAPM_EVENT_ON(event))
+               msleep(70);
+
+       gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en);
+
+       if (!(quirk & SOF_ES8336_HEADPHONE_GPIO))
+               return 0;
+
        if (SND_SOC_DAPM_EVENT_ON(event))
-               priv->speaker_en = false;
-       else
-               priv->speaker_en = true;
+               msleep(70);
 
-       gpiod_set_value_cansleep(priv->gpio_pa, priv->speaker_en);
+       gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en);
 
        return 0;
 }
@@ -114,18 +143,23 @@ static const struct snd_soc_dapm_route sof_es8316_audio_map[] = {
 
        /*
         * There is no separate speaker output instead the speakers are muxed to
-        * the HP outputs. The mux is controlled by the "Speaker Power" supply.
+        * the HP outputs. The mux is controlled Speaker and/or headphone switch.
         */
        {"Speaker", NULL, "HPOL"},
        {"Speaker", NULL, "HPOR"},
        {"Speaker", NULL, "Speaker Power"},
 };
 
-static const struct snd_soc_dapm_route sof_es8316_intmic_in1_map[] = {
+static const struct snd_soc_dapm_route sof_es8316_headset_mic2_map[] = {
        {"MIC1", NULL, "Internal Mic"},
        {"MIC2", NULL, "Headset Mic"},
 };
 
+static const struct snd_soc_dapm_route sof_es8316_headset_mic1_map[] = {
+       {"MIC2", NULL, "Internal Mic"},
+       {"MIC1", NULL, "Headset Mic"},
+};
+
 static const struct snd_soc_dapm_route dmic_map[] = {
        /* digital mics */
        {"DMic", NULL, "SoC DMIC"},
@@ -199,8 +233,13 @@ static int sof_es8316_init(struct snd_soc_pcm_runtime *runtime)
 
        card->dapm.idle_bias_off = true;
 
-       custom_map = sof_es8316_intmic_in1_map;
-       num_routes = ARRAY_SIZE(sof_es8316_intmic_in1_map);
+       if (quirk & SOC_ES8336_HEADSET_MIC1) {
+               custom_map = sof_es8316_headset_mic1_map;
+               num_routes = ARRAY_SIZE(sof_es8316_headset_mic1_map);
+       } else {
+               custom_map = sof_es8316_headset_mic2_map;
+               num_routes = ARRAY_SIZE(sof_es8316_headset_mic2_map);
+       }
 
        ret = snd_soc_dapm_add_routes(&card->dapm, custom_map, num_routes);
        if (ret)
@@ -233,8 +272,14 @@ static int sof_es8336_quirk_cb(const struct dmi_system_id *id)
 {
        quirk = (unsigned long)id->driver_data;
 
-       if (quirk & SOF_ES8336_TGL_GPIO_QUIRK)
-               gpio_mapping = quirk_acpi_es8336_gpios;
+       if (quirk & SOF_ES8336_HEADPHONE_GPIO) {
+               if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+                       gpio_mapping = acpi_enable_both_gpios;
+               else
+                       gpio_mapping = acpi_enable_both_gpios_rev_order;
+       } else if (quirk & SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK) {
+               gpio_mapping = acpi_speakers_enable_gpio1;
+       }
 
        return 1;
 }
@@ -257,7 +302,16 @@ static const struct dmi_system_id sof_es8336_quirk_table[] = {
                        DMI_MATCH(DMI_SYS_VENDOR, "IP3 tech"),
                        DMI_MATCH(DMI_BOARD_NAME, "WN1"),
                },
-               .driver_data = (void *)(SOF_ES8336_TGL_GPIO_QUIRK)
+               .driver_data = (void *)(SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK)
+       },
+       {
+               .callback = sof_es8336_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HUAWEI"),
+                       DMI_MATCH(DMI_BOARD_NAME, "BOHB-WAX9-PCB-B2"),
+               },
+               .driver_data = (void *)(SOF_ES8336_HEADPHONE_GPIO |
+                                       SOC_ES8336_HEADSET_MIC1)
        },
        {}
 };
@@ -585,10 +639,17 @@ static int sof_es8336_probe(struct platform_device *pdev)
        if (ret)
                dev_warn(codec_dev, "unable to add GPIO mapping table\n");
 
-       priv->gpio_pa = gpiod_get_optional(codec_dev, "pa-enable", GPIOD_OUT_LOW);
-       if (IS_ERR(priv->gpio_pa)) {
-               ret = dev_err_probe(dev, PTR_ERR(priv->gpio_pa),
-                                   "could not get pa-enable GPIO\n");
+       priv->gpio_speakers = gpiod_get_optional(codec_dev, "speakers-enable", GPIOD_OUT_LOW);
+       if (IS_ERR(priv->gpio_speakers)) {
+               ret = dev_err_probe(dev, PTR_ERR(priv->gpio_speakers),
+                                   "could not get speakers-enable GPIO\n");
+               goto err_put_codec;
+       }
+
+       priv->gpio_headphone = gpiod_get_optional(codec_dev, "headphone-enable", GPIOD_OUT_LOW);
+       if (IS_ERR(priv->gpio_headphone)) {
+               ret = dev_err_probe(dev, PTR_ERR(priv->gpio_headphone),
+                                   "could not get headphone-enable GPIO\n");
                goto err_put_codec;
        }
 
@@ -604,7 +665,7 @@ static int sof_es8336_probe(struct platform_device *pdev)
 
        ret = devm_snd_soc_register_card(dev, card);
        if (ret) {
-               gpiod_put(priv->gpio_pa);
+               gpiod_put(priv->gpio_speakers);
                dev_err(dev, "snd_soc_register_card failed: %d\n", ret);
                goto err_put_codec;
        }
@@ -622,7 +683,7 @@ static int sof_es8336_remove(struct platform_device *pdev)
        struct snd_soc_card *card = platform_get_drvdata(pdev);
        struct sof_es8336_private *priv = snd_soc_card_get_drvdata(card);
 
-       gpiod_put(priv->gpio_pa);
+       gpiod_put(priv->gpio_speakers);
        device_remove_software_node(priv->codec_dev);
        put_device(priv->codec_dev);
 
index ebec4d15edaac945e5197512719f33a3aad88961..7126fcb63d9040f121800f00a84ae9f15d3e3664 100644 (file)
@@ -212,6 +212,19 @@ static const struct dmi_system_id sof_rt5682_quirk_table[] = {
                                        SOF_SSP_BT_OFFLOAD_PRESENT),
 
        },
+       {
+               .callback = sof_rt5682_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+                       DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+               },
+               .driver_data = (void *)(SOF_RT5682_MCLK_EN |
+                                       SOF_RT5682_SSP_CODEC(0) |
+                                       SOF_SPEAKER_AMP_PRESENT |
+                                       SOF_MAX98360A_SPEAKER_AMP_PRESENT |
+                                       SOF_RT5682_SSP_AMP(2) |
+                                       SOF_RT5682_NUM_HDMIDEV(4)),
+       },
        {}
 };
 
index 6edc9b7108cd58af79091eb5a610accdfb7ac64f..ef19150e7b2e9a72b10b568aa7a62d3fdb8ecdc4 100644 (file)
@@ -132,13 +132,13 @@ static const struct snd_soc_acpi_adr_device mx8373_1_adr[] = {
        {
                .adr = 0x000123019F837300ull,
                .num_endpoints = 1,
-               .endpoints = &spk_l_endpoint,
+               .endpoints = &spk_r_endpoint,
                .name_prefix = "Right"
        },
        {
                .adr = 0x000127019F837300ull,
                .num_endpoints = 1,
-               .endpoints = &spk_r_endpoint,
+               .endpoints = &spk_l_endpoint,
                .name_prefix = "Left"
        }
 };
index 27a6d3259c50ad6fa26a913cf7b5b45b8c186d97..22e181646bc39e2cf0c2c51a17e4e7f0fa827d68 100644 (file)
@@ -193,6 +193,9 @@ static const struct snd_soc_component_driver aiu_acodec_ctrl_component = {
        .of_xlate_dai_name      = aiu_acodec_of_xlate_dai_name,
        .endianness             = 1,
        .non_legacy_dai_naming  = 1,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_prefix         = "acodec",
+#endif
 };
 
 int aiu_acodec_ctrl_register_component(struct device *dev)
index c3ea733fce91fdbe79c05bc4ce53a1aed69a95ea..59ee66fc2bcd7e32a456a5fbb72b0a882397dc3a 100644 (file)
@@ -140,6 +140,9 @@ static const struct snd_soc_component_driver aiu_hdmi_ctrl_component = {
        .of_xlate_dai_name      = aiu_hdmi_of_xlate_dai_name,
        .endianness             = 1,
        .non_legacy_dai_naming  = 1,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_prefix         = "hdmi",
+#endif
 };
 
 int aiu_hdmi_ctrl_register_component(struct device *dev)
index d299a70db7e598300ec2aaac09101806ea0b305e..88e611e64d14f54fcd10caa5c5c0e49f43dec680 100644 (file)
@@ -103,6 +103,9 @@ static const struct snd_soc_component_driver aiu_cpu_component = {
        .pointer                = aiu_fifo_pointer,
        .probe                  = aiu_cpu_component_probe,
        .remove                 = aiu_cpu_component_remove,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_prefix         = "cpu",
+#endif
 };
 
 static struct snd_soc_dai_driver aiu_cpu_dai_drv[] = {
index ce153ac2c3ab6b95a307546596f14915f948391c..8c7da82a62cab057e5f532e3e3e79059480d3a9f 100644 (file)
@@ -2587,6 +2587,11 @@ int snd_soc_component_initialize(struct snd_soc_component *component,
        component->dev          = dev;
        component->driver       = driver;
 
+#ifdef CONFIG_DEBUG_FS
+       if (!component->debugfs_prefix)
+               component->debugfs_prefix = driver->debugfs_prefix;
+#endif
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(snd_soc_component_initialize);
index b435b5c4cfb7d43af4835b9694eece04f0fe31fa..ca917a849c423de2348542547868f0eccac1964d 100644 (file)
@@ -1687,8 +1687,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
                switch (w->id) {
                case snd_soc_dapm_pre:
                        if (!w->event)
-                               list_for_each_entry_safe_continue(w, n, list,
-                                                                 power_list);
+                               continue;
 
                        if (event == SND_SOC_DAPM_STREAM_START)
                                ret = w->event(w,
@@ -1700,8 +1699,7 @@ static void dapm_seq_run(struct snd_soc_card *card,
 
                case snd_soc_dapm_post:
                        if (!w->event)
-                               list_for_each_entry_safe_continue(w, n, list,
-                                                                 power_list);
+                               continue;
 
                        if (event == SND_SOC_DAPM_STREAM_START)
                                ret = w->event(w,
index 9a954680d492890133dfcb3ecb3e4fe4433262a3..11c9853e9e80775dc9801290e4f0ef2f9d7b7e94 100644 (file)
@@ -1214,7 +1214,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                be_substream->pcm->nonatomic = 1;
        }
 
-       dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_ATOMIC);
+       dpcm = kzalloc(sizeof(struct snd_soc_dpcm), GFP_KERNEL);
        if (!dpcm)
                return -ENOMEM;
 
index 72e50df7052c3a835c8a0e90f7892e7cd9d1b071..3bb90a8196504b67ddd87d9a8a3df4a01fff3fbb 100644 (file)
@@ -1436,12 +1436,12 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
        template.num_kcontrols = le32_to_cpu(w->num_kcontrols);
        kc = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(*kc), GFP_KERNEL);
        if (!kc)
-               goto err;
+               goto hdr_err;
 
        kcontrol_type = devm_kcalloc(tplg->dev, le32_to_cpu(w->num_kcontrols), sizeof(unsigned int),
                                     GFP_KERNEL);
        if (!kcontrol_type)
-               goto err;
+               goto hdr_err;
 
        for (i = 0; i < le32_to_cpu(w->num_kcontrols); i++) {
                control_hdr = (struct snd_soc_tplg_ctl_hdr *)tplg->pos;
index 4c95967428444e5ac3a5a3c97e6817d27dd11ec9..12f5cff224486dfdb00ebff18ee0eb5956c90a71 100644 (file)
@@ -83,7 +83,14 @@ static const struct dmi_system_id sof_tplg_table[] = {
                },
                .driver_data = "sof-adl-max98357a-rt5682-2way.tplg",
        },
-
+       {
+               .callback = sof_tplg_cb,
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "Google_Brya"),
+                       DMI_MATCH(DMI_OEM_STRING, "AUDIO-MAX98360_ALC5682I_I2S_AMP_SSP2"),
+               },
+               .driver_data = "sof-adl-max98357a-rt5682.tplg",
+       },
        {}
 };
 
index 9b11e9795a7a04378cfba08323991bad053ac166..3e5b319b44c754bfd567d8c4ce159db6b6335b1c 100644 (file)
@@ -904,8 +904,10 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
                return -ENOMEM;
 
        scontrol->name = kstrdup(hdr->name, GFP_KERNEL);
-       if (!scontrol->name)
+       if (!scontrol->name) {
+               kfree(scontrol);
                return -ENOMEM;
+       }
 
        scontrol->scomp = scomp;
        scontrol->access = kc->access;
@@ -941,11 +943,13 @@ static int sof_control_load(struct snd_soc_component *scomp, int index,
        default:
                dev_warn(scomp->dev, "control type not supported %d:%d:%d\n",
                         hdr->ops.get, hdr->ops.put, hdr->ops.info);
+               kfree(scontrol->name);
                kfree(scontrol);
                return 0;
        }
 
        if (ret < 0) {
+               kfree(scontrol->name);
                kfree(scontrol);
                return ret;
        }
@@ -1068,6 +1072,46 @@ static int sof_connect_dai_widget(struct snd_soc_component *scomp,
        return 0;
 }
 
+static void sof_disconnect_dai_widget(struct snd_soc_component *scomp,
+                                     struct snd_soc_dapm_widget *w)
+{
+       struct snd_soc_card *card = scomp->card;
+       struct snd_soc_pcm_runtime *rtd;
+       struct snd_soc_dai *cpu_dai;
+       int i;
+
+       if (!w->sname)
+               return;
+
+       list_for_each_entry(rtd, &card->rtd_list, list) {
+               /* does stream match DAI link ? */
+               if (!rtd->dai_link->stream_name ||
+                   strcmp(w->sname, rtd->dai_link->stream_name))
+                       continue;
+
+               switch (w->id) {
+               case snd_soc_dapm_dai_out:
+                       for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+                               if (cpu_dai->capture_widget == w) {
+                                       cpu_dai->capture_widget = NULL;
+                                       break;
+                               }
+                       }
+                       break;
+               case snd_soc_dapm_dai_in:
+                       for_each_rtd_cpu_dais(rtd, i, cpu_dai) {
+                               if (cpu_dai->playback_widget == w) {
+                                       cpu_dai->playback_widget = NULL;
+                                       break;
+                               }
+                       }
+                       break;
+               default:
+                       break;
+               }
+       }
+}
+
 /* bind PCM ID to host component ID */
 static int spcm_bind(struct snd_soc_component *scomp, struct snd_sof_pcm *spcm,
                     int dir)
@@ -1353,6 +1397,9 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
 
                if (dai)
                        list_del(&dai->list);
+
+               sof_disconnect_dai_widget(scomp, widget);
+
                break;
        default:
                break;
@@ -1380,6 +1427,7 @@ static int sof_widget_unload(struct snd_soc_component *scomp,
                }
                kfree(scontrol->ipc_control_data);
                list_del(&scontrol->list);
+               kfree(scontrol->name);
                kfree(scontrol);
        }
 
index 2c01649c70f619d6e9994e81bdc16c171bb6e471..7c6ca2b433a53ee718b093b768e2e442ac9f6c3a 100644 (file)
@@ -1194,6 +1194,7 @@ static void snd_usbmidi_output_drain(struct snd_rawmidi_substream *substream)
                } while (drain_urbs && timeout);
                finish_wait(&ep->drain_wait, &wait);
        }
+       port->active = 0;
        spin_unlock_irq(&ep->buffer_lock);
 }
 
index 64f5544d0a0aa624f01f781c143db72865f353dc..7ef7a8abcc2b11ac0eadeaeda7676ffafa5c01f9 100644 (file)
@@ -599,6 +599,10 @@ static const struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x0db0, 0x419c),
                .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
        },
+       {       /* MSI MAG X570S Torpedo Max */
+               .id = USB_ID(0x0db0, 0xa073),
+               .map = msi_mpg_x570s_carbon_max_wifi_alc4080_map,
+       },
        {       /* MSI TRX40 */
                .id = USB_ID(0x0db0, 0x543d),
                .map = trx40_mobo_map,
index f41d8a0eb1a4204bf312d89de69a066b7d2a0ae1..0616409513eb7947409f23045dd6395a9b68ce0e 100644 (file)
@@ -28,7 +28,13 @@ static inline void *kzalloc(size_t size, gfp_t gfp)
        return kmalloc(size, gfp | __GFP_ZERO);
 }
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int flags);
+struct list_lru;
+
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *, int flags);
+static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+{
+       return kmem_cache_alloc_lru(cachep, NULL, flags);
+}
 void kmem_cache_free(struct kmem_cache *cachep, void *objp);
 
 struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
index 1ad75c7ba07408484eb116915c9814429fa2f02e..afe4a5539ecc70b71e681b38b7507aa25f03dd0a 100644 (file)
@@ -353,6 +353,7 @@ static int report__setup_sample_type(struct report *rep)
        struct perf_session *session = rep->session;
        u64 sample_type = evlist__combined_sample_type(session->evlist);
        bool is_pipe = perf_data__is_pipe(session->data);
+       struct evsel *evsel;
 
        if (session->itrace_synth_opts->callchain ||
            session->itrace_synth_opts->add_callchain ||
@@ -407,6 +408,19 @@ static int report__setup_sample_type(struct report *rep)
        }
 
        if (sort__mode == SORT_MODE__MEMORY) {
+               /*
+                * FIXUP: prior to kernel 5.18, Arm SPE missed to set
+                * PERF_SAMPLE_DATA_SRC bit in sample type.  For backward
+                * compatibility, set the bit if it's an old perf data file.
+                */
+               evlist__for_each_entry(session->evlist, evsel) {
+                       if (strstr(evsel->name, "arm_spe") &&
+                               !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+                               evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+                               sample_type |= PERF_SAMPLE_DATA_SRC;
+                       }
+               }
+
                if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
                        ui__error("Selected --mem-mode but no mem data. "
                                  "Did you call perf record without -d?\n");
index a2f1179361886f91107fdadbd057b6c76e9edb3d..cf5eab5431b4c7f33ae788f905ab14591ad91e51 100644 (file)
@@ -461,7 +461,7 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
                return -EINVAL;
 
        if (PRINT_FIELD(DATA_SRC) &&
-           evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC))
+           evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(WEIGHT) &&
index cc6df49a65a18fd767117b808e2a86d425c9ac25..4ad0dfbc8b21fda12f5c6468b8fdd1095efa73f3 100644 (file)
@@ -123,6 +123,10 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
                evsel->core.attr.enable_on_exec = 0;
        }
 
+       if (evlist__open(evlist) == -ENOENT) {
+               err = TEST_SKIP;
+               goto out_err;
+       }
        CHECK__(evlist__open(evlist));
 
        CHECK__(evlist__mmap(evlist, UINT_MAX));
index df7b18fb6b6e625dc6c33da85c08b76028b2635c..1aad7d6d34aaa639ceb9eaf260ef88c264179c12 100644 (file)
 #include "llvm/Option/Option.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/ManagedStatic.h"
+#if CLANG_VERSION_MAJOR >= 14
+#include "llvm/MC/TargetRegistry.h"
+#else
 #include "llvm/Support/TargetRegistry.h"
+#endif
 #include "llvm/Support/TargetSelect.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Target/TargetOptions.h"
index 846f785e278d7f4c7071ee0ef93c9372796c3b9a..7221f2f55e8bff4572bac925cf4d2e73ebff9d4e 100644 (file)
@@ -42,7 +42,7 @@ ISST_IN := $(OUTPUT)intel-speed-select-in.o
 $(ISST_IN): prepare FORCE
        $(Q)$(MAKE) $(build)=intel-speed-select
 $(OUTPUT)intel-speed-select: $(ISST_IN)
-       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+       $(QUIET_LINK)$(CC) $(CFLAGS) $< $(LDFLAGS) -o $@
 
 clean:
        rm -f $(ALL_PROGRAMS)
index 81539f5439546868dc2d8d9bfeed7374aa8ef8f3..d5c1bcba86fe00ffa379a6d5bb38095c084f3ce2 100644 (file)
@@ -25,7 +25,8 @@ struct kmem_cache {
        void (*ctor)(void *);
 };
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, int gfp)
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+               int gfp)
 {
        void *p;
 
index 37db341d4cc5c976e2796d4bed8cc37f2cf99b20..d0d51adec76eb88f12564fa38e52d0df2ba535f2 100644 (file)
 /* CPUID.0x8000_0001.EDX */
 #define CPUID_GBPAGES          (1ul << 26)
 
+/* Page table bitfield declarations */
+#define PTE_PRESENT_MASK        BIT_ULL(0)
+#define PTE_WRITABLE_MASK       BIT_ULL(1)
+#define PTE_USER_MASK           BIT_ULL(2)
+#define PTE_ACCESSED_MASK       BIT_ULL(5)
+#define PTE_DIRTY_MASK          BIT_ULL(6)
+#define PTE_LARGE_MASK          BIT_ULL(7)
+#define PTE_GLOBAL_MASK         BIT_ULL(8)
+#define PTE_NX_MASK             BIT_ULL(63)
+
+#define PAGE_SHIFT             12
+#define PAGE_SIZE              (1ULL << PAGE_SHIFT)
+#define PAGE_MASK              (~(PAGE_SIZE-1))
+
+#define PHYSICAL_PAGE_MASK      GENMASK_ULL(51, 12)
+#define PTE_GET_PFN(pte)        (((pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
+
 /* General Registers in 64-Bit Mode */
 struct gpr64_regs {
        u64 rax;
index ba1fdc3dcf4a90319f1a9d7cd8dd9bbeaaa5f5b4..2c4a7563a4f8adf4416f77c92c3fcf328711e51f 100644 (file)
@@ -278,7 +278,7 @@ static struct kvm_vm *pre_init_before_test(enum vm_guest_mode mode, void *arg)
        else
                guest_test_phys_mem = p->phys_offset;
 #ifdef __s390x__
-       alignment = max(0x100000, alignment);
+       alignment = max(0x100000UL, alignment);
 #endif
        guest_test_phys_mem = align_down(guest_test_phys_mem, alignment);
 
index 9f000dfb55949d04f547caa4ef4d13b44bd60d4c..33ea5e9955d9bddbe844a36cdbf886d6637f8a53 100644 (file)
 
 vm_vaddr_t exception_handlers;
 
-/* Virtual translation table structure declarations */
-struct pageUpperEntry {
-       uint64_t present:1;
-       uint64_t writable:1;
-       uint64_t user:1;
-       uint64_t write_through:1;
-       uint64_t cache_disable:1;
-       uint64_t accessed:1;
-       uint64_t ignored_06:1;
-       uint64_t page_size:1;
-       uint64_t ignored_11_08:4;
-       uint64_t pfn:40;
-       uint64_t ignored_62_52:11;
-       uint64_t execute_disable:1;
-};
-
-struct pageTableEntry {
-       uint64_t present:1;
-       uint64_t writable:1;
-       uint64_t user:1;
-       uint64_t write_through:1;
-       uint64_t cache_disable:1;
-       uint64_t accessed:1;
-       uint64_t dirty:1;
-       uint64_t reserved_07:1;
-       uint64_t global:1;
-       uint64_t ignored_11_09:3;
-       uint64_t pfn:40;
-       uint64_t ignored_62_52:11;
-       uint64_t execute_disable:1;
-};
-
 void regs_dump(FILE *stream, struct kvm_regs *regs,
               uint8_t indent)
 {
@@ -195,23 +163,21 @@ static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
        return &page_table[index];
 }
 
-static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
-                                                   uint64_t pt_pfn,
-                                                   uint64_t vaddr,
-                                                   uint64_t paddr,
-                                                   int level,
-                                                   enum x86_page_size page_size)
+static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
+                                      uint64_t pt_pfn,
+                                      uint64_t vaddr,
+                                      uint64_t paddr,
+                                      int level,
+                                      enum x86_page_size page_size)
 {
-       struct pageUpperEntry *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
-
-       if (!pte->present) {
-               pte->writable = true;
-               pte->present = true;
-               pte->page_size = (level == page_size);
-               if (pte->page_size)
-                       pte->pfn = paddr >> vm->page_shift;
+       uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, level);
+
+       if (!(*pte & PTE_PRESENT_MASK)) {
+               *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
+               if (level == page_size)
+                       *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
                else
-                       pte->pfn = vm_alloc_page_table(vm) >> vm->page_shift;
+                       *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
        } else {
                /*
                 * Entry already present.  Assert that the caller doesn't want
@@ -221,7 +187,7 @@ static struct pageUpperEntry *virt_create_upper_pte(struct kvm_vm *vm,
                TEST_ASSERT(level != page_size,
                            "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
                            page_size, vaddr);
-               TEST_ASSERT(!pte->page_size,
+               TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
                            "Cannot create page table at level: %u, vaddr: 0x%lx\n",
                            level, vaddr);
        }
@@ -232,8 +198,8 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                   enum x86_page_size page_size)
 {
        const uint64_t pg_size = 1ull << ((page_size * 9) + 12);
-       struct pageUpperEntry *pml4e, *pdpe, *pde;
-       struct pageTableEntry *pte;
+       uint64_t *pml4e, *pdpe, *pde;
+       uint64_t *pte;
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
                    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -257,24 +223,22 @@ void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
         */
        pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
                                      vaddr, paddr, 3, page_size);
-       if (pml4e->page_size)
+       if (*pml4e & PTE_LARGE_MASK)
                return;
 
-       pdpe = virt_create_upper_pte(vm, pml4e->pfn, vaddr, paddr, 2, page_size);
-       if (pdpe->page_size)
+       pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, 2, page_size);
+       if (*pdpe & PTE_LARGE_MASK)
                return;
 
-       pde = virt_create_upper_pte(vm, pdpe->pfn, vaddr, paddr, 1, page_size);
-       if (pde->page_size)
+       pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, 1, page_size);
+       if (*pde & PTE_LARGE_MASK)
                return;
 
        /* Fill in page table entry. */
-       pte = virt_get_pte(vm, pde->pfn, vaddr, 0);
-       TEST_ASSERT(!pte->present,
+       pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, 0);
+       TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
                    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
-       pte->pfn = paddr >> vm->page_shift;
-       pte->writable = true;
-       pte->present = 1;
+       *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
 }
 
 void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
@@ -282,22 +246,22 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
        __virt_pg_map(vm, vaddr, paddr, X86_PAGE_SIZE_4K);
 }
 
-static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
+static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid,
                                                       uint64_t vaddr)
 {
        uint16_t index[4];
-       struct pageUpperEntry *pml4e, *pdpe, *pde;
-       struct pageTableEntry *pte;
+       uint64_t *pml4e, *pdpe, *pde;
+       uint64_t *pte;
        struct kvm_cpuid_entry2 *entry;
        struct kvm_sregs sregs;
        int max_phy_addr;
-       /* Set the bottom 52 bits. */
-       uint64_t rsvd_mask = 0x000fffffffffffff;
+       uint64_t rsvd_mask = 0;
 
        entry = kvm_get_supported_cpuid_index(0x80000008, 0);
        max_phy_addr = entry->eax & 0x000000ff;
-       /* Clear the bottom bits of the reserved mask. */
-       rsvd_mask = (rsvd_mask >> max_phy_addr) << max_phy_addr;
+       /* Set the high bits in the reserved mask. */
+       if (max_phy_addr < 52)
+               rsvd_mask = GENMASK_ULL(51, max_phy_addr);
 
        /*
         * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
@@ -307,7 +271,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
         */
        vcpu_sregs_get(vm, vcpuid, &sregs);
        if ((sregs.efer & EFER_NX) == 0) {
-               rsvd_mask |= (1ull << 63);
+               rsvd_mask |= PTE_NX_MASK;
        }
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
@@ -329,30 +293,29 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
        index[3] = (vaddr >> 39) & 0x1ffu;
 
        pml4e = addr_gpa2hva(vm, vm->pgd);
-       TEST_ASSERT(pml4e[index[3]].present,
+       TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
                "Expected pml4e to be present for gva: 0x%08lx", vaddr);
-       TEST_ASSERT((*(uint64_t*)(&pml4e[index[3]]) &
-               (rsvd_mask | (1ull << 7))) == 0,
+       TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
                "Unexpected reserved bits set.");
 
-       pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
-       TEST_ASSERT(pdpe[index[2]].present,
+       pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+       TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
                "Expected pdpe to be present for gva: 0x%08lx", vaddr);
-       TEST_ASSERT(pdpe[index[2]].page_size == 0,
+       TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
                "Expected pdpe to map a pde not a 1-GByte page.");
-       TEST_ASSERT((*(uint64_t*)(&pdpe[index[2]]) & rsvd_mask) == 0,
+       TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
                "Unexpected reserved bits set.");
 
-       pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
-       TEST_ASSERT(pde[index[1]].present,
+       pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+       TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
                "Expected pde to be present for gva: 0x%08lx", vaddr);
-       TEST_ASSERT(pde[index[1]].page_size == 0,
+       TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
                "Expected pde to map a pte not a 2-MByte page.");
-       TEST_ASSERT((*(uint64_t*)(&pde[index[1]]) & rsvd_mask) == 0,
+       TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
                "Unexpected reserved bits set.");
 
-       pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
-       TEST_ASSERT(pte[index[0]].present,
+       pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+       TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
                "Expected pte to be present for gva: 0x%08lx", vaddr);
 
        return &pte[index[0]];
@@ -360,7 +323,7 @@ static struct pageTableEntry *_vm_get_page_table_entry(struct kvm_vm *vm, int vc
 
 uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
 {
-       struct pageTableEntry *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
+       uint64_t *pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
 
        return *(uint64_t *)pte;
 }
@@ -368,18 +331,17 @@ uint64_t vm_get_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr)
 void vm_set_page_table_entry(struct kvm_vm *vm, int vcpuid, uint64_t vaddr,
                             uint64_t pte)
 {
-       struct pageTableEntry *new_pte = _vm_get_page_table_entry(vm, vcpuid,
-                                                                 vaddr);
+       uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpuid, vaddr);
 
        *(uint64_t *)new_pte = pte;
 }
 
 void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
 {
-       struct pageUpperEntry *pml4e, *pml4e_start;
-       struct pageUpperEntry *pdpe, *pdpe_start;
-       struct pageUpperEntry *pde, *pde_start;
-       struct pageTableEntry *pte, *pte_start;
+       uint64_t *pml4e, *pml4e_start;
+       uint64_t *pdpe, *pdpe_start;
+       uint64_t *pde, *pde_start;
+       uint64_t *pte, *pte_start;
 
        if (!vm->pgd_created)
                return;
@@ -389,58 +351,58 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
        fprintf(stream, "%*s      index hvaddr         gpaddr         "
                "addr         w exec dirty\n",
                indent, "");
-       pml4e_start = (struct pageUpperEntry *) addr_gpa2hva(vm, vm->pgd);
+       pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
        for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
                pml4e = &pml4e_start[n1];
-               if (!pml4e->present)
+               if (!(*pml4e & PTE_PRESENT_MASK))
                        continue;
-               fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
+               fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
                        " %u\n",
                        indent, "",
                        pml4e - pml4e_start, pml4e,
-                       addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->pfn,
-                       pml4e->writable, pml4e->execute_disable);
+                       addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
+                       !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));
 
-               pdpe_start = addr_gpa2hva(vm, pml4e->pfn * vm->page_size);
+               pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
                for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
                        pdpe = &pdpe_start[n2];
-                       if (!pdpe->present)
+                       if (!(*pdpe & PTE_PRESENT_MASK))
                                continue;
-                       fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
+                       fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10llx "
                                "%u  %u\n",
                                indent, "",
                                pdpe - pdpe_start, pdpe,
                                addr_hva2gpa(vm, pdpe),
-                               (uint64_t) pdpe->pfn, pdpe->writable,
-                               pdpe->execute_disable);
+                               PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
+                               !!(*pdpe & PTE_NX_MASK));
 
-                       pde_start = addr_gpa2hva(vm, pdpe->pfn * vm->page_size);
+                       pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
                        for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
                                pde = &pde_start[n3];
-                               if (!pde->present)
+                               if (!(*pde & PTE_PRESENT_MASK))
                                        continue;
                                fprintf(stream, "%*spde   0x%-3zx %p "
-                                       "0x%-12lx 0x%-10lx %u  %u\n",
+                                       "0x%-12lx 0x%-10llx %u  %u\n",
                                        indent, "", pde - pde_start, pde,
                                        addr_hva2gpa(vm, pde),
-                                       (uint64_t) pde->pfn, pde->writable,
-                                       pde->execute_disable);
+                                       PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
+                                       !!(*pde & PTE_NX_MASK));
 
-                               pte_start = addr_gpa2hva(vm, pde->pfn * vm->page_size);
+                               pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
                                for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
                                        pte = &pte_start[n4];
-                                       if (!pte->present)
+                                       if (!(*pte & PTE_PRESENT_MASK))
                                                continue;
                                        fprintf(stream, "%*spte   0x%-3zx %p "
-                                               "0x%-12lx 0x%-10lx %u  %u "
+                                               "0x%-12lx 0x%-10llx %u  %u "
                                                "    %u    0x%-10lx\n",
                                                indent, "",
                                                pte - pte_start, pte,
                                                addr_hva2gpa(vm, pte),
-                                               (uint64_t) pte->pfn,
-                                               pte->writable,
-                                               pte->execute_disable,
-                                               pte->dirty,
+                                               PTE_GET_PFN(*pte),
+                                               !!(*pte & PTE_WRITABLE_MASK),
+                                               !!(*pte & PTE_NX_MASK),
+                                               !!(*pte & PTE_DIRTY_MASK),
                                                ((uint64_t) n1 << 27)
                                                        | ((uint64_t) n2 << 18)
                                                        | ((uint64_t) n3 << 9)
@@ -558,8 +520,8 @@ static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
 vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
 {
        uint16_t index[4];
-       struct pageUpperEntry *pml4e, *pdpe, *pde;
-       struct pageTableEntry *pte;
+       uint64_t *pml4e, *pdpe, *pde;
+       uint64_t *pte;
 
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
@@ -572,22 +534,22 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
        if (!vm->pgd_created)
                goto unmapped_gva;
        pml4e = addr_gpa2hva(vm, vm->pgd);
-       if (!pml4e[index[3]].present)
+       if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       pdpe = addr_gpa2hva(vm, pml4e[index[3]].pfn * vm->page_size);
-       if (!pdpe[index[2]].present)
+       pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
+       if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       pde = addr_gpa2hva(vm, pdpe[index[2]].pfn * vm->page_size);
-       if (!pde[index[1]].present)
+       pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
+       if (!(pde[index[1]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       pte = addr_gpa2hva(vm, pde[index[1]].pfn * vm->page_size);
-       if (!pte[index[0]].present)
+       pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
+       if (!(pte[index[0]] & PTE_PRESENT_MASK))
                goto unmapped_gva;
 
-       return (pte[index[0]].pfn * vm->page_size) + (gva & 0xfffu);
+       return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);
 
 unmapped_gva:
        TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
index 52a3ef6629e80610c2d9776f659869fd757c1341..76f65c22796f2e734f243e6d063e2036253c71df 100644 (file)
@@ -29,7 +29,6 @@
 #define X86_FEATURE_XSAVE              (1 << 26)
 #define X86_FEATURE_OSXSAVE            (1 << 27)
 
-#define PAGE_SIZE                      (1 << 12)
 #define NUM_TILES                      8
 #define TILE_SIZE                      1024
 #define XSAVE_SIZE                     ((NUM_TILES * TILE_SIZE) + PAGE_SIZE)
index f070ff0224fa3f88f1247273eb0a83c3e3e61613..aeb3850f81bd10a6d8474de9b5bfcdb14afb9528 100644 (file)
@@ -12,7 +12,6 @@
 #include "vmx.h"
 
 #define VCPU_ID           1
-#define PAGE_SIZE  4096
 #define MAXPHYADDR 36
 
 #define MEM_REGION_GVA 0x0000123456789000
index a626d40fdb48940be29857b01d80820da447ff7d..b4e0c860769e456e44a3fcc01ffe45fc8af551e4 100644 (file)
@@ -21,8 +21,6 @@
 
 #define VCPU_ID              1
 
-#define PAGE_SIZE  4096
-
 #define SMRAM_SIZE 65536
 #define SMRAM_MEMSLOT ((1 << 16) | 1)
 #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
index e683d0ac3e45e1841281e87078bfda7edc91b6b6..19b35c607dc66c4b9969f6816ca77fc2aafa1538 100644 (file)
@@ -32,7 +32,6 @@
 #define MSR_IA32_TSC_ADJUST 0x3b
 #endif
 
-#define PAGE_SIZE      4096
 #define VCPU_ID                5
 
 #define TSC_ADJUST_VALUE (1ll << 32)
index 865e17146815a6585d801a31abf4a63c853d1dff..bcd3708278593dd14585d32510d5cda20a98c677 100644 (file)
@@ -23,7 +23,6 @@
 #define SHINFO_REGION_GVA      0xc0000000ULL
 #define SHINFO_REGION_GPA      0xc0000000ULL
 #define SHINFO_REGION_SLOT     10
-#define PAGE_SIZE              4096
 
 #define DUMMY_REGION_GPA       (SHINFO_REGION_GPA + (2 * PAGE_SIZE))
 #define DUMMY_REGION_SLOT      11
index adc94452b57c6cda757213da12f6b27cec420ad5..b30fe9de1d4f6a31c286f8499356639639ca933e 100644 (file)
@@ -15,7 +15,6 @@
 
 #define HCALL_REGION_GPA       0xc0000000ULL
 #define HCALL_REGION_SLOT      10
-#define PAGE_SIZE              4096
 
 static struct kvm_vm *vm;
 
index 7c0b0617b9f85554d6831f15ce5691b9e0082ff7..db0270127aeb041ab4e53f5f4dcdf7c372a22422 100644 (file)
@@ -6,9 +6,11 @@
 
 #include <errno.h>
 #include <stdlib.h>
+#include <stdio.h>
 #include <string.h>
 #include <sys/mman.h>
 #include <time.h>
+#include <stdbool.h>
 
 #include "../kselftest.h"
 
@@ -63,6 +65,59 @@ enum {
        .expect_failure = should_fail                           \
 }
 
+/*
+ * Returns false if the requested remap region overlaps with an
+ * existing mapping (e.g text, stack) else returns true.
+ */
+static bool is_remap_region_valid(void *addr, unsigned long long size)
+{
+       void *remap_addr = NULL;
+       bool ret = true;
+
+       /* Use MAP_FIXED_NOREPLACE flag to ensure region is not mapped */
+       remap_addr = mmap(addr, size, PROT_READ | PROT_WRITE,
+                                        MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+                                        -1, 0);
+
+       if (remap_addr == MAP_FAILED) {
+               if (errno == EEXIST)
+                       ret = false;
+       } else {
+               munmap(remap_addr, size);
+       }
+
+       return ret;
+}
+
+/* Returns mmap_min_addr sysctl tunable from procfs */
+static unsigned long long get_mmap_min_addr(void)
+{
+       FILE *fp;
+       int n_matched;
+       static unsigned long long addr;
+
+       if (addr)
+               return addr;
+
+       fp = fopen("/proc/sys/vm/mmap_min_addr", "r");
+       if (fp == NULL) {
+               ksft_print_msg("Failed to open /proc/sys/vm/mmap_min_addr: %s\n",
+                       strerror(errno));
+               exit(KSFT_SKIP);
+       }
+
+       n_matched = fscanf(fp, "%llu", &addr);
+       if (n_matched != 1) {
+               ksft_print_msg("Failed to read /proc/sys/vm/mmap_min_addr: %s\n",
+                       strerror(errno));
+               fclose(fp);
+               exit(KSFT_SKIP);
+       }
+
+       fclose(fp);
+       return addr;
+}
+
 /*
  * Returns the start address of the mapping on success, else returns
  * NULL on failure.
@@ -71,11 +126,18 @@ static void *get_source_mapping(struct config c)
 {
        unsigned long long addr = 0ULL;
        void *src_addr = NULL;
+       unsigned long long mmap_min_addr;
+
+       mmap_min_addr = get_mmap_min_addr();
+
 retry:
        addr += c.src_alignment;
+       if (addr < mmap_min_addr)
+               goto retry;
+
        src_addr = mmap((void *) addr, c.region_size, PROT_READ | PROT_WRITE,
-                       MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
-                       -1, 0);
+                                       MAP_FIXED_NOREPLACE | MAP_ANONYMOUS | MAP_SHARED,
+                                       -1, 0);
        if (src_addr == MAP_FAILED) {
                if (errno == EPERM || errno == EEXIST)
                        goto retry;
@@ -90,8 +152,10 @@ retry:
         * alignment in the tests.
         */
        if (((unsigned long long) src_addr & (c.src_alignment - 1)) ||
-                       !((unsigned long long) src_addr & c.src_alignment))
+                       !((unsigned long long) src_addr & c.src_alignment)) {
+               munmap(src_addr, c.region_size);
                goto retry;
+       }
 
        if (!src_addr)
                goto error;
@@ -140,9 +204,20 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
        if (!((unsigned long long) addr & c.dest_alignment))
                addr = (void *) ((unsigned long long) addr | c.dest_alignment);
 
+       /* Don't destroy existing mappings unless expected to overlap */
+       while (!is_remap_region_valid(addr, c.region_size) && !c.overlapping) {
+               /* Check for unsigned overflow */
+               if (addr + c.dest_alignment < addr) {
+                       ksft_print_msg("Couldn't find a valid region to remap to\n");
+                       ret = -1;
+                       goto out;
+               }
+               addr += c.dest_alignment;
+       }
+
        clock_gettime(CLOCK_MONOTONIC, &t_start);
        dest_addr = mremap(src_addr, c.region_size, c.region_size,
-                       MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
+                                         MREMAP_MAYMOVE|MREMAP_FIXED, (char *) addr);
        clock_gettime(CLOCK_MONOTONIC, &t_end);
 
        if (dest_addr == MAP_FAILED) {
@@ -193,7 +268,7 @@ static void run_mremap_test_case(struct test test_case, int *failures,
 
        if (remap_time < 0) {
                if (test_case.expect_failure)
-                       ksft_test_result_pass("%s\n\tExpected mremap failure\n",
+                       ksft_test_result_xfail("%s\n\tExpected mremap failure\n",
                                              test_case.name);
                else {
                        ksft_test_result_fail("%s\n", test_case.name);
index 3b265f140c25c205130208f416659e84eaf2f937..352ba00cf26b03dc5abb9494ff463c15530549d1 100755 (executable)
@@ -291,11 +291,16 @@ echo "-------------------"
 echo "running mremap_test"
 echo "-------------------"
 ./mremap_test
-if [ $? -ne 0 ]; then
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+       echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+        echo "[SKIP]"
+        exitcode=$ksft_skip
+else
        echo "[FAIL]"
        exitcode=1
-else
-       echo "[PASS]"
 fi
 
 echo "-----------------"
index 222ecc81d7df2d879eaee12a70e93f18d362a789..f4c2a6eb1666b99f2e1bf98be6edb874e956ba95 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * KVM dirty ring implementation
  *
index dfb7dabdbc63dee5435a7041d53e45e261792afa..f30bb8c16f26593ddcdaeb190919b35054e75d25 100644 (file)
@@ -164,6 +164,10 @@ __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
 {
 }
 
+__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
+{
+}
+
 bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
 {
        /*
@@ -357,6 +361,12 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
 EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
 #endif
 
+static void kvm_flush_shadow_all(struct kvm *kvm)
+{
+       kvm_arch_flush_shadow_all(kvm);
+       kvm_arch_guest_memory_reclaimed(kvm);
+}
+
 #ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
 static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
                                               gfp_t gfp_flags)
@@ -485,12 +495,15 @@ typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
                             unsigned long end);
 
+typedef void (*on_unlock_fn_t)(struct kvm *kvm);
+
 struct kvm_hva_range {
        unsigned long start;
        unsigned long end;
        pte_t pte;
        hva_handler_t handler;
        on_lock_fn_t on_lock;
+       on_unlock_fn_t on_unlock;
        bool flush_on_ret;
        bool may_block;
 };
@@ -578,8 +591,11 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
        if (range->flush_on_ret && ret)
                kvm_flush_remote_tlbs(kvm);
 
-       if (locked)
+       if (locked) {
                KVM_MMU_UNLOCK(kvm);
+               if (!IS_KVM_NULL_FN(range->on_unlock))
+                       range->on_unlock(kvm);
+       }
 
        srcu_read_unlock(&kvm->srcu, idx);
 
@@ -600,6 +616,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                .pte            = pte,
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
+               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = true,
                .may_block      = false,
        };
@@ -619,6 +636,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
                .pte            = __pte(0),
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
+               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = false,
        };
@@ -662,7 +680,7 @@ void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start,
                kvm->mmu_notifier_range_end = end;
        } else {
                /*
-                * Fully tracking multiple concurrent ranges has dimishing
+                * Fully tracking multiple concurrent ranges has diminishing
                 * returns. Keep things simple and just find the minimal range
                 * which includes the current and new ranges. As there won't be
                 * enough information to subtract a range after its invalidate
@@ -687,6 +705,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                .pte            = __pte(0),
                .handler        = kvm_unmap_gfn_range,
                .on_lock        = kvm_inc_notifier_count,
+               .on_unlock      = kvm_arch_guest_memory_reclaimed,
                .flush_on_ret   = true,
                .may_block      = mmu_notifier_range_blockable(range),
        };
@@ -741,6 +760,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                .pte            = __pte(0),
                .handler        = (void *)kvm_null_fn,
                .on_lock        = kvm_dec_notifier_count,
+               .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = mmu_notifier_range_blockable(range),
        };
@@ -813,7 +833,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
        int idx;
 
        idx = srcu_read_lock(&kvm->srcu);
-       kvm_arch_flush_shadow_all(kvm);
+       kvm_flush_shadow_all(kvm);
        srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -955,12 +975,6 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;
 
-       /*
-        * Force subsequent debugfs file creations to fail if the VM directory
-        * is not created.
-        */
-       kvm->debugfs_dentry = ERR_PTR(-ENOENT);
-
        if (!debugfs_initialized())
                return 0;
 
@@ -1081,6 +1095,12 @@ static struct kvm *kvm_create_vm(unsigned long type)
 
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
+       /*
+        * Force subsequent debugfs file creations to fail if the VM directory
+        * is not created (by kvm_create_vm_debugfs()).
+        */
+       kvm->debugfs_dentry = ERR_PTR(-ENOENT);
+
        if (init_srcu_struct(&kvm->srcu))
                goto out_err_no_srcu;
        if (init_srcu_struct(&kvm->irq_srcu))
@@ -1225,7 +1245,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
        WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
        kvm->mn_active_invalidate_count = 0;
 #else
-       kvm_arch_flush_shadow_all(kvm);
+       kvm_flush_shadow_all(kvm);
 #endif
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
@@ -1652,6 +1672,7 @@ static void kvm_invalidate_memslot(struct kvm *kvm,
         *      - kvm_is_visible_gfn (mmu_check_root)
         */
        kvm_arch_flush_shadow_memslot(kvm, old);
+       kvm_arch_guest_memory_reclaimed(kvm);
 
        /* Was released by kvm_swap_active_memslots, reacquire. */
        mutex_lock(&kvm->slots_arch_lock);
@@ -1799,7 +1820,7 @@ static int kvm_set_memslot(struct kvm *kvm,
 
        /*
         * No need to refresh new->arch, changes after dropping slots_arch_lock
-        * will directly hit the final, active memsot.  Architectures are
+        * will directly hit the final, active memslot.  Architectures are
         * responsible for knowing that new->arch may be stale.
         */
        kvm_commit_memory_region(kvm, old, new, change);
index 34ca40823260da674376f9d06fbcfdf903ed18aa..41da467d99c95ef31412ac82d00111b88bb4f40a 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only
+/* SPDX-License-Identifier: GPL-2.0-only */
 
 #ifndef __KVM_MM_H__
 #define __KVM_MM_H__ 1