Merge tag 'safesetid-maintainers-correction-5.3-rc2' of git://github.com/micah-morton...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 4 Aug 2019 17:02:13 +0000 (10:02 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 4 Aug 2019 17:02:13 +0000 (10:02 -0700)
Pull SafeSetID maintainer update from Micah Morton:
 "Add entry in MAINTAINERS file for SafeSetID LSM"

* tag 'safesetid-maintainers-correction-5.3-rc2' of git://github.com/micah-morton/linux:
  Add entry in MAINTAINERS file for SafeSetID LSM

286 files changed:
Documentation/vm/hmm.rst
MAINTAINERS
arch/arm/include/asm/dma-mapping.h
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/arm/mm/init.c
arch/arm64/Makefile
arch/arm64/include/asm/arch_gicv3.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/daifflags.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/vdso/compat_gettimeofday.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kernel/module.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/return_address.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/mm/fault.c
arch/mips/vdso/vdso.h
arch/parisc/Makefile
arch/parisc/boot/compressed/Makefile
arch/parisc/boot/compressed/vmlinux.lds.S
arch/parisc/configs/default_defconfig [deleted file]
arch/parisc/configs/defconfig [new file with mode: 0644]
arch/parisc/kernel/ftrace.c
arch/parisc/math-emu/Makefile
arch/parisc/mm/fault.c
arch/riscv/boot/dts/sifive/fu540-c000.dtsi
arch/riscv/configs/defconfig
arch/riscv/kernel/vdso/Makefile
arch/s390/boot/boot.h
arch/s390/boot/kaslr.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/qdio.h
arch/s390/include/asm/setup.h
arch/s390/kernel/machine_kexec_reloc.c
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/lib/xor.c
arch/s390/mm/fault.c
arch/s390/mm/gmap.c
arch/x86/include/asm/vdso/gettimeofday.h
arch/xtensa/kernel/coprocessor.S
drivers/acpi/device_pm.c
drivers/acpi/scan.c
drivers/ata/libahci_platform.c
drivers/ata/libata-zpodd.c
drivers/block/ataflop.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/bluetooth/hci_ath.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_intel.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_mrvl.c
drivers/bluetooth/hci_qca.c
drivers/bluetooth/hci_uart.h
drivers/char/ipmi/ipmb_dev_int.c
drivers/clk/at91/clk-generated.c
drivers/clk/mediatek/clk-mt8183.c
drivers/clk/renesas/renesas-cpg-mssr.c
drivers/clk/sprd/Kconfig
drivers/gpio/gpiolib.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/bochs/bochs_kms.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/drm_client.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_bw.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_engine_pm.h
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_pm.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/gt/selftest_reset.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/fb_decoder.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/trace_points.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_wakeref.h
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_svm.c
drivers/i2c/busses/i2c-at91-core.c
drivers/i2c/busses/i2c-at91-master.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/counters.c
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_res.h
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/bnxt_re/qplib_sp.h
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/rc.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/Kconfig
drivers/infiniband/hw/hns/Makefile
drivers/infiniband/hw/hns/hns_roce_db.c
drivers/infiniband/hw/hns/hns_roce_hw_v1.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/sw/siw/siw_cm.c
drivers/infiniband/sw/siw/siw_main.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/iommu/virtio-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-imx-gpcv2.c
drivers/irqchip/irq-mbigen.c
drivers/md/dm-table.c
drivers/misc/eeprom/at24.c
drivers/mmc/core/queue.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/meson-mx-sdio.c
drivers/mmc/host/sdhci-sprd.c
drivers/perf/arm_pmu.c
drivers/platform/olpc/olpc-xo175-ec.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/pcengines-apuv2.c
drivers/s390/block/dasd_alias.c
drivers/s390/char/con3215.c
drivers/s390/char/tape_core.c
drivers/s390/cio/vfio_ccw_async.c
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/hpsa.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qla2xxx/qla_init.c
drivers/vhost/vhost.h
drivers/xen/gntdev.c
drivers/xen/privcmd.c
drivers/xen/swiotlb-xen.c
drivers/xen/xen-pciback/conf_space_capability.c
drivers/xen/xlate_mmu.c
fs/block_dev.c
fs/btrfs/backref.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/coredump.c
fs/dax.c
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/super.c
fs/gfs2/bmap.c
fs/io_uring.c
fs/ocfs2/xattr.c
fs/super.c
fs/xfs/scrub/dabtree.c
fs/xfs/xfs_itable.c
include/asm-generic/getorder.h
include/drm/drm_client.h
include/drm/drm_mode_config.h
include/linux/clk.h
include/linux/fs.h
include/linux/gpio/consumer.h
include/linux/hmm.h
include/linux/mod_devicetable.h
include/linux/page-flags-layout.h
include/linux/page-flags.h
include/rdma/ib_verbs.h
include/rdma/rdmavt_qp.h
include/scsi/libfc.h
include/scsi/libfcoe.h
include/trace/events/dma_fence.h
include/trace/events/napi.h
include/trace/events/qdisc.h
include/trace/events/tegra_apb_dma.h
include/uapi/linux/virtio_iommu.h
include/xen/xen-ops.h
kernel/Makefile
kernel/dma/contiguous.c
kernel/dma/mapping.c
kernel/exit.c
kernel/memremap.c [deleted file]
kernel/signal.c
kernel/trace/trace_functions_graph.c
lib/Kconfig.kasan
lib/Makefile
lib/test_meminit.c
lib/vdso/gettimeofday.c
mm/Makefile
mm/balloon_compaction.c
mm/compaction.c
mm/hmm.c
mm/kmemleak.c
mm/memory_hotplug.c
mm/memremap.c [new file with mode: 0644]
mm/migrate.c
mm/slub.c
mm/vmscan.c
security/selinux/ss/policydb.c
sound/core/pcm_native.c
sound/hda/hdac_i915.c
sound/usb/helper.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/powerpc/include/uapi/asm/mman.h
tools/arch/sparc/include/uapi/asm/mman.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/asm-generic/mman.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/sched.h
tools/include/uapi/linux/usbdevice_fs.h
tools/lib/bpf/hashmap.h
tools/perf/Documentation/perf.data-file-format.txt
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
tools/perf/trace/beauty/usbdevfs_ioctl.sh
tools/perf/util/header.c
tools/testing/selftests/cgroup/cgroup_util.c
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
tools/testing/selftests/kmod/kmod.sh
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/pidfd/pidfd_test.c
tools/testing/selftests/x86/test_vsyscall.c

index 7d90964abbb0fb87ff438024bbc2a0411b313639..710ce1c701bf3a0f50809332a9ef4868666dabd4 100644 (file)
@@ -237,7 +237,7 @@ The usage pattern is::
       ret = hmm_range_snapshot(&range);
       if (ret) {
           up_read(&mm->mmap_sem);
-          if (ret == -EAGAIN) {
+          if (ret == -EBUSY) {
             /*
              * No need to check hmm_range_wait_until_valid() return value
              * on retry we will get proper error with hmm_range_snapshot()
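For reference, the corrected retry loop from the HMM documentation reads roughly as follows (a sketch assuming the helpers named in the snippet; the TIMEOUT constant is illustrative, not a real kernel symbol):

      again:
            ret = hmm_range_snapshot(&range);
            if (ret) {
                up_read(&mm->mmap_sem);
                if (ret == -EBUSY) {
                    /* Back off until the range is valid, then retry; the
                     * retried hmm_range_snapshot() reports any real error. */
                    if (!hmm_range_wait_until_valid(&range, TIMEOUT))
                        goto err;
                    down_read(&mm->mmap_sem);
                    goto again;
                }
                return ret;
            }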
index 30b8a83c3afa353ce2a19e4426dbfbf1ea381e79..0f5004592ffc6e730547780703c2383333a716c7 100644 (file)
@@ -6322,7 +6322,8 @@ F:        Documentation/devicetree/bindings/counter/ftm-quaddec.txt
 F:     drivers/counter/ftm-quaddec.c
 
 FLOPPY DRIVER
-S:     Orphan
+M:     Denis Efremov <efremov@linux.com>
+S:     Odd Fixes
 L:     linux-block@vger.kernel.org
 F:     drivers/block/floppy.c
 
index 7e0486ad1318cdf3037b0689340f4faa407ed13b..dba9355e24849ce92bdff29cb0af54fbe46fa771 100644 (file)
@@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-       return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
+       if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
+               return &arm_dma_ops;
+       return NULL;
 }
 
 #ifdef __arch_page_to_dma
index 820b60a50125b9ebe2984ba1d09b76e180329381..c54cd7ed90ba5e6a8d64377ef96b19fd4cad9386 100644 (file)
@@ -663,6 +663,11 @@ config ARM_LPAE
        depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
                !CPU_32v4 && !CPU_32v3
        select PHYS_ADDR_T_64BIT
+       select SWIOTLB
+       select ARCH_HAS_DMA_COHERENT_TO_PFN
+       select ARCH_HAS_DMA_MMAP_PGPROT
+       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+       select ARCH_HAS_SYNC_DMA_FOR_CPU
        help
          Say Y if you have an ARMv7 processor supporting the LPAE page
          table format and you would like to access memory beyond the
index 4789c60a86e34552411367282be7309f0d8f779a..6774b03aa405ca4e2dd057fcfe65aa9b9a43a878 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
 #include <linux/memblock.h>
@@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask)
 
 static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
 {
+       /*
+        * When CONFIG_ARM_LPAE is set, physical addresses can extend above
+        * 32 bits, and devices that only support 32-bit DMA cannot address
+        * them.
+        * Use the generic dma-direct / swiotlb ops code in that case, as that
+        * handles bounce buffering for us.
+        *
+        * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
+        * latter is also selected by the Xen code, but that code for now relies
+        * on non-NULL dev_dma_ops.  To be cleaned up later.
+        */
+       if (IS_ENABLED(CONFIG_ARM_LPAE))
+               return NULL;
        return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
 }
 
@@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
        const struct dma_map_ops *dma_ops;
 
        dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+       dev->dma_coherent = coherent;
+#endif
 
        /*
         * Don't override the dma_ops if they have already been set. Ideally
@@ -2363,3 +2380,47 @@ void arch_teardown_dma_ops(struct device *dev)
        /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
        set_dma_ops(dev, NULL);
 }
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
+{
+       __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+                             size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+               size_t size, enum dma_data_direction dir)
+{
+       __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+                             size, dir);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+               dma_addr_t dma_addr)
+{
+       return dma_to_pfn(dev, dma_addr);
+}
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+               unsigned long attrs)
+{
+       if (!dev_is_dma_coherent(dev))
+               return __get_dma_pgprot(attrs, prot);
+       return prot;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+               gfp_t gfp, unsigned long attrs)
+{
+       return __dma_alloc(dev, size, dma_handle, gfp,
+                          __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+                          attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+               dma_addr_t dma_handle, unsigned long attrs)
+{
+       __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
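Returning NULL ops above routes LPAE devices through the generic dma-direct code, which bounces through swiotlb whenever a buffer is outside a device's DMA mask. A minimal sketch of that reachability test, assuming the dma_capable() and phys_to_dma() helpers from include/linux/dma-direct.h:

static bool needs_bounce_buffer(struct device *dev, phys_addr_t paddr,
                                size_t size)
{
        /* With LPAE a page may sit above 4 GiB; a device whose DMA mask
         * covers only 32 bits must then be bounced through swiotlb. */
        return !dma_capable(dev, phys_to_dma(dev, paddr), size);
}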
index 4920a206dce936fe9ab601bfd569f0133c3d757c..16d373d587c476e3caf81e43f61b5fc325ffa9a4 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
 #include <linux/stop_machine.h>
+#include <linux/swiotlb.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -463,6 +464,10 @@ static void __init free_highpages(void)
  */
 void __init mem_init(void)
 {
+#ifdef CONFIG_ARM_LPAE
+       swiotlb_init(1);
+#endif
+
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
        /* this will put all unused low memory onto the freelists */
index bb1f1dbb34e8f9f544cff4f6f22728eec660be54..61de992bbea3fad61895e4f1e796cea460fd9deb 100644 (file)
@@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
 
   ifeq ($(CONFIG_CC_IS_CLANG), y)
     $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
-  else ifeq ($(CROSS_COMPILE_COMPAT),)
+  else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
     $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
   else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
     $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
index 79155a8cfe7c06026a583ea9a5a9f0218b543971..89e4c8b7934905657bd6b4c21eb780540bf80bc0 100644 (file)
@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
        BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
                                         GIC_PRIO_PSR_I_SET));
        BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
+       /*
+        * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
+        * and non-secure PMR accesses are not subject to the shifts that
+        * are applied to IRQ priorities
+        */
+       BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
        gic_write_pmr(GIC_PRIO_IRQOFF);
 }
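Worked through with the usual GICD_INT_DEF_PRI value of 0xa0: with SCR_EL3.FIQ clear, the non-secure view of that priority is 0x80 | (0xa0 >> 1) = 0xd0, and the new BUILD_BUG_ON demands that this stay below GIC_PRIO_IRQON. That is why GIC_PRIO_IRQON moves from 0xc0 to 0xe0 in the ptrace.h hunk later in this merge; 0xd0 >= 0xc0 would have tripped the check.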
 
index 407e2bf23676c970fbf37d4e264b39cb8f58cad5..c96ffa4722d33cba234afdc64ce4483926eba26c 100644 (file)
  */
 
 enum ftr_type {
-       FTR_EXACT,      /* Use a predefined safe value */
-       FTR_LOWER_SAFE, /* Smaller value is safe */
-       FTR_HIGHER_SAFE,/* Bigger value is safe */
+       FTR_EXACT,                      /* Use a predefined safe value */
+       FTR_LOWER_SAFE,                 /* Smaller value is safe */
+       FTR_HIGHER_SAFE,                /* Bigger value is safe */
+       FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
 };
 
 #define FTR_STRICT     true    /* SANITY check strict matching required */
index 987926ed535e36e856882c6138ee0f7ea54f6576..063c964af705f0c31da0d44d10687f1e3f00ec6c 100644 (file)
@@ -13,6 +13,8 @@
 #define DAIF_PROCCTX           0
 #define DAIF_PROCCTX_NOIRQ     PSR_I_BIT
 #define DAIF_ERRCTX            (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_MASK              (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
 
 /* mask/save/unmask/restore all exceptions, including interrupts. */
 static inline void local_daif_mask(void)
index 8e79ce9c3f5c43eca7a60207051d1ae8c6f5d671..76a14470258693743eb29a09028751e0f710a2c3 100644 (file)
@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
        ((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
 
 #define alloc_screen_info(x...)                &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+                                   struct screen_info *si)
+{
+}
 
 /* redeclare as 'hidden' so the compiler will generate relative references */
 extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
index b7ba75809751e62fb10a024cff7c687721f6e652..fb04f10a78ab35c462e251d0bacce3a05da9be32 100644 (file)
@@ -210,7 +210,11 @@ extern u64                 vabits_user;
 #define __tag_reset(addr)      untagged_addr(addr)
 #define __tag_get(addr)                (__u8)((u64)(addr) >> 56)
 #else
-#define __tag_set(addr, tag)   (addr)
+static inline const void *__tag_set(const void *addr, u8 tag)
+{
+       return addr;
+}
+
 #define __tag_reset(addr)      (addr)
 #define __tag_get(addr)                0
 #endif
@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define page_to_virt(page)     ({                                      \
        unsigned long __addr =                                          \
                ((__page_to_voff(page)) | PAGE_OFFSET);                 \
-       unsigned long __addr_tag =                                      \
-                __tag_set(__addr, page_kasan_tag(page));               \
+       const void *__addr_tag =                                        \
+               __tag_set((void *)__addr, page_kasan_tag(page));        \
        ((void *)__addr_tag);                                           \
 })
 
index 3f5461f7b5607bafe6dc8e84c42e43c0b702a10c..5fdcfe2373389ba630cf72ff94c062b61ca1b025 100644 (file)
@@ -447,8 +447,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                 PMD_TYPE_SECT)
 
 #if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud)          (0)
-#define pud_table(pud)         (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
 #else
 #define pud_sect(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
                                 PUD_TYPE_SECT)
index b1dd039023efb23238e62e555caa5541d042a2ae..1dcf63a9ac1f313975f7fd318f783acd3f21147c 100644 (file)
@@ -30,7 +30,7 @@
  * in the priority mask, it indicates that PSR.I should be set and
  * interrupt disabling temporarily does not rely on IRQ priorities.
  */
-#define GIC_PRIO_IRQON                 0xc0
+#define GIC_PRIO_IRQON                 0xe0
 #define GIC_PRIO_IRQOFF                        (GIC_PRIO_IRQON & ~0x80)
 #define GIC_PRIO_PSR_I_SET             (1 << 4)
 
index f4812777f5c594e234b124e728c9f58c9cd8d431..c50ee1b7d5cd61d2146253f4325dc97c1ad8cfdf 100644 (file)
@@ -16,6 +16,8 @@
 
 #define VDSO_HAS_CLOCK_GETRES          1
 
+#define VDSO_HAS_32BIT_FALLBACK                1
+
 static __always_inline
 int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
                          struct timezone *_tz)
@@ -51,6 +53,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return ret;
 }
 
+static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+       register struct old_timespec32 *ts asm("r1") = _ts;
+       register clockid_t clkid asm("r0") = _clkid;
+       register long ret asm ("r0");
+       register long nr asm("r7") = __NR_compat_clock_gettime;
+
+       asm volatile(
+       "       swi #0\n"
+       : "=r" (ret)
+       : "r" (clkid), "r" (ts), "r" (nr)
+       : "memory");
+
+       return ret;
+}
+
 static __always_inline
 int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
@@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return ret;
 }
 
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+       register struct old_timespec32 *ts asm("r1") = _ts;
+       register clockid_t clkid asm("r0") = _clkid;
+       register long ret asm ("r0");
+       register long nr asm("r7") = __NR_compat_clock_getres;
+
+       /* The checks below are required for ABI consistency with arm */
+       if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
+               return -EINVAL;
+
+       asm volatile(
+       "       swi #0\n"
+       : "=r" (ret)
+       : "r" (clkid), "r" (ts), "r" (nr)
+       : "memory");
+
+       return ret;
+}
+
 static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 {
        u64 res;
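Defining VDSO_HAS_32BIT_FALLBACK above signals the generic lib/vdso code that native 32-bit fallback syscalls exist, so the clock_gettime32_fallback()/clock_getres32_fallback() routines are invoked directly for compat entry points instead of going through the 64-bit fallbacks and converting the result; treat the exact dispatch in lib/vdso as an assumption here rather than something this hunk shows.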
index f29f36a65175c2f0f318710d2909d7635c1f848b..d19d14ba9ae401558e6e387bed2ef0b45aa211ce 100644 (file)
@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
        /*
         * Linux can handle differing I-cache policies. Userspace JITs will
@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
        case FTR_LOWER_SAFE:
                ret = new < cur ? new : cur;
                break;
+       case FTR_HIGHER_OR_ZERO_SAFE:
+               if (!cur || !new)
+                       break;
+               /* Fallthrough */
        case FTR_HIGHER_SAFE:
                ret = new > cur ? new : cur;
                break;
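The new policy, pulled out into a standalone illustration (not the kernel's literal code, which folds this into the switch above): zero means the field was not provided, so it beats any concrete value.

static s64 higher_or_zero_safe(s64 new, s64 cur)
{
        /* 0 is "biggest": if either CPU reports 0, the safe value is 0 */
        if (!new || !cur)
                return 0;
        return new > cur ? new : cur;
}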
index f8719bd308501e23c8ee2c15aa5010115fb08aa8..48222a4760c2e65229fc1f1cd39f6ecc3e1451a3 100644 (file)
@@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
 
        list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
 
-       rcu_read_lock();
-
+       /*
+        * Since the single-step exception disables interrupts, this function
+        * is not preemptible, and it is safe to walk the RCU list here.
+        */
        list_for_each_entry_rcu(hook, list, node)       {
                retval = hook->fn(regs, esr);
                if (retval == DBG_HOOK_HANDLED)
                        break;
        }
 
-       rcu_read_unlock();
-
        return retval;
 }
 NOKPROBE_SYMBOL(call_step_hook);
@@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 
        list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
 
-       rcu_read_lock();
+       /*
+        * Since the brk exception disables interrupts, this function is not
+        * preemptible, and it is safe to walk the RCU list here.
+        */
        list_for_each_entry_rcu(hook, list, node) {
                unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
 
                if ((comment & ~hook->mask) == hook->imm)
                        fn = hook->fn;
        }
-       rcu_read_unlock();
 
        return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
index dceb8452094876c58cfacf93f350292084c02998..38ee1514cd9cde9135dc6e272b72a50ee702eb91 100644 (file)
@@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
                        /* Aligned */
                        break;
                case 1:
-                       /* Allow single byte watchpoint. */
-                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
-                               break;
                case 2:
                        /* Allow halfword watchpoints and breakpoints. */
                        if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
                                break;
+
+                       /* Fallthrough */
+               case 3:
+                       /* Allow single byte watchpoint. */
+                       if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                               break;
+
+                       /* Fallthrough */
                default:
                        return -EINVAL;
                }
index 46e643e307082c0fee4129724ea74e9a98c99201..03ff15bffbb6db2d2e2cca75a20df06e6f174df0 100644 (file)
@@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
+                       /* Fall through */
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
+                       /* Fall through */
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
+                       /* Fall through */
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
@@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
+                       /* Fall through */
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_adrp(me, sechdrs, loc, val);
                        if (ovf && ovf != -ERANGE)
index bd5dfffca272c69dc2ccd4d7aa9f0f61aab5ceaa..c4452827419b0b4d947fb92c8bd2d281fc5d825e 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
 #include <asm/system_misc.h>
 #include <asm/insn.h>
 #include <linux/uaccess.h>
@@ -167,33 +168,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
        __this_cpu_write(current_kprobe, p);
 }
 
-/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
-       unsigned long spsr = regs->pstate;
-
-       if (mask)
-               spsr |= PSR_D_BIT;
-       else
-               spsr &= ~PSR_D_BIT;
-
-       regs->pstate = spsr;
-}
-
 /*
  * Interrupts need to be disabled before single-step mode is set, and not
  * reenabled until after single-step mode ends.
@@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
 static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
 {
-       kcb->saved_irqflag = regs->pstate;
+       kcb->saved_irqflag = regs->pstate & DAIF_MASK;
        regs->pstate |= PSR_I_BIT;
+       /* Unmask PSTATE.D for enabling software step exceptions. */
+       regs->pstate &= ~PSR_D_BIT;
 }
 
 static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
 {
-       if (kcb->saved_irqflag & PSR_I_BIT)
-               regs->pstate |= PSR_I_BIT;
-       else
-               regs->pstate &= ~PSR_I_BIT;
+       regs->pstate &= ~DAIF_MASK;
+       regs->pstate |= kcb->saved_irqflag;
 }
 
 static void __kprobes
@@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
 
                set_ss_context(kcb, slot);      /* mark pending ss */
 
-               spsr_set_debug_flag(regs, 0);
-
                /* IRQs and single stepping do not mix well. */
                kprobes_save_local_irqflag(kcb, regs);
                kernel_enable_single_step(regs);
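Net effect of the two kprobes hunks above: the saved flag now covers all four DAIF bits instead of just PSR_I_BIT, and PSTATE.D is cleared up front, since a set D bit would mask the very single-step exception that setup_singlestep() is about to arm. That in turn makes the removed spsr_set_debug_flag() helper unnecessary.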
index c4ae647d2306128d01f6c375e93a5b7ebb22c145..a5e8b3b9d798301285b2db09b031dbba6c481875 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 
 #include <asm/stack_pointer.h>
 #include <asm/stacktrace.h>
@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
                return 0;
        }
 }
+NOKPROBE_SYMBOL(save_return_addr);
 
 void *return_address(unsigned int level)
 {
@@ -49,3 +51,4 @@ void *return_address(unsigned int level)
                return NULL;
 }
 EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
index ea90d3bd92539eb7585f768d3b87e565f33af786..018a33e01b0ed2fdac2997b34d6065979bc343e1 100644 (file)
@@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
                                pr_crit("CPU%u: died during early boot\n", cpu);
                                break;
                        }
-                       /* Fall through */
                        pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+                       /* Fall through */
                case CPU_STUCK_IN_KERNEL:
                        pr_crit("CPU%u: is stuck in kernel\n", cpu);
                        if (status & CPU_STUCK_REASON_52_BIT_VA)
index 2b160ae594ebd98062ba070c4569728b95777ab1..a336cb124320f789b35ec778a73e8bc6791b8177 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/kernel.h>
 #include <linux/export.h>
 #include <linux/ftrace.h>
+#include <linux/kprobes.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
@@ -111,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
        return 0;
 }
+NOKPROBE_SYMBOL(unwind_frame);
 
 void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                     int (*fn)(struct stackframe *, void *), void *data)
@@ -125,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
                        break;
        }
 }
+NOKPROBE_SYMBOL(walk_stackframe);
 
 #ifdef CONFIG_STACKTRACE
 struct stack_trace_data {
index 9568c116ac7fc629994790a3d06125d5e2f44ca9..cfd65b63f36fd05f15557e872bb9e6ec96efa973 100644 (file)
@@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr,
        debug_fault_info[nr].name       = name;
 }
 
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+       /*
+        * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+        * already disabled to preserve the last enabled/disabled addresses.
+        */
+       if (interrupts_enabled(regs))
+               trace_hardirqs_off();
+
+       if (user_mode(regs)) {
+               RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+       } else {
+               /*
+                * We might have interrupted pretty much anything.  In
+                * fact, if we're a debug exception, we can even interrupt
+                * NMI processing. We don't want in_nmi() to return true
+                * here, but we do need to notify RCU.
+                */
+               rcu_nmi_enter();
+       }
+
+       preempt_disable();
+
+       /* This code is a bit fragile.  Test it. */
+       RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+       preempt_enable_no_resched();
+
+       if (!user_mode(regs))
+               rcu_nmi_exit();
+
+       if (interrupts_enabled(regs))
+               trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 
@@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
        if (cortex_a76_erratum_1463225_debug_handler(regs))
                return;
 
-       /*
-        * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
-        * already disabled to preserve the last enabled/disabled addresses.
-        */
-       if (interrupts_enabled(regs))
-               trace_hardirqs_off();
+       debug_exception_enter(regs);
 
        if (user_mode(regs) && !is_ttbr0_addr(pc))
                arm64_apply_bp_hardening();
@@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
                                 inf->sig, inf->code, (void __user *)pc, esr);
        }
 
-       if (interrupts_enabled(regs))
-               trace_hardirqs_on();
+       debug_exception_exit(regs);
 }
 NOKPROBE_SYMBOL(do_debug_exception);
index 14b1931be69c3acf78afc794d4b3f0a3bbac5ecc..b65b169778e31478fcc507c12c88fcb1436cdf6b 100644 (file)
@@ -9,6 +9,7 @@
 #if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
 
 /* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
+#define BUILD_VDSO32_64
 #undef CONFIG_64BIT
 #define CONFIG_32BIT 1
 #ifndef __ASSEMBLY__
index 8acb8fa1f8d69fefa3d0d47f2e72797aa392cb78..3b77d729057f94740f4df7bfc6d44c76e84b1319 100644 (file)
@@ -19,8 +19,6 @@
 
 KBUILD_IMAGE := vmlinuz
 
-KBUILD_DEFCONFIG := default_defconfig
-
 NM             = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS     += -D__hppa__=1
 LIBGCC         = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
@@ -182,5 +180,8 @@ define archhelp
        @echo  '  zinstall      - Install compressed vmlinuz kernel'
 endef
 
+archclean:
+       $(Q)$(MAKE) $(clean)=$(boot)
+
 archheaders:
        $(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
index 2da8624e5cf62a9ccebdd01cabe2f32e1465db40..1e5879c6a7522892e38e95334cf1fc3fc4519af9 100644 (file)
@@ -12,6 +12,7 @@ UBSAN_SANITIZE := n
 targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
 targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
 targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
+targets += real2.S firmware.c
 
 KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -55,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h
 CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
 $(obj)/vmlinux.lds: $(obj)/sizes.h
 
-$(obj)/vmlinux.bin: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
+$(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
 
 vmlinux.bin.all-y := $(obj)/vmlinux.bin
index bfd7872739a38d7a32b211803af5c60c671f6c0d..2ac3a643f2eb3cc2b4846296dd5ba6dfc9c1ff56 100644 (file)
@@ -48,8 +48,8 @@ SECTIONS
                *(.rodata.compressed)
        }
 
-       /* bootloader code and data starts behind area of extracted kernel */
-       . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+       /* bootloader code and data start at least behind the area of the extracted kernel */
+       . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
 
        /* align on next page boundary */
        . = ALIGN(4096);
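The MAX(ABSOLUTE(.), ...) form keeps the location counter from moving backwards: once the compressed payload grows past the computed end of the extracted kernel, the old fixed assignment would have asked ld to rewind '.', which it refuses to do. Presumably that link failure on large kernels is what this change addresses.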
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
deleted file mode 100644 (file)
index 5b877ca..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=16
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PA7100LC=y
-CONFIG_PREEMPT_VOLUNTARY=y
-CONFIG_IOMMU_CCIO=y
-CONFIG_GSC_LASI=y
-CONFIG_GSC_WAX=y
-CONFIG_EISA=y
-CONFIG_PCI=y
-CONFIG_GSC_DINO=y
-CONFIG_PCI_LBA=y
-CONFIG_PCCARD=y
-CONFIG_YENTA=y
-CONFIG_PD6729=y
-CONFIG_I82092=y
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_NET_KEY=m
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_DIAG=m
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_LLC2=m
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_PARPORT=y
-CONFIG_PARPORT_PC=m
-CONFIG_PARPORT_PC_PCMCIA=m
-CONFIG_PARPORT_1284=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_CRYPTOLOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=6144
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECS=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_NS87415=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_ST=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_LASI700=y
-CONFIG_SCSI_SYM53C8XX_2=y
-CONFIG_SCSI_ZALON=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_MD_RAID0=y
-CONFIG_MD_RAID1=y
-CONFIG_MD_RAID10=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=m
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_ACENIC=y
-CONFIG_TIGON3=y
-CONFIG_NET_TULIP=y
-CONFIG_TULIP=y
-CONFIG_LASI_82596=y
-CONFIG_PPP=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-# CONFIG_KEYBOARD_HIL_OLD is not set
-CONFIG_MOUSE_SERIAL=y
-CONFIG_LEGACY_PTY_COUNT=64
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_CS=y
-CONFIG_SERIAL_8250_NR_UARTS=17
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_MANY_PORTS=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_PRINTER=m
-CONFIG_PPDEV=m
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_DUMMY_CONSOLE_COLUMNS=128
-CONFIG_DUMMY_CONSOLE_ROWS=48
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_SOUND=y
-CONFIG_SND=y
-CONFIG_SND_DYNAMIC_MINORS=y
-CONFIG_SND_SEQUENCER=y
-CONFIG_SND_AD1889=y
-CONFIG_SND_HARMONY=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PANTHERLORD=y
-CONFIG_HID_PETALYNX=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SUNPLUS=y
-CONFIG_HID_TOPSEED=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_UHCI_HCD=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_VFAT_FS=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
-CONFIG_NFSD_V4=y
-CONFIG_CIFS=m
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=y
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=y
-CONFIG_DEBUG_FS=y
-CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_KEYS=y
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_HW is not set
-CONFIG_LIBCRC32C=m
-CONFIG_FONTS=y
diff --git a/arch/parisc/configs/defconfig b/arch/parisc/configs/defconfig
new file mode 100644 (file)
index 0000000..5b877ca
--- /dev/null
@@ -0,0 +1,206 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=m
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PA7100LC=y
+CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_IOMMU_CCIO=y
+CONFIG_GSC_LASI=y
+CONFIG_GSC_WAX=y
+CONFIG_EISA=y
+CONFIG_PCI=y
+CONFIG_GSC_DINO=y
+CONFIG_PCI_LBA=y
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_PD6729=y
+CONFIG_I82092=y
+CONFIG_BINFMT_MISC=m
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_DIAG=m
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_LLC2=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_PC_PCMCIA=m
+CONFIG_PARPORT_1284=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_NS87415=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_LASI700=y
+CONFIG_SCSI_SYM53C8XX_2=y
+CONFIG_SCSI_ZALON=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID10=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_TUN=m
+CONFIG_ACENIC=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_TULIP=y
+CONFIG_LASI_82596=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_KEYBOARD_HIL_OLD is not set
+CONFIG_MOUSE_SERIAL=y
+CONFIG_LEGACY_PTY_COUNT=64
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_CS=y
+CONFIG_SERIAL_8250_NR_UARTS=17
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_PRINTER=m
+CONFIG_PPDEV=m
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=128
+CONFIG_DUMMY_CONSOLE_ROWS=48
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_SEQUENCER=y
+CONFIG_SND_AD1889=y
+CONFIG_SND_HARMONY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_USB=y
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V4=y
+CONFIG_CIFS=m
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_DEBUG_FS=y
+CONFIG_HEADERS_INSTALL=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LIBCRC32C=m
+CONFIG_FONTS=y
index d784ccdd8fef6bfd329c48567c988c43b747d3dc..b6fb30f2e4bfd26531145d8612b2be10b253da21 100644 (file)
@@ -181,8 +181,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
        for (i = 0; i < ARRAY_SIZE(insn); i++)
                insn[i] = INSN_NOP;
 
+       __patch_text((void *)rec->ip, INSN_NOP);
        __patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
-                             insn, sizeof(insn));
+                             insn, sizeof(insn)-4);
        return 0;
 }
 #endif
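The reordering in ftrace_make_nop() patches the word at rec->ip to a NOP first and only then rewrites the sizeof(insn)-4 bytes in front of it, apparently so that a CPU racing through the call site never executes a half-patched sequence; read that as the likely rationale rather than something stated in this hunk.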
index b6c4b254901abb3874d7d51f244a9d9c468e6604..55c1396580a4b051c2b2efa6bbf9173a4966d152 100644 (file)
@@ -18,3 +18,4 @@ obj-y  := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
 # other very old or stripped-down PA-RISC CPUs -- not currently supported
 
 obj-$(CONFIG_MATH_EMULATION)   += unimplemented-math-emulation.o
+CFLAGS_REMOVE_fpudispatch.o    = -Wimplicit-fallthrough=3
index 6dd4669ce7a5572fb48dcd92f9b31b650e731af8..adbd5e2144a34303023372e99b0452b0ce43bafa 100644 (file)
@@ -66,6 +66,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
        case 0x30000000: /* coproc2 */
                if (bit22set(inst))
                        return VM_WRITE;
+               /* fall through */
 
        case 0x0: /* indexed/memory management */
                if (bit22set(inst)) {
index 9bf63f0ab253479f0cd3fb5dad43fb99ac6b0cbe..42b5ec2231008ede5252f96b506fd50752eef018 100644 (file)
@@ -21,7 +21,6 @@
        cpus {
                #address-cells = <1>;
                #size-cells = <0>;
-               timebase-frequency = <1000000>;
                cpu0: cpu@0 {
                        compatible = "sifive,e51", "sifive,rocket0", "riscv";
                        device_type = "cpu";
index b7b749b18853475394ab0545df73156c67740ed6..93205c0bf71df1b066f25abbaf8945ae0fbbe880 100644 (file)
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCIE_XILINX=y
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
 CONFIG_HVC_RISCV_SBI=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_DRM=y
 CONFIG_DRM_RADEON=y
@@ -66,8 +69,9 @@ CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
 CONFIG_VIRTIO_MMIO=y
-CONFIG_SPI_SIFIVE=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_AUTOFS4_FS=y
@@ -83,8 +87,4 @@ CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
 CONFIG_CRYPTO_DEV_VIRTIO=y
 CONFIG_PRINTK_TIME=y
-CONFIG_SPI=y
-CONFIG_MMC_SPI=y
-CONFIG_MMC=y
-CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_RCU_TRACE is not set
index f1d6ffe43e42879e66f75ced65b5b5809feba23f..49a5852fd07dd5a023c1899e80bffe542f6dedeb 100644 (file)
@@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
 # these symbols in the kernel code rather than hand-coded addresses.
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-       -Wl,--hash-style=both
+       -Wl,--build-id -Wl,--hash-style=both
 $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
        $(call if_changed,vdsold)
 
index 082905d97309c1d9f2abe20d55b7f78121c19799..1c3b2b25763721899d06b06484104cc47dffee83 100644 (file)
@@ -8,6 +8,7 @@ void store_ipl_parmblock(void);
 void setup_boot_command_line(void);
 void parse_boot_command_line(void);
 void setup_memory_end(void);
+void verify_facilities(void);
 void print_missing_facilities(void);
 unsigned long get_random_base(unsigned long safe_addr);
 
index 3bdd8132e56bcb1230638e3480c49a2023863018..c34a6387ce384be2a72810d3cd77fa8786ec41ef 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/timex.h>
 #include <asm/sclp.h>
 #include "compressed/decompressor.h"
+#include "boot.h"
 
 #define PRNG_MODE_TDES  1
 #define PRNG_MODE_SHA512 2
index e26d4413d34c139d603ecc180da09c6583f2d0c7..74e78ec5beb688f30eec648db92d14e655be0675 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_TASKSTATS=y
@@ -18,55 +19,71 @@ CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
+CONFIG_CHSC_SCH=y
+CONFIG_VFIO_CCW=m
+CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
 CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_REFCOUNT_FULL=y
+CONFIG_LOCK_EVENT_COUNTS=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_SHA256=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
 CONFIG_MINIX_SUBPARTITION=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=512
-CONFIG_NUMA=y
-CONFIG_PREEMPT=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_VERIFY_SIG=y
-CONFIG_EXPOLINE=y
-CONFIG_EXPOLINE_AUTO=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -82,17 +99,8 @@ CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
 CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_PCI=y
-CONFIG_PCI_DEBUG=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_S390=y
-CONFIG_CHSC_SCH=y
-CONFIG_VFIO_AP=m
-CONFIG_VFIO_CCW=m
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
+CONFIG_PERCPU_STATS=y
+CONFIG_GUP_BENCHMARK=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -121,9 +129,6 @@ CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_TCP_CONG_ADVANCED=y
@@ -139,10 +144,6 @@ CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
@@ -264,11 +265,8 @@ CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=y
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -287,10 +285,7 @@ CONFIG_IP_NF_SECURITY=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -309,7 +304,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
@@ -375,9 +370,11 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
@@ -395,7 +392,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
@@ -415,17 +411,19 @@ CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
 CONFIG_BLK_DEV_DM=m
+CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -445,23 +443,78 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLXFW is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
 CONFIG_PPPOE=m
 CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
@@ -473,10 +526,13 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_NULL_TTY=m
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+CONFIG_PPS=m
+# CONFIG_PTP_1588_CLOCK is not set
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
@@ -498,8 +554,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
-CONFIG_S390_AP_IOMMU=y
 CONFIG_S390_CCW_IOMMU=y
+CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -519,6 +575,7 @@ CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_BTRFS_DEBUG=y
+CONFIG_BTRFS_ASSERT=y
 CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
@@ -552,8 +609,10 @@ CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
 CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
 CONFIG_ROMFS_FS=m
 CONFIG_NFS_FS=m
 CONFIG_NFS_V3_ACL=y
@@ -564,7 +623,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
 CONFIG_CIFS_STATS2=y
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
@@ -580,19 +638,112 @@ CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
+CONFIG_UNICODE=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_IMA=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
+CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
+CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CORDIC=m
+CONFIG_CRC32_SELFTEST=y
+CONFIG_CRC4=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_DMA_API_DEBUG=y
+CONFIG_STRING_SELFTEST=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DYNAMIC_DEBUG=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
-CONFIG_READABLE_ASM=y
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_PAGE_OWNER=y
 CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
@@ -645,7 +796,6 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_HIST_TRIGGERS=y
-CONFIG_DMA_API_DEBUG=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
 CONFIG_TEST_SORT=y
@@ -657,85 +807,3 @@ CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
-CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=m
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_FORTIFY_SOURCE=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_INTEGRITY_SIGNATURE=y
-CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
-CONFIG_IMA=y
-CONFIG_IMA_DEFAULT_HASH_SHA256=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_APPRAISE=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
-CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_ZCRYPT=m
-CONFIG_PKEY=m
-CONFIG_CRYPTO_PAES_S390=m
-CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
-CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_PKCS7_MESSAGE_PARSER=y
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_CRC7=m
-CONFIG_CRC8=m
-CONFIG_RANDOM32_SELFTEST=y
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
index e4bc40073003f96200f716946f40dd4f23337be7..68d3ca83302b1c2ae61727a974a0188aba1dfe4b 100644 (file)
@@ -12,30 +12,51 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
-# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_HUGETLB=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
+# CONFIG_NUMA_EMU is not set
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
+CONFIG_CHSC_SCH=y
+CONFIG_VFIO_CCW=m
+CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
 CONFIG_OPROFILE=m
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -47,27 +68,18 @@ CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 CONFIG_MODULE_SIG=y
 CONFIG_MODULE_SIG_SHA256=y
-CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
 CONFIG_MINIX_SUBPARTITION=y
 CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=512
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_VERIFY_SIG=y
-CONFIG_EXPOLINE=y
-CONFIG_EXPOLINE_AUTO=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -81,16 +93,8 @@ CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
 CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_S390=y
-CONFIG_CHSC_SCH=y
-CONFIG_VFIO_AP=m
-CONFIG_VFIO_CCW=m
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
+CONFIG_PERCPU_STATS=y
+CONFIG_GUP_BENCHMARK=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -119,9 +123,6 @@ CONFIG_NET_IPVTI=m
 CONFIG_INET_AH=m
 CONFIG_INET_ESP=m
 CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
 CONFIG_INET_DIAG=m
 CONFIG_INET_UDP_DIAG=m
 CONFIG_TCP_CONG_ADVANCED=y
@@ -137,10 +138,6 @@ CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
 CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
 CONFIG_IPV6_VTI=m
 CONFIG_IPV6_SIT=m
 CONFIG_IPV6_GRE=m
@@ -262,11 +259,8 @@ CONFIG_IP_VS_SED=m
 CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_CONNTRACK_IPV4=m
 CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=y
-CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP_NF_MATCH_AH=m
 CONFIG_IP_NF_MATCH_ECN=m
@@ -285,10 +279,7 @@ CONFIG_IP_NF_SECURITY=m
 CONFIG_IP_NF_ARPTABLES=m
 CONFIG_IP_NF_ARPFILTER=m
 CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
 CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
 CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP6_NF_MATCH_AH=m
 CONFIG_IP6_NF_MATCH_EUI64=m
@@ -307,7 +298,7 @@ CONFIG_IP6_NF_RAW=m
 CONFIG_IP6_NF_SECURITY=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
 CONFIG_RDS=m
 CONFIG_RDS_RDMA=m
 CONFIG_RDS_TCP=m
@@ -372,9 +363,11 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_UEVENT_HELPER=y
 CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
 CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
@@ -383,6 +376,7 @@ CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XPRAM is not set
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_RBD=m
 CONFIG_BLK_DEV_NVME=m
@@ -392,7 +386,6 @@ CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
 CONFIG_BLK_DEV_SR=m
 CONFIG_CHR_DEV_SG=y
 CONFIG_CHR_DEV_SCH=m
@@ -412,17 +405,19 @@ CONFIG_SCSI_DH_RDAC=m
 CONFIG_SCSI_DH_HP_SW=m
 CONFIG_SCSI_DH_EMC=m
 CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
 CONFIG_MD_LINEAR=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
 CONFIG_BLK_DEV_DM=m
+CONFIG_DM_UNSTRIPED=m
 CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -435,6 +430,7 @@ CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
 CONFIG_DM_SWITCH=m
+CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
 CONFIG_DUMMY=m
@@ -442,23 +438,78 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
 CONFIG_VIRTIO_NET=m
 CONFIG_NLMON=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
 CONFIG_MLX5_CORE=m
 CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLXFW is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
 CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
 CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
 CONFIG_PPPOE=m
 CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
@@ -470,17 +521,21 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_NULL_TTY=m
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_PTP_1588_CLOCK is not set
 # CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
 CONFIG_DRM=y
 CONFIG_DRM_VIRTIO_GPU=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
@@ -495,8 +550,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
-CONFIG_S390_AP_IOMMU=y
 CONFIG_S390_CCW_IOMMU=y
+CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -546,8 +601,10 @@ CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
 CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
 CONFIG_ROMFS_FS=m
 CONFIG_NFS_FS=m
 CONFIG_NFS_V3_ACL=y
@@ -558,7 +615,6 @@ CONFIG_NFSD_V3_ACL=y
 CONFIG_NFSD_V4=y
 CONFIG_NFSD_V4_SECURITY_LABEL=y
 CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
 CONFIG_CIFS_STATS2=y
 CONFIG_CIFS_WEAK_PW_HASH=y
 CONFIG_CIFS_UPCALL=y
@@ -574,31 +630,7 @@ CONFIG_NLS_ISO8859_1=m
 CONFIG_NLS_ISO8859_15=m
 CONFIG_NLS_UTF8=m
 CONFIG_DLM=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_RCU_TORTURE_TEST=m
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-CONFIG_HIST_TRIGGERS=y
-CONFIG_LKDTM=m
-CONFIG_PERCPU_TEST=m
-CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
+CONFIG_UNICODE=y
 CONFIG_PERSISTENT_KEYRINGS=y
 CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
@@ -606,7 +638,6 @@ CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
@@ -615,31 +646,42 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
+CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_XXHASH=m
 CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_RMD128=m
 CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
 CONFIG_CRYPTO_CAST5=m
@@ -649,16 +691,19 @@ CONFIG_CRYPTO_KHAZAD=m
 CONFIG_CRYPTO_SALSA20=m
 CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
 CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
@@ -669,12 +714,34 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CORDIC=m
+CONFIG_CRC4=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_LKDTM=m
+CONFIG_PERCPU_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_S390_PTDUMP=y
index d92bab844b7352a0b200c67c798bfd4cad6a06cb..be09a208b608437af55f2c3f76d4b444b73ac431 100644 (file)
@@ -1,27 +1,33 @@
 # CONFIG_SWAP is not set
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CPU_ISOLATION is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_COMPAT_BRK is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_IBM_PARTITION=y
-CONFIG_DEFAULT_DEADLINE=y
 CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
-# CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
 # CONFIG_ARCH_RANDOM is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
-# CONFIG_CHECK_STACK is not set
+# CONFIG_RELOCATABLE is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
 CONFIG_CRASH_DUMP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 # CONFIG_SECCOMP is not set
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 CONFIG_NET=y
 # CONFIG_IUCV is not set
 CONFIG_DEVTMPFS=y
@@ -43,7 +49,6 @@ CONFIG_ZFCP=y
 # CONFIG_HVC_IUCV is not set
 # CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
-# CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
 # CONFIG_S390_TAPE is not set
 # CONFIG_VMCP is not set
@@ -56,6 +61,7 @@ CONFIG_RAW_DRIVER=y
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_DIMLIB is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
@@ -64,7 +70,4 @@ CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 # CONFIG_FTRACE is not set
-# CONFIG_PFAULT is not set
-# CONFIG_S390_HYPFS_FS is not set
-# CONFIG_VIRTUALIZATION is not set
-# CONFIG_S390_GUEST is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
index db5ef22c46e4e959eba66e203495f10c45740c7a..f647d565bd6dc1374151f65ab2f76f84d7322e16 100644 (file)
@@ -28,7 +28,7 @@
  * @sliba: storage list information block address
  * @sla: storage list address
  * @slsba: storage list state block address
- * @akey: access key for DLIB
+ * @akey: access key for SLIB
  * @bkey: access key for SL
  * @ckey: access key for SBALs
  * @dkey: access key for SLSB
@@ -50,11 +50,10 @@ struct qdesfmt0 {
 /**
  * struct qdr - queue description record (QDR)
  * @qfmt: queue format
- * @pfmt: implementation dependent parameter format
  * @ac: adapter characteristics
  * @iqdcnt: input queue descriptor count
  * @oqdcnt: output queue descriptor count
- * @iqdsz: inpout queue descriptor size
+ * @iqdsz: input queue descriptor size
  * @oqdsz: output queue descriptor size
  * @qiba: queue information block address
  * @qkey: queue information block key
@@ -62,8 +61,7 @@ struct qdesfmt0 {
  */
 struct qdr {
        u32 qfmt   : 8;
-       u32 pfmt   : 8;
-       u32        : 8;
+       u32        : 16;
        u32 ac     : 8;
        u32        : 8;
        u32 iqdcnt : 8;
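[editor's note] The qdr change above folds the never-used @pfmt byte into the adjacent reserved bits. A minimal userspace sketch (illustrative only, not the kernel struct) showing that collapsing two adjacent 8-bit reserved bit-fields into one 16-bit field leaves the surrounding layout untouched:

	#include <assert.h>

	struct old_layout {
		unsigned int qfmt : 8;
		unsigned int pfmt : 8;	/* never set by any caller */
		unsigned int      : 8;	/* reserved */
		unsigned int ac   : 8;
	};

	struct new_layout {
		unsigned int qfmt : 8;
		unsigned int      : 16;	/* reserved, absorbs the old pfmt bits */
		unsigned int ac   : 8;
	};

	int main(void)
	{
		/* both variants still pack into the same 32-bit word */
		assert(sizeof(struct old_layout) == sizeof(struct new_layout));
		return 0;
	}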
@@ -327,6 +325,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * struct qdio_initialize - qdio initialization data
  * @cdev: associated ccw device
  * @q_format: queue format
+ * @qdr_ac: feature flags to set
  * @adapter_name: name for the adapter
  * @qib_param_field_format: format for qib_parm_field
  * @qib_param_field: pointer to 128 bytes or NULL, if no param field
@@ -338,6 +337,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
  * @input_handler: handler to be called for input queues
  * @output_handler: handler to be called for output queues
  * @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @scan_threshold: # of in-use buffers that triggers scan on output queue
  * @int_parm: interruption parameter
  * @input_sbal_addr_array:  address of no_input_qs * 128 pointers
  * @output_sbal_addr_array: address of no_output_qs * 128 pointers
index c5cfff7b1f91e6dbed77fd4a6ff8761c7a90e661..70bd65724ec4c06310380465c39709f6d3b73ff5 100644 (file)
@@ -84,6 +84,7 @@ extern int noexec_disabled;
 extern int memory_end_set;
 extern unsigned long memory_end;
 extern unsigned long max_physmem_end;
+extern unsigned long __swsusp_reset_dma;
 
 #define MACHINE_IS_VM          (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
 #define MACHINE_IS_KVM         (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
index 1dded39239f86605ef0599159ead4c6143f17978..3b664cb3ec4d3b57ba3ca53fa2c3af1d8f92f9e1 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/elf.h>
+#include <asm/kexec.h>
 
 int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
                         unsigned long addr)
index d4e031f7b9c818bb106616ed47736b6dc80e68df..5f1fd1581330fd855201ecfa44650467330fe993 100644 (file)
@@ -34,7 +34,7 @@ struct cf_diag_csd {          /* Counter set data per CPU */
        unsigned char start[PAGE_SIZE]; /* Counter set at event start */
        unsigned char data[PAGE_SIZE];  /* Counter set at event delete */
 };
-DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
+static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
 
 /* Counter sets are stored as data stream in a page sized memory buffer and
  * exported to user space via raw data attached to the event sample data.
index 96580590ccaf04c4f110c6a3e05c0078945bc3de..29d9470dbcebf9dd511a8d9e363976b8ef30146e 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/types.h>
 #include <linux/export.h>
 #include <linux/raid/xor.h>
+#include <asm/xor.h>
 
 static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
 {
index 63507662828fd90e008a7e1b254496a57d548a16..7b0bb475c166496ff054f18a780748a25c028a1c 100644 (file)
@@ -327,6 +327,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
        case VM_FAULT_BADACCESS:
                if (access == VM_EXEC && signal_return(regs) == 0)
                        break;
+               /* fallthrough */
        case VM_FAULT_BADMAP:
                /* Bad memory access. Check if it is kernel or user space. */
                if (user_mode(regs)) {
@@ -336,7 +337,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
                        do_sigsegv(regs, si_code);
                        break;
                }
+               /* fallthrough */
        case VM_FAULT_BADCONTEXT:
+               /* fallthrough */
        case VM_FAULT_PFAULT:
                do_no_context(regs);
                break;
index 1e668b95e0c664352dac7a8ea8c4c4d189d07996..39c3a6e3d26218161bd63e5746d862b9af335edd 100644 (file)
@@ -2424,8 +2424,8 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
  * This function is assumed to be called with the guest_table_lock
  * held.
  */
-bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
-                                  unsigned long gaddr)
+static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+                                         unsigned long gaddr)
 {
        if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
                return false;
index ae91429129a6411b8cd324f911ec1b5b57085365..ba71a63cdac479d6428159ffbb0e24ba6c5a40f3 100644 (file)
@@ -96,6 +96,8 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 
 #else
 
+#define VDSO_HAS_32BIT_FALLBACK        1
+
 static __always_inline
 long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
 {
@@ -113,6 +115,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return ret;
 }
 
+static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+       long ret;
+
+       asm (
+               "mov %%ebx, %%edx \n"
+               "mov %[clock], %%ebx \n"
+               "call __kernel_vsyscall \n"
+               "mov %%edx, %%ebx \n"
+               : "=a" (ret), "=m" (*_ts)
+               : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
+               : "edx");
+
+       return ret;
+}
+
 static __always_inline
 long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
                           struct timezone *_tz)
@@ -148,6 +167,23 @@ clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
        return ret;
 }
 
+static __always_inline
+long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+       long ret;
+
+       asm (
+               "mov %%ebx, %%edx \n"
+               "mov %[clock], %%ebx \n"
+               "call __kernel_vsyscall \n"
+               "mov %%edx, %%ebx \n"
+               : "=a" (ret), "=m" (*_ts)
+               : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
+               : "edx");
+
+       return ret;
+}
+
 #endif
 
 #ifdef CONFIG_PARAVIRT_CLOCK
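[editor's note] The two 32-bit fallbacks above hand-save %ebx because it is the PIC base register on i386 and cannot simply be listed as clobbered, so it is parked in %edx around the call. A standalone sketch of the same register dance, with int $0x80 standing in for __kernel_vsyscall (assumes an x86 host, gcc -m32, and the default 32-bit time_t):

	#include <stdio.h>
	#include <sys/syscall.h>
	#include <time.h>

	static long my_clock_gettime(long clkid, struct timespec *ts)
	{
		long ret;

		asm (
			"mov %%ebx, %%edx \n"	/* stash the PIC register */
			"mov %[clock], %%ebx \n"
			"int $0x80 \n"		/* preserves all regs but %eax */
			"mov %%edx, %%ebx \n"	/* restore it */
			: "=a" (ret), "=m" (*ts)
			: "0" (SYS_clock_gettime), [clock] "g" (clkid), "c" (ts)
			: "edx");

		return ret;
	}

	int main(void)
	{
		struct timespec ts;

		if (my_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}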
index 60c2200200547a7ad620cb54deb588deeee39521..80828b95a51f031653fe7be541cf17e536869f52 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
 #include <asm/processor.h>
 #include <asm/coprocessor.h>
 #include <asm/thread_info.h>
index 28cffaaf9d82ad9bc392ae0288e2ece506d5bcd7..f616b16c1f0be267aa5c116b14a35b095a5ac5d5 100644 (file)
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
                if (device->power.flags.power_resources)
                        result = acpi_power_transition(device, target_state);
        } else {
+               int cur_state = device->power.state;
+
                if (device->power.flags.power_resources) {
                        result = acpi_power_transition(device, ACPI_STATE_D0);
                        if (result)
                                goto end;
                }
 
-               if (device->power.state == ACPI_STATE_D0) {
+               if (cur_state == ACPI_STATE_D0) {
                        int psc;
 
                        /* Nothing to do here if _PSC is not present. */
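[editor's note] The fix caches device->power.state before acpi_power_transition() can update it, so the later D0 check tests the state the device was in rather than the one it was just moved to. A minimal sketch of the snapshot-before-mutation pattern (hypothetical names):

	#include <stdio.h>

	struct dev { int state; };

	static void power_transition(struct dev *d, int target)
	{
		d->state = target;	/* side effect that broke the old check */
	}

	int main(void)
	{
		struct dev d = { .state = 3 };
		int cur_state = d.state;	/* snapshot first */

		power_transition(&d, 0);

		if (cur_state == 0)
			printf("was already on\n");
		else
			printf("was in D%d, now powered up\n", cur_state);
		return 0;
	}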
index 0e28270b0fd81046b2b2a434b0dd65edb4b282a6..aad6be5c0af0a5f4da721eed1fd8b77cd25f3b94 100644 (file)
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
        acpi_gpe_apply_masked_gpes();
        acpi_update_all_gpes();
 
+       /*
+        * Although we call __add_memory(), which is documented to require
+        * device_hotplug_lock, taking it is not necessary here because this
+        * code runs early, before userspace or any other code path can
+        * trigger hotplug/hotunplug operations.
+        */
        mutex_lock(&acpi_scan_lock);
        /*
         * Enumerate devices in the ACPI namespace.
index 3a36e76eca831db26715248b7665a674801aed28..9e9583a6bba99295601cc92399c94e85f623cef3 100644 (file)
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
                hpriv->phys[port] = NULL;
                rc = 0;
                break;
+       case -EPROBE_DEFER:
+               /* Do not complain yet */
+               break;
 
        default:
                dev_err(dev,
index 173e6f2dd9af0f12afdc1fee7e372cfa4291e0aa..eefda51f97d351bda5d8437e2d83aaeae16b30f6 100644 (file)
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
        unsigned int ret;
        struct rm_feature_desc *desc;
        struct ata_taskfile tf;
-       static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
+       static const char cdb[ATAPI_CDB_LEN] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
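[editor's note] Sizing cdb[] as ATAPI_CDB_LEN makes the designated initializer zero-fill the tail, so the full command length handed to the taskfile is defined rather than read past a short array. A userspace sketch of the same effect (the ATAPI_CDB_LEN and GPCMD_GET_CONFIGURATION values are copied from the kernel headers):

	#include <stdio.h>

	#define ATAPI_CDB_LEN 16

	int main(void)
	{
		static const char cdb[ATAPI_CDB_LEN] = {
			0x46,	/* GPCMD_GET_CONFIGURATION */
			2, 0, 3,
		};

		printf("cdb is %zu bytes; trailing bytes zeroed: %s\n",
		       sizeof(cdb),
		       cdb[ATAPI_CDB_LEN - 1] == 0 ? "yes" : "no");
		return 0;
	}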
index 85f20e371f2fadd32b9820d9b7cbd912aeba2a71..bd7d3bb8b890b187bf94d19e78562d0868a3ee85 100644 (file)
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
                /* MSch: invalidate default_params */
                default_params[drive].blocks  = 0;
                set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+               /* Fall through */
        case FDFMTEND:
        case FDFLUSH:
                /* invalidate the buffer track to force a reread */
index 44c9985f352abd0cfb4ddda003302e09f76ce4bd..3036883fc9f878f1714dcdfe1d6803a27853304a 100644 (file)
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        struct file     *file;
        struct inode    *inode;
        struct address_space *mapping;
+       struct block_device *claimed_bdev = NULL;
        int             lo_flags = 0;
        int             error;
        loff_t          size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
         * here to avoid changing device under exclusive owner.
         */
        if (!(mode & FMODE_EXCL)) {
-               bdgrab(bdev);
-               error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
-               if (error)
+               claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+               if (IS_ERR(claimed_bdev)) {
+                       error = PTR_ERR(claimed_bdev);
                        goto out_putf;
+               }
        }
 
        error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        mutex_unlock(&loop_ctl_mutex);
        if (partscan)
                loop_reread_partitions(lo, bdev);
-       if (!(mode & FMODE_EXCL))
-               blkdev_put(bdev, mode | FMODE_EXCL);
+       if (claimed_bdev)
+               bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
        return 0;
 
 out_unlock:
        mutex_unlock(&loop_ctl_mutex);
 out_bdev:
-       if (!(mode & FMODE_EXCL))
-               blkdev_put(bdev, mode | FMODE_EXCL);
+       if (claimed_bdev)
+               bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
 out_putf:
        fput(file);
 out:
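[editor's note] For context, the path above is what userspace hits when attaching a backing file: bd_start_claiming() pins the block device while loop_set_fd() runs, and bd_abort_claiming() releases the claim on both the success and error legs. A sketch of the triggering ioctl sequence (device and file paths are assumptions; needs root):

	#include <fcntl.h>
	#include <linux/loop.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int loop_fd = open("/dev/loop0", O_RDWR);
		int file_fd = open("/tmp/backing.img", O_RDWR);

		if (loop_fd < 0 || file_fd < 0)
			return 1;

		/* drives loop_set_fd() and its claiming dance above */
		if (ioctl(loop_fd, LOOP_SET_FD, file_fd) < 0) {
			perror("LOOP_SET_FD");
			return 1;
		}
		printf("attached\n");

		ioctl(loop_fd, LOOP_CLR_FD, 0);	/* detach again */
		close(file_fd);
		close(loop_fd);
		return 0;
	}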
index 9bcde2325893183167dcaed84f299af777d62a39..e21d2ded732b735c13f7ebbd02533ed081afe0ca 100644 (file)
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
                                 struct block_device *bdev)
 {
        sock_shutdown(nbd);
-       kill_bdev(bdev);
+       __invalidate_device(bdev, true);
        nbd_bdev_reset(bdev);
        if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
                               &nbd->config->runtime_flags))
index a55be205b91a3ebfc843849c345d3b8a91745015..dbfe34664633a1c4783d359ad9b0eb5971c15430 100644 (file)
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        ath = kzalloc(sizeof(*ath), GFP_KERNEL);
        if (!ath)
                return -ENOMEM;
index 8905ad2edde743742f9706fccfd7350d5fb2709e..ae2624fce913471bf4c8b4a25f90fcffc0a8b965 100644 (file)
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
 
        bt_dev_dbg(hu->hdev, "hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
        if (!bcm)
                return -ENOMEM;
index 207bae5e0d4631b7b398d8b6d59dc912c4004eed..31f25153087d760a4ed872da5b830ee9ff29c496 100644 (file)
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        intel = kzalloc(sizeof(*intel), GFP_KERNEL);
        if (!intel)
                return -ENOMEM;
index 8950e07889fef64c75103d68e27688a54b995c85..85a30fb9177bbe719666d9e00f63e219f3ef3f49 100644 (file)
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        return 0;
 }
 
+/* Check that the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+       /* serdev nodes check if the needed operations are present */
+       if (hu->serdev)
+               return true;
+
+       if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+               return true;
+
+       return false;
+}
+
 /* Flow control or un-flow control the device */
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
 {
index f98e5cc343b21c22f7c054da0b315d9995cf818f..fbc3f7c3a5c710c0493a66111b7a7dedb7d664fd 100644 (file)
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
 
        BT_DBG("hu %p", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
        if (!mrvl)
                return -ENOMEM;
index 9a5c9c1f9484822a69357c40533649c72027f7da..82a0a3691a63c701075e03ed44a408d13d7c4e66 100644 (file)
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
 
        BT_DBG("hu %p qca_open", hu);
 
+       if (!hci_uart_has_flow_control(hu))
+               return -EOPNOTSUPP;
+
        qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
        if (!qca)
                return -ENOMEM;
index f11af3912ce6c16fd92720e8dccd53d5f3933589..6ab631101019c4198d24266936c179c262501c03 100644 (file)
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
 int hci_uart_init_ready(struct hci_uart *hu);
 void hci_uart_init_work(struct work_struct *work);
 void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
 void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
 void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
                         unsigned int oper_speed);
index 57204335c5f556a47f79a6748d72b4a92e2a57ca..285e0b8f9a9744b88e4f6358dbef11c8439e6f7e 100644 (file)
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
        struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
        struct ipmb_request_elem *queue_elem;
        struct ipmb_msg msg;
-       ssize_t ret;
+       ssize_t ret = 0;
 
        memset(&msg, 0, sizeof(msg));
 
index 44db83a6d01c265afef52df71b416d126871c8d3..44a46dcc0518b6516764f9a78832a7cbc427f047 100644 (file)
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
                        continue;
 
                div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+               if (div > GENERATED_MAX_DIV + 1)
+                       div = GENERATED_MAX_DIV + 1;
 
                clk_generated_best_diff(req, parent, parent_rate, div,
                                        &best_diff, &best_rate);
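[editor's note] DIV_ROUND_CLOSEST() can yield a divisor one past what the hardware divider field encodes when the requested rate is very low, hence the clamp. A sketch of the arithmetic (the 255 maximum is an assumption based on the at91 driver's GENERATED_MAX_DIV):

	#include <stdio.h>

	#define GENERATED_MAX_DIV 255	/* assumed hardware maximum */

	static unsigned int div_round_closest(unsigned int n, unsigned int d)
	{
		return (n + d / 2) / d;
	}

	int main(void)
	{
		unsigned int parent_rate = 100000000, req_rate = 32768;
		unsigned int div = div_round_closest(parent_rate, req_rate);

		/* 3052 without the clamp; the field can only encode 256 */
		if (div > GENERATED_MAX_DIV + 1)
			div = GENERATED_MAX_DIV + 1;

		printf("div = %u -> rate = %u Hz\n", div, parent_rate / div);
		return 0;
	}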
index 1aa5f40592514db3e015c67e176d43c44ce22a69..73b7e238eee75e020373b98e3727277676920c21 100644 (file)
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
        FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
 };
 
+static const struct mtk_fixed_factor top_early_divs[] = {
+       FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
 static const struct mtk_fixed_factor top_divs[] = {
-       FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
-               2),
        FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
                2),
        FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
        return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 }
 
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+       int i;
+
+       top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+       for (i = 0; i < CLK_TOP_NR_CLK; i++)
+               top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+       mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+                       top_clk_data);
+
+       of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+                       clk_mt8183_top_init_early);
+
 static int clk_mt8183_top_probe(struct platform_device *pdev)
 {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base;
-       struct clk_onecell_data *clk_data;
        struct device_node *node = pdev->dev.of_node;
 
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
-       clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
        mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
-               clk_data);
+               top_clk_data);
+
+       mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+               top_clk_data);
 
-       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+       mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
 
        mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
-               node, &mt8183_clk_lock, clk_data);
+               node, &mt8183_clk_lock, top_clk_data);
 
        mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
-               base, &mt8183_clk_lock, clk_data);
+               base, &mt8183_clk_lock, top_clk_data);
 
        mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
-               base, &mt8183_clk_lock, clk_data);
+               base, &mt8183_clk_lock, top_clk_data);
 
        mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
-               clk_data);
+               top_clk_data);
 
-       return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+       return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
 }
 
 static int clk_mt8183_infra_probe(struct platform_device *pdev)
index 52bbb9ce3807db3164b0a1b6fc0eba9b903e8156..d4075b13067429cded1088e6423b77bb78b5309b 100644 (file)
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
        unsigned int reg = id / 32;
        unsigned int bit = id % 32;
        u32 bitmask = BIT(bit);
-       unsigned long flags;
-       u32 value;
 
        dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
 
        /* Reset module */
-       spin_lock_irqsave(&priv->rmw_lock, flags);
-       value = readl(priv->base + SRCR(reg));
-       value |= bitmask;
-       writel(value, priv->base + SRCR(reg));
-       spin_unlock_irqrestore(&priv->rmw_lock, flags);
+       writel(bitmask, priv->base + SRCR(reg));
 
        /* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
        udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
        unsigned int reg = id / 32;
        unsigned int bit = id % 32;
        u32 bitmask = BIT(bit);
-       unsigned long flags;
-       u32 value;
 
        dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
 
-       spin_lock_irqsave(&priv->rmw_lock, flags);
-       value = readl(priv->base + SRCR(reg));
-       value |= bitmask;
-       writel(value, priv->base + SRCR(reg));
-       spin_unlock_irqrestore(&priv->rmw_lock, flags);
+       writel(bitmask, priv->base + SRCR(reg));
        return 0;
 }
 
index 91d3d721c801e13ba1f44e1c26139922dcc73a00..3c219af2510016ed76f3069743abd23a6c0072f9 100644 (file)
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
        tristate "Clock support for Spreadtrum SoCs"
        depends on ARCH_SPRD || COMPILE_TEST
        default ARCH_SPRD
+       select REGMAP_MMIO
 
 if SPRD_COMMON_CLK
 
index 3ee99d070608240914f64839c7ab36336caabe21..f497003f119c993a55e18ac0c54fcf95c138a2be 100644 (file)
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
        }
 
        if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
-               irqflags |= IRQF_TRIGGER_RISING;
+               irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+                       IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
        if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
-               irqflags |= IRQF_TRIGGER_FALLING;
+               irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+                       IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
        irqflags |= IRQF_ONESHOT;
 
        INIT_KFIFO(le->events);
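[editor's note] With the hunk above, edge polarity now follows the logical line value: requesting a falling edge on an active-low line arms the physical rising edge. A userspace sketch via the GPIO character device (chip path and line offset are assumptions):

	#include <fcntl.h>
	#include <linux/gpio.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		struct gpioevent_request req;
		int fd = open("/dev/gpiochip0", O_RDONLY);

		if (fd < 0)
			return 1;

		memset(&req, 0, sizeof(req));
		req.lineoffset = 4;	/* arbitrary example line */
		req.handleflags = GPIOHANDLE_REQUEST_INPUT |
				  GPIOHANDLE_REQUEST_ACTIVE_LOW;
		/* logical falling edge == physical rising edge here */
		req.eventflags = GPIOEVENT_REQUEST_FALLING_EDGE;
		strcpy(req.consumer_label, "edge-demo");

		if (ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req) == 0) {
			struct gpioevent_data ev;

			if (read(req.fd, &ev, sizeof(ev)) == sizeof(ev))
				printf("event id %u at %llu ns\n", ev.id,
				       (unsigned long long)ev.timestamp);
			close(req.fd);
		}
		close(fd);
		return 0;
	}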
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        for (i = 0; i < chip->ngpio; i++) {
                struct gpio_desc *desc = &gdev->descs[i];
 
-               if (chip->get_direction && gpiochip_line_is_valid(chip, i))
-                       desc->flags = !chip->get_direction(chip, i) ?
-                                       (1 << FLAG_IS_OUT) : 0;
-               else
-                       desc->flags = !chip->direction_input ?
-                                       (1 << FLAG_IS_OUT) : 0;
+               if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+                       if (!chip->get_direction(chip, i))
+                               set_bit(FLAG_IS_OUT, &desc->flags);
+                       else
+                               clear_bit(FLAG_IS_OUT, &desc->flags);
+               } else {
+                       if (!chip->direction_input)
+                               set_bit(FLAG_IS_OUT, &desc->flags);
+                       else
+                               clear_bit(FLAG_IS_OUT, &desc->flags);
+               }
        }
 
        acpi_gpiochip_add(chip);
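[editor's note] Switching to set_bit()/clear_bit() matters because desc->flags may already carry bits (such as active-low state set earlier), and a whole-word assignment would silently wipe them. A toy sketch of the difference (flag values are illustrative, not the gpiolib ones):

	#include <stdio.h>

	#define FLAG_IS_OUT	(1UL << 0)
	#define FLAG_ACTIVE_LOW	(1UL << 1)

	int main(void)
	{
		unsigned long flags = FLAG_ACTIVE_LOW;	/* set before init */

		/* old: flags = FLAG_IS_OUT;  -- would lose FLAG_ACTIVE_LOW */
		flags |= FLAG_IS_OUT;			/* new: touch one bit */

		printf("active-low preserved: %d\n",
		       !!(flags & FLAG_ACTIVE_LOW));
		return 0;
	}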
index 1d80222587ad2a8553c24ddeed23db1abadac907..3c88420e3497a44a1d8320f171701a18ee408641 100644 (file)
@@ -394,7 +394,7 @@ config DRM_R128
 config DRM_I810
        tristate "Intel I810"
        # !PREEMPT because of missing ioctl locking
-       depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+       depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
        help
          Choose this option if you have an Intel I810 graphics card.  If M is
          selected, the module will be called i810.  AGP support is required
index 1d3ee9c42f7eda2f811be1d16ba9e554c498bf57..6a5c96e519b15983bc3fb1ff1037727245245511 100644 (file)
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                        adev->asic_type != CHIP_FIJI &&
                        adev->asic_type != CHIP_POLARIS10 &&
                        adev->asic_type != CHIP_POLARIS11 &&
-                       adev->asic_type != CHIP_POLARIS12) ?
+                       adev->asic_type != CHIP_POLARIS12 &&
+                       adev->asic_type != CHIP_VEGAM) ?
                        VI_BO_SIZE_ALIGN : 1;
 
        mapping_flags = AMDGPU_VM_PAGE_READABLE;
index e069de8b54e619fbd9a630e34ad9f757731d27ab..4e4094f842e728f6705486010ce6e94fd9605377 100644 (file)
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                        return r;
                }
 
-               fence = amdgpu_ctx_get_fence(ctx, entity,
-                                            deps[i].handle);
+               fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+               amdgpu_ctx_put(ctx);
+
+               if (IS_ERR(fence))
+                       return PTR_ERR(fence);
+               else if (!fence)
+                       continue;
 
                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
-                       struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+                       struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;
 
+                       s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }
 
-               if (IS_ERR(fence)) {
-                       r = PTR_ERR(fence);
-                       amdgpu_ctx_put(ctx);
+               r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+               dma_fence_put(fence);
+               if (r)
                        return r;
-               } else if (fence) {
-                       r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
-                                       true);
-                       dma_fence_put(fence);
-                       amdgpu_ctx_put(ctx);
-                       if (r)
-                               return r;
-               }
        }
        return 0;
 }
index 6d54decef7f8156d0a20883151dab0af1c90d6de..5652cc72ed3a9b3adcf004a654e8be57b9cd2552 100644 (file)
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
        bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
 
-       data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+       data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
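[editor's note] kcalloc() hands back a zeroed buffer, so if the GPR dump fills fewer than the 1024 slots that are later copied to userspace, the remainder is zeros rather than stale kernel memory. A userspace analogy:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		size_t n = 1024;
		unsigned int *data = calloc(n, sizeof(*data));	/* zeroed */

		if (!data)
			return 1;

		/* pretend the producer only filled the first 10 slots... */
		for (size_t i = 0; i < 10; i++)
			data[i] = (unsigned int)i;

		/* ...the rest is guaranteed zero, not leftover memory */
		printf("data[1023] = %u\n", data[1023]);
		free(data);
		return 0;
	}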
 
index 03ca8c69114fc22ebec9c25601c420a5d9d6bf2c..2b546567853b45ee155fcc9faa9592b46d38066c 100644 (file)
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type pm;
 
-       if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
-               pm = amdgpu_smu_get_current_power_state(adev);
-       else if (adev->powerplay.pp_funcs->get_current_power_state)
+       if (is_support_sw_smu(adev)) {
+               if (adev->smu.ppt_funcs->get_current_power_state)
+                       pm = amdgpu_smu_get_current_power_state(adev);
+               else
+                       pm = adev->pm.dpm.user_state;
+       } else if (adev->powerplay.pp_funcs->get_current_power_state) {
                pm = amdgpu_dpm_get_current_power_state(adev);
-       else
+       } else {
                pm = adev->pm.dpm.user_state;
+       }
 
        return snprintf(buf, PAGE_SIZE, "%s\n",
                        (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
                goto fail;
        }
 
-       if (adev->powerplay.pp_funcs->dispatch_tasks) {
+       if (is_support_sw_smu(adev)) {
+               mutex_lock(&adev->pm.mutex);
+               adev->pm.dpm.user_state = state;
+               mutex_unlock(&adev->pm.mutex);
+       } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
                amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
        } else {
                mutex_lock(&adev->pm.mutex);
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
                seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
 
-       /* UVD clocks */
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
-               if (!value) {
-                       seq_printf(m, "UVD: Disabled\n");
-               } else {
-                       seq_printf(m, "UVD: Enabled\n");
-                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
-                               seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
-                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
-                               seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+       if (adev->asic_type > CHIP_VEGA20) {
+               /* VCN clocks */
+               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+                       if (!value) {
+                               seq_printf(m, "VCN: Disabled\n");
+                       } else {
+                               seq_printf(m, "VCN: Enabled\n");
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+                       }
                }
-       }
-       seq_printf(m, "\n");
+               seq_printf(m, "\n");
+       } else {
+               /* UVD clocks */
+               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+                       if (!value) {
+                               seq_printf(m, "UVD: Disabled\n");
+                       } else {
+                               seq_printf(m, "UVD: Enabled\n");
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+                       }
+               }
+               seq_printf(m, "\n");
 
-       /* VCE clocks */
-       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
-               if (!value) {
-                       seq_printf(m, "VCE: Disabled\n");
-               } else {
-                       seq_printf(m, "VCE: Enabled\n");
-                       if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
-                               seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+               /* VCE clocks */
+               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+                       if (!value) {
+                               seq_printf(m, "VCE: Disabled\n");
+                       } else {
+                               seq_printf(m, "VCE: Enabled\n");
+                               if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+                                       seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+                       }
                }
        }
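
A minimal sketch of the sensor-read convention used throughout the hunk above: amdgpu_dpm_read_sensor() returns 0 on success, and the clock sensors report in units of 10 kHz, so value/100 prints MHz. The helper below is hypothetical (not part of the patch), shown only to make the repeated pattern explicit:

        static void print_clk_mhz(struct seq_file *m, struct amdgpu_device *adev,
                                  enum amd_pp_sensors sensor, const char *label)
        {
                uint32_t value;
                int size = sizeof(value);

                /* 0 means success; clock sensors report in 10 kHz units */
                if (!amdgpu_dpm_read_sensor(adev, sensor, (void *)&value, &size))
                        seq_printf(m, "\t%u MHz (%s)\n", value / 100, label);
        }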
 
index 9f661bf96ed0344b20c47d30fb874e2c229ff289..5b1ebb7f995ae1d5023c8260d83e33c6ad313a43 100644 (file)
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
        AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
        AMDGPU_PP_SENSOR_MIN_FAN_RPM,
        AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+       AMDGPU_PP_SENSOR_VCN_POWER_STATE,
 };
 
 enum amd_pp_task {
index c097113c39769b3240bcb0c9f81943efd0334a2e..0685a3388e38ce7ae2667319a22726b0b26c947e 100644 (file)
@@ -306,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
 
        /* power states not supported */
        memset(state_info, 0, sizeof(struct pp_states_info));
-       state_info->nums = 0;
+       state_info->nums = 1;
+       state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
 
        return 0;
 }
@@ -337,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
                *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
                *size = 4;
                break;
+       case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+               *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -723,6 +728,12 @@ static int smu_sw_init(void *handle)
                return ret;
        }
 
+       ret = smu_register_irq_handler(smu);
+       if (ret) {
+               pr_err("Failed to register smc irq handler!\n");
+               return ret;
+       }
+
        return 0;
 }
 
@@ -732,6 +743,9 @@ static int smu_sw_fini(void *handle)
        struct smu_context *smu = &adev->smu;
        int ret;
 
+       kfree(smu->irq_source);
+       smu->irq_source = NULL;
+
        ret = smu_smc_table_sw_fini(smu);
        if (ret) {
                pr_err("Failed to sw fini smc table!\n");
@@ -1088,10 +1102,6 @@ static int smu_hw_init(void *handle)
        if (ret)
                goto failed;
 
-       ret = smu_register_irq_handler(smu);
-       if (ret)
-               goto failed;
-
        if (!smu->pm_enabled)
                adev->pm.dpm_enabled = false;
        else
@@ -1121,9 +1131,6 @@ static int smu_hw_fini(void *handle)
        kfree(table_context->overdrive_table);
        table_context->overdrive_table = NULL;
 
-       kfree(smu->irq_source);
-       smu->irq_source = NULL;
-
        ret = smu_fini_fb_allocations(smu);
        if (ret)
                return ret;
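
Taken together, the four hunks above give the SMU interrupt source a balanced lifecycle: it is now allocated and registered in smu_sw_init() and freed in smu_sw_fini(), instead of being registered in hw_init but freed in hw_fini. In outline (a condensed view, not literal code from the patch):

        smu_sw_init():  smu_register_irq_handler(smu);   /* allocate + register */
        smu_sw_fini():  kfree(smu->irq_source);          /* paired teardown */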
index e32ae9d3373ca3e45fcea4a793be0d3951adf2a5..18e780f566fab78923a415b3f5ce77fe9d06bdfa 100644 (file)
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
                          void *value, int *size)
 {
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
        uint32_t sclk, mclk;
        int ret = 0;
 
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        case AMDGPU_PP_SENSOR_GPU_TEMP:
                *((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
                break;
+       case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+       case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+               *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
+               *size = 4;
+               break;
        default:
                ret = -EINVAL;
                break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
 
 static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
 {
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
        if (bgate) {
                amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_GATE);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                        PPSMC_MSG_PowerDownVcn, 0);
+               smu10_data->vcn_power_gated = true;
        } else {
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_PowerUpVcn, 0);
                amdgpu_device_ip_set_powergating_state(hwmgr->adev,
                                                AMD_IP_BLOCK_TYPE_VCN,
                                                AMD_PG_STATE_UNGATE);
+               smu10_data->vcn_power_gated = false;
        }
 }
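
The sensor case added earlier in this file reads back the flag recorded here, with the polarity inverted: a gated VCN block reports power state 0. A hypothetical condensed view of the round trip:

        smu10_powergate_vcn(hwmgr, true);   /* records vcn_power_gated = true */
        /* AMDGPU_PP_SENSOR_VCN_POWER_STATE then reads 0 -> "VCN: Disabled" in debugfs */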
 
index 22e46a289a162da4fef7aa1dd1dc0c3434da27c5..208e6711d5068fc16d7359acaa198190ea7a5a94 100644 (file)
@@ -429,7 +429,6 @@ struct smu_table_context
        struct smu_table                *tables;
        uint32_t                        table_count;
        struct smu_table                memory_pool;
-       uint16_t                        software_shutdown_temp;
        uint8_t                         thermal_controller_type;
        uint16_t                        TDPODLimit;
 
index 4aaad255a288cd1f6f3f6a076a39dad7c997e295..cc0a3b2256aff71ff8d2f4c03499d45190b088fe 100644 (file)
@@ -23,6 +23,7 @@
 
 #include "pp_debug.h"
 #include <linux/firmware.h>
+#include <linux/pci.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
 #include "atomfirmware.h"
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
 {
        int ret = 0;
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
 
-       if (enable && power_gate->uvd_gated) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
-                       if (ret)
-                               return ret;
-               }
-               power_gate->uvd_gated = false;
+       if (enable) {
+               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+               if (ret)
+                       return ret;
        } else {
-               if (!enable && !power_gate->uvd_gated) {
-                       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
-                               ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
-                               if (ret)
-                                       return ret;
-                       }
-                       power_gate->uvd_gated = true;
-               }
+               ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+               if (ret)
+                       return ret;
        }
 
-       return 0;
+       ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
+
+       return ret;
 }
 
 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
@@ -1573,7 +1566,7 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu)
        uint32_t sclk_freq = 0, uclk_freq = 0;
        uint32_t uclk_level = 0;
 
-       switch (adev->rev_id) {
+       switch (adev->pdev->revision) {
        case 0xf0: /* XTX */
        case 0xc0:
                sclk_freq = NAVI10_PEAK_SCLK_XTX;
@@ -1620,6 +1613,22 @@ static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_fo
        return ret;
 }
 
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+                                               struct smu_temperature_range *range)
+{
+       struct smu_table_context *table_context = &smu->smu_table;
+       struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+       if (!range || !powerplay_table)
+               return -EINVAL;
+
+       /* The unit is degrees Celsius */
+       range->min = 0;
+       range->max = powerplay_table->software_shutdown_temp;
+
+       return 0;
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
        .tables_init = navi10_tables_init,
        .alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1657,6 +1666,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .get_ppfeature_status = navi10_get_ppfeature_status,
        .set_ppfeature_status = navi10_set_ppfeature_status,
        .set_performance_level = navi10_set_performance_level,
+       .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
index caca9091bfcc3566fd6d08a3e14d0d64c5b393d4..ac5b26228e753e2071d32c10e3562aabc97c449f 100644 (file)
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
                                       struct smu_temperature_range *range)
 {
        struct amdgpu_device *adev = smu->adev;
-       int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+       int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
        uint32_t val;
 
        if (!range)
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
        if (high > range->max)
                high = range->max;
 
+       low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+       high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
        if (low > high)
                return -EINVAL;
 
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
        val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
-       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
-       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+       val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
        val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
 
        WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
 
        if (!smu->pm_enabled)
                return ret;
+
        ret = smu_get_thermal_temperature_range(smu, &range);
+       if (ret)
+               return ret;
 
        if (smu->smu_table.thermal_controller_type) {
                ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
                        return ret;
        }
 
-       adev->pm.dpm.thermal.min_temp = range.min;
-       adev->pm.dpm.thermal.max_temp = range.max;
-       adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
-       adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
-       adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
-       adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
-       adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
-       adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
-       adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+       adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
 
        return ret;
 }
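
The scaling above encodes the new unit convention: get_thermal_temperature_range() now returns plain degrees Celsius, and the consumer multiplies by SMU_TEMPERATURE_UNITS_PER_CENTIGRADES (defined as 1000 in this tree, i.e. millidegrees) when filling adev->pm.dpm.thermal. A worked example under that assumption:

        range.max = 90;    /* software_shutdown_temp, in degrees C */
        adev->pm.dpm.thermal.max_temp =
                range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;   /* 90000 = 90.000 C */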
index dc139a6feeb1d8f3718b4838909505c9474936b5..dd6fd1c8bf24e0db3a4d16be0f7817dade68dde5 100644 (file)
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
        memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
               sizeof(PPTable_t));
 
-       table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
        table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
        table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
 
@@ -3234,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
        return 0;
 }
 
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
-       {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
-       { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
 static int vega20_get_thermal_temperature_range(struct smu_context *smu,
                                                struct smu_temperature_range *range)
 {
-
+       struct smu_table_context *table_context = &smu->smu_table;
+       ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
        PPTable_t *pptable = smu->smu_table.driver_pptable;
 
-       if (!range)
+       if (!range || !powerplay_table)
                return -EINVAL;
 
-       memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
-       range->max = pptable->TedgeLimit *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->hotspot_crit_max = pptable->ThotspotLimit *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->mem_crit_max = pptable->ThbmLimit *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-       range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+       /* The unit is degrees Celsius */
+       range->min = 0;
+       range->max = powerplay_table->usSoftwareShutdownTemp;
+       range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+       range->hotspot_crit_max = pptable->ThotspotLimit;
+       range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+       range->mem_crit_max = pptable->ThbmLimit;
+       range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
 
        return 0;
index bc19dbd531efc237b1407bcee204e28c52474d72..359030d5d818d0bef7b64a29267ca919d24cb1f9 100644 (file)
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
        bochs->dev->mode_config.fb_base = bochs->fb_base;
        bochs->dev->mode_config.preferred_depth = 24;
        bochs->dev->mode_config.prefer_shadow = 0;
+       bochs->dev->mode_config.prefer_shadow_fbdev = 1;
        bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 
        bochs->dev->mode_config.funcs = &bochs_mode_funcs;
index ee777469293a4edffcd32fc27d3676539e962365..e4e22bbae2a7c7c07e32a433210725b85f90f533 100644 (file)
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
 config DRM_LVDS_ENCODER
        tristate "Transparent parallel to LVDS encoder support"
        depends on OF
+       select DRM_KMS_HELPER
        select DRM_PANEL_BRIDGE
        help
          Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
 
 config DRM_TOSHIBA_TC358764
        tristate "TC358764 DSI/LVDS bridge"
-       depends on DRM && DRM_PANEL
        depends on OF
        select DRM_MIPI_DSI
+       select DRM_KMS_HELPER
+       select DRM_PANEL
        help
          Toshiba TC358764 DSI/LVDS bridge driver.
 
index 410572f142577e91116159246c56b26c67a4d572..e1dafb0cc5e2fbe963720e47739d9203f138a5aa 100644 (file)
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
        struct drm_device *dev = client->dev;
        struct drm_client_buffer *buffer;
        struct drm_gem_object *obj;
-       void *vaddr;
        int ret;
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
 
        buffer->gem = obj;
 
+       return buffer;
+
+err_delete:
+       drm_client_buffer_delete(buffer);
+
+       return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not ref'counted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ *     The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+       void *vaddr;
+
+       if (buffer->vaddr)
+               return buffer->vaddr;
+
        /*
         * FIXME: The dependency on GEM here isn't required, we could
         * convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
         * fd_install step out of the driver backend hooks, to make that
         * final step optional for internal users.
         */
-       vaddr = drm_gem_vmap(obj);
-       if (IS_ERR(vaddr)) {
-               ret = PTR_ERR(vaddr);
-               goto err_delete;
-       }
+       vaddr = drm_gem_vmap(buffer->gem);
+       if (IS_ERR(vaddr))
+               return vaddr;
 
        buffer->vaddr = vaddr;
 
-       return buffer;
-
-err_delete:
-       drm_client_buffer_delete(buffer);
+       return vaddr;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap);
 
-       return ERR_PTR(ret);
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+       drm_gem_vunmap(buffer->gem, buffer->vaddr);
+       buffer->vaddr = NULL;
 }
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
 
 static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
 {
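
A minimal usage sketch of the new vmap/vunmap pairing described in the kernel-doc above (error handling trimmed; the blit step is a placeholder):

        void *vaddr = drm_client_buffer_vmap(buffer);

        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        /* ... write into vaddr, e.g. blit a dirty rectangle ... */

        drm_client_buffer_vunmap(buffer);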
index 1984e5c54d580504fb3417d50afca9e1dd6aa105..a7ba5b4902d664fb72917b9f999b683c3fcaec58 100644 (file)
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
        struct drm_clip_rect *clip = &helper->dirty_clip;
        struct drm_clip_rect clip_copy;
        unsigned long flags;
+       void *vaddr;
 
        spin_lock_irqsave(&helper->dirty_lock, flags);
        clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
 
        /* call dirty callback only when it has been really touched */
        if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+
                /* Generic fbdev uses a shadow buffer */
-               if (helper->buffer)
+               if (helper->buffer) {
+                       vaddr = drm_client_buffer_vmap(helper->buffer);
+                       if (IS_ERR(vaddr))
+                               return;
                        drm_fb_helper_dirty_blit_real(helper, &clip_copy);
-               helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+               }
+               if (helper->fb->funcs->dirty)
+                       helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+                                                &clip_copy, 1);
+
+               if (helper->buffer)
+                       drm_client_buffer_vunmap(helper->buffer);
        }
 }
 
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
 }
 EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
 
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+       struct drm_device *dev = fb_helper->dev;
+       struct drm_framebuffer *fb = fb_helper->fb;
+
+       return dev->mode_config.prefer_shadow_fbdev ||
+              dev->mode_config.prefer_shadow ||
+              fb->funcs->dirty;
+}
+
 static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
                                u32 width, u32 height)
 {
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
        struct drm_clip_rect *clip = &helper->dirty_clip;
        unsigned long flags;
 
-       if (!helper->fb->funcs->dirty)
+       if (!drm_fbdev_use_shadow_fb(helper))
                return;
 
        spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
        struct drm_framebuffer *fb;
        struct fb_info *fbi;
        u32 format;
+       void *vaddr;
 
        DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
                      sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
        fbi->fbops = &drm_fbdev_fb_ops;
        fbi->screen_size = fb->height * fb->pitches[0];
        fbi->fix.smem_len = fbi->screen_size;
-       fbi->screen_buffer = buffer->vaddr;
-       /* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
-       if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
-               fbi->fix.smem_start =
-                       page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
+
        drm_fb_helper_fill_info(fbi, fb_helper, sizes);
 
-       if (fb->funcs->dirty) {
+       if (drm_fbdev_use_shadow_fb(fb_helper)) {
                struct fb_ops *fbops;
                void *shadow;
 
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
                fbi->fbdefio = &drm_fbdev_defio;
 
                fb_deferred_io_init(fbi);
+       } else {
+               /* buffer is mapped for HW framebuffer */
+               vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+               if (IS_ERR(vaddr))
+                       return PTR_ERR(vaddr);
+
+               fbi->screen_buffer = vaddr;
+               /* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+               if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+                       fbi->fix.smem_start =
+                               page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
        }
 
        return 0;
index 60ce4a8ad9e1518a7e7dd287c92d158cb897b8a4..6f7d3b3b3628d2229a62f44d59ae6e3e13e645a4 100644 (file)
@@ -2,6 +2,7 @@
 config DRM_EXYNOS
        tristate "DRM Support for Samsung SoC EXYNOS Series"
        depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+       depends on MMU
        select DRM_KMS_HELPER
        select VIDEOMODE_HELPERS
        select SND_SOC_HDMI_CODEC if SND_SOC
index a594ab7be2c055f0861b00c6052aa82cf8b09499..164d914cbe9a49a437dedb86e5c5214ac645f0a5 100644 (file)
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc;
 module_param_named(fimc_devs, fimc_mask, uint, 0644);
 MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
 
-#define get_fimc_context(dev)  platform_get_drvdata(to_platform_device(dev))
+#define get_fimc_context(dev)  dev_get_drvdata(dev)
 
 enum {
        FIMC_CLK_LCLK,
index 50904eee96f7a4732ca029660a8f00e4bc84d2ec..2a3382d43bc9020722617f421f9d3e7c6ae6bed1 100644 (file)
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d)
 static int g2d_init_cmdlist(struct g2d_data *g2d)
 {
        struct device *dev = g2d->dev;
-       struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+       struct g2d_cmdlist_node *node;
        int nr;
        int ret;
        struct g2d_buf_info *buf_info;
index 1e4b21c49a06fd86b520012a0079f04be5d2e8c1..1c524db9570f8734ce508c868b4975080c4d4fd9 100644 (file)
@@ -58,7 +58,7 @@
 #define GSC_COEF_DEPTH 3
 #define GSC_AUTOSUSPEND_DELAY          2000
 
-#define get_gsc_context(dev)   platform_get_drvdata(to_platform_device(dev))
+#define get_gsc_context(dev)   dev_get_drvdata(dev)
 #define gsc_read(offset)               readl(ctx->regs + (offset))
 #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
 
index 9af096479e1cde34ebfbb0cdc0de6710941bf908..b24ba948b725eb7db36be6de088366fc769ed6ec 100644 (file)
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
        scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
        do {
                cpu_relax();
-       } while (retry > 1 &&
+       } while (--retry > 1 &&
                 scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
        do {
                cpu_relax();
                scaler_write(1, SCALER_INT_EN);
-       } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+       } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
 
        return retry ? 0 : -EIO;
 }
index c4710889cb321aa7d4d7e58365519dbc06d09f56..3ef4e9f573cfe1a825fc7296c27a822f071517f5 100644 (file)
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        }
 
        if (bdb->version >= 226) {
-               u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+               u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
 
                wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
                switch (wakeup_time) {
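
With psr2_tp2_tp3_wakeup_time now a single u32 in bdb_psr, each of the 16 panels owns a 2-bit code inside it; the shift-and-mask above selects bits 2*panel_type+1:2*panel_type. A worked example (panel_type is whatever the VBT reports):

        /* e.g. panel_type == 5 selects bits 11:10 */
        u32 code = (psr->psr2_tp2_tp3_wakeup_time >> (2 * 5)) & 0x3;   /* 0..3 */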
index 753ac3165061c606ca62d7372cc4825f16fd7dcd..7b908e10d32e65f6808dcffea5d19d8d50c295db 100644 (file)
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
                clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
                bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
 
+               bi->num_qgv_points = qi.num_points;
+
                for (j = 0; j < qi.num_points; j++) {
                        const struct intel_qgv_point *sp = &qi.points[j];
                        int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
                        bi->deratedbw[j] = min(maxdebw,
                                               bw * 9 / 10); /* 90% */
 
-                       DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+                       DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
                                      i, j, bi->num_planes, bi->deratedbw[j]);
                }
 
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
 {
        int i;
 
-       /* Did we initialize the bw limits successfully? */
-       if (dev_priv->max_bw[0].num_planes == 0)
-               return UINT_MAX;
-
        for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
                const struct intel_bw_info *bi =
                        &dev_priv->max_bw[i];
 
+               /*
+                * Pcode will not expose all QGV points when
+                * SAGV is forced to off/min/med/max.
+                */
+               if (qgv_point >= bi->num_qgv_points)
+                       return UINT_MAX;
+
                if (num_planes >= bi->num_planes)
                        return bi->deratedbw[qgv_point];
        }
index 8993ab283562b2dc8fa65ec54ad5a0c05353c010..0d19bbd081225d02daba99ccd8b4a78b7c1a3e36 100644 (file)
@@ -2239,6 +2239,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
        if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
                min_cdclk = max(2 * 96000, min_cdclk);
 
+       /*
+        * "For DP audio configuration, cdclk frequency shall be set to
+        *  meet the following requirements:
+        *  DP Link Frequency(MHz) | Cdclk frequency(MHz)
+        *  270                    | 320 or higher
+        *  162                    | 200 or higher"
+        */
+       if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+           intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+               min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
        /*
         * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
         * than 320000KHz.
index 30b97ded6fddbbee2eb03e1c0abd8ada9702cd49..592b92782fabfa37fd7fb026fa4525e868db3b8a 100644 (file)
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
                /* FIXME: assert CPU port conditions for SNB+ */
        }
 
-       trace_intel_pipe_enable(dev_priv, pipe);
+       trace_intel_pipe_enable(crtc);
 
        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
         */
        assert_planes_disabled(crtc);
 
-       trace_intel_pipe_disable(dev_priv, pipe);
+       trace_intel_pipe_disable(crtc);
 
        reg = PIPECONF(cpu_transcoder);
        val = I915_READ(reg);
index c93ad512014ce2cca2757e4c6e3c0735a135b7c4..2d1939db108f9ef6f858faada9c69b854967b2ed 100644 (file)
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
 #define ICL_AUX_PW_TO_CH(pw_idx)       \
        ((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
 
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx)   \
+       ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
 static void
 icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
                                 struct i915_power_well *power_well)
 {
-       enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+       int pw_idx = power_well->desc->hsw.idx;
+       bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+       enum aux_ch aux_ch;
        u32 val;
 
+       aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+                         ICL_AUX_PW_TO_CH(pw_idx);
        val = I915_READ(DP_AUX_CH_CTL(aux_ch));
        val &= ~DP_AUX_CH_CTL_TBT_IO;
-       if (power_well->desc->hsw.is_tc_tbt)
+       if (is_tbt)
                val |= DP_AUX_CH_CTL_TBT_IO;
        I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
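
The new macro mirrors ICL_AUX_PW_TO_CH(): TBT power-well indices start at ICL_PW_CTL_IDX_AUX_TBT1 and their AUX channels at AUX_CH_C, so the mapping is TBT1->C, TBT2->D, and so on. For instance:

        /* pw_idx == ICL_PW_CTL_IDX_AUX_TBT2 maps to AUX_CH_D */
        enum aux_ch ch = ICL_TBT_AUX_PW_TO_CH(ICL_PW_CTL_IDX_AUX_TBT2);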
 
index 2f4894e9a03df336a903f24ca13b63d06ba66e97..5ddbe71ab423412307337dd9c1ae900d0b9ebc47 100644 (file)
@@ -478,13 +478,13 @@ struct psr_table {
        /* TP wake up time in multiple of 100 */
        u16 tp1_wakeup_time;
        u16 tp2_tp3_wakeup_time;
-
-       /* PSR2 TP2/TP3 wakeup time for 16 panels */
-       u32 psr2_tp2_tp3_wakeup_time;
 } __packed;
 
 struct bdb_psr {
        struct psr_table psr_table[16];
+
+       /* PSR2 TP2/TP3 wakeup time for 16 panels */
+       u32 psr2_tp2_tp3_wakeup_time;
 } __packed;
 
 /*
index 05011d4a3b88097a58ab6b59ff1b5dd7d42b614e..914b5d4112bbebfb375d9a1a14fd095374d304ab 100644 (file)
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
        i915_gem_restore_gtt_mappings(i915);
        i915_gem_restore_fences(i915);
 
+       if (i915_gem_init_hw(i915))
+               goto err_wedged;
+
        /*
         * As we didn't flush the kernel context before suspend, we cannot
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
-       intel_gt_resume(i915);
-
-       if (i915_gem_init_hw(i915))
+       if (intel_gt_resume(i915))
                goto err_wedged;
 
        intel_uc_resume(i915);
index 528b6167833456d9fc6c5a0f8a53197c980a80d1..2caa594322bc3f977e6d5720bf1f4a545fd89b43 100644 (file)
@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
-                       set_page_dirty(page);
+                       /*
+                        * As this may not be anonymous memory (e.g. shmem)
+                        * but exist on a real mapping, we have to lock
+                        * the page in order to dirty it -- holding
+                        * the page reference is not sufficient to
+                        * prevent the inode from being truncated.
+                        * Play safe and take the lock.
+                        */
+                       set_page_dirty_lock(page);
 
                mark_page_accessed(page);
                put_page(page);
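
For reference, set_page_dirty_lock() in mm/page-writeback.c is essentially the locked variant the comment asks for; roughly:

        int set_page_dirty_lock(struct page *page)
        {
                int ret;

                lock_page(page);
                ret = set_page_dirty(page);
                unlock_page(page);
                return ret;
        }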
index 2c454f227c2e47cf94d05e8b2333e998fff5f6f0..23120901c55f410140f0b6029547de14014bb96e 100644 (file)
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active)
        if (ce->state)
                __context_unpin_state(ce->state);
 
+       intel_ring_unpin(ce->ring);
        intel_context_put(ce);
 }
 
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
 
        intel_context_get(ce);
 
+       err = intel_ring_pin(ce->ring);
+       if (err)
+               goto err_put;
+
        if (!ce->state)
                return 0;
 
        err = __context_pin_state(ce->state, flags);
-       if (err) {
-               i915_active_cancel(&ce->active);
-               intel_context_put(ce);
-               return err;
-       }
+       if (err)
+               goto err_ring;
 
        /* Preallocate tracking nodes */
        if (!i915_gem_context_is_kernel(ce->gem_context)) {
                err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                              ce->engine);
-               if (err) {
-                       i915_active_release(&ce->active);
-                       return err;
-               }
+               if (err)
+                       goto err_state;
        }
 
        return 0;
+
+err_state:
+       __context_unpin_state(ce->state);
+err_ring:
+       intel_ring_unpin(ce->ring);
+err_put:
+       intel_context_put(ce);
+       i915_active_cancel(&ce->active);
+       return err;
 }
 
 void intel_context_active_release(struct intel_context *ce)
index 7fd33e81c2d97dad11122b94d1bc515a03085725..f25632c9b292b2a841fdd75eb162e4b82d1bf02d 100644 (file)
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
 u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
 {
        const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+       unsigned int slice = fls(sseu->slice_mask) - 1;
+       unsigned int subslice;
        u32 mcr_s_ss_select;
-       u32 slice = fls(sseu->slice_mask);
-       u32 subslice = fls(sseu->subslice_mask[slice]);
+
+       GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+       subslice = fls(sseu->subslice_mask[slice]);
+       GEM_BUG_ON(!subslice);
+       subslice--;
 
        if (IS_GEN(dev_priv, 10))
                mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
        struct i915_gpu_error * const error = &engine->i915->gpu_error;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
+       unsigned long flags;
 
        if (header) {
                va_list ap;
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                   i915_reset_engine_count(error, engine),
                   i915_reset_count(error));
 
-       rcu_read_lock();
-
        drm_printf(m, "\tRequests:\n");
 
+       spin_lock_irqsave(&engine->active.lock, flags);
        rq = intel_engine_find_active_request(engine);
        if (rq) {
                print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 
                print_request_ring(m, rq);
        }
-
-       rcu_read_unlock();
+       spin_unlock_irqrestore(&engine->active.lock, flags);
 
        wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
        if (wakeref) {
@@ -1672,7 +1676,6 @@ struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine)
 {
        struct i915_request *request, *active = NULL;
-       unsigned long flags;
 
        /*
         * We are called by the error capture, reset and to dump engine
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
         * At all other times, we must assume the GPU is still running, but
         * we only care about the snapshot of this moment.
         */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       lockdep_assert_held(&engine->active.lock);
        list_for_each_entry(request, &engine->active.requests, sched.link) {
                if (i915_request_completed(request))
                        continue;
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
                active = request;
                break;
        }
-       spin_unlock_irqrestore(&engine->active.lock, flags);
 
        return active;
 }
index 2ce00d3dc42a1a5fa17b2175b78cd3657f88abe1..ae5b6baf6dff1ee77e6ed5fae6df008ac551523c 100644 (file)
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
 {
        intel_wakeref_init(&engine->wakeref);
 }
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       int err = 0;
-
-       intel_gt_pm_get(i915);
-       for_each_engine(engine, i915, id) {
-               intel_engine_pm_get(engine);
-               engine->serial++; /* kernel context lost */
-               err = engine->resume(engine);
-               intel_engine_pm_put(engine);
-               if (err) {
-                       dev_err(i915->drm.dev,
-                               "Failed to restart %s (%d)\n",
-                               engine->name, err);
-                       break;
-               }
-       }
-       intel_gt_pm_put(i915);
-
-       return err;
-}
index b326cd993d60f23008abe85fe640160a23203a76..a11c893f64c6627cbaa266885160528af0a06c42 100644 (file)
@@ -7,16 +7,22 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+
 struct drm_i915_private;
-struct intel_engine_cs;
 
 void intel_engine_pm_get(struct intel_engine_cs *engine);
 void intel_engine_pm_put(struct intel_engine_cs *engine);
 
+static inline bool
+intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+       return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
 void intel_engine_park(struct intel_engine_cs *engine);
 
 void intel_engine_init__pm(struct intel_engine_cs *engine);
 
-int intel_engines_resume(struct drm_i915_private *i915);
-
 #endif /* INTEL_ENGINE_PM_H */
index 868b220214f81b23087d7d8641345fea137b6640..43e975a26016bf75038a044c55e222aefcf8ff2b 100644 (file)
@@ -70,6 +70,18 @@ struct intel_ring {
        struct list_head request_list;
        struct list_head active_link;
 
+       /*
+        * As we have two types of rings, one global to the engine used
+        * by ringbuffer submission and those that are exclusive to a
+        * context used by execlists, we have to play safe and allow
+        * atomic updates to the pin_count. However, the actual pinning
+        * of the context is either done during initialisation for
+        * ringbuffer submission or serialised as part of the context
+        * pinning for execlists, and so we do not need a mutex ourselves
+        * to serialise intel_ring_pin/intel_ring_unpin.
+        */
+       atomic_t pin_count;
+
        u32 head;
        u32 tail;
        u32 emit;
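
A condensed sketch of the refcounting idiom this field enables (the full versions appear in the intel_ring_pin()/intel_ring_unpin() hunks later in this series):

        /* pin: only the first caller does the real mapping work */
        if (atomic_fetch_inc(&ring->pin_count))
                return 0;              /* already pinned */
        /* ... pin timeline and vma, map the ring ... */

        /* unpin: only the last caller tears the mapping down */
        if (!atomic_dec_and_test(&ring->pin_count))
                return;
        /* ... unmap and unpin ... */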
index 7b59677517629278341981ebdfd71aeb3a75353e..9f8f7f54191f06fbdca97dc8dbcae4a9d8a74ee2 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_pm.h"
 #include "intel_wakeref.h"
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
                intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
+       int err = 0;
 
        /*
         * After resume, we may need to poke into the pinned kernel
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
         * Only the kernel contexts should remain pinned over suspend,
         * allowing us to fixup the user contexts on their first pin.
         */
+       intel_gt_pm_get(i915);
        for_each_engine(engine, i915, id) {
                struct intel_context *ce;
 
+               intel_engine_pm_get(engine);
+
                ce = engine->kernel_context;
                if (ce)
                        ce->ops->reset(ce);
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
                ce = engine->preempt_context;
                if (ce)
                        ce->ops->reset(ce);
+
+               engine->serial++; /* kernel context lost */
+               err = engine->resume(engine);
+
+               intel_engine_pm_put(engine);
+               if (err) {
+                       dev_err(i915->drm.dev,
+                               "Failed to restart %s (%d)\n",
+                               engine->name, err);
+                       break;
+               }
        }
+       intel_gt_pm_put(i915);
+
+       return err;
 }
index 7dd1130a19a480ce02b07d99afb2d2358ec32709..53f342b20181a8e03d651dd48926e4fa747f6ce4 100644 (file)
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
 void intel_gt_pm_init(struct drm_i915_private *i915);
 
 void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+int intel_gt_resume(struct drm_i915_private *i915);
 
 #endif /* INTEL_GT_PM_H */
index b42b5f158295953b7b6da42f4846e96b9d4b4afb..82b7ace62d97ec13f110b4454115ae313d0791e0 100644 (file)
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref)
 {
        struct intel_context *ce = container_of(kref, typeof(*ce), ref);
 
+       GEM_BUG_ON(!i915_active_is_idle(&ce->active));
        GEM_BUG_ON(intel_context_is_pinned(ce));
 
        if (ce->state)
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce)
 {
        i915_gem_context_unpin_hw_id(ce->gem_context);
        i915_gem_object_unpin_map(ce->state->obj);
-       intel_ring_unpin(ce->ring);
 }
 
 static void
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce,
                goto unpin_active;
        }
 
-       ret = intel_ring_pin(ce->ring);
-       if (ret)
-               goto unpin_map;
-
        ret = i915_gem_context_pin_hw_id(ce->gem_context);
        if (ret)
-               goto unpin_ring;
+               goto unpin_map;
 
        ce->lrc_desc = lrc_descriptor(ce, engine);
        ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce,
 
        return 0;
 
-unpin_ring:
-       intel_ring_unpin(ce->ring);
 unpin_map:
        i915_gem_object_unpin_map(ce->state->obj);
 unpin_active:
index 4c478b38e4209aab66e7b3078431ac785602a659..3f907701ef4d658a91a275fe089743a05b1cf702 100644 (file)
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
         * written to the powercontext is undefined and so we may lose
         * GPU state upon resume, i.e. fail to restart after a reset.
         */
-       intel_engine_pm_get(engine);
        intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
        engine->reset.prepare(engine);
 }
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
        }
 }
 
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
 {
        struct intel_engine_cs *engine;
+       intel_engine_mask_t awake = 0;
        enum intel_engine_id id;
 
-       intel_gt_pm_get(i915);
-       for_each_engine(engine, i915, id)
+       for_each_engine(engine, i915, id) {
+               if (intel_engine_pm_get_if_awake(engine))
+                       awake |= engine->mask;
                reset_prepare_engine(engine);
+       }
 
        intel_uc_reset_prepare(i915);
+
+       return awake;
 }
 
 static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
 static void reset_finish_engine(struct intel_engine_cs *engine)
 {
        engine->reset.finish(engine);
-       intel_engine_pm_put(engine);
        intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+       intel_engine_signal_breadcrumbs(engine);
 }
 
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+                        intel_engine_mask_t awake)
 {
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
 
        for_each_engine(engine, i915, id) {
                reset_finish_engine(engine);
-               intel_engine_signal_breadcrumbs(engine);
+               if (awake & engine->mask)
+                       intel_engine_pm_put(engine);
        }
-       intel_gt_pm_put(i915);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 {
        struct i915_gpu_error *error = &i915->gpu_error;
        struct intel_engine_cs *engine;
+       intel_engine_mask_t awake;
        enum intel_engine_id id;
 
        if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
         * rolling the global seqno forward (since this would complete requests
         * for which we haven't set the fence error to EIO yet).
         */
-       reset_prepare(i915);
+       awake = reset_prepare(i915);
 
        /* Even if the GPU reset fails, it should still stop the engines */
        if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
        for_each_engine(engine, i915, id)
                engine->cancel_requests(engine);
 
-       reset_finish(i915);
+       reset_finish(i915, awake);
 
        GEM_TRACE("end\n");
 }
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915,
        return gt_reset(i915, stalled_mask);
 }
 
+static int resume(struct drm_i915_private *i915)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int ret;
+
+       for_each_engine(engine, i915, id) {
+               ret = engine->resume(engine);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @i915: #drm_i915_private to reset
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
                const char *reason)
 {
        struct i915_gpu_error *error = &i915->gpu_error;
+       intel_engine_mask_t awake;
        int ret;
 
        GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
                dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
        error->reset_count++;
 
-       reset_prepare(i915);
+       awake = reset_prepare(i915);
 
        if (!intel_has_gpu_reset(i915)) {
                if (i915_modparams.reset)
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915,
        if (ret) {
                DRM_ERROR("Failed to initialise HW following reset (%d)\n",
                          ret);
-               goto error;
+               goto taint;
        }
 
+       ret = resume(i915);
+       if (ret)
+               goto taint;
+
        i915_queue_hangcheck(i915);
 
 finish:
-       reset_finish(i915);
+       reset_finish(i915, awake);
 unlock:
        mutex_unlock(&error->wedge_mutex);
        return;
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
        GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
        GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
 
-       if (!intel_wakeref_active(&engine->wakeref))
+       if (!intel_engine_pm_get_if_awake(engine))
                return 0;
 
        reset_prepare_engine(engine);
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
         * process to program RING_MODE, HWSP and re-enable submission.
         */
        ret = engine->resume(engine);
-       if (ret)
-               goto out;
 
 out:
        intel_engine_cancel_stop_cs(engine);
        reset_finish_engine(engine);
+       intel_engine_pm_put(engine);
        return ret;
 }
 
index c6023bc9452d0d90f8252d824d1492c07bc7022b..12010e79886888c5bbf2c0ae58a888f69e00dd6e 100644 (file)
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq,
 int intel_ring_pin(struct intel_ring *ring)
 {
        struct i915_vma *vma = ring->vma;
-       enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
        unsigned int flags;
        void *addr;
        int ret;
 
-       GEM_BUG_ON(ring->vaddr);
+       if (atomic_fetch_inc(&ring->pin_count))
+               return 0;
 
        ret = i915_timeline_pin(ring->timeline);
        if (ret)
-               return ret;
+               goto err_unpin;
 
        flags = PIN_GLOBAL;
 
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring)
 
        ret = i915_vma_pin(vma, 0, 0, flags);
        if (unlikely(ret))
-               goto unpin_timeline;
+               goto err_timeline;
 
        if (i915_vma_is_map_and_fenceable(vma))
                addr = (void __force *)i915_vma_pin_iomap(vma);
        else
-               addr = i915_gem_object_pin_map(vma->obj, map);
+               addr = i915_gem_object_pin_map(vma->obj,
+                                              i915_coherent_map_type(vma->vm->i915));
        if (IS_ERR(addr)) {
                ret = PTR_ERR(addr);
-               goto unpin_ring;
+               goto err_ring;
        }
 
        vma->obj->pin_global++;
 
+       GEM_BUG_ON(ring->vaddr);
        ring->vaddr = addr;
+
        return 0;
 
-unpin_ring:
+err_ring:
        i915_vma_unpin(vma);
-unpin_timeline:
+err_timeline:
        i915_timeline_unpin(ring->timeline);
+err_unpin:
+       atomic_dec(&ring->pin_count);
        return ret;
 }
 
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
 
 void intel_ring_unpin(struct intel_ring *ring)
 {
-       GEM_BUG_ON(!ring->vma);
-       GEM_BUG_ON(!ring->vaddr);
+       if (!atomic_dec_and_test(&ring->pin_count))
+               return;
 
        /* Discard any unused bytes beyond that submitted to hw. */
        intel_ring_reset(ring, ring->tail);
 
+       GEM_BUG_ON(!ring->vma);
        if (i915_vma_is_map_and_fenceable(ring->vma))
                i915_vma_unpin_iomap(ring->vma);
        else
                i915_gem_object_unpin_map(ring->vma->obj);
+
+       GEM_BUG_ON(!ring->vaddr);
        ring->vaddr = NULL;
 
        ring->vma->obj->pin_global--;
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
        WARN_ON(INTEL_GEN(dev_priv) > 2 &&
                (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
 
+       intel_engine_cleanup_common(engine);
+
        intel_ring_unpin(engine->buffer);
        intel_ring_put(engine->buffer);
 
-       intel_engine_cleanup_common(engine);
        kfree(engine);
 }
 
index 15e90fd2cfdc2e9882cfeff9f2e2e1e4a1ff1a7e..98dfb086320fad58f95b6be36d11a58c7af1fc4d 100644 (file)
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
 
 static void cfl_whitelist_build(struct intel_engine_cs *engine)
 {
+       struct i915_wa_list *w = &engine->whitelist;
+
        if (engine->class != RENDER_CLASS)
                return;
 
-       gen9_whitelist_build(&engine->whitelist);
+       gen9_whitelist_build(w);
+
+       /*
+        * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
+        *
+        * This covers 4 registers which are next to one another:
+        *   - PS_INVOCATION_COUNT
+        *   - PS_INVOCATION_COUNT_UDW
+        *   - PS_DEPTH_COUNT
+        *   - PS_DEPTH_COUNT_UDW
+        */
+       whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+                         RING_FORCE_TO_NONPRIV_RD |
+                         RING_FORCE_TO_NONPRIV_RANGE_4);
 }
 
 static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
 
                /* WaEnableStateCacheRedirectToCS:icl */
                whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+
+               /*
+                * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
+                *
+                * This covers 4 registers which are next to one another:
+                *   - PS_INVOCATION_COUNT
+                *   - PS_INVOCATION_COUNT_UDW
+                *   - PS_DEPTH_COUNT
+                *   - PS_DEPTH_COUNT_UDW
+                */
+               whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+                                 RING_FORCE_TO_NONPRIV_RD |
+                                 RING_FORCE_TO_NONPRIV_RANGE_4);
                break;
 
        case VIDEO_DECODE_CLASS:
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
                        wa_write_or(wal,
                                    GEN7_SARCHKMD,
-                                   GEN7_DISABLE_DEMAND_PREFETCH |
-                                   GEN7_DISABLE_SAMPLER_PREFETCH);
+                                   GEN7_DISABLE_DEMAND_PREFETCH);
+
+               /* Wa_1606682166:icl */
+               wa_write_or(wal,
+                           GEN7_SARCHKMD,
+                           GEN7_DISABLE_SAMPLER_PREFETCH);
        }
 
        if (IS_GEN_RANGE(i915, 9, 11)) {
index 086801b514416d0f175b1088ef87dde907a6708a..486c6953dcb182463ce7ceedcf2df716af8ca53f 100644 (file)
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
        ring->base.effective_size = sz;
        ring->base.vaddr = (void *)(ring + 1);
        ring->base.timeline = &ring->timeline;
+       atomic_set(&ring->base.pin_count, 1);
 
        INIT_LIST_HEAD(&ring->base.request_list);
        intel_ring_update_space(&ring->base);
index 89da9e7cc1bae0c65bf64054253a975d5829fc94..b5c590c9ccba9355a5e66a1a33a732c345f0ea4a 100644 (file)
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
                goto unlock;
 
        for (p = igt_atomic_phases; p->name; p++) {
+               intel_engine_mask_t awake;
+
                GEM_TRACE("intel_gpu_reset under %s\n", p->name);
 
+               awake = reset_prepare(i915);
                p->critical_section_begin();
-               reset_prepare(i915);
                err = intel_gpu_reset(i915, ALL_ENGINES);
-               reset_finish(i915);
                p->critical_section_end();
+               reset_finish(i915, awake);
 
                if (err) {
                        pr_err("intel_gpu_reset failed under %s\n", p->name);
index 9eaf030affd0f43db998da9d4445ca810e33ef2d..44becd9538bed54d0d266a7b9e4b68455bdc9e61 100644 (file)
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
 
        err = 0;
        for (i = 0; i < engine->whitelist.count; i++) {
-               if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+               const struct i915_wa *wa = &engine->whitelist.list[i];
+
+               if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+                       continue;
+
+               if (!fn(engine, a[i], b[i], wa->reg))
                        err = -EINVAL;
        }
 
index 6ea88270c818ff62680e56f95cfef8a3c3c1f1f3..b09dc315e2dab7674275482b8908b3c902962e85 100644 (file)
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
                gma_head == gma_tail)
                return 0;
 
-       if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        ret = ip_gma_set(&s, gma_head);
        if (ret)
                goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        s.workload = workload;
        s.is_ctx_wa = true;
 
-       if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        ret = ip_gma_set(&s, gma_head);
        if (ret)
                goto out;
index 65e847392aea788f2feb17c381f75f0defc62709..8bb292b01271600eae8bd835e71fba2b209f5cc4 100644 (file)
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        plane->hw_format = fmt;
 
        plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
-       if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+       if (!vgpu_gmadr_is_valid(vgpu, plane->base))
                return  -EINVAL;
 
        plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
                        alpha_plane, alpha_force);
 
        plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
-       if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+       if (!vgpu_gmadr_is_valid(vgpu, plane->base))
                return  -EINVAL;
 
        plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
        plane->drm_format = drm_format;
 
        plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
-       if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+       if (!vgpu_gmadr_is_valid(vgpu, plane->base))
                return  -EINVAL;
 
        plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
index 53115bdae12be320e3de82562fa6d36a39684686..4b04af569c05c13df7cc4f6ca235abb0b281d025 100644 (file)
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
        struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
        const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
        unsigned long index = off >> info->gtt_entry_size_shift;
+       unsigned long gma;
        struct intel_gvt_gtt_entry e;
 
        if (bytes != 4 && bytes != 8)
                return -EINVAL;
 
+       gma = index << I915_GTT_PAGE_SHIFT;
+       if (!intel_gvt_ggtt_validate_range(vgpu,
+                                          gma, 1 << I915_GTT_PAGE_SHIFT)) {
+               gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+               memset(p_data, 0, bytes);
+               return 0;
+       }
+
        ggtt_get_guest_entry(ggtt_mm, &e, index);
        memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
                        bytes);
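
A worked example of the bounds check above, assuming 8-byte GTT entries (gtt_entry_size_shift == 3) and 4 KiB GTT pages (I915_GTT_PAGE_SHIFT == 12):

    /*
     * off   = 0x1008                   MMIO offset written by the guest
     * index = 0x1008 >> 3 = 0x201      GTT entry index
     * gma   = 0x201 << 12 = 0x201000   guest graphics memory address
     *
     * The read is only served if [gma, gma + 4KiB) lies inside the
     * vGPU's assigned GGTT range; otherwise zeroes are returned.
     */
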
index 144301b778df275e1327217de3eb497ac5317252..23aa3e50cbf89fdf82948c9159ca2455c8f122a1 100644 (file)
@@ -1904,6 +1904,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
 
        entry = __gvt_cache_find_gfn(info->vgpu, gfn);
        if (!entry) {
+               ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+               if (ret)
+                       goto err_unlock;
+
+               ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+               if (ret)
+                       goto err_unmap;
+       } else if (entry->size != size) {
+               /* the same gfn with different size: unmap and re-map */
+               gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+               __gvt_cache_remove_entry(vgpu, entry);
+
                ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
                if (ret)
                        goto err_unlock;
index 2144fb46d0e1ce5f45c125cf3286694e78ff8708..9f3fd7d96a694a6fe890978dd19e3c38160965e4 100644 (file)
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
        wa_ctx->indirect_ctx.shadow_va = NULL;
 }
 
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
-                                        struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+                                         struct i915_gem_context *ctx)
 {
        struct intel_vgpu_mm *mm = workload->shadow_mm;
        struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
        int i = 0;
 
-       if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-               return -EINVAL;
-
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
        } else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
                        px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
                }
        }
-
-       return 0;
 }
 
 static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 static int prepare_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       int ring = workload->ring_id;
        int ret = 0;
 
        ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
                return ret;
        }
 
+       if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+           !workload->shadow_mm->ppgtt_mm.shadowed) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               return -EINVAL;
+       }
+
        update_shadow_pdps(workload);
 
+       set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
        if (ret) {
                gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       struct intel_vgpu_submission *s = &vgpu->submission;
        struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
-       ret = set_context_ppgtt_from_shadow(workload,
-                                           s->shadow[ring_id]->gem_context);
-       if (ret < 0) {
-               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-               goto err_req;
-       }
-
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
        int ret;
        bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
 
        kfree(p);
 
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
                                workload->ring_id, workload,
                                workload->vgpu->id);
 
+               intel_runtime_pm_get(rpm);
+
                gvt_dbg_sched("ring id %d will dispatch workload %p\n",
                                workload->ring_id, workload);
 
@@ -1042,6 +1042,7 @@ complete:
                        intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
                                        FORCEWAKE_ALL);
 
+               intel_runtime_pm_put_unchecked(rpm);
                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
 
+       if (!intel_gvt_ggtt_validate_range(vgpu, start,
+                               _RING_CTL_BUF_SIZE(ctl))) {
+               gvt_vgpu_err("context contains invalid rb at: 0x%x\n", start);
+               return ERR_PTR(-EINVAL);
+       }
+
        workload = alloc_workload(vgpu);
        if (IS_ERR(workload))
                return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                workload->wa_ctx.indirect_ctx.size =
                        (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
                        CACHELINE_BYTES;
+
+               if (workload->wa_ctx.indirect_ctx.size != 0) {
+                       if (!intel_gvt_ggtt_validate_range(vgpu,
+                               workload->wa_ctx.indirect_ctx.guest_gma,
+                               workload->wa_ctx.indirect_ctx.size)) {
+                               kmem_cache_free(s->workloads, workload);
+                               gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+                                   workload->wa_ctx.indirect_ctx.guest_gma);
+                               return ERR_PTR(-EINVAL);
+                       }
+               }
+
                workload->wa_ctx.per_ctx.guest_gma =
                        per_ctx & PER_CTX_ADDR_MASK;
                workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+               if (workload->wa_ctx.per_ctx.valid) {
+                       if (!intel_gvt_ggtt_validate_range(vgpu,
+                               workload->wa_ctx.per_ctx.guest_gma,
+                               CACHELINE_BYTES)) {
+                               kmem_cache_free(s->workloads, workload);
+                               gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+                                       workload->wa_ctx.per_ctx.guest_gma);
+                               return ERR_PTR(-EINVAL);
+                       }
+               }
        }
 
        gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
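
The three checks added to intel_vgpu_create_workload() follow one pattern: reject a guest-supplied graphics memory range before acting on it, and release anything already allocated when a later check fails. A minimal sketch, with validate_range() and copy_from_guest() as hypothetical stand-ins for intel_gvt_ggtt_validate_range() and the hypervisor read helpers:

    static int use_guest_range(struct intel_vgpu *vgpu, u64 gma, u64 size)
    {
            void *buf;

            /* Cheap check first: nothing to unwind on failure. */
            if (!validate_range(vgpu, gma, size))
                    return -EINVAL;

            buf = kmalloc(size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* Guest memory is only touched after validation. */
            copy_from_guest(vgpu, buf, gma, size);

            kfree(buf);
            return 0;
    }
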
index a3deed692b9c4a5a5faeebac9dea625a2a16ed59..fe552e877e096911a37cda531e8ae7abdbecfa34 100644 (file)
@@ -28,8 +28,6 @@
  *
  */
 
-#include "trace.h"
-
 #ifndef __CHECKER__
 #define CREATE_TRACE_POINTS
 #include "trace.h"
index bc909ec5d9c3bb6a5f543d71bae292091acba5ea..fe7a6ec2c199c49bd2750b4ac92b2690fada1435 100644 (file)
@@ -1674,8 +1674,9 @@ struct drm_i915_private {
        } dram_info;
 
        struct intel_bw_info {
-               int num_planes;
-               int deratedbw[3];
+               unsigned int deratedbw[3]; /* for each QGV point */
+               u8 num_qgv_points;
+               u8 num_planes;
        } max_bw[6];
 
        struct drm_private_obj bw_obj;
index 190ad54fb072dedf5b44cdd0fd18f0fbe006d8b0..8a659d3d7435d14198614250cb7296c2ca956b96 100644 (file)
@@ -46,7 +46,6 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
 #include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_mocs.h"
 #include "gt/intel_reset.h"
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 
        intel_mocs_init_l3cc_table(dev_priv);
 
-       /* Only when the HW is re-initialised, can we replay the requests */
-       ret = intel_engines_resume(dev_priv);
-       if (ret)
-               goto cleanup_uc;
-
        intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
        intel_engines_set_scheduler_caps(dev_priv);
        return 0;
 
-cleanup_uc:
-       intel_uc_fini_hw(dev_priv);
 out:
        intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
        return ret;
 }
 
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
        if (ret)
                goto err_uc_init;
 
+       /* Only when the HW is re-initialised, can we replay the requests */
+       ret = intel_gt_resume(dev_priv);
+       if (ret)
+               goto err_init_hw;
+
        /*
         * Despite its name intel_init_clock_gating applies both display
         * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
        ret = intel_engines_verify_workarounds(dev_priv);
        if (ret)
-               goto err_init_hw;
+               goto err_gt;
 
        ret = __intel_engines_record_defaults(dev_priv);
        if (ret)
-               goto err_init_hw;
+               goto err_gt;
 
        if (i915_inject_load_failure()) {
                ret = -ENODEV;
-               goto err_init_hw;
+               goto err_gt;
        }
 
        if (i915_inject_load_failure()) {
                ret = -EIO;
-               goto err_init_hw;
+               goto err_gt;
        }
 
        intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
         * HW as irreversibly wedged, but keep enough state around that the
         * driver doesn't explode during runtime.
         */
-err_init_hw:
+err_gt:
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1626,7 @@ err_init_hw:
        i915_gem_drain_workqueue(dev_priv);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
+err_init_hw:
        intel_uc_fini_hw(dev_priv);
 err_uc_init:
        intel_uc_fini(dev_priv);
index 8ab820145ea6cacd8b025ad8f8e2371088a91bc3..7015a97b10979ab147fa4dc90fca9b0c11c8f0a4 100644 (file)
@@ -1444,9 +1444,11 @@ unwind_pd:
        spin_lock(&pdp->lock);
        if (atomic_dec_and_test(&pd->used)) {
                gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+               pdp->entry[pdpe] = vm->scratch_pd;
                GEM_BUG_ON(!atomic_read(&pdp->used));
                atomic_dec(&pdp->used);
-               free_pd(vm, pd);
+               GEM_BUG_ON(alloc);
+               alloc = pd; /* defer the free to after the lock */
        }
        spin_unlock(&pdp->lock);
 unwind:
@@ -1515,7 +1517,9 @@ unwind_pdp:
        spin_lock(&pml4->lock);
        if (atomic_dec_and_test(&pdp->used)) {
                gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
-               free_pd(vm, pdp);
+               pml4->entry[pml4e] = vm->scratch_pdp;
+               GEM_BUG_ON(alloc);
+               alloc = pdp; /* defer the free until after the lock */
        }
        spin_unlock(&pml4->lock);
 unwind:
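
Both unwind paths now stash the victim in `alloc` instead of calling free_pd() with the spinlock held, since freeing may sleep or take further locks. The general shape of the idiom, with illustrative names:

    static void remove_entry(struct table *t, struct entry *e)
    {
            struct entry *victim = NULL;

            spin_lock(&t->lock);
            if (atomic_dec_and_test(&e->used)) {
                    detach_entry(t, e);     /* hypothetical: unlink under the lock */
                    victim = e;             /* defer the free to after the lock */
            }
            spin_unlock(&t->lock);

            kfree(victim);                  /* kfree(NULL) is a no-op */
    }
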
index 41a511d5267f456a35f4ed903081a0fcece8f71c..8bc76fcff70d96acfcca3d19fafbcd57bd2a8ee8 100644 (file)
@@ -1418,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
                struct intel_engine_cs *engine = i915->engine[i];
                struct drm_i915_error_engine *ee = &error->engine[i];
                struct i915_request *request;
+               unsigned long flags;
 
                ee->engine_id = -1;
 
@@ -1429,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
                error_record_engine_registers(error, engine, ee);
                error_record_engine_execlists(engine, ee);
 
+               spin_lock_irqsave(&engine->active.lock, flags);
                request = intel_engine_find_active_request(engine);
                if (request) {
                        struct i915_gem_context *ctx = request->gem_context;
-                       struct intel_ring *ring;
+                       struct intel_ring *ring = request->ring;
 
                        ee->vm = ctx->vm ?: &ggtt->vm;
 
@@ -1462,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
                        ee->rq_post = request->postfix;
                        ee->rq_tail = request->tail;
 
-                       ring = request->ring;
                        ee->cpu_ring_head = ring->head;
                        ee->cpu_ring_tail = ring->tail;
                        ee->ringbuffer =
@@ -1470,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                        engine_record_requests(engine, request, ee);
                }
+               spin_unlock_irqrestore(&engine->active.lock, flags);
 
                ee->hws_page =
                        i915_error_object_create(i915,
index a700c5c3d1673516355bdbac0468adca9cfc664e..5140017f9a392c52cf24057b20aec4c331461761 100644 (file)
@@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
        }
 }
 
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static void delay_after_mux(void)
 {
-       struct drm_i915_private *dev_priv = stream->dev_priv;
-       const struct i915_oa_config *oa_config = stream->oa_config;
-
-       /* PRM:
-        *
-        * OA unit is using “crclk” for its functionality. When trunk
-        * level clock gating takes place, OA clock would be gated,
-        * unable to count the events from non-render clock domain.
-        * Render clock gating must be disabled when OA is enabled to
-        * count the events from non-render domain. Unit level clock
-        * gating for RCS should also be disabled.
-        */
-       I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
-                                   ~GEN7_DOP_CLOCK_GATE_ENABLE));
-       I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
-                                 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
-       config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
-
-       /* It apparently takes a fairly long time for a new MUX
+       /*
+        * It apparently takes a fairly long time for a new MUX
         * configuration to be applied after these register writes.
         * This delay duration was derived empirically based on the
         * render_basic config but hopefully it covers the maximum
@@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
         * a delay at this location would mitigate any invalid reports.
         */
        usleep_range(15000, 20000);
+}
+
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+{
+       struct drm_i915_private *dev_priv = stream->dev_priv;
+       const struct i915_oa_config *oa_config = stream->oa_config;
+
+       /*
+        * PRM:
+        *
+        * OA unit is using “crclk” for its functionality. When trunk
+        * level clock gating takes place, OA clock would be gated,
+        * unable to count the events from non-render clock domain.
+        * Render clock gating must be disabled when OA is enabled to
+        * count the events from non-render domain. Unit level clock
+        * gating for RCS should also be disabled.
+        */
+       I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+                                   ~GEN7_DOP_CLOCK_GATE_ENABLE));
+       I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+                                 GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+       config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+       delay_after_mux();
 
        config_oa_regs(dev_priv, oa_config->b_counter_regs,
                       oa_config->b_counter_regs_len);
@@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
                return ret;
 
        config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+       delay_after_mux();
 
        config_oa_regs(dev_priv, oa_config->b_counter_regs,
                       oa_config->b_counter_regs_len);
@@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file)
        i915_perf_destroy_locked(stream);
        mutex_unlock(&dev_priv->perf.lock);
 
+       /* Release the reference the perf stream kept on the driver. */
+       drm_dev_put(&dev_priv->drm);
+
        return 0;
 }
 
@@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
        if (!(param->flags & I915_PERF_FLAG_DISABLED))
                i915_perf_enable_locked(stream);
 
+       /* Take a reference on the driver that will be kept with stream_fd
+        * until its release.
+        */
+       drm_dev_get(&dev_priv->drm);
+
        return stream_fd;
 
 err_open:
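
The stream file descriptor can outlive the ioctl that created it, so the open path now pins the drm_device and the release path unpins it. The pairing, reduced to its shape (function names hypothetical):

    static int stream_open(struct drm_i915_private *i915)
    {
            drm_dev_get(&i915->drm);        /* held for the lifetime of stream_fd */
            return 0;
    }

    static int stream_release(struct drm_i915_private *i915)
    {
            drm_dev_put(&i915->drm);        /* balances the get in stream_open() */
            return 0;
    }
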
@@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
                        dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
                        dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
 
-                       dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
-                       dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
-
+                       if (IS_GEN(dev_priv, 10)) {
+                               dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+                               dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+                       } else {
+                               dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
+                               dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+                       }
                        dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
                }
        }
index f4ce643b3bc3ea00ff563979de1e112b2315dac0..cce426b23a240a8a30449b9a1b220c0de8ce92e3 100644 (file)
 /* watermark/fifo updates */
 
 TRACE_EVENT(intel_pipe_enable,
-           TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-           TP_ARGS(dev_priv, pipe),
+           TP_PROTO(struct intel_crtc *crtc),
+           TP_ARGS(crtc),
 
            TP_STRUCT__entry(
                             __array(u32, frame, 3)
                             __array(u32, scanline, 3)
                             __field(enum pipe, pipe)
                             ),
-
            TP_fast_assign(
-                          enum pipe _pipe;
-                          for_each_pipe(dev_priv, _pipe) {
-                                  __entry->frame[_pipe] =
-                                          dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
-                                  __entry->scanline[_pipe] =
-                                          intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+                          struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+                          struct intel_crtc *it__;
+                          for_each_intel_crtc(&dev_priv->drm, it__) {
+                                  __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+                                  __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
                           }
-                          __entry->pipe = pipe;
+                          __entry->pipe = crtc->pipe;
                           ),
 
            TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable,
 );
 
 TRACE_EVENT(intel_pipe_disable,
-           TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
-           TP_ARGS(dev_priv, pipe),
+           TP_PROTO(struct intel_crtc *crtc),
+           TP_ARGS(crtc),
 
            TP_STRUCT__entry(
                             __array(u32, frame, 3)
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable,
                             ),
 
            TP_fast_assign(
-                          enum pipe _pipe;
-                          for_each_pipe(dev_priv, _pipe) {
-                                  __entry->frame[_pipe] =
-                                          dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
-                                  __entry->scanline[_pipe] =
-                                          intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+                          struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+                          struct intel_crtc *it__;
+                          for_each_intel_crtc(&dev_priv->drm, it__) {
+                                  __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+                                  __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
                           }
-                          __entry->pipe = pipe;
+                          __entry->pipe = crtc->pipe;
                           ),
 
            TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc,
 
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
                           ),
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
                             ),
 
            TP_fast_assign(
+                           struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                           __entry->pipe = pipe;
-                          __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-                          __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
+                          __entry->scanline = intel_get_crtc_scanline(crtc);
                           ),
 
            TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun,
 
            TP_fast_assign(
                           enum pipe pipe = pch_transcoder;
+                          struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
                           __entry->pipe = pipe;
-                          __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-                          __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
+                          __entry->scanline = intel_get_crtc_scanline(crtc);
                           ),
 
            TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr,
                             ),
 
            TP_fast_assign(
-                          enum pipe pipe;
-                          for_each_pipe(dev_priv, pipe) {
-                                  __entry->frame[pipe] =
-                                          dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
-                                  __entry->scanline[pipe] =
-                                          intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+                          struct intel_crtc *crtc;
+                          for_each_intel_crtc(&dev_priv->drm, crtc) {
+                                  __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+                                  __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
                           }
                           __entry->old = old;
                           __entry->new = new;
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm,
 
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
                           __entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm,
 
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           __entry->level = wm->level;
                           __entry->cxsr = wm->cxsr;
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size,
 
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           __entry->sprite0_start = sprite0_start;
                           __entry->sprite1_start = sprite1_start;
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane,
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
                           __entry->name = plane->name;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
                           memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane,
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
                           __entry->name = plane->name;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           ),
 
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start,
 
            TP_fast_assign(
                           __entry->pipe = crtc->pipe;
-                          __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
-                                                                                      crtc->pipe);
+                          __entry->frame = intel_crtc_get_vblank_counter(crtc);
                           __entry->scanline = intel_get_crtc_scanline(crtc);
                           __entry->min = crtc->debug.min_vbl;
                           __entry->max = crtc->debug.max_vbl;
index 502c54428570c7a75130781862bbe308c3fac0e9..8d1aebc3e8574652153553f82370f7a210d14c84 100644 (file)
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
 static void
 dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
 {
-       struct drm_printer p;
+       if (debug->count) {
+               struct drm_printer p = drm_debug_printer("i915");
 
-       if (!debug->count)
-               return;
-
-       p = drm_debug_printer("i915");
-       __print_intel_runtime_pm_wakeref(&p, debug);
+               __print_intel_runtime_pm_wakeref(&p, debug);
+       }
 
        kfree(debug->owners);
 }
index 9cbb2ebf575b5f610c27535728c4f9fd79c25ff4..38275310b196dbea82402d3b49648070049c1611 100644 (file)
@@ -65,6 +65,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
        return 0;
 }
 
+/**
+ * intel_wakeref_get_if_active: Acquire the wakeref
+ * @wf: the wakeref
+ *
+ * Acquire a hold on the wakeref, but only if the wakeref is already
+ * active.
+ *
+ * Returns: true if the wakeref was acquired, false otherwise.
+ */
+static inline bool
+intel_wakeref_get_if_active(struct intel_wakeref *wf)
+{
+       return atomic_inc_not_zero(&wf->count);
+}
+
 /**
  * intel_wakeref_put: Release the wakeref
  * @i915: the drm_i915_private device
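
intel_wakeref_get_if_active() never wakes the device: atomic_inc_not_zero() only succeeds while someone else already holds the wakeref. A hedged usage sketch (the put side is simplified; the real intel_wakeref_put() takes more context than shown):

    static void flush_if_awake(struct intel_wakeref *wf)
    {
            if (!intel_wakeref_get_if_active(wf))
                    return;                 /* asleep: don't wake it just for this */

            do_flush();                     /* hypothetical opportunistic work */
            wakeref_put(wf);                /* hypothetical stand-in for the put */
    }
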
index 1671db47aa579090d48eb18f0ceabf3aa530219f..e9c55d1d6c044273a63e53bd0fbbf674497a7506 100644 (file)
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
index be39cf01e51ecf73cf28bde06c3f6b6480207c80..dc8ec2c94301b67f057f918e6d7bdb36722938d3 100644 (file)
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
index 9acbbc0f323240e641087a158bbea5d4c9fef582..048c8be426f3254c37294cca600a0714d1fd84cf 100644 (file)
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                        /* ignore if there has not been a ctx switch: */
                        if (priv->lastctx == ctx)
                                break;
+                       /* fall-thru */
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
                                CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
index ff14555372d09dec06016da145c60db6a569fe09..78d5fa230c165e9f7a0c784902e0189ee30afc78 100644 (file)
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
        mdp5_crtc->enabled = false;
 }
 
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+       struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+       struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+       u32 count;
+
+       count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+       drm_crtc_set_max_vblank_count(crtc, count);
+
+       drm_crtc_vblank_on(crtc);
+}
+
 static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
 {
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
        }
 
        /* Restore vblank irq handling after power is enabled */
-       drm_crtc_vblank_on(crtc);
+       mdp5_crtc_vblank_on(crtc);
 
        mdp5_crtc_mode_set_nofb(crtc);
 
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
                mdp5_crtc_destroy_state(crtc, crtc->state);
 
        __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+       drm_crtc_vblank_reset(crtc);
 }
 
 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
index 4a60f5fca6b0bdc90d0fdf758c9196a26381120a..fec6ef1ae3b9839a5b5916d15b6f4a58b5e05692 100644 (file)
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
        dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = mdp5_get_scanoutpos;
        dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
-       dev->max_vblank_count = 0xffffffff;
+       dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
        dev->vblank_disable_immediate = true;
 
        return kms;
index c226156f2dea8d28948c0056ed02974823974b46..c356f5ccf25360627a62e0e3feb89f7511cbc8fd 100644 (file)
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
        if (!np)
                return 0;
 
-       drm_of_component_match_add(dev, matchptr, compare_of, np);
+       if (of_device_is_available(np))
+               drm_of_component_match_add(dev, matchptr, compare_of, np);
 
        of_node_put(np);
 
index c2114c748c2fcc28928ff138f64329773d839a60..8cf6362e64bfeb314b2fcc222d16f5394964155e 100644 (file)
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
        return !msm_obj->vram_node;
 }
 
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API.  Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tale of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+       struct device *dev = msm_obj->base.dev->dev;
+
+       if (get_dma_ops(dev)) {
+               dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       } else {
+               dma_map_sg(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+       struct device *dev = msm_obj->base.dev->dev;
+
+       if (get_dma_ops(dev)) {
+               dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       } else {
+               dma_unmap_sg(dev, msm_obj->sgt->sgl,
+                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+       }
+}
+
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                       dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
-                                       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+                       sync_for_device(msm_obj);
        }
 
        return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
                         * GPU, etc. are not coherent:
                         */
                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-                               dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
-                                            msm_obj->sgt->nents,
-                                            DMA_BIDIRECTIONAL);
+                               sync_for_cpu(msm_obj);
 
                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
index 8497768f1b4102ac76d7e5d45257aec2bb72a82b..126703816794e77a521437271e09a34a09015f4b 100644 (file)
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
                        drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
                                             connector->display_info.bpc * 3);
 
-       if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+       if (crtc_state->mode_changed) {
                slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
                                                      mstc->port,
                                                      asyh->dp.pbn);
index 8c92374afcf227980d659748052fad072173b773..a835cebb6d901103d9f46e0d90286b52b08bd2c6 100644 (file)
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
                fault->inst, fault->addr, fault->access);
 }
 
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+       bool ret = hmm_range_valid(range);
+
+       hmm_range_unregister(range);
+       return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+       long ret;
+
+       range->default_flags = 0;
+       range->pfn_flags_mask = -1UL;
+
+       ret = hmm_range_register(range, mirror,
+                                range->start, range->end,
+                                PAGE_SHIFT);
+       if (ret) {
+               up_read(&range->vma->vm_mm->mmap_sem);
+               return (int)ret;
+       }
+
+       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+               up_read(&range->vma->vm_mm->mmap_sem);
+               return -EAGAIN;
+       }
+
+       ret = hmm_range_fault(range, true);
+       if (ret <= 0) {
+               if (ret == 0)
+                       ret = -EBUSY;
+               up_read(&range->vma->vm_mm->mmap_sem);
+               hmm_range_unregister(range);
+               return ret;
+       }
+       return 0;
+}
+
 static int
 nouveau_svm_fault(struct nvif_notify *notify)
 {
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
                range.values = nouveau_svm_pfn_values;
                range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-               ret = hmm_vma_fault(&svmm->mirror, &range, true);
+               ret = nouveau_range_fault(&svmm->mirror, &range);
                if (ret == 0) {
                        mutex_lock(&svmm->mutex);
-                       if (!hmm_vma_range_done(&range)) {
+                       if (!nouveau_range_done(&range)) {
                                mutex_unlock(&svmm->mutex);
                                goto again;
                        }
@@ -666,8 +707,8 @@ again:
                                                NULL);
                        svmm->vmm->vmm.object.client->super = false;
                        mutex_unlock(&svmm->mutex);
+                       up_read(&svmm->mm->mmap_sem);
                }
-               up_read(&svmm->mm->mmap_sem);
 
                /* Cancel any faults in the window whose pages didn't manage
                 * to keep their valid bit, or stay writeable when required.
index 8d55cdd69ff48eee01a21aa65caa2c145461deed..435c7d7377a36beef286a1c6ef5c749cbecf7386 100644 (file)
@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = {
 
 static struct at91_twi_pdata sama5d2_config = {
        .clk_max_div = 7,
-       .clk_offset = 4,
+       .clk_offset = 3,
        .has_unre_flag = true,
        .has_alt_cmd = true,
        .has_hold_field = true,
index e87232f2e70855c3c5a21b1028597b8fd298a114..a3fcc35ffd3b65b037965efb5ededd808b82ea8b 100644 (file)
@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
        writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
 
        /* send stop when last byte has been written */
-       if (--dev->buf_len == 0)
+       if (--dev->buf_len == 0) {
                if (!dev->use_alt_cmd)
                        at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+               at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
+       }
 
        dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
 
@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
                } else {
                        at91_twi_write_next_byte(dev);
                        at91_twi_write(dev, AT91_TWI_IER,
-                                      AT91_TWI_TXCOMP |
-                                      AT91_TWI_NACK |
-                                      AT91_TWI_TXRDY);
+                                      AT91_TWI_TXCOMP | AT91_TWI_NACK |
+                                      (dev->buf_len ? AT91_TWI_TXRDY : 0));
                }
        }
 
index 2c7f145a036e62d291afd5a645430fcb12966c71..d7fd76baec92666b0f51fcaf02af81815615593c 100644 (file)
@@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
 static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
 {
        struct i2c_msg *msg = iproc_i2c->msg;
+       uint32_t val;
 
        /* Read valid data from RX FIFO */
        while (iproc_i2c->rx_bytes < msg->len) {
-               if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
-                     & M_FIFO_RX_CNT_MASK))
+               val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET);
+
+               /* rx fifo empty */
+               if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK))
                        break;
 
                msg->buf[iproc_i2c->rx_bytes] =
-                       (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
-                       M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+                       (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
                iproc_i2c->rx_bytes++;
        }
 }
index d97fb857b0ea9fb3f23de8ebb8b807fa85e0b021..c98ef4c4a0c9ea844c162a48811783762aa13331 100644 (file)
@@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
                 * fall through to the write state, as we will need to
                 * send a byte as well
                 */
+               /* Fall through */
 
        case STATE_WRITE:
                /*
index 888d89ce81df07118fd21683241921a1499e6aea..beee7b7e0d9acf6526a52f6205950d561f547067 100644 (file)
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
                                          struct ib_udata *udata,
                                          struct ib_uobject *uobj)
 {
+       enum ib_qp_type qp_type = attr->qp_type;
        struct ib_qp *qp;
+       bool is_xrc;
 
        if (!dev->ops.create_qp)
                return ERR_PTR(-EOPNOTSUPP);
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
         * and more importantly they are created internally by the driver,
         * see mlx5 create_dev_resources() as an example.
         */
-       if (attr->qp_type < IB_QPT_XRC_INI) {
+       is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
+       if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
                qp->res.type = RDMA_RESTRACK_QP;
                if (uobj)
                        rdma_restrack_uadd(&qp->res);
index 01faef7bc0615838d37c57b82b069df0a9c89da9..45d5164e9574af247ae3bb53dd63d9791957ae18 100644 (file)
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
        u64 sum;
 
        port_counter = &dev->port_data[port].port_counter;
+       if (!port_counter->hstats)
+               return 0;
+
        sum = get_running_counters_hwstat_sum(dev, port, index);
        sum += port_counter->hstats->value[index];
 
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
        struct rdma_port_counter *port_counter;
        u32 port;
 
-       if (!dev->ops.alloc_hw_stats || !dev->port_data)
+       if (!dev->port_data)
                return;
 
        rdma_for_each_port(dev, port) {
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
                port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
                mutex_init(&port_counter->lock);
 
+               if (!dev->ops.alloc_hw_stats)
+                       continue;
+
                port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
                if (!port_counter->hstats)
                        goto fail;
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
        struct rdma_port_counter *port_counter;
        u32 port;
 
-       if (!dev->ops.alloc_hw_stats)
-               return;
-
        rdma_for_each_port(dev, port) {
                port_counter = &dev->port_data[port].port_counter;
                kfree(port_counter->hstats);
index 9773145dee0996d0d058230bc6ce18f9c138d34f..ea8661a00651bba68f496a7bd2a398b9289f9a68 100644 (file)
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
 static DECLARE_RWSEM(devices_rwsem);
 #define DEVICE_REGISTERED XA_MARK_1
 
-static LIST_HEAD(client_list);
+static u32 highest_client_id;
 #define CLIENT_REGISTERED XA_MARK_1
 static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
 static DECLARE_RWSEM(clients_rwsem);
 
+static void ib_client_put(struct ib_client *client)
+{
+       if (refcount_dec_and_test(&client->uses))
+               complete(&client->uses_zero);
+}
+
 /*
  * If client_data is registered then the corresponding client must also still
  * be registered.
@@ -660,6 +666,14 @@ static int add_client_context(struct ib_device *device,
                return 0;
 
        down_write(&device->client_data_rwsem);
+       /*
+        * So long as the client is registered hold both the client and device
+        * unregistration locks.
+        */
+       if (!refcount_inc_not_zero(&client->uses))
+               goto out_unlock;
+       refcount_inc(&device->refcount);
+
        /*
         * Another caller to add_client_context got here first and has already
         * completely initialized context.
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device,
        return 0;
 
 out:
+       ib_device_put(device);
+       ib_client_put(client);
+out_unlock:
        up_write(&device->client_data_rwsem);
        return ret;
 }
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device,
        client_data = xa_load(&device->client_data, client_id);
        xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
        client = xa_load(&clients, client_id);
-       downgrade_write(&device->client_data_rwsem);
+       up_write(&device->client_data_rwsem);
 
        /*
         * Notice we cannot be holding any exclusive locks when calling the
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device,
         *
         * For this reason clients and drivers should not call the
         * unregistration functions while holding any locks.
-        *
-        * It is tempting to drop the client_data_rwsem too, but this is required
-        * to ensure that unregister_client does not return until all clients
-        * are completely unregistered, which is required to avoid module
-        * unloading races.
         */
        if (client->remove)
                client->remove(device, client_data);
 
        xa_erase(&device->client_data, client_id);
-       up_read(&device->client_data_rwsem);
+       ib_device_put(device);
+       ib_client_put(client);
 }
 
 static int alloc_port_data(struct ib_device *device)
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device)
 
 static void disable_device(struct ib_device *device)
 {
-       struct ib_client *client;
+       u32 cid;
 
        WARN_ON(!refcount_read(&device->refcount));
 
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device)
        xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
        up_write(&devices_rwsem);
 
+       /*
+        * Remove clients in LIFO order, see assign_client_id. This could be
+        * more efficient if xarray learns to reverse iterate. Since no new
+        * clients can be added to this ib_device past this point we only need
+        * the maximum possible client_id value here.
+        */
        down_read(&clients_rwsem);
-       list_for_each_entry_reverse(client, &client_list, list)
-               remove_client_context(device, client->client_id);
+       cid = highest_client_id;
        up_read(&clients_rwsem);
+       while (cid) {
+               cid--;
+               remove_client_context(device, cid);
+       }
 
        /* Pairs with refcount_set in enable_device */
        ib_device_put(device);
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client)
        /*
         * The add/remove callbacks must be called in FIFO/LIFO order. To
         * achieve this we assign client_ids so they are sorted in
-        * registration order, and retain a linked list we can reverse iterate
-        * to get the LIFO order. The extra linked list can go away if xarray
-        * learns to reverse iterate.
+        * registration order.
         */
-       if (list_empty(&client_list)) {
-               client->client_id = 0;
-       } else {
-               struct ib_client *last;
-
-               last = list_last_entry(&client_list, struct ib_client, list);
-               client->client_id = last->client_id + 1;
-       }
+       client->client_id = highest_client_id;
        ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
        if (ret)
                goto out;
 
+       highest_client_id++;
        xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
-       list_add_tail(&client->list, &client_list);
 
 out:
        up_write(&clients_rwsem);
        return ret;
 }
 
+static void remove_client_id(struct ib_client *client)
+{
+       down_write(&clients_rwsem);
+       xa_erase(&clients, client->client_id);
+       for (; highest_client_id; highest_client_id--)
+               if (xa_load(&clients, highest_client_id - 1))
+                       break;
+       up_write(&clients_rwsem);
+}
+
 /**
  * ib_register_client - Register an IB client
  * @client:Client to register
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client)
        unsigned long index;
        int ret;
 
+       refcount_set(&client->uses, 1);
+       init_completion(&client->uses_zero);
        ret = assign_client_id(client);
        if (ret)
                return ret;
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client)
        unsigned long index;
 
        down_write(&clients_rwsem);
+       ib_client_put(client);
        xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
        up_write(&clients_rwsem);
-       /*
-        * Every device still known must be serialized to make sure we are
-        * done with the client callbacks before we return.
-        */
-       down_read(&devices_rwsem);
-       xa_for_each (&devices, index, device)
+
+       /* We do not want to have locks while calling client->remove() */
+       rcu_read_lock();
+       xa_for_each (&devices, index, device) {
+               if (!ib_device_try_get(device))
+                       continue;
+               rcu_read_unlock();
+
                remove_client_context(device, client->client_id);
-       up_read(&devices_rwsem);
 
-       down_write(&clients_rwsem);
-       list_del(&client->list);
-       xa_erase(&clients, client->client_id);
-       up_write(&clients_rwsem);
+               ib_device_put(device);
+               rcu_read_lock();
+       }
+       rcu_read_unlock();
+
+       /*
+        * remove_client_context() is not a fence, it can return even though a
+        * removal is ongoing. Wait until all removals are completed.
+        */
+       wait_for_completion(&client->uses_zero);
+       remove_client_id(client);
 }
 EXPORT_SYMBOL(ib_unregister_client);
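
The per-client `uses` refcount plus `uses_zero` completion replaces the old client_list: every device/client association takes a reference, ib_unregister_client() drops the owner's initial reference, and wait_for_completion() then sleeps until the last in-flight remove callback finishes. A generic sketch of the idiom (illustrative names, not the core/device.c ones):

    #include <linux/refcount.h>
    #include <linux/completion.h>

    struct object {
            refcount_t uses;                /* seeded to 1 by the owner */
            struct completion uses_zero;    /* fired when the last user drops */
    };

    static void object_init(struct object *obj)
    {
            refcount_set(&obj->uses, 1);    /* the owner's reference */
            init_completion(&obj->uses_zero);
    }

    static bool object_get(struct object *obj)
    {
            /* Fails once teardown has dropped the owner's reference. */
            return refcount_inc_not_zero(&obj->uses);
    }

    static void object_put(struct object *obj)
    {
            if (refcount_dec_and_test(&obj->uses))
                    complete(&obj->uses_zero);
    }

    static void object_teardown(struct object *obj)
    {
            object_put(obj);                        /* drop the owner's reference */
            wait_for_completion(&obj->uses_zero);   /* wait for in-flight users */
    }
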
 
index cc99479b2c09dc9258718aa139a91d45055ade12..9947d16edef210d39145720fef6db1ca6240603e 100644 (file)
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
        if (has_smi)
                cq_size *= 2;
 
+       port_priv->pd = ib_alloc_pd(device, 0);
+       if (IS_ERR(port_priv->pd)) {
+               dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+               ret = PTR_ERR(port_priv->pd);
+               goto error3;
+       }
+
        port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
                        IB_POLL_UNBOUND_WORKQUEUE);
        if (IS_ERR(port_priv->cq)) {
                dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
                ret = PTR_ERR(port_priv->cq);
-               goto error3;
-       }
-
-       port_priv->pd = ib_alloc_pd(device, 0);
-       if (IS_ERR(port_priv->pd)) {
-               dev_err(&device->dev, "Couldn't create ib_mad PD\n");
-               ret = PTR_ERR(port_priv->pd);
                goto error4;
        }
 
@@ -3278,11 +3278,11 @@ error8:
 error7:
        destroy_mad_qp(&port_priv->qp_info[0]);
 error6:
-       ib_dealloc_pd(port_priv->pd);
-error4:
        ib_free_cq(port_priv->cq);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+       ib_dealloc_pd(port_priv->pd);
 error3:
        kfree(port_priv);
 
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
        destroy_workqueue(port_priv->wq);
        destroy_mad_qp(&port_priv->qp_info[1]);
        destroy_mad_qp(&port_priv->qp_info[0]);
-       ib_dealloc_pd(port_priv->pd);
        ib_free_cq(port_priv->cq);
+       ib_dealloc_pd(port_priv->pd);
        cleanup_recv_queue(&port_priv->qp_info[1]);
        cleanup_recv_queue(&port_priv->qp_info[0]);
        /* XXX: Handle deallocation of MAD registration tables */
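
Allocating the PD before the CQ, then mirroring that order in the error labels and in ib_mad_port_close(), keeps teardown strictly in reverse order of setup. The idiom in isolation, as a sketch with hypothetical alloc_a()/alloc_b() helpers:

    static int example_open(struct device *dev)
    {
            void *a, *b;
            int ret;

            a = alloc_a(dev);                       /* hypothetical */
            if (IS_ERR(a))
                    return PTR_ERR(a);

            b = alloc_b(dev);                       /* hypothetical */
            if (IS_ERR(b)) {
                    ret = PTR_ERR(b);
                    goto err_free_a;                /* undo only what succeeded */
            }
            return 0;

    err_free_a:
            free_a(a);
            return ret;
    }
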
index 9f8a48016b4152a248f781d69760ce4fa433897c..ffdeaf6e0b686881bfa563ec0af80fca5e5cb4d5 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/slab.h>
+#include <linux/nospec.h>
 
 #include <linux/uaccess.h>
 
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
 
        if (get_user(id, arg))
                return -EFAULT;
+       if (id >= IB_UMAD_MAX_AGENTS)
+               return -EINVAL;
 
        mutex_lock(&file->port->file_mutex);
        mutex_lock(&file->mutex);
 
-       if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+       id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+       if (!__get_agent(file, id)) {
                ret = -EINVAL;
                goto out;
        }
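
Checking the untrusted id before taking any locks and then clamping it with array_index_nospec() closes the Spectre-v1 window in which a mispredicted bounds check could speculatively index past the array. The pattern in isolation:

    #include <linux/nospec.h>

    /* Sketch: sanitize a user-supplied index before the table access. */
    if (id >= IB_UMAD_MAX_AGENTS)
            return -EINVAL;
    id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
    agent = __get_agent(file, id);  /* now safe even under speculation */
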
index a91653aabf3899d9c7e3f5dc4be6ab8ff168ac6c..098ab883733eeef71243852940567788eca3cbc2 100644 (file)
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
        struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
        struct bnxt_qplib_gid *gid_to_del;
+       u16 vlan_id = 0xFFFF;
 
        /* Delete the entry from the hardware */
        ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
        if (sgid_tbl && sgid_tbl->active) {
                if (ctx->idx >= sgid_tbl->max)
                        return -EINVAL;
-               gid_to_del = &sgid_tbl->tbl[ctx->idx];
+               gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+               vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
                /* DEL_GID is called in WQ context (netdevice_event_work_handler)
                 * or via the ib_unregister_device path. In the former case QP1
                 * may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
                }
                ctx->refcnt--;
                if (!ctx->refcnt) {
-                       rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+                       rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+                                                vlan_id,  true);
                        if (rc) {
                                dev_err(rdev_to_dev(rdev),
                                        "Failed to remove GID: %#x", rc);
index 37928b1111dfc403221983607bab90d2e3326163..bdbde8e22420d39042a91770a36cd2f6c6399c60 100644 (file)
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
                                     struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     u16 max)
 {
-       sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+       sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
        if (!sgid_tbl->tbl)
                return -ENOMEM;
 
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
        for (i = 0; i < sgid_tbl->max; i++) {
                if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
                           sizeof(bnxt_qplib_gid_zero)))
-                       bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+                       bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+                                           sgid_tbl->tbl[i].vlan_id, true);
        }
-       memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+       memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
        memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
        sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                                     struct net_device *netdev)
 {
-       memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+       u32 i;
+
+       for (i = 0; i < sgid_tbl->max; i++)
+               sgid_tbl->tbl[i].vlan_id = 0xffff;
+
        memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
 }
 
index 30c42c92fac72fc91752cb005015bcccdf48d4cf..fbda11a7ab1aa450a0a8099a7a4c1c7ed30ce81b 100644 (file)
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
 };
 
 struct bnxt_qplib_sgid_tbl {
-       struct bnxt_qplib_gid           *tbl;
+       struct bnxt_qplib_gid_info      *tbl;
        u16                             *hw_id;
        u16                             max;
        u16                             active;
index 48793d3512ac4ef5ff6776c44cb20bf94275f097..40296b97d21e6c1534dbe82c4164e57ed78c9018 100644 (file)
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                        index, sgid_tbl->max);
                return -EINVAL;
        }
-       memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+       memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
        return 0;
 }
 
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                       struct bnxt_qplib_gid *gid, bool update)
+                       struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
 {
        struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
                                                   struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                return -ENOMEM;
        }
        for (index = 0; index < sgid_tbl->max; index++) {
-               if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+               if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+                   vlan_id == sgid_tbl->tbl[index].vlan_id)
                        break;
        }
        if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                if (rc)
                        return rc;
        }
-       memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+       memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
               sizeof(bnxt_qplib_gid_zero));
+       sgid_tbl->tbl[index].vlan_id = 0xFFFF;
        sgid_tbl->vlan[index] = 0;
        sgid_tbl->active--;
        dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
        }
        free_idx = sgid_tbl->max;
        for (i = 0; i < sgid_tbl->max; i++) {
-               if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+               if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+                   sgid_tbl->tbl[i].vlan_id == vlan_id) {
                        dev_dbg(&res->pdev->dev,
                                "SGID entry already exist in entry %d!\n", i);
                        *index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
        }
        /* Add GID to the sgid_tbl */
        memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+       sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
        sgid_tbl->active++;
        if (vlan_id != 0xFFFF)
                sgid_tbl->vlan[free_idx] = 1;
index 0ec3b12b0bcd4da3417e20daf706fa9e505e139c..13d9432d5ce2220e0f78bcfada846c9f74455ac0 100644 (file)
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
        u8                              data[16];
 };
 
+struct bnxt_qplib_gid_info {
+       struct bnxt_qplib_gid gid;
+       u16 vlan_id;
+};
+
 struct bnxt_qplib_ah {
        struct bnxt_qplib_gid           dgid;
        struct bnxt_qplib_pd            *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
                        struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
                        struct bnxt_qplib_gid *gid);
 int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-                       struct bnxt_qplib_gid *gid, bool update);
+                       struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
                        struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
                        bool update, u32 *index);
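
With bnxt_qplib_gid_info the SGID table is effectively keyed by the (GID, vlan_id) pair, so add and delete must match on both fields, with 0xFFFF marking an untagged entry. A hypothetical lookup helper over the new layout, mirroring the memcmp-plus-vlan test the hunks above use:

    /* Sketch: find the entry matching both GID and VLAN (0xFFFF == untagged). */
    static int sgid_find(struct bnxt_qplib_sgid_tbl *tbl,
                         const struct bnxt_qplib_gid *gid, u16 vlan_id)
    {
            int i;

            for (i = 0; i < tbl->max; i++)
                    if (!memcmp(&tbl->tbl[i].gid, gid, sizeof(*gid)) &&
                        tbl->tbl[i].vlan_id == vlan_id)
                            return i;
            return -ENOENT;
    }
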
index d5b643a1d9fd2e11365787a93eeace5cd35f3bc8..67052dc3100ce3e13b19ba48cfcd3c01dcc631b9 100644 (file)
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
                clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
 }
 
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
 {
        struct rsm_map_table *rmt;
        u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
        write_csr(dd, RCV_ERR_MASK, ~0ull);
 
        rmt = alloc_rsm_map_table(dd);
+       if (!rmt)
+               return -ENOMEM;
+
        /* set up QOS, including the QPN map table */
        init_qos(dd, rmt);
        init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
        val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
                RCV_BYPASS_HDR_SIZE_SHIFT);
        write_csr(dd, RCV_BYPASS, val);
+       return 0;
 }
 
 static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
                goto bail_cleanup;
 
        /* set initial RXE CSRs */
-       init_rxe(dd);
+       ret = init_rxe(dd);
+       if (ret)
+               goto bail_cleanup;
+
        /* set initial TXE CSRs */
        init_txe(dd);
        /* set initial non-RXE, non-TXE CSRs */
index 0477c14633ab8365849a8eecb24d2f0bcafb4feb..024a7c2b6124563cbd317b10cc3c0210b9de1b2e 100644 (file)
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
                    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
                        break;
                trdma_clean_swqe(qp, wqe);
-               rvt_qp_wqe_unreserve(qp, wqe);
                trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
                rvt_qp_complete_swqe(qp,
                                     wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
        if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
            cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
                trdma_clean_swqe(qp, wqe);
-               rvt_qp_wqe_unreserve(qp, wqe);
                trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
                rvt_qp_complete_swqe(qp,
                                     wqe,
index 92acccaaaa86d66dd233cb8fee5ba8b8cf1537be..996fc298207ea9f1520ce956a3e75e3119ca0240 100644 (file)
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
                flows[i].req = req;
                flows[i].npagesets = 0;
                flows[i].pagesets[0].mapped =  0;
+               flows[i].resync_npkts = 0;
        }
        req->flows = flows;
        return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
        return NULL;
 }
 
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
-                  u32 psn, u16 *fidx)
-{
-       for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
-             tail = CIRC_NEXT(tail, MAX_FLOWS)) {
-               struct tid_rdma_flow *flow = &req->flows[tail];
-               u32 spsn, lpsn;
-
-               spsn = full_flow_psn(flow, flow->flow_state.spsn);
-               lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
-               if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
-                       if (fidx)
-                               *fidx = tail;
-                       return flow;
-               }
-       }
-       return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
-                                      u32 psn, u16 *fidx)
-{
-       return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
-                                 fidx);
-}
-
 /* TID RDMA READ functions */
 u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
                                    struct ib_other_headers *ohdr, u32 *bth1,
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                         * to prevent continuous Flow Sequence errors for any
                         * packets that could be still in the fabric.
                         */
-                       flow = find_flow(req, psn, NULL);
-                       if (!flow) {
-                               /*
-                                * We can't find the IB PSN matching the
-                                * received KDETH PSN. The only thing we can
-                                * do at this point is report the error to
-                                * the QP.
-                                */
-                               hfi1_kern_read_tid_flow_free(qp);
-                               spin_unlock(&qp->s_lock);
-                               rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-                               return ret;
-                       }
+                       flow = &req->flows[req->clear_tail];
                        if (priv->s_flags & HFI1_R_TID_SW_PSN) {
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
index c4b243f50c76d660b18922035c4f51ec8b89e177..646f61545ed6be6b76998f0700d2185df22a81e5 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <rdma/opa_addr.h>
+#include <linux/nospec.h>
 
 #include "hfi.h"
 #include "common.h"
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
        sl = rdma_ah_get_sl(ah_attr);
        if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
                return -EINVAL;
+       sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
 
        sc5 = ibp->sl_to_sc[sl];
        if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
index 8bf847bcd8d3267d3581224d5a20d94126955d57..54782197c7172da17763fc5a2d747af31c4df4f5 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 config INFINIBAND_HNS
-       tristate "HNS RoCE Driver"
+       bool "HNS RoCE Driver"
        depends on NET_VENDOR_HISILICON
        depends on ARM64 || (COMPILE_TEST && 64BIT)
        ---help---
@@ -11,7 +11,7 @@ config INFINIBAND_HNS
          To compile HIP06 or HIP08 driver as module, choose M here.
 
 config INFINIBAND_HNS_HIP06
-       bool "Hisilicon Hip06 Family RoCE support"
+       tristate "Hisilicon Hip06 Family RoCE support"
        depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
        ---help---
          RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06
          module will be called hns-roce-hw-v1
 
 config INFINIBAND_HNS_HIP08
-       bool "Hisilicon Hip08 Family RoCE support"
+       tristate "Hisilicon Hip08 Family RoCE support"
        depends on INFINIBAND_HNS && PCI && HNS3
        ---help---
          RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
index e105945b94a11e4d1cd2e360e613335c846a3ea9..449a2d81319dd3ab4658eb19b8f6d03192356bb8 100644 (file)
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
        hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
        hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
 
-ifdef CONFIG_INFINIBAND_HNS_HIP06
 hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
 
-ifdef CONFIG_INFINIBAND_HNS_HIP08
 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
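
With the HIP06/HIP08 symbols flipped to tristate, kbuild resolves obj-$(CONFIG_INFINIBAND_HNS_HIP06) to obj-y or obj-m directly, so the previous ifdef guards keyed off CONFIG_INFINIBAND_HNS become redundant and each hardware backend can be built as its own module, each carrying its own copy of $(hns-roce-objs).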
index 627aa46ef683b8ec4467acc1404033f21822f627..c00714c2f16a60ca4c9a92c8148f73add6dddc56 100644 (file)
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
                         struct ib_udata *udata, unsigned long virt,
                         struct hns_roce_db *db)
 {
+       unsigned long page_addr = virt & PAGE_MASK;
        struct hns_roce_user_db_page *page;
+       unsigned int offset;
        int ret = 0;
 
        mutex_lock(&context->page_mutex);
 
        list_for_each_entry(page, &context->page_list, list)
-               if (page->user_virt == (virt & PAGE_MASK))
+               if (page->user_virt == page_addr)
                        goto found;
 
        page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
        }
 
        refcount_set(&page->refcount, 1);
-       page->user_virt = (virt & PAGE_MASK);
-       page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+       page->user_virt = page_addr;
+       page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
        if (IS_ERR(page->umem)) {
                ret = PTR_ERR(page->umem);
                kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
        list_add(&page->list, &context->page_list);
 
 found:
-       db->dma = sg_dma_address(page->umem->sg_head.sgl) +
-                 (virt & ~PAGE_MASK);
-       page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
-       db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+       offset = virt - page_addr;
+       db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+       db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
        db->u.user_page = page;
        refcount_inc(&page->refcount);
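
Computing the page base once and deriving the in-page offset from it avoids re-masking virt at every use and, more importantly, stops writing the offset back into the umem scatterlist. The arithmetic the hunk relies on, as a sketch:

    unsigned long page_addr = virt & PAGE_MASK;  /* round down to the page */
    unsigned int offset = virt - page_addr;      /* same as virt & ~PAGE_MASK */

    db->dma = sg_dma_address(sgl) + offset;      /* sgl as in the hunk above */
    db->virt_addr = sg_virt(sgl) + offset;
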
 
index 81e6dedb1e022c81990ee7e01bc80a0a7b9ec3f9..c07e387a07a38b88a02d175e6ddd889e13ef00bf 100644 (file)
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
        atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
 
        pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
-       if (!pd)
+       if (!pd) {
+               ret = -ENOMEM;
                goto alloc_mem_failed;
+       }
 
        pd->device  = ibdev;
        ret = hns_roce_alloc_pd(pd, NULL);
index c2a5780cb394e64b013fbeff00e78eb7e3361287..e12a4404096b6da2ec45f27104b49f6fd5ec4bf6 100644 (file)
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
                return;
        }
 
-       if (mpi->mdev_events.notifier_call)
-               mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
-       mpi->mdev_events.notifier_call = NULL;
-
        mpi->ibdev = NULL;
 
        spin_unlock(&port->mp.mpi_lock);
+       if (mpi->mdev_events.notifier_call)
+               mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+       mpi->mdev_events.notifier_call = NULL;
        mlx5_remove_netdev_notifier(ibdev, port_num);
        spin_lock(&port->mp.mpi_lock);
 
index c482f19958b39754555e8c74cb31906a95a1992a..f6a53455bf8bd7050814c60c5d070e6aade844de 100644 (file)
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
        u64                             length;
        int                             access_flags;
        u32                             mkey;
+       u8                              ignore_free_state:1;
 };
 
 static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
index 20ece6e0b2fcc1527b4018886b81d9f06633f0d6..b74fad08412fb1388ceefcf3787e5660c3fd7d10 100644 (file)
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
 static int mr_cache_max_order(struct mlx5_ib_dev *dev);
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
-       return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
 
 static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
 {
        return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
 }
 
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
-       return order <= mr_cache_max_order(dev) &&
-               umr_can_modify_entity_size(dev);
-}
-
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
        int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                return;
 
        c = order2idx(dev, mr->order);
-       if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
-               mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
-               return;
-       }
+       WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
 
-       if (unreg_umr(dev, mr))
+       if (unreg_umr(dev, mr)) {
+               mr->allocated_from_cache = false;
+               destroy_mkey(dev, mr);
+               ent = &cache->ent[c];
+               if (ent->cur < ent->limit)
+                       queue_work(cache->wq, &ent->work);
                return;
+       }
 
        ent = &cache->ent[c];
        spin_lock_irq(&ent->lock);
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
        struct mlx5_ib_mr *mr = NULL;
-       bool populate_mtts = false;
+       bool use_umr;
        struct ib_umem *umem;
        int page_shift;
        int npages;
@@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (err < 0)
                return ERR_PTR(err);
 
-       if (use_umr(dev, order)) {
+       use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+                 (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+                  !MLX5_CAP_GEN(dev->mdev, atomic));
+
+       if (order <= mr_cache_max_order(dev) && use_umr) {
                mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
                                         page_shift, order, access_flags);
                if (PTR_ERR(mr) == -EAGAIN) {
                        mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
                        mr = NULL;
                }
-               populate_mtts = false;
        } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
                if (access_flags & IB_ACCESS_ON_DEMAND) {
                        err = -EINVAL;
                        pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
                        goto error;
                }
-               populate_mtts = true;
+               use_umr = false;
        }
 
        if (!mr) {
-               if (!umr_can_modify_entity_size(dev))
-                       populate_mtts = true;
                mutex_lock(&dev->slow_path_mutex);
                mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
-                               page_shift, access_flags, populate_mtts);
+                               page_shift, access_flags, !use_umr);
                mutex_unlock(&dev->slow_path_mutex);
        }
 
@@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
        update_odp_mr(mr);
 
-       if (!populate_mtts) {
+       if (use_umr) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
                if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                return 0;
 
        umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-                             MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+                             MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
        umrwr.wr.opcode = MLX5_IB_WR_UMR;
+       umrwr.pd = dev->umrc.pd;
        umrwr.mkey = mr->mmkey.key;
+       umrwr.ignore_free_state = 1;
 
        return mlx5_ib_post_send_wait(dev, &umrwr);
 }
@@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                mr->sig = NULL;
        }
 
-       mlx5_free_priv_descs(mr);
-
-       if (!allocated_from_cache)
+       if (!allocated_from_cache) {
                destroy_mkey(dev, mr);
+               mlx5_free_priv_descs(mr);
+       }
 }
 
 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
index 5b642d81e617dc61207f3a690df1e7b304203326..81da82050d05ec2579bcd4145c2902a55b488cd7 100644 (file)
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
         * overwrite the same MTTs.  Concurrent invalidations might race us,
         * but they will write 0s as well, so no difference in the end result.
         */
-
+       mutex_lock(&umem_odp->umem_mutex);
        for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                /*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
+       mutex_unlock(&umem_odp->umem_mutex);
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
 
        num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
                                 w->num_sge, 0);
-       kfree(w);
+       kvfree(w);
 }
 
 int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
        if (valid_req)
                queue_work(system_unbound_wq, &work->work);
        else
-               kfree(work);
+               kvfree(work);
 
        srcu_read_unlock(&dev->mr_srcu, srcu_key);
 
index 2a97619ed6034d13e4091659dfdd4cb7b844db46..379328b2598fd59f1de6674df0918f4d6d79a91c 100644 (file)
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
                }
 
                MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
-               MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, ucmd.rx_hash_key, len);
                break;
        }
@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
 
        memset(umr, 0, sizeof(*umr));
 
-       if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
-               umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
-       else
-               umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+       if (!umrwr->ignore_free_state) {
+               if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+                        /* fail if free */
+                       umr->flags = MLX5_UMR_CHECK_FREE;
+               else
+                       /* fail if not free */
+                       umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+       }
 
        umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
        if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
index 533157a2a3be09e754a8425650ae9d4f5d7bb9a0..f97b3d65b30cc7e9bfb4d7303537d4f65b04b50d 100644 (file)
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
        struct qedr_dev *dev =
                rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
 
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
 }
 static DEVICE_ATTR_RO(hw_rev);
 
 static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
 {
-       return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+       struct qedr_dev *dev =
+               rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+       return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
+                        dev->pdev->device,
+                        rdma_protocol_iwarp(&dev->ibdev, 1) ?
+                        "iWARP" : "RoCE");
 }
 static DEVICE_ATTR_RO(hca_type);
 
index a7cde98e73e8c8616310f4d54e3523998953d2fc..9ce8a1b925d26306f18038c613c69ef09f5983bc 100644 (file)
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
 static void siw_cep_set_inuse(struct siw_cep *cep)
 {
        unsigned long flags;
-       int rv;
 retry:
        spin_lock_irqsave(&cep->lock, flags);
 
        if (cep->in_use) {
                spin_unlock_irqrestore(&cep->lock, flags);
-               rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+               wait_event_interruptible(cep->waitq, !cep->in_use);
                if (signal_pending(current))
                        flush_signals(current);
                goto retry;
index f55c4e80aea409a60349005bfe8ff53ab2ca5b07..d0f140daf65924287833ad03716c4a26e8fb8058 100644 (file)
@@ -612,6 +612,7 @@ static __init int siw_init_module(void)
 
        if (!siw_create_tx_threads()) {
                pr_info("siw: Could not start any TX thread\n");
+               rv = -ENOMEM;
                goto out_error;
        }
        /*
index 11383d9f95ef169b6e75e27b3f1d922dee3dc41b..e27bd5b35b966280e5ecfc2c2cc2affbc75ec597 100644 (file)
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp)
 {
        struct siw_rx_stream *c_rx = &qp->rx_stream;
        struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
-       int size = crypto_shash_descsize(siw_crypto_shash) +
-                       sizeof(struct shash_desc);
+       int size;
 
        if (siw_crypto_shash == NULL)
                return -ENOENT;
 
+       size = crypto_shash_descsize(siw_crypto_shash) +
+               sizeof(struct shash_desc);
+
        c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
        c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
        if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
index 433f4d2ee9565be632c1e780e7cdedcdf4254e1d..80a740df0737a638e6170f35c0b8b2a52cce37a0 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Virtio driver for the paravirtualized IOMMU
  *
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
        /* Device configuration */
        struct iommu_domain_geometry    geometry;
        u64                             pgsize_bitmap;
-       u8                              domain_bits;
+       u32                             first_domain;
+       u32                             last_domain;
+       /* Supported MAP flags */
+       u32                             map_flags;
        u32                             probe_size;
 };
 
@@ -62,6 +65,7 @@ struct viommu_domain {
        struct viommu_dev               *viommu;
        struct mutex                    mutex; /* protects viommu pointer */
        unsigned int                    id;
+       u32                             map_flags;
 
        spinlock_t                      mappings_lock;
        struct rb_root_cached           mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
                return -ENOENT;
        case VIRTIO_IOMMU_S_FAULT:
                return -EFAULT;
+       case VIRTIO_IOMMU_S_NOMEM:
+               return -ENOMEM;
        case VIRTIO_IOMMU_S_IOERR:
        case VIRTIO_IOMMU_S_DEVERR:
        default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
 {
        int ret;
        struct viommu_domain *vdomain = to_viommu_domain(domain);
-       unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
-                                 (1U << viommu->domain_bits) - 1;
 
        vdomain->viommu         = viommu;
+       vdomain->map_flags      = viommu->map_flags;
 
        domain->pgsize_bitmap   = viommu->pgsize_bitmap;
        domain->geometry        = viommu->geometry;
 
-       ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+       ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+                             viommu->last_domain, GFP_KERNEL);
        if (ret >= 0)
                vdomain->id = (unsigned int)ret;
 
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
                      phys_addr_t paddr, size_t size, int prot)
 {
        int ret;
-       int flags;
+       u32 flags;
        struct virtio_iommu_req_map map;
        struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
                (prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
                (prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
 
+       if (flags & ~vdomain->map_flags)
+               return -EINVAL;
+
        ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
        if (ret)
                return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
                goto err_free_vqs;
        }
 
-       viommu->domain_bits = 32;
+       viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+       viommu->last_domain = ~0U;
 
        /* Optional features */
        virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
                             struct virtio_iommu_config, input_range.end,
                             &input_end);
 
-       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
-                            struct virtio_iommu_config, domain_bits,
-                            &viommu->domain_bits);
+       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+                            struct virtio_iommu_config, domain_range.start,
+                            &viommu->first_domain);
+
+       virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+                            struct virtio_iommu_config, domain_range.end,
+                            &viommu->last_domain);
 
        virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
                             struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
                .force_aperture = true,
        };
 
+       if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+               viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
        viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
 
        virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
 
 static unsigned int features[] = {
        VIRTIO_IOMMU_F_MAP_UNMAP,
-       VIRTIO_IOMMU_F_DOMAIN_BITS,
        VIRTIO_IOMMU_F_INPUT_RANGE,
+       VIRTIO_IOMMU_F_DOMAIN_RANGE,
        VIRTIO_IOMMU_F_PROBE,
+       VIRTIO_IOMMU_F_MMIO,
 };
 
 static struct virtio_device_id id_table[] = {
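
Replacing the 32-bit domain_bits assumption with a [first_domain, last_domain] window read from the device config means domain IDs are handed out with ida_alloc_range() over exactly the advertised range:

    /* Sketch: IDs come from the inclusive window the device advertises. */
    int id = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
                             viommu->last_domain, GFP_KERNEL);
    if (id < 0)
            return id;      /* e.g. -ENOSPC once the window is exhausted */
    vdomain->id = id;
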
index 730fbe0e2a9dfa788b3698cbdcb10b682fe22888..1b5c3672aea277925b57d0e5e9162d7f31dde386 100644 (file)
@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe)
 
        if (!its_alloc_vpe_table(vpe_id)) {
                its_vpe_id_free(vpe_id);
-               its_free_pending_table(vpe->vpt_page);
+               its_free_pending_table(vpt_page);
                return -ENOMEM;
        }
 
index 9bca4896fa6fe4a0e14973354483655f75cf742a..96d927f0f91ad4fa7029130cb9462aa84be7cdf7 100644 (file)
@@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void)
                case 7:
                        write_gicreg(0, ICC_AP0R3_EL1);
                        write_gicreg(0, ICC_AP0R2_EL1);
+               /* Fall through */
                case 6:
                        write_gicreg(0, ICC_AP0R1_EL1);
+               /* Fall through */
                case 5:
                case 4:
                        write_gicreg(0, ICC_AP0R0_EL1);
@@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void)
        case 7:
                write_gicreg(0, ICC_AP1R3_EL1);
                write_gicreg(0, ICC_AP1R2_EL1);
+               /* Fall through */
        case 6:
                write_gicreg(0, ICC_AP1R1_EL1);
+               /* Fall through */
        case 5:
        case 4:
                write_gicreg(0, ICC_AP1R0_EL1);
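
The cascading cases here are deliberate: for N implemented active-priority registers the code clears from the highest index down, so each case must fall into the next. The comments exist to satisfy -Wimplicit-fallthrough; newer trees can use the fallthrough pseudo-keyword instead. A simplified sketch, not from this tree, with a hypothetical clear_reg():

    switch (nr_apr_regs) {
    case 3:
            clear_reg(2);
            fallthrough;    /* equivalent to the comment form used above */
    case 2:
            clear_reg(1);
            fallthrough;
    case 1:
            clear_reg(0);
            break;
    }
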
index bf2237ac5d091cfb48534d3c36197982a41072fe..4f74c15c475557ab2c2c6dd88634df077109e7d6 100644 (file)
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
        .irq_unmask             = imx_gpcv2_irq_unmask,
        .irq_set_wake           = imx_gpcv2_irq_set_wake,
        .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_type           = irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = irq_chip_set_affinity_parent,
 #endif
index 3dd28382d5f5dcefc063281fa9be1a03a7e3efd3..3f09f658e8e29e483f18074052b8a64559e97c1c 100644 (file)
@@ -241,12 +241,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
 
                parent = platform_bus_type.dev_root;
                child = of_platform_device_create(np, NULL, parent);
-               if (!child)
+               if (!child) {
+                       of_node_put(np);
                        return -ENOMEM;
+               }
 
                if (of_property_read_u32(child->dev.of_node, "num-pins",
                                         &num_pins) < 0) {
                        dev_err(&pdev->dev, "No num-pins property\n");
+                       of_node_put(np);
                        return -EINVAL;
                }
 
@@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
                                                           mbigen_write_msg,
                                                           &mbigen_domain_ops,
                                                           mgn_chip);
-               if (!domain)
+               if (!domain) {
+                       of_node_put(np);
                        return -ENOMEM;
+               }
        }
 
        return 0;
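
The surrounding loop is an OF child iterator (for_each_child_of_node() style), which takes a reference on each np it hands out; bailing out early without of_node_put() leaked that reference, which is what the new error paths fix. The general shape:

    /* Sketch: balance the reference the iterator takes on each node. */
    for_each_child_of_node(parent, np) {
            if (setup_one(np) < 0) {        /* hypothetical per-node setup */
                    of_node_put(np);        /* drop the iterator's ref on early exit */
                    return -EINVAL;
            }
    }
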
index caaee8032afef3705c70862be7458d0d2453f249..7b6c3ee9e75526fbbd7e809394bca38fbfca3fcb 100644 (file)
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
 
 /* validate the dax capability of the target device span */
 int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
-                                      sector_t start, sector_t len, void *data)
+                       sector_t start, sector_t len, void *data)
 {
        int blocksize = *(int *) data;
 
        return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
-                       start, len);
+                                      start, len);
 }
 
 /* Check devices support synchronous DAX */
-static int device_synchronous(struct dm_target *ti, struct dm_dev *dev,
-                                      sector_t start, sector_t len, void *data)
+static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+                                 sector_t start, sector_t len, void *data)
 {
-       return dax_synchronous(dev->dax_dev);
+       return dev->dax_dev && dax_synchronous(dev->dax_dev);
 }
 
 bool dm_table_supports_dax(struct dm_table *t,
-                         iterate_devices_callout_fn iterate_fn, int *blocksize)
+                          iterate_devices_callout_fn iterate_fn, int *blocksize)
 {
        struct dm_target *ti;
        unsigned i;
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t,
                        return false;
 
                if (!ti->type->iterate_devices ||
-                       !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+                   !ti->type->iterate_devices(ti, iterate_fn, blocksize))
                        return false;
        }
 
@@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 
        if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
-               if (dm_table_supports_dax(t, device_synchronous, NULL))
+               if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
                        set_dax_synchronous(t->md->dax_dev);
        }
        else
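
Renaming device_synchronous() to device_dax_synchronous() and adding the dev->dax_dev check matters because the iterate callback can run against table members that have no DAX device at all; dax_synchronous() should only be asked about devices that actually expose a dax_dev.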
index 35bf2477693d31a375469f13dd1800e98f3f5c0b..518945b2f73740c2a3f15dde2c73fcaace8cceb8 100644 (file)
@@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client)
        nvmem_config.name = dev_name(dev);
        nvmem_config.dev = dev;
        nvmem_config.read_only = !writable;
-       nvmem_config.root_only = true;
+       nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
        nvmem_config.owner = THIS_MODULE;
        nvmem_config.compat = true;
        nvmem_config.base_dev = dev;
index e327f80ebe7048ea74ff70a67cfa0e350b15a0e0..7102e2ebc614d0eb18c2122e037f5e5109c9f987 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
                goto free_tag_set;
        }
 
+       if (mmc_host_is_spi(host) && host->use_spi_crc)
+               mq->queue->backing_dev_info->capabilities |=
+                       BDI_CAP_STABLE_WRITES;
+
        mq->queue->queuedata = mq;
        blk_queue_rq_timeout(mq->queue, 60 * HZ);
 
index faaaf52a46d278409c2f183cea9969f75f90d069..eea52e2c5a0ce9c48578390b5f74e22770c18f9b 100644 (file)
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
                                 * delayed. Allowing the transfer to take place
                                 * avoids races and keeps things simple.
                                 */
-                               if ((err != -ETIMEDOUT) &&
-                                   (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+                               if (err != -ETIMEDOUT) {
                                        state = STATE_SENDING_DATA;
                                        continue;
                                }
index 2d736e4167757dca6ff052fc551fa4d122dbcce3..ba9a63db73da934b94a79d3f882007bca792b39d 100644 (file)
@@ -73,7 +73,7 @@
        #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK               GENMASK(7, 6)
        #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK               BIT(8)
        #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD               BIT(9)
-       #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK          GENMASK(10, 13)
+       #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK          GENMASK(13, 10)
        #define MESON_MX_SDIO_IRQC_SOFT_RESET                   BIT(15)
        #define MESON_MX_SDIO_IRQC_FORCE_HALT                   BIT(30)
        #define MESON_MX_SDIO_IRQC_HALT_HOLE                    BIT(31)
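
GENMASK(h, l) takes the high bit first; with the arguments swapped, GENMASK(10, 13) builds a nonsense value instead of covering bits 13..10. Corrected, the mask is:

    #include <linux/bits.h>

    /* GENMASK(13, 10) == 0x3c00, i.e. bits 13, 12, 11 and 10 set. */
    #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK  GENMASK(13, 10)
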
index 6ee340a3fb3a252632b1a714d82eab678173e457..603a5d9f045a87d3b56191d6ef0642f38c8fcdc4 100644 (file)
@@ -624,6 +624,7 @@ err_cleanup_host:
        sdhci_cleanup_host(host);
 
 pm_runtime_disable:
+       pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
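
The added pm_runtime_put_noidle() balances a usage-count reference the probe presumably took earlier (the full probe is not shown in this hunk); on failure the count must be dropped without triggering an idle transition before runtime PM is disabled. A sketch of the balanced pattern under that assumption:

    static int example_probe(struct device *dev)
    {
            int ret;

            pm_runtime_get_noresume(dev);
            pm_runtime_set_active(dev);
            pm_runtime_enable(dev);

            ret = do_init(dev);             /* hypothetical */
            if (ret)
                    goto err_pm;
            return 0;

    err_pm:
            pm_runtime_put_noidle(dev);     /* balance get_noresume */
            pm_runtime_disable(dev);
            pm_runtime_set_suspended(dev);
            return ret;
    }
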
 
index 2d06b8095a19cf92f9d5dcd2dc93375c5d27ea2a..df352b334ea77ab20f9ba7b400fca3e27321f129 100644 (file)
@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
-               cpu_pm_pmu_setup(armpmu, cmd);
        case CPU_PM_ENTER_FAILED:
+               cpu_pm_pmu_setup(armpmu, cmd);
                armpmu->start(armpmu);
                break;
        default:
index 48d6f0d875839f76fd0536bbe91079dc876b5bee..83ed1fbf73cfddaf6aaee0a28b9e843c5ac6f1f1 100644 (file)
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
 
+static const struct spi_device_id olpc_xo175_ec_id_table[] = {
+       { "xo1.75-ec", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
+
 static struct spi_driver olpc_xo175_ec_spi_driver = {
        .driver = {
                .name   = "olpc-xo175-ec",
index 235c0b89f824bc8969f2d7d055dd5ac8bbc841aa..c510d0d724759c77c81bda4319f138c60e80c875 100644 (file)
@@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
        INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
        INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
+       INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
        {}
 };
 
index b0d3110ae378d798660a30f15661a843eb6cefa4..e4c68efac0c253e3419f43b3abc17437c3bb8bd6 100644 (file)
@@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = {
 
 static struct gpio_keys_button apu2_keys_buttons[] = {
        {
-               .code                   = KEY_SETUP,
+               .code                   = KEY_RESTART,
                .active_low             = 1,
                .desc                   = "front button",
                .type                   = EV_KEY,
@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
 MODULE_ALIAS("platform:pcengines-apuv2");
-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
-MODULE_SOFTDEP("pre: platform:leds-gpio");
-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
+MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
index b9ce93e9df89295eb72132fcfc81d0257aaa1723..99f86612f7751ad6d47b7abee8661e8deec2b0b2 100644 (file)
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
        char msg_format;
        char msg_no;
 
+       /*
+        * intrc values ENODEV, ENOLINK and EPERM
+        * will be obtained from sleep_on to indicate that no
+        * IO operation can be started
+        */
+       if (cqr->intrc == -ENODEV)
+               return 1;
+
+       if (cqr->intrc == -ENOLINK)
+               return 1;
+
+       if (cqr->intrc == -EPERM)
+               return 1;
+
        sense = dasd_get_sense(&cqr->irb);
        if (!sense)
                return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
        lcu->flags &= ~NEED_UAC_UPDATE;
        spin_unlock_irqrestore(&lcu->lock, flags);
 
-       do {
-               rc = dasd_sleep_on(cqr);
-               if (rc && suborder_not_supported(cqr))
-                       return -EOPNOTSUPP;
-       } while (rc && (cqr->retries > 0));
-       if (rc) {
+       rc = dasd_sleep_on(cqr);
+       if (rc && !suborder_not_supported(cqr)) {
                spin_lock_irqsave(&lcu->lock, flags);
                lcu->flags |= NEED_UAC_UPDATE;
                spin_unlock_irqrestore(&lcu->lock, flags);
index 8c9d412b6d33bd14b873446260b26caacd31642c..e7cf0a1d4f716d83f2de606e65f5a4ffcde3f01a 100644 (file)
@@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
                }
                if (dstat == 0x08)
                        break;
+               /* else, fall through */
        case 0x04:
                /* Device end interrupt. */
                if ((raw = req->info) == NULL)
index 8d3370da2dfc294e1286caa337bd9d305fb624c5..3e0b2f63a9d222cf2bd0acb0adb2c104e7ab2a7f 100644 (file)
@@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
        switch (device->tape_state) {
                case TS_INIT:
                        tape_state_set(device, TS_NOT_OPER);
+                       /* fallthrough */
                case TS_NOT_OPER:
                        /*
                         * Nothing to do.
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
                                break;
                        if (device->tape_state == TS_UNUSED)
                                break;
+                       /* fallthrough */
                default:
                        if (device->tape_state == TS_BLKUSE)
                                break;
@@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
                        case -ETIMEDOUT:
                                DBF_LH(1, "(%08x): Request timed out\n",
                                       device->cdev_id);
+                               /* fallthrough */
                        case -EIO:
                                __tape_end_request(device, request, -EIO);
                                break;
index 8c1d2357ef5b8625a88ad5380e6389ec67824b78..7a838e3d7c0fae3a209fdf763a640bd140091c1f 100644 (file)
@@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
 
 }
 
-const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
        .read = vfio_ccw_async_region_read,
        .write = vfio_ccw_async_region_write,
        .release = vfio_ccw_async_region_release,
index 5ea83dc4f1d740e9db1288ed9d8f70423312d5ba..dad2be333d826fd5d49601e6b9c11f11b118b755 100644 (file)
@@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
                        ap_msg->receive(aq, ap_msg, aq->reply);
                        break;
                }
+               /* fall through */
        case AP_RESPONSE_NO_PENDING_REPLY:
                if (!status.queue_empty || aq->queue_count <= 0)
                        break;
index 12fe9deb265ea7fb97485805eab192ee232a3b56..a36251d138fb7afe25f3a24cc8bee36d33619b2c 100644 (file)
@@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
                if (msg->cprbx.cprb_ver_id == 0x02)
                        return convert_type86_ica(zq, reply,
                                                  outputdata, outputdatalength);
-               /*
-                * Fall through, no break, incorrect cprb version is an unknown
-                * response
-                */
+               /* fall through - wrong cprb version is an unknown response */
        default: /* Unknown response type, this should NEVER EVER happen */
                zq->online = 0;
                pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
                }
                if (msg->cprbx.cprb_ver_id == 0x02)
                        return convert_type86_xcrb(zq, reply, xcRB);
-               /*
-                * Fall through, no break, incorrect cprb version is an unknown
-                * response
-                */
+               /* fall through - wrong cprb version is an unknown response */
        default: /* Unknown response type, this should NEVER EVER happen */
                xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
                zq->online = 0;
@@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
                        return convert_error(zq, reply);
                if (msg->cprbx.cprb_ver_id == 0x04)
                        return convert_type86_ep11_xcrb(zq, reply, xcRB);
-       /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+               /* fall through - wrong cprb version is an unknown resp */
        default: /* Unknown response type, this should NEVER EVER happen */
                zq->online = 0;
                pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
                        return -EINVAL;
                if (msg->cprbx.cprb_ver_id == 0x02)
                        return convert_type86_rng(zq, reply, data);
-               /*
-                * Fall through, no break, incorrect cprb version is an unknown
-                * response
-                */
+               /* fall through - wrong cprb version is an unknown response */
        default: /* Unknown response type, this should NEVER EVER happen */
                zq->online = 0;
                pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
index 1a85fe9e4b7b1b25c20a6bd253b911c4d4582f93..1791a393795daffec93ae58aefb23ee3b9131527 100644 (file)
@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
  */
 static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
 {
-       return (struct fcoe_rport *)(rdata + 1);
+       return container_of(rdata, struct fcoe_rport, rdata);
 }
 
 /**
@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
  */
 static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
                              struct sk_buff *skb,
-                             struct fc_rport_priv *rdata)
+                             struct fcoe_rport *frport)
 {
        struct fip_header *fiph;
        struct fip_desc *desc = NULL;
@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
        struct fip_wwn_desc *wwn = NULL;
        struct fip_vn_desc *vn = NULL;
        struct fip_size_desc *size = NULL;
-       struct fcoe_rport *frport;
        size_t rlen;
        size_t dlen;
        u32 desc_mask = 0;
        u32 dtype;
        u8 sub;
 
-       memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-       frport = fcoe_ctlr_rport(rdata);
-
        fiph = (struct fip_header *)skb->data;
        frport->flags = ntohs(fiph->fip_flags);
 
@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
                        if (dlen != sizeof(struct fip_wwn_desc))
                                goto len_err;
                        wwn = (struct fip_wwn_desc *)desc;
-                       rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+                       frport->rdata.ids.node_name =
+                               get_unaligned_be64(&wwn->fd_wwn);
                        break;
                case FIP_DT_VN_ID:
                        if (dlen != sizeof(struct fip_vn_desc))
                                goto len_err;
                        vn = (struct fip_vn_desc *)desc;
                        memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
-                       rdata->ids.port_id = ntoh24(vn->fd_fc_id);
-                       rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+                       frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+                       frport->rdata.ids.port_name =
+                               get_unaligned_be64(&vn->fd_wwpn);
                        break;
                case FIP_DT_FC4F:
                        if (dlen != sizeof(struct fip_fc4_feat))
@@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
 /**
  * fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
  * @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
-                                  struct fc_rport_priv *rdata)
+                                  struct fcoe_rport *frport)
 {
-       struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
-
-       if (rdata->ids.port_id != fip->port_id)
+       if (frport->rdata.ids.port_id != fip->port_id)
                return;
 
        switch (fip->state) {
@@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
                 * Probe's REC bit is not set.
                 * If we don't reply, we will change our address.
                 */
-               if (fip->lp->wwpn > rdata->ids.port_name &&
+               if (fip->lp->wwpn > frport->rdata.ids.port_name &&
                    !(frport->flags & FIP_FL_REC_OR_P2P)) {
                        LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
                                        "port_id collision\n");
@@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
 /**
  * fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
  * @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe reply
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
-                                  struct fc_rport_priv *rdata)
+                                    struct fcoe_rport *frport)
 {
-       if (rdata->ids.port_id != fip->port_id)
+       if (frport->rdata.ids.port_id != fip->port_id)
                return;
        switch (fip->state) {
        case FIP_ST_VNMP_START:
@@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
 /**
  * fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
  * @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
  *
  * Called with ctlr_mutex held.
  */
-static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
+static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new)
 {
        struct fc_lport *lport = fip->lp;
        struct fc_rport_priv *rdata;
@@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
        struct fcoe_rport *frport;
        u32 port_id;
 
-       port_id = new->ids.port_id;
+       port_id = new->rdata.ids.port_id;
        if (port_id == fip->port_id)
                return;
 
@@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
        rdata->disc_id = lport->disc.disc_id;
 
        ids = &rdata->ids;
-       if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
-           (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+       if ((ids->port_name != -1 &&
+            ids->port_name != new->rdata.ids.port_name) ||
+           (ids->node_name != -1 &&
+            ids->node_name != new->rdata.ids.node_name)) {
                mutex_unlock(&rdata->rp_mutex);
                LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
                fc_rport_logoff(rdata);
                mutex_lock(&rdata->rp_mutex);
        }
-       ids->port_name = new->ids.port_name;
-       ids->node_name = new->ids.node_name;
+       ids->port_name = new->rdata.ids.port_name;
+       ids->node_name = new->rdata.ids.node_name;
        mutex_unlock(&rdata->rp_mutex);
 
        frport = fcoe_ctlr_rport(rdata);
        LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
                        port_id, frport->fcoe_len ? "old" : "new",
                        rdata->rp_state);
-       *frport = *fcoe_ctlr_rport(new);
+       frport->fcoe_len = new->fcoe_len;
+       frport->flags = new->flags;
+       frport->login_count = new->login_count;
+       memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN);
+       memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN);
        frport->time = 0;
 }
 
@@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
 /**
  * fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
  * @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
-                                     struct fc_rport_priv *new)
+                                     struct fcoe_rport *new)
 {
-       struct fcoe_rport *frport = fcoe_ctlr_rport(new);
-
-       if (frport->flags & FIP_FL_REC_OR_P2P) {
+       if (new->flags & FIP_FL_REC_OR_P2P) {
                LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
                fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
                return;
@@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
        case FIP_ST_VNMP_START:
        case FIP_ST_VNMP_PROBE1:
        case FIP_ST_VNMP_PROBE2:
-               if (new->ids.port_id == fip->port_id) {
+               if (new->rdata.ids.port_id == fip->port_id) {
                        LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
                                        "restart, state %d\n",
                                        fip->state);
@@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
                break;
        case FIP_ST_VNMP_CLAIM:
        case FIP_ST_VNMP_UP:
-               if (new->ids.port_id == fip->port_id) {
-                       if (new->ids.port_name > fip->lp->wwpn) {
+               if (new->rdata.ids.port_id == fip->port_id) {
+                       if (new->rdata.ids.port_name > fip->lp->wwpn) {
                                LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
                                                "restart, port_id collision\n");
                                fcoe_ctlr_vn_restart(fip);
@@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
                        break;
                }
                LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
-                               new->ids.port_id);
-               fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
-                                 min((u32)frport->fcoe_len,
+                               new->rdata.ids.port_id);
+               fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac,
+                                 min((u32)new->fcoe_len,
                                      fcoe_ctlr_fcoe_size(fip)));
                fcoe_ctlr_vn_add(fip, new);
                break;
        default:
                LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
-                               "ignoring claim from %x\n", new->ids.port_id);
+                               "ignoring claim from %x\n",
+                               new->rdata.ids.port_id);
                break;
        }
 }
@@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
 /**
  * fcoe_ctlr_vn_claim_resp() - handle received Claim Response
  * @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Claim Response
+ * @new: newly-parsed FCoE rport from the Claim Response
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
-                                   struct fc_rport_priv *new)
+                                   struct fcoe_rport *new)
 {
        LIBFCOE_FIP_DBG(fip, "claim resp from rport %x - state %s\n",
-                       new->ids.port_id, fcoe_ctlr_state(fip->state));
+                       new->rdata.ids.port_id, fcoe_ctlr_state(fip->state));
        if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
                fcoe_ctlr_vn_add(fip, new);
 }
@@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
 /**
  * fcoe_ctlr_vn_beacon() - handle received beacon.
  * @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Beacon
+ * @new: newly-parsed FCoE rport from the Beacon
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
-                               struct fc_rport_priv *new)
+                               struct fcoe_rport *new)
 {
        struct fc_lport *lport = fip->lp;
        struct fc_rport_priv *rdata;
        struct fcoe_rport *frport;
 
-       frport = fcoe_ctlr_rport(new);
-       if (frport->flags & FIP_FL_REC_OR_P2P) {
+       if (new->flags & FIP_FL_REC_OR_P2P) {
                LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
                fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
                return;
        }
-       rdata = fc_rport_lookup(lport, new->ids.port_id);
+       rdata = fc_rport_lookup(lport, new->rdata.ids.port_id);
        if (rdata) {
-               if (rdata->ids.node_name == new->ids.node_name &&
-                   rdata->ids.port_name == new->ids.port_name) {
+               if (rdata->ids.node_name == new->rdata.ids.node_name &&
+                   rdata->ids.port_name == new->rdata.ids.port_name) {
                        frport = fcoe_ctlr_rport(rdata);
+
                        LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
                                        rdata->ids.port_id);
                        if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
@@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
         * Don't add the neighbor yet.
         */
        LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
-                       new->ids.port_id);
+                       new->rdata.ids.port_id);
        if (time_after(jiffies,
                       fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
                fcoe_ctlr_vn_send_claim(fip);
@@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fip_header *fiph;
        enum fip_vn2vn_subcode sub;
-       struct {
-               struct fc_rport_priv rdata;
-               struct fcoe_rport frport;
-       } buf;
+       struct fcoe_rport frport = { };
        int rc, vlan_id = 0;
 
        fiph = (struct fip_header *)skb->data;
@@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
                goto drop;
        }
 
-       rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+       rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
        if (rc) {
                LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
                goto drop;
@@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
        mutex_lock(&fip->ctlr_mutex);
        switch (sub) {
        case FIP_SC_VN_PROBE_REQ:
-               fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+               fcoe_ctlr_vn_probe_req(fip, &frport);
                break;
        case FIP_SC_VN_PROBE_REP:
-               fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+               fcoe_ctlr_vn_probe_reply(fip, &frport);
                break;
        case FIP_SC_VN_CLAIM_NOTIFY:
-               fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+               fcoe_ctlr_vn_claim_notify(fip, &frport);
                break;
        case FIP_SC_VN_CLAIM_REP:
-               fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+               fcoe_ctlr_vn_claim_resp(fip, &frport);
                break;
        case FIP_SC_VN_BEACON:
-               fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+               fcoe_ctlr_vn_beacon(fip, &frport);
                break;
        default:
                LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
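
With the parsers now filling a struct fcoe_rport directly, the receive path above keeps a single zero-initialized instance on the stack instead of the old side-by-side rdata/frport buffer. A runnable userspace sketch of that shape; parse() is a stand-in for fcoe_ctlr_vn_parse():

#include <stdio.h>

struct rport_priv { unsigned int port_id; };
struct fcoe_rport { struct rport_priv rdata; unsigned short flags; };

static int parse(const char *pkt, struct fcoe_rport *frport)
{
        (void)pkt;
        frport->rdata.port_id = 0x010203;       /* pretend-parse */
        return 0;
}

int main(void)
{
        struct fcoe_rport frport = { };     /* one zeroed template, no memset */

        if (parse("pkt", &frport))
                return 1;
        /* dispatch on subcode, passing &frport, as fcoe_ctlr_vn_recv() does */
        printf("port_id %x\n", frport.rdata.port_id);
        return 0;
}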
@@ -2802,22 +2800,18 @@ drop:
  */
 static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
                              struct sk_buff *skb,
-                             struct fc_rport_priv *rdata)
+                             struct fcoe_rport *frport)
 {
        struct fip_header *fiph;
        struct fip_desc *desc = NULL;
        struct fip_mac_desc *macd = NULL;
        struct fip_wwn_desc *wwn = NULL;
-       struct fcoe_rport *frport;
        size_t rlen;
        size_t dlen;
        u32 desc_mask = 0;
        u32 dtype;
        u8 sub;
 
-       memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
-       frport = fcoe_ctlr_rport(rdata);
-
        fiph = (struct fip_header *)skb->data;
        frport->flags = ntohs(fiph->fip_flags);
 
@@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
                        if (dlen != sizeof(struct fip_wwn_desc))
                                goto len_err;
                        wwn = (struct fip_wwn_desc *)desc;
-                       rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+                       frport->rdata.ids.node_name =
+                               get_unaligned_be64(&wwn->fd_wwn);
                        break;
                default:
                        LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
 /**
  * fcoe_ctlr_vlan_disk_reply() - send FIP VLAN Discovery Notification.
  * @fip: The FCoE controller
+ * @frport: The newly-parsed FCoE rport from the Discovery Request
  *
  * Called with ctlr_mutex held.
  */
 static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
-                                     struct fc_rport_priv *rdata)
+                                     struct fcoe_rport *frport)
 {
-       struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
        enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
 
        if (fip->mode == FIP_MODE_VN2VN)
@@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
        struct fip_header *fiph;
        enum fip_vlan_subcode sub;
-       struct {
-               struct fc_rport_priv rdata;
-               struct fcoe_rport frport;
-       } buf;
+       struct fcoe_rport frport = { };
        int rc;
 
        fiph = (struct fip_header *)skb->data;
        sub = fiph->fip_subcode;
-       rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+       rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
        if (rc) {
                LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
                goto drop;
        }
        mutex_lock(&fip->ctlr_mutex);
        if (sub == FIP_SC_VL_REQ)
-               fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+               fcoe_ctlr_vlan_disc_reply(fip, &frport);
        mutex_unlock(&fip->ctlr_mutex);
 
 drop:
index eaf6177ac9ee1059868a0295bf59da2ec90fb2bc..1bb6aada93fab71e26f0e6c44569ae470a8c9e8b 100644 (file)
@@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
        case IOACCEL2_SERV_RESPONSE_COMPLETE:
                switch (c2->error_data.status) {
                case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+                       if (cmd)
+                               cmd->result = 0;
                        break;
                case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
                        cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
 
        /* check for good status */
        if (likely(c2->error_data.serv_response == 0 &&
-                       c2->error_data.status == 0))
+                       c2->error_data.status == 0)) {
+               cmd->result = 0;
                return hpsa_cmd_free_and_done(h, c, cmd);
+       }
 
        /*
         * Any RAID offload error results in retry which will use
@@ -5653,6 +5657,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
        if (c == NULL)
                return SCSI_MLQUEUE_DEVICE_BUSY;
 
+       /*
+        * This is necessary because the SML doesn't zero out this field during
+        * error recovery.
+        */
+       cmd->result = 0;
+
        /*
         * Call alternate submit routine for I/O accelerated commands.
         * Retries always go down the normal I/O path.
@@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
                if (idx != h->last_collision_tag) { /* Print once per tag */
                        dev_warn(&h->pdev->dev,
                                "%s: tag collision (tag=%d)\n", __func__, idx);
-                       if (c->scsi_cmd != NULL)
-                               scsi_print_command(c->scsi_cmd);
                        if (scmd)
                                scsi_print_command(scmd);
                        h->last_collision_tag = idx;
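
All of the hpsa hunks enforce one invariant: cmd->result must be zeroed explicitly at submission and on good-status completion, because the midlayer can resend a command with a stale result after error recovery. A hedged kernel-style sketch of that invariant; submit_to_hw() is a hypothetical stand-in for the driver's real submission path:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static int submit_to_hw(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
        return 0;       /* stub */
}

static int my_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
        /*
         * The midlayer does not zero ->result when a command is retried
         * after error recovery, so clear the stale status explicitly.
         */
        cmd->result = 0;

        if (submit_to_hw(sh, cmd))
                return SCSI_MLQUEUE_DEVICE_BUSY;
        return 0;
}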
index e0f3852fdad1543e8d668f828d2555bc78afab3a..da6e97d8dc3bb862a0ad1edd6e613d6af497507a 100644 (file)
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
 struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
 {
        struct fc_rport_priv *rdata;
+       size_t rport_priv_size = sizeof(*rdata);
 
        lockdep_assert_held(&lport->disc.disc_mutex);
 
@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
        if (rdata)
                return rdata;
 
-       rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+       if (lport->rport_priv_size > 0)
+               rport_priv_size = lport->rport_priv_size;
+       rdata = kzalloc(rport_priv_size, GFP_KERNEL);
        if (!rdata)
                return NULL;
 
index 684662888792f6ff0619fcc3c8a6c99a0909e42b..050c0f029ef9eb7c55902901ab667455b256cf42 100644 (file)
@@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
        u64 required_mask, coherent_mask;
        struct sysinfo s;
+       /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+       int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
 
        if (ioc->is_mcpu_endpoint)
                goto try_32bit;
@@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
                goto try_32bit;
 
        if (ioc->dma_mask)
-               coherent_mask = DMA_BIT_MASK(64);
+               coherent_mask = DMA_BIT_MASK(dma_mask);
        else
                coherent_mask = DMA_BIT_MASK(32);
 
-       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+       if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
            dma_set_coherent_mask(&pdev->dev, coherent_mask))
                goto try_32bit;
 
        ioc->base_add_sg_single = &_base_add_sg_single_64;
        ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-       ioc->dma_mask = 64;
+       ioc->dma_mask = dma_mask;
        goto out;
 
  try_32bit:
@@ -2744,7 +2746,7 @@ static int
 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
                                      struct pci_dev *pdev)
 {
-       if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+       if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
                if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
                        return -ENODEV;
        }
@@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
                total_sz += sz;
        } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
 
-       if (ioc->dma_mask == 64) {
+       if (ioc->dma_mask > 32) {
                if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
                        ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
                                 pci_name(ioc->pdev));
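
The mpt3sas hunks compute the mask width once and reuse it for the streaming mask, the coherent mask, and the later consistent-mask reset, with a 32-bit fallback. A hedged sketch of the same idea using the combined helper; my_set_dma_mask() and the newer_hw flag are illustrative, and the 63-bit figure follows the diff's comment about SAS3/SAS35 parts:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int my_set_dma_mask(struct pci_dev *pdev, bool newer_hw)
{
        int width = newer_hw ? 63 : 64;

        /* Sets the streaming and coherent masks together. */
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(width)))
                return width;
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                return 32;
        return -ENODEV;
}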
index 4059655639d909e8799fe991a6f4d0961820215c..da83034d47592cbbbb52b51f54b952b3158c7451 100644 (file)
@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
                ql_log(ql_log_warn, vha, 0xd049,
                    "Failed to allocate ct_sns request.\n");
                kfree(fcport);
-               fcport = NULL;
+               return NULL;
        }
 
        INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
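
The qla2xxx fix returns NULL immediately after kfree() instead of clearing the local pointer and falling through to initialize freed memory. A runnable userspace demo of the allocation-failure shape the fix restores; the struct is a stand-in:

#include <stdlib.h>

struct fcport { void *ct_sns; };

static struct fcport *alloc_fcport(void)
{
        struct fcport *fcport = calloc(1, sizeof(*fcport));

        if (!fcport)
                return NULL;
        fcport->ct_sns = calloc(1, 64);
        if (!fcport->ct_sns) {
                free(fcport);
                return NULL;    /* not "fcport = NULL;" and fall through */
        }
        return fcport;
}

int main(void)
{
        struct fcport *f = alloc_fcport();

        if (f) {
                free(f->ct_sns);
                free(f);
        }
        return 0;
}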
index 8192963329138210cde91919aaa184e9ee217a31..42a8c2a13ab1b1fc5b90f3f6b43f69572fa242ac 100644 (file)
@@ -96,7 +96,7 @@ struct vhost_uaddr {
 };
 
 #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #else
 #define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #endif
index 4c339c7e66e50d79312e00eee4c82492be5accab..a446a7221e13e9ac564ddf446e7fce84f8587420 100644 (file)
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
                goto out_put_map;
 
        if (!use_ptemod) {
-               err = vm_map_pages(vma, map->pages, map->count);
+               err = vm_map_pages_zero(vma, map->pages, map->count);
                if (err)
                        goto out_put_map;
        } else {
index 2f5ce7230a43e88a1a6ffde1a528a370b8ea8595..c6070e70dd73d6229f522aa3cd890e830589a04d 100644 (file)
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
        return 0;
 }
 
-struct remap_pfn {
-       struct mm_struct *mm;
-       struct page **pages;
-       pgprot_t prot;
-       unsigned long i;
-};
-
-static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
-{
-       struct remap_pfn *r = data;
-       struct page *page = r->pages[r->i];
-       pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
-
-       set_pte_at(r->mm, addr, ptep, pte);
-       r->i++;
-
-       return 0;
-}
-
 static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
 {
        struct privcmd_data *data = file->private_data;
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
                goto out;
        }
 
-       if (xen_feature(XENFEAT_auto_translated_physmap)) {
+       if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+           xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
                struct page **pages;
                unsigned int i;
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
        if (rc)
                goto out;
 
-       if (xen_feature(XENFEAT_auto_translated_physmap)) {
-               struct remap_pfn r = {
-                       .mm = vma->vm_mm,
-                       .pages = vma->vm_private_data,
-                       .prot = vma->vm_page_prot,
-               };
-
-               rc = apply_to_page_range(r.mm, kdata.addr,
-                                        kdata.num << PAGE_SHIFT,
-                                        remap_pfn_fn, &r);
+       if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+           xen_feature(XENFEAT_auto_translated_physmap)) {
+               rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
        } else {
                unsigned int domid =
                        (xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
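
Guarding both auto-translated branches with IS_ENABLED(CONFIG_XEN_AUTO_XLATE) lets the compiler drop the dead code, and its call to xen_remap_vma_range(), when the option is off, while still parsing and type-checking it, unlike an #ifdef block. A hedged sketch of the idiom; do_map() is illustrative:

#include <linux/kconfig.h>
#include <linux/types.h>

static int do_map(bool translated)
{
        /*
         * IS_ENABLED() folds to 0 or 1 at compile time, so when the option
         * is off this branch is eliminated entirely.
         */
        if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) && translated)
                return 1;       /* would call xen_remap_vma_range() here */
        return 0;               /* foreign-mapping path */
}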
index cfbe46785a3b32ed76dca278fd3b2b93f1a41a3e..ae1df496bf384bdb3d8f9c37769326f4c95f01cb 100644 (file)
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
        return xen_phys_to_bus(virt_to_phys(address));
 }
 
-static int check_pages_physically_contiguous(unsigned long xen_pfn,
-                                            unsigned int offset,
-                                            size_t length)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 {
-       unsigned long next_bfn;
-       int i;
-       int nr_pages;
+       unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+       unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
 
        next_bfn = pfn_to_bfn(xen_pfn);
-       nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
 
-       for (i = 1; i < nr_pages; i++) {
+       for (i = 1; i < nr_pages; i++)
                if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
-                       return 0;
-       }
-       return 1;
-}
+                       return 1;
 
-static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
-{
-       unsigned long xen_pfn = XEN_PFN_DOWN(p);
-       unsigned int offset = p & ~XEN_PAGE_MASK;
-
-       if (offset + size <= XEN_PAGE_SIZE)
-               return 0;
-       if (check_pages_physically_contiguous(xen_pfn, offset, size))
-               return 0;
-       return 1;
+       return 0;
 }
 
 static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
+               SetPageXenRemapped(virt_to_page(ret));
        }
        memset(ret, 0, size);
        return ret;
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
        /* Convert the size to actually allocated. */
        size = 1UL << (order + XEN_PAGE_SHIFT);
 
-       if (((dev_addr + size - 1 <= dma_mask)) ||
-           range_straddles_page_boundary(phys, size))
+       if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+                    range_straddles_page_boundary(phys, size)) &&
+           TestClearPageXenRemapped(virt_to_page(vaddr)))
                xen_destroy_contiguous_region(phys, order);
 
        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
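
The swiotlb-xen hunks pair a marker set at allocation time with an atomic test-and-clear at free time, so only buffers that were actually exchanged for machine-contiguous memory get torn down, and at most once. A hedged sketch of that pairing using the PageXenRemapped accessors this series introduces; the wrapper functions are illustrative:

#include <linux/mm.h>
#include <linux/page-flags.h>
#include <xen/xen-ops.h>

static void mark_if_remapped(void *ret, bool made_contiguous)
{
        if (made_contiguous)
                SetPageXenRemapped(virt_to_page(ret));
}

static void teardown_if_remapped(void *vaddr, phys_addr_t phys,
                                 unsigned int order)
{
        /* TestClear is atomic: a double free cannot tear down twice. */
        if (TestClearPageXenRemapped(virt_to_page(vaddr)))
                xen_destroy_contiguous_region(phys, order);
}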
index 73427d8e01161ae55fa561599927bb68536bdbd0..e5694133ebe57f37950b134c26c3fbc2a231a221 100644 (file)
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
 {
        int err;
        u16 old_value;
-       pci_power_t new_state, old_state;
+       pci_power_t new_state;
 
        err = pci_read_config_word(dev, offset, &old_value);
        if (err)
                goto out;
 
-       old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
        new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
 
        new_value &= PM_OK_BITS;
index ba883a80b3c04db30fa08f8f21eba9f569c02598..7b1077f0abcb0839d89a299de2a9d9d60760943a 100644 (file)
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
        return 0;
 }
 EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+struct remap_pfn {
+       struct mm_struct *mm;
+       struct page **pages;
+       pgprot_t prot;
+       unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+       struct remap_pfn *r = data;
+       struct page *page = r->pages[r->i];
+       pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+       set_pte_at(r->mm, addr, ptep, pte);
+       r->i++;
+
+       return 0;
+}
+
+/* Used by the privcmd module, but has to be built-in on ARM */
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
+{
+       struct remap_pfn r = {
+               .mm = vma->vm_mm,
+               .pages = vma->vm_private_data,
+               .prot = vma->vm_page_prot,
+       };
+
+       return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
+}
+EXPORT_SYMBOL_GPL(xen_remap_vma_range);
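
xen_remap_vma_range() is a thin wrapper over apply_to_page_range(), which walks (allocating as needed) the page tables for a VA range and invokes remap_pfn_fn() once per PTE. A hedged sketch of a caller from a driver mmap handler, assuming the backing page array was stashed in vm_private_data as privcmd does; my_pages is illustrative:

#include <linux/fs.h>
#include <linux/mm.h>
#include <xen/xen-ops.h>

static struct page *my_pages[16];       /* hypothetical backing pages */

static int my_mmap(struct file *filp, struct vm_area_struct *vma)
{
        vma->vm_private_data = my_pages;
        return xen_remap_vma_range(vma, vma->vm_start,
                                   vma->vm_end - vma->vm_start);
}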
index c2a85b587922d9eacf8c539d00ad2b7efff26a3f..a6f7c892cb4a390f9d6357263cfc17fa8c2db9aa 100644 (file)
@@ -439,6 +439,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                                        ret = -EAGAIN;
                                goto error;
                        }
+                       ret = dio->size;
 
                        if (polled)
                                WRITE_ONCE(iocb->ki_cookie, qc);
@@ -465,7 +466,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                                ret = -EAGAIN;
                        goto error;
                }
-               ret += bio->bi_iter.bi_size;
+               ret = dio->size;
 
                bio = bio_alloc(gfp, nr_pages);
                if (!bio) {
@@ -1181,8 +1182,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
  * Pointer to the block device containing @bdev on success, ERR_PTR()
  * value on failure.
  */
-static struct block_device *bd_start_claiming(struct block_device *bdev,
-                                             void *holder)
+struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
 {
        struct gendisk *disk;
        struct block_device *whole;
@@ -1229,6 +1229,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
                return ERR_PTR(err);
        }
 }
+EXPORT_SYMBOL(bd_start_claiming);
+
+static void bd_clear_claiming(struct block_device *whole, void *holder)
+{
+       lockdep_assert_held(&bdev_lock);
+       /* tell others that we're done */
+       BUG_ON(whole->bd_claiming != holder);
+       whole->bd_claiming = NULL;
+       wake_up_bit(&whole->bd_claiming, 0);
+}
+
+/**
+ * bd_finish_claiming - finish claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Finish exclusive open of a block device. Mark the device as exclusively
+ * open by the holder and wake up all waiters for exclusive open to finish.
+ */
+void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
+                       void *holder)
+{
+       spin_lock(&bdev_lock);
+       BUG_ON(!bd_may_claim(bdev, whole, holder));
+       /*
+        * Note that for a whole device bd_holders will be incremented twice,
+        * and bd_holder will be set to bd_may_claim before being set to holder
+        */
+       whole->bd_holders++;
+       whole->bd_holder = bd_may_claim;
+       bdev->bd_holders++;
+       bdev->bd_holder = holder;
+       bd_clear_claiming(whole, holder);
+       spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_finish_claiming);
+
+/**
+ * bd_abort_claiming - abort claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Abort claiming of a block device when the exclusive open failed. This can
+ * also be used when exclusive open is not actually desired and we just needed
+ * to block other exclusive openers for a while.
+ */
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+                      void *holder)
+{
+       spin_lock(&bdev_lock);
+       bd_clear_claiming(whole, holder);
+       spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_abort_claiming);
 
 #ifdef CONFIG_SYSFS
 struct bd_holder_disk {
@@ -1698,29 +1754,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 
                /* finish claiming */
                mutex_lock(&bdev->bd_mutex);
-               spin_lock(&bdev_lock);
-
-               if (!res) {
-                       BUG_ON(!bd_may_claim(bdev, whole, holder));
-                       /*
-                        * Note that for a whole device bd_holders
-                        * will be incremented twice, and bd_holder
-                        * will be set to bd_may_claim before being
-                        * set to holder
-                        */
-                       whole->bd_holders++;
-                       whole->bd_holder = bd_may_claim;
-                       bdev->bd_holders++;
-                       bdev->bd_holder = holder;
-               }
-
-               /* tell others that we're done */
-               BUG_ON(whole->bd_claiming != holder);
-               whole->bd_claiming = NULL;
-               wake_up_bit(&whole->bd_claiming, 0);
-
-               spin_unlock(&bdev_lock);
-
+               bd_finish_claiming(bdev, whole, holder);
                /*
                 * Block event polling for write claims if requested.  Any
                 * write holder makes the write_holder state stick until
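
Exporting the claiming helpers lets code outside block_dev.c hold off concurrent exclusive openers across a multi-step operation and then either commit or abort the claim. A hedged usage sketch; probe_super() is a hypothetical step that must run without racing exclusive opens, and holder is any unique cookie pointer:

#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/fs.h>

static int probe_super(struct block_device *bdev)
{
        return 0;       /* stub */
}

static int exclusive_probe(struct block_device *bdev, void *holder)
{
        struct block_device *whole = bd_start_claiming(bdev, holder);
        int err;

        if (IS_ERR(whole))
                return PTR_ERR(whole);

        err = probe_super(bdev);        /* other exclusive opens wait here */
        if (err)
                bd_abort_claiming(bdev, whole, holder);
        else
                bd_finish_claiming(bdev, whole, holder);
        return err;
}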
index 89116afda7a20ff48f41e95a5f6e0ba8ff3da8d6..e5d85311d5d5d4eadd857fda99a99bd881074b38 100644 (file)
@@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
        ulist_init(roots);
        ulist_init(tmp);
 
-       trans = btrfs_attach_transaction(root);
+       trans = btrfs_join_transaction_nostart(root);
        if (IS_ERR(trans)) {
                if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
                        ret = PTR_ERR(trans);
index 69b59bf75882eff54415e03ef08943db946ef087..c3c0c064c25dadd317841e1d2761e2629b6ec9f3 100644 (file)
@@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx,
 {
        int ret = 0;
 
-       if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
-               if (result == BTRFS_COMPARE_TREE_CHANGED) {
-                       struct extent_buffer *leaf_l;
-                       struct extent_buffer *leaf_r;
-                       struct btrfs_file_extent_item *ei_l;
-                       struct btrfs_file_extent_item *ei_r;
-
-                       leaf_l = sctx->left_path->nodes[0];
-                       leaf_r = sctx->right_path->nodes[0];
-                       ei_l = btrfs_item_ptr(leaf_l,
-                                             sctx->left_path->slots[0],
-                                             struct btrfs_file_extent_item);
-                       ei_r = btrfs_item_ptr(leaf_r,
-                                             sctx->right_path->slots[0],
-                                             struct btrfs_file_extent_item);
-
-                       /*
-                        * We may have found an extent item that has changed
-                        * only its disk_bytenr field and the corresponding
-                        * inode item was not updated. This case happens due to
-                        * very specific timings during relocation when a leaf
-                        * that contains file extent items is COWed while
-                        * relocation is ongoing and its in the stage where it
-                        * updates data pointers. So when this happens we can
-                        * safely ignore it since we know it's the same extent,
-                        * but just at different logical and physical locations
-                        * (when an extent is fully replaced with a new one, we
-                        * know the generation number must have changed too,
-                        * since snapshot creation implies committing the current
-                        * transaction, and the inode item must have been updated
-                        * as well).
-                        * This replacement of the disk_bytenr happens at
-                        * relocation.c:replace_file_extents() through
-                        * relocation.c:btrfs_reloc_cow_block().
-                        */
-                       if (btrfs_file_extent_generation(leaf_l, ei_l) ==
-                           btrfs_file_extent_generation(leaf_r, ei_r) &&
-                           btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
-                           btrfs_file_extent_compression(leaf_l, ei_l) ==
-                           btrfs_file_extent_compression(leaf_r, ei_r) &&
-                           btrfs_file_extent_encryption(leaf_l, ei_l) ==
-                           btrfs_file_extent_encryption(leaf_r, ei_r) &&
-                           btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
-                           btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
-                           btrfs_file_extent_type(leaf_l, ei_l) ==
-                           btrfs_file_extent_type(leaf_r, ei_r) &&
-                           btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
-                           btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
-                           btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
-                           btrfs_file_extent_offset(leaf_l, ei_l) ==
-                           btrfs_file_extent_offset(leaf_r, ei_r) &&
-                           btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
-                           btrfs_file_extent_num_bytes(leaf_r, ei_r))
-                               return 0;
-               }
-
-               inconsistent_snapshot_error(sctx, result, "extent");
-               return -EIO;
-       }
+       /*
+        * We have found an extent item that changed without the inode item
+        * having changed. This can happen either after relocation (where the
+        * disk_bytenr of an extent item is replaced at
+        * relocation.c:replace_file_extents()) or after deduplication into a
+        * file in both the parent and send snapshots (where an extent item can
+        * get modified or replaced with a new one). Note that deduplication
+        * updates the inode item, but it only changes the iversion (sequence
+        * field in the inode item) of the inode, so if a file is deduplicated
+        * the same amount of times in both the parent and send snapshots, its
+        * iversion becomes the same in both snapshots, hence the inode item is
+        * the same on both snapshots.
+        */
+       if (sctx->cur_ino != sctx->cmp_key->objectid)
+               return 0;
 
        if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
                if (result != BTRFS_COMPARE_TREE_DELETED)
index 3b8ae1a8f02df77769542a3c0a0782f452c8c791..e3adb714c04b364869e9da5c36e2f9fa0157b7fb 100644 (file)
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_COMMIT_START]      = (__TRANS_START | __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_START |
                                           __TRANS_ATTACH |
-                                          __TRANS_JOIN),
+                                          __TRANS_JOIN |
+                                          __TRANS_JOIN_NOSTART),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
-                                          __TRANS_JOIN_NOLOCK),
+                                          __TRANS_JOIN_NOLOCK |
+                                          __TRANS_JOIN_NOSTART),
        [TRANS_STATE_COMPLETED]         = (__TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
-                                          __TRANS_JOIN_NOLOCK),
+                                          __TRANS_JOIN_NOLOCK |
+                                          __TRANS_JOIN_NOSTART),
 };
 
 void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -543,7 +546,8 @@ again:
                ret = join_transaction(fs_info, type);
                if (ret == -EBUSY) {
                        wait_current_trans(fs_info);
-                       if (unlikely(type == TRANS_ATTACH))
+                       if (unlikely(type == TRANS_ATTACH ||
+                                    type == TRANS_JOIN_NOSTART))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);
@@ -659,6 +663,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
                                 BTRFS_RESERVE_NO_FLUSH, true);
 }
 
+/*
+ * Similar to a regular join, but it never starts a transaction: neither when
+ * none is running nor after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+       return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+                                BTRFS_RESERVE_NO_FLUSH, true);
+}
+
 /*
  * btrfs_attach_transaction() - catch the running transaction
  *
@@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
                }
        } else {
                spin_unlock(&fs_info->trans_lock);
+               /*
+                * The previous transaction was aborted and was already removed
+                * from the list of transactions at fs_info->trans_list. So we
+                * abort to prevent writing a new superblock that reflects a
+                * corrupt state (pointing to trees with unwritten nodes/leaves).
+                */
+               if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+                       ret = -EROFS;
+                       goto cleanup_transaction;
+               }
        }
 
        extwriter_counter_dec(cur_trans, trans->type);
index 527ea94b57d91a6c289cb4b4b67f6a07ecb85934..2c5a6f6e5bb0904eecef07941f9ad76f3ba2f036 100644 (file)
@@ -94,11 +94,13 @@ struct btrfs_transaction {
 #define __TRANS_JOIN           (1U << 11)
 #define __TRANS_JOIN_NOLOCK    (1U << 12)
 #define __TRANS_DUMMY          (1U << 13)
+#define __TRANS_JOIN_NOSTART   (1U << 14)
 
 #define TRANS_START            (__TRANS_START | __TRANS_FREEZABLE)
 #define TRANS_ATTACH           (__TRANS_ATTACH)
 #define TRANS_JOIN             (__TRANS_JOIN | __TRANS_FREEZABLE)
 #define TRANS_JOIN_NOLOCK      (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART     (__TRANS_JOIN_NOSTART)
 
 #define TRANS_EXTWRITERS       (__TRANS_START | __TRANS_ATTACH)
 
@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
                                        int min_factor);
 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
 struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
                                        struct btrfs_root *root);
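
Callers that must not create a transaction, nor be handed one that is merely finishing, use the new nostart join and treat -ENOENT as "nothing running". A hedged sketch of that calling convention, modeled on the btrfs_check_shared() change above:

#include "ctree.h"
#include "transaction.h"

static int check_shared_sketch(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = btrfs_join_transaction_nostart(root);
        if (IS_ERR(trans)) {
                if (PTR_ERR(trans) != -ENOENT)
                        return PTR_ERR(trans);
                trans = NULL;   /* nothing running; proceed without one */
        }

        /* ... walk backrefs without forcing a new transaction ... */

        if (trans)
                btrfs_end_transaction(trans);
        return 0;
}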
index e42e17e55bfd5512bee600f6bd435e66e3f90147..b1ea7dfbd1494bd8d990ae03ee1bc21277dbd636 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/stat.h>
 #include <linux/fcntl.h>
 #include <linux/swap.h>
+#include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
@@ -187,11 +188,13 @@ put_exe_file:
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
  */
-static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+                          size_t **argv, int *argc)
 {
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
+       bool was_space = false;
        int pid_in_pattern = 0;
        int err = 0;
 
@@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
                return -ENOMEM;
        cn->corename[0] = '\0';
 
-       if (ispipe)
+       if (ispipe) {
+               int argvs = sizeof(core_pattern) / 2;
+               (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
+               if (!(*argv))
+                       return -ENOMEM;
+               (*argv)[(*argc)++] = 0;
                ++pat_ptr;
+       }
 
        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
+               /*
+                * Split on spaces before doing template expansion so that
+                * %e and %E don't get split if they have spaces in them
+                */
+               if (ispipe) {
+                       if (isspace(*pat_ptr)) {
+                               was_space = true;
+                               pat_ptr++;
+                               continue;
+                       } else if (was_space) {
+                               was_space = false;
+                               err = cn_printf(cn, "%c", '\0');
+                               if (err)
+                                       return err;
+                               (*argv)[(*argc)++] = cn->used;
+                       }
+               }
                if (*pat_ptr != '%') {
                        err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
@@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
        struct cred *cred;
        int retval = 0;
        int ispipe;
+       size_t *argv = NULL;
+       int argc = 0;
        struct files_struct *displaced;
        /* require nonrelative corefile path and be extra careful */
        bool need_suid_safe = false;
@@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo)
 
        old_cred = override_creds(cred);
 
-       ispipe = format_corename(&cn, &cprm);
+       ispipe = format_corename(&cn, &cprm, &argv, &argc);
 
        if (ispipe) {
+               int argi;
                int dump_count;
                char **helper_argv;
                struct subprocess_info *sub_info;
@@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
                        goto fail_dropcount;
                }
 
-               helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
+               helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
+                                           GFP_KERNEL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
                        goto fail_dropcount;
                }
+               for (argi = 0; argi < argc; argi++)
+                       helper_argv[argi] = cn.corename + argv[argi];
+               helper_argv[argi] = NULL;
 
                retval = -ENOMEM;
                sub_info = call_usermodehelper_setup(helper_argv[0],
@@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
                        retval = call_usermodehelper_exec(sub_info,
                                                          UMH_WAIT_EXEC);
 
-               argv_free(helper_argv);
+               kfree(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to |%s pipe failed\n",
                               cn.corename);
@@ -766,6 +799,7 @@ fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
 fail_unlock:
+       kfree(argv);
        kfree(cn.corename);
        coredump_finish(mm, core_dumped);
        revert_creds(old_cred);
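
The coredump rewrite records each argument's start as an offset into cn->corename while the pattern is still being expanded, then resolves the offsets to pointers once the buffer is final, so a %e value containing spaces is never split. A runnable userspace demo of the offset-based splitting; template expansion is omitted for brevity:

#include <ctype.h>
#include <stdio.h>

int main(void)
{
        const char *pat = "/usr/bin/helper -o out %p";
        char corename[256];
        size_t argv_off[16], used = 0;
        int argc = 0, was_space = 0, i;

        argv_off[argc++] = 0;
        for (; *pat; pat++) {
                if (isspace((unsigned char)*pat)) {
                        was_space = 1;          /* split before expansion */
                        continue;
                }
                if (was_space) {
                        was_space = 0;
                        corename[used++] = '\0';        /* end previous arg */
                        argv_off[argc++] = used;
                }
                corename[used++] = *pat;
        }
        corename[used] = '\0';

        /* Resolve offsets to pointers only now that the buffer is final. */
        for (i = 0; i < argc; i++)
                printf("argv[%d] = %s\n", i, corename + argv_off[i]);
        return 0;
}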
index a237141d8787166eddf9701210ed811ffb77138e..b64964ef44f62b8ad3a261396d16809d2667e274 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
        /* If we were the only waiter woken, wake the next one */
-       if (entry && dax_is_conflict(entry))
+       if (entry && !dax_is_conflict(entry))
                dax_wake_entry(xas, entry, false);
 }
 
index f8d46df8fa9ee5cd91813b2de5e735e39a2ad4c5..3e58a6f697dd8ca73218c36304083e332d5e4c80 100644 (file)
@@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
 static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
 {
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       u32 oldflags;
 
        /* Is it quota file? Do not allow user to mess with it */
        if (IS_NOQUOTA(inode))
                return -EPERM;
 
-       oldflags = fi->i_flags;
-
-       if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
-               if (!capable(CAP_LINUX_IMMUTABLE))
-                       return -EPERM;
-
-       fi->i_flags = iflags | (oldflags & ~mask);
+       fi->i_flags = iflags | (fi->i_flags & ~mask);
 
        if (fi->i_flags & F2FS_PROJINHERIT_FL)
                set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
 static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
-       u32 fsflags;
+       struct f2fs_inode_info *fi = F2FS_I(inode);
+       u32 fsflags, old_fsflags;
        u32 iflags;
        int ret;
 
@@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
 
        inode_lock(inode);
 
+       old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
+       ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+       if (ret)
+               goto out;
+
        ret = f2fs_setflags_common(inode, iflags,
                        f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
+out:
        inode_unlock(inode);
        mnt_drop_write_file(filp);
        return ret;
@@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags)
        return iflags;
 }
 
-static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
+static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
 {
-       struct inode *inode = file_inode(filp);
        struct f2fs_inode_info *fi = F2FS_I(inode);
-       struct fsxattr fa;
 
-       memset(&fa, 0, sizeof(struct fsxattr));
-       fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
+       simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
 
        if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
-               fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
-                                                       fi->i_projid);
-
-       if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
-               return -EFAULT;
-       return 0;
+               fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
 }
 
-static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
 {
-       /*
-        * Project Quota ID state is only allowed to change from within the init
-        * namespace. Enforce that restriction only if we are trying to change
-        * the quota ID state. Everything else is allowed in user namespaces.
-        */
-       if (current_user_ns() == &init_user_ns)
-               return 0;
+       struct inode *inode = file_inode(filp);
+       struct fsxattr fa;
 
-       if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
-               return -EINVAL;
-
-       if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
-               if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
-                       return -EINVAL;
-       } else {
-               if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
-                       return -EINVAL;
-       }
+       f2fs_fill_fsxattr(inode, &fa);
 
+       if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
+               return -EFAULT;
        return 0;
 }
 
 static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
 {
        struct inode *inode = file_inode(filp);
-       struct fsxattr fa;
+       struct fsxattr fa, old_fa;
        u32 iflags;
        int err;
 
@@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
                return err;
 
        inode_lock(inode);
-       err = f2fs_ioctl_check_project(inode, &fa);
+
+       f2fs_fill_fsxattr(inode, &old_fa);
+       err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
        if (err)
                goto out;
+
        err = f2fs_setflags_common(inode, iflags,
                        f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
        if (err)
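
The f2fs ioctl paths above now build the current state with simple_fill_fsxattr() and validate the requested transition with the generic vfs_ioc_setflags_prepare()/vfs_ioc_fssetxattr_check() helpers instead of the removed open-coded checks. A hedged sketch of the FSSETXATTR flow; my_current_xflags() and my_apply_xflags() are hypothetical stand-ins for a filesystem's flag conversion helpers:

#include <linux/fs.h>

static u32 my_current_xflags(struct inode *inode)
{
        return 0;       /* stub */
}

static int my_apply_xflags(struct inode *inode, u32 xflags)
{
        return 0;       /* stub */
}

static int my_fssetxattr(struct inode *inode, struct fsxattr *fa)
{
        struct fsxattr old_fa;
        int err;

        simple_fill_fsxattr(&old_fa, my_current_xflags(inode));
        err = vfs_ioc_fssetxattr_check(inode, &old_fa, fa);
        if (err)
                return err;
        return my_apply_xflags(inode, fa->fsx_xflags);
}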
index 6691f526fa400b18aed0f632c6bee9106f10ce15..8974672db78f39ef929f2bb61230054ae38471e4 100644 (file)
@@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
        if (lfs_mode)
                down_write(&fio.sbi->io_order_lock);
 
+       mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+                                       fio.old_blkaddr, false);
+       if (!mpage)
+               goto up_out;
+
+       fio.encrypted_page = mpage;
+
+       /* read source block in mpage */
+       if (!PageUptodate(mpage)) {
+               err = f2fs_submit_page_bio(&fio);
+               if (err) {
+                       f2fs_put_page(mpage, 1);
+                       goto up_out;
+               }
+               lock_page(mpage);
+               if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
+                                               !PageUptodate(mpage))) {
+                       err = -EIO;
+                       f2fs_put_page(mpage, 1);
+                       goto up_out;
+               }
+       }
+
        f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                        &sum, CURSEG_COLD_DATA, NULL, false);
 
@@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
                                newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
-               goto recover_block;
-       }
-
-       mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
-                                       fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
-       if (mpage) {
-               bool updated = false;
-
-               if (PageUptodate(mpage)) {
-                       memcpy(page_address(fio.encrypted_page),
-                                       page_address(mpage), PAGE_SIZE);
-                       updated = true;
-               }
                f2fs_put_page(mpage, 1);
-               invalidate_mapping_pages(META_MAPPING(fio.sbi),
-                                       fio.old_blkaddr, fio.old_blkaddr);
-               if (updated)
-                       goto write_page;
-       }
-
-       err = f2fs_submit_page_bio(&fio);
-       if (err)
-               goto put_page_out;
-
-       /* write page */
-       lock_page(fio.encrypted_page);
-
-       if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
-               err = -EIO;
-               goto put_page_out;
-       }
-       if (unlikely(!PageUptodate(fio.encrypted_page))) {
-               err = -EIO;
-               goto put_page_out;
+               goto recover_block;
        }
 
-write_page:
+       /* write target block */
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
+       memcpy(page_address(fio.encrypted_page),
+                               page_address(mpage), PAGE_SIZE);
+       f2fs_put_page(mpage, 1);
+       invalidate_mapping_pages(META_MAPPING(fio.sbi),
+                               fio.old_blkaddr, fio.old_blkaddr);
+
        set_page_dirty(fio.encrypted_page);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -871,11 +868,12 @@ write_page:
 put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
 recover_block:
-       if (lfs_mode)
-               up_write(&fio.sbi->io_order_lock);
        if (err)
                f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
+up_out:
+       if (lfs_mode)
+               up_write(&fio.sbi->io_order_lock);
 put_out:
        f2fs_put_dnode(&dn);
 out:
index 6de6cda440315d68b4055f0c428a6ed302624b30..78a1b873e48ade9259c2592bd263fb86f22fdf16 100644 (file)
@@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        size_t crc_offset = 0;
        __u32 crc = 0;
 
+       if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
+               f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
+                         F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
+               return -EINVAL;
+       }
+
        /* Check checksum_offset and crc in superblock */
        if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
                crc_offset = le32_to_cpu(raw_super->checksum_offset);
@@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        offsetof(struct f2fs_super_block, crc)) {
                        f2fs_info(sbi, "Invalid SB checksum offset: %zu",
                                  crc_offset);
-                       return 1;
+                       return -EFSCORRUPTED;
                }
                crc = le32_to_cpu(raw_super->crc);
                if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
                        f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
-                       return 1;
+                       return -EFSCORRUPTED;
                }
        }
 
-       if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
-               f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
-                         F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
-               return 1;
-       }
-
        /* Currently, support only 4KB page cache size */
        if (F2FS_BLKSIZE != PAGE_SIZE) {
                f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
                          PAGE_SIZE);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support only 4KB block size */
@@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        if (blocksize != F2FS_BLKSIZE) {
                f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
                          blocksize);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check log blocks per segment */
        if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
                f2fs_info(sbi, "Invalid log blocks per segment (%u)",
                          le32_to_cpu(raw_super->log_blocks_per_seg));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                                F2FS_MIN_LOG_SECTOR_SIZE) {
                f2fs_info(sbi, "Invalid log sectorsize (%u)",
                          le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->log_sectors_per_block) +
                le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
                          le32_to_cpu(raw_super->log_sectors_per_block),
                          le32_to_cpu(raw_super->log_sectorsize));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
        if (segment_count > F2FS_MAX_SEGMENT ||
                                segment_count < F2FS_MIN_SEGMENTS) {
                f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (total_sections > segment_count ||
@@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                        segs_per_sec > segment_count || !segs_per_sec) {
                f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
                          segment_count, total_sections, segs_per_sec);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if ((segment_count / segs_per_sec) < total_sections) {
                f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
                          segment_count, segs_per_sec, total_sections);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
                f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
                          segment_count, le64_to_cpu(raw_super->block_count));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (secs_per_zone > total_sections || !secs_per_zone) {
                f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
                          secs_per_zone, total_sections);
-               return 1;
+               return -EFSCORRUPTED;
        }
        if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
                        raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                          le32_to_cpu(raw_super->extension_count),
                          raw_super->hot_ext_count,
                          F2FS_MAX_EXTENSION);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        if (le32_to_cpu(raw_super->cp_payload) >
@@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                f2fs_info(sbi, "Insane cp_payload (%u > %u)",
                          le32_to_cpu(raw_super->cp_payload),
                          blocks_per_seg - F2FS_CP_PACKS);
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check reserved ino info */
@@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                          le32_to_cpu(raw_super->node_ino),
                          le32_to_cpu(raw_super->meta_ino),
                          le32_to_cpu(raw_super->root_ino));
-               return 1;
+               return -EFSCORRUPTED;
        }
 
        /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
        if (sanity_check_area_boundary(sbi, bh))
-               return 1;
+               return -EFSCORRUPTED;
 
        return 0;
 }
@@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
                }
 
                /* sanity checking of raw super */
-               if (sanity_check_raw_super(sbi, bh)) {
+               err = sanity_check_raw_super(sbi, bh);
+               if (err) {
                        f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
                                 block + 1);
-                       err = -EFSCORRUPTED;
                        brelse(bh);
                        continue;
                }
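The sanity_check_raw_super() conversion above replaces the bare "return 1" convention with negative errnos, and read_raw_super_block() now forwards whatever code the checker produced: a wrong magic yields -EINVAL while structural damage yields -EFSCORRUPTED. A minimal userspace model of that propagation, assuming EFSCORRUPTED aliases EUCLEAN as it does in the kernel:

    #include <errno.h>
    #include <stdio.h>

    #ifndef EFSCORRUPTED
    #define EFSCORRUPTED EUCLEAN    /* same aliasing as the kernel's */
    #endif

    #define F2FS_SUPER_MAGIC_MODEL 0xF2F52010u

    static int sanity_check_model(unsigned int magic, int crc_ok)
    {
            if (magic != F2FS_SUPER_MAGIC_MODEL)
                    return -EINVAL;         /* not f2fs at all */
            if (!crc_ok)
                    return -EFSCORRUPTED;   /* f2fs, but damaged */
            return 0;
    }

    int main(void)
    {
            int err = sanity_check_model(0xdeadbeef, 1);

            if (err)
                    fprintf(stderr, "superblock rejected: %d\n", err);
            return 0;
    }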
index 79581b9bdebb357a62cfd0d821b4de0d74349b03..4df26ef2b2b15689bcfa058b8997bc0da3f99a67 100644 (file)
@@ -1002,11 +1002,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
                                 unsigned copied, struct page *page,
                                 struct iomap *iomap)
 {
+       struct gfs2_trans *tr = current->journal_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
 
        if (page && !gfs2_is_stuffed(ip))
                gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+
+       if (tr->tr_num_buf_new)
+               __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
        gfs2_trans_end(sdp);
 }
 
@@ -1099,8 +1104,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                tr = current->journal_info;
                if (tr->tr_num_buf_new)
                        __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
-               else
-                       gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
 
                gfs2_trans_end(sdp);
        }
@@ -1181,10 +1184,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
 
        if (ip->i_qadata && ip->i_qadata->qa_qd_num)
                gfs2_quota_unlock(ip);
+
+       if (unlikely(!written))
+               goto out_unlock;
+
        if (iomap->flags & IOMAP_F_SIZE_CHANGED)
                mark_inode_dirty(inode);
-       gfs2_write_unlock(inode);
+       set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 
+out_unlock:
+       gfs2_write_unlock(inode);
 out:
        return 0;
 }
index 012bc0efb9d3cba3241c5fe71883106f5616b092..d542f1cf4428ed79af62c76273d32885490f72ff 100644 (file)
@@ -1838,6 +1838,7 @@ restart:
        do {
                struct sqe_submit *s = &req->submit;
                const struct io_uring_sqe *sqe = s->sqe;
+               unsigned int flags = req->flags;
 
                /* Ensure we clear previously set non-block flag */
                req->rw.ki_flags &= ~IOCB_NOWAIT;
@@ -1883,7 +1884,7 @@ restart:
                kfree(sqe);
 
                /* req from defer and link list needn't decrease async cnt */
-               if (req->flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+               if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
                        goto out;
 
                if (!async_list)
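The io_uring hunk caches req->flags in a local before the submission path runs, because the request can be freed or recycled by the time the drain/link test executes; testing the snapshot avoids touching freed memory. A toy illustration of the same ordering (the structure and flag here are made up):

    #include <stdbool.h>
    #include <stdlib.h>

    struct req {
            unsigned int flags;
    };

    #define REQ_F_DONE (1U << 0)

    /* Pretend submission: may free the request before returning. */
    static void submit_and_maybe_free(struct req *r)
    {
            free(r);
    }

    static bool process(struct req *r)
    {
            unsigned int flags = r->flags;  /* snapshot before submission */

            submit_and_maybe_free(r);
            return flags & REQ_F_DONE;      /* safe: reads the local copy */
    }

    int main(void)
    {
            struct req *r = calloc(1, sizeof(*r));

            return r && !process(r) ? 0 : 1;
    }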
index 385f3aaa244809b0d442d746fb4e76bd391d7bb2..90c830e3758e2dea9af508efed08b48a8509553f 100644 (file)
@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
        u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
        int low_bucket = 0, bucket, high_bucket;
        struct ocfs2_xattr_bucket *search;
-       u32 last_hash;
        u64 blkno, lower_blkno = 0;
 
        search = ocfs2_xattr_bucket_new(inode);
@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
                if (xh->xh_count)
                        xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
 
-               last_hash = le32_to_cpu(xe->xe_name_hash);
-
                /* record lower_blkno which may be the insert place. */
                lower_blkno = blkno;
 
index 113c58f194255e3fc57166467018d5eab565548a..5960578a40760a26bc0ecd88e28177ce748f4380 100644 (file)
@@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super);
 
 bool mount_capable(struct fs_context *fc)
 {
-       struct user_namespace *user_ns = fc->global ? &init_user_ns
-                                                   : fc->user_ns;
-
        if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
                return capable(CAP_SYS_ADMIN);
        else
-               return ns_capable(user_ns, CAP_SYS_ADMIN);
+               return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
 }
 
 /**
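After the simplification, mount_capable() makes a single decision: filesystems without FS_USERNS_MOUNT require global CAP_SYS_ADMIN, and the rest are checked against the context's own user namespace. A tiny decision-table model of that split (the capability helpers below are stand-ins, not the kernel API):

    #include <stdbool.h>

    #define FS_USERNS_MOUNT (1 << 0)

    /* Stand-ins for capable()/ns_capable(); the real checks live in the kernel. */
    static bool capable_model(void)               { return false; }
    static bool ns_capable_model(const char *ns)  { (void)ns; return true; }

    static bool mount_capable_model(unsigned int fs_flags, const char *user_ns)
    {
            if (!(fs_flags & FS_USERNS_MOUNT))
                    return capable_model();       /* global admin required */
            return ns_capable_model(user_ns);     /* namespace-local admin */
    }

    int main(void)
    {
            return mount_capable_model(FS_USERNS_MOUNT, "user_ns0") ? 0 : 1;
    }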
index 94c4f1de1922f31ea5f9ffe395792fe3a918c531..77ff9f97bcda53747c0d79e4f82e9337d475fc2d 100644 (file)
@@ -278,7 +278,11 @@ xchk_da_btree_block_check_sibling(
        /* Compare upper level pointer to sibling pointer. */
        if (ds->state->altpath.blk[level].blkno != sibling)
                xchk_da_set_corrupt(ds, level);
-       xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
+       if (ds->state->altpath.blk[level].bp) {
+               xfs_trans_brelse(ds->dargs.trans,
+                               ds->state->altpath.blk[level].bp);
+               ds->state->altpath.blk[level].bp = NULL;
+       }
 out:
        return error;
 }
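The scrub fix releases the sibling buffer only when one is attached and then clears the pointer, so a later pass through the error path cannot release it a second time. The generic shape of that release-once idiom:

    #include <stdlib.h>

    static void release_once(void **slot)
    {
            if (*slot) {
                    free(*slot);
                    *slot = NULL;   /* later calls become harmless no-ops */
            }
    }

    int main(void)
    {
            void *buf = malloc(64);

            release_once(&buf);
            release_once(&buf);     /* safe: the pointer was cleared */
            return 0;
    }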
index a8a06bb78ea8e3c942d3d6c45f25f5999ff0a6c3..f5c955d35be4342467815f45ce49b02a89813931 100644 (file)
@@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat(
        struct xfs_bstat                *bs1,
        const struct xfs_bulkstat       *bstat)
 {
+       /* memset is needed here because of padding holes in the structure. */
        memset(bs1, 0, sizeof(struct xfs_bstat));
        bs1->bs_ino = bstat->bs_ino;
        bs1->bs_mode = bstat->bs_mode;
@@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp(
        struct xfs_inogrp               *ig1,
        const struct xfs_inumbers       *ig)
 {
+       /* memset is needed here because of padding holes in the structure. */
+       memset(ig1, 0, sizeof(struct xfs_inogrp));
        ig1->xi_startino = ig->xi_startino;
        ig1->xi_alloccount = ig->xi_alloccount;
        ig1->xi_allocmask = ig->xi_allocmask;
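Both xfs helpers zero the destination first because the compiler may leave padding holes between members; assigning every field still leaves those holes holding stale memory that would be copied out to userspace. An illustration with a struct that pads on most ABIs:

    #include <string.h>

    struct sample {
            unsigned char a;        /* 3 padding bytes typically follow */
            unsigned int  b;
    };

    static void fill(struct sample *out)
    {
            memset(out, 0, sizeof(*out));   /* clears the padding hole too */
            out->a = 1;
            out->b = 2;
    }

    int main(void)
    {
            struct sample s;

            fill(&s);
            return s.a == 1 && s.b == 2 ? 0 : 1;
    }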
index c64bea7a52bebd5c7332203e1e3a78bfa69b84c3..e9f20b813a699a3fb7630fe76aba3d1ab60c892e 100644 (file)
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-       int order;
-
-       size--;
-       size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-       order = fls(size);
-#else
-       order = fls64(size);
-#endif
-       return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)                                           \
-(                                                              \
-       __builtin_constant_p(n) ? (                             \
-               ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :     \
-               (((n) < (1UL << PAGE_SHIFT)) ? 0 :              \
-                ilog2((n) - 1) - PAGE_SHIFT + 1)               \
-       ) :                                                     \
-       __get_order(n)                                          \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+       if (__builtin_constant_p(size)) {
+               if (!size)
+                       return BITS_PER_LONG - PAGE_SHIFT;
+
+               if (size < (1UL << PAGE_SHIFT))
+                       return 0;
+
+               return ilog2((size) - 1) - PAGE_SHIFT + 1;
+       }
+
+       size--;
+       size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+       return fls(size);
+#else
+       return fls64(size);
+#endif
+}
 
 #endif /* __ASSEMBLY__ */
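The header change folds the constant-folding macro and __get_order() into one __attribute_const__ inline; semantically, get_order(n) is still the smallest order such that 2^order pages hold n bytes. A userspace model assuming 4 KiB pages, with the boundary cases spelled out:

    #include <assert.h>

    #define PAGE_SHIFT 12

    static int get_order_model(unsigned long size)
    {
            int order = 0;

            size = (size - 1) >> PAGE_SHIFT;
            while (size) {          /* mirrors the fls()-based runtime path */
                    order++;
                    size >>= 1;
            }
            return order;
    }

    int main(void)
    {
            assert(get_order_model(1) == 0);        /* fits in one page */
            assert(get_order_model(4096) == 0);
            assert(get_order_model(4097) == 1);     /* needs two pages */
            assert(get_order_model(8192) == 1);
            assert(get_order_model(8193) == 2);
            return 0;
    }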
 
index 72d51d1e9dd9ea33ea83b7c6fcdbdc0cc1f250ef..5cf2c5dd8b1e63d19a52f61fd8393133f73af25b 100644 (file)
@@ -149,6 +149,8 @@ struct drm_client_buffer {
 struct drm_client_buffer *
 drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
 void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
 
 int drm_client_modeset_create(struct drm_client_dev *client);
 void drm_client_modeset_free(struct drm_client_dev *client);
index 759d462d028bb7d32459c6e86e6e48564bb809a2..f57eea0481e0fd18a79d0c1178108619ae5e0a38 100644 (file)
@@ -852,6 +852,13 @@ struct drm_mode_config {
        /* dumb ioctl parameters */
        uint32_t preferred_depth, prefer_shadow;
 
+       /**
+        * @prefer_shadow_fbdev:
+        *
+        * Hint to framebuffer emulation to prefer shadow-fb rendering.
+        */
+       bool prefer_shadow_fbdev;
+
        /**
         * @quirk_addfb_prefer_xbgr_30bpp:
         *
index 3c096c7a51dc6105bbc90abab34f01786e36beca..853a8f1813947c4e745e0bc4b5c05a9d2197a650 100644 (file)
@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
 /**
  * devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
  * @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
  * @clks: pointer to the clk_bulk_data table of consumer
  *
  * Behaves the same as devm_clk_bulk_get() except where there is no clock
index 56b8e358af5c1e6c6e02663a6722909e0a13d70d..997a530ff4e9d038d1e705c582833f8f6817a436 100644 (file)
@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
                                               void *holder);
 extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
                                              void *holder);
+extern struct block_device *bd_start_claiming(struct block_device *bdev,
+                                             void *holder);
+extern void bd_finish_claiming(struct block_device *bdev,
+                              struct block_device *whole, void *holder);
+extern void bd_abort_claiming(struct block_device *bdev,
+                             struct block_device *whole, void *holder);
 extern void blkdev_put(struct block_device *bdev, fmode_t mode);
 extern int __blkdev_reread_part(struct block_device *bdev);
 extern int blkdev_reread_part(struct block_device *bdev);
index 9ddcf50a3c5924ad754d40029a3b0dd707ac556f..a7f08fb0f8653224ec2f87b11fad89d554b771c3 100644 (file)
@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(descs);
 }
 
 static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 
 static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
        might_sleep();
 
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(descs);
 }
 
 
 static inline int gpiod_get_direction(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_input(struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
 static inline int gpiod_get_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
                                        unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_value(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_array_value(unsigned int array_size,
                                        struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
                                        unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
                                            unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value(unsigned int array_size,
                                            struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
                                            unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 
 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
                                     unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
                                            struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
                                            unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
                                               unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
                                                int value)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
 }
 static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
                                                struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
                                                unsigned long *value_bitmap)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc_array);
        return 0;
 }
 
 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
 static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -ENOSYS;
 }
 
 static inline int gpiod_is_active_low(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 static inline int gpiod_cansleep(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return 0;
 }
 
 static inline int gpiod_to_irq(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
                                          const char *name)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
 
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
 static inline int desc_to_gpio(const struct gpio_desc *desc)
 {
        /* GPIO can never have been requested */
-       WARN_ON(1);
+       WARN_ON(desc);
        return -EINVAL;
 }
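Switching the stubs from WARN_ON(1) to WARN_ON(desc) makes them silent for NULL descriptors, which are legitimate results of the optional-GPIO getters, while still flagging a real descriptor leaking into a GPIOLIB=n build. A sketch of that distinction (the macro below only mimics the kernel's WARN_ON):

    #include <stdio.h>

    #define WARN_ON(cond) \
            ((cond) ? (fprintf(stderr, "WARN at %s:%d\n", __FILE__, __LINE__), 1) : 0)

    struct gpio_desc;       /* opaque, as in the stubbed header */

    static int gpiod_get_value_model(const struct gpio_desc *desc)
    {
            WARN_ON(desc != NULL);  /* NULL (optional GPIO) stays silent */
            return 0;
    }

    int main(void)
    {
            return gpiod_get_value_model(NULL);     /* prints nothing */
    }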
 
index b8a08b2a10ca058a192086ab3507d66ccbe984f1..7ef56dc18050a22a9e2968fc80d3f5bf991ae74c 100644 (file)
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
 
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
-       bool ret = hmm_range_valid(range);
-
-       hmm_range_unregister(range);
-       return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
-                               struct hmm_range *range, bool block)
-{
-       long ret;
-
-       /*
-        * With the old API the driver must set each individual entries with
-        * the requested flags (valid, write, ...). So here we set the mask to
-        * keep intact the entries provided by the driver and zero out the
-        * default_flags.
-        */
-       range->default_flags = 0;
-       range->pfn_flags_mask = -1UL;
-
-       ret = hmm_range_register(range, mirror,
-                                range->start, range->end,
-                                PAGE_SHIFT);
-       if (ret)
-               return (int)ret;
-
-       if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
-               /*
-                * The mmap_sem was taken by driver we release it here and
-                * returns -EAGAIN which correspond to mmap_sem have been
-                * drop in the old API.
-                */
-               up_read(&range->vma->vm_mm->mmap_sem);
-               return -EAGAIN;
-       }
-
-       ret = hmm_range_fault(range, block);
-       if (ret <= 0) {
-               if (ret == -EBUSY || !ret) {
-                       /* Same as above, drop mmap_sem to match old API. */
-                       up_read(&range->vma->vm_mm->mmap_sem);
-                       ret = -EBUSY;
-               } else if (ret == -EAGAIN)
-                       ret = -EBUSY;
-               hmm_range_unregister(range);
-               return ret;
-       }
-       return 0;
-}
-
 /* Below are for HMM internal use only! Not to be used by device driver! */
 static inline void hmm_mm_init(struct mm_struct *mm)
 {
index b2c1648f7e5d12e03c63c90f58dce9e504be08bb..5714fd35a83c43ca40c854ef5b1ddf8350da8aef 100644 (file)
@@ -814,6 +814,7 @@ struct tee_client_device_id {
 /**
  * struct wmi_device_id - WMI device identifier
  * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver-specific data
  */
 struct wmi_device_id {
        const char guid_string[UUID_STRING_LEN+1];
index 1dda31825ec4ae78c8852f15288a80c4a651bcf3..71283739ffd239c5790dd57f408411451fa68220 100644 (file)
@@ -32,6 +32,7 @@
 
 #endif /* CONFIG_SPARSEMEM */
 
+#ifndef BUILD_VDSO32_64
 /*
  * page->flags layout:
  *
 #define LAST_CPUPID_SHIFT 0
 #endif
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
+       <= BITS_PER_LONG - NR_PAGEFLAGS
 #define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
 #else
 #define LAST_CPUPID_WIDTH 0
 #endif
 
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
 #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
        > BITS_PER_LONG - NR_PAGEFLAGS
-#error "KASAN: not enough bits in page flags for tag"
-#endif
-#else
-#define KASAN_TAG_WIDTH 0
+#error "Not enough bits in page flags"
 #endif
 
 /*
 #define LAST_CPUPID_NOT_IN_PAGE_FLAGS
 #endif
 
+#endif
 #endif /* _LINUX_PAGE_FLAGS_LAYOUT */
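Reordering the header lets KASAN_TAG_WIDTH be defined before the width sums, so one compile-time budget check covers sections, zones, nodes, cpupid, and KASAN tag bits together. A self-contained model of that style of preprocessor accounting (all widths below are made up):

    #define BITS_PER_LONG     64
    #define NR_PAGEFLAGS      25
    #define SECTIONS_WIDTH    0
    #define NODES_WIDTH       6
    #define ZONES_WIDTH       3
    #define LAST_CPUPID_WIDTH 8
    #define KASAN_TAG_WIDTH   8

    #if SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + LAST_CPUPID_WIDTH + \
            KASAN_TAG_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
    #error "Not enough bits in page flags"
    #endif

    int main(void) { return 0; }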
index b848517da64c3945e4f0bfe2fc6210ce801f165b..f91cb8898ff0af60d751f7c026c7b9f852fe620b 100644 (file)
@@ -152,6 +152,8 @@ enum pageflags {
        PG_savepinned = PG_dirty,
        /* Has a grant mapping of another (foreign) domain's page. */
        PG_foreign = PG_owner_priv_1,
+       /* Remapped by swiotlb-xen. */
+       PG_xen_remapped = PG_owner_priv_1,
 
        /* SLOB */
        PG_slob_free = PG_private,
@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
        TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
 PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
 PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
+PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
+       TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
 
 PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
        __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
index c5f8a9f1706374fece3b342a3b995d9e31ca18d5..4f225175cb91ef6e43a75b29e9b51084b31db413 100644 (file)
@@ -2647,7 +2647,9 @@ struct ib_client {
                        const union ib_gid *gid,
                        const struct sockaddr *addr,
                        void *client_data);
-       struct list_head list;
+
+       refcount_t uses;
+       struct completion uses_zero;
        u32 client_id;
 
        /* kverbs are not required by the client */
index 0eeea520a85315d12cf838b52f4fbc3d6502221e..e06c77d764634a783578987c2c5143b8c6aae657 100644 (file)
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
 /**
  * rvt_qp_wqe_unreserve - clean reserved operation
  * @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
  *
  * This decrements the reserve use count.
  *
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
  * the compiler does not juggle the order of the s_last
  * ring index and the decrementing of s_reserved_used.
  */
-static inline void rvt_qp_wqe_unreserve(
-       struct rvt_qp *qp,
-       struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
 {
-       if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+       if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
                atomic_dec(&qp->s_reserved_used);
                /* insure no compiler re-order up to s_last change */
                smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
        u32 byte_len, last;
        int flags = wqe->wr.send_flags;
 
+       rvt_qp_wqe_unreserve(qp, flags);
        rvt_put_qp_swqe(qp, wqe);
 
        need_completion =
index 2d64b53f947cc1e58ef917725d07f4c2c96f3a7e..9b87e1a1c646a389870fa0aa75b047628e919688 100644 (file)
@@ -115,7 +115,7 @@ struct fc_disc_port {
        struct fc_lport    *lp;
        struct list_head   peers;
        struct work_struct rport_work;
-       u32                port_id;
+       u32                port_id;
 };
 
 /**
@@ -155,14 +155,14 @@ struct fc_rport_operations {
  */
 struct fc_rport_libfc_priv {
        struct fc_lport            *local_port;
-       enum fc_rport_state        rp_state;
+       enum fc_rport_state        rp_state;
        u16                        flags;
        #define FC_RP_FLAGS_REC_SUPPORTED       (1 << 0)
        #define FC_RP_FLAGS_RETRY               (1 << 1)
        #define FC_RP_STARTED                   (1 << 2)
        #define FC_RP_FLAGS_CONF_REQ            (1 << 3)
-       unsigned int               e_d_tov;
-       unsigned int               r_a_tov;
+       unsigned int               e_d_tov;
+       unsigned int               r_a_tov;
 };
 
 /**
@@ -191,24 +191,24 @@ struct fc_rport_priv {
        struct fc_lport             *local_port;
        struct fc_rport             *rport;
        struct kref                 kref;
-       enum fc_rport_state         rp_state;
+       enum fc_rport_state         rp_state;
        struct fc_rport_identifiers ids;
        u16                         flags;
-       u16                         max_seq;
+       u16                         max_seq;
        u16                         disc_id;
        u16                         maxframe_size;
-       unsigned int                retries;
-       unsigned int                major_retries;
-       unsigned int                e_d_tov;
-       unsigned int                r_a_tov;
-       struct mutex                rp_mutex;
+       unsigned int                retries;
+       unsigned int                major_retries;
+       unsigned int                e_d_tov;
+       unsigned int                r_a_tov;
+       struct mutex                rp_mutex;
        struct delayed_work         retry_work;
-       enum fc_rport_event         event;
+       enum fc_rport_event         event;
        struct fc_rport_operations  *ops;
-       struct list_head            peers;
-       struct work_struct          event_work;
+       struct list_head            peers;
+       struct work_struct          event_work;
        u32                         supported_classes;
-       u16                         prli_count;
+       u16                         prli_count;
        struct rcu_head             rcu;
        u16                         sp_features;
        u8                          spp_type;
@@ -618,12 +618,12 @@ struct libfc_function_template {
  * @disc_callback: Callback routine called when discovery completes
  */
 struct fc_disc {
-       unsigned char         retry_count;
-       unsigned char         pending;
-       unsigned char         requested;
-       unsigned short        seq_count;
-       unsigned char         buf_len;
-       u16                   disc_id;
+       unsigned char         retry_count;
+       unsigned char         pending;
+       unsigned char         requested;
+       unsigned short        seq_count;
+       unsigned char         buf_len;
+       u16                   disc_id;
 
        struct list_head      rports;
        void                  *priv;
@@ -697,7 +697,7 @@ struct fc_lport {
        struct fc_rport_priv           *ms_rdata;
        struct fc_rport_priv           *ptp_rdata;
        void                           *scsi_priv;
-       struct fc_disc                 disc;
+       struct fc_disc                 disc;
 
        /* Virtual port information */
        struct list_head               vports;
@@ -715,7 +715,7 @@ struct fc_lport {
        u8                             retry_count;
 
        /* Fabric information */
-       u32                            port_id;
+       u32                            port_id;
        u64                            wwpn;
        u64                            wwnn;
        unsigned int                   service_params;
@@ -743,11 +743,11 @@ struct fc_lport {
        struct fc_ns_fts               fcts;
 
        /* Miscellaneous */
-       struct mutex                   lp_mutex;
-       struct list_head               list;
+       struct mutex                   lp_mutex;
+       struct list_head               list;
        struct delayed_work            retry_work;
        void                           *prov[FC_FC4_PROV_SIZE];
-       struct list_head               lport_list;
+       struct list_head               lport_list;
 };
 
 /**
index dc14b52577f73cbaa1d558d001ab139859627f9e..2568cb0627ec0b388aa100391179993b603c2784 100644 (file)
@@ -229,6 +229,7 @@ struct fcoe_fcf {
  * @vn_mac:    VN_Node assigned MAC address for data
  */
 struct fcoe_rport {
+       struct fc_rport_priv rdata;
        unsigned long time;
        u16 fcoe_len;
        u16 flags;
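Embedding struct fc_rport_priv as the first member of struct fcoe_rport suggests the two are now allocated as a single object, letting code holding the generic rdata recover the fcoe-specific wrapper. A sketch of that embedding idiom (the container_of math written out with offsetof):

    #include <stddef.h>

    struct inner { int x; };

    struct outer {
            struct inner in;        /* embedded, first member */
            int extra;
    };

    #define container_of_model(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct outer o = { .in = { 1 }, .extra = 2 };
            struct inner *ip = &o.in;
            struct outer *op = container_of_model(ip, struct outer, in);

            return op->extra == 2 ? 0 : 1;
    }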
index 2212adda8f77f7d8cb44b0bdb0b22445b1fcb87d..64e92d56c6a8fab3be92894700fea09942b206ab 100644 (file)
@@ -2,7 +2,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM dma_fence
 
-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_DMA_FENCE_H
 
 #include <linux/tracepoint.h>
index f3a12566bed057143cbc966530f5df525043a67e..6678cf8b235b826aa0c9f5abbe7a5c5b09024ee8 100644 (file)
@@ -3,7 +3,7 @@
 #define TRACE_SYSTEM napi
 
 #if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_NAPI_H_
+#define _TRACE_NAPI_H
 
 #include <linux/netdevice.h>
 #include <linux/tracepoint.h>
@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
 
 #undef NO_DEV
 
-#endif /* _TRACE_NAPI_H_ */
+#endif /* _TRACE_NAPI_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
index 60d0d8bd336d08705f98468a87b9a31696c83941..0d1a9ebf55ba4415537a345867948a4d34b536c6 100644 (file)
@@ -2,7 +2,7 @@
 #define TRACE_SYSTEM qdisc
 
 #if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_QDISC_H_
+#define _TRACE_QDISC_H
 
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
                  __entry->txq_state, __entry->packets, __entry->skbaddr )
 );
 
-#endif /* _TRACE_QDISC_H_ */
+#endif /* _TRACE_QDISC_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
index 0818f628611095ee10c3b873bbed81355f0f2f7b..971cd02d2dafe760107880c72f16db44ba733863 100644 (file)
@@ -1,5 +1,5 @@
 #if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_TEGRA_APM_DMA_H
+#define _TRACE_TEGRA_APB_DMA_H
 
 #include <linux/tracepoint.h>
 #include <linux/dmaengine.h>
@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
        TP_printk("%s: irq %d\n",  __get_str(chan), __entry->irq)
 );
 
-#endif /*  _TRACE_TEGRADMA_H */
+#endif /* _TRACE_TEGRA_APB_DMA_H */
 
 /* This part must be outside protection */
 #include <trace/define_trace.h>
index ba1b460c9944df383b505ed230a8e611483ace75..237e36a280cb636fe053676438ddb80c3383af48 100644 (file)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause */
 /*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
  *
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
  */
 #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
 #define _UAPI_LINUX_VIRTIO_IOMMU_H
 
 /* Feature bits */
 #define VIRTIO_IOMMU_F_INPUT_RANGE             0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS             1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE            1
 #define VIRTIO_IOMMU_F_MAP_UNMAP               2
 #define VIRTIO_IOMMU_F_BYPASS                  3
 #define VIRTIO_IOMMU_F_PROBE                   4
+#define VIRTIO_IOMMU_F_MMIO                    5
 
-struct virtio_iommu_range {
-       __u64                                   start;
-       __u64                                   end;
+struct virtio_iommu_range_64 {
+       __le64                                  start;
+       __le64                                  end;
+};
+
+struct virtio_iommu_range_32 {
+       __le32                                  start;
+       __le32                                  end;
 };
 
 struct virtio_iommu_config {
        /* Supported page sizes */
-       __u64                                   page_size_mask;
+       __le64                                  page_size_mask;
        /* Supported IOVA range */
-       struct virtio_iommu_range               input_range;
+       struct virtio_iommu_range_64            input_range;
        /* Max domain ID size */
-       __u8                                    domain_bits;
-       __u8                                    padding[3];
+       struct virtio_iommu_range_32            domain_range;
        /* Probe buffer size */
-       __u32                                   probe_size;
+       __le32                                  probe_size;
 };
 
 /* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
 #define VIRTIO_IOMMU_S_RANGE                   0x05
 #define VIRTIO_IOMMU_S_NOENT                   0x06
 #define VIRTIO_IOMMU_S_FAULT                   0x07
+#define VIRTIO_IOMMU_S_NOMEM                   0x08
 
 struct virtio_iommu_req_head {
        __u8                                    type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
 
 #define VIRTIO_IOMMU_MAP_F_READ                        (1 << 0)
 #define VIRTIO_IOMMU_MAP_F_WRITE               (1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC                        (1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO                        (1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO                        (1 << 2)
 
 #define VIRTIO_IOMMU_MAP_F_MASK                        (VIRTIO_IOMMU_MAP_F_READ |      \
                                                 VIRTIO_IOMMU_MAP_F_WRITE |     \
-                                                VIRTIO_IOMMU_MAP_F_EXEC |      \
                                                 VIRTIO_IOMMU_MAP_F_MMIO)
 
 struct virtio_iommu_req_map {
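Alongside the v0.12 layout changes, the config members move from native __u64/__u8 to explicit __le64/__le32: the device always stores little-endian and the driver converts on access. A userspace sketch of that conversion using glibc's endian helpers (assumed available on the build host):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t wire = htole64(0xfffULL);      /* as a device would store it */
            uint64_t host = le64toh(wire);          /* driver-side conversion */

            printf("page_size_mask = 0x%llx\n", (unsigned long long)host);
            return 0;
    }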
index 4969817124a8d7c6b462aeb18f54105d1d49e2e3..98b30c1613b28031c27a35990510a2bafc0697c4 100644 (file)
@@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
 }
 #endif
 
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long len);
+
 /*
  * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
  * @vma:     VMA to map the pages into
index a8d923b5481ba91d3cb15209eda7838fe1793ead..ef0d95a190b4179848c52041f2d1cfb6b9fbff77 100644 (file)
@@ -111,7 +111,6 @@ obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
 obj-$(CONFIG_TORTURE_TEST) += torture.o
 
 obj-$(CONFIG_HAS_IOMEM) += iomem.o
-obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 obj-$(CONFIG_RSEQ) += rseq.o
 
 obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
index bfc0c17f2a3d411fcdee446f8aeeb0c1be0f681e..2bd410f934b3241179a4162a9817ecef0132759c 100644 (file)
@@ -243,8 +243,9 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
 
        /* CMA can be used only in the context which permits sleeping */
        if (cma && gfpflags_allow_blocking(gfp)) {
-               align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
-               page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+               size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+
+               page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
        }
 
        /* Fallback allocation of normal pages */
@@ -266,7 +267,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
  */
 void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
 {
-       if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+       if (!cma_release(dev_get_cma_area(dev), page,
+                        PAGE_ALIGN(size) >> PAGE_SHIFT))
                __free_pages(page, get_order(size));
 }
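dma_alloc_contiguous() hands out PAGE_ALIGN(size) worth of pages, so the free path must round the same way; a plain size >> PAGE_SHIFT undercounts for any size that is not page-aligned. The arithmetic, checked in userspace with 4 KiB pages:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long size = 5000;      /* spans two 4 KiB pages */

            assert((size >> PAGE_SHIFT) == 1);              /* old math: one short */
            assert((PAGE_ALIGN(size) >> PAGE_SHIFT) == 2);  /* fixed math */
            return 0;
    }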
 
index 1f628e7ac7097ab40e577904a91f26fce28610b8..b945239621d86255d6a259a9a2911c9a393e0b11 100644 (file)
@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
        int ret;
 
        if (!dev_is_dma_coherent(dev)) {
+               unsigned long pfn;
+
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;
 
-               page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
-                               dma_addr));
+               /* If the PFN is not valid, we do not have a struct page */
+               pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+               if (!pfn_valid(pfn))
+                       return -ENXIO;
+               page = pfn_to_page(pfn);
        } else {
                page = virt_to_page(cpu_addr);
        }
@@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        if (!dev_is_dma_coherent(dev)) {
                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
                        return -ENXIO;
+
+               /* If the PFN is not valid, we do not have a struct page */
                pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+               if (!pfn_valid(pfn))
+                       return -ENXIO;
        } else {
                pfn = page_to_pfn(virt_to_page(cpu_addr));
        }
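Both DMA helpers now verify pfn_valid() before pfn_to_page(), since a coherent allocation may live in memory that has no backing struct page; translating anyway would hand out a bogus pointer. A lookup-table model of the validate-before-translate guard (names invented):

    #include <errno.h>

    struct page { int dummy; };

    static struct page pages[16];

    static int pfn_valid_model(unsigned long pfn)
    {
            return pfn < 16;        /* only these PFNs have a struct page */
    }

    static int get_page_model(unsigned long pfn, struct page **out)
    {
            if (!pfn_valid_model(pfn))
                    return -ENXIO;  /* no struct page behind this PFN */
            *out = &pages[pfn];
            return 0;
    }

    int main(void)
    {
            struct page *p;

            return get_page_model(99, &p) == -ENXIO ? 0 : 1;
    }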
index 4436158a6d30bfb3ad7ed968e1257fdc19d76bd4..5b4a5dcce8f8328dd173ead148f4dc6a5fa0d8c4 100644 (file)
@@ -734,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
                autoreap = true;
        }
 
-       tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
-       if (tsk->exit_state == EXIT_DEAD)
+       if (autoreap) {
+               tsk->exit_state = EXIT_DEAD;
                list_add(&tsk->ptrace_entry, &dead);
+       }
 
        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
diff --git a/kernel/memremap.c b/kernel/memremap.c
deleted file mode 100644 (file)
index 6ee03a8..0000000
+++ /dev/null
@@ -1,405 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/kasan.h>
-#include <linux/memory_hotplug.h>
-#include <linux/mm.h>
-#include <linux/pfn_t.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
-#include <linux/types.h>
-#include <linux/wait_bit.h>
-#include <linux/xarray.h>
-
-static DEFINE_XARRAY(pgmap_array);
-#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
-#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
-
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
-EXPORT_SYMBOL(devmap_managed_key);
-static atomic_t devmap_managed_enable;
-
-static void devmap_managed_enable_put(void *data)
-{
-       if (atomic_dec_and_test(&devmap_managed_enable))
-               static_branch_disable(&devmap_managed_key);
-}
-
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
-{
-       if (!pgmap->ops || !pgmap->ops->page_free) {
-               WARN(1, "Missing page_free method\n");
-               return -EINVAL;
-       }
-
-       if (atomic_inc_return(&devmap_managed_enable) == 1)
-               static_branch_enable(&devmap_managed_key);
-       return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
-}
-#else
-static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
-{
-       return -EINVAL;
-}
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
-
-static void pgmap_array_delete(struct resource *res)
-{
-       xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
-                       NULL, GFP_KERNEL);
-       synchronize_rcu();
-}
-
-static unsigned long pfn_first(struct dev_pagemap *pgmap)
-{
-       return PHYS_PFN(pgmap->res.start) +
-               vmem_altmap_offset(pgmap_altmap(pgmap));
-}
-
-static unsigned long pfn_end(struct dev_pagemap *pgmap)
-{
-       const struct resource *res = &pgmap->res;
-
-       return (res->start + resource_size(res)) >> PAGE_SHIFT;
-}
-
-static unsigned long pfn_next(unsigned long pfn)
-{
-       if (pfn % 1024 == 0)
-               cond_resched();
-       return pfn + 1;
-}
-
-#define for_each_device_pfn(pfn, map) \
-       for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
-
-static void dev_pagemap_kill(struct dev_pagemap *pgmap)
-{
-       if (pgmap->ops && pgmap->ops->kill)
-               pgmap->ops->kill(pgmap);
-       else
-               percpu_ref_kill(pgmap->ref);
-}
-
-static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
-       if (pgmap->ops && pgmap->ops->cleanup) {
-               pgmap->ops->cleanup(pgmap);
-       } else {
-               wait_for_completion(&pgmap->done);
-               percpu_ref_exit(pgmap->ref);
-       }
-}
-
-static void devm_memremap_pages_release(void *data)
-{
-       struct dev_pagemap *pgmap = data;
-       struct device *dev = pgmap->dev;
-       struct resource *res = &pgmap->res;
-       unsigned long pfn;
-       int nid;
-
-       dev_pagemap_kill(pgmap);
-       for_each_device_pfn(pfn, pgmap)
-               put_page(pfn_to_page(pfn));
-       dev_pagemap_cleanup(pgmap);
-
-       /* pages are dead and unused, undo the arch mapping */
-       nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
-
-       mem_hotplug_begin();
-       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               pfn = PHYS_PFN(res->start);
-               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
-                                PHYS_PFN(resource_size(res)), NULL);
-       } else {
-               arch_remove_memory(nid, res->start, resource_size(res),
-                               pgmap_altmap(pgmap));
-               kasan_remove_zero_shadow(__va(res->start), resource_size(res));
-       }
-       mem_hotplug_done();
-
-       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
-       pgmap_array_delete(res);
-       dev_WARN_ONCE(dev, pgmap->altmap.alloc,
-                     "%s: failed to free all reserved pages\n", __func__);
-}
-
-static void dev_pagemap_percpu_release(struct percpu_ref *ref)
-{
-       struct dev_pagemap *pgmap =
-               container_of(ref, struct dev_pagemap, internal_ref);
-
-       complete(&pgmap->done);
-}
-
-/**
- * devm_memremap_pages - remap and provide memmap backing for the given resource
- * @dev: hosting device for @res
- * @pgmap: pointer to a struct dev_pagemap
- *
- * Notes:
- * 1/ At a minimum the res and type members of @pgmap must be initialized
- *    by the caller before passing it to this function
- *
- * 2/ The altmap field may optionally be initialized, in which case
- *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
- *
- * 3/ The ref field may optionally be provided, in which pgmap->ref must be
- *    'live' on entry and will be killed and reaped at
- *    devm_memremap_pages_release() time, or if this routine fails.
- *
- * 4/ res is expected to be a host memory range that could feasibly be
- *    treated as a "System RAM" range, i.e. not a device mmio range, but
- *    this is not enforced.
- */
-void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
-{
-       struct resource *res = &pgmap->res;
-       struct dev_pagemap *conflict_pgmap;
-       struct mhp_restrictions restrictions = {
-               /*
-                * We do not want any optional features only our own memmap
-                */
-               .altmap = pgmap_altmap(pgmap),
-       };
-       pgprot_t pgprot = PAGE_KERNEL;
-       int error, nid, is_ram;
-       bool need_devmap_managed = true;
-
-       switch (pgmap->type) {
-       case MEMORY_DEVICE_PRIVATE:
-               if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
-                       WARN(1, "Device private memory not supported\n");
-                       return ERR_PTR(-EINVAL);
-               }
-               if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
-                       WARN(1, "Missing migrate_to_ram method\n");
-                       return ERR_PTR(-EINVAL);
-               }
-               break;
-       case MEMORY_DEVICE_FS_DAX:
-               if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
-                   IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
-                       WARN(1, "File system DAX not supported\n");
-                       return ERR_PTR(-EINVAL);
-               }
-               break;
-       case MEMORY_DEVICE_DEVDAX:
-       case MEMORY_DEVICE_PCI_P2PDMA:
-               need_devmap_managed = false;
-               break;
-       default:
-               WARN(1, "Invalid pgmap type %d\n", pgmap->type);
-               break;
-       }
-
-       if (!pgmap->ref) {
-               if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
-                       return ERR_PTR(-EINVAL);
-
-               init_completion(&pgmap->done);
-               error = percpu_ref_init(&pgmap->internal_ref,
-                               dev_pagemap_percpu_release, 0, GFP_KERNEL);
-               if (error)
-                       return ERR_PTR(error);
-               pgmap->ref = &pgmap->internal_ref;
-       } else {
-               if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
-                       WARN(1, "Missing reference count teardown definition\n");
-                       return ERR_PTR(-EINVAL);
-               }
-       }
-
-       if (need_devmap_managed) {
-               error = devmap_managed_enable_get(dev, pgmap);
-               if (error)
-                       return ERR_PTR(error);
-       }
-
-       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
-       if (conflict_pgmap) {
-               dev_WARN(dev, "Conflicting mapping in same section\n");
-               put_dev_pagemap(conflict_pgmap);
-               error = -ENOMEM;
-               goto err_array;
-       }
-
-       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
-       if (conflict_pgmap) {
-               dev_WARN(dev, "Conflicting mapping in same section\n");
-               put_dev_pagemap(conflict_pgmap);
-               error = -ENOMEM;
-               goto err_array;
-       }
-
-       is_ram = region_intersects(res->start, resource_size(res),
-               IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
-
-       if (is_ram != REGION_DISJOINT) {
-               WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
-                               is_ram == REGION_MIXED ? "mixed" : "ram", res);
-               error = -ENXIO;
-               goto err_array;
-       }
-
-       pgmap->dev = dev;
-
-       error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
-                               PHYS_PFN(res->end), pgmap, GFP_KERNEL));
-       if (error)
-               goto err_array;
-
-       nid = dev_to_node(dev);
-       if (nid < 0)
-               nid = numa_mem_id();
-
-       error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
-                       resource_size(res));
-       if (error)
-               goto err_pfn_remap;
-
-       mem_hotplug_begin();
-
-       /*
-        * For device private memory we call add_pages() as we only need to
-        * allocate and initialize struct page for the device memory. More-
-        * over the device memory is un-accessible thus we do not want to
-        * create a linear mapping for the memory like arch_add_memory()
-        * would do.
-        *
-        * For all other device memory types, which are accessible by
-        * the CPU, we do want the linear mapping and thus use
-        * arch_add_memory().
-        */
-       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               error = add_pages(nid, PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), &restrictions);
-       } else {
-               error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
-               if (error) {
-                       mem_hotplug_done();
-                       goto err_kasan;
-               }
-
-               error = arch_add_memory(nid, res->start, resource_size(res),
-                                       &restrictions);
-       }
-
-       if (!error) {
-               struct zone *zone;
-
-               zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
-               move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), restrictions.altmap);
-       }
-
-       mem_hotplug_done();
-       if (error)
-               goto err_add_memory;
-
-       /*
-        * Initialization of the pages has been deferred until now in order
-        * to allow us to do the work while not holding the hotplug lock.
-        */
-       memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
-                               PHYS_PFN(res->start),
-                               PHYS_PFN(resource_size(res)), pgmap);
-       percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
-
-       error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
-                       pgmap);
-       if (error)
-               return ERR_PTR(error);
-
-       return __va(res->start);
-
- err_add_memory:
-       kasan_remove_zero_shadow(__va(res->start), resource_size(res));
- err_kasan:
-       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
- err_pfn_remap:
-       pgmap_array_delete(res);
- err_array:
-       dev_pagemap_kill(pgmap);
-       dev_pagemap_cleanup(pgmap);
-       return ERR_PTR(error);
-}
-EXPORT_SYMBOL_GPL(devm_memremap_pages);
-
-void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
-{
-       devm_release_action(dev, devm_memremap_pages_release, pgmap);
-}
-EXPORT_SYMBOL_GPL(devm_memunmap_pages);
-
-unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
-{
-       /* number of pfns from base where pfn_to_page() is valid */
-       if (altmap)
-               return altmap->reserve + altmap->free;
-       return 0;
-}
-
-void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
-{
-       altmap->alloc -= nr_pfns;
-}
-
-/**
- * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
- * @pfn: page frame number to lookup page_map
- * @pgmap: optional known pgmap that already has a reference
- *
- * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
- * is non-NULL but does not cover @pfn the reference to it will be released.
- */
-struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
-               struct dev_pagemap *pgmap)
-{
-       resource_size_t phys = PFN_PHYS(pfn);
-
-       /*
-        * In the cached case we're already holding a live reference.
-        */
-       if (pgmap) {
-               if (phys >= pgmap->res.start && phys <= pgmap->res.end)
-                       return pgmap;
-               put_dev_pagemap(pgmap);
-       }
-
-       /* fall back to slow path lookup */
-       rcu_read_lock();
-       pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
-       if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
-               pgmap = NULL;
-       rcu_read_unlock();
-
-       return pgmap;
-}
-EXPORT_SYMBOL_GPL(get_dev_pagemap);
-
-#ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page)
-{
-       int count = page_ref_dec_return(page);
-
-       /*
-        * If refcount is 1 then page is freed and refcount is stable as nobody
-        * holds a reference on the page.
-        */
-       if (count == 1) {
-               /* Clear Active bit in case of parallel mark_page_accessed */
-               __ClearPageActive(page);
-               __ClearPageWaiters(page);
-
-               mem_cgroup_uncharge(page);
-
-               page->pgmap->ops->page_free(page);
-       } else if (!count)
-               __put_page(page);
-}
-EXPORT_SYMBOL(__put_devmap_managed_page);
-#endif /* CONFIG_DEV_PAGEMAP_OPS */
index 91b789dd6e722ef96f8cf4440eb8f5bbc857ac60..e667be6907d71333c44546cd43ebfdde6c9eab3c 100644 (file)
@@ -349,7 +349,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
  * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
  * Group stop states are cleared and the group stop count is consumed if
  * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
- * stop, the appropriate %SIGNAL_* flags are set.
+ * stop, the appropriate `SIGNAL_*` flags are set.
  *
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
@@ -1885,6 +1885,7 @@ static void do_notify_pidfd(struct task_struct *task)
 {
        struct pid *pid;
 
+       WARN_ON(task->exit_state == 0);
        pid = task_pid(task);
        wake_up_all(&pid->wait_pidfd);
 }
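
The WARN_ON added above guards do_notify_pidfd() against being called for a task that has not actually exited; the wake_up_all() it performs is what unblocks userspace pollers of a pidfd. A hedged userspace sketch of the consumer side, assuming a kernel with pidfd_open (syscall 434, added in the unistd.h hunk later in this merge) and pidfd polling support — the error handling here is illustrative only:

        #include <poll.h>
        #include <stdio.h>
        #include <sys/syscall.h>
        #include <sys/types.h>
        #include <unistd.h>

        #ifndef __NR_pidfd_open
        #define __NR_pidfd_open 434     /* matches the unistd.h hunk below */
        #endif

        int main(void)
        {
                pid_t pid = fork();

                if (pid == 0) {         /* child: exit after a moment */
                        sleep(1);
                        return 0;
                }

                int pidfd = syscall(__NR_pidfd_open, pid, 0);
                if (pidfd < 0) {
                        perror("pidfd_open");
                        return 1;
                }

                /* the wake_up_all(&pid->wait_pidfd) above makes this poll return */
                struct pollfd pfd = { .fd = pidfd, .events = POLLIN };
                poll(&pfd, 1, -1);
                printf("child %d exited\n", (int)pid);
                return 0;
        }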
index 69ebf3c2f1b5dd2e369232068f9074e0c48b4b48..78af97163147bf4d4fd3a7f5de85e2034fbd4c17 100644 (file)
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
                return 0;
 
+       /*
+        * Do not trace a function if it's filtered by set_graph_notrace.
+        * Make the index of ret stack negative to indicate that it should
+        * ignore further functions.  But it needs its own ret stack entry
+        * to recover the original index in order to continue tracing after
+        * returning from the function.
+        */
        if (ftrace_graph_notrace_addr(trace->func)) {
                trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
                /*
@@ -155,16 +162,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        if (ftrace_graph_ignore_irqs())
                return 0;
 
-       /*
-        * Do not trace a function if it's filtered by set_graph_notrace.
-        * Make the index of ret stack negative to indicate that it should
-        * ignore further functions.  But it needs its own ret stack entry
-        * to recover the original index in order to continue tracing after
-        * returning from the function.
-        */
-       if (ftrace_graph_notrace_addr(trace->func))
-               return 1;
-
        /*
         * Stop here if tracing_threshold is set. We only write function return
         * events to the ring buffer.
index 4fafba1a923b67a2650a0265025e247829d3f9c6..7fa97a8b571778a1619940ab10a9a4bc9e96f823 100644 (file)
@@ -106,7 +106,6 @@ endchoice
 
 config KASAN_STACK_ENABLE
        bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
-       default !(CLANG_VERSION < 90000)
        depends on KASAN
        help
          The LLVM stack address sanitizer has a known problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
          Disabling asan-stack makes it safe to run kernels built
          with clang-8 with KASAN enabled, though it loses some of
          the functionality.
-         This feature is always disabled when compile-testing with clang-8
-         or earlier to avoid cluttering the output in stack overflow
-         warnings, but clang-8 users can still enable it for builds without
-         CONFIG_COMPILE_TEST.  On gcc and later clang versions it is
-         assumed to always be safe to use and enabled by default.
+         This feature is always disabled when compile-testing with clang
+         to avoid cluttering the output with stack overflow warnings,
+         but clang users can still enable it for builds without
+         CONFIG_COMPILE_TEST.  On gcc it is assumed to always be safe
+         to use and enabled by default.
 
 config KASAN_STACK
        int
index 095601ce371dabd7a7e5b3501c61ac4b6516d91b..29c02a924973afefc9a95cac47a6dfbcced66993 100644 (file)
@@ -279,7 +279,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
 obj-$(CONFIG_UBSAN) += ubsan.o
 
 UBSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
 
index 62d19f270cad4c75b9582abb5397e987c0aaca1c..9729f271d15041ac1930eb85922b81ba094a574f 100644 (file)
@@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
                 * Copy the buffer to check that it's not wiped on
                 * free().
                 */
-               buf_copy = kmalloc(size, GFP_KERNEL);
+               buf_copy = kmalloc(size, GFP_ATOMIC);
                if (buf_copy)
                        memcpy(buf_copy, buf, size);
 
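
The switch from GFP_KERNEL to GFP_ATOMIC here follows the usual rule for allocations made in a context that must not sleep. A minimal kernel-style sketch of that rule — the lock, buffer, and function names are illustrative, not taken from the test:

        #include <linux/slab.h>
        #include <linux/spinlock.h>
        #include <linux/string.h>

        static DEFINE_SPINLOCK(demo_lock);

        /*
         * Copy @src into a fresh buffer while holding a spinlock: the locked
         * section must not sleep, so GFP_KERNEL (which may block on reclaim)
         * is not allowed and GFP_ATOMIC is required instead.
         */
        static void *copy_under_lock(const void *src, size_t size)
        {
                void *buf;

                spin_lock(&demo_lock);
                buf = kmalloc(size, GFP_ATOMIC);        /* may fail, never sleeps */
                if (buf)
                        memcpy(buf, src, size);
                spin_unlock(&demo_lock);
                return buf;
        }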
index 2d1c1f241fd9ec8828f5377616c445c663fe74db..e630e7ff57f1f9102e28820ef98f9c49e23da2ed 100644 (file)
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
                ns = vdso_ts->nsec;
                last = vd->cycle_last;
                if (unlikely((s64)cycles < 0))
-                       return clock_gettime_fallback(clk, ts);
+                       return -1;
 
                ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
                ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
 }
 
 static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
 {
        const struct vdso_data *vd = __arch_get_vdso_data();
        u32 msk;
 
        /* Check for negative values or invalid clocks */
        if (unlikely((u32) clock >= MAX_CLOCKS))
-               goto fallback;
+               return -1;
 
        /*
         * Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
        } else if (msk & VDSO_RAW) {
                return do_hres(&vd[CS_RAW], clock, ts);
        }
+       return -1;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+       int ret = __cvdso_clock_gettime_common(clock, ts);
 
-fallback:
-       return clock_gettime_fallback(clock, ts);
+       if (unlikely(ret))
+               return clock_gettime_fallback(clock, ts);
+       return 0;
 }
 
 static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
        struct __kernel_timespec ts;
        int ret;
 
-       if (res == NULL)
-               goto fallback;
+       ret = __cvdso_clock_gettime_common(clock, &ts);
 
-       ret = __cvdso_clock_gettime(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+       if (unlikely(ret))
+               return clock_gettime32_fallback(clock, res);
+#else
+       if (unlikely(ret))
+               ret = clock_gettime_fallback(clock, &ts);
+#endif
 
-       if (ret == 0) {
+       if (likely(!ret)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
-
        return ret;
-
-fallback:
-       return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
 }
 
 static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
 
 #ifdef VDSO_HAS_CLOCK_GETRES
 static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
 {
        const struct vdso_data *vd = __arch_get_vdso_data();
-       u64 ns;
+       u64 hrtimer_res;
        u32 msk;
-       u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+       u64 ns;
 
        /* Check for negative values or invalid clocks */
        if (unlikely((u32) clock >= MAX_CLOCKS))
-               goto fallback;
+               return -1;
 
+       hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
        /*
         * Convert the clockid to a bitmask and use it to check which
         * clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
                 */
                ns = hrtimer_res;
        } else {
-               goto fallback;
+               return -1;
        }
 
-       if (res) {
-               res->tv_sec = 0;
-               res->tv_nsec = ns;
-       }
+       res->tv_sec = 0;
+       res->tv_nsec = ns;
 
        return 0;
+}
+
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+       int ret = __cvdso_clock_getres_common(clock, res);
 
-fallback:
-       return clock_getres_fallback(clock, res);
+       if (unlikely(ret))
+               return clock_getres_fallback(clock, res);
+       return 0;
 }
 
 static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
        struct __kernel_timespec ts;
        int ret;
 
-       if (res == NULL)
-               goto fallback;
+       ret = __cvdso_clock_getres_common(clock, &ts);
 
-       ret = __cvdso_clock_getres(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+       if (unlikely(ret))
+               return clock_getres32_fallback(clock, res);
+#else
+       if (unlikely(ret))
+               ret = clock_getres_fallback(clock, &ts);
+#endif
 
-       if (ret == 0) {
+       if (likely(!ret)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
-
        return ret;
-
-fallback:
-       return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
 }
 #endif /* VDSO_HAS_CLOCK_GETRES */
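
The refactor above splits each vDSO primitive into a `_common` helper that returns -1 when the fast path cannot serve the request, plus thin wrappers that pick the right syscall fallback. A hedged standalone model of that pattern — every `demo_*` name below is an illustrative stand-in, not the kernel's:

        #include <stdbool.h>
        #include <stdio.h>

        struct demo_ts { long sec, nsec; };

        /* Stand-ins for the vDSO data page and the real syscall. */
        static bool demo_clock_supported(int clock) { return clock == 0; }
        static void demo_read_vdso_data(struct demo_ts *ts) { ts->sec = 1; ts->nsec = 0; }
        static int demo_syscall_fallback(int clock, struct demo_ts *ts)
        { (void)clock; ts->sec = ts->nsec = 0; return 0; }

        /*
         * The common helper only reports whether the fast path could serve
         * the request; each ABI wrapper then picks its own fallback.  This
         * is what lets __cvdso_clock_gettime32() above call
         * clock_gettime32_fallback() when VDSO_HAS_32BIT_FALLBACK is defined.
         */
        static int demo_gettime_common(int clock, struct demo_ts *ts)
        {
                if (!demo_clock_supported(clock))
                        return -1;      /* caller decides which fallback */
                demo_read_vdso_data(ts);
                return 0;
        }

        static int demo_gettime(int clock, struct demo_ts *ts)
        {
                if (demo_gettime_common(clock, ts))
                        return demo_syscall_fallback(clock, ts);
                return 0;
        }

        int main(void)
        {
                struct demo_ts ts;

                demo_gettime(7, &ts);   /* unsupported clock: falls back */
                printf("%ld.%09ld\n", ts.sec, ts.nsec);
                return 0;
        }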
index 338e528ad4366f982298e9a8e59eba5e308fab32..d0b295c3b764bb753e4081b5847d6cef577f114d 100644 (file)
@@ -102,5 +102,6 @@ obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
 obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
 obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
 obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
+obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 obj-$(CONFIG_HMM_MIRROR) += hmm.o
 obj-$(CONFIG_MEMFD_CREATE) += memfd.o
index 83a7b614061f4027d030162d4237c4fdd5b0302e..798275a51887ca82b378ee45c86671894c4bdf74 100644 (file)
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
         * memory corruption is possible and we should stop execution.
         */
        BUG_ON(!trylock_page(page));
-       list_del(&page->lru);
        balloon_page_insert(b_dev_info, page);
        unlock_page(page);
        __count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
  * @b_dev_info: balloon device descriptor where we will insert a new page to
  * @pages: pages to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
  *
  * Return: number of pages that were enqueued.
  */
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
 
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        list_for_each_entry_safe(page, tmp, pages, lru) {
+               list_del(&page->lru);
                balloon_page_enqueue_one(b_dev_info, page);
                n_pages++;
        }
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
  * @n_req_pages: number of requested pages.
  *
  * Driver must call this function to properly de-allocate a previous enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing it back to the guest system.
  * This function tries to remove @n_req_pages from the ballooned pages and
  * return them to the caller in the @pages list.
  *
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
  *
  * Return: number of pages that were added to the @pages list.
  */
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 
 /*
  * balloon_page_alloc - allocates a new page for insertion into the balloon
- *                       page list.
+ *                     page list.
+ *
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
  *
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system.  This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Return: struct page for the allocated page or NULL on allocation failure.
  */
 struct page *balloon_page_alloc(void)
 {
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
 
 /*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- *                       page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
  * @page: new page to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a new allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
  */
 void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
                          struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
 
 /*
  * balloon_page_dequeue - removes a page from balloon's page list and returns
- *                       the its address to allow the driver release the page.
+ *                       its address to allow the driver to release the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
  *
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
  */
 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 {
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
        if (n_pages != 1) {
                /*
                 * If we are unable to dequeue a balloon page because the page
-                * list is empty and there is no isolated pages, then something
+                * list is empty and there are no isolated pages, then something
                * went off track and some balloon pages are lost.
-                * BUG() here, otherwise the balloon driver may get stuck into
+                * BUG() here, otherwise the balloon driver may get stuck in
                 * an infinite loop while attempting to release all its pages.
                 */
                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
 
        /*
         * We can not easily support the no copy case here so ignore it as it
-        * is unlikely to be use with ballon pages. See include/linux/hmm.h for
-        * user of the MIGRATE_SYNC_NO_COPY mode.
+        * is unlikely to be used with balloon pages. See include/linux/hmm.h
+        * for a user of the MIGRATE_SYNC_NO_COPY mode.
         */
        if (mode == MIGRATE_SYNC_NO_COPY)
                return -EINVAL;
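
The reworked kernel-doc above spells out the intended inflate flow: allocate pages one at a time, collect them on a private list with balloon_page_push(), then hand the whole list over in one locked pass. A hedged sketch of a driver following those rules — the function name and page count are illustrative:

        #include <linux/balloon_compaction.h>
        #include <linux/list.h>

        /*
         * Illustrative inflate path: pages pushed onto a private list must go
         * through balloon_page_list_enqueue(), not balloon_page_enqueue(),
         * per the documentation above.
         */
        static size_t demo_inflate(struct balloon_dev_info *b_dev_info, int nr)
        {
                LIST_HEAD(pages);
                int i;

                for (i = 0; i < nr; i++) {
                        struct page *page = balloon_page_alloc();

                        if (!page)
                                break;
                        balloon_page_push(&pages, page);
                }

                /* takes pages_lock once and deletes each page from @pages */
                return balloon_page_list_enqueue(b_dev_info, &pages);
        }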
index 9e1b9acb116b9b3efaabd8ee02538c20456b4714..952dc2fb24e50a26bee9621965ec6070b5d13346 100644 (file)
@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
                /*
                 * Periodically drop the lock (if held) regardless of its
-                * contention, to give chance to IRQs. Abort async compaction
-                * if contended.
+                * contention, to give IRQs a chance. Abort completely if
+                * a fatal signal is pending.
                 */
                if (!(low_pfn % SWAP_CLUSTER_MAX)
                    && compact_unlock_should_abort(&pgdat->lru_lock,
-                                           flags, &locked, cc))
-                       break;
+                                           flags, &locked, cc)) {
+                       low_pfn = 0;
+                       goto fatal_pending;
+               }
 
                if (!pfn_valid_within(low_pfn))
                        goto isolate_fail;
@@ -1060,6 +1062,7 @@ isolate_abort:
        trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
                                                nr_scanned, nr_isolated);
 
+fatal_pending:
        cc->total_migrate_scanned += nr_scanned;
        if (nr_isolated)
                count_compact_events(COMPACTISOLATED, nr_isolated);
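
The comment above describes a common scanning idiom: periodically drop the lock to let IRQs and the scheduler in, and abort the whole scan on a fatal signal. A hedged model of that idiom — the names, interval, and return convention are illustrative, not compaction's:

        #include <linux/sched.h>
        #include <linux/sched/signal.h>
        #include <linux/spinlock.h>

        /* Returns false if the scan was aborted by a fatal signal. */
        static bool demo_scan(spinlock_t *lock, unsigned long *pfn,
                              unsigned long end)
        {
                spin_lock(lock);
                for (; *pfn < end; (*pfn)++) {
                        if (!(*pfn % 32)) {
                                spin_unlock(lock);
                                if (fatal_signal_pending(current))
                                        return false;   /* abort completely */
                                cond_resched();         /* give IRQs a chance */
                                spin_lock(lock);
                        }
                        /* ... process one pfn ... */
                }
                spin_unlock(lock);
                return true;
        }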
index e1eedef129cf5c3425cb5d709b42910cb5409e43..16b6731a34db79b46516ac74da8ec5c59a03e99c 100644 (file)
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
  * @range: range
  * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
  *          permission (for instance asking for write and range is read only),
- *          -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
+ *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
  *          vma or it is illegal to access that range), number of valid pages
  *          in range->pfns[] (from range start address).
  *
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
        do {
                /* If range is no longer valid force retry. */
                if (!range->valid)
-                       return -EAGAIN;
+                       return -EBUSY;
 
                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
 
        do {
                /* If range is no longer valid force retry. */
-               if (!range->valid) {
-                       up_read(&hmm->mm->mmap_sem);
-                       return -EAGAIN;
-               }
+               if (!range->valid)
+                       return -EBUSY;
 
                vma = find_vma(hmm->mm, start);
                if (vma == NULL || (vma->vm_flags & device_vma))
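
With this change both hmm_range_snapshot() and hmm_range_fault() return -EBUSY when the range is invalidated mid-walk, and hmm_range_fault() no longer drops mmap_sem behind the caller's back. A hedged sketch of the retry loop this enables, with error handling trimmed; the wrapper name is illustrative:

        #include <linux/hmm.h>
        #include <linux/mm.h>

        /*
         * On -EBUSY the caller still holds mmap_sem and can simply retry,
         * optionally waiting for the range to become valid again first.
         */
        static long demo_fault_range(struct mm_struct *mm, struct hmm_range *range)
        {
                long ret;

                down_read(&mm->mmap_sem);
                do {
                        ret = hmm_range_fault(range, false);
                } while (ret == -EBUSY);
                up_read(&mm->mmap_sem);
                return ret;
        }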
index dbbd518fb6b3e91dc298956f7abe842361369952..6e9e8cca663e477d328584ac3142d8234d8e3e05 100644 (file)
 /* GFP bitmask for kmemleak internal allocations */
 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
-                                __GFP_NOWARN | __GFP_NOFAIL)
+                                __GFP_NOWARN)
 
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
index 2a9bbddb0e55427125e90dde4795b6460cdbdae1..c73f0991316511fb5471d3382f2d719c6cbfd759 100644 (file)
@@ -132,7 +132,6 @@ static void release_memory_resource(struct resource *res)
                return;
        release_resource(res);
        kfree(res);
-       return;
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
@@ -979,7 +978,6 @@ static void rollback_node_hotadd(int nid)
        arch_refresh_nodedata(nid, NULL);
        free_percpu(pgdat->per_cpu_nodestats);
        arch_free_nodedata(pgdat);
-       return;
 }
 
 
diff --git a/mm/memremap.c b/mm/memremap.c
new file mode 100644 (file)
index 0000000..6ee03a8
--- /dev/null
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2015 Intel Corporation. All rights reserved. */
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kasan.h>
+#include <linux/memory_hotplug.h>
+#include <linux/mm.h>
+#include <linux/pfn_t.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <linux/types.h>
+#include <linux/wait_bit.h>
+#include <linux/xarray.h>
+
+static DEFINE_XARRAY(pgmap_array);
+#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
+#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
+
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_managed_enable;
+
+static void devmap_managed_enable_put(void *data)
+{
+       if (atomic_dec_and_test(&devmap_managed_enable))
+               static_branch_disable(&devmap_managed_key);
+}
+
+static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+{
+       if (!pgmap->ops || !pgmap->ops->page_free) {
+               WARN(1, "Missing page_free method\n");
+               return -EINVAL;
+       }
+
+       if (atomic_inc_return(&devmap_managed_enable) == 1)
+               static_branch_enable(&devmap_managed_key);
+       return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
+}
+#else
+static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
+
+static void pgmap_array_delete(struct resource *res)
+{
+       xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+                       NULL, GFP_KERNEL);
+       synchronize_rcu();
+}
+
+static unsigned long pfn_first(struct dev_pagemap *pgmap)
+{
+       return PHYS_PFN(pgmap->res.start) +
+               vmem_altmap_offset(pgmap_altmap(pgmap));
+}
+
+static unsigned long pfn_end(struct dev_pagemap *pgmap)
+{
+       const struct resource *res = &pgmap->res;
+
+       return (res->start + resource_size(res)) >> PAGE_SHIFT;
+}
+
+static unsigned long pfn_next(unsigned long pfn)
+{
+       if (pfn % 1024 == 0)
+               cond_resched();
+       return pfn + 1;
+}
+
+#define for_each_device_pfn(pfn, map) \
+       for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
+
+static void dev_pagemap_kill(struct dev_pagemap *pgmap)
+{
+       if (pgmap->ops && pgmap->ops->kill)
+               pgmap->ops->kill(pgmap);
+       else
+               percpu_ref_kill(pgmap->ref);
+}
+
+static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
+{
+       if (pgmap->ops && pgmap->ops->cleanup) {
+               pgmap->ops->cleanup(pgmap);
+       } else {
+               wait_for_completion(&pgmap->done);
+               percpu_ref_exit(pgmap->ref);
+       }
+}
+
+static void devm_memremap_pages_release(void *data)
+{
+       struct dev_pagemap *pgmap = data;
+       struct device *dev = pgmap->dev;
+       struct resource *res = &pgmap->res;
+       unsigned long pfn;
+       int nid;
+
+       dev_pagemap_kill(pgmap);
+       for_each_device_pfn(pfn, pgmap)
+               put_page(pfn_to_page(pfn));
+       dev_pagemap_cleanup(pgmap);
+
+       /* pages are dead and unused, undo the arch mapping */
+       nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
+
+       mem_hotplug_begin();
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               pfn = PHYS_PFN(res->start);
+               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
+                                PHYS_PFN(resource_size(res)), NULL);
+       } else {
+               arch_remove_memory(nid, res->start, resource_size(res),
+                               pgmap_altmap(pgmap));
+               kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+       }
+       mem_hotplug_done();
+
+       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+       pgmap_array_delete(res);
+       dev_WARN_ONCE(dev, pgmap->altmap.alloc,
+                     "%s: failed to free all reserved pages\n", __func__);
+}
+
+static void dev_pagemap_percpu_release(struct percpu_ref *ref)
+{
+       struct dev_pagemap *pgmap =
+               container_of(ref, struct dev_pagemap, internal_ref);
+
+       complete(&pgmap->done);
+}
+
+/**
+ * devm_memremap_pages - remap and provide memmap backing for the given resource
+ * @dev: hosting device for @res
+ * @pgmap: pointer to a struct dev_pagemap
+ *
+ * Notes:
+ * 1/ At a minimum the res and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
+ *
+ * 2/ The altmap field may optionally be initialized, in which case
+ *    PGMAP_ALTMAP_VALID must be set in pgmap->flags.
+ *
+ * 3/ The ref field may optionally be provided, in which case pgmap->ref must be
+ *    'live' on entry and will be killed and reaped at
+ *    devm_memremap_pages_release() time, or if this routine fails.
+ *
+ * 4/ res is expected to be a host memory range that could feasibly be
+ *    treated as a "System RAM" range, i.e. not a device mmio range, but
+ *    this is not enforced.
+ */
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
+{
+       struct resource *res = &pgmap->res;
+       struct dev_pagemap *conflict_pgmap;
+       struct mhp_restrictions restrictions = {
+               /*
+                * We do not want any optional features, only our own memmap
+                */
+               .altmap = pgmap_altmap(pgmap),
+       };
+       pgprot_t pgprot = PAGE_KERNEL;
+       int error, nid, is_ram;
+       bool need_devmap_managed = true;
+
+       switch (pgmap->type) {
+       case MEMORY_DEVICE_PRIVATE:
+               if (!IS_ENABLED(CONFIG_DEVICE_PRIVATE)) {
+                       WARN(1, "Device private memory not supported\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               if (!pgmap->ops || !pgmap->ops->migrate_to_ram) {
+                       WARN(1, "Missing migrate_to_ram method\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               break;
+       case MEMORY_DEVICE_FS_DAX:
+               if (!IS_ENABLED(CONFIG_ZONE_DEVICE) ||
+                   IS_ENABLED(CONFIG_FS_DAX_LIMITED)) {
+                       WARN(1, "File system DAX not supported\n");
+                       return ERR_PTR(-EINVAL);
+               }
+               break;
+       case MEMORY_DEVICE_DEVDAX:
+       case MEMORY_DEVICE_PCI_P2PDMA:
+               need_devmap_managed = false;
+               break;
+       default:
+               WARN(1, "Invalid pgmap type %d\n", pgmap->type);
+               break;
+       }
+
+       if (!pgmap->ref) {
+               if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
+                       return ERR_PTR(-EINVAL);
+
+               init_completion(&pgmap->done);
+               error = percpu_ref_init(&pgmap->internal_ref,
+                               dev_pagemap_percpu_release, 0, GFP_KERNEL);
+               if (error)
+                       return ERR_PTR(error);
+               pgmap->ref = &pgmap->internal_ref;
+       } else {
+               if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
+                       WARN(1, "Missing reference count teardown definition\n");
+                       return ERR_PTR(-EINVAL);
+               }
+       }
+
+       if (need_devmap_managed) {
+               error = devmap_managed_enable_get(dev, pgmap);
+               if (error)
+                       return ERR_PTR(error);
+       }
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               error = -ENOMEM;
+               goto err_array;
+       }
+
+       conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
+       if (conflict_pgmap) {
+               dev_WARN(dev, "Conflicting mapping in same section\n");
+               put_dev_pagemap(conflict_pgmap);
+               error = -ENOMEM;
+               goto err_array;
+       }
+
+       is_ram = region_intersects(res->start, resource_size(res),
+               IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+
+       if (is_ram != REGION_DISJOINT) {
+               WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
+                               is_ram == REGION_MIXED ? "mixed" : "ram", res);
+               error = -ENXIO;
+               goto err_array;
+       }
+
+       pgmap->dev = dev;
+
+       error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
+                               PHYS_PFN(res->end), pgmap, GFP_KERNEL));
+       if (error)
+               goto err_array;
+
+       nid = dev_to_node(dev);
+       if (nid < 0)
+               nid = numa_mem_id();
+
+       error = track_pfn_remap(NULL, &pgprot, PHYS_PFN(res->start), 0,
+                       resource_size(res));
+       if (error)
+               goto err_pfn_remap;
+
+       mem_hotplug_begin();
+
+       /*
+        * For device private memory we call add_pages() as we only need to
+        * allocate and initialize struct page for the device memory. Moreover,
+        * the device memory is inaccessible, so we do not want to
+        * create a linear mapping for the memory like arch_add_memory()
+        * would do.
+        *
+        * For all other device memory types, which are accessible by
+        * the CPU, we do want the linear mapping and thus use
+        * arch_add_memory().
+        */
+       if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
+               error = add_pages(nid, PHYS_PFN(res->start),
+                               PHYS_PFN(resource_size(res)), &restrictions);
+       } else {
+               error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
+               if (error) {
+                       mem_hotplug_done();
+                       goto err_kasan;
+               }
+
+               error = arch_add_memory(nid, res->start, resource_size(res),
+                                       &restrictions);
+       }
+
+       if (!error) {
+               struct zone *zone;
+
+               zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
+               move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
+                               PHYS_PFN(resource_size(res)), restrictions.altmap);
+       }
+
+       mem_hotplug_done();
+       if (error)
+               goto err_add_memory;
+
+       /*
+        * Initialization of the pages has been deferred until now in order
+        * to allow us to do the work while not holding the hotplug lock.
+        */
+       memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
+                               PHYS_PFN(res->start),
+                               PHYS_PFN(resource_size(res)), pgmap);
+       percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
+
+       error = devm_add_action_or_reset(dev, devm_memremap_pages_release,
+                       pgmap);
+       if (error)
+               return ERR_PTR(error);
+
+       return __va(res->start);
+
+ err_add_memory:
+       kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+ err_kasan:
+       untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+ err_pfn_remap:
+       pgmap_array_delete(res);
+ err_array:
+       dev_pagemap_kill(pgmap);
+       dev_pagemap_cleanup(pgmap);
+       return ERR_PTR(error);
+}
+EXPORT_SYMBOL_GPL(devm_memremap_pages);
+
+void devm_memunmap_pages(struct device *dev, struct dev_pagemap *pgmap)
+{
+       devm_release_action(dev, devm_memremap_pages_release, pgmap);
+}
+EXPORT_SYMBOL_GPL(devm_memunmap_pages);
+
+unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
+{
+       /* number of pfns from base where pfn_to_page() is valid */
+       if (altmap)
+               return altmap->reserve + altmap->free;
+       return 0;
+}
+
+void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns)
+{
+       altmap->alloc -= nr_pfns;
+}
+
+/**
+ * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
+ * @pfn: page frame number to look up a page map for
+ * @pgmap: optional known pgmap that already has a reference
+ *
+ * If @pgmap is non-NULL and covers @pfn it will be returned as-is.  If @pgmap
+ * is non-NULL but does not cover @pfn the reference to it will be released.
+ */
+struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
+               struct dev_pagemap *pgmap)
+{
+       resource_size_t phys = PFN_PHYS(pfn);
+
+       /*
+        * In the cached case we're already holding a live reference.
+        */
+       if (pgmap) {
+               if (phys >= pgmap->res.start && phys <= pgmap->res.end)
+                       return pgmap;
+               put_dev_pagemap(pgmap);
+       }
+
+       /* fall back to slow path lookup */
+       rcu_read_lock();
+       pgmap = xa_load(&pgmap_array, PHYS_PFN(phys));
+       if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
+               pgmap = NULL;
+       rcu_read_unlock();
+
+       return pgmap;
+}
+EXPORT_SYMBOL_GPL(get_dev_pagemap);
+
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+void __put_devmap_managed_page(struct page *page)
+{
+       int count = page_ref_dec_return(page);
+
+       /*
+        * If the refcount is 1 the page is being freed, and the refcount is
+        * stable as nobody else holds a reference on the page.
+        */
+       if (count == 1) {
+               /* Clear Active bit in case of parallel mark_page_accessed */
+               __ClearPageActive(page);
+               __ClearPageWaiters(page);
+
+               mem_cgroup_uncharge(page);
+
+               page->pgmap->ops->page_free(page);
+       } else if (!count)
+               __put_page(page);
+}
+EXPORT_SYMBOL(__put_devmap_managed_page);
+#endif /* CONFIG_DEV_PAGEMAP_OPS */
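
Per the kernel-doc notes in devm_memremap_pages() above, a caller must fill in at least res and type, and for MEMORY_DEVICE_PRIVATE the ops table must provide migrate_to_ram (checked at the top of the function) and page_free (checked by devmap_managed_enable_get()). A hedged sketch of a minimal caller — the probe function and empty callbacks are stand-ins, not a real driver:

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/memremap.h>
        #include <linux/mm.h>

        static vm_fault_t demo_migrate_to_ram(struct vm_fault *vmf)
        {
                return VM_FAULT_SIGBUS;         /* stand-in */
        }

        static void demo_page_free(struct page *page)
        {
        }

        static const struct dev_pagemap_ops demo_pagemap_ops = {
                .migrate_to_ram = demo_migrate_to_ram,
                .page_free      = demo_page_free,
        };

        static int demo_probe(struct device *dev, struct resource *res)
        {
                struct dev_pagemap *pgmap;
                void *addr;

                pgmap = devm_kzalloc(dev, sizeof(*pgmap), GFP_KERNEL);
                if (!pgmap)
                        return -ENOMEM;

                pgmap->res = *res;                      /* note 1/: mandatory */
                pgmap->type = MEMORY_DEVICE_PRIVATE;    /* note 1/: mandatory */
                pgmap->ops = &demo_pagemap_ops;

                addr = devm_memremap_pages(dev, pgmap);
                return PTR_ERR_OR_ZERO(addr);
        }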
index 8992741f10aad4bb1e56c4635ef84aac57176550..a42858d8e00b7bae8d43e9746fb786a16a64e809 100644 (file)
@@ -767,12 +767,12 @@ recheck_buffers:
                        }
                        bh = bh->b_this_page;
                } while (bh != head);
-               spin_unlock(&mapping->private_lock);
                if (busy) {
                        if (invalidated) {
                                rc = -EAGAIN;
                                goto unlock_buffers;
                        }
+                       spin_unlock(&mapping->private_lock);
                        invalidate_bh_lrus();
                        invalidated = true;
                        goto recheck_buffers;
@@ -805,6 +805,8 @@ recheck_buffers:
 
        rc = MIGRATEPAGE_SUCCESS;
 unlock_buffers:
+       if (check_refs)
+               spin_unlock(&mapping->private_lock);
        bh = head;
        do {
                unlock_buffer(bh);
@@ -2338,16 +2340,13 @@ next:
 static void migrate_vma_collect(struct migrate_vma *migrate)
 {
        struct mmu_notifier_range range;
-       struct mm_walk mm_walk;
-
-       mm_walk.pmd_entry = migrate_vma_collect_pmd;
-       mm_walk.pte_entry = NULL;
-       mm_walk.pte_hole = migrate_vma_collect_hole;
-       mm_walk.hugetlb_entry = NULL;
-       mm_walk.test_walk = NULL;
-       mm_walk.vma = migrate->vma;
-       mm_walk.mm = migrate->vma->vm_mm;
-       mm_walk.private = migrate;
+       struct mm_walk mm_walk = {
+               .pmd_entry = migrate_vma_collect_pmd,
+               .pte_hole = migrate_vma_collect_hole,
+               .vma = migrate->vma,
+               .mm = migrate->vma->vm_mm,
+               .private = migrate,
+       };
 
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
                                migrate->start,
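
The conversion to a designated initializer above is not purely cosmetic: members that are not named (pte_entry, hugetlb_entry, test_walk) are guaranteed to be zero-initialized, where the old open-coded version had to NULL each one by hand and could silently miss a newly added field. A tiny standalone example of that C guarantee, with illustrative names:

        #include <assert.h>

        struct walk_demo {
                int (*pmd_entry)(void);
                int (*pte_entry)(void);
                void *private;
        };

        int main(void)
        {
                /*
                 * Members omitted from a designated initializer are
                 * zero-initialized, so pte_entry is NULL without saying so.
                 */
                struct walk_demo w = { .private = (void *)1 };

                assert(w.pmd_entry == 0 && w.pte_entry == 0);
                return 0;
        }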
index e6c030e473649b7a0ee94cea464d46f6c352ea9d..8834563cdb4bd4bedc4c59fad15de6afa6850717 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
        void *old_tail = *tail ? *tail : *head;
        int rsize;
 
-       if (slab_want_init_on_free(s))
+       if (slab_want_init_on_free(s)) {
+               void *p = NULL;
+
                do {
                        object = next;
                        next = get_freepointer(s, object);
@@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
                                                           : 0;
                        memset((char *)object + s->inuse, 0,
                               s->size - s->inuse - rsize);
-                       set_freepointer(s, object, next);
+                       set_freepointer(s, object, p);
+                       p = object;
                } while (object != old_tail);
+       }
 
 /*
  * Compiler cannot detect this function can be removed if slab_free_hook()
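
In the init_on_free hunk above, linking each object to the previously processed one (`p`) rebuilds the freelist in reverse, so the free pointers written after the memset() are not themselves wiped. A hedged userspace model of that relinking — struct layout and sizes are illustrative, not SLUB's:

        #include <assert.h>
        #include <stddef.h>
        #include <string.h>

        struct obj { struct obj *free; char payload[16]; };

        /*
         * Wipe each payload, then link the object to the previously
         * processed one, yielding a reversed but fully wiped freelist.
         */
        static struct obj *wipe_and_relink(struct obj *head)
        {
                struct obj *p = NULL, *next = head;

                while (next) {
                        struct obj *object = next;

                        next = object->free;
                        memset(object->payload, 0, sizeof(object->payload));
                        object->free = p;   /* set_freepointer(s, object, p) */
                        p = object;
                }
                return p;                   /* new head: old tail */
        }

        int main(void)
        {
                struct obj a, b;

                a.free = &b;
                b.free = NULL;
                assert(wipe_and_relink(&a) == &b);
                return 0;
        }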
index 44df66a98f2adbbd2c18c38dec8eed9e45b4a6ce..dbdc46a84f63089de614a2807ef62a70f7c14968 100644 (file)
@@ -699,7 +699,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
        unsigned long ret, freed = 0;
        struct shrinker *shrinker;
 
-       if (!mem_cgroup_is_root(memcg))
+       /*
+        * The root memcg might be allocated even though memcg is disabled
+        * via the "cgroup_disable=memory" boot parameter.  That could make
+        * mem_cgroup_is_root() return false, so only the memcg slab
+        * shrinkers would run and the global ones would be skipped,
+        * which may result in a premature OOM.
+        */
+       if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
                return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
 
        if (!down_read_trylock(&shrinker_rwsem))
index 624ccc6ac744512380ddbf8727f1e1aae5633a78..f8efaa9f647c1d994311d082e526eb5692b4af5c 100644 (file)
@@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
        return v;
 }
 
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
 /*
  * Initialize a policy database structure.
  */
@@ -319,8 +321,10 @@ static int policydb_init(struct policydb *p)
 out:
        hashtab_destroy(p->filename_trans);
        hashtab_destroy(p->range_tr);
-       for (i = 0; i < SYM_NUM; i++)
+       for (i = 0; i < SYM_NUM; i++) {
+               hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
                hashtab_destroy(p->symtab[i].table);
+       }
        return rc;
 }
 
index 12dd9b318db18adcc087c64ae5a4db7e9661d30f..703857aab00fc17e6ca667fdcaf27e94200bc84f 100644 (file)
@@ -1873,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
                if (!to_check)
                        break; /* all drained */
                init_waitqueue_entry(&wait, current);
+               set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&to_check->sleep, &wait);
                snd_pcm_stream_unlock_irq(substream);
                if (runtime->no_period_wakeup)
@@ -1885,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
                        }
                        tout = msecs_to_jiffies(tout * 1000);
                }
-               tout = schedule_timeout_interruptible(tout);
+               tout = schedule_timeout(tout);
 
                snd_pcm_stream_lock_irq(substream);
                group = snd_pcm_stream_group_ref(substream);
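
The fix above restores the canonical sleep ordering: mark the task sleeping with set_current_state() before the wait-queue entry is published and the lock is dropped, so a wakeup arriving in that window turns the later schedule_timeout() into a no-op instead of being lost. A hedged model of the pattern, with illustrative names:

        #include <linux/sched.h>
        #include <linux/spinlock.h>
        #include <linux/wait.h>

        static long demo_wait_event(wait_queue_head_t *wq, spinlock_t *lock,
                                    long timeout)
        {
                wait_queue_entry_t wait;
                long tout;

                init_waitqueue_entry(&wait, current);
                set_current_state(TASK_INTERRUPTIBLE); /* before publishing */
                add_wait_queue(wq, &wait);
                spin_unlock_irq(lock);

                /* a wakeup between the lines above and here is not lost */
                tout = schedule_timeout(timeout);

                spin_lock_irq(lock);
                remove_wait_queue(wq, &wait);
                return tout;
        }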
index 1192c7561d62c61faa6be93178d6766d313a931c..3c2db3816029810eac81193a6d86554a10cb56cb 100644 (file)
@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
        if (!acomp)
                return -ENODEV;
        if (!acomp->ops) {
-               request_module("i915");
-               /* 60s timeout */
-               wait_for_completion_timeout(&bind_complete,
-                                           msecs_to_jiffies(60 * 1000));
+               if (!IS_ENABLED(CONFIG_MODULES) ||
+                   !request_module("i915")) {
+                       /* 60s timeout */
+                       wait_for_completion_timeout(&bind_complete,
+                                                  msecs_to_jiffies(60 * 1000));
+               }
        }
        if (!acomp->ops) {
                dev_info(bus->dev, "couldn't bind with audio component\n");
index 71d5f540334a23ea2904cb41e67d066c588ee932..4c12cc5b53fda0dd5085cfec00c08cc28d86f53d 100644 (file)
@@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
        struct usb_host_endpoint *ep;
 
        ep = usb_pipe_endpoint(dev, pipe);
-       if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+       if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
                return -EINVAL;
        return 0;
 }
index 4602464ebdfbfccd9296593ed5dd8b15f55305ff..a4217c1a5d01178c53346ea3f48ed3415aa956fd 100644 (file)
@@ -214,6 +214,18 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_FW_REG(r)          (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
                                         KVM_REG_ARM_FW | ((r) & 0xffff))
 #define KVM_REG_ARM_PSCI_VERSION       KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1    KVM_REG_ARM_FW_REG(1)
+       /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL          0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL              1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED       2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2    KVM_REG_ARM_FW_REG(2)
+       /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL          0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN            1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL              2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED       3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED    (1U << 4)
 
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
index d819a3e8b552b47bd3019eeef6c39ccc4500a6c9..9a507716ae2fe723624d7c7de3c1b88501d5f9a3 100644 (file)
@@ -229,6 +229,16 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_FW_REG(r)          (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                                         KVM_REG_ARM_FW | ((r) & 0xffff))
 #define KVM_REG_ARM_PSCI_VERSION       KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1    KVM_REG_ARM_FW_REG(1)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL          0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL              1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED       2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2    KVM_REG_ARM_FW_REG(2)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL          0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN            1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL              2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED       3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED            (1U << 4)
 
 /* SVE registers */
 #define KVM_REG_ARM64_SVE              (0x15 << KVM_REG_ARM_COPROC_SHIFT)
index f33105bc5ca612eaa5cea7125bfc0c7a771f13f9..8601d824a9c69417247d8c19c043d71a3f71b724 100644 (file)
@@ -4,12 +4,8 @@
 #define MAP_DENYWRITE  0x0800
 #define MAP_EXECUTABLE 0x1000
 #define MAP_GROWSDOWN  0x0100
-#define MAP_HUGETLB    0x40000
 #define MAP_LOCKED     0x80
-#define MAP_NONBLOCK   0x10000
 #define MAP_NORESERVE   0x40
-#define MAP_POPULATE   0x8000
-#define MAP_STACK      0x20000
 #include <uapi/asm-generic/mman-common.h>
 /* MAP_32BIT is undefined on powerpc, fix it for perf */
 #define MAP_32BIT      0
index 38920eed8cbf11a754270cff08fad9831172b365..7b94dccc843d5de23af1a651b384ed35d1ee9e82 100644 (file)
@@ -4,12 +4,8 @@
 #define MAP_DENYWRITE  0x0800
 #define MAP_EXECUTABLE 0x1000
 #define MAP_GROWSDOWN  0x0200
-#define MAP_HUGETLB    0x40000
 #define MAP_LOCKED      0x100
-#define MAP_NONBLOCK   0x10000
 #define MAP_NORESERVE   0x40
-#define MAP_POPULATE   0x8000
-#define MAP_STACK      0x20000
 #include <uapi/asm-generic/mman-common.h>
 /* MAP_32BIT is undefined on sparc, fix it for perf */
 #define MAP_32BIT      0
index d6ab5b4d15e543800a7a7524517b495fa6305074..503d3f42da1676791d2c4f4a70bfad35743daf4c 100644 (file)
@@ -378,10 +378,11 @@ struct kvm_sync_regs {
        struct kvm_vcpu_events events;
 };
 
-#define KVM_X86_QUIRK_LINT0_REENABLED  (1 << 0)
-#define KVM_X86_QUIRK_CD_NW_CLEARED    (1 << 1)
-#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE  (1 << 2)
-#define KVM_X86_QUIRK_OUT_7E_INC_RIP   (1 << 3)
+#define KVM_X86_QUIRK_LINT0_REENABLED     (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED       (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE     (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP      (1 << 3)
+#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
 
 #define KVM_STATE_NESTED_FORMAT_VMX    0
 #define KVM_STATE_NESTED_FORMAT_SVM    1       /* unused */
@@ -432,4 +433,17 @@ struct kvm_nested_state {
        } data;
 };
 
+/* for KVM_CAP_PMU_EVENT_FILTER */
+struct kvm_pmu_event_filter {
+       __u32 action;
+       __u32 nevents;
+       __u32 fixed_counter_bitmap;
+       __u32 flags;
+       __u32 pad[4];
+       __u64 events[0];
+};
+
+#define KVM_PMU_EVENT_ALLOW 0
+#define KVM_PMU_EVENT_DENY 1
+
 #endif /* _ASM_X86_KVM_H */
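
struct kvm_pmu_event_filter above is the argument for the new KVM_CAP_PMU_EVENT_FILTER mechanism. A hedged userspace sketch of installing an allow-list filter on a VM fd; the KVM_SET_PMU_EVENT_FILTER ioctl belongs to the same series but is not shown in this hunk, so treat its use here as an assumption:

        #include <linux/kvm.h>
        #include <stdlib.h>
        #include <sys/ioctl.h>

        /* Allow only one raw PMU event on @vm_fd; all others are filtered. */
        static int demo_set_pmu_filter(int vm_fd, __u64 event)
        {
                struct kvm_pmu_event_filter *f;
                int ret;

                /* events[] is a flexible array: allocate space for one entry */
                f = calloc(1, sizeof(*f) + sizeof(__u64));
                if (!f)
                        return -1;
                f->action = KVM_PMU_EVENT_ALLOW;
                f->nevents = 1;
                f->events[0] = event;

                ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
                free(f);
                return ret;
        }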
index d213ec5c3766db0dd5176c951b13e5f3c1514cfb..f0b0c90dd398246eb2882050d69c6b53ccca11af 100644 (file)
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
-#define VMX_ABORT_VMCS_CORRUPTED             3
 #define VMX_ABORT_LOAD_HOST_MSR_FAIL         4
 
 #endif /* _UAPIVMX_H */
index abd238d0f7a48d718728cacde7853c60846bc539..63b1f506ea678200d9433002e207ee9cf7996ad9 100644 (file)
 #define MAP_TYPE       0x0f            /* Mask for type of mapping */
 #define MAP_FIXED      0x10            /* Interpret addr exactly */
 #define MAP_ANONYMOUS  0x20            /* don't use a file */
-#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
-# define MAP_UNINITIALIZED 0x4000000   /* For anonymous mmap, memory could be uninitialized */
-#else
-# define MAP_UNINITIALIZED 0x0         /* Don't support this flag */
-#endif
 
-/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#define MAP_POPULATE           0x008000        /* populate (prefault) pagetables */
+#define MAP_NONBLOCK           0x010000        /* do not block on IO */
+#define MAP_STACK              0x020000        /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB            0x040000        /* create a huge page mapping */
+#define MAP_SYNC               0x080000 /* perform synchronous page faults for the mapping */
 #define MAP_FIXED_NOREPLACE    0x100000        /* MAP_FIXED which doesn't unmap underlying mapping */
 
+#define MAP_UNINITIALIZED 0x4000000    /* For anonymous mmap, memory could be
+                                        * uninitialized */
+
 /*
  * Flags for mlock
  */
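
The hunk above moves MAP_POPULATE, MAP_NONBLOCK, MAP_STACK, MAP_HUGETLB, and MAP_SYNC into mman-common.h with their documented meanings. A short userspace example using one of them, assuming a libc whose <sys/mman.h> exposes MAP_POPULATE:

        #include <stdio.h>
        #include <sys/mman.h>

        int main(void)
        {
                /*
                 * MAP_POPULATE prefaults the pagetables up front, trading a
                 * slower mmap() for no minor faults on first touch.
                 */
                size_t len = 1 << 20;
                void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                               MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE,
                               -1, 0);
                if (p == MAP_FAILED) {
                        perror("mmap");
                        return 1;
                }
                munmap(p, len);
                return 0;
        }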
index 36c197fc44a0d5df08110715f11cb8b3eb6caff7..406f7718f9ad074fdbe7f457e82ecb403e4d8995 100644 (file)
@@ -9,13 +9,11 @@
 #define MAP_EXECUTABLE 0x1000          /* mark it as an executable */
 #define MAP_LOCKED     0x2000          /* pages are locked */
 #define MAP_NORESERVE  0x4000          /* don't check for reservations */
-#define MAP_POPULATE   0x8000          /* populate (prefault) pagetables */
-#define MAP_NONBLOCK   0x10000         /* do not block on IO */
-#define MAP_STACK      0x20000         /* give out an address that is best suited for process/thread stacks */
-#define MAP_HUGETLB    0x40000         /* create a huge page mapping */
-#define MAP_SYNC       0x80000         /* perform synchronous page faults for the mapping */
 
-/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
+/*
+ * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h
+ * for MAP_HUGETLB usage
+ */
 
 #define MCL_CURRENT    1               /* lock all current mappings */
 #define MCL_FUTURE     2               /* lock all future mappings */
index a87904daf1034449980afb86b952a538f84b9da9..1be0e798e36218c1d1bbbf9b46b42b7deb9c9e57 100644 (file)
@@ -844,9 +844,15 @@ __SYSCALL(__NR_fsconfig, sys_fsconfig)
 __SYSCALL(__NR_fsmount, sys_fsmount)
 #define __NR_fspick 433
 __SYSCALL(__NR_fspick, sys_fspick)
+#define __NR_pidfd_open 434
+__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+#ifdef __ARCH_WANT_SYS_CLONE3
+#define __NR_clone3 435
+__SYSCALL(__NR_clone3, sys_clone3)
+#endif
 
 #undef __NR_syscalls
-#define __NR_syscalls 434
+#define __NR_syscalls 436
 
 /*
  * 32 bit systems traditionally used different
index 661d73f9a919996f88bec2c37e1e5543b8159373..8a5b2f8f8eb98b0f1f170960239327d420971354 100644 (file)
@@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t;
 
 #else /* One of the BSDs */
 
+#include <stdint.h>
 #include <sys/ioccom.h>
 #include <sys/types.h>
 typedef int8_t   __s8;
index 3a73f5316766c4216416929f774e4c56f9f92024..328d05e77d9f6c6b3783dc67ba7316545bb9c0c3 100644 (file)
@@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class {
 struct i915_engine_class_instance {
        __u16 engine_class; /* see enum drm_i915_gem_engine_class */
        __u16 engine_instance;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
 };
 
 /**
@@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_I915_PERF_ADD_CONFIG       0x37
 #define DRM_I915_PERF_REMOVE_CONFIG    0x38
 #define DRM_I915_QUERY                 0x39
+#define DRM_I915_GEM_VM_CREATE         0x3a
+#define DRM_I915_GEM_VM_DESTROY                0x3b
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT            DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG      DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
 #define DRM_IOCTL_I915_QUERY                   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE   DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY  DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_MMAP_GTT_COHERENT   52
 
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
 /* Must be kept compact -- no holes and well documented */
 
 typedef struct drm_i915_getparam {
@@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 {
  */
 #define I915_EXEC_FENCE_ARRAY   (1<<19)
 
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT         (1 << 20)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
 
 #define I915_EXEC_CONTEXT_ID_MASK      (0xffffffff)
 #define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext {
        __u32 ctx_id; /* output: id of new context*/
        __u32 flags;
 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS       (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE      (1u << 1)
 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
-       (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+       (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
        __u64 extensions;
 };
 
@@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param {
  * On creation, all new contexts are marked as recoverable.
  */
 #define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+       /*
+        * The id of the associated virtual memory address space (ppGTT) of
+        * this context. Can be retrieved and passed to another context
+        * (on the same fd) for both to use the same ppGTT and so share
+        * address layouts, and avoid reloading the page tables on context
+        * switches between themselves.
+        *
+        * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+        */
+#define I915_CONTEXT_PARAM_VM          0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
+ * and upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ *     engine_class: I915_ENGINE_CLASS_INVALID,
+ *     engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a
+ * zero-sized argument, will revert to the default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ */
+#define I915_CONTEXT_PARAM_ENGINES     0xa
 /* Must be kept compact -- no holes and well documented */
 
        __u64 value;
@@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu {
        struct i915_engine_class_instance engine;
 
        /*
-        * Unused for now. Must be cleared to zero.
+        * Unknown flags must be cleared to zero.
         */
        __u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
 
        /*
         * Mask of slices to enable for the context. Valid values are a subset
@@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu {
        __u32 rsvd;
 };
 
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * A virtual engine is created in the I915_EXEC_DEFAULT slot [0]; when used,
+ * it proxies the execbuffer request onto one of the engines in the set so
+ * as to distribute the load evenly across them.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+       struct i915_user_extension base;
+
+       __u16 engine_index;
+       __u16 num_siblings;
+       __u32 flags; /* all undefined flags must be zero */
+
+       __u64 mbz64; /* reserved for future use; must be zero */
+
+       struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+       struct i915_user_extension base; \
+       __u16 engine_index; \
+       __u16 num_siblings; \
+       __u32 flags; \
+       __u64 mbz64; \
+       struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Construct bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs: for any given master engine, we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * Executing a request in parallel on the master engine and a sibling requires
+ * coordination via I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+       struct i915_user_extension base;
+
+       struct i915_engine_class_instance master;
+
+       __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+       __u16 num_bonds;
+
+       __u64 flags; /* all undefined flags must be zero */
+       __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+       struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+       struct i915_user_extension base; \
+       struct i915_engine_class_instance master; \
+       __u16 virtual_index; \
+       __u16 num_bonds; \
+       __u64 flags; \
+       __u64 mbz64[4]; \
+       struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+struct i915_context_param_engines {
+       __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+       struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+       __u64 extensions; \
+       struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
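A sketch of installing an explicit engine map with the sized-array helper above; the chosen (class, instance) pairs are illustrative and should be validated (e.g. via DRM_I915_QUERY_ENGINE_INFO) in real code:

#include <drm/i915_drm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int bind_two_video_engines(int fd, __u32 ctx_id)
{
        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
                .extensions = 0,
                .engines = {
                        { I915_ENGINE_CLASS_VIDEO, 0 },
                        { I915_ENGINE_CLASS_VIDEO, 1 },
                },
        };
        struct drm_i915_gem_context_param p = {
                .ctx_id = ctx_id,
                .param = I915_CONTEXT_PARAM_ENGINES,
                .size = sizeof(engines),
                .value = (uintptr_t)&engines,
        };

        return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}

Once installed, I915_EXEC_DEFAULT in the execbuffer flags selects engines[0], and index 1 selects engines[1].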
+
 struct drm_i915_gem_context_create_ext_setparam {
 #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
        struct i915_user_extension base;
        struct drm_i915_gem_context_param param;
 };
 
+struct drm_i915_gem_context_create_ext_clone {
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+       struct i915_user_extension base;
+       __u32 clone_id;
+       __u32 flags;
+#define I915_CONTEXT_CLONE_ENGINES     (1u << 0)
+#define I915_CONTEXT_CLONE_FLAGS       (1u << 1)
+#define I915_CONTEXT_CLONE_SCHEDATTR   (1u << 2)
+#define I915_CONTEXT_CLONE_SSEU                (1u << 3)
+#define I915_CONTEXT_CLONE_TIMELINE    (1u << 4)
+#define I915_CONTEXT_CLONE_VM          (1u << 5)
+#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
+       __u64 rsvd;
+};
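A hedged sketch of the clone extension: create a new context inheriting the engine map and ppGTT of an existing one. It assumes DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT is available and that clone_id names a live context on the same fd:

#include <drm/i915_drm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int clone_context(int fd, __u32 clone_id, __u32 *new_ctx)
{
        struct drm_i915_gem_context_create_ext_clone clone = {
                .base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
                .clone_id = clone_id,
                .flags = I915_CONTEXT_CLONE_ENGINES | I915_CONTEXT_CLONE_VM,
        };
        struct drm_i915_gem_context_create_ext create = {
                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
                .extensions = (uintptr_t)&clone,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
                return -1;
        *new_ctx = create.ctx_id;
        return 0;
}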
+
 struct drm_i915_gem_context_destroy {
        __u32 ctx_id;
        __u32 pad;
@@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config {
 struct drm_i915_query_item {
        __u64 query_id;
 #define DRM_I915_QUERY_TOPOLOGY_INFO    1
+#define DRM_I915_QUERY_ENGINE_INFO     2
 /* Must be kept compact -- no holes and well documented */
 
        /*
@@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info {
        __u8 data[];
 };
 
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+       /** Engine class and instance. */
+       struct i915_engine_class_instance engine;
+
+       /** Reserved field. */
+       __u32 rsvd0;
+
+       /** Engine flags. */
+       __u64 flags;
+
+       /** Capabilities of this engine. */
+       __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC               (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC    (1 << 1)
+
+       /** Reserved fields. */
+       __u64 rsvd1[4];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+       /** Number of struct drm_i915_engine_info structs following. */
+       __u32 num_engines;
+
+       /** MBZ */
+       __u32 rsvd[3];
+
+       /** Marker for drm_i915_engine_info structures. */
+       struct drm_i915_engine_info engines[];
+};
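The new query follows the existing two-pass convention: issue it once with length 0 so the kernel reports the required size, allocate, then fetch. A sketch with error paths trimmed:

#include <drm/i915_drm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static struct drm_i915_query_engine_info *query_engines(int fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_ENGINE_INFO,
        };
        struct drm_i915_query q = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_engine_info *info;

        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q) || item.length <= 0)
                return NULL;    /* first pass: kernel fills item.length */

        info = calloc(1, item.length);
        item.data_ptr = (uintptr_t)info;
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &q)) {
                free(info);
                return NULL;
        }
        return info;            /* caller frees; holds num_engines entries */
}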
+
 #if defined(__cplusplus)
 }
 #endif
index 7d113a9602f06801b948ebeaff2fa4bbbbed2ce5..4a8c02cafa9a1220cda77bc2dcf923bac9ab1fce 100644 (file)
@@ -695,6 +695,7 @@ enum {
        IFLA_VF_IB_NODE_GUID,   /* VF Infiniband node GUID */
        IFLA_VF_IB_PORT_GUID,   /* VF Infiniband port GUID */
        IFLA_VF_VLAN_LIST,      /* nested list of vlans, option for QinQ */
+       IFLA_VF_BROADCAST,      /* VF broadcast */
        __IFLA_VF_MAX,
 };
 
@@ -705,6 +706,10 @@ struct ifla_vf_mac {
        __u8 mac[32]; /* MAX_ADDR_LEN */
 };
 
+struct ifla_vf_broadcast {
+       __u8 broadcast[32];
+};
+
 struct ifla_vf_vlan {
        __u32 vf;
        __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
index e7c67be7c15fc3cd429deb6c30f9aa6fe318773e..5e3f12d5359e731290e499c0733e9b6fb496158e 100644 (file)
@@ -995,6 +995,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_SVE 170
 #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172
+#define KVM_CAP_PMU_EVENT_FILTER 173
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1329,6 +1330,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_PPC_GET_RMMU_INFO    _IOW(KVMIO,  0xb0, struct kvm_ppc_rmmu_info)
 /* Available with KVM_CAP_PPC_GET_CPU_CHAR */
 #define KVM_PPC_GET_CPU_CHAR     _IOR(KVMIO,  0xb1, struct kvm_ppc_cpu_char)
+/* Available with KVM_CAP_PMU_EVENT_FILTER */
+#define KVM_SET_PMU_EVENT_FILTER  _IOW(KVMIO,  0xb2, struct kvm_pmu_event_filter)
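A hedged sketch of installing a deny-list with one raw event. Only the action, nevents, and events fields of struct kvm_pmu_event_filter (from <linux/kvm.h>) are relied on; the KVM_PMU_EVENT_DENY action value is assumed from the matching x86 KVM change, and vm_fd is assumed to come from KVM_CREATE_VM:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int deny_pmu_event(int vm_fd, __u64 raw_event)
{
        size_t sz = sizeof(struct kvm_pmu_event_filter) + sizeof(__u64);
        struct kvm_pmu_event_filter *f = calloc(1, sz);
        int ret;

        if (!f)
                return -1;
        f->action = KVM_PMU_EVENT_DENY;
        f->nevents = 1;
        f->events[0] = raw_event;
        ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f);
        free(f);
        return ret;
}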
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
index ed4ee170bee2a5e7dc0c3cde35e1c5549799c427..b3105ac1381a8d8651f7f2fbe6b700bd745ead01 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _UAPI_LINUX_SCHED_H
 #define _UAPI_LINUX_SCHED_H
 
+#include <linux/types.h>
+
 /*
  * cloning flags:
  */
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
+/*
+ * Arguments for the clone3 syscall
+ */
+struct clone_args {
+       __aligned_u64 flags;
+       __aligned_u64 pidfd;
+       __aligned_u64 child_tid;
+       __aligned_u64 parent_tid;
+       __aligned_u64 exit_signal;
+       __aligned_u64 stack;
+       __aligned_u64 stack_size;
+       __aligned_u64 tls;
+};
+
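A sketch of calling the new syscall directly (glibc has no wrapper at this point); the syscall number matches the x86-64 table entry added in this same merge, and the installed uapi headers are assumed to carry struct clone_args:

#define _GNU_SOURCE
#include <linux/sched.h>        /* struct clone_args */
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef __NR_clone3
#define __NR_clone3 435
#endif

/* Minimal fork() equivalent via clone3. */
static pid_t fork_clone3(void)
{
        struct clone_args args = {
                .exit_signal = SIGCHLD,
        };

        return syscall(__NR_clone3, &args, sizeof(args));
}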
 /*
  * Scheduling policies
  */
 #define SCHED_FLAG_RESET_ON_FORK       0x01
 #define SCHED_FLAG_RECLAIM             0x02
 #define SCHED_FLAG_DL_OVERRUN          0x04
+#define SCHED_FLAG_KEEP_POLICY         0x08
+#define SCHED_FLAG_KEEP_PARAMS         0x10
+#define SCHED_FLAG_UTIL_CLAMP_MIN      0x20
+#define SCHED_FLAG_UTIL_CLAMP_MAX      0x40
+
+#define SCHED_FLAG_KEEP_ALL    (SCHED_FLAG_KEEP_POLICY | \
+                                SCHED_FLAG_KEEP_PARAMS)
+
+#define SCHED_FLAG_UTIL_CLAMP  (SCHED_FLAG_UTIL_CLAMP_MIN | \
+                                SCHED_FLAG_UTIL_CLAMP_MAX)
 
 #define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK       | \
                         SCHED_FLAG_RECLAIM             | \
-                        SCHED_FLAG_DL_OVERRUN)
+                        SCHED_FLAG_DL_OVERRUN          | \
+                        SCHED_FLAG_KEEP_ALL            | \
+                        SCHED_FLAG_UTIL_CLAMP)
 
 #endif /* _UAPI_LINUX_SCHED_H */
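The UTIL_CLAMP flags pair with new sched_util_min/sched_util_max fields in struct sched_attr; the layout below is an assumption mirroring that change, and the flag values are taken from the defines above:

#define _GNU_SOURCE
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SCHED_FLAG_KEEP_ALL
#define SCHED_FLAG_KEEP_ALL       0x18  /* KEEP_POLICY | KEEP_PARAMS */
#endif
#ifndef SCHED_FLAG_UTIL_CLAMP_MIN
#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
#endif

/* Assumed to mirror the enlarged uapi struct sched_attr. */
struct sched_attr_uc {
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
        uint32_t sched_util_min;
        uint32_t sched_util_max;
};

/* Request a minimum utilization (0..1024) without touching policy/params. */
static int clamp_util_min(uint32_t util_min)
{
        struct sched_attr_uc attr = {
                .size           = sizeof(attr),
                .sched_flags    = SCHED_FLAG_KEEP_ALL | SCHED_FLAG_UTIL_CLAMP_MIN,
                .sched_util_min = util_min,
        };

        return syscall(SYS_sched_setattr, 0, &attr, 0);
}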
index 964e87217be4eaf9b73354fe31cd259915e1d29b..78efe870c2b7c85c9814ac90c04e137618bfd866 100644 (file)
@@ -76,6 +76,26 @@ struct usbdevfs_connectinfo {
        unsigned char slow;
 };
 
+struct usbdevfs_conninfo_ex {
+       __u32 size;             /* Size of the structure from the kernel's */
+                               /* point of view. Can be used by userspace */
+                               /* to determine how much data can be       */
+                               /* used/trusted.                           */
+       __u32 busnum;           /* USB bus number the device is connected  */
+                               /* to, as enumerated by the kernel.        */
+       __u32 devnum;           /* Device address on the bus.              */
+       __u32 speed;            /* USB_SPEED_* constants from ch9.h        */
+       __u8 num_ports;         /* Number of ports the device is connected */
+                               /* to on the way to the root hub. It may   */
+                               /* be bigger than the size of the 'ports'  */
+                               /* array so userspace can detect overflows.*/
+       __u8 ports[7];          /* List of ports on the way from the root  */
+                               /* hub to the device. Current limit in     */
+                               /* USB specification is 7 tiers (root hub, */
+                               /* 5 intermediate hubs, device), which     */
+                               /* gives at most 6 port entries.           */
+};
+
 #define USBDEVFS_URB_SHORT_NOT_OK      0x01
 #define USBDEVFS_URB_ISO_ASAP          0x02
 #define USBDEVFS_URB_BULK_CONTINUATION 0x04
@@ -137,6 +157,7 @@ struct usbdevfs_hub_portinfo {
 #define USBDEVFS_CAP_REAP_AFTER_DISCONNECT     0x10
 #define USBDEVFS_CAP_MMAP                      0x20
 #define USBDEVFS_CAP_DROP_PRIVILEGES           0x40
+#define USBDEVFS_CAP_CONNINFO_EX               0x80
 
 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */
 
@@ -197,5 +218,10 @@ struct usbdevfs_streams {
 #define USBDEVFS_FREE_STREAMS      _IOR('U', 29, struct usbdevfs_streams)
 #define USBDEVFS_DROP_PRIVILEGES   _IOW('U', 30, __u32)
 #define USBDEVFS_GET_SPEED         _IO('U', 31)
+/*
+ * Returns struct usbdevfs_conninfo_ex; length is variable to allow
+ * extending size of the data returned.
+ */
+#define USBDEVFS_CONNINFO_EX(len)  _IOC(_IOC_READ, 'U', 32, len)
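A usage sketch for the variable-length ioctl: the caller's buffer size is encoded in the command, and ci.size on return says how much the kernel actually filled. Assumes usbfd is open on a /dev/bus/usb node:

#include <linux/usbdevice_fs.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int print_conninfo(int usbfd)
{
        struct usbdevfs_conninfo_ex ci = { 0 };

        if (ioctl(usbfd, USBDEVFS_CONNINFO_EX(sizeof(ci)), &ci) < 0)
                return -1;

        printf("bus %u dev %u speed %u tiers %u\n",
               ci.busnum, ci.devnum, ci.speed, ci.num_ports);
        return 0;
}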
 
 #endif /* _UAPI_LINUX_USBDEVICE_FS_H */
index 03748a7421466c66ad9649561eda2cde29dc9f70..bae8879cdf58ae659ae8bfa71da0cf3f3877b294 100644 (file)
 
 #include <stdbool.h>
 #include <stddef.h>
+#ifdef __GLIBC__
+#include <bits/wordsize.h>
+#else
+#include <bits/reg.h>
+#endif
 #include "libbpf_internal.h"
 
 static inline size_t hash_bits(size_t h, int bits)
index 5f54feb199777e7855b6fa571447d453d421b0cf..d030c87ed9f57d0589faa99615b59f49761da895 100644 (file)
@@ -126,7 +126,7 @@ vendor,family,model,stepping. For example: GenuineIntel,6,69,1
 
        HEADER_TOTAL_MEM = 10,
 
-An uint64_t with the total memory in bytes.
+A uint64_t with the total memory in kilobytes.
 
        HEADER_CMDLINE = 11,
 
index b4e6f9e6204aa874f03337adc47f9bba0297f707..c29976eca4a8a86bdd2fc062b3e521afc0338301 100644 (file)
 431    common  fsconfig                __x64_sys_fsconfig
 432    common  fsmount                 __x64_sys_fsmount
 433    common  fspick                  __x64_sys_fspick
+434    common  pidfd_open              __x64_sys_pidfd_open
+435    common  clone3                  __x64_sys_clone3/ptregs
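pidfd_open, added alongside clone3 in the table above, likewise has no libc wrapper yet; a direct-call sketch:

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif

/* Returns a pollable fd referring to pid; flags must be 0 for now. */
static int pidfd_open(pid_t pid, unsigned int flags)
{
        return (int)syscall(__NR_pidfd_open, pid, flags);
}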
 
 #
 # x32-specific system call numbers start at 512 to avoid cache impact
index 930b80f422e83b933d5688bab64862c45cdee987..aa597ae53747074a4f831acde0a0bc5b07c0cc46 100755 (executable)
@@ -3,10 +3,13 @@
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
+# Also matches the parameterized form:
+# #define USBDEVFS_CONNINFO_EX(len)  _IOC(_IOC_READ, 'U', 32, len)
+
 printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
-regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
-egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
-       sed -r "s/$regex/\2 \1/g"       | \
+regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
+egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
+       sed -r "s/$regex/\4 \1/g"       | \
        sort | xargs printf "\t[%s] = \"%s\",\n"
 printf "};\n\n"
 printf "#if 0\n"
index 20111f8da5cb10f5acdbb00af4c11be2f6342008..1903d7ec9797609ee0a56a7f5a0a5505790992b1 100644 (file)
@@ -3559,6 +3559,13 @@ int perf_session__read_header(struct perf_session *session)
                           data->file.path);
        }
 
+       if (f_header.attr_size == 0) {
+               pr_err("ERROR: The %s file's attr size field is 0, which is unexpected.\n"
+                      "Was the 'perf record' command properly terminated?\n",
+                      data->file.path);
+               return -EINVAL;
+       }
+
        nr_attrs = f_header.attrs.size / f_header.attr_size;
        lseek(fd, f_header.attrs.offset, SEEK_SET);
 
@@ -3639,7 +3646,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
        size += sizeof(struct perf_event_header);
        size += ids * sizeof(u64);
 
-       ev = malloc(size);
+       ev = zalloc(size);
 
        if (ev == NULL)
                return -ENOMEM;
index 4c223266299aa7c90c1288bcbed7fd891f802f3d..bdb69599c4bdc2526943cf9a690e1ef0697872dd 100644 (file)
@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len)
                strtok(NULL, delim);
                strtok(NULL, delim);
 
-               if (strcmp(fs, "cgroup") == 0 &&
-                   strcmp(type, "cgroup2") == 0) {
+               if (strcmp(type, "cgroup2") == 0) {
                        strncpy(root, mount, len);
                        return 0;
                }
index 71231ad2dbfb5d2d4c3082a6669578dab59fcaef..47315fe48d5af51aa0593b0e0546dfff142976ba 100755 (executable)
@@ -262,7 +262,7 @@ test_mc_aware()
 
        stop_traffic
 
-       log_test "UC performace under MC overload"
+       log_test "UC performance under MC overload"
 
        echo "UC-only throughput  $(humanize $ucth1)"
        echo "UC+MC throughput    $(humanize $ucth2)"
@@ -316,7 +316,7 @@ test_uc_aware()
 
        stop_traffic
 
-       log_test "MC performace under UC overload"
+       log_test "MC performance under UC overload"
        echo "    ingress UC throughput $(humanize ${uc_ir})"
        echo "    egress UC throughput  $(humanize ${uc_er})"
        echo "    sent $attempts BC ARPs, got $passes responses"
index 0a76314b441494fcc38844c322fcaf1114912021..8b944cf042f6c8a98c8a303e230c47b1f57c75d4 100755 (executable)
@@ -28,7 +28,7 @@
 # override by exporting to your environment prior running this script.
 # For instance this script assumes you do not have xfs loaded upon boot.
 # If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this
-# script if the filesyste module you don't have loaded upon bootup
+# script if the filesystem module you don't have loaded upon bootup
 # is ext4 instead. Refer to allow_user_defaults() for a list of user
 # override variables possible.
 #
@@ -263,7 +263,7 @@ config_get_test_result()
 config_reset()
 {
        if ! echo -n "1" >"$DIR"/reset; then
-               echo "$0: reset shuld have worked" >&2
+               echo "$0: reset should have worked" >&2
                exit 1
        fi
 }
@@ -488,7 +488,7 @@ usage()
        echo Example uses:
        echo
        echo "${TEST_NAME}.sh           -- executes all tests"
-       echo "${TEST_NAME}.sh -t 0008   -- Executes test ID 0008 number of times is recomended"
+       echo "${TEST_NAME}.sh -t 0008   -- Executes test ID 0008 the recommended number of times"
        echo "${TEST_NAME}.sh -w 0008   -- Watch test ID 0008 run until an error occurs"
        echo "${TEST_NAME}.sh -s 0008   -- Run test ID 0008 once"
        echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times"
index 30195449c63c92c92ffaa8de326d44ec5a152e49..edcfeace465509a3f17c6d7eb381b5da97ffeebc 100644 (file)
@@ -13,6 +13,14 @@ function log() {
        echo "$1" > /dev/kmsg
 }
 
+# skip(msg) - testing can't proceed
+#      msg - explanation
+function skip() {
+       log "SKIP: $1"
+       echo "SKIP: $1" >&2
+       exit 4
+}
+
 # die(msg) - game over, man
 #      msg - dying words
 function die() {
@@ -43,6 +51,12 @@ function loop_until() {
        done
 }
 
+function assert_mod() {
+       local mod="$1"
+
+       modprobe --dry-run "$mod" &>/dev/null
+}
+
 function is_livepatch_mod() {
        local mod="$1"
 
@@ -75,6 +89,9 @@ function __load_mod() {
 function load_mod() {
        local mod="$1"; shift
 
+       assert_mod "$mod" ||
+               skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
        is_livepatch_mod "$mod" &&
                die "use load_lp() to load the livepatch module $mod"
 
@@ -88,6 +105,9 @@ function load_mod() {
 function load_lp_nowait() {
        local mod="$1"; shift
 
+       assert_mod "$mod" ||
+               skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
        is_livepatch_mod "$mod" ||
                die "module $mod is not a livepatch"
 
index 7eaa8a3de26277212aa8350bb90e9098c9118741..b632965e60eb0758a1197faedd44f0b3e7d5c829 100644 (file)
@@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void)
 
        ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
        if (ret < 0) {
-               /*
-                * pidfd_send_signal() will currently return ENOSYS when
-                * CONFIG_PROC_FS is not set.
-                */
                if (errno == ENOSYS)
                        ksft_exit_skip(
-                               "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n",
+                               "%s test: pidfd_send_signal() syscall not supported\n",
                                test_name);
 
                ksft_exit_fail_msg("%s test: Failed to send signal\n",
index 4602326b8f5b9dbe7bd2f9965088030acd5c871d..a4f4d4cf22c3b47c40acf7ae4c6d25d335e3b313 100644 (file)
@@ -451,7 +451,7 @@ static int test_vsys_x(void)
                printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n",
                       segv_err);
        } else {
-               printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n",
+               printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n",
                       segv_err);
                return 1;
        }