Merge branches 'for-4.20/upstream-fixes', 'for-4.21/core', 'for-4.21/hid-asus', ...
author Jiri Kosina <jkosina@suse.cz>
Thu, 3 Jan 2019 11:50:28 +0000 (12:50 +0100)
committer Jiri Kosina <jkosina@suse.cz>
Thu, 3 Jan 2019 11:50:28 +0000 (12:50 +0100)
1850 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/sysfs-class-led-trigger-pattern
Documentation/ABI/testing/sysfs-class-net-dsa
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/pm/cpufreq.rst
Documentation/admin-guide/security-bugs.rst
Documentation/arm64/silicon-errata.txt
Documentation/core-api/xarray.rst
Documentation/cpu-freq/cpufreq-stats.txt
Documentation/crypto/asymmetric-keys.txt
Documentation/devicetree/bindings/arm/cpu-capacity.txt
Documentation/devicetree/bindings/arm/shmobile.txt
Documentation/devicetree/bindings/clock/clock-bindings.txt
Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt [deleted file]
Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt [new file with mode: 0644]
Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt [deleted file]
Documentation/devicetree/bindings/display/panel/simple-panel.txt
Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
Documentation/devicetree/bindings/i2c/i2c-omap.txt
Documentation/devicetree/bindings/input/input-reset.txt
Documentation/devicetree/bindings/media/rockchip-vpu.txt [deleted file]
Documentation/devicetree/bindings/net/can/holt_hi311x.txt
Documentation/devicetree/bindings/net/can/rcar_can.txt
Documentation/devicetree/bindings/net/dsa/dsa.txt
Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt
Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
Documentation/devicetree/bindings/spi/spi-uniphier.txt
Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/csky,mptimer.txt [new file with mode: 0644]
Documentation/filesystems/overlayfs.txt
Documentation/filesystems/porting
Documentation/filesystems/ubifs-authentication.md [new file with mode: 0644]
Documentation/filesystems/ubifs.txt
Documentation/filesystems/vfs.txt
Documentation/i2c/busses/i2c-nvidia-gpu [new file with mode: 0644]
Documentation/input/event-codes.rst
Documentation/kbuild/makefiles.txt
Documentation/media/uapi/mediactl/media-ioc-request-alloc.rst
Documentation/media/uapi/mediactl/media-request-ioc-queue.rst
Documentation/media/uapi/mediactl/media-request-ioc-reinit.rst
Documentation/media/uapi/mediactl/request-api.rst
Documentation/media/uapi/mediactl/request-func-close.rst
Documentation/media/uapi/mediactl/request-func-ioctl.rst
Documentation/media/uapi/mediactl/request-func-poll.rst
Documentation/media/uapi/v4l/dev-meta.rst
Documentation/media/uapi/v4l/vidioc-g-fmt.rst
Documentation/networking/ice.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/rxrpc.txt
Documentation/process/index.rst
Documentation/process/programming-language.rst [new file with mode: 0644]
Documentation/security/keys/core.rst
Documentation/security/self-protection.rst
Documentation/sysctl/kernel.txt
Documentation/userspace-api/spec_ctrl.rst
Documentation/x86/boot.txt
Documentation/x86/x86_64/mm.txt
Documentation/x86/zero-page.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/termios.h
arch/alpha/include/uapi/asm/ioctls.h
arch/alpha/include/uapi/asm/termbits.h
arch/arc/Kconfig
arch/arc/Makefile
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/axs101_defconfig
arch/arc/configs/axs103_defconfig
arch/arc/configs/axs103_smp_defconfig
arch/arc/configs/hsdk_defconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsim_700_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/configs/tb10x_defconfig
arch/arc/configs/vdk_hs38_defconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/cache.h
arch/arc/include/asm/io.h
arch/arc/kernel/setup.c
arch/arc/mm/cache.c
arch/arc/mm/fault.c
arch/arm/boot/dts/am3517-evm.dts
arch/arm/boot/dts/am3517-som.dtsi
arch/arm/boot/dts/arm-realview-pb1176.dts
arch/arm/boot/dts/arm-realview-pb11mp.dts
arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
arch/arm/boot/dts/bcm2837-rpi-3-b.dts
arch/arm/boot/dts/imx51-zii-rdu1.dts
arch/arm/boot/dts/imx53-ppd.dts
arch/arm/boot/dts/imx6sll.dtsi
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/imx7d-nitrogen7.dts
arch/arm/boot/dts/imx7d-pico.dtsi
arch/arm/boot/dts/logicpd-som-lv.dtsi
arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/stm32mp157c.dtsi
arch/arm/boot/dts/sun8i-a83t-bananapi-m3.dts
arch/arm/boot/dts/vf610m4-colibri.dts
arch/arm/include/asm/cputype.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/proc-fns.h
arch/arm/kernel/bugs.c
arch/arm/kernel/ftrace.c
arch/arm/kernel/head-common.S
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/mach-davinci/da830.c
arch/arm/mach-davinci/da850.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-davinci/dm355.c
arch/arm/mach-davinci/dm365.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-imx/cpuidle-imx6sx.c
arch/arm/mach-mmp/cputype.h
arch/arm/mach-omap1/board-ams-delta.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/prm44xx.c
arch/arm/mm/cache-v7.S
arch/arm/mm/cache-v7m.S
arch/arm/mm/dma-mapping.c
arch/arm/mm/proc-macros.S
arch/arm/mm/proc-v7-bugs.c
arch/arm/mm/proc-v7.S
arch/arm/plat-orion/mpp.c
arch/arm/probes/kprobes/opt-arm.c
arch/arm/vfp/vfpmodule.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
arch/arm64/boot/dts/marvell/armada-ap806-quad.dtsi
arch/arm64/boot/dts/marvell/armada-ap806.dtsi
arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
arch/arm64/boot/dts/mediatek/mt7622.dtsi
arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi
arch/arm64/boot/dts/qcom/sdm845-mtp.dts
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/arm64/boot/dts/renesas/r8a77980-condor.dts
arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts
arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi
arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/ftrace.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/crash_dump.c
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/probes/kprobes.c
arch/arm64/kernel/process.c
arch/arm64/kernel/setup.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/init.c
arch/arm64/mm/mmu.c
arch/arm64/net/bpf_jit_comp.c
arch/csky/Kconfig.debug
arch/csky/Makefile
arch/csky/boot/dts/Makefile
arch/csky/include/asm/mmu_context.h
arch/ia64/include/asm/numa.h
arch/ia64/kernel/acpi.c
arch/ia64/mm/numa.c
arch/m68k/include/asm/pgtable_mm.h
arch/microblaze/include/asm/pgtable.h
arch/microblaze/kernel/ftrace.c
arch/mips/Makefile
arch/mips/cavium-octeon/executive/cvmx-helper.c
arch/mips/configs/cavium_octeon_defconfig
arch/mips/include/asm/syscall.h
arch/mips/kernel/ftrace.c
arch/mips/kernel/setup.c
arch/mips/kernel/traps.c
arch/mips/loongson64/loongson-3/numa.c
arch/mips/mm/dma-noncoherent.c
arch/mips/ralink/mt7620.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/vdso/Makefile
arch/nds32/include/asm/pgtable.h
arch/nds32/kernel/ftrace.c
arch/parisc/Makefile
arch/parisc/include/asm/pgtable.h
arch/parisc/include/asm/spinlock.h
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/syscall.S
arch/powerpc/Kconfig
arch/powerpc/Makefile
arch/powerpc/boot/dts/fsl/t2080rdb.dts
arch/powerpc/boot/dts/mpc885ads.dts
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/mmu-8xx.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/ptrace.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/kernel/head_8xx.S
arch/powerpc/kernel/process.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/trace/ftrace.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/emulate.c
arch/powerpc/kvm/trace.h
arch/powerpc/kvm/trace_booke.h
arch/powerpc/kvm/trace_hv.h
arch/powerpc/kvm/trace_pr.h
arch/powerpc/mm/8xx_mmu.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/slb.c
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/perf/8xx-pmu.c
arch/powerpc/platforms/40x/Kconfig
arch/powerpc/platforms/44x/Kconfig
arch/powerpc/platforms/powernv/npu-dma.c
arch/powerpc/platforms/pseries/lparcfg.c
arch/powerpc/xmon/Makefile
arch/riscv/Makefile
arch/riscv/boot/.gitignore [new file with mode: 0644]
arch/riscv/boot/Makefile [new file with mode: 0644]
arch/riscv/boot/install.sh [new file with mode: 0644]
arch/riscv/configs/defconfig
arch/riscv/include/asm/module.h
arch/riscv/include/asm/ptrace.h
arch/riscv/include/asm/uaccess.h
arch/riscv/include/asm/unistd.h
arch/riscv/include/uapi/asm/syscalls.h [deleted file]
arch/riscv/include/uapi/asm/unistd.h [new file with mode: 0644]
arch/riscv/kernel/cpu.c
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/head.S
arch/riscv/kernel/module.c
arch/riscv/kernel/vmlinux.lds.S
arch/riscv/lib/Makefile
arch/s390/Makefile
arch/s390/boot/compressed/Makefile
arch/s390/configs/debug_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/thread_info.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/entry.S
arch/s390/kernel/ftrace.c
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/vdso32/Makefile
arch/s390/kernel/vdso64/Makefile
arch/s390/kernel/vmlinux.lds.S
arch/s390/mm/pgalloc.c
arch/s390/numa/numa.c
arch/sh/kernel/ftrace.c
arch/sparc/kernel/ftrace.c
arch/sparc/kernel/iommu.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/systbls_64.S
arch/sparc/net/bpf_jit_comp_64.c
arch/um/drivers/ubd_kern.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/eboot.c
arch/x86/boot/cpucheck.c
arch/x86/boot/early_serial_console.c
arch/x86/boot/edd.c
arch/x86/boot/header.S
arch/x86/boot/main.c
arch/x86/boot/memory.c
arch/x86/boot/regs.c
arch/x86/boot/video-vesa.c
arch/x86/boot/video.c
arch/x86/entry/calling.h
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/entry_64_compat.S
arch/x86/entry/vdso/Makefile
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore.h
arch/x86/events/intel/uncore_snb.c
arch/x86/events/perf_event.h
arch/x86/include/asm/bootparam_utils.h
arch/x86/include/asm/compat.h
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/nospec-branch.h
arch/x86/include/asm/page_64_types.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pgtable_64_types.h
arch/x86/include/asm/qspinlock.h
arch/x86/include/asm/spec-ctrl.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/x86_init.h
arch/x86/include/asm/xen/page.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/head32.c
arch/x86/kernel/head64.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/ldt.c
arch/x86/kernel/msr.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/process.c
arch/x86/kernel/process.h [new file with mode: 0644]
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/setup.c
arch/x86/kernel/sys_x86_64.c
arch/x86/kernel/traps.c
arch/x86/kernel/vsmp_64.c
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/hugetlbpage.c
arch/x86/mm/mmap.c
arch/x86/mm/numa_emulation.c
arch/x86/mm/pageattr.c
arch/x86/mm/tlb.c
arch/x86/platform/efi/early_printk.c
arch/x86/tools/relocs.c
arch/x86/um/asm/elf.h
arch/x86/xen/enlighten.c
arch/x86/xen/mmu_pv.c
arch/x86/xen/multicalls.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/x86/xen/spinlock.c
arch/xtensa/Kconfig
arch/xtensa/boot/Makefile
arch/xtensa/include/asm/processor.h
arch/xtensa/kernel/asm-offsets.c
arch/xtensa/kernel/head.S
arch/xtensa/kernel/process.c
arch/xtensa/kernel/ptrace.c
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bfq-wf2q.c
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-iolatency.c
block/blk-lib.c
block/blk-merge.c
block/blk-mq.c
block/blk-sysfs.c
block/blk-throttle.c
block/blk.h
block/bounce.c
block/cfq-iosched.c
crypto/Kconfig
crypto/asymmetric_keys/Kconfig
crypto/asymmetric_keys/Makefile
crypto/asymmetric_keys/asym_tpm.c [new file with mode: 0644]
crypto/asymmetric_keys/asymmetric_keys.h
crypto/asymmetric_keys/asymmetric_type.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/asymmetric_keys/pkcs8.asn1 [new file with mode: 0644]
crypto/asymmetric_keys/pkcs8_parser.c [new file with mode: 0644]
crypto/asymmetric_keys/public_key.c
crypto/asymmetric_keys/signature.c
crypto/asymmetric_keys/tpm.asn1 [new file with mode: 0644]
crypto/asymmetric_keys/tpm_parser.c [new file with mode: 0644]
crypto/asymmetric_keys/x509_cert_parser.c
crypto/cbc.c
crypto/cfb.c
crypto/crypto_user_base.c
crypto/crypto_user_stat.c
crypto/pcbc.c
crypto/rsa-pkcs1pad.c
crypto/simd.c
drivers/acpi/Kconfig
drivers/acpi/acpi_platform.c
drivers/acpi/acpica/exserial.c
drivers/acpi/arm64/iort.c
drivers/acpi/device_pm.c
drivers/acpi/nfit/core.c
drivers/acpi/nfit/mce.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/ata/libata-core.c
drivers/ata/sata_rcar.c
drivers/atm/firestream.c
drivers/auxdisplay/panel.c
drivers/base/devres.c
drivers/block/brd.c
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_receiver.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/nbd.c
drivers/block/xen-blkfront.c
drivers/clk/clk-fixed-factor.c
drivers/clk/meson/axg.c
drivers/clk/meson/gxbb.c
drivers/clk/mmp/clk.c
drivers/clk/mvebu/cp110-system-controller.c
drivers/clk/qcom/common.c
drivers/clk/qcom/gcc-qcs404.c
drivers/clk/zynqmp/clkc.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/i8253.c
drivers/clocksource/timer-gx6605s.c [new file with mode: 0644]
drivers/clocksource/timer-mp-csky.c [new file with mode: 0644]
drivers/cpufreq/imx6q-cpufreq.c
drivers/cpufreq/ti-cpufreq.c
drivers/cpuidle/cpuidle-arm.c
drivers/crypto/hisilicon/sec/sec_algs.c
drivers/dma-buf/udmabuf.c
drivers/dma/at_hdmac.c
drivers/dma/dw/core.c
drivers/dma/imx-sdma.c
drivers/dma/ti/cppi41.c
drivers/edac/Kconfig
drivers/edac/skx_edac.c
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/efivars.c
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/memmap.c
drivers/firmware/efi/runtime-wrappers.c
drivers/fsi/Kconfig
drivers/fsi/fsi-sbefifo.c
drivers/fsi/fsi-scom.c
drivers/gnss/serial.c
drivers/gnss/sirf.c
drivers/gpio/gpio-davinci.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
drivers/gpu/drm/ast/ast_drv.c
drivers/gpu/drm/ast/ast_fb.c
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_connector.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fourcc.c
drivers/gpu/drm/drm_internal.h
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/etnaviv/etnaviv_sched.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_fbdev.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_gpu_error.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_cdclk.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_lpe_audio.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
drivers/gpu/drm/meson/meson_crtc.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/meson/meson_venc.c
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_debugfs.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_rd.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/omapdrm/displays/panel-dpi.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/omapdrm/dss/hdmi4.c
drivers/gpu/drm/omapdrm/dss/hdmi5.c
drivers/gpu/drm/omapdrm/dss/omapdss.h
drivers/gpu/drm/omapdrm/dss/venc.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/rcar-du/rcar_du_group.c
drivers/gpu/drm/sun4i/sun4i_lvds.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vc4/vc4_plane.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-asus.c
drivers/hid/hid-core.c
drivers/hid/hid-cougar.c
drivers/hid/hid-debug.c
drivers/hid/hid-hyperv.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-sensor-custom.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hidraw.c
drivers/hid/intel-ish-hid/ipc/pci-ish.c
drivers/hv/channel.c
drivers/hv/channel_mgmt.c
drivers/hv/connection.c
drivers/hv/hv_kvp.c
drivers/hv/hyperv_vmbus.h
drivers/hwmon/hwmon.c
drivers/hwmon/ibmpowernv.c
drivers/hwmon/ina2xx.c
drivers/hwmon/mlxreg-fan.c
drivers/hwmon/raspberrypi-hwmon.c
drivers/hwmon/w83795.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-axxia.c
drivers/i2c/busses/i2c-nvidia-gpu.c [new file with mode: 0644]
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-scmi.c
drivers/i2c/busses/i2c-uniphier-f.c
drivers/i2c/busses/i2c-uniphier.c
drivers/i2c/i2c-core-base.c
drivers/ide/ide-proc.c
drivers/ide/pmac.c
drivers/iio/accel/hid-sensor-accel-3d.c
drivers/iio/gyro/hid-sensor-gyro-3d.c
drivers/iio/humidity/hid-sensor-humidity.c
drivers/iio/light/hid-sensor-als.c
drivers/iio/light/hid-sensor-prox.c
drivers/iio/magnetometer/hid-sensor-magn-3d.c
drivers/iio/magnetometer/st_magn_buffer.c
drivers/iio/orientation/hid-sensor-incl-3d.c
drivers/iio/pressure/hid-sensor-press.c
drivers/iio/temperature/hid-sensor-temperature.c
drivers/infiniband/core/roce_gid_mgmt.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rdmavt/ah.c
drivers/infiniband/sw/rdmavt/ah.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/atkbd.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/mouse/elan_i2c_core.c
drivers/input/mouse/synaptics.c
drivers/input/serio/hyperv-keyboard.c
drivers/input/touchscreen/migor_ts.c
drivers/input/touchscreen/st1232.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-svm.c
drivers/iommu/ipmmu-vmsa.c
drivers/irqchip/irq-mvebu-sei.c
drivers/isdn/mISDN/l1oip_core.c
drivers/leds/trigger/ledtrig-pattern.c
drivers/md/raid0.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-frontends/dvb-pll.c
drivers/media/i2c/tc358743.c
drivers/media/media-request.c
drivers/media/pci/intel/ipu3/ipu3-cio2.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/vicodec/vicodec-core.c
drivers/media/platform/vim2m.c
drivers/media/usb/gspca/gspca.c
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/mfd/cros_ec_dev.c
drivers/misc/atmel-ssc.c
drivers/misc/lkdtm/Makefile
drivers/misc/lkdtm/core.c
drivers/misc/lkdtm/lkdtm.h
drivers/misc/lkdtm/stackleak.c [new file with mode: 0644]
drivers/misc/mic/scif/scif_rma.c
drivers/misc/sgi-gru/grukdump.c
drivers/misc/vmw_vmci/vmci_queue_pair.c
drivers/mmc/host/sdhci-pci-core.c
drivers/mtd/devices/Kconfig
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/nand/bbt.c
drivers/mtd/nand/raw/atmel/nand-controller.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/spi-nor/cadence-quadspi.c
drivers/mtd/spi-nor/spi-nor.c
drivers/mtd/ubi/attach.c
drivers/mtd/ubi/build.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/can/dev.c
drivers/net/can/flexcan.c
drivers/net/can/rcar/rcar_can.c
drivers/net/can/rx-offload.c
drivers/net/can/spi/hi311x.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
drivers/net/can/usb/ucan.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6xxx/global1.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amd/sunlance.c
drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
drivers/net/ethernet/aquantia/atlantic/aq_hw.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.h
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/atheros/alx/alx.h
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
drivers/net/ethernet/cavium/thunder/nic_main.c
drivers/net/ethernet/cavium/thunder/nicvf_main.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
drivers/net/ethernet/ibm/emac/emac.h
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/Kconfig
drivers/net/ethernet/intel/fm10k/fm10k_iov.c
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/fm10k/fm10k_pci.c
drivers/net/ethernet/intel/fm10k/fm10k_type.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_hw_autogen.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_switch.c
drivers/net/ethernet/intel/ice/ice_switch.h
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_type.h
drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbe/Makefile
drivers/net/ethernet/intel/ixgbe/ixgbe.h
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
drivers/net/ethernet/intel/ixgbevf/Makefile
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/lantiq_xrx200.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2/mvpp2.h
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/mellanox/mlx4/Kconfig
drivers/net/ethernet/mellanox/mlx4/alloc.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/microchip/lan743x_main.c
drivers/net/ethernet/microchip/lan743x_main.h
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_fcoe.c
drivers/net/ethernet/qlogic/qed/qed_int.c
drivers/net/ethernet/qlogic/qed/qed_iscsi.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_mcp.c
drivers/net/ethernet/qlogic/qed/qed_rdma.c
drivers/net/ethernet/qlogic/qed/qed_rdma.h
drivers/net/ethernet/qlogic/qed/qed_roce.c
drivers/net/ethernet/qlogic/qed/qed_sp.h
drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
drivers/net/ethernet/qlogic/qed/qed_spq.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/socionext/sni_ave.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/descs_com.h
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/via/via-velocity.c
drivers/net/fddi/defza.c
drivers/net/fddi/defza.h
drivers/net/macvlan.c
drivers/net/ntb_netdev.c
drivers/net/phy/broadcom.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/mscc.c
drivers/net/phy/phy_device.c
drivers/net/phy/realtek.c
drivers/net/phy/sfp-bus.c
drivers/net/rionet.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/ipheth.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.h
drivers/net/wireless/intel/iwlwifi/fw/runtime.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/Kconfig
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76x02.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
drivers/net/wireless/ti/wlcore/sdio.c
drivers/ntb/hw/idt/Kconfig
drivers/ntb/hw/idt/ntb_hw_idt.c
drivers/ntb/hw/idt/ntb_hw_idt.h
drivers/ntb/hw/intel/ntb_hw_gen1.c
drivers/ntb/ntb_transport.c
drivers/nvdimm/nd-core.h
drivers/nvdimm/pfn_devs.c
drivers/nvdimm/region_devs.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/core.c
drivers/nvme/target/io-cmd-file.c
drivers/nvme/target/rdma.c
drivers/nvmem/core.c
drivers/of/base.c
drivers/of/device.c
drivers/of/of_numa.c
drivers/opp/of.c
drivers/opp/ti-opp-supply.c
drivers/pci/controller/dwc/pci-imx6.c
drivers/pci/controller/dwc/pci-layerscape.c
drivers/pci/controller/dwc/pcie-designware-ep.c
drivers/pci/pci-acpi.c
drivers/pci/pci.c
drivers/pci/pcie/aspm.c
drivers/phy/qualcomm/phy-qcom-qusb2.c
drivers/phy/socionext/Kconfig
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/meson/pinctrl-meson8.c
drivers/pinctrl/meson/pinctrl-meson8b.c
drivers/pwm/Kconfig
drivers/pwm/pwm-lpss-platform.c
drivers/pwm/pwm-lpss.c
drivers/pwm/pwm-lpss.h
drivers/pwm/pwm-rcar.c
drivers/pwm/pwm-renesas-tpu.c
drivers/pwm/pwm-tegra.c
drivers/pwm/sysfs.c
drivers/rtc/hctosys.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-hid-sensor-time.c
drivers/rtc/rtc-pcf2127.c
drivers/s390/cio/vfio_ccw_cp.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_cex2a.c
drivers/s390/crypto/zcrypt_cex2c.c
drivers/s390/crypto/zcrypt_cex4.c
drivers/s390/net/ism_drv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/virtio/virtio_ccw.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/scsi/3w-9xxx.c
drivers/scsi/3w-sas.c
drivers/scsi/Kconfig
drivers/scsi/NCR5380.c
drivers/scsi/aha152x.c
drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mvsas/mv_sas.c
drivers/scsi/myrb.c
drivers/scsi/myrs.c
drivers/scsi/pcmcia/aha152x_core.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_lib.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufs-hisi.c
drivers/scsi/ufs/ufs_quirks.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/vmw_pvscsi.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/slimbus/slimbus.h
drivers/soc/ti/knav_qmss.h
drivers/soc/ti/knav_qmss_acc.c
drivers/soc/ti/knav_qmss_queue.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-omap2-mcspi.c
drivers/staging/comedi/comedi.h
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/media/davinci_vpfe/dm365_ipipeif.c
drivers/staging/media/sunxi/cedrus/TODO
drivers/staging/media/sunxi/cedrus/cedrus.c
drivers/staging/most/core.c
drivers/staging/mt7621-dma/mtk-hsdma.c
drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
drivers/staging/rtl8712/mlme_linux.c
drivers/staging/rtl8712/rtl871x_mlme.c
drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/target_core_alua.c
drivers/target/target_core_file.c
drivers/target/target_core_transport.c
drivers/thermal/armada_thermal.c
drivers/thermal/broadcom/bcm2835_thermal.c
drivers/thermal/broadcom/brcmstb_thermal.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250_mtk.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/suncore.c
drivers/tty/tty_baudrate.c
drivers/tty/tty_io.c
drivers/tty/tty_port.c
drivers/tty/vt/vt.c
drivers/uio/uio.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/hub.c
drivers/usb/core/quirks.c
drivers/usb/core/usb.c
drivers/usb/dwc2/pci.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/udc/omap_udc.c
drivers/usb/host/hwa-hc.c
drivers/usb/host/xhci-histb.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci-tegra.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/appledisplay.c
drivers/usb/serial/console.c
drivers/usb/storage/unusual_realtek.h
drivers/usb/typec/ucsi/Kconfig
drivers/usb/typec/ucsi/Makefile
drivers/usb/typec/ucsi/ucsi_ccg.c [new file with mode: 0644]
drivers/usb/usbip/usbip_common.c
drivers/vhost/scsi.c
drivers/vhost/vhost.c
drivers/vhost/vsock.c
drivers/video/backlight/pwm_bl.c
drivers/virtio/virtio_balloon.c
drivers/xen/balloon.c
drivers/xen/grant-table.c
drivers/xen/privcmd-buf.c
drivers/xen/pvcalls-back.c
drivers/xen/pvcalls-front.c
drivers/xen/xlate_mmu.c
fs/9p/vfs_addr.c
fs/9p/vfs_dir.c
fs/9p/xattr.c
fs/afs/Kconfig
fs/afs/Makefile
fs/afs/addr_list.c
fs/afs/afs.h
fs/afs/cache.c
fs/afs/callback.c
fs/afs/cell.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/dynroot.c
fs/afs/file.c
fs/afs/flock.c
fs/afs/fs_probe.c [new file with mode: 0644]
fs/afs/fsclient.c
fs/afs/inode.c
fs/afs/internal.h
fs/afs/misc.c
fs/afs/mntpt.c
fs/afs/proc.c
fs/afs/protocol_yfs.h [new file with mode: 0644]
fs/afs/rotate.c
fs/afs/rxrpc.c
fs/afs/security.c
fs/afs/server.c
fs/afs/server_list.c
fs/afs/super.c
fs/afs/vl_list.c [new file with mode: 0644]
fs/afs/vl_probe.c [new file with mode: 0644]
fs/afs/vl_rotate.c [new file with mode: 0644]
fs/afs/vlclient.c
fs/afs/volume.c
fs/afs/write.c
fs/afs/xattr.c
fs/afs/yfsclient.c [new file with mode: 0644]
fs/aio.c
fs/bfs/inode.c
fs/block_dev.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/tree-checker.c
fs/btrfs/tree-log.c
fs/buffer.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/cachefiles/xattr.c
fs/ceph/file.c
fs/ceph/mds_client.c
fs/ceph/quota.c
fs/cifs/Kconfig
fs/cifs/cifs_debug.c
fs/cifs/cifs_spnego.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smbdirect.c
fs/cifs/trace.h
fs/cifs/transport.c
fs/dax.c
fs/direct-io.c
fs/dlm/lowcomms.c
fs/exofs/super.c
fs/exportfs/expfs.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext4/ext4.h
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fscache/object.c
fs/fuse/dev.c
fs/fuse/file.c
fs/gfs2/bmap.c
fs/gfs2/rgrp.c
fs/hfs/btree.c
fs/hfsplus/btree.c
fs/inode.c
fs/ioctl.c
fs/iomap.c
fs/namespace.c
fs/nfs/callback_proc.c
fs/nfs/delegation.c
fs/nfs/direct.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayout.h
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/nfs42proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4proc.c
fs/nfsd/vfs.c
fs/nilfs2/btnode.c
fs/notify/fanotify/fanotify.c
fs/notify/fsnotify.c
fs/ntfs/namei.c
fs/ocfs2/aops.c
fs/ocfs2/buffer_head_io.c
fs/ocfs2/cluster/masklog.h
fs/ocfs2/cluster/tcp.c
fs/ocfs2/dir.c
fs/ocfs2/dlmglue.c
fs/ocfs2/export.c
fs/ocfs2/file.c
fs/ocfs2/journal.c
fs/ocfs2/move_extents.c
fs/ocfs2/refcounttree.c
fs/ocfs2/refcounttree.h
fs/ocfs2/stackglue.c
fs/ocfs2/stackglue.h
fs/orangefs/inode.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/proc/base.c
fs/pstore/ram.c
fs/read_write.c
fs/splice.c
fs/sysv/inode.c
fs/ubifs/Kconfig
fs/ubifs/Makefile
fs/ubifs/auth.c [new file with mode: 0644]
fs/ubifs/debug.c
fs/ubifs/gc.c
fs/ubifs/io.c
fs/ubifs/journal.c
fs/ubifs/log.c
fs/ubifs/lpt.c
fs/ubifs/lpt_commit.c
fs/ubifs/master.c
fs/ubifs/misc.h
fs/ubifs/recovery.c
fs/ubifs/replay.c
fs/ubifs/sb.c
fs/ubifs/super.c
fs/ubifs/tnc.c
fs/ubifs/tnc_commit.c
fs/ubifs/tnc_misc.c
fs/ubifs/ubifs-media.h
fs/ubifs/ubifs.h
fs/udf/super.c
fs/udf/unicode.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_file.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_message.c
fs/xfs/xfs_qm_bhv.c
fs/xfs/xfs_reflink.c
fs/xfs/xfs_reflink.h
fs/xfs/xfs_trace.h
include/asm-generic/4level-fixup.h
include/asm-generic/5level-fixup.h
include/asm-generic/pgtable-nop4d-hack.h
include/asm-generic/pgtable-nop4d.h
include/asm-generic/pgtable-nopmd.h
include/asm-generic/pgtable-nopud.h
include/asm-generic/pgtable.h
include/crypto/asym_tpm_subtype.h [new file with mode: 0644]
include/crypto/public_key.h
include/drm/drm_connector.h
include/keys/asymmetric-subtype.h
include/keys/trusted.h [new file with mode: 0644]
include/linux/adxl.h
include/linux/avf/virtchnl.h
include/linux/bio.h
include/linux/blk-cgroup.h
include/linux/blk_types.h
include/linux/bpf_verifier.h
include/linux/can/dev.h
include/linux/can/rx-offload.h
include/linux/ceph/ceph_features.h
include/linux/cgroup.h
include/linux/compat.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler-intel.h
include/linux/compiler.h
include/linux/compiler_attributes.h [new file with mode: 0644]
include/linux/compiler_types.h
include/linux/cpuhotplug.h
include/linux/dax.h
include/linux/dma-direct.h
include/linux/efi.h
include/linux/filter.h
include/linux/fs.h
include/linux/fscache-cache.h
include/linux/ftrace.h
include/linux/gfp.h
include/linux/hid-sensor-hub.h
include/linux/hid.h
include/linux/hyperv.h
include/linux/i8253.h
include/linux/inetdevice.h
include/linux/key-type.h
include/linux/keyctl.h [new file with mode: 0644]
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mtd/nand.h
include/linux/net_dim.h
include/linux/netdevice.h
include/linux/netfilter/ipset/ip_set.h
include/linux/netfilter/ipset/ip_set_comment.h
include/linux/netfilter/nf_conntrack_proto_gre.h
include/linux/nmi.h
include/linux/notifier.h
include/linux/platform_data/gpio-davinci.h
include/linux/psi.h
include/linux/pstore.h
include/linux/ptrace.h
include/linux/sched.h
include/linux/sched/smt.h [new file with mode: 0644]
include/linux/sfp.h
include/linux/skbuff.h
include/linux/stackleak.h [new file with mode: 0644]
include/linux/sunrpc/gss_krb5.h
include/linux/sunrpc/xdr.h
include/linux/tcp.h
include/linux/tracehook.h
include/linux/tracepoint.h
include/linux/tty.h
include/linux/uio.h
include/linux/usb.h
include/linux/usb/quirks.h
include/linux/writeback.h
include/linux/xarray.h
include/media/media-request.h
include/media/v4l2-mem2mem.h
include/net/addrconf.h
include/net/af_rxrpc.h
include/net/af_unix.h
include/net/if_inet6.h
include/net/neighbour.h
include/net/netfilter/ipv4/nf_nat_masquerade.h
include/net/netfilter/ipv6/nf_nat_masquerade.h
include/net/netfilter/nf_conntrack_l4proto.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/sound/pcm_params.h
include/sound/soc.h
include/trace/events/afs.h
include/trace/events/kyber.h
include/trace/events/rxrpc.h
include/trace/events/sched.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/bpf.h
include/uapi/linux/input-event-codes.h
include/uapi/linux/keyctl.h
include/uapi/linux/kfd_ioctl.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter_bridge.h
include/uapi/linux/perf_event.h
include/uapi/linux/prctl.h
include/uapi/linux/sctp.h
include/uapi/linux/v4l2-controls.h
include/uapi/linux/virtio_balloon.h
include/xen/balloon.h
include/xen/xen-ops.h
init/Kconfig
init/initramfs.c
kernel/Makefile
kernel/bpf/btf.c
kernel/bpf/core.c
kernel/bpf/local_storage.c
kernel/bpf/queue_stack_maps.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup.c
kernel/configs/kvm_guest.config
kernel/cpu.c
kernel/debug/kdb/kdb_bt.c
kernel/debug/kdb/kdb_io.c
kernel/debug/kdb/kdb_keyboard.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/debug/kdb/kdb_support.c
kernel/dma/swiotlb.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/fork.c
kernel/irq/matrix.c
kernel/kcov.c
kernel/kexec_file.c
kernel/ptrace.c
kernel/resource.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/psi.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/sched/topology.c
kernel/stackleak.c [new file with mode: 0644]
kernel/sysctl.c
kernel/time/posix-cpu-timers.c
kernel/time/time.c
kernel/trace/blktrace.c
kernel/trace/bpf_trace.c
kernel/trace/ftrace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c
kernel/trace/trace_irqsoff.c
kernel/trace/trace_probe.c
kernel/trace/trace_sched_wakeup.c
kernel/user_namespace.c
lib/debugobjects.c
lib/iov_iter.c
lib/raid6/test/Makefile
lib/test_firmware.c
lib/test_hexdump.c
lib/test_kmod.c
lib/test_xarray.c
lib/ubsan.c
lib/xarray.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/khugepaged.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page_alloc.c
mm/page_io.c
mm/page_poison.c
mm/percpu.c
mm/rmap.c
mm/shmem.c
mm/swapfile.c
mm/truncate.c
mm/userfaultfd.c
mm/vmstat.c
mm/z3fold.c
net/9p/client.c
net/9p/trans_virtio.c
net/batman-adv/bat_v_elp.c
net/batman-adv/fragmentation.c
net/bluetooth/6lowpan.c
net/bluetooth/a2mp.c
net/bluetooth/smp.c
net/bpf/test_run.c
net/bridge/br_private.h
net/bridge/br_vlan.c
net/can/raw.c
net/ceph/messenger.c
net/core/dev.c
net/core/filter.c
net/core/flow_dissector.c
net/core/netpoll.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dsa/master.c
net/dsa/slave.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel_core.c
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/nf_nat_masquerade_ipv4.c
net/ipv4/netfilter/nft_masq_ipv4.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/anycast.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/netfilter.c
net/ipv6/netfilter/ip6t_MASQUERADE.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
net/ipv6/netfilter/nft_masq_ipv6.c
net/ipv6/reassembly.c
net/ipv6/route.c
net/ipv6/seg6_iptunnel.c
net/l2tp/l2tp_core.c
net/mac80211/cfg.c
net/mac80211/iface.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/status.c
net/mac80211/tx.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/ipset/ip_set_list_set.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_proto_dccp.c
net/netfilter/nf_conntrack_proto_generic.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_conntrack_proto_icmp.c
net/netfilter/nf_conntrack_proto_icmpv6.c
net/netfilter/nf_conntrack_proto_sctp.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_conntrack_proto_udp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_compat.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_numgen.c
net/netfilter/nft_osf.c
net/netfilter/xt_IDLETIMER.c
net/netfilter/xt_RATEEST.c
net/netfilter/xt_hashlimit.c
net/openvswitch/conntrack.c
net/openvswitch/flow_netlink.c
net/packet/af_packet.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_event.c
net/rxrpc/output.c
net/sched/act_mirred.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/cls_flower.c
net/sched/sch_fq.c
net/sched/sch_netem.c
net/sctp/associola.c
net/sctp/chunk.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/sctp/stream.c
net/smc/af_smc.c
net/smc/smc_cdc.c
net/smc/smc_cdc.h
net/smc/smc_clc.c
net/smc/smc_core.c
net/smc/smc_core.h
net/smc/smc_ism.c
net/smc/smc_ism.h
net/smc/smc_wr.c
net/socket.c
net/sunrpc/auth_generic.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_krb5_seal.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/sunrpc/clnt.c
net/sunrpc/svcsock.c
net/sunrpc/xdr.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/discover.c
net/tipc/link.c
net/tipc/net.c
net/tipc/net.h
net/tipc/node.c
net/tipc/socket.c
net/tipc/topsrv.c
net/tls/tls_device.c
net/tls/tls_sw.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/sme.c
net/wireless/util.c
net/x25/af_x25.c
net/x25/x25_in.c
net/xfrm/Kconfig
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/hidraw/hid-example.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.extrawarn
scripts/Makefile.gcc-plugins
scripts/faddr2line
scripts/gcc-plugins/Kconfig
scripts/gcc-plugins/stackleak_plugin.c [new file with mode: 0644]
scripts/kconfig/Makefile
scripts/kconfig/conf.c
scripts/kconfig/merge_config.sh
scripts/package/builddeb
scripts/package/mkdebian
scripts/package/mkspec
scripts/setlocalversion
scripts/spdxcheck.py
scripts/unifdef.c
security/apparmor/apparmorfs.c
security/apparmor/file.c
security/apparmor/include/cred.h
security/apparmor/include/net.h
security/apparmor/include/policy.h
security/apparmor/include/secid.h
security/apparmor/lib.c
security/apparmor/lsm.c
security/apparmor/net.c
security/apparmor/policy.c
security/apparmor/policy_unpack.c
security/apparmor/secid.c
security/integrity/digsig_asymmetric.c
security/keys/Makefile
security/keys/compat.c
security/keys/internal.h
security/keys/keyctl.c
security/keys/keyctl_pkey.c [new file with mode: 0644]
security/keys/trusted.c
security/keys/trusted.h [deleted file]
security/selinux/hooks.c
security/selinux/nlmsgtab.c
security/selinux/ss/mls.c
sound/core/control.c
sound/core/oss/pcm_oss.c
sound/core/oss/pcm_plugin.c
sound/core/pcm_native.c
sound/firewire/amdtp-stream.c
sound/firewire/dice/dice.c
sound/isa/wss/wss_lib.c
sound/pci/ac97/ac97_codec.c
sound/pci/ca0106/ca0106.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/thinkpad_helper.c
sound/soc/codecs/hdac_hdmi.c
sound/soc/codecs/pcm186x.h
sound/soc/codecs/pcm3060.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/Kconfig
sound/soc/intel/boards/Kconfig
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/skylake/skl.c
sound/soc/omap/omap-abe-twl6040.c
sound/soc/omap/omap-dmic.c
sound/soc/omap/omap-mcbsp.c
sound/soc/omap/omap-mcpdm.c
sound/soc/qcom/common.c
sound/soc/qcom/qdsp6/q6afe-dai.c
sound/soc/qcom/qdsp6/q6afe.c
sound/soc/qcom/qdsp6/q6asm-dai.c
sound/soc/qcom/qdsp6/q6routing.c
sound/soc/rockchip/rockchip_pcm.c
sound/soc/sh/rcar/ssi.c
sound/soc/soc-acpi.c
sound/soc/soc-core.c
sound/soc/stm/stm32_sai_sub.c
sound/soc/sunxi/Kconfig
sound/soc/sunxi/sun8i-codec.c
sound/sparc/cs4231.c
sound/usb/card.c
sound/usb/quirks-table.h
sound/usb/quirks.c
tools/arch/arm64/include/asm/barrier.h
tools/arch/arm64/include/uapi/asm/unistd.h
tools/arch/powerpc/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
tools/bpf/bpftool/Documentation/bpftool-map.rst
tools/bpf/bpftool/Documentation/bpftool-net.rst
tools/bpf/bpftool/Documentation/bpftool-perf.rst
tools/bpf/bpftool/Documentation/bpftool-prog.rst
tools/bpf/bpftool/Documentation/bpftool.rst
tools/bpf/bpftool/btf_dumper.c
tools/bpf/bpftool/common.c
tools/bpf/bpftool/main.h
tools/bpf/bpftool/prog.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-get_current_dir_name.c [new file with mode: 0644]
tools/include/uapi/asm-generic/ioctls.h
tools/include/uapi/asm-generic/unistd.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/fs.h [new file with mode: 0644]
tools/include/uapi/linux/if_link.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/mman.h
tools/include/uapi/linux/netlink.h
tools/include/uapi/linux/perf_event.h
tools/include/uapi/linux/pkt_cls.h [new file with mode: 0644]
tools/include/uapi/linux/prctl.h
tools/include/uapi/linux/tc_act/tc_bpf.h [new file with mode: 0644]
tools/include/uapi/sound/asound.h
tools/lib/bpf/libbpf.c
tools/lib/subcmd/parse-options.c
tools/lib/subcmd/parse-options.h
tools/objtool/check.c
tools/objtool/check.h
tools/objtool/elf.c
tools/objtool/elf.h
tools/perf/Documentation/build-xed.txt [new file with mode: 0644]
tools/perf/Documentation/intel-pt.txt
tools/perf/Documentation/itrace.txt
tools/perf/Documentation/perf-list.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Documentation/perf-trace.txt
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
tools/perf/arch/sparc/Makefile
tools/perf/arch/sparc/annotate/instructions.c [new file with mode: 0644]
tools/perf/builtin-record.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/check-headers.sh
tools/perf/examples/bpf/augmented_raw_syscalls.c [new file with mode: 0644]
tools/perf/jvmti/jvmti_agent.c
tools/perf/perf.h
tools/perf/scripts/python/call-graph-from-sql.py [deleted file]
tools/perf/scripts/python/export-to-postgresql.py
tools/perf/scripts/python/export-to-sqlite.py
tools/perf/scripts/python/exported-sql-viewer.py [new file with mode: 0755]
tools/perf/tests/attr/base-record
tools/perf/tests/attr/test-record-group-sampling
tools/perf/trace/beauty/Build
tools/perf/trace/beauty/beauty.h
tools/perf/trace/beauty/clone.c
tools/perf/trace/beauty/drm_ioctl.sh
tools/perf/trace/beauty/eventfd.c
tools/perf/trace/beauty/fcntl.c
tools/perf/trace/beauty/flock.c
tools/perf/trace/beauty/futex_op.c
tools/perf/trace/beauty/futex_val3.c
tools/perf/trace/beauty/ioctl.c
tools/perf/trace/beauty/kcmp.c
tools/perf/trace/beauty/kcmp_type.sh
tools/perf/trace/beauty/kvm_ioctl.sh
tools/perf/trace/beauty/madvise_behavior.sh
tools/perf/trace/beauty/mmap.c
tools/perf/trace/beauty/mmap_flags.sh [new file with mode: 0755]
tools/perf/trace/beauty/mode_t.c
tools/perf/trace/beauty/mount_flags.c [new file with mode: 0644]
tools/perf/trace/beauty/mount_flags.sh [new file with mode: 0755]
tools/perf/trace/beauty/msg_flags.c
tools/perf/trace/beauty/open_flags.c
tools/perf/trace/beauty/perf_event_open.c
tools/perf/trace/beauty/perf_ioctl.sh
tools/perf/trace/beauty/pid.c
tools/perf/trace/beauty/pkey_alloc.c
tools/perf/trace/beauty/pkey_alloc_access_rights.sh
tools/perf/trace/beauty/prctl.c
tools/perf/trace/beauty/prctl_option.sh
tools/perf/trace/beauty/sched_policy.c
tools/perf/trace/beauty/seccomp.c
tools/perf/trace/beauty/signum.c
tools/perf/trace/beauty/sndrv_ctl_ioctl.sh
tools/perf/trace/beauty/sndrv_pcm_ioctl.sh
tools/perf/trace/beauty/sockaddr.c
tools/perf/trace/beauty/socket.c
tools/perf/trace/beauty/socket_ipproto.sh
tools/perf/trace/beauty/socket_type.c
tools/perf/trace/beauty/statx.c
tools/perf/trace/beauty/vhost_virtio_ioctl.sh
tools/perf/trace/beauty/waitid_options.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/cs-etm.c
tools/perf/util/env.h
tools/perf/util/event.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/genelf.h
tools/perf/util/get_current_dir_name.c [new file with mode: 0644]
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/intel-bts.c
tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
tools/perf/util/intel-pt-decoder/intel-pt-log.c
tools/perf/util/intel-pt-decoder/intel-pt-log.h
tools/perf/util/intel-pt.c
tools/perf/util/machine.c
tools/perf/util/namespaces.c
tools/perf/util/namespaces.h
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/pmu.c
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.h
tools/perf/util/thread-stack.c
tools/perf/util/thread-stack.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/unwind-libdw.c
tools/perf/util/util.h
tools/power/cpupower/Makefile
tools/power/cpupower/bench/Makefile
tools/power/cpupower/debug/x86_64/Makefile
tools/power/cpupower/lib/cpufreq.c
tools/power/cpupower/lib/cpuidle.c
tools/power/cpupower/lib/cpupower.c
tools/power/cpupower/lib/cpupower_intern.h
tools/testing/nvdimm/test/nfit.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/flow_dissector_load.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_netcnt.c
tools/testing/selftests/bpf/test_sk_lookup_kern.c
tools/testing/selftests/bpf/test_skb_cgroup_id.sh
tools/testing/selftests/bpf/test_sock_addr.sh
tools/testing/selftests/bpf/test_verifier.c
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
tools/testing/selftests/netfilter/Makefile [new file with mode: 0644]
tools/testing/selftests/netfilter/config [new file with mode: 0644]
tools/testing/selftests/netfilter/nft_trans_stress.sh [new file with mode: 0755]
tools/testing/selftests/powerpc/cache_shape/Makefile
tools/testing/selftests/powerpc/mm/wild_bctr.c
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/ptrace/Makefile
tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c
tools/testing/selftests/powerpc/security/Makefile
tools/testing/selftests/powerpc/security/rfi_flush.c
tools/testing/selftests/powerpc/signal/Makefile
tools/testing/selftests/powerpc/switch_endian/Makefile
tools/testing/selftests/powerpc/utils.c
tools/testing/selftests/proc/proc-self-map-files-002.c
tools/testing/selftests/tc-testing/tdc.py

index a76be45fef6ca5b2d23139304ff5ea338bdd1d07..28fecafa65069c1af077453a4159a6a290949982 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -159,6 +159,7 @@ Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
 Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
 Praveen BP <praveenbp@ti.com>
+Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
 Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/CREDITS b/CREDITS
index 5befd2d714d0037548bed049a979dc4fcee1d300..7d397ee675242954fa00269477522cdfdd885068 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -2138,6 +2138,10 @@ E: paul@laufernet.com
 D: Soundblaster driver fixes, ISAPnP quirk
 S: California, USA
 
+N: Jarkko Lavinen
+E: jarkko.lavinen@nokia.com
+D: OMAP MMC support
+
 N: Jonathan Layes
 D: ARPD support
 
@@ -2200,6 +2204,10 @@ S: Post Office Box 371
 S: North Little Rock, Arkansas 72115
 S: USA
 
+N: Christopher Li
+E: sparse@chrisli.org
+D: Sparse maintainer 2009 - 2018
+
 N: Stephan Linz
 E: linz@mazet.de
 E: Stephan.Linz@gmx.de
@@ -2533,6 +2541,10 @@ S: Ormond
 S: Victoria 3163
 S: Australia
 
+N: Eric Miao
+E: eric.y.miao@gmail.com
+D: MMP support
+
 N: Pauline Middelink
 E: middelin@polyware.nl
 D: General low-level bug fixes, /proc fixes, identd support
@@ -4107,6 +4119,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Haojian Zhuang
+E: haojian.zhuang@gmail.com
+D: MMP support
+
 N: Richard Zidlicky
 E: rz@linux-m68k.org, rdzidlic@geocities.com
 W: http://www.geocities.com/rdzidlic
index fb3d1e03b8819bb950e961172d27dac3a3191129..1e5d172e064624d96216eae51a4be60c02a29979 100644 (file)
@@ -37,8 +37,8 @@ Description:
                  0-|   /             \/             \/
                    +---0----1----2----3----4----5----6------------> time (s)
 
-               2. To make the LED go instantly from one brigntess value to another,
-               we should use use zero-time lengths (the brightness must be same as
+               2. To make the LED go instantly from one brightness value to another,
+               we should use zero-time lengths (the brightness must be same as
                the previous tuple's). So the format should be:
                "brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
                brightness_2 0 ...". For example:
index f240221e071ef7b76f6f3d4cdd584a34a4e6f212..985d84c585c669084d37fb3df5391a93b4094816 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/class/net/<iface>/tagging
+What:          /sys/class/net/<iface>/dsa/tagging
 Date:          August 2018
 KernelVersion: 4.20
 Contact:       netdev@vger.kernel.org
index 8384c681a4b2e0cb88595b0e95eb6660ab5d7857..476722b7b6367ca38bf0e3263f3e132b515dcfd6 100644 (file)
@@ -1879,10 +1879,8 @@ following two functions.
 
   wbc_init_bio(@wbc, @bio)
        Should be called for each bio carrying writeback data and
-       associates the bio with the inode's owner cgroup and the
-       corresponding request queue.  This must be called after
-       a queue (device) has been associated with the bio and
-       before submission.
+       associates the bio with the inode's owner cgroup.  Can be
+       called anytime between bio allocation and submission.
 
   wbc_account_io(@wbc, @page, @bytes)
        Should be called for each data segment being written out.
@@ -1901,7 +1899,7 @@ the configuration, the bio may be executed at a lower priority and if
 the writeback session is holding shared resources, e.g. a journal
 entry, may lead to priority inversion.  There is no one easy solution
 for the problem.  Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
+cases by skipping wbc_init_bio() or using bio_associate_blkcg()
 directly.
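+
+As an illustration (a sketch, not part of this document: the helper, its
+parameters and the single-page bio are assumptions), a write path using
+both hooks under the relaxed rule above might look like::
+
+    #include <linux/bio.h>
+    #include <linux/writeback.h>
+
+    static void sketch_submit_page(struct writeback_control *wbc,
+                                   struct page *page,
+                                   struct block_device *bdev,
+                                   sector_t sector)
+    {
+        struct bio *bio = bio_alloc(GFP_NOFS, 1);
+
+        wbc_init_bio(wbc, bio);        /* anytime before submission */
+        bio_set_dev(bio, bdev);        /* may now happen after wbc_init_bio() */
+        bio->bi_iter.bi_sector = sector;
+        bio_add_page(bio, page, PAGE_SIZE, 0);
+        wbc_account_io(wbc, page, PAGE_SIZE);  /* charge this data segment */
+        bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
+        submit_bio(bio);
+    }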
 
 
index b90fe3b6bc6c78555d5470bbfec95fd7bd691063..aefd358a5ca36b14963442a689fc76abde6a8f31 100644 (file)
                        causing system reset or hang due to sending
                        INIT from AP to BSP.
 
-       disable_counter_freezing [HW]
+       perf_v4_pmi=    [X86,INTEL]
+                       Format: <bool>
                        Disable Intel PMU counter freezing feature.
                        The feature only exists starting from
                        Arch Perfmon v4 (Skylake and newer).
                        earlyprintk=serial[,0x...[,baudrate]]
                        earlyprintk=ttySn[,baudrate]
                        earlyprintk=dbgp[debugController#]
-                       earlyprintk=pciserial,bus:device.function[,baudrate]
+                       earlyprintk=pciserial[,force],bus:device.function[,baudrate]
                        earlyprintk=xdbc[xhciController#]
 
                        earlyprintk is useful when the kernel crashes before
 
                        The sclp output can only be used on s390.
 
+                       The optional "force" argument to "pciserial"
+                       enables use of a PCI device even when its
+                       classcode is not of the UART class.
+
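+                       Example (hypothetical PCI address and baud rate):
+
+                               earlyprintk=pciserial,force,00:18.1,115200
+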
        edac_report=    [HW,EDAC] Control how to report EDAC event
                        Format: {"on" | "off" | "force"}
                        on: enable EDAC to report H/W event. May be overridden
                        before loading.
                        See Documentation/blockdev/ramdisk.txt.
 
+       psi=            [KNL] Enable or disable pressure stall information
+                       tracking.
+                       Format: <bool>
+
        psmouse.proto=  [HW,MOUSE] Highest PS2 mouse protocol extension to
                        probe for; one of (bare|imps|exps|lifebook|any).
        psmouse.rate=   [HW,MOUSE] Set desired mouse report rate, in reports
 
        spectre_v2=     [X86] Control mitigation of Spectre variant 2
                        (indirect branch speculation) vulnerability.
+                       The default operation protects the kernel from
+                       user space attacks.
 
-                       on   - unconditionally enable
-                       off  - unconditionally disable
+                       on   - unconditionally enable, implies
+                              spectre_v2_user=on
+                       off  - unconditionally disable, implies
+                              spectre_v2_user=off
                        auto - kernel detects whether your CPU model is
                               vulnerable
 
                        CONFIG_RETPOLINE configuration option, and the
                        compiler with which the kernel was built.
 
+                       Selecting 'on' will also enable the mitigation
+                       against user space to user space task attacks.
+
+                       Selecting 'off' will disable both the kernel and
+                       the user space protections.
+
                        Specific mitigations can also be selected manually:
 
                        retpoline         - replace indirect branches
                        Not specifying this option is equivalent to
                        spectre_v2=auto.
 
+       spectre_v2_user=
+                       [X86] Control mitigation of Spectre variant 2
+                       (indirect branch speculation) vulnerability between
+                       user space tasks
+
+                       on      - Unconditionally enable mitigations. Is
+                                 enforced by spectre_v2=on
+
+                       off     - Unconditionally disable mitigations. Is
+                                 enforced by spectre_v2=off
+
+                       prctl   - Indirect branch speculation is enabled,
+                                 but mitigation can be enabled via prctl
+                                 per thread.  The mitigation control state
+                                 is inherited on fork.
+
+                       prctl,ibpb
+                               - Like "prctl" above, but only STIBP is
+                                 controlled per thread. IBPB is always
+                                 issued when switching between different
+                                 user space processes.
+
+                       seccomp
+                               - Same as "prctl" above, but all seccomp
+                                 threads will enable the mitigation unless
+                                 they explicitly opt out.
+
+                       seccomp,ibpb
+                               - Like "seccomp" above, but only STIBP is
+                                 controlled per thread. IBPB is always
+                                 issued when switching between different
+                                 user space processes.
+
+                       auto    - Kernel selects the mitigation depending on
+                                 the available CPU features and vulnerability.
+
+                       Default mitigation:
+                       If CONFIG_SECCOMP=y then "seccomp", otherwise "prctl"
+
+                       Not specifying this option is equivalent to
+                       spectre_v2_user=auto.
+
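+                       As an illustration (a sketch; see
+                       Documentation/userspace-api/spec_ctrl.rst for the
+                       authoritative API), a thread can opt out of
+                       indirect branch speculation under the prctl-based
+                       modes with:
+
+                               #include <linux/prctl.h>
+                               #include <sys/prctl.h>
+
+                               /* Disable indirect branch speculation
+                                * for the calling thread.
+                                */
+                               prctl(PR_SET_SPECULATION_CTRL,
+                                     PR_SPEC_INDIRECT_BRANCH,
+                                     PR_SPEC_DISABLE, 0, 0);
+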
        spec_store_bypass_disable=
                        [HW] Control Speculative Store Bypass (SSB) Disable mitigation
                        (Speculative Store Bypass vulnerability)
                                        prevent spurious wakeup);
                                n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
                                        pause after every control message);
+                               o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
+                                       delay after resetting its port);
                        Example: quirks=0781:5580:bk,0a5c:5834:gij
 
        usbhid.mousepoll=
index 47153e64dfb530465ca01d28272e058293eb08b5..7eca9026a9ed2c3ed2a35b7e2184660e8caa9fdf 100644 (file)
@@ -150,7 +150,7 @@ data structures necessary to handle the given policy and, possibly, to add
 a governor ``sysfs`` interface to it.  Next, the governor is started by
 invoking its ``->start()`` callback.
 
-That callback it expected to register per-CPU utilization update callbacks for
+That callback is expected to register per-CPU utilization update callbacks for
 all of the online CPUs belonging to the given policy with the CPU scheduler.
 The utilization update callbacks will be invoked by the CPU scheduler on
 important events, like task enqueue and dequeue, on every iteration of the
index 164bf71149fdf2e6ad099b45196cbd6d1d8347cc..30187d49dc2c7d38869c4073e60093d315c3fc4e 100644 (file)
@@ -32,16 +32,17 @@ Disclosure and embargoed information
 The security list is not a disclosure channel.  For that, see Coordination
 below.
 
-Once a robust fix has been developed, our preference is to release the
-fix in a timely fashion, treating it no differently than any of the other
-thousands of changes and fixes the Linux kernel project releases every
-month.
-
-However, at the request of the reporter, we will postpone releasing the
-fix for up to 5 business days after the date of the report or after the
-embargo has lifted; whichever comes first.  The only exception to that
-rule is if the bug is publicly known, in which case the preference is to
-release the fix as soon as it's available.
+Once a robust fix has been developed, the release process starts.  Fixes
+for publicly known bugs are released immediately.
+
+Although our preference is to release fixes for publicly undisclosed bugs
+as soon as they become available, this may be postponed at the request of
+the reporter or an affected party for up to 7 calendar days from the start
+of the release process, with an exceptional extension to 14 calendar days
+if it is agreed that the criticality of the bug requires more time.  The
+only valid reason for deferring the publication of a fix is to accommodate
+the logistics of QA and large scale rollouts which require release
+coordination.
 
 Whilst embargoed information may be shared with trusted individuals in
 order to develop a fix, such information will not be published alongside
index 76ccded8b74c028cc23288c995459f11f737998c..8f95776211447e7e18efc933c1568c5d5cd3503d 100644 (file)
@@ -57,6 +57,7 @@ stable kernels.
 | ARM            | Cortex-A73      | #858921         | ARM64_ERRATUM_858921        |
 | ARM            | Cortex-A55      | #1024718        | ARM64_ERRATUM_1024718       |
 | ARM            | Cortex-A76      | #1188873        | ARM64_ERRATUM_1188873       |
+| ARM            | Cortex-A76      | #1286807        | ARM64_ERRATUM_1286807       |
 | ARM            | MMU-500         | #841119,#826419 | N/A                         |
 |                |                 |                 |                             |
 | Cavium         | ThunderX ITS    | #22375, #24313  | CAVIUM_ERRATUM_22375        |
index a4e705108f428e970b4c93be376ed3644dcb3649..dbe96cb5558ef5024fd9607a902c25ad430b4cb7 100644 (file)
@@ -74,7 +74,8 @@ using :c:func:`xa_load`.  xa_store will overwrite any entry with the
 new entry and return the previous entry stored at that index.  You can
 use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
 ``NULL`` entry.  There is no difference between an entry that has never
-been stored to and one that has most recently had ``NULL`` stored to it.
+been stored to, one that has been erased and one that has most recently
+had ``NULL`` stored to it.
 
 You can conditionally replace an entry at an index by using
 :c:func:`xa_cmpxchg`.  Like :c:func:`cmpxchg`, it will only succeed if
@@ -105,23 +106,44 @@ may result in the entry being marked at some, but not all of the other
 indices.  Storing into one index may result in the entry retrieved by
 some, but not all of the other indices changing.
 
+Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
+will not need to allocate memory.  The :c:func:`xa_reserve` function
+will store a reserved entry at the indicated index.  Users of the normal
+API will see this entry as containing ``NULL``.  If you do not need to
+use the reserved entry, you can call :c:func:`xa_release` to remove the
+unused entry.  If another user has stored to the entry in the meantime,
+:c:func:`xa_release` will do nothing; if instead you want the entry to
+become ``NULL``, you should use :c:func:`xa_erase`.
+
+If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
+will return ``true``.
+
 Finally, you can remove all entries from an XArray by calling
 :c:func:`xa_destroy`.  If the XArray entries are pointers, you may wish
 to free the entries first.  You can do this by iterating over all present
 entries in the XArray using the :c:func:`xa_for_each` iterator.
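+
+A minimal sketch of the basic API described above (the array, function
+and payload names are illustrative assumptions)::
+
+    #include <linux/xarray.h>
+
+    static DEFINE_XARRAY(sketch_array);
+
+    static void sketch_store(void *item)
+    {
+        void *old;
+
+        old = xa_store(&sketch_array, 42, item, GFP_KERNEL);
+        if (xa_is_err(old))
+            return;                     /* allocation failure */
+        WARN_ON(xa_load(&sketch_array, 42) != item);
+        xa_erase(&sketch_array, 42);    /* equivalent to storing NULL */
+        WARN_ON(!xa_empty(&sketch_array));
+    }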
 
-ID assignment
--------------
+Allocating XArrays
+------------------
+
+If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
+initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
+the XArray changes to track whether entries are in use or not.
 
 You can call :c:func:`xa_alloc` to store the entry at any unused index
 in the XArray.  If you need to modify the array from interrupt context,
 you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
-interrupts while allocating the ID.  Unlike :c:func:`xa_store`, allocating
-a ``NULL`` pointer does not delete an entry.  Instead it reserves an
-entry like :c:func:`xa_reserve` and you can release it using either
-:c:func:`xa_erase` or :c:func:`xa_release`.  To use ID assignment, the
-XArray must be defined with :c:func:`DEFINE_XARRAY_ALLOC`, or initialised
-by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
+interrupts while allocating the ID.
+
+Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
+will mark the entry as being allocated.  Unlike a normal XArray, storing
+``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
+To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
+you only want to free the entry if it's ``NULL``).
+
+You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
+is used to track whether an entry is free or not.  The other marks are
+available for your use.
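+
+A minimal sketch of an allocating XArray (names are illustrative; the
+xa_alloc() calling convention shown is the one from this kernel series
+and may differ in other releases)::
+
+    #include <linux/kernel.h>
+    #include <linux/xarray.h>
+
+    static DEFINE_XARRAY_ALLOC(sketch_ids);
+
+    /* Store @item at the lowest free index and report that index. */
+    static int sketch_assign_id(void *item)
+    {
+        u32 id;
+        int err = xa_alloc(&sketch_ids, &id, UINT_MAX, item, GFP_KERNEL);
+
+        if (err)
+            return err;
+        pr_info("assigned id %u\n", id);
+        return 0;
+    }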
 
 Memory allocation
 -----------------
@@ -158,6 +180,8 @@ Takes RCU read lock:
 
 Takes xa_lock internally:
  * :c:func:`xa_store`
+ * :c:func:`xa_store_bh`
+ * :c:func:`xa_store_irq`
  * :c:func:`xa_insert`
  * :c:func:`xa_erase`
  * :c:func:`xa_erase_bh`
@@ -167,6 +191,9 @@ Takes xa_lock internally:
  * :c:func:`xa_alloc`
  * :c:func:`xa_alloc_bh`
  * :c:func:`xa_alloc_irq`
+ * :c:func:`xa_reserve`
+ * :c:func:`xa_reserve_bh`
+ * :c:func:`xa_reserve_irq`
  * :c:func:`xa_destroy`
  * :c:func:`xa_set_mark`
  * :c:func:`xa_clear_mark`
@@ -177,6 +204,7 @@ Assumes xa_lock held on entry:
  * :c:func:`__xa_erase`
  * :c:func:`__xa_cmpxchg`
  * :c:func:`__xa_alloc`
+ * :c:func:`__xa_reserve`
  * :c:func:`__xa_set_mark`
  * :c:func:`__xa_clear_mark`
 
@@ -234,7 +262,8 @@ Sharing the XArray with interrupt context is also possible, either
 using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
 context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
 in the interrupt handler.  Some of the more common patterns have helper
-functions such as :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
+:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
 
 Sometimes you need to protect access to the XArray with a mutex because
 that lock sits above another mutex in the locking hierarchy.  That does
@@ -322,7 +351,8 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
      - :c:func:`xa_is_zero`
      - Zero entries appear as ``NULL`` through the Normal API, but occupy
        an entry in the XArray which can be used to reserve the index for
-       future use.
+       future use.  This is used by allocating XArrays for allocated entries
+       which are ``NULL``.
 
 Other internal entries may be added in the future.  As far as possible, they
 will be handled by :c:func:`xas_retry`.
index a873855c811d63f3a47cd2ec830404abb89d48c8..14378cecb1723f7d9b4b4f3b0b1ccd0ca3126c26 100644 (file)
@@ -86,9 +86,11 @@ transitions.
 This will give a fine grained information about all the CPU frequency
 transitions. The cat output here is a two dimensional matrix, where an entry
 <i,j> (row i, column j) represents the count of number of transitions from 
-Freq_i to Freq_j. Freq_i is in descending order with increasing rows and 
-Freq_j is in descending order with increasing columns. The output here also 
-contains the actual freq values for each row and column for better readability.
+Freq_i to Freq_j. The Freq_i rows and Freq_j columns follow the order in
+which the driver initially provided the frequency table to the cpufreq
+core, and so may be sorted (ascending or descending) or unsorted.  The
+output here also contains the actual freq values for each row and column
+for better readability.
 
 If the transition table is bigger than PAGE_SIZE, reading this will
 return an -EFBIG error.
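+
+For illustration only (a hypothetical two-frequency policy with invented
+transition counts), the output might look like:
+
+   From  :    To
+         :   1000000    500000
+  1000000:         0         4
+   500000:         3         0
+
+i.e. four transitions from 1000000 kHz to 500000 kHz and three in the
+opposite direction.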
index 5969bf42562a8752535f3183de6a9b66fa07985b..8763866b11cfd0f6af865ed67d8ba6db75a48b60 100644 (file)
@@ -183,6 +183,10 @@ and looks like the following:
 
                void (*describe)(const struct key *key, struct seq_file *m);
                void (*destroy)(void *payload);
+               int (*query)(const struct kernel_pkey_params *params,
+                            struct kernel_pkey_query *info);
+               int (*eds_op)(struct kernel_pkey_params *params,
+                             const void *in, void *out);
                int (*verify_signature)(const struct key *key,
                                        const struct public_key_signature *sig);
        };
@@ -207,12 +211,22 @@ There are a number of operations defined by the subtype:
      asymmetric key will look after freeing the fingerprint and releasing the
      reference on the subtype module.
 
- (3) verify_signature().
+ (3) query().
 
-     Optional.  These are the entry points for the key usage operations.
-     Currently there is only the one defined.  If not set, the caller will be
-     given -ENOTSUPP.  The subtype may do anything it likes to implement an
-     operation, including offloading to hardware.
+     Mandatory.  This is a function for querying the capabilities of a key.
+
+ (4) eds_op().
+
+     Optional.  This is the entry point for the encryption, decryption and
+     signature creation operations (which are distinguished by the operation ID
+     in the parameter struct).  The subtype may do anything it likes to
+     implement an operation, including offloading to hardware.
+
+ (5) verify_signature().
+
+     Optional.  This is the entry point for signature verification.  The
+     subtype may do anything it likes to implement an operation, including
+     offloading to hardware.
 
 
 ==========================
@@ -234,6 +248,8 @@ Examples of blob formats for which parsers could be implemented include:
  - X.509 ASN.1 stream.
  - Pointer to TPM key.
  - Pointer to UEFI key.
+ - PKCS#8 private key [RFC 5208].
+ - PKCS#5 encrypted private key [RFC 2898].
 
 During key instantiation each parser in the list is tried until one doesn't
 return -EBADMSG.
index 9b5685a1d15d9821efb9dd6d34a29ae1f788e31f..84262cdb8d29ae3f95ef94f6e7b9900897774c17 100644 (file)
@@ -59,9 +59,11 @@ mhz values (normalized w.r.t. the highest value found while parsing the DT).
 ===========================================
 
 Example 1 (ARM 64-bit, 6-cpu system, two clusters):
-capacities-dmips-mhz are scaled w.r.t. 1024 (cpu@0 and cpu@1)
-supposing cluster0@max-freq=1100 and custer1@max-freq=850,
-final capacities are 1024 for cluster0 and 446 for cluster1
+The capacities-dmips-mhz or DMIPS/MHz values (scaled to 1024)
+are 1024 and 578 for cluster0 and cluster1. Further normalization
+is done by the operating system based on cluster0@max-freq=1100 and
+cluster1@max-freq=850; the final capacities are 1024 for cluster0 and
+446 for cluster1 (578*850/1100).
 
 cpus {
        #address-cells = <2>;
index f5e0f82fd5031efb1570361eabf2d096769ef7f5..58c4256d37a39e5082cdb5f354548dbe0cf160d6 100644 (file)
@@ -27,7 +27,7 @@ SoCs:
     compatible = "renesas,r8a77470"
   - RZ/G2M (R8A774A1)
     compatible = "renesas,r8a774a1"
-  - RZ/G2E (RA8774C0)
+  - RZ/G2E (R8A774C0)
     compatible = "renesas,r8a774c0"
   - R-Car M1A (R8A77781)
     compatible = "renesas,r8a7778"
index 2ec489eebe723afb0f6cf1700d7869e9d84f0ac6..b646bbcf7f92489063b9f44acdb449ef8c84b416 100644 (file)
@@ -168,3 +168,19 @@ a shared clock is forbidden.
 
 Configuration of common clocks, which affect multiple consumer devices can
 be similarly specified in the clock provider node.
+
+==Protected clocks==
+
+Some platforms or firmware may not fully expose all the clocks to the OS, such
+as in situations where those clocks are used by drivers running at ARM secure
+execution levels. Such a configuration can be specified in the device tree with
+the protected-clocks property in the form of a clock specifier list. This
+property should only be specified in the node that is providing the clocks
+being protected:
+
+   clock-controller@a000f000 {
+        compatible = "vendor,clk95";
+        reg = <0xa000f000 0x1000>;
+        #clock-cells = <1>;
+        ...
+        protected-clocks = <UART3_CLK>, <SPI5_CLK>;
+   };
diff --git a/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt b/Documentation/devicetree/bindings/cpufreq/arm_big_little_dt.txt
deleted file mode 100644 (file)
index 2aa06ac..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-Generic ARM big LITTLE cpufreq driver's DT glue
------------------------------------------------
-
-This is DT specific glue layer for generic cpufreq driver for big LITTLE
-systems.
-
-Both required and optional properties listed below must be defined
-under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
-
-FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
-must be present contiguously. Generic DT driver will check only node 'x' for
-cpu:x.
-
-Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt
-  for details
-
-Optional properties:
-- clock-latency: Specify the possible maximum transition latency for clock,
-  in unit of nanoseconds.
-
-Examples:
-
-cpus {
-       #address-cells = <1>;
-       #size-cells = <0>;
-
-       cpu@0 {
-               compatible = "arm,cortex-a15";
-               reg = <0>;
-               next-level-cache = <&L2>;
-               operating-points = <
-                       /* kHz    uV */
-                       792000  1100000
-                       396000  950000
-                       198000  850000
-               >;
-               clock-latency = <61036>; /* two CLK32 periods */
-       };
-
-       cpu@1 {
-               compatible = "arm,cortex-a15";
-               reg = <1>;
-               next-level-cache = <&L2>;
-       };
-
-       cpu@100 {
-               compatible = "arm,cortex-a7";
-               reg = <100>;
-               next-level-cache = <&L2>;
-               operating-points = <
-                       /* kHz    uV */
-                       792000  950000
-                       396000  750000
-                       198000  450000
-               >;
-               clock-latency = <61036>; /* two CLK32 periods */
-       };
-
-       cpu@101 {
-               compatible = "arm,cortex-a7";
-               reg = <101>;
-               next-level-cache = <&L2>;
-       };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt b/Documentation/devicetree/bindings/display/panel/innolux,p120zdg-bf1.txt
new file mode 100644 (file)
index 0000000..513f034
--- /dev/null
@@ -0,0 +1,22 @@
+Innolux P120ZDG-BF1 12.2 inch eDP 2K display panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "innolux,p120zdg-bf1"
+- power-supply: regulator to provide the supply voltage
+
+Optional properties:
+- enable-gpios: GPIO pin to enable or disable the panel
+- backlight: phandle of the backlight device attached to the panel
+- no-hpd: If HPD isn't hooked up, add this property.
+
+Example:
+       panel_edp: panel-edp {
+               compatible = "innolux,p120zdg-bf1";
+               enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
+               power-supply = <&pm8916_l2>;
+               backlight = <&backlight>;
+               no-hpd;
+       };
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt b/Documentation/devicetree/bindings/display/panel/innolux,tv123wam.txt
deleted file mode 100644 (file)
index a9b3526..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Innolux TV123WAM 12.3 inch eDP 2K display panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "innolux,tv123wam"
-- power-supply: regulator to provide the supply voltage
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-       panel_edp: panel-edp {
-               compatible = "innolux,tv123wam";
-               enable-gpios = <&msmgpio 31 GPIO_ACTIVE_LOW>;
-               power-supply = <&pm8916_l2>;
-               backlight = <&backlight>;
-       };
index 45a457ad38f0f078eed709424e1e237ebcfe420f..b2b872c710f24d69996eb3cb4922ba8dd3915a14 100644 (file)
@@ -11,6 +11,9 @@ Optional properties:
 - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing
 - enable-gpios: GPIO pin to enable or disable the panel
 - backlight: phandle of the backlight device attached to the panel
+- no-hpd: This panel is supposed to communicate that it's ready via HPD
+  (hot plug detect) signal, but the signal isn't hooked up, so we should
+  hardcode the max delay from the panel spec when powering up the panel.
 
 Example:
 
index 091c8dfd322910e14712d4a818e9879538abf3d9..b245363d6d60a0b6a42a6a4d675e1219faedc03d 100644 (file)
@@ -3,6 +3,7 @@
 Required properties:
 - compatible :
   - "fsl,imx7ulp-lpi2c" for LPI2C compatible with the one integrated on i.MX7ULP soc
+  - "fsl,imx8qxp-lpi2c" for LPI2C compatible with the one integrated on i.MX8QXP soc
 - reg : address and length of the lpi2c master registers
 - interrupts : lpi2c interrupt
 - clocks : lpi2c clock specifier
index 7e49839d41249ca5168b0de1ea02781a2798486d..4b90ba9f31b70b712c285af7ef7f7be180b39b14 100644 (file)
@@ -1,8 +1,12 @@
 I2C for OMAP platforms
 
 Required properties :
-- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
-  or "ti,omap4-i2c"
+- compatible : Must be
+       "ti,omap2420-i2c" for OMAP2420 SoCs
+       "ti,omap2430-i2c" for OMAP2430 SoCs
+       "ti,omap3-i2c" for OMAP3 SoCs
+       "ti,omap4-i2c" for OMAP4+ SoCs
+       "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
 - ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
 - #address-cells = <1>;
 - #size-cells = <0>;
index 2bb2626fdb78b4521fc06a08fec7107ec85a71a3..1ca6cc5ebf8ed83a2d9a5d5773ea9ff820d93b34 100644 (file)
@@ -12,7 +12,7 @@ The /chosen node should contain a 'linux,sysrq-reset-seq' child node to define
 a set of keys.
 
 Required property:
-sysrq-reset-seq: array of Linux keycodes, one keycode per cell.
+keyset: array of Linux keycodes, one keycode per cell.
 
 Optional property:
 timeout-ms: duration keys must be pressed together in milliseconds before
diff --git a/Documentation/devicetree/bindings/media/rockchip-vpu.txt b/Documentation/devicetree/bindings/media/rockchip-vpu.txt
deleted file mode 100644 (file)
index 35dc464..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-device-tree bindings for rockchip VPU codec
-
-Rockchip (Video Processing Unit) present in various Rockchip platforms,
-such as RK3288 and RK3399.
-
-Required properties:
-- compatible: value should be one of the following
-               "rockchip,rk3288-vpu";
-               "rockchip,rk3399-vpu";
-- interrupts: encoding and decoding interrupt specifiers
-- interrupt-names: should be "vepu" and "vdpu"
-- clocks: phandle to VPU aclk, hclk clocks
-- clock-names: should be "aclk" and "hclk"
-- power-domains: phandle to power domain node
-- iommus: phandle to a iommu node
-
-Example:
-SoC-specific DT entry:
-       vpu: video-codec@ff9a0000 {
-               compatible = "rockchip,rk3288-vpu";
-               reg = <0x0 0xff9a0000 0x0 0x800>;
-               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-names = "vepu", "vdpu";
-               clocks = <&cru ACLK_VCODEC>, <&cru HCLK_VCODEC>;
-               clock-names = "aclk", "hclk";
-               power-domains = <&power RK3288_PD_VIDEO>;
-               iommus = <&vpu_mmu>;
-       };
index 903a78da65be288cf750af872584b60e9f42c06f..3a9926f99937039022d283817beac8e9bcbbc926 100644 (file)
@@ -17,7 +17,7 @@ Example:
                reg = <1>;
                clocks = <&clk32m>;
                interrupt-parent = <&gpio4>;
-               interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+               interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
                vdd-supply = <&reg5v0>;
                xceiver-supply = <&reg5v0>;
        };
index cc4372842bf37670284d9b676699b5eb00882de9..9936b9ee67c36672afeb050a5641aa44cebbb728 100644 (file)
@@ -5,6 +5,7 @@ Required properties:
 - compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
              "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
              "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+             "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
              "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
              "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
              "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
              "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
              "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
              "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
+             "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
              "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
              "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
              compatible device.
-             "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
+             "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
+             compatible device.
              When compatible with the generic version, nodes must list the
              SoC-specific version corresponding to the platform first
              followed by the generic version.
 
 - reg: physical base address and size of the R-Car CAN register map.
 - interrupts: interrupt specifier for the sole interrupt.
-- clocks: phandles and clock specifiers for 3 CAN clock inputs.
-- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
+         devices.
+         phandles and clock specifiers for 3 CAN clock inputs for every other
+         SoC.
+- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
+              3 clock input name strings for every other SoC: "clkp1", "clkp2",
+              "can_clk".
 - pinctrl-0: pin control group to be used for this controller.
 - pinctrl-names: must be "default".
 
-Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796"
-compatible:
-In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock
-and can be used by both CAN and CAN FD controller at the same time. It needs to
-be scaled to maximum frequency if any of these controllers use it. This is done
+Required properties for R8A7795, R8A7796 and R8A77965:
+For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
+be used by both CAN and CAN FD controller at the same time. It needs to be
+scaled to maximum frequency if any of these controllers use it. This is done
 using the below properties:
 
 - assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:
 Optional properties:
 - renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
                            <0x0> (default) : Peripheral clock (clkp1)
-                           <0x1> : Peripheral clock (clkp2)
-                           <0x3> : Externally input clock
+                           <0x1> : Peripheral clock (clkp2) (not supported by
+                                   RZ/G2 devices)
+                           <0x3> : External input clock
 
 Example
 -------
index 3ceeb8de11963572cc1bd8ce324433e0dbf6bd03..35694c0c376b91c8b51982d1dce992682b983d92 100644 (file)
@@ -7,7 +7,7 @@ limitations.
 Current Binding
 ---------------
 
-Switches are true Linux devices and can be probes by any means. Once
+Switches are true Linux devices and can be probed by any means. Once
 probed, they register to the DSA framework, passing a node
 pointer. This node is expected to fulfil the following binding, and
 may contain additional properties as required by the device it is
index adf20b2bdf71504935cea1dca96a17e03393ad98..fbc198d5dd39eaee1838a37368a7c8025938f530 100644 (file)
@@ -40,24 +40,36 @@ Required properties:
                "ref" for 19.2 MHz ref clk,
                "com_aux" for phy common block aux clock,
                "ref_aux" for phy reference aux clock,
+
+               For "qcom,ipq8074-qmp-pcie-phy": no clocks are listed.
                For "qcom,msm8996-qmp-pcie-phy" must contain:
                        "aux", "cfg_ahb", "ref".
                For "qcom,msm8996-qmp-usb3-phy" must contain:
                        "aux", "cfg_ahb", "ref".
-               For "qcom,qmp-v3-usb3-phy" must contain:
+               For "qcom,sdm845-qmp-usb3-phy" must contain:
+                       "aux", "cfg_ahb", "ref", "com_aux".
+               For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
                        "aux", "cfg_ahb", "ref", "com_aux".
+               For "qcom,sdm845-qmp-ufs-phy" must contain:
+                       "ref", "ref_aux".
 
  - resets: a list of phandles and reset controller specifier pairs,
           one for each entry in reset-names.
  - reset-names: "phy" for reset of phy block,
                "common" for phy common block reset,
-               "cfg" for phy's ahb cfg block reset (Optional).
+               "cfg" for phy's ahb cfg block reset.
+
+               For "qcom,ipq8074-qmp-pcie-phy" must contain:
+                       "phy", "common".
                For "qcom,msm8996-qmp-pcie-phy" must contain:
-                "phy", "common", "cfg".
+                       "phy", "common", "cfg".
                For "qcom,msm8996-qmp-usb3-phy" must contain
-                "phy", "common".
-               For "qcom,ipq8074-qmp-pcie-phy" must contain:
-                "phy", "common".
+                       "phy", "common".
+               For "qcom,sdm845-qmp-usb3-phy" must contain:
+                       "phy", "common".
+               For "qcom,sdm845-qmp-usb3-uni-phy" must contain:
+                       "phy", "common".
+               For "qcom,sdm845-qmp-ufs-phy": no resets are listed.
 
  - vdda-phy-supply: Phandle to a regulator supply to PHY core block.
  - vdda-pll-supply: Phandle to 1.8V regulator supply to PHY refclk pll block.
@@ -79,9 +91,10 @@ Required properties for child node:
 
  - #phy-cells: must be 0
 
+Required properties for child nodes of pcie and usb3 qmp phys:
  - clocks: a list of phandles and clock-specifier pairs,
           one for each entry in clock-names.
- - clock-names: Must contain following for pcie and usb qmp phys:
+ - clock-names: Must contain following:
                 "pipe<lane-number>" for pipe clock specific to each lane.
  - clock-output-names: Name of the PHY clock that will be the parent for
                       the above pipe clock.
@@ -91,9 +104,11 @@ Required properties for child node:
                        (or)
                  "pcie20_phy1_pipe_clk"
 
+Required properties for child node of PHYs with lane reset, AKA:
+       "qcom,msm8996-qmp-pcie-phy"
  - resets: a list of phandles and reset controller specifier pairs,
           one for each entry in reset-names.
- - reset-names: Must contain following for pcie qmp phys:
+ - reset-names: Must contain following:
                 "lane<lane-number>" for reset specific to each lane.
 
 Example:
index 06a363d9ccef9069124ae813bfb449d8cab182c7..b9a1d7402128b95437341b1e2d16d516fddfa114 100644 (file)
@@ -7,6 +7,7 @@ Required properties:
   for da850  - compatible = "ti,da850-ecap", "ti,am3352-ecap", "ti,am33xx-ecap";
   for dra746 - compatible = "ti,dra746-ecap", "ti,am3352-ecap";
   for 66ak2g - compatible = "ti,k2g-ecap", "ti,am3352-ecap";
+  for am654  - compatible = "ti,am654-ecap", "ti,am3352-ecap";
 - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
   the cells format. The PWM channel index ranges from 0 to 4. The only third
   cell flag supported by this binding is PWM_POLARITY_INVERTED.
index e1ef6afbe3a74a89d9040b9d6642cd2c22aa2a0c..7f31fe7e209348ceb04a471e08be0f60b4fff65d 100644 (file)
@@ -3,7 +3,9 @@
 Required Properties:
 - compatible: should be "renesas,pwm-rcar" and one of the following.
  - "renesas,pwm-r8a7743": for RZ/G1M
+ - "renesas,pwm-r8a7744": for RZ/G1N
  - "renesas,pwm-r8a7745": for RZ/G1E
+ - "renesas,pwm-r8a774a1": for RZ/G2M
  - "renesas,pwm-r8a7778": for R-Car M1A
  - "renesas,pwm-r8a7779": for R-Car H1
  - "renesas,pwm-r8a7790": for R-Car H2
@@ -12,6 +14,8 @@ Required Properties:
  - "renesas,pwm-r8a7795": for R-Car H3
  - "renesas,pwm-r8a7796": for R-Car M3-W
  - "renesas,pwm-r8a77965": for R-Car M3-N
+ - "renesas,pwm-r8a77970": for R-Car V3M
+ - "renesas,pwm-r8a77980": for R-Car V3H
  - "renesas,pwm-r8a77990": for R-Car E3
  - "renesas,pwm-r8a77995": for R-Car D3
 - reg: base address and length of the registers block for the PWM.
index d53a16715da6ac33dea19a4a66ad38310feacc95..848a92b53d810eeb055544cbb037a38a54e05371 100644 (file)
@@ -2,13 +2,19 @@
 
 Required Properties:
 
-  - compatible: should be one of the following.
+  - compatible: must contain one or more of the following:
     - "renesas,tpu-r8a73a4": for R8A73A4 (R-Mobile APE6) compatible PWM controller.
     - "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller.
     - "renesas,tpu-r8a7743": for R8A7743 (RZ/G1M) compatible PWM controller.
+    - "renesas,tpu-r8a7744": for R8A7744 (RZ/G1N) compatible PWM controller.
     - "renesas,tpu-r8a7745": for R8A7745 (RZ/G1E) compatible PWM controller.
     - "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller.
-    - "renesas,tpu": for generic R-Car and RZ/G1 TPU PWM controller.
+    - "renesas,tpu-r8a77970": for R8A77970 (R-Car V3M) compatible PWM
+                             controller.
+    - "renesas,tpu-r8a77980": for R8A77980 (R-Car V3H) compatible PWM
+                             controller.
+    - "renesas,tpu": for the generic TPU PWM controller; this is a fallback for
+                    the entries listed above.
 
   - reg: Base address and length of each memory resource used by the PWM
     controller hardware module.
index 504a4ecfc7b16869192c666e903a9d884d9f052d..b04e66a52de5dfc4ca66a3efc5339e4986275033 100644 (file)
@@ -5,18 +5,20 @@ UniPhier SoCs have SCSSI which supports SPI single channel.
 Required properties:
  - compatible: should be "socionext,uniphier-scssi"
  - reg: address and length of the spi master registers
- - #address-cells: must be <1>, see spi-bus.txt
- - #size-cells: must be <0>, see spi-bus.txt
- - clocks: A phandle to the clock for the device.
- - resets: A phandle to the reset control for the device.
+ - interrupts: a single interrupt specifier
+ - pinctrl-names: should be "default"
+ - pinctrl-0: pin control state for the default mode
+ - clocks: a phandle to the clock for the device
+ - resets: a phandle to the reset control for the device
 
 Example:
 
 spi0: spi@54006000 {
        compatible = "socionext,uniphier-scssi";
        reg = <0x54006000 0x100>;
-       #address-cells = <1>;
-       #size-cells = <0>;
+       interrupts = <0 39 4>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_spi0>;
        clocks = <&peri_clk 11>;
        resets = <&peri_rst 11>;
 };
diff --git a/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt b/Documentation/devicetree/bindings/timer/csky,gx6605s-timer.txt
new file mode 100644 (file)
index 0000000..6b04344
--- /dev/null
@@ -0,0 +1,42 @@
+=================
+gx6605s SOC Timer
+=================
+
+The timer is used in the gx6605s SoC as the system timer, and the
+driver provides both the clock event and the clock source.
+
+==============================
+timer node bindings definition
+==============================
+
+       Description: Describes gx6605s SOC timer
+
+       PROPERTIES
+
+       - compatible
+               Usage: required
+               Value type: <string>
+               Definition: must be "csky,gx6605s-timer"
+       - reg
+               Usage: required
+               Value type: <u32 u32>
+               Definition: <phyaddr size> in the SoC, from the CPU's view
+       - clocks
+               Usage: required
+               Value type: phandle + clock specifier cells
+               Definition: must be the input clock node
+       - interrupts
+               Usage: required
+               Value type: <u32>
+               Definition: must be the timer IRQ number defined by the SoC
+
+Examples:
+---------
+
+       timer0: timer@20a000 {
+               compatible = "csky,gx6605s-timer";
+               reg = <0x0020a000 0x400>;
+               clocks = <&dummy_apb_clk>;
+               interrupts = <10>;
+               interrupt-parent = <&intc>;
+       };
diff --git a/Documentation/devicetree/bindings/timer/csky,mptimer.txt b/Documentation/devicetree/bindings/timer/csky,mptimer.txt
new file mode 100644 (file)
index 0000000..15cfec0
--- /dev/null
@@ -0,0 +1,42 @@
+============================
+C-SKY Multi-processors Timer
+============================
+
+The C-SKY multi-processor timer is designed for C-SKY SMP systems; its
+registers are CPU co-processor 4 registers accessed with mtcr/mfcr.
+
+ - PTIM_CTLR "cr<0, 14>" Control reg to start/reset the timer.
+ - PTIM_TSR  "cr<1, 14>" Interrupt cleanup status reg.
+ - PTIM_CCVR "cr<3, 14>" Current counter value reg.
+ - PTIM_LVR  "cr<6, 14>" Window value reg to trigger the next event.
+
+==============================
+timer node bindings definition
+==============================
+
+       Description: Describes SMP timer
+
+       PROPERTIES
+
+       - compatible
+               Usage: required
+               Value type: <string>
+               Definition: must be "csky,mptimer"
+       - clocks
+               Usage: required
+               Value type: <node>
+               Definition: must be the input clock node
+       - interrupts
+               Usage: required
+               Value type: <u32>
+               Definition: must be the timer IRQ number defined by the SoC
+
+Examples:
+---------
+
+       timer: timer {
+               compatible = "csky,mptimer";
+               clocks = <&dummy_apb_clk>;
+               interrupts = <16>;
+               interrupt-parent = <&intc>;
+       };
index 51c136c821bfb0a190e7daa67ebdc2e6faaf878b..eef7d9d259e8570d102be8c7f1641158950262c2 100644 (file)
@@ -286,6 +286,12 @@ pointed by REDIRECT. This should not be possible on local system as setting
 "trusted." xattrs will require CAP_SYS_ADMIN. But it should be possible
 for untrusted layers like from a pen drive.
 
+Note: redirect_dir={off|nofollow|follow(*)} conflicts with metacopy=on, and
+results in an error.
+
+(*) redirect_dir=follow only conflicts with metacopy=on if upperdir=... is
+given.
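+
+As an illustration (paths and layer layout are hypothetical), a mount
+combining the conflicting options is expected to fail:
+
+  mount -t overlay overlay -o lowerdir=/lower,upperdir=/upper,\
+        workdir=/work,redirect_dir=off,metacopy=on /merged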
+
 Sharing and copying layers
 --------------------------
 
index 321d74b73937231d0dbf927ec89689082d14fcce..cf43bc4dbf319b4f642feaea0608d3bd07b075d6 100644 (file)
@@ -623,6 +623,11 @@ in your dentry operations instead.
        On success you get a new struct file sharing the mount/dentry with the
        original, on failure - ERR_PTR().
 --
+[mandatory]
+       ->clone_file_range() and ->dedupe_file_range() have been replaced with
+       ->remap_file_range().  See Documentation/filesystems/vfs.txt for more
+       information.
+--
 [recommended]
        ->lookup() instances doing an equivalent of
                if (IS_ERR(inode))
diff --git a/Documentation/filesystems/ubifs-authentication.md b/Documentation/filesystems/ubifs-authentication.md
new file mode 100644 (file)
index 0000000..028b3e2
--- /dev/null
@@ -0,0 +1,426 @@
+% UBIFS Authentication
+% sigma star gmbh
+% 2018
+
+# Introduction
+
+UBIFS utilizes the fscrypt framework to provide confidentiality for file
+contents and file names. This prevents attacks where an attacker is able to
+read contents of the filesystem at a single point in time. A classic example
+is a lost smartphone where the attacker is unable to read personal data stored
+on the device without the filesystem decryption key.
+
+In its current state, however, UBIFS encryption does not prevent attacks where
+the attacker is able to modify the filesystem contents and the user uses the
+device afterwards. In such a scenario an attacker can modify filesystem
+contents arbitrarily without the user noticing. One example is to modify a
+binary to perform a malicious action when executed [DMC-CBC-ATTACK]. Since
+most of the filesystem metadata of UBIFS is stored in the clear, it is
+fairly easy to swap files and replace their contents.
+
+Other full disk encryption systems like dm-crypt cover all filesystem metadata,
+which makes such kinds of attacks more complicated, but not impossible,
+especially if the attacker is given access to the device at multiple points
+in time. For dm-crypt and other filesystems that build upon the Linux block IO
+layer, the dm-integrity or dm-verity subsystems [DM-INTEGRITY, DM-VERITY]
+can be used to get full data authentication at the block layer.
+These can also be combined with dm-crypt [CRYPTSETUP2].
+
+This document describes an approach to get file contents _and_ full metadata
+authentication for UBIFS. Since UBIFS uses fscrypt for file contents and file
+name encryption, the authentication system could be tied into fscrypt such that
+existing features like key derivation can be utilized. It should however also
+be possible to use UBIFS authentication without using encryption.
+
+
+## MTD, UBI & UBIFS
+
+On Linux, the MTD (Memory Technology Devices) subsystem provides a uniform
+interface to access raw flash devices. One of the more prominent subsystems that
+work on top of MTD is UBI (Unsorted Block Images). It provides volume management
+for flash devices and is thus somewhat similar to LVM for block devices. In
+addition, it deals with flash-specific wear-leveling and transparent I/O error
+handling. UBI offers logical erase blocks (LEBs) to the layers on top of it
+and maps them transparently to physical erase blocks (PEBs) on the flash.
+
+UBIFS is a filesystem for raw flash which operates on top of UBI. Thus, wear
+leveling and some flash specifics are left to UBI, while UBIFS focuses on
+scalability, performance and recoverability.
+
+
+
+       +------------+ +*******+ +-----------+ +-----+
+       |            | * UBIFS * | UBI-BLOCK | | ... |
+       | JFFS/JFFS2 | +*******+ +-----------+ +-----+
+       |            | +-----------------------------+ +-----------+ +-----+
+       |            | |              UBI            | | MTD-BLOCK | | ... |
+       +------------+ +-----------------------------+ +-----------+ +-----+
+       +------------------------------------------------------------------+
+       |                  MEMORY TECHNOLOGY DEVICES (MTD)                 |
+       +------------------------------------------------------------------+
+       +-----------------------------+ +--------------------------+ +-----+
+       |         NAND DRIVERS        | |        NOR DRIVERS       | | ... |
+       +-----------------------------+ +--------------------------+ +-----+
+
+            Figure 1: Linux kernel subsystems for dealing with raw flash
+
+
+
+Internally, UBIFS maintains multiple data structures which are persisted on
+the flash:
+
+- *Index*: an on-flash B+ tree where the leaf nodes contain filesystem data
+- *Journal*: an additional data structure to collect FS changes before updating
+  the on-flash index and reduce flash wear.
+- *Tree Node Cache (TNC)*: an in-memory B+ tree that reflects the current FS
+  state to avoid frequent flash reads. It is basically the in-memory
+  representation of the index, but contains additional attributes.
+- *LEB property tree (LPT)*: an on-flash B+ tree for free space accounting per
+  UBI LEB.
+
+In the remainder of this section we will cover the on-flash UBIFS data
+structures in more detail. The TNC is of less importance here since it is never
+persisted onto the flash directly. More details on UBIFS can also be found in
+[UBIFS-WP].
+
+
+### UBIFS Index & Tree Node Cache
+
+Basic on-flash UBIFS entities are called *nodes*. UBIFS knows different types
+of nodes, e.g. data nodes (`struct ubifs_data_node`) which store chunks of file
+contents or inode nodes (`struct ubifs_ino_node`) which represent VFS inodes.
+Almost all types of nodes share a common header (`ubifs_ch`) containing basic
+information like node type, node length, a sequence number, etc. (see
+`fs/ubifs/ubifs-media.h` in the kernel source). Exceptions are entries of the
+LPT
+and some less important node types like padding nodes which are used to pad
+unusable content at the end of LEBs.
+
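+For reference, the common header is declared roughly as follows (abridged
+from `fs/ubifs/ubifs-media.h`; see the kernel sources for the
+authoritative definition):
+
+       struct ubifs_ch {
+               __le32 magic;           /* UBIFS node magic number */
+               __le32 crc;             /* CRC-32 checksum of the node header */
+               __le64 sqnum;           /* sequence number */
+               __le32 len;             /* full node length */
+               __u8 node_type;         /* node type */
+               __u8 group_type;        /* node group type */
+               __u8 padding[2];
+       } __packed;
+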
+To avoid re-writing the whole B+ tree on every single change, it is implemented
+as a *wandering tree*, where only the changed nodes are re-written and previous
+versions of them are obsoleted without erasing them right away. As a result,
+the index is not stored in a single place on the flash, but *wanders* around
+and there are obsolete parts on the flash as long as the LEB containing them is
+not reused by UBIFS. To find the most recent version of the index, UBIFS stores
+a special node called *master node* into UBI LEB 1 which always points to the
+most recent root node of the UBIFS index. For recoverability, the master node
+is additionally duplicated to LEB 2. Mounting UBIFS is thus a simple read of
+LEB 1 and 2 to get the current master node and from there get the location of
+the most recent on-flash index.
+
+The TNC is the in-memory representation of the on-flash index. It contains some
+additional runtime attributes per node which are not persisted. One of these is
+a dirty-flag which marks nodes that have to be persisted the next time the
+index is written onto the flash. The TNC acts as a write-back cache and all
+modifications of the on-flash index are done through the TNC. Like other caches,
+the TNC does not have to mirror the full index into memory, but reads parts of
+it from flash whenever needed. A *commit* is the UBIFS operation of updating the
+on-flash filesystem structures like the index. On every commit, the TNC nodes
+marked as dirty are written to the flash to update the persisted index.
+
+
+### Journal
+
+To avoid wearing out the flash, the index is only persisted (*committed*) when
+certain conditions are met (e.g. `fsync(2)`). The journal is used to record
+any changes (in form of inode nodes, data nodes etc.) between commits
+of the index. During mount, the journal is read from the flash and replayed
+onto the TNC (which will be created on-demand from the on-flash index).
+
+UBIFS reserves a number of LEBs just for the journal, called the *log area*.
+The number of log area LEBs is configured on filesystem creation (using
+`mkfs.ubifs`) and stored in the superblock node. The log area contains only
+two types of nodes: *reference nodes* and *commit start nodes*. A commit start
+node is written whenever an index commit is performed. Reference nodes are
+written on every journal update. Each reference node points to the position of
+other nodes (inode nodes, data nodes etc.) on the flash that are part of this
+journal entry. These nodes are called *buds* and describe the actual filesystem
+changes including their data.
+
+The log area is maintained as a ring. Whenever the journal is almost full,
+a commit is initiated. This also writes a commit start node so that during
+mount, UBIFS will look for the most recent commit start node and just replay
+every reference node after it. All reference nodes before the commit start
+node are ignored as they are already part of the on-flash index.
+
+When writing a journal entry, UBIFS first ensures that enough space is
+available to write the reference node and the buds that are part of this
+entry. Then, the reference node is written and afterwards the buds describing
+the file changes. On replay, UBIFS will record every reference node and
+inspect the location of the referenced LEBs to discover the buds. If these are
+corrupt or missing, UBIFS will attempt to recover them by re-reading the LEB.
+This is, however, only done for the last referenced LEB of the journal, since
+only this LEB can become corrupt because of a power cut. If the recovery
+fails, or if any other LEB has an error, UBIFS will fail the mount operation.
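+
+A toy model (plain C, illustrative stand-ins only) of this replay error
+handling:
+
+    struct ref_node { int leb; };
+
+    /* Stand-ins for reading buds and power-cut recovery. */
+    static int read_buds(struct ref_node *ref) { (void)ref; return 0; }
+    static int recover_last_leb(struct ref_node *ref) { (void)ref; return 0; }
+    static void replay_onto_tnc(struct ref_node *ref) { (void)ref; }
+
+    static int replay_log(struct ref_node *refs, int n)
+    {
+            for (int i = 0; i < n; i++) {
+                    /* Only the last referenced LEB may legitimately be
+                     * corrupted by a power cut; an error anywhere else
+                     * fails the mount right away. */
+                    if (read_buds(&refs[i]) != 0 &&
+                        (i != n - 1 || recover_last_leb(&refs[i]) != 0))
+                            return -1;  /* refuse to mount */
+                    replay_onto_tnc(&refs[i]);
+            }
+            return 0;
+    }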
+
+
+       | ----    LOG AREA     ---- | ----------    MAIN AREA    ------------ |
+
+        -----+------+-----+--------+----   ------+-----+-----+---------------
+        \    |      |     |        |   /  /      |     |     |               \
+        / CS |  REF | REF |        |   \  \ DENT | INO | INO |               /
+        \    |      |     |        |   /  /      |     |     |               \
+         ----+------+-----+--------+---   -------+-----+-----+----------------
+                 |     |                  ^            ^
+                 |     |                  |            |
+                 +------------------------+            |
+                       |                               |
+                       +-------------------------------+
+
+
+                Figure 2: UBIFS flash layout of log area with commit start nodes
+                          (CS) and reference nodes (REF) pointing to main area
+                          containing their buds
+
+
+### LEB Property Tree/Table
+
+The LEB property tree is used to store per-LEB information. This includes the
+LEB type and the amount of free and *dirty* (old, obsolete content) space [1]
+on the LEB. The type is important, because UBIFS never mixes index nodes with
+data nodes on a single LEB and thus each LEB has a specific purpose. This again
+useful for free space calculations. See [UBIFS-WP] for more details.
+
+The LEB property tree again is a B+ tree, but it is much smaller than the
+index. Due to its smaller size it is always written as one chunk on every
+commit. Thus, saving the LPT is an atomic operation.
+
+
+[1] Since LEBs can only be appended and never overwritten, there is a
+difference between free space, i.e. the remaining space left on the LEB that
+can be written to without erasing it, and previously written content that is
+obsolete but can't be overwritten without erasing the full LEB.
+
+
+# UBIFS Authentication
+
+This chapter introduces UBIFS authentication, which enables UBIFS to verify
+the authenticity and integrity of metadata and file contents stored on flash.
+
+
+## Threat Model
+
+UBIFS authentication enables detection of offline data modification. While it
+does not prevent it, it enables (trusted) code to check the integrity and
+authenticity of on-flash file contents and filesystem metadata. This covers
+attacks where file contents are swapped.
+
+UBIFS authentication will not protect against rollback of full flash contents.
+I.e. an attacker can still dump the flash and restore it at a later time without
+detection. It will also not protect against partial rollback of individual
+index commits. That means that an attacker is able to partially undo changes.
+This is possible because UBIFS does not immediately overwrite obsolete
+versions of the index tree or the journal, but instead marks them as obsolete
+and garbage collection erases them at a later time. An attacker can use this by
+erasing parts of the current tree and restoring old versions that are still on
+the flash and have not yet been erased. This is possible because every commit
+will always write a new version of the index root node and the master node
+without overwriting the previous version. This is further helped by the
+wear-leveling operations of UBI which copy contents from one physical
+eraseblock to another and do not atomically erase the first eraseblock.
+
+UBIFS authentication does not cover attacks where an attacker is able to
+execute code on the device after the authentication key was provided.
+Additional measures like secure boot and trusted boot have to be taken to
+ensure that only trusted code is executed on a device.
+
+
+## Authentication
+
+To be able to fully trust data read from flash, all UBIFS data structures
+stored on flash are authenticated. That is:
+
+- The index, which includes file contents, file metadata like extended
+  attributes, file length etc.
+- The journal, which also contains file contents and metadata by recording
+  changes to the filesystem
+- The LPT, which stores UBI LEB metadata which UBIFS uses for free space
+  accounting
+
+
+### Index Authentication
+
+Through its concept of a wandering tree, UBIFS already takes care of only
+updating and persisting the changed parts, from the leaf nodes up to the root
+node of the full B+ tree. This enables us to augment the index nodes of the
+tree with a hash over each node's child nodes. As a result, the index
+basically also forms a Merkle tree. Since the leaf nodes of the index contain
+the actual filesystem data, the hashes of their parent index nodes thus cover
+all the file contents and file metadata. When a file changes, the UBIFS index
+is updated accordingly from the leaf nodes up to the root node including the
+master node. This process can be hooked to recompute the hash of each changed
+node at the same time. Whenever a file is read, UBIFS can verify the hashes
+from each leaf node up to the root node to ensure the node's integrity.
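+
+A toy model (plain C, with stand-in types and a stand-in hash function) of
+such a leaf-to-root verification walk:
+
+    #include <string.h>
+
+    #define HASH_LEN 32  /* e.g. SHA-256 */
+
+    struct tnode {
+            struct tnode *parent;
+            int slot;                              /* branch index in parent */
+            unsigned char child_hash[8][HASH_LEN]; /* hashes of the children */
+    };
+
+    /* Stand-in: hash a node's on-flash representation. */
+    static void node_hash(const struct tnode *n, unsigned char out[HASH_LEN])
+    {
+            (void)n;
+            memset(out, 0, HASH_LEN);
+    }
+
+    static int verify_up(const struct tnode *n)
+    {
+            unsigned char h[HASH_LEN];
+
+            while (n->parent) {
+                    node_hash(n, h);
+                    if (memcmp(h, n->parent->child_hash[n->slot], HASH_LEN))
+                            return -1;  /* integrity violation detected */
+                    n = n->parent;
+            }
+            return 0;  /* the root is in turn covered by the master node HMAC */
+    }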
+
+To ensure the authenticity of the whole index, the UBIFS master node stores a
+keyed hash (HMAC) over its own contents and a hash of the root node of the index
+tree. As mentioned above, the master node is always written to the flash whenever
+the index is persisted (i.e. on index commit).
+
+With this approach, only UBIFS index nodes and the master node are changed to
+include a hash. All other types of nodes remain unchanged. This reduces the
+storage overhead which is precious for users of UBIFS (i.e. embedded
+devices).
+
+
+                             +---------------+
+                             |  Master Node  |
+                             |    (hash)     |
+                             +---------------+
+                                     |
+                                     v
+                            +-------------------+
+                            |  Index Node #1    |
+                            |                   |
+                            | branch0   branchn |
+                            | (hash)    (hash)  |
+                            +-------------------+
+                               |    ...   |  (fanout: 8)
+                               |          |
+                       +-------+          +------+
+                       |                         |
+                       v                         v
+            +-------------------+       +-------------------+
+            |  Index Node #2    |       |  Index Node #3    |
+            |                   |       |                   |
+            | branch0   branchn |       | branch0   branchn |
+            | (hash)    (hash)  |       | (hash)    (hash)  |
+            +-------------------+       +-------------------+
+                 |   ...                     |   ...   |
+                 v                           v         v
+               +-----------+         +----------+  +-----------+
+               | Data Node |         | INO Node |  | DENT Node |
+               +-----------+         +----------+  +-----------+
+
+
+           Figure 3: Coverage areas of index node hash and master node HMAC
+
+
+
+The most important part for robustness and power-cut safety is to atomically
+persist the hash and file contents. Here the existing UBIFS logic for how
+changed nodes are persisted is already designed for this purpose such that
+UBIFS can safely recover if a power-cut occurs while persisting. Adding
+hashes to index nodes does not change this since each hash will be persisted
+atomically together with its respective node.
+
+
+### Journal Authentication
+
+The journal is authenticated too. Since the journal is continuously written,
+it is necessary to also add authentication information to it frequently, so
+that in the case of a power cut not too much of the journal is left
+unauthenticated. This is done by creating a continuous hash beginning from the
+commit start node over the previous reference nodes, the current reference
+node, and the bud nodes. From time to time, whenever it is suitable,
+authentication nodes are added between the bud nodes. This new node type
+contains a HMAC over the current state of the hash chain. That way a journal
+can be authenticated up to the last authentication node. The tail of the
+journal which may not have an authentication node cannot be authenticated and
+is skipped during journal replay.
+
+We get this picture for journal authentication:
+
+    ,,,,,,,,
+    ,......,...........................................
+    ,. CS  ,               hash1.----.           hash2.----.
+    ,.  |  ,                    .    |hmac            .    |hmac
+    ,.  v  ,                    .    v                .    v
+    ,.REF#0,-> bud -> bud -> bud.-> auth -> bud -> bud.-> auth ...
+    ,..|...,...........................................
+    ,  |   ,
+    ,  |   ,,,,,,,,,,,,,,,
+    .  |            hash3,----.
+    ,  |                 ,    |hmac
+    ,  v                 ,    v
+    , REF#1 -> bud -> bud,-> auth ...
+    ,,,|,,,,,,,,,,,,,,,,,,
+       v
+      REF#2 -> ...
+       |
+       V
+      ...
+
+Since the hash also includes the reference nodes, an attacker cannot reorder or
+skip any journal heads for replay. An attacker can only remove bud nodes or
+reference nodes from the end of the journal, effectively rewinding the
+filesystem at maximum back to the last commit.
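+
+A toy model (plain C; `hash_absorb()` and `write_auth_node()` are stand-ins
+for the actual crypto primitives) of maintaining the running hash chain:
+
+    #define HASH_LEN 32
+
+    /* Stand-in: absorb a journal node into the running hash state. */
+    static void hash_absorb(unsigned char state[HASH_LEN],
+                            const void *node, unsigned int len)
+    { (void)state; (void)node; (void)len; }
+
+    /* Stand-in: emit an authentication node carrying a HMAC over state. */
+    static void write_auth_node(const unsigned char state[HASH_LEN])
+    { (void)state; }
+
+    static void journal_append(unsigned char state[HASH_LEN],
+                               const void *node, unsigned int len,
+                               int emit_auth)
+    {
+            hash_absorb(state, node, len);      /* chain covers this node */
+            if (emit_auth)
+                    write_auth_node(state);     /* verifiable up to here */
+    }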
+
+The location of the log area is stored in the master node. Since the master
+node is authenticated with a HMAC as described above, it is not possible to
+tamper with that without detection. The size of the log area is specified when
+the filesystem is created using `mkfs.ubifs` and stored in the superblock node.
+To avoid tampering with this and other values stored there, a HMAC is added to
+the superblock struct. The superblock node is stored in LEB 0 and is only
+modified on feature flag or similar changes, but never on file changes.
+
+
+### LPT Authentication
+
+The location of the LPT root node on the flash is stored in the UBIFS master
+node. Since the LPT is written and read atomically on every commit, there is
+no need to authenticate individual nodes of the tree. It suffices to
+protect the integrity of the full LPT by a simple hash stored in the master
+node. Since the master node itself is authenticated, the LPT's authenticity can
+be verified by verifying the authenticity of the master node and comparing the
+LPT hash stored there with the hash computed from the read on-flash LPT.
+
+
+## Key Management
+
+For simplicity, UBIFS authentication uses a single key to compute the HMACs
+of superblock, master, commit start and reference nodes. This key has to be
+available on creation of the filesystem (`mkfs.ubifs`) to authenticate the
+superblock node. Further, it has to be available on mount of the filesystem
+to verify authenticated nodes and generate new HMACs for changes.
+
+UBIFS authentication is intended to operate side-by-side with UBIFS encryption
+(fscrypt) to provide confidentiality and authenticity. Since UBIFS encryption
+takes a different approach, with encryption policies per directory, there can be
+multiple fscrypt master keys and there might be directories without encryption.
+UBIFS authentication, on the other hand, has an all-or-nothing approach in the
+sense that it either authenticates everything of the filesystem or nothing.
+Because of this, and because UBIFS authentication should also be usable without
+encryption, it does not share the same master key with fscrypt, but manages
+a dedicated authentication key.
+
+The API for providing the authentication key has yet to be defined, but the
+key can, e.g., be provided by userspace through a keyring similar to the way it
+is currently done in fscrypt. It should, however, be noted that the current
+fscrypt approach has shown its flaws and the userspace API will eventually
+change [FSCRYPT-POLICY2].
+
+Nevertheless, it will be possible for a user to provide a single passphrase
+or key in userspace that covers UBIFS authentication and encryption. This can
+be solved by the corresponding userspace tools which derive a second key for
+authentication in addition to the derived fscrypt master key used for
+encryption.
+
+To be able to check if the proper key is available on mount, the UBIFS
+superblock node will additionally store a hash of the authentication key. This
+approach is similar to the approach proposed for fscrypt encryption policy v2
+[FSCRYPT-POLICY2].
+
+
+# Future Extensions
+
+In certain cases where a vendor wants to provide an authenticated filesystem
+image to customers, it should be possible to do so without sharing the secret
+UBIFS authentication key. Instead, in addition to each HMAC a digital
+signature could be stored where the vendor shares the public key alongside the
+filesystem image. In case this filesystem has to be modified afterwards,
+UBIFS can exchange all digital signatures with HMACs on first mount, similar
+to the way the IMA/EVM subsystem deals with such situations. The HMAC key
+will then have to be provided beforehand in the normal way.
+
+
+# References
+
+[CRYPTSETUP2]        http://www.saout.de/pipermail/dm-crypt/2017-November/005745.html
+
+[DMC-CBC-ATTACK]     http://www.jakoblell.com/blog/2013/12/22/practical-malleability-attack-against-cbc-encrypted-luks-partitions/
+
+[DM-INTEGRITY]       https://www.kernel.org/doc/Documentation/device-mapper/dm-integrity.txt
+
+[DM-VERITY]          https://www.kernel.org/doc/Documentation/device-mapper/verity.txt
+
+[FSCRYPT-POLICY2]    https://www.spinics.net/lists/linux-ext4/msg58710.html
+
+[UBIFS-WP]           http://www.linux-mtd.infradead.org/doc/ubifs_whitepaper.pdf
index a0a61d2f389f409602d2ac266e7e803c22f74406..acc80442a3bbecc97c6d5ba9b516cb37cb3a7478 100644 (file)
@@ -91,6 +91,13 @@ chk_data_crc         do not skip checking CRCs on data nodes
 compr=none              override default compressor and set it to "none"
 compr=lzo               override default compressor and set it to "lzo"
 compr=zlib              override default compressor and set it to "zlib"
+auth_key=              specify the key used for authenticating the filesystem.
+                       Passing this option makes authentication mandatory.
+                       The passed key must be present in the kernel keyring
+                       and must be of type 'logon'
+auth_hash_name=                The hash algorithm used for authentication. Used for
+                       both hashing and for creating HMACs. Typical values
+                       include "sha256" or "sha512"
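+
+For example, one could add a logon key and mount with authentication like
+this (the key description "ubifs:foo" and the key material are placeholder
+examples):
+
+       keyctl add logon ubifs:foo "<32-byte key material>" @s
+       mount -t ubifs -o auth_key=ubifs:foo,auth_hash_name=sha256 \
+               ubi0:volume-name /mnt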
 
 
 Quick usage instructions
index a6c6a8af48a296cf9b7197c8f065370814efd90d..5f71a252e2e0f52b17c4fb6076baa57ae34e1ec4 100644 (file)
@@ -883,8 +883,9 @@ struct file_operations {
        unsigned (*mmap_capabilities)(struct file *);
 #endif
        ssize_t (*copy_file_range)(struct file *, loff_t, struct file *, loff_t, size_t, unsigned int);
-       int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
-       int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t, u64);
+       loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+                                  struct file *file_out, loff_t pos_out,
+                                  loff_t len, unsigned int remap_flags);
        int (*fadvise)(struct file *, loff_t, loff_t, int);
 };
 
@@ -960,11 +961,18 @@ otherwise noted.
 
   copy_file_range: called by the copy_file_range(2) system call.
 
-  clone_file_range: called by the ioctl(2) system call for FICLONERANGE and
-       FICLONE commands.
-
-  dedupe_file_range: called by the ioctl(2) system call for FIDEDUPERANGE
-       command.
+  remap_file_range: called by the ioctl(2) system call for FICLONERANGE and
+       FICLONE and FIDEDUPERANGE commands to remap file ranges.  An
+       implementation should remap len bytes at pos_in of the source file into
+       the dest file at pos_out.  Implementations must handle callers passing
+       in len == 0; this means "remap to the end of the source file".  The
+       return value should be the number of bytes remapped, or the usual
+       negative error code if errors occurred before any bytes were remapped.
+       The remap_flags parameter accepts REMAP_FILE_* flags.  If
+       REMAP_FILE_DEDUP is set then the implementation must only remap if the
+       requested file ranges have identical contents.  If
+       REMAP_FILE_CAN_SHORTEN is set, the caller is ok with the implementation
+       shortening the request length to satisfy alignment or EOF requirements
+       (or any other reason).
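+
+       A minimal sketch of the expected shape (hypothetical filesystem
+       "myfs"; the actual remap logic is elided):
+
+       static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
+                                           struct file *file_out, loff_t pos_out,
+                                           loff_t len, unsigned int remap_flags)
+       {
+               /* Reject flags this implementation does not understand. */
+               if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN))
+                       return -EINVAL;
+
+               /* len == 0 means "remap to the end of the source file". */
+               if (len == 0)
+                       len = i_size_read(file_inode(file_in)) - pos_in;
+
+               /* ... check and perform the remap, shortening len only if
+                * REMAP_FILE_CAN_SHORTEN is set ... */
+
+               return len;     /* number of bytes remapped */
+       }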
 
   fadvise: possibly called by the fadvise64() system call.
 
diff --git a/Documentation/i2c/busses/i2c-nvidia-gpu b/Documentation/i2c/busses/i2c-nvidia-gpu
new file mode 100644 (file)
index 0000000..31884d2
--- /dev/null
@@ -0,0 +1,18 @@
+Kernel driver i2c-nvidia-gpu
+
+Datasheet: not publicly available.
+
+Authors:
+       Ajay Gupta <ajayg@nvidia.com>
+
+Description
+-----------
+
+i2c-nvidia-gpu is a driver for the I2C controller included in NVIDIA Turing
+and later GPUs. It is used to communicate with the Type-C controller on GPUs.
+
+If your 'lspci -v' listing shows something like the following,
+
+01:00.3 Serial bus controller [0c80]: NVIDIA Corporation Device 1ad9 (rev a1)
+
+then this driver should support the I2C controller of your GPU.
index a8c0873beb952e620db9bb2f2df823624ac90650..b24b5343f5eb3a5e527f55b0a4dd0ff0095606cd 100644 (file)
@@ -190,7 +190,26 @@ A few EV_REL codes have special meanings:
 * REL_WHEEL, REL_HWHEEL:
 
   - These codes are used for vertical and horizontal scroll wheels,
-    respectively.
+    respectively. The value is the number of detents moved on the wheel, the
+    physical size of which varies by device. For high-resolution wheels
+    this may be an approximation based on the high-resolution scroll events,
+    see REL_WHEEL_HI_RES. These event codes are legacy codes and
+    REL_WHEEL_HI_RES and REL_HWHEEL_HI_RES should be preferred where
+    available.
+
+* REL_WHEEL_HI_RES, REL_HWHEEL_HI_RES:
+
+  - High-resolution scroll wheel data. The accumulated value 120 represents
+    movement by one detent. For devices that do not provide high-resolution
+    scrolling, the value is always a multiple of 120. For devices with
+    high-resolution scrolling, the value may be a fraction of 120.
+
+    If a vertical scroll wheel supports high-resolution scrolling, this code
+    will be emitted in addition to REL_WHEEL or REL_HWHEEL. The REL_WHEEL
+    and REL_HWHEEL values may be an approximation based on the high-resolution
+    scroll events. There is no guarantee that the high-resolution data
+    is a multiple of 120 at the time of an emulated REL_WHEEL or REL_HWHEEL
+    event.
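+
+    As a sketch, a userspace consumer could accumulate these values into
+    whole detents like this (``emit_detents()`` is a hypothetical
+    callback)::
+
+      #include <linux/input.h>
+      #include <stdlib.h>
+
+      static void emit_detents(int n) { (void)n; }  /* hypothetical consumer */
+
+      static int residual;
+
+      static void handle_event(const struct input_event *ev)
+      {
+              if (ev->type != EV_REL || ev->code != REL_WHEEL_HI_RES)
+                      return;
+              residual += ev->value;
+              while (abs(residual) >= 120) {  /* 120 units == one detent */
+                      emit_detents(residual > 0 ? 1 : -1);
+                      residual -= residual > 0 ? 120 : -120;
+              }
+      }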
 
 EV_ABS
 ------
index 7b6a2b2bdc98db2e794a261ff7be17dc3df0ae26..8da26c6dd886a9d9006184f4d9d5c5cf43e71b2e 100644 (file)
@@ -537,21 +537,6 @@ more details, with real examples.
        The third parameter may be a text as in this example, but it may also
        be an expanded variable or a macro.
 
-    cc-fullversion
-       cc-fullversion is useful when the exact version of gcc is needed.
-       One typical use-case is when a specific GCC version is broken.
-       cc-fullversion points out a more specific version than cc-version does.
-
-       Example:
-               #arch/powerpc/Makefile
-               $(Q)if test "$(cc-fullversion)" = "040200" ; then \
-                       echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
-                       false ; \
-               fi
-
-       In this example for a specific GCC version the build will error out
-       explaining to the user why it stops.
-
     cc-cross-prefix
        cc-cross-prefix is used to check if there exists a $(CC) in path with
        one of the listed prefixes. The first prefix where there exist a
index 0f8b31874002c79777e58a3e33892742ab10e495..de131f00c24966e29dde6e61b489cafef39c9b9e 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media_ioc_request_alloc:
 
index 6dd2d7fea7144502bb271ab2e156d028332cafcb..5d2604345e191c9a34fe63d3b57a2ed719e57fa0 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media_request_ioc_queue:
 
index febe888494c8dfb6e46ebb61ab39141155f3a343..ec61960c81ce9b4eb47381eef9b5dfb57df1e7fb 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media_request_ioc_reinit:
 
index 5f4a23029c487ca110bb81dcae3025d99f1ae285..945113dcb2185762e4d74d20712868755dbee0ca 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _media-request-api:
 
index 098d7f2b9548231d3cd058847c05b42c7aa34642..dcf3f35bcf176d8a0d1a33e3b0d4a42c8832c72a 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _request-func-close:
 
index ff7b072a69991970aba4e3ea35c897bc10f0f4b4..11a22f8878439cb1fbae0837879b7e1b3c64d5da 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _request-func-ioctl:
 
index 85191254f381abea160fceef5c7e2fbbbefb9c7b..2609fd54d519cb2379f6ec19033ea87ac7c3469d 100644 (file)
@@ -1,4 +1,28 @@
-.. SPDX-License-Identifier: GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
+.. This file is dual-licensed: you can use it either under the terms
+.. of the GPL or the GFDL 1.1+ license, at your option. Note that this
+.. dual licensing only applies to this file, and not this project as a
+.. whole.
+..
+.. a) This file is free software; you can redistribute it and/or
+..    modify it under the terms of the GNU General Public License as
+..    published by the Free Software Foundation; either version 2 of
+..    the License, or (at your option) any later version.
+..
+..    This file is distributed in the hope that it will be useful,
+..    but WITHOUT ANY WARRANTY; without even the implied warranty of
+..    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+..    GNU General Public License for more details.
+..
+.. Or, alternatively,
+..
+.. b) Permission is granted to copy, distribute and/or modify this
+..    document under the terms of the GNU Free Documentation License,
+..    Version 1.1 or any later version published by the Free Software
+..    Foundation, with no Invariant Sections, no Front-Cover Texts
+..    and no Back-Cover Texts. A copy of the license is included at
+..    Documentation/media/uapi/fdl-appendix.rst.
+..
+.. TODO: replace it to GPL-2.0 OR GFDL-1.1-or-later WITH no-invariant-sections
 
 .. _request-func-poll:
 
index f7ac8d0d3af14a1a6d951df74847972220711133..b65dc078abeb8ca4a1a9a045eddc2d1b8844c333 100644 (file)
@@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
 the desired operation. Both drivers and applications must set the remainder of
 the :c:type:`v4l2_format` structure to 0.
 
-.. _v4l2-meta-format:
+.. c:type:: v4l2_meta_format
 
 .. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
 
index 3ead350e099f97a6146ff98bf0126a8388149883..9ea494a8facab2cca0b51745cda1af6f0f53fd4c 100644 (file)
@@ -132,6 +132,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
       - ``sdr``
       - Definition of a data format, see :ref:`pixfmt`, used by SDR
        capture and output devices.
+    * -
+      - struct :c:type:`v4l2_meta_format`
+      - ``meta``
+      - Definition of a metadata format, see :ref:`meta-formats`, used by
+       metadata capture devices.
     * -
       - __u8
       - ``raw_data``\ [200]
index 1e4948c9e9897afb3ac0231a2c1c76ea310968ee..4d118b827bbb7ed1f9e7c221ae1c10ceee7bb779 100644 (file)
@@ -20,7 +20,7 @@ Enabling the driver
 The driver is enabled via the standard kernel configuration system,
 using the make command::
 
-  make oldconfig/silentoldconfig/menuconfig/etc.
+  make oldconfig/menuconfig/etc.
 
 The driver is located in the menu structure at:
 
index 163b5ff1073cd0a852d9ed32e06c599cefcfdd75..32b21571adfeb5bc4b5aec9b25ec14ecebc8e0e5 100644 (file)
@@ -316,6 +316,17 @@ tcp_frto - INTEGER
 
        By default it's enabled with a non-zero value. 0 disables F-RTO.
 
+tcp_fwmark_accept - BOOLEAN
+       If set, incoming connections to listening sockets that do not have a
+       socket mark will set the mark of the accepting socket to the fwmark of
+       the incoming SYN packet. This will cause all packets on that connection
+       (starting from the first SYNACK) to be sent with that fwmark. The
+       listening socket's mark is unchanged. Listening sockets that already
+       have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
+       unaffected.
+
+       Default: 0
+
 tcp_invalid_ratelimit - INTEGER
        Limit the maximal rate for sending duplicate acknowledgments
        in response to incoming TCP packets that are for an existing
index 605e00cdd6beb1d024519ab8417464fc12064724..89f1302d593a5c0404ed8a434ba580e8440a9139 100644 (file)
@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:
 
        u32 rxrpc_kernel_check_life(struct socket *sock,
                                    struct rxrpc_call *call);
+       void rxrpc_kernel_probe_life(struct socket *sock,
+                                    struct rxrpc_call *call);
 
-     This returns a number that is updated when ACKs are received from the peer
-     (notably including PING RESPONSE ACKs which we can elicit by sending PING
-     ACKs to see if the call still exists on the server).  The caller should
-     compare the numbers of two calls to see if the call is still alive after
-     waiting for a suitable interval.
+     The first function returns a number that is updated when ACKs are received
+     from the peer (notably including PING RESPONSE ACKs which we can elicit by
+     sending PING ACKs to see if the call still exists on the server).  The
+     caller should compare the numbers of two calls to see if the call is still
+     alive after waiting for a suitable interval.
 
      This allows the caller to work out if the server is still contactable and
      if the call is still alive on the server whilst waiting for the server to
      process a client operation.
 
-     This function may transmit a PING ACK.
+     The second function causes a ping ACK to be transmitted to try to provoke
+     the peer into responding, which would then cause the value returned by the
+     first function to change.  Note that this must be called in TASK_RUNNING
+     state.
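+
+     A usage sketch (in-kernel caller; the one-second wait is arbitrary):
+
+       u32 life = rxrpc_kernel_check_life(sock, call);
+
+       rxrpc_kernel_probe_life(sock, call);    /* elicit a ping ACK */
+       msleep(1000);                           /* TASK_RUNNING context */
+       if (rxrpc_kernel_check_life(sock, call) == life)
+               pr_warn("call may no longer be alive\n");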
 
  (*) Get reply timestamp.
 
index 757808526d9a8bbb2197dc54a532ea766e5c4be3..878ebfda7eeff378a2fee48e3b361aa6b3587896 100644 (file)
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read.
    code-of-conduct-interpretation
    development-process
    submitting-patches
+   programming-language
    coding-style
    maintainer-pgp-guide
    email-clients
diff --git a/Documentation/process/programming-language.rst b/Documentation/process/programming-language.rst
new file mode 100644 (file)
index 0000000..e5f5f06
--- /dev/null
@@ -0,0 +1,45 @@
+.. _programming_language:
+
+Programming Language
+====================
+
+The kernel is written in the C programming language [c-language]_.
+More precisely, the kernel is typically compiled with ``gcc`` [gcc]_
+under ``-std=gnu89`` [gcc-c-dialect-options]_: the GNU dialect of ISO C90
+(including some C99 features).
+
+This dialect contains many extensions to the language [gnu-extensions]_,
+and many of them are used within the kernel as a matter of course.
+
+There is some support for compiling the kernel with ``clang`` [clang]_
+and ``icc`` [icc]_ for several of the architectures, although at the time
+of writing it is not completed, requiring third-party patches.
+
+Attributes
+----------
+
+One of the common extensions used throughout the kernel is attributes
+[gcc-attribute-syntax]_. Attributes allow the introduction of
+implementation-defined semantics to language entities (like variables,
+functions or types) without having to make significant syntactic changes
+to the language (e.g. adding a new keyword) [n2049]_.
+
+In some cases, attributes are optional (i.e. a compiler not supporting them
+should still produce proper code, even if it is slower or does not perform
+as many compile-time checks/diagnostics).
+
+The kernel defines pseudo-keywords (e.g. ``__pure``) instead of using
+the GNU attribute syntax (e.g. ``__attribute__((__pure__))``) directly,
+in order to feature-detect which ones can be used and/or to shorten the code.
+
+Please refer to ``include/linux/compiler_attributes.h`` for more information.
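+
+For example, ``__pure`` simply expands to the corresponding GNU attribute,
+as a minimal illustration::
+
+    #define __pure __attribute__((__pure__))
+
+    static __pure int add(int a, int b)
+    {
+            return a + b;
+    }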
+
+.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
+.. [gcc] https://gcc.gnu.org
+.. [clang] https://clang.llvm.org
+.. [icc] https://software.intel.com/en-us/c-compilers
+.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
+.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
+.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
+.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
+
index 9ce7256c6edba8b605e9928a42159d717f6d7cf5..9521c4207f014d11f4edd33bcefdc788d0299d6a 100644 (file)
@@ -859,6 +859,7 @@ The keyctl syscall functions are:
      and either the buffer length or the OtherInfo length exceeds the
      allowed length.
 
+
   *  Restrict keyring linkage::
 
        long keyctl(KEYCTL_RESTRICT_KEYRING, key_serial_t keyring,
@@ -890,6 +891,116 @@ The keyctl syscall functions are:
      applicable to the asymmetric key type.
 
 
+  *  Query an asymmetric key::
+
+       long keyctl(KEYCTL_PKEY_QUERY,
+                   key_serial_t key_id, unsigned long reserved,
+                   struct keyctl_pkey_query *info);
+
+     Get information about an asymmetric key.  The information is returned in
+     the keyctl_pkey_query struct::
+
+       __u32   supported_ops;
+       __u32   key_size;
+       __u16   max_data_size;
+       __u16   max_sig_size;
+       __u16   max_enc_size;
+       __u16   max_dec_size;
+       __u32   __spare[10];
+
+     ``supported_ops`` contains a bit mask of flags indicating which ops are
+     supported.  This is constructed from a bitwise-OR of::
+
+       KEYCTL_SUPPORTS_{ENCRYPT,DECRYPT,SIGN,VERIFY}
+
+     ``key_size`` indicates the size of the key in bits.
+
+     ``max_*_size`` indicate the maximum sizes in bytes of a blob of data to be
+     signed, a signature blob, a blob to be encrypted and a blob to be
+     decrypted.
+
+     ``__spare[]`` must be set to 0.  This is intended for future use to hand
+     over one or more passphrases needed to unlock a key.
+
+     If successful, 0 is returned.  If the key is not an asymmetric key,
+     EOPNOTSUPP is returned.
+
+
+  *  Encrypt, decrypt, sign or verify a blob using an asymmetric key::
+
+       long keyctl(KEYCTL_PKEY_ENCRYPT,
+                   const struct keyctl_pkey_params *params,
+                   const char *info,
+                   const void *in,
+                   void *out);
+
+       long keyctl(KEYCTL_PKEY_DECRYPT,
+                   const struct keyctl_pkey_params *params,
+                   const char *info,
+                   const void *in,
+                   void *out);
+
+       long keyctl(KEYCTL_PKEY_SIGN,
+                   const struct keyctl_pkey_params *params,
+                   const char *info,
+                   const void *in,
+                   void *out);
+
+       long keyctl(KEYCTL_PKEY_VERIFY,
+                   const struct keyctl_pkey_params *params,
+                   const char *info,
+                   const void *in,
+                   const void *in2);
+
+     Use an asymmetric key to perform a public-key cryptographic operation on
+     a blob of data.  For encryption and verification, the asymmetric key may
+     only need the public parts to be available, but for decryption and signing
+     the private parts are also required.
+
+     The parameter block pointed to by params contains a number of integer
+     values::
+
+       __s32           key_id;
+       __u32           in_len;
+       __u32           out_len;
+       __u32           in2_len;
+
+     ``key_id`` is the ID of the asymmetric key to be used.  ``in_len`` and
+     ``in2_len`` indicate the amount of data in the in and in2 buffers and
+     ``out_len`` indicates the size of the out buffer as appropriate for the
+     above operations.
+
+     For a given operation, the in and out buffers are used as follows::
+
+       Operation ID            in,in_len       out,out_len     in2,in2_len
+       ======================= =============== =============== ===============
+       KEYCTL_PKEY_ENCRYPT     Raw data        Encrypted data  -
+       KEYCTL_PKEY_DECRYPT     Encrypted data  Raw data        -
+       KEYCTL_PKEY_SIGN        Raw data        Signature       -
+       KEYCTL_PKEY_VERIFY      Raw data        -               Signature
+
+     ``info`` is a string of key=value pairs that supply supplementary
+     information.  These include:
+
+       ``enc=<encoding>`` The encoding of the encrypted/signature blob.  This
+                       can be "pkcs1" for RSASSA-PKCS1-v1.5 or
+                       RSAES-PKCS1-v1.5; "pss" for "RSASSA-PSS"; "oaep" for
+                       "RSAES-OAEP".  If omitted or set to "raw", the raw
+                       output of the encryption function is used.
+
+       ``hash=<algo>`` If the data buffer contains the output of a hash
+                       function and the encoding includes some indication of
+                       which hash function was used, the hash function can be
+                       specified with this, eg. "hash=sha256".
+
+     The ``__spare[]`` space in the parameter block must be set to 0.  This is
+     intended, amongst other things, to allow the passing of passphrases
+     required to unlock a key.
+
+     If successful, encrypt, decrypt and sign all return the amount of data
+     written into the output buffer.  Verification returns 0 on success.
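+
+     As a usage sketch (userspace, via libkeyutils; the key serial number,
+     buffer sizes and the info string are example values)::
+
+       #include <keyutils.h>
+       #include <linux/keyctl.h>
+
+       long sign_sha256_digest(key_serial_t key,
+                               const unsigned char digest[32],
+                               unsigned char *sig, unsigned int sig_len)
+       {
+               struct keyctl_pkey_params params = {
+                       .key_id  = key,
+                       .in_len  = 32,          /* digest size */
+                       .out_len = sig_len,     /* e.g. from KEYCTL_PKEY_QUERY */
+                       /* remaining fields (__spare) stay zero */
+               };
+
+               return keyctl(KEYCTL_PKEY_SIGN, &params,
+                             "enc=pkcs1 hash=sha256", digest, sig);
+       }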
+
+
 Kernel Services
 ===============
 
@@ -1483,6 +1594,112 @@ The structure has a number of fields, some of which are mandatory:
      attempted key link operation. If there is no match, -EINVAL is returned.
 
 
+  *  ``int (*asym_eds_op)(struct kernel_pkey_params *params,
+                         const void *in, void *out);``
+     ``int (*asym_verify_signature)(struct kernel_pkey_params *params,
+                                   const void *in, const void *in2);``
+
+     These methods are optional.  If provided, the first allows a key to be
+     used to encrypt, decrypt or sign a blob of data, and the second allows a
+     key to be used to verify a signature.
+
+     In all cases, the following information is provided in the params block::
+
+       struct kernel_pkey_params {
+               struct key      *key;
+               const char      *encoding;
+               const char      *hash_algo;
+               char            *info;
+               __u32           in_len;
+               union {
+                       __u32   out_len;
+                       __u32   in2_len;
+               };
+               enum kernel_pkey_operation op : 8;
+       };
+
+     This includes the key to be used; a string indicating the encoding to use
+     (for instance, "pkcs1" may be used with an RSA key to indicate
+     RSASSA-PKCS1-v1.5 or RSAES-PKCS1-v1.5 encoding or "raw" if no encoding);
+     the name of the hash algorithm used to generate the data for a signature
+     (if appropriate); the sizes of the input and output (or second input)
+     buffers; and the ID of the operation to be performed.
+
+     For a given operation ID, the input and output buffers are used as
+     follows::
+
+       Operation ID            in,in_len       out,out_len     in2,in2_len
+       ======================= =============== =============== ===============
+       kernel_pkey_encrypt     Raw data        Encrypted data  -
+       kernel_pkey_decrypt     Encrypted data  Raw data        -
+       kernel_pkey_sign        Raw data        Signature       -
+       kernel_pkey_verify      Raw data        -               Signature
+
+     asym_eds_op() deals with encryption, decryption and signature creation as
+     specified by params->op.  Note that params->op is also set for
+     asym_verify_signature().
+
+     Encryption and signature creation both take raw data in the input buffer
+     and return the encrypted result in the output buffer.  Padding may have
+     been added if an encoding was set.  In the case of signature creation,
+     depending on the encoding, the padding created may need to indicate the
+     digest algorithm - the name of which should be supplied in hash_algo.
+
+     Decryption takes encrypted data in the input buffer and returns the raw
+     data in the output buffer.  Padding will get checked and stripped off if
+     an encoding was set.
+
+     Verification takes raw data in the input buffer and the signature in the
+     second input buffer and checks that the one matches the other.  Padding
+     will be validated.  Depending on the encoding, the digest algorithm used
+     to generate the raw data may need to be indicated in hash_algo.
+
+     If successful, asym_eds_op() should return the number of bytes written
+     into the output buffer.  asym_verify_signature() should return 0.
+
+     A variety of errors may be returned, including EOPNOTSUPP if the operation
+     is not supported; EKEYREJECTED if verification fails; ENOPKG if the
+     required crypto isn't available.
+
+
+  *  ``int (*asym_query)(const struct kernel_pkey_params *params,
+                        struct kernel_pkey_query *info);``
+
+     This method is optional.  If provided it allows information about the
+     public or asymmetric key held in the key to be determined.
+
+     The parameter block is as for asym_eds_op() and co. but in_len and out_len
+     are unused.  The encoding and hash_algo fields should be used to reduce
+     the returned buffer/data sizes as appropriate.
+
+     If successful, the following information is filled in::
+
+       struct kernel_pkey_query {
+               __u32           supported_ops;
+               __u32           key_size;
+               __u16           max_data_size;
+               __u16           max_sig_size;
+               __u16           max_enc_size;
+               __u16           max_dec_size;
+       };
+
+     The supported_ops field will contain a bitmask indicating what operations
+     are supported by the key, including encryption of a blob, decryption of a
+     blob, signing a blob and verifying the signature on a blob.  The following
+     constants are defined for this::
+
+       KEYCTL_SUPPORTS_{ENCRYPT,DECRYPT,SIGN,VERIFY}
+
+     The key_size field is the size of the key in bits.  max_data_size and
+     max_sig_size are the maximum raw data and signature sizes for creation and
+     verification of a signature; max_enc_size and max_dec_size are the maximum
+     raw data and signature sizes for encryption and decryption.  The
+     max_*_size fields are measured in bytes.
+
+     If successful, 0 will be returned.  If the key doesn't support this,
+     EOPNOTSUPP will be returned.
+
+
 Request-Key Callback Service
 ============================
 
index e1ca698e000639720e9c718ec28613177cad189e..f584fb74b4ff2852eead1a46df316f14d3aeef69 100644 (file)
@@ -302,11 +302,11 @@ sure structure holes are cleared.
 Memory poisoning
 ----------------
 
-When releasing memory, it is best to poison the contents (clear stack on
-syscall return, wipe heap memory on a free), to avoid reuse attacks that
-rely on the old contents of memory. This frustrates many uninitialized
-variable attacks, stack content exposures, heap content exposures, and
-use-after-free attacks.
+When releasing memory, it is best to poison the contents, to avoid reuse
+attacks that rely on the old contents of memory. E.g., clear stack on a
+syscall return (``CONFIG_GCC_PLUGIN_STACKLEAK``), wipe heap memory on a
+free. This frustrates many uninitialized variable attacks, stack content
+exposures, heap content exposures, and use-after-free attacks.
 
 Destination tracking
 --------------------
index 37a679501ddc68bc0ab26c58444794c0d30c8f40..1b8775298cf7a0223c04aa7098cf1d1e4d24fefd 100644 (file)
@@ -89,6 +89,7 @@ show up in /proc/sys/kernel:
 - shmmni
 - softlockup_all_cpu_backtrace
 - soft_watchdog
+- stack_erasing
 - stop-a                      [ SPARC only ]
 - sysrq                       ==> Documentation/admin-guide/sysrq.rst
 - sysctl_writes_strict
@@ -987,6 +988,23 @@ detect a hard lockup condition.
 
 ==============================================================
 
+stack_erasing
+
+This parameter can be used to control kernel stack erasing at the end
+of syscalls for kernels built with CONFIG_GCC_PLUGIN_STACKLEAK.
+
+That erasing reduces the information which kernel stack leak bugs
+can reveal and blocks some uninitialized stack variable attacks.
+The tradeoff is the performance impact: on a single CPU system kernel
+compilation sees a 1% slowdown; other systems and workloads may vary.
+
+  0: kernel stack erasing is disabled, STACKLEAK_METRICS are not updated.
+
+  1: kernel stack erasing is enabled (default), it is performed before
+     returning to the userspace at the end of syscalls.
+
+==============================================================
+
 tainted:
 
 Non-zero if the kernel has been tainted. Numeric values, which can be
index 32f3d55c54b75e1c6642a3d328a1dc404dfd4dc9..c4dbe6f7cdae8c8c76e706d773322af79cc26b78 100644 (file)
@@ -92,3 +92,12 @@ Speculation misfeature controls
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
    * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
+
+- PR_SPEC_INDIRECT_BRANCH: Indirect Branch Speculation in User Processes
+                        (Mitigate Spectre V2 style attacks against user processes)
+
+  Invocations:
+   * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
+   * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
index 7727db8f94bce6af42b5cc1920bf2a46798ad0b4..5e9b826b5f62fd1cf8dfcae07fc5bde66594428b 100644 (file)
@@ -61,18 +61,6 @@ Protocol 2.12:       (Kernel 3.8) Added the xloadflags field and extension fields
                to struct boot_params for loading bzImage and ramdisk
                above 4G in 64bit.
 
-Protocol 2.13: (Kernel 3.14) Support 32- and 64-bit flags being set in
-               xloadflags to support booting a 64-bit kernel from 32-bit
-               EFI
-
-Protocol 2.14: (Kernel 4.20) Added acpi_rsdp_addr holding the physical
-               address of the ACPI RSDP table.
-               The bootloader updates version with:
-               0x8000 | min(kernel-version, bootloader-version)
-               kernel-version being the protocol version supported by
-               the kernel and bootloader-version the protocol version
-               supported by the bootloader.
-
 **** MEMORY LAYOUT
 
 The traditional memory map for the kernel loader, used for Image or
@@ -209,7 +197,6 @@ Offset      Proto   Name            Meaning
 0258/8 2.10+   pref_address    Preferred loading address
 0260/4 2.10+   init_size       Linear memory required during initialization
 0264/4 2.11+   handover_offset Offset of handover entry point
-0268/8 2.14+   acpi_rsdp_addr  Physical address of RSDP table
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
@@ -322,7 +309,7 @@ Protocol:   2.00+
   Contains the magic number "HdrS" (0x53726448).
 
 Field name:    version
-Type:          modify
+Type:          read
 Offset/size:   0x206/2
 Protocol:      2.00+
 
@@ -330,12 +317,6 @@ Protocol:  2.00+
   e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
   10.17.
 
-  Up to protocol version 2.13 this information is only read by the
-  bootloader. From protocol version 2.14 onwards the bootloader will
-  write the used protocol version or-ed with 0x8000 to the field. The
-  used protocol version will be the minimum of the supported protocol
-  versions of the bootloader and the kernel.
-
 Field name:    realmode_swtch
 Type:          modify (optional)
 Offset/size:   0x208/4
@@ -763,17 +744,6 @@ Offset/size:       0x264/4
 
   See EFI HANDOVER PROTOCOL below for more details.
 
-Field name:    acpi_rsdp_addr
-Type:          write
-Offset/size:   0x268/8
-Protocol:      2.14+
-
-  This field can be set by the boot loader to tell the kernel the
-  physical address of the ACPI RSDP table.
-
-  A value of 0 indicates the kernel should fall back to the standard
-  methods to locate the RSDP.
-
 
 **** THE IMAGE CHECKSUM
 
index 702898633b0007a1e50670fd05c7c24d58123c6c..804f9426ed17bdcf0c8fb6dc682ae9254050beb9 100644 (file)
@@ -34,23 +34,24 @@ __________________|____________|__________________|_________|___________________
 ____________________________________________________________|___________________________________________________________
                   |            |                  |         |
  ffff800000000000 | -128    TB | ffff87ffffffffff |    8 TB | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120    TB | ffffc7ffffffffff |   64 TB | direct mapping of all physical memory (page_offset_base)
- ffffc80000000000 |  -56    TB | ffffc8ffffffffff |    1 TB | ... unused hole
+ ffff880000000000 | -120    TB | ffff887fffffffff |  0.5 TB | LDT remap for PTI
+ ffff888000000000 | -119.5  TB | ffffc87fffffffff |   64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 |  -55.5  TB | ffffc8ffffffffff |  0.5 TB | ... unused hole
  ffffc90000000000 |  -55    TB | ffffe8ffffffffff |   32 TB | vmalloc/ioremap space (vmalloc_base)
  ffffe90000000000 |  -23    TB | ffffe9ffffffffff |    1 TB | ... unused hole
  ffffea0000000000 |  -22    TB | ffffeaffffffffff |    1 TB | virtual memory map (vmemmap_base)
  ffffeb0000000000 |  -21    TB | ffffebffffffffff |    1 TB | ... unused hole
  ffffec0000000000 |  -20    TB | fffffbffffffffff |   16 TB | KASAN shadow memory
- fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
-                  |            |                  |         | vaddr_end for KASLR
- fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
- fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | LDT remap for PTI
- ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
 __________________|____________|__________________|_________|____________________________________________________________
                                                             |
-                                                            | Identical layout to the 47-bit one from here on:
+                                                            | Identical layout to the 56-bit one from here on:
 ____________________________________________________________|____________________________________________________________
                   |            |                  |         |
+ fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
+                  |            |                  |         | vaddr_end for KASLR
+ fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | ... unused hole
+ ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
  ffffff8000000000 | -512    GB | ffffffeeffffffff |  444 GB | ... unused hole
  ffffffef00000000 |  -68    GB | fffffffeffffffff |   64 GB | EFI region mapping space
  ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | ... unused hole
@@ -83,7 +84,7 @@ Notes:
 __________________|____________|__________________|_________|___________________________________________________________
                   |            |                  |         |
  0000800000000000 |  +64    PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
-                  |            |                  |         |     virtual memory addresses up to the -128 TB
+                  |            |                  |         |     virtual memory addresses up to the -64 PB
                   |            |                  |         |     starting offset of kernel mappings.
 __________________|____________|__________________|_________|___________________________________________________________
                                                             |
@@ -91,23 +92,24 @@ __________________|____________|__________________|_________|___________________
 ____________________________________________________________|___________________________________________________________
                   |            |                  |         |
  ff00000000000000 |  -64    PB | ff0fffffffffffff |    4 PB | ... guard hole, also reserved for hypervisor
- ff10000000000000 |  -60    PB | ff8fffffffffffff |   32 PB | direct mapping of all physical memory (page_offset_base)
- ff90000000000000 |  -28    PB | ff9fffffffffffff |    4 PB | LDT remap for PTI
+ ff10000000000000 |  -60    PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 |  -59.75 PB | ff90ffffffffffff |   32 PB | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 |  -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
  ffa0000000000000 |  -24    PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
  ffd2000000000000 |  -11.5  PB | ffd3ffffffffffff |  0.5 PB | ... unused hole
  ffd4000000000000 |  -11    PB | ffd5ffffffffffff |  0.5 PB | virtual memory map (vmemmap_base)
  ffd6000000000000 |  -10.5  PB | ffdeffffffffffff | 2.25 PB | ... unused hole
  ffdf000000000000 |   -8.25 PB | fffffdffffffffff |   ~8 PB | KASAN shadow memory
- fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
-                  |            |                  |         | vaddr_end for KASLR
- fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
- fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | ... unused hole
- ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
 __________________|____________|__________________|_________|____________________________________________________________
                                                             |
                                                             | Identical layout to the 47-bit one from here on:
 ____________________________________________________________|____________________________________________________________
                   |            |                  |         |
+ fffffc0000000000 |   -4    TB | fffffdffffffffff |    2 TB | ... unused hole
+                  |            |                  |         | vaddr_end for KASLR
+ fffffe0000000000 |   -2    TB | fffffe7fffffffff |  0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 |   -1.5  TB | fffffeffffffffff |  0.5 TB | ... unused hole
+ ffffff0000000000 |   -1    TB | ffffff7fffffffff |  0.5 TB | %esp fixup stacks
  ffffff8000000000 | -512    GB | ffffffeeffffffff |  444 GB | ... unused hole
  ffffffef00000000 |  -68    GB | fffffffeffffffff |   64 GB | EFI region mapping space
  ffffffff00000000 |   -4    GB | ffffffff7fffffff |    2 GB | ... unused hole
@@ -146,3 +148,6 @@ Their order is preserved but their base will be offset early at boot time.
 Be very careful vs. KASLR when changing anything here. The KASLR address
 range must not overlap with anything except the KASAN shadow area, which is
 correct as KASAN disables KASLR.
+
+For both 4- and 5-level layouts, the STACKLEAK_POISON value sits in the last
+2MB hole: ffffffffffff4111
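
As a quick check of the address documented above, a minimal userspace sketch;
it assumes the upstream definition STACKLEAK_POISON == -0xBEEF, which this
hunk does not show:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* -0xBEEF in 64-bit two's complement is 0xffffffffffff4111 */
            uint64_t poison = (uint64_t)(int64_t)-0xBEEF;

            printf("STACKLEAK_POISON = %#llx\n", (unsigned long long)poison);
            return 0;
    }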
index 97b7adbceda4828ab217301a305a3b20892d0603..68aed077f7b62ed0e70315bde602bfbec50a78bf 100644 (file)
@@ -25,7 +25,7 @@ Offset        Proto   Name            Meaning
 0C8/004        ALL     ext_cmd_line_ptr  cmd_line_ptr high 32bits
 140/080        ALL     edid_info       Video mode setup (struct edid_info)
 1C0/020        ALL     efi_info        EFI 32 information (struct efi_info)
-1E0/004        ALL     alk_mem_k       Alternative mem check, in KB
+1E0/004        ALL     alt_mem_k       Alternative mem check, in KB
 1E4/004        ALL     scratch         Scratch field for the kernel setup code
 1E8/001        ALL     e820_entries    Number of entries in e820_table (below)
 1E9/001        ALL     eddbuf_entries  Number of entries in eddbuf (below)
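
The hunk above only renames alk_mem_k to alt_mem_k; the field keeps its
documented 0x1E0 offset. A hedged C sketch pinning that down (the struct below
mirrors only the documented layout, not the real struct boot_params):

    #include <stddef.h>
    #include <stdint.h>

    /* Stand-in for illustration: alt_mem_k at the documented offset. */
    struct boot_params_excerpt {
            uint8_t  _pad[0x1e0];
            uint32_t alt_mem_k;     /* Alternative mem check, in KB */
    };

    _Static_assert(offsetof(struct boot_params_excerpt, alt_mem_k) == 0x1e0,
                   "alt_mem_k must sit at offset 0x1e0");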
index 2f9723c075c397e3a68e0b4c8347d475a8642207..0767f1d1b579eb0286426cd80d91e6feb67e47ff 100644 (file)
@@ -180,6 +180,7 @@ F:  drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
+M:     Heiner Kallweit <hkallweit1@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/realtek/r8169.c
@@ -717,7 +718,7 @@ F:  include/linux/mfd/altera-a10sr.h
 F:     include/dt-bindings/reset/altr,rst-mgr-a10sr.h
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:     Vince Bridgers <vbridger@opensource.altera.com>
+M:     Thor Thayer <thor.thayer@linux.intel.com>
 L:     netdev@vger.kernel.org
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:     Maintained
@@ -1471,6 +1472,7 @@ F:        drivers/clk/sirf/
 F:     drivers/clocksource/timer-prima2.c
 F:     drivers/clocksource/timer-atlas7.c
 N:     [^a-z]sirf
+X:     drivers/gnss
 
 ARM/EBSA110 MACHINE SUPPORT
 M:     Russell King <linux@armlinux.org.uk>
@@ -1737,13 +1739,17 @@ ARM/Mediatek SoC support
 M:     Matthias Brugger <matthias.bgg@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
+W:     https://mtk.bcnfs.org/
+C:     irc://chat.freenode.net/linux-mediatek
 S:     Maintained
 F:     arch/arm/boot/dts/mt6*
 F:     arch/arm/boot/dts/mt7*
 F:     arch/arm/boot/dts/mt8*
 F:     arch/arm/mach-mediatek/
 F:     arch/arm64/boot/dts/mediatek/
+F:     drivers/soc/mediatek/
 N:     mtk
+N:     mt[678]
 K:     mediatek
 
 ARM/Mediatek USB3 PHY DRIVER
@@ -1922,7 +1928,6 @@ ARM/QUALCOMM SUPPORT
 M:     Andy Gross <andy.gross@linaro.org>
 M:     David Brown <david.brown@linaro.org>
 L:     linux-arm-msm@vger.kernel.org
-L:     linux-soc@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/soc/qcom/
 F:     arch/arm/boot/dts/qcom-*.dts
@@ -2490,7 +2495,7 @@ F:        drivers/net/wireless/ath/*
 ATHEROS ATH5K WIRELESS DRIVER
 M:     Jiri Slaby <jirislaby@gmail.com>
 M:     Nick Kossifidis <mickflemm@gmail.com>
-M:     "Luis R. Rodriguez" <mcgrof@do-not-panic.com>
+M:     Luis Chamberlain <mcgrof@kernel.org>
 L:     linux-wireless@vger.kernel.org
 W:     http://wireless.kernel.org/en/users/Drivers/ath5k
 S:     Maintained
@@ -2800,7 +2805,7 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git
 Q:     https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
 S:     Supported
-F:     arch/x86/net/bpf_jit*
+F:     arch/*/net/*
 F:     Documentation/networking/filter.txt
 F:     Documentation/bpf/
 F:     include/linux/bpf*
@@ -2820,6 +2825,67 @@ F:       tools/bpf/
 F:     tools/lib/bpf/
 F:     tools/testing/selftests/bpf/
 
+BPF JIT for ARM
+M:     Shubham Bansal <illusionist.neo@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     arch/arm/net/
+
+BPF JIT for ARM64
+M:     Daniel Borkmann <daniel@iogearbox.net>
+M:     Alexei Starovoitov <ast@kernel.org>
+M:     Zi Shen Lim <zlim.lnx@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     arch/arm64/net/
+
+BPF JIT for MIPS (32-BIT AND 64-BIT)
+M:     Paul Burton <paul.burton@mips.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     arch/mips/net/
+
+BPF JIT for NFP NICs
+M:     Jakub Kicinski <jakub.kicinski@netronome.com>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     drivers/net/ethernet/netronome/nfp/bpf/
+
+BPF JIT for POWERPC (32-BIT AND 64-BIT)
+M:     Naveen N. Rao <naveen.n.rao@linux.ibm.com>
+M:     Sandipan Das <sandipan@linux.ibm.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     arch/powerpc/net/
+
+BPF JIT for S390
+M:     Martin Schwidefsky <schwidefsky@de.ibm.com>
+M:     Heiko Carstens <heiko.carstens@de.ibm.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     arch/s390/net/
+X:     arch/s390/net/pnet.c
+
+BPF JIT for SPARC (32-BIT AND 64-BIT)
+M:     David S. Miller <davem@davemloft.net>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     arch/sparc/net/
+
+BPF JIT for X86 32-BIT
+M:     Wang YanQing <udknight@gmail.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     arch/x86/net/bpf_jit_comp32.c
+
+BPF JIT for X86 64-BIT
+M:     Alexei Starovoitov <ast@kernel.org>
+M:     Daniel Borkmann <daniel@iogearbox.net>
+L:     netdev@vger.kernel.org
+S:     Supported
+F:     arch/x86/net/
+X:     arch/x86/net/bpf_jit_comp32.c
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Michael Chan <michael.chan@broadcom.com>
 L:     netdev@vger.kernel.org
@@ -2860,7 +2926,7 @@ F:        drivers/staging/vc04_services
 BROADCOM BCM47XX MIPS ARCHITECTURE
 M:     Hauke Mehrtens <hauke@hauke-m.de>
 M:     Rafał Miłecki <zajec5@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/mips/brcm/
 F:     arch/mips/bcm47xx/*
@@ -2869,7 +2935,6 @@ F:        arch/mips/include/asm/mach-bcm47xx/*
 BROADCOM BCM5301X ARM ARCHITECTURE
 M:     Hauke Mehrtens <hauke@hauke-m.de>
 M:     Rafał Miłecki <zajec5@gmail.com>
-M:     Jon Mason <jonmason@broadcom.com>
 M:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-arm-kernel@lists.infradead.org
 S:     Maintained
@@ -2924,7 +2989,7 @@ F:        drivers/cpufreq/bmips-cpufreq.c
 BROADCOM BMIPS MIPS ARCHITECTURE
 M:     Kevin Cernekee <cernekee@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 T:     git git://github.com/broadcom/stblinux.git
 S:     Maintained
 F:     arch/mips/bmips/*
@@ -3015,7 +3080,6 @@ F:        drivers/net/ethernet/broadcom/genet/
 BROADCOM IPROC ARM ARCHITECTURE
 M:     Ray Jui <rjui@broadcom.com>
 M:     Scott Branden <sbranden@broadcom.com>
-M:     Jon Mason <jonmason@broadcom.com>
 M:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/broadcom/cygnus-linux.git
@@ -3062,7 +3126,7 @@ F:        include/uapi/rdma/bnxt_re-abi.h
 
 BROADCOM NVRAM DRIVER
 M:     Rafał Miłecki <zajec5@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     drivers/firmware/broadcom/*
 
@@ -3212,11 +3276,16 @@ S:      Maintained
 F:     sound/pci/oxygen/
 
 C-SKY ARCHITECTURE
-M:     Guo Ren <ren_guo@c-sky.com>
+M:     Guo Ren <guoren@kernel.org>
 T:     git https://github.com/c-sky/csky-linux.git
 S:     Supported
 F:     arch/csky/
 F:     Documentation/devicetree/bindings/csky/
+F:     drivers/irqchip/irq-csky-*
+F:     Documentation/devicetree/bindings/interrupt-controller/csky,*
+F:     drivers/clocksource/timer-gx6605s.c
+F:     drivers/clocksource/timer-mp-csky.c
+F:     Documentation/devicetree/bindings/timer/csky,*
 K:     csky
 N:     csky
 
@@ -3276,6 +3345,12 @@ F:       include/uapi/linux/caif/
 F:     include/net/caif/
 F:     net/caif/
 
+CAKE QDISC
+M:     Toke Høiland-Jørgensen <toke@toke.dk>
+L:     cake@lists.bufferbloat.net (moderated for non-subscribers)
+S:     Maintained
+F:     net/sched/sch_cake.c
+
 CALGARY x86-64 IOMMU
 M:     Muli Ben-Yehuda <mulix@mulix.org>
 M:     Jon Mason <jdmason@kudzu.us>
@@ -3737,6 +3812,11 @@ L:       platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/compal-laptop.c
 
+COMPILER ATTRIBUTES
+M:     Miguel Ojeda <miguel.ojeda.sandonis@gmail.com>
+S:     Maintained
+F:     include/linux/compiler_attributes.h
+
 CONEXANT ACCESSRUNNER USB DRIVER
 L:     accessrunner-general@lists.sourceforge.net
 W:     http://accessrunner.sourceforge.net/
@@ -4153,7 +4233,7 @@ F:        net/decnet/
 
 DECSTATION PLATFORM SUPPORT
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/wiki/DECstation
 S:     Maintained
 F:     arch/mips/dec/
@@ -5244,7 +5324,7 @@ EDAC-CAVIUM OCTEON
 M:     Ralf Baechle <ralf@linux-mips.org>
 M:     David Daney <david.daney@cavium.com>
 L:     linux-edac@vger.kernel.org
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     drivers/edac/octeon_edac*
 
@@ -5523,6 +5603,7 @@ F:        net/bridge/
 ETHERNET PHY LIBRARY
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Florian Fainelli <f.fainelli@gmail.com>
+M:     Heiner Kallweit <hkallweit1@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-bus-mdio
@@ -5761,7 +5842,7 @@ F:        include/uapi/linux/firewire*.h
 F:     tools/firewire/
 
 FIRMWARE LOADER (request_firmware)
-M:     Luis R. Rodriguez <mcgrof@kernel.org>
+M:     Luis Chamberlain <mcgrof@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/firmware_class/
@@ -5878,6 +5959,14 @@ L:       linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-cpm.c
 
+FREESCALE IMX LPI2C DRIVER
+M:     Dong Aisheng <aisheng.dong@nxp.com>
+L:     linux-i2c@vger.kernel.org
+L:     linux-imx@nxp.com
+S:     Maintained
+F:     drivers/i2c/busses/i2c-imx-lpi2c.c
+F:     Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.txt
+
 FREESCALE IMX / MXC FEC DRIVER
 M:     Fugang Duan <fugang.duan@nxp.com>
 L:     netdev@vger.kernel.org
@@ -6237,6 +6326,7 @@ F:        include/uapi/linux/gigaset_dev.h
 
 GNSS SUBSYSTEM
 M:     Johan Hovold <johan@kernel.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-class-gnss
 F:     Documentation/devicetree/bindings/gnss/
@@ -6286,6 +6376,7 @@ F:        tools/testing/selftests/gpio/
 
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
+M:     Bartosz Golaszewski <bgolaszewski@baylibre.com>
 L:     linux-gpio@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:     Maintained
@@ -6848,6 +6939,13 @@ L:       linux-acpi@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/i2c-core-acpi.c
 
+I2C CONTROLLER DRIVER FOR NVIDIA GPU
+M:     Ajay Gupta <ajayg@nvidia.com>
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/i2c/busses/i2c-nvidia-gpu
+F:     drivers/i2c/busses/i2c-nvidia-gpu.c
+
 I2C MUXES
 M:     Peter Rosin <peda@axentia.se>
 L:     linux-i2c@vger.kernel.org
@@ -7416,6 +7514,20 @@ S:       Maintained
 F:     Documentation/fb/intelfb.txt
 F:     drivers/video/fbdev/intelfb/
 
+INTEL GPIO DRIVERS
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L:     linux-gpio@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F:     drivers/gpio/gpio-ich.c
+F:     drivers/gpio/gpio-intel-mid.c
+F:     drivers/gpio/gpio-lynxpoint.c
+F:     drivers/gpio/gpio-merrifield.c
+F:     drivers/gpio/gpio-ml-ioh.c
+F:     drivers/gpio/gpio-pch.c
+F:     drivers/gpio/gpio-sch.c
+F:     drivers/gpio/gpio-sodaville.c
+
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:     Zhenyu Wang <zhenyuw@linux.intel.com>
 M:     Zhi Wang <zhi.a.wang@intel.com>
@@ -7426,12 +7538,6 @@ T:       git https://github.com/intel/gvt-linux.git
 S:     Supported
 F:     drivers/gpu/drm/i915/gvt/
 
-INTEL PMIC GPIO DRIVER
-R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S:     Maintained
-F:     drivers/gpio/gpio-*cove.c
-F:     drivers/gpio/gpio-msic.c
-
 INTEL HID EVENT DRIVER
 M:     Alex Hung <alex.hung@canonical.com>
 L:     platform-driver-x86@vger.kernel.org
@@ -7519,12 +7625,6 @@ W:       https://01.org/linux-acpi
 S:     Supported
 F:     drivers/platform/x86/intel_menlow.c
 
-INTEL MERRIFIELD GPIO DRIVER
-M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-L:     linux-gpio@vger.kernel.org
-S:     Maintained
-F:     drivers/gpio/gpio-merrifield.c
-
 INTEL MIC DRIVERS (mic)
 M:     Sudeep Dutt <sudeep.dutt@intel.com>
 M:     Ashutosh Dixit <ashutosh.dixit@intel.com>
@@ -7557,6 +7657,13 @@ F:       drivers/platform/x86/intel_punit_ipc.c
 F:     arch/x86/include/asm/intel_pmc_ipc.h
 F:     arch/x86/include/asm/intel_punit_ipc.h
 
+INTEL PMIC GPIO DRIVERS
+M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F:     drivers/gpio/gpio-*cove.c
+F:     drivers/gpio/gpio-msic.c
+
 INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
 R:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:     Maintained
@@ -7665,7 +7772,7 @@ F:        Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.txt
 
 IOC3 ETHERNET DRIVER
 M:     Ralf Baechle <ralf@linux-mips.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/sgi/ioc3-eth.c
 
@@ -8036,7 +8143,7 @@ F:        tools/testing/selftests/
 F:     Documentation/dev-tools/kselftest*
 
 KERNEL USERMODE HELPER
-M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
+M:     Luis Chamberlain <mcgrof@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     kernel/umh.c
@@ -8093,7 +8200,7 @@ F:        arch/arm64/kvm/
 
 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
 M:     James Hogan <jhogan@kernel.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     arch/mips/include/uapi/asm/kvm*
 F:     arch/mips/include/asm/kvm*
@@ -8212,7 +8319,7 @@ F:        mm/kmemleak.c
 F:     mm/kmemleak-test.c
 
 KMOD KERNEL MODULE LOADER - USERMODE HELPER
-M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
+M:     Luis Chamberlain <mcgrof@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     kernel/kmod.c
@@ -8266,7 +8373,7 @@ F:        drivers/net/dsa/lantiq_gswip.c
 
 LANTIQ MIPS ARCHITECTURE
 M:     John Crispin <john@phrozen.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/lantiq
 F:     drivers/soc/lantiq
@@ -8354,7 +8461,7 @@ F:        drivers/media/dvb-frontends/lgdt3305.*
 LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
 M:     Viresh Kumar <vireshk@kernel.org>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     include/linux/pata_arasan_cf_data.h
 F:     drivers/ata/pata_arasan_cf.c
@@ -8371,7 +8478,7 @@ F:        drivers/ata/ata_generic.c
 LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
 M:     Linus Walleij <linus.walleij@linaro.org>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/pata_ftide010.c
 F:     drivers/ata/sata_gemini.c
@@ -8390,7 +8497,7 @@ F:        include/linux/ahci_platform.h
 LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
 M:     Mikael Pettersson <mikpelinux@gmail.com>
 L:     linux-ide@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:     Maintained
 F:     drivers/ata/sata_promise.*
 
@@ -8829,7 +8936,7 @@ S:        Maintained
 
 MARDUK (CREATOR CI40) DEVICE TREE SUPPORT
 M:     Rahul Bedarkar <rahulbedarkar89@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/boot/dts/img/pistachio_marduk.dts
 
@@ -9788,7 +9895,7 @@ F:        drivers/dma/at_xdmac.c
 
 MICROSEMI MIPS SOCS
 M:     Alexandre Belloni <alexandre.belloni@bootlin.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/generic/board-ocelot.c
 F:     arch/mips/configs/generic/board-ocelot.config
@@ -9828,7 +9935,7 @@ MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
 M:     Paul Burton <paul.burton@mips.com>
 M:     James Hogan <jhogan@kernel.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
 T:     git git://git.linux-mips.org/pub/scm/ralf/linux.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
@@ -9841,7 +9948,7 @@ F:        drivers/platform/mips/
 
 MIPS BOSTON DEVELOPMENT BOARD
 M:     Paul Burton <paul.burton@mips.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/clock/img,boston-clock.txt
 F:     arch/mips/boot/dts/img/boston.dts
@@ -9851,7 +9958,7 @@ F:        include/dt-bindings/clock/boston-clock.h
 
 MIPS GENERIC PLATFORM
 M:     Paul Burton <paul.burton@mips.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/power/mti,mips-cpc.txt
 F:     arch/mips/generic/
@@ -9859,7 +9966,7 @@ F:        arch/mips/tools/generic-board-config.sh
 
 MIPS/LOONGSON1 ARCHITECTURE
 M:     Keguang Zhang <keguang.zhang@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/loongson32/
 F:     arch/mips/include/asm/mach-loongson32/
@@ -9868,7 +9975,7 @@ F:        drivers/*/*/*loongson1*
 
 MIPS/LOONGSON2 ARCHITECTURE
 M:     Jiaxun Yang <jiaxun.yang@flygoat.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/loongson64/fuloong-2e/
 F:     arch/mips/loongson64/lemote-2f/
@@ -9878,7 +9985,7 @@ F:        drivers/*/*/*loongson2*
 
 MIPS/LOONGSON3 ARCHITECTURE
 M:     Huacai Chen <chenhc@lemote.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/loongson64/
 F:     arch/mips/include/asm/mach-loongson64/
@@ -9888,7 +9995,7 @@ F:        drivers/*/*/*loongson3*
 
 MIPS RINT INSTRUCTION EMULATION
 M:     Aleksandar Markovic <aleksandar.markovic@mips.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     arch/mips/math-emu/sp_rint.c
 F:     arch/mips/math-emu/dp_rint.c
@@ -9902,12 +10009,9 @@ S:      Odd Fixes
 F:     drivers/media/radio/radio-miropcm20*
 
 MMP SUPPORT
-M:     Eric Miao <eric.y.miao@gmail.com>
-M:     Haojian Zhuang <haojian.zhuang@gmail.com>
+R:     Lubomir Rintel <lkundrak@v3.sk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T:     git git://github.com/hzhuang1/linux.git
-T:     git git://git.linaro.org/people/ycmiao/pxa-linux.git
-S:     Maintained
+S:     Odd Fixes
 F:     arch/arm/boot/dts/mmp*
 F:     arch/arm/mach-mmp/
 
@@ -10771,6 +10875,14 @@ L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     arch/arm/mach-omap2/omap_hwmod.*
 
+OMAP I2C DRIVER
+M:     Vignesh R <vigneshr@ti.com>
+L:     linux-omap@vger.kernel.org
+L:     linux-i2c@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/i2c/i2c-omap.txt
+F:     drivers/i2c/busses/i2c-omap.c
+
 OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     linux-media@vger.kernel.org
@@ -10780,9 +10892,9 @@ F:      drivers/media/platform/omap3isp/
 F:     drivers/staging/media/omap4iss/
 
 OMAP MMC SUPPORT
-M:     Jarkko Lavinen <jarkko.lavinen@nokia.com>
+M:     Aaro Koskinen <aaro.koskinen@iki.fi>
 L:     linux-omap@vger.kernel.org
-S:     Maintained
+S:     Odd Fixes
 F:     drivers/mmc/host/omap.c
 
 OMAP POWER MANAGEMENT SUPPORT
@@ -10865,7 +10977,7 @@ F:      include/linux/platform_data/i2c-omap.h
 
 ONION OMEGA2+ BOARD
 M:     Harvey Hunt <harveyhuntnexus@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/boot/dts/ralink/omega2p.dts
 
@@ -11717,6 +11829,7 @@ F:      Documentation/devicetree/bindings/pinctrl/fsl,*
 PIN CONTROLLER - INTEL
 M:     Mika Westerberg <mika.westerberg@linux.intel.com>
 M:     Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 S:     Maintained
 F:     drivers/pinctrl/intel/
 
@@ -11773,7 +11886,7 @@ F:      drivers/pinctrl/spear/
 
 PISTACHIO SOC SUPPORT
 M:     James Hartley <james.hartley@sondrel.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Odd Fixes
 F:     arch/mips/pistachio/
 F:     arch/mips/include/asm/mach-pistachio/
@@ -11953,7 +12066,7 @@ F:      kernel/printk/
 F:     include/linux/printk.h
 
 PRISM54 WIRELESS DRIVER
-M:     "Luis R. Rodriguez" <mcgrof@gmail.com>
+M:     Luis Chamberlain <mcgrof@kernel.org>
 L:     linux-wireless@vger.kernel.org
 W:     http://wireless.kernel.org/en/users/Drivers/p54
 S:     Obsolete
@@ -11967,9 +12080,10 @@ S:     Maintained
 F:     fs/proc/
 F:     include/linux/proc_fs.h
 F:     tools/testing/selftests/proc/
+F:     Documentation/filesystems/proc.txt
 
 PROC SYSCTL
-M:     "Luis R. Rodriguez" <mcgrof@kernel.org>
+M:     Luis Chamberlain <mcgrof@kernel.org>
 M:     Kees Cook <keescook@chromium.org>
 L:     linux-kernel@vger.kernel.org
 L:     linux-fsdevel@vger.kernel.org
@@ -12432,7 +12546,7 @@ F:      drivers/media/usb/rainshadow-cec/*
 
 RALINK MIPS ARCHITECTURE
 M:     John Crispin <john@phrozen.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/ralink
 
@@ -12452,7 +12566,7 @@ F:      drivers/block/brd.c
 
 RANCHU VIRTUAL BOARD FOR MIPS
 M:     Miodrag Dinic <miodrag.dinic@mips.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     arch/mips/generic/board-ranchu.c
 F:     arch/mips/configs/generic/board-ranchu.config
@@ -13777,6 +13891,13 @@ F:     drivers/md/raid*
 F:     include/linux/raid/
 F:     include/uapi/linux/raid/
 
+SOCIONEXT (SNI) AVE NETWORK DRIVER
+M:     Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/ethernet/socionext/sni_ave.c
+F:     Documentation/devicetree/bindings/net/socionext,uniphier-ave4.txt
+
 SOCIONEXT (SNI) NETSEC NETWORK DRIVER
 M:     Jassi Brar <jaswinder.singh@linaro.org>
 L:     netdev@vger.kernel.org
@@ -13902,6 +14023,7 @@ S:      Supported
 F:     Documentation/devicetree/bindings/sound/
 F:     Documentation/sound/soc/
 F:     sound/soc/
+F:     include/dt-bindings/sound/
 F:     include/sound/soc*
 
 SOUNDWIRE SUBSYSTEM
@@ -13949,11 +14071,10 @@ F:    drivers/tty/serial/sunzilog.h
 F:     drivers/tty/vcc.c
 
 SPARSE CHECKER
-M:     "Christopher Li" <sparse@chrisli.org>
+M:     "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
 L:     linux-sparse@vger.kernel.org
 W:     https://sparse.wiki.kernel.org/
 T:     git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
-T:     git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
 S:     Maintained
 F:     include/linux/compiler.h
 
@@ -14050,6 +14171,7 @@ F:      Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
 
 STABLE BRANCH
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:     Sasha Levin <sashal@kernel.org>
 L:     stable@vger.kernel.org
 S:     Supported
 F:     Documentation/process/stable-kernel-rules.rst
@@ -15187,7 +15309,7 @@ F:      arch/um/os-Linux/drivers/
 TURBOCHANNEL SUBSYSTEM
 M:     "Maciej W. Rozycki" <macro@linux-mips.org>
 M:     Ralf Baechle <ralf@linux-mips.org>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
 S:     Maintained
 F:     drivers/tc/
@@ -15850,7 +15972,6 @@ F:      net/vmw_vsock/virtio_transport_common.c
 F:     net/vmw_vsock/virtio_transport.c
 F:     drivers/net/vsockmon.c
 F:     drivers/vhost/vsock.c
-F:     drivers/vhost/vsock.h
 F:     tools/testing/vsock/
 
 VIRTIO CONSOLE DRIVER
@@ -16009,7 +16130,7 @@ F:      drivers/net/vmxnet3/
 
 VOCORE VOCORE2 BOARD
 M:     Harvey Hunt <harveyhuntnexus@gmail.com>
-L:     linux-mips@linux-mips.org
+L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     arch/mips/boot/dts/ralink/vocore2.dts
 
index 9aa352b38815801e37fd3cb04c66fa96511443f5..f2c3423c3062f2b704c239621d2093cc45280060 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
-PATCHLEVEL = 19
+PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = "People's Front"
+EXTRAVERSION = -rc6
+NAME = Shy Crocodile
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -485,7 +485,7 @@ ifneq ($(KBUILD_SRC),)
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
 endif
 
-ifeq ($(cc-name),clang)
+ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
 ifneq ($(CROSS_COMPILE),)
 CLANG_TARGET   := --target=$(notdir $(CROSS_COMPILE:%-=%))
 GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
@@ -702,7 +702,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG)      := -fstack-protector-strong
 
 KBUILD_CFLAGS += $(stackp-flags-y)
 
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
 KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
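
Both hunks retire the fragile $(cc-name) test: the first probes $(CC) directly,
the second keys off the new CONFIG_CC_IS_CLANG Kconfig symbol. As a hedged
illustration (not from this merge) of what that symbol makes possible on the
C side:

    /* Hypothetical example: branch on the compiler from C without any
     * Makefile-side cc-name machinery.
     */
    #ifdef CONFIG_CC_IS_CLANG
    # define KBUILD_COMPILER_NAME "clang"
    #else
    # define KBUILD_COMPILER_NAME "not clang"
    #endif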
index ed27fd26262764fc44092d639030aa7b19f53ea8..e1e540ffa9793d5279c68d9bca412e8a3ef115ae 100644 (file)
@@ -429,6 +429,13 @@ config SECCOMP_FILTER
 
          See Documentation/userspace-api/seccomp_filter.rst for details.
 
+config HAVE_ARCH_STACKLEAK
+       bool
+       help
+         An architecture should select this if it has the code which
+         fills the used part of the kernel stack with the STACKLEAK_POISON
+         value before returning from system calls.
+
 config HAVE_STACKPROTECTOR
        bool
        help
index 6a8c53dec57e6e3aa22a5be371b922ebb1bd154d..b7c77bb1bfd20368a8ff95a93d5493353e58023a 100644 (file)
 })
 
 #define user_termios_to_kernel_termios(k, u) \
-       copy_from_user(k, u, sizeof(struct termios))
+       copy_from_user(k, u, sizeof(struct termios2))
 
 #define kernel_termios_to_user_termios(u, k) \
+       copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+       copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
        copy_to_user(u, k, sizeof(struct termios))
 
 #endif /* _ALPHA_TERMIOS_H */
index 1e9121c9b3c74c16d129ce6fac97f614080dca94..971311605288faea94b19d23d0b346361a11a6a9 100644 (file)
 #define TCXONC         _IO('t', 30)
 #define TCFLSH         _IO('t', 31)
 
+#define TCGETS2                _IOR('T', 42, struct termios2)
+#define TCSETS2                _IOW('T', 43, struct termios2)
+#define TCSETSW2       _IOW('T', 44, struct termios2)
+#define TCSETSF2       _IOW('T', 45, struct termios2)
+
 #define TIOCSWINSZ     _IOW('t', 103, struct winsize)
 #define TIOCGWINSZ     _IOR('t', 104, struct winsize)
 #define        TIOCSTART       _IO('t', 110)           /* start output, like ^Q */
index de6c8360fbe3657e3ddf7cd6bb648a3d8b0fdb71..4575ba34a0eaeecb9b17cb9f3b6b18a698bafdfb 100644 (file)
@@ -26,6 +26,19 @@ struct termios {
        speed_t c_ospeed;               /* output speed */
 };
 
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+       tcflag_t c_iflag;               /* input mode flags */
+       tcflag_t c_oflag;               /* output mode flags */
+       tcflag_t c_cflag;               /* control mode flags */
+       tcflag_t c_lflag;               /* local mode flags */
+       cc_t c_cc[NCCS];                /* control characters */
+       cc_t c_line;                    /* line discipline (== c_cc[19]) */
+       speed_t c_ispeed;               /* input speed */
+       speed_t c_ospeed;               /* output speed */
+};
+
 /* Alpha has matching termios and ktermios */
 
 struct ktermios {
@@ -152,6 +165,7 @@ struct ktermios {
 #define B3000000  00034
 #define B3500000  00035
 #define B4000000  00036
+#define BOTHER    00037
 
 #define CSIZE  00001400
 #define   CS5  00000000
@@ -169,6 +183,9 @@ struct ktermios {
 #define CMSPAR   010000000000          /* mark or space (stick) parity */
 #define CRTSCTS          020000000000          /* flow control */
 
+#define CIBAUD 07600000
+#define IBSHIFT        16
+
 /* c_lflag bits */
 #define ISIG   0x00000080
 #define ICANON 0x00000100
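
With struct termios2, BOTHER, CIBAUD and the TCGETS2 family wired up, alpha
userspace gains arbitrary baud rates. A minimal sketch of the usual idiom,
assuming the uapi headers can be included directly and that CBAUD is defined
on alpha as on other architectures; the helper name is illustrative, not part
of this merge:

    #include <asm/ioctls.h>    /* TCGETS2, TCSETS2 */
    #include <asm/termbits.h>  /* struct termios2, BOTHER, CBAUD */
    #include <sys/ioctl.h>

    int set_arbitrary_baud(int fd, int rate)
    {
            struct termios2 tio;

            if (ioctl(fd, TCGETS2, &tio) < 0)
                    return -1;
            tio.c_cflag &= ~CBAUD;  /* drop any legacy Bxxx rate bits */
            tio.c_cflag |= BOTHER;  /* ask for an explicit integer rate */
            tio.c_ispeed = rate;
            tio.c_ospeed = rate;
            return ioctl(fd, TCSETS2, &tio);
    }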
index c9e2a1323536313c8ee0e10906db67902c846674..6dd7835573308602f6be4c3e71d654b19390a0f1 100644 (file)
@@ -109,7 +109,7 @@ endmenu
 
 choice
        prompt "ARC Instruction Set"
-       default ISA_ARCOMPACT
+       default ISA_ARCV2
 
 config ISA_ARCOMPACT
        bool "ARCompact ISA"
@@ -176,13 +176,11 @@ endchoice
 
 config CPU_BIG_ENDIAN
        bool "Enable Big Endian Mode"
-       default n
        help
          Build kernel for Big Endian Mode of ARC CPU
 
 config SMP
        bool "Symmetric Multi-Processing"
-       default n
        select ARC_MCIP if ISA_ARCV2
        help
          This enables support for systems with more than one CPU.
@@ -254,7 +252,6 @@ config ARC_CACHE_PAGES
 config ARC_CACHE_VIPT_ALIASING
        bool "Support VIPT Aliasing D$"
        depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
-       default n
 
 endif  #ARC_CACHE
 
@@ -262,7 +259,6 @@ config ARC_HAS_ICCM
        bool "Use ICCM"
        help
           Single Cycle RAMs to store Fast Path Code
-       default n
 
 config ARC_ICCM_SZ
        int "ICCM Size in KB"
@@ -273,7 +269,6 @@ config ARC_HAS_DCCM
        bool "Use DCCM"
        help
           Single Cycle RAMs to store Fast Path Data
-       default n
 
 config ARC_DCCM_SZ
        int "DCCM Size in KB"
@@ -366,13 +361,11 @@ if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
        bool "Setup Timer IRQ as high Priority"
-       default n
        # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
        depends on !SMP
 
 config ARC_FPU_SAVE_RESTORE
        bool "Enable FPU state persistence across context switch"
-       default n
        help
           Double Precision Floating Point unit has dedicated regs which
          need to be saved/restored across context-switch.
@@ -453,7 +446,6 @@ config HIGHMEM
 
 config ARC_HAS_PAE40
        bool "Support for the 40-bit Physical Address Extension"
-       default n
        depends on ISA_ARCV2
        select HIGHMEM
        select PHYS_ADDR_T_64BIT
@@ -496,7 +488,6 @@ config HZ
 
 config ARC_METAWARE_HLINK
        bool "Support for Metaware debugger assisted Host access"
-       default n
        help
           This option allows Linux userland apps to directly access the
           host file system (open/creat/read/write etc.) with help from
@@ -524,13 +515,11 @@ config ARC_DW2_UNWIND
 
 config ARC_DBG_TLB_PARANOIA
        bool "Paranoia Checks in Low Level TLB Handlers"
-       default n
 
 endif
 
 config ARC_UBOOT_SUPPORT
        bool "Support uboot arg Handling"
-       default n
        help
          ARC Linux by default checks for uboot provided args as pointers to
          external cmdline or DTB. This however breaks in absence of uboot,
index c64c505d966c7a737b0db3c0590fb1063b549f69..df00578c279d4bc0ee03d71089769383440e7cf6 100644 (file)
@@ -6,7 +6,7 @@
 # published by the Free Software Foundation.
 #
 
-KBUILD_DEFCONFIG := nsim_700_defconfig
+KBUILD_DEFCONFIG := nsim_hs_defconfig
 
 cflags-y       += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
 cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
index ef149f59929ae394a30695fa0940060acef15817..43f17b51ee89cca00a0b2eebb7ed045d49de03a0 100644 (file)
                        bus-width = <4>;
                        dma-coherent;
                };
+
+               gpio: gpio@3000 {
+                       compatible = "snps,dw-apb-gpio";
+                       reg = <0x3000 0x20>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       gpio_port_a: gpio-controller@0 {
+                               compatible = "snps,dw-apb-gpio-port";
+                               gpio-controller;
+                               #gpio-cells = <2>;
+                               snps,nr-gpios = <24>;
+                               reg = <0>;
+                       };
+               };
        };
 
        memory@80000000 {
index 41bc08be6a3b4202bbe27f74fdc8e01a56e4c3cd..020d4493edfd0530423659a4f401258ecf7bc0be 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_PERF_EVENTS=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
@@ -95,6 +96,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 1e1c4a8011b523dc88b89fb39e90dfeab5a3154b..666314fffc601be8c455664152f9111814cdd446 100644 (file)
@@ -94,6 +94,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 6b0c0cfd5c304fd6ae58fc3fd92d9cb53e086d2d..429832b8560b878b65be199f69eb740a6b362054 100644 (file)
@@ -97,6 +97,7 @@ CONFIG_VFAT_FS=y
 CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 1dec2b4bc5e6ea70696249d6815dfe69e73eb21c..87b23b7fb781470b2897e66ff5c0ad64fc2734c4 100644 (file)
@@ -45,6 +45,9 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_DWAPB=y
 # CONFIG_HWMON is not set
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
@@ -65,6 +68,7 @@ CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 31ba224bbfb474985b49930dea193c6bbb1a5f37..6e84060e7c90a2cbba081a46f87ab607aee1d22e 100644 (file)
@@ -15,6 +15,7 @@ CONFIG_SYSCTL_SYSCALL=y
 CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
@@ -73,6 +74,7 @@ CONFIG_PROC_KCORE=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_ROOT_NFS=y
 CONFIG_DEBUG_INFO=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index 8e0b8b134cd9ed89652b88aea3bade03881e95c9..219c2a65294b82176400c9833e3606cd79f87a1c 100644 (file)
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_LBDAF is not set
index f14eeff7d3084948c16d8905677ec25a629ccdcc..35dfc6491a09486ef0176b8ced1c080efa870ec0 100644 (file)
@@ -15,6 +15,7 @@ CONFIG_EMBEDDED=y
 CONFIG_PERF_EVENTS=y
 # CONFIG_SLUB_DEBUG is not set
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_LBDAF is not set
@@ -66,5 +67,6 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
index 025298a483056b1ca782e83056f8b0a44d193809..1638e5bc967246686735bd6629ce9d7087caffd4 100644 (file)
@@ -65,5 +65,6 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
index df7b77b13b823dc0c8d41f543181b12a20212cbd..11cfbdb0f441567ee93d6283e9c8265454c818cf 100644 (file)
@@ -76,6 +76,7 @@ CONFIG_EXT2_FS_XATTR=y
 CONFIG_TMPFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_FTRACE=y
index a7f65313f84a56a3ddc0307c669bbfbcf4c0386f..e71ade3cf9c809398a8c51bffd5bdff3f39c465a 100644 (file)
@@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
 # CONFIG_AIO is not set
 CONFIG_EMBEDDED=y
 # CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
 CONFIG_SLAB=y
 CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
index db47c3541f15931b2927fd1bd27749f2568e9761..1e59a2e9c602fa2736cfc0d6fdd439b07a11105b 100644 (file)
@@ -85,6 +85,7 @@ CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index a8ac5e917d9a5895a4bc3ba30be01fd222ecec71..b5c3f6c54b032d2a84510737272cacbe1ec89b1c 100644 (file)
@@ -90,6 +90,7 @@ CONFIG_NTFS_FS=y
 CONFIG_TMPFS=y
 CONFIG_JFFS2_FS=y
 CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
index ff7d3232764a29a41503a213d3bd385e232acf42..f393b663413e49ab38bf0d4070cb7ea9f39bcfd1 100644 (file)
@@ -113,7 +113,9 @@ extern unsigned long perip_base, perip_end;
 
 /* IO coherency related Auxiliary registers */
 #define ARC_REG_IO_COH_ENABLE  0x500
+#define ARC_IO_COH_ENABLE_BIT  BIT(0)
 #define ARC_REG_IO_COH_PARTIAL 0x501
+#define ARC_IO_COH_PARTIAL_BIT BIT(0)
 #define ARC_REG_IO_COH_AP0_BASE        0x508
 #define ARC_REG_IO_COH_AP0_SIZE        0x509
 
index c22b181e8206f3162c4e0e19214f8b303f13c576..2f39d9b3886e4fc638dfa6a8a9b2fc45453d6c69 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
+#include <asm/unaligned.h>
 
 #ifdef CONFIG_ISA_ARCV2
 #include <asm/barrier.h>
@@ -94,6 +95,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
        return w;
 }
 
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr,   \
+                                 void *ptr, unsigned int count)        \
+{                                                                      \
+       bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
+       u##t *buf = ptr;                                                \
+                                                                       \
+       if (!count)                                                     \
+               return;                                                 \
+                                                                       \
+       /* Some ARC CPUs don't support unaligned accesses */            \
+       if (is_aligned) {                                               \
+               do {                                                    \
+                       u##t x = __raw_read##f(addr);                   \
+                       *buf++ = x;                                     \
+               } while (--count);                                      \
+       } else {                                                        \
+               do {                                                    \
+                       u##t x = __raw_read##f(addr);                   \
+                       put_unaligned(x, buf++);                        \
+               } while (--count);                                      \
+       }                                                               \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
@@ -126,6 +163,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
+#define __raw_writesx(t,f)                                             \
+static inline void __raw_writes##f(volatile void __iomem *addr,        \
+                                  const void *ptr, unsigned int count) \
+{                                                                      \
+       bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;        \
+       const u##t *buf = ptr;                                          \
+                                                                       \
+       if (!count)                                                     \
+               return;                                                 \
+                                                                       \
+       /* Some ARC CPUs don't support unaligned accesses */            \
+       if (is_aligned) {                                               \
+               do {                                                    \
+                       __raw_write##f(*buf++, addr);                   \
+               } while (--count);                                      \
+       } else {                                                        \
+               do {                                                    \
+                       __raw_write##f(get_unaligned(buf++), addr);     \
+               } while (--count);                                      \
+       }                                                               \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
@@ -141,10 +207,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 #define readb(c)               ({ u8  __v = readb_relaxed(c); __iormb(); __v; })
 #define readw(c)               ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
 #define readl(c)               ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l)          ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l)          ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l)          ({ __raw_readsl(p,d,l); __iormb(); })
 
 #define writeb(v,c)            ({ __iowmb(); writeb_relaxed(v,c); })
 #define writew(v,c)            ({ __iowmb(); writew_relaxed(v,c); })
 #define writel(v,c)            ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l)         ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l)         ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l)         ({ __iowmb(); __raw_writesl(p,d,l); })
 
 /*
  * Relaxed API for drivers which can handle barrier ordering themselves
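
Beyond the raw accessors, the patch also wires up readsb/readsw/readsl and
their write counterparts (bottom of this excerpt). A hedged sketch of the
driver-side use they unlock; the function and parameter names are
hypothetical:

    #include <linux/io.h>

    /* Illustration only: copy @words 32-bit values out of a single FIFO
     * register; the alignment fallback above keeps an unaligned @buf safe
     * on ARC cores without hardware unaligned access.
     */
    static void fifo_drain(void __iomem *fifo_reg, void *buf,
                           unsigned int words)
    {
            readsl(fifo_reg, buf, words);   /* repeated reads of one address */
    }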
index b2cae79a25d716165eaf65060cb8ed0be11f3b6c..eea8c5ce633504ec0e7a8f4d6a49ce6042fef4a8 100644 (file)
@@ -243,7 +243,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
        struct bcr_identity *core = &cpu->core;
-       int i, n = 0;
+       int i, n = 0, ua = 0;
 
        FIX_PTR(cpu);
 
@@ -263,10 +263,13 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
                       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
-       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
+#ifdef __ARC_UNALIGNED__
+       ua = 1;
+#endif
+       n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
                           IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
                           IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
-                          IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));
+                          IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
 
        if (i)
                n += scnprintf(buf + n, len - n, "\n\t\t: ");
index f2701c13a66b209571ff89b71ac6c93cabb9835d..cf9619d4efb4f86d68cb2417558fe3327c55c408 100644 (file)
@@ -1144,6 +1144,20 @@ noinline void __init arc_ioc_setup(void)
 {
        unsigned int ioc_base, mem_sz;
 
+       /*
+        * If the IOC was already enabled (by the bootloader) it technically
+        * needs to be reconfigured with an aperture base/size corresponding to
+        * the Linux memory map, which will certainly differ from U-Boot's. But
+        * disabling and re-enabling the IOC while DMA might be active is tricky
+        * business. To avoid random memory issues later, just panic here and
+        * ask the user to upgrade to a bootloader which doesn't enable the IOC
+        */
+       if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
+               panic("IOC already enabled, please upgrade bootloader!\n");
+
+       if (!ioc_enable)
+               return;
+
        /*
         * As for today we don't support both IOC and ZONE_HIGHMEM enabled
         * simultaneously. This happens because as of today IOC aperture covers
@@ -1187,8 +1201,8 @@ noinline void __init arc_ioc_setup(void)
                panic("IOC Aperture start must be aligned to the size of the aperture");
 
        write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
-       write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-       write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+       write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
+       write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
 
        /* Re-enable L1 dcache */
        __dc_enable();
@@ -1265,7 +1279,7 @@ void __init arc_cache_init_master(void)
        if (is_isa_arcv2() && l2_line_sz && !slc_enable)
                arc_slc_disable();
 
-       if (is_isa_arcv2() && ioc_enable)
+       if (is_isa_arcv2() && ioc_exists)
                arc_ioc_setup();
 
        if (is_isa_arcv2() && l2_line_sz && slc_enable) {
index c9da6102eb4fba4eb7f79224f826517d8f20e5c1..e2d9fc3fea01e7a93b4e7bb0b16c7b0874d33e5f 100644 (file)
@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
        struct vm_area_struct *vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       int si_code;
+       int si_code = 0;
        int ret;
        vm_fault_t fault;
        int write = regs->ecr_cause & ECR_C_PROTV_STORE;  /* ST/EX */
index d4d33cd7adad73c793c382c9b15026745a9b355b..1e2bb68231ad473faf3a4f551d4dea140be22b2a 100644 (file)
        vmmc-supply = <&vmmc_fixed>;
        bus-width = <4>;
        wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
-       cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */
+       cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */
 };
 
 &mmc3 {
index dae6e458e59fe7e4b49c65c78ed5224169a4407c..b1c988eed87c681d65bf5ec57d3b1b3bbcdf94d9 100644 (file)
                compatible = "ti,wl1271";
                reg = <2>;
                interrupt-parent = <&gpio6>;
-               interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */
+               interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */
                ref-clock-frequency = <26000000>;
                tcxo-clock-frequency = <26000000>;
        };
index f2a1d25eb6cf3f1249bcc2166bee7f86a75b2a6d..83e0fbc4a1a10cf4e4b9418b9646d7f06aaea181 100644 (file)
@@ -45,7 +45,7 @@
        };
 
        /* The voltage to the MMC card is hardwired at 3.3V */
-       vmmc: fixedregulator@0 {
+       vmmc: regulator-vmmc {
                compatible = "regulator-fixed";
                regulator-name = "vmmc";
                regulator-min-microvolt = <3300000>;
@@ -53,7 +53,7 @@
                regulator-boot-on;
         };
 
-       veth: fixedregulator@0 {
+       veth: regulator-veth {
                compatible = "regulator-fixed";
                regulator-name = "veth";
                regulator-min-microvolt = <3300000>;
index 7f9cbdf33a51009ffea23fa488ec8f1e6abe7914..2f6aa24a0b67c707068bba9fb7902525ff1158c0 100644 (file)
        };
 
        /* The voltage to the MMC card is hardwired at 3.3V */
-       vmmc: fixedregulator@0 {
+       vmmc: regulator-vmmc {
                compatible = "regulator-fixed";
                regulator-name = "vmmc";
                regulator-min-microvolt = <3300000>;
                regulator-boot-on;
         };
 
-       veth: fixedregulator@0 {
+       veth: regulator-veth {
                compatible = "regulator-fixed";
                regulator-name = "veth";
                regulator-min-microvolt = <3300000>;
index 4adb85e66be3f975894cb712211478439709f274..93762244be7f469a64d9158b2a904e0b8cde1fdd 100644 (file)
@@ -31,7 +31,7 @@
 
        wifi_pwrseq: wifi-pwrseq {
                compatible = "mmc-pwrseq-simple";
-               reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
        };
 };
 
index c318bcbc6ba7e327bcf164fd543cd47c04c52d2c..89e6fd547c7572f6bc7d243e5055da4e33dc94a2 100644 (file)
@@ -26,7 +26,7 @@
 
        wifi_pwrseq: wifi-pwrseq {
                compatible = "mmc-pwrseq-simple";
-               reset-gpios = <&expgpio 1 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
        };
 };
 
index e45a15ceb94bc4f88a4e89029dd5c18fb7e81853..69d753cac89aec6333dea879219ebfaff3aac4dc 100644 (file)
        pinctrl-0 = <&pinctrl_i2c2>;
        status = "okay";
 
-       eeprom@50 {
-               compatible = "atmel,24c04";
-               pagesize = <16>;
-               reg = <0x50>;
-       };
-
        hpa1: amp@60 {
                compatible = "ti,tpa6130a2";
                reg = <0x60>;
index b560ff88459bf1b74a0c093f3bdc15d2118c0182..5ff9a179c83c3326ab2dec5fdceee155021bf716 100644 (file)
@@ -55,7 +55,7 @@
        };
 
        chosen {
-               stdout-path = "&uart1:115200n8";
+               stdout-path = "serial0:115200n8";
        };
 
        memory@70000000 {
index ed9a980bce8501fcca0c3d357a8440cb8debd59d..beefa1b2049d7b56476a62429c2b4e4abf65ad33 100644 (file)
                        i2c1: i2c@21a0000 {
                                #address-cells = <1>;
                                #size-cells = <0>;
-                               compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c";
+                               compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
                                reg = <0x021a0000 0x4000>;
                                interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
                                clocks = <&clks IMX6SLL_CLK_I2C1>;
index 53b3408b5fab1845248b2b7ff078eba462f730ab..7d7d679945d28efe4f827e1b6197001088826b7f 100644 (file)
                regulator-name = "enet_3v3";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+               gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
+               regulator-boot-on;
+               regulator-always-on;
        };
 
        reg_pcie_gpio: regulator-pcie-gpio {
        phy-supply = <&reg_enet_3v3>;
        phy-mode = "rgmii";
        phy-handle = <&ethphy1>;
+       phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
        status = "okay";
 
        mdio {
                                MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3   0x3081
                                MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN    0x3081
                                MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M       0x91
+                               /* phy reset */
+                               MX6SX_PAD_ENET2_CRS__GPIO2_IO_7         0x10b0
                        >;
                };
 
index d8aac4a2d02a2489d1843e2d22f2f4cb317eb481..177d21fdeb288d3e458176ddf0d537a746f0d2ba 100644 (file)
                compatible = "regulator-fixed";
                regulator-min-microvolt = <3300000>;
                regulator-max-microvolt = <3300000>;
-               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
-               clock-names = "slow";
                regulator-name = "reg_wlan";
                startup-delay-us = <70000>;
                gpio = <&gpio4 21 GPIO_ACTIVE_HIGH>;
                enable-active-high;
        };
+
+       usdhc2_pwrseq: usdhc2_pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+               clock-names = "ext_clock";
+       };
 };
 
 &adc1 {
        bus-width = <4>;
        non-removable;
        vmmc-supply = <&reg_wlan>;
+       mmc-pwrseq = <&usdhc2_pwrseq>;
        cap-power-off-card;
        keep-power-in-suspend;
        status = "okay";
index 21973eb55671920148e25e6a6b0e1c469093bc8e..f27b3849d3ff3ed91d7e2d504206c4c8a7d8045b 100644 (file)
                regulator-min-microvolt = <1800000>;
                regulator-max-microvolt = <1800000>;
        };
+
+       usdhc2_pwrseq: usdhc2_pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&clks IMX7D_CLKO2_ROOT_DIV>;
+               clock-names = "ext_clock";
+       };
+};
+
+&clks {
+       assigned-clocks = <&clks IMX7D_CLKO2_ROOT_SRC>,
+                         <&clks IMX7D_CLKO2_ROOT_DIV>;
+       assigned-clock-parents = <&clks IMX7D_CKIL>;
+       assigned-clock-rates = <0>, <32768>;
 };
 
 &i2c4 {
 
 &usdhc2 { /* Wifi SDIO */
        pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_usdhc2>;
+       pinctrl-0 = <&pinctrl_usdhc2 &pinctrl_wifi_clk>;
        no-1-8-v;
        non-removable;
        keep-power-in-suspend;
        wakeup-source;
        vmmc-supply = <&reg_ap6212>;
+       mmc-pwrseq = <&usdhc2_pwrseq>;
        status = "okay";
 };
 
 };
 
 &iomuxc_lpsr {
+       pinctrl_wifi_clk: wificlkgrp {
+               fsl,pins = <
+                       MX7D_PAD_LPSR_GPIO1_IO03__CCM_CLKO2     0x7d
+               >;
+       };
+
        pinctrl_wdog: wdoggrp {
                fsl,pins = <
                        MX7D_PAD_LPSR_GPIO1_IO00__WDOG1_WDOG_B  0x74
index ac343330d0c83f203526ba458f6ddad1a5357b0d..98b682a8080cc334b40f44cc643f15b191e7a336 100644 (file)
 };
 
 &mmc3 {
-       interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+       interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
        pinctrl-0 = <&mmc3_pins &wl127x_gpio>;
        pinctrl-names = "default";
        vmmc-supply = <&wl12xx_vmmc>;
index 9d5d53fbe9c0c0212684cae4738a461062a6ed3a..c39cf2ca54da8d34d15e73dadc48bac3bace4918 100644 (file)
@@ -35,7 +35,7 @@
  * jumpering combinations for the long run.
  */
 &mmc3 {
-       interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>;
+       interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>;
        pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>;
        pinctrl-names = "default";
        vmmc-supply = <&wl12xx_vmmc>;
index 2075120cfc4d780482a89b4cd8203419f3860a6d..d8bf939a3aff9d0e0ee1c909237efd55c716dbd5 100644 (file)
 #include "rk3288.dtsi"
 
 / {
-       memory@0 {
+       /*
+        * The default coreboot on veyron devices ignores memory@0 nodes
+        * and would instead create another memory node.
+        */
+       memory {
                device_type = "memory";
                reg = <0x0 0x0 0x0 0x80000000>;
        };
index 843052f14f1cff7d32cf9622ba1ea470c529aa2f..dd0dda6ed44b5ff9d7aec2574e10a0c34ed14130 100644 (file)
                                  0x1 0x0 0x60000000 0x10000000
                                  0x2 0x0 0x70000000 0x10000000
                                  0x3 0x0 0x80000000 0x10000000>;
-                       clocks = <&mck>;
+                       clocks = <&h32ck>;
                        status = "disabled";
 
                        nand_controller: nand-controller {
index c50c36baba758f4364aac78e7f973c2c0b1a65b0..8bf1c17f8cefb6b368c40c4376a7ad918c8fde10 100644 (file)
                        interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&rcc HASH1>;
                        resets = <&rcc HASH1_R>;
-                       dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0 0x0>;
+                       dmas = <&mdma1 31 0x10 0x1000A02 0x0 0x0>;
                        dma-names = "in";
                        dma-maxburst = <2>;
                        status = "disabled";
index 742d2946b08be48d205bee2ae6041632b27dccf1..583a5a01642f2f36dc8887acf869c0480f47162a 100644 (file)
 
 &reg_dldo3 {
        regulator-always-on;
-       regulator-min-microvolt = <2500000>;
-       regulator-max-microvolt = <2500000>;
+       regulator-min-microvolt = <3300000>;
+       regulator-max-microvolt = <3300000>;
        regulator-name = "vcc-pd";
 };
 
index 41ec66a969907d492dabec284e6a101f592e0216..ca62495587602f44d3e514fb2df910edfc584ea1 100644 (file)
@@ -50,8 +50,8 @@
        compatible = "fsl,vf610m4";
 
        chosen {
-               bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw";
-               stdout-path = "&uart2";
+               bootargs = "clk_ignore_unused init=/linuxrc rw";
+               stdout-path = "serial2:115200";
        };
 
        memory@8c000000 {
index 0d289240b6ca110ab961a280ddd20fc1c567f2a4..775cac3c02bb0a31facb970e16feef83f86c6632 100644 (file)
 #include <linux/kernel.h>
 
 extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
 
 #ifdef CONFIG_CPU_CP15
 #define read_cpuid(reg)                                                        \
index 92fd2c8a9af0638834d6c2b5814b9a88911f33fe..12659ce5c1f38e2f166937b18957c4fbf5732c3d 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef _ASM_PGTABLE_2LEVEL_H
 #define _ASM_PGTABLE_2LEVEL_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Hardware-wise, we have a two level page table structure, where the first
index e25f4392e1b2868446de858701d408aaaee26eab..e1b6f280ab088fb0b8ac59b6ceb3543606c97e01 100644 (file)
@@ -23,7 +23,7 @@ struct mm_struct;
 /*
  * Don't change this structure - ASM code relies on it.
  */
-extern struct processor {
+struct processor {
        /* MISC
         * get data abort address/flags
         */
@@ -79,9 +79,13 @@ extern struct processor {
        unsigned int suspend_size;
        void (*do_suspend)(void *);
        void (*do_resume)(void *);
-} processor;
+};
 
 #ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
 extern void cpu_proc_init(void);
 extern void cpu_proc_fin(void);
 extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
 extern void cpu_do_suspend(void *);
 extern void cpu_do_resume(void *);
 #else
-#define cpu_proc_init                  processor._proc_init
-#define cpu_proc_fin                   processor._proc_fin
-#define cpu_reset                      processor.reset
-#define cpu_do_idle                    processor._do_idle
-#define cpu_dcache_clean_area          processor.dcache_clean_area
-#define cpu_set_pte_ext                        processor.set_pte_ext
-#define cpu_do_switch_mm               processor.switch_mm
 
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend                 processor.do_suspend
-#define cpu_do_resume                  processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised.  We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE().  We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f)                 cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f)                  cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+       unsigned int cpu = smp_processor_id();
+       *cpu_vtable[cpu] = *p;
+       WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+                    cpu_vtable[0]->dcache_clean_area);
+       WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+                    cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f)                 processor.f
+#define PROC_TABLE(f)                  processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+       processor = *p;
+}
+#endif
+
+#define cpu_proc_init                  PROC_VTABLE(_proc_init)
+#define cpu_check_bugs                 PROC_VTABLE(check_bugs)
+#define cpu_proc_fin                   PROC_VTABLE(_proc_fin)
+#define cpu_reset                      PROC_VTABLE(reset)
+#define cpu_do_idle                    PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area          PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext                        PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm               PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend                 PROC_VTABLE(do_suspend)
+#define cpu_do_resume                  PROC_VTABLE(do_resume)
 #endif
 
 extern void cpu_resume(void);
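
The PROC_VTABLE()/PROC_TABLE() split above boils down to two dispatch tables selected by CPU: per-CPU lookup for operations that may differ between big.Little clusters, and a fixed CPU0 lookup for calls made from preemptible context. A minimal userspace model of the pattern (all names here are illustrative stand-ins, not kernel API):

#include <stdio.h>

#define NR_CPUS 2

static int this_cpu;    /* stand-in for smp_processor_id() */

struct processor {
        void (*switch_mm)(void);
        void (*dcache_clean_area)(void);
};

static struct processor boot_proc, cpu1_proc;
static struct processor *cpu_vtable[NR_CPUS] = { &boot_proc };

/* Per-CPU dispatch for operations that differ between clusters ... */
#define PROC_VTABLE(f)  cpu_vtable[this_cpu]->f
/* ... and CPU0 dispatch for calls made from preemptible context. */
#define PROC_TABLE(f)   cpu_vtable[0]->f

static void a72_switch_mm(void) { puts("A72 switch_mm"); }
static void a53_switch_mm(void) { puts("A53 switch_mm"); }
static void dcache_clean(void)  { puts("dcache_clean_area"); }

int main(void)
{
        boot_proc = (struct processor){ a72_switch_mm, dcache_clean };
        cpu1_proc = (struct processor){ a53_switch_mm, dcache_clean };
        cpu_vtable[1] = &cpu1_proc;

        this_cpu = 0;
        PROC_VTABLE(switch_mm)();        /* calls the A72 variant */
        this_cpu = 1;
        PROC_VTABLE(switch_mm)();        /* calls the A53 variant */
        PROC_TABLE(dcache_clean_area)(); /* always CPU0's pointer */
        return 0;
}

On a real big.Little system, init_proc_vtable() above plays the role of the assignments in main(): each CPU copies its own proc_info vtable at bring-up.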
index 7be5113101915cd81a5558f45238041138fb5a58..d41d3598e5e541115c08f9b81b26fd187a7fe7af 100644 (file)
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-       if (processor.check_bugs)
-               processor.check_bugs();
+       if (cpu_check_bugs)
+               cpu_check_bugs();
 #endif
 }
 
index 0142fcfcc3d3732a6add6132f352bd0dccd036c8..bda949fd84e8b60b13ee3c79f5a28ad7e5151369 100644 (file)
@@ -183,9 +183,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
 {
        unsigned long return_hooker = (unsigned long) &return_to_handler;
-       struct ftrace_graph_ent trace;
        unsigned long old;
-       int err;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
@@ -193,21 +191,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
        old = *parent;
        *parent = return_hooker;
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
+       if (function_graph_enter(old, self_addr, frame_pointer, NULL))
                *parent = old;
-               return;
-       }
-
-       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
-                                      frame_pointer, NULL);
-       if (err == -EBUSY) {
-               *parent = old;
-               return;
-       }
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
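
function_graph_enter() consolidates exactly the two steps deleted above into one generic helper. Reconstructed from the removed lines (the real implementation lives in kernel/trace/trace_functions_graph.c), its shape is roughly:

/* Sketch only, pieced together from the code removed above. */
int function_graph_enter(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
{
        struct ftrace_graph_ent trace;

        trace.func = func;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to. */
        if (!ftrace_graph_entry(&trace))
                return -EBUSY;

        return ftrace_push_return_trace(ret, func, &trace.depth,
                                        frame_pointer, retp);
}

A nonzero return tells the arch code to restore the original return address, which is why the caller above only rewrites *parent when the helper succeeds.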
index 6e0375e7db055bc82cf0674b37b74646e2d64ff0..997b02302c3145f5ac380ae18823eba50d916ac7 100644 (file)
@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
        .size   __mmap_switched_data, . - __mmap_switched_data
 
+       __FINIT
+       .text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
        ldmfd   sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-       __FINIT
-       .text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
index ac7e08886863cfa74855e5b91c4f436e85da1e0a..375b13f7e780663eddb3f04e632751064a6b5bfd 100644 (file)
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+       [0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
-static void __init setup_processor(void)
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
 {
-       struct proc_info_list *list;
+       struct proc_info_list *list = lookup_processor_type(midr);
 
-       /*
-        * locate processor in the list of supported processor
-        * types.  The linker builds this table for us from the
-        * entries in arch/arm/mm/proc-*.S
-        */
-       list = lookup_processor_type(read_cpuid_id());
        if (!list) {
-               pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-                      read_cpuid_id());
-               while (1);
+               pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+                      smp_processor_id(), midr);
+               while (1)
+               /* can't use cpu_relax() here as it may require MMU setup */;
        }
 
+       return list;
+}
+
+static void __init setup_processor(void)
+{
+       unsigned int midr = read_cpuid_id();
+       struct proc_info_list *list = lookup_processor(midr);
+
        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-       processor = *list->proc;
-#endif
+       init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
 #endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
 #endif
 
        pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+               list->cpu_name, midr, midr & 15,
                proc_arch[cpu_architecture()], get_cr());
 
        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
index 0978282d5fc27a7c4a5e6b0e274da8bfc4c14c8d..12a6172263c0b057a94f2041accf581088374fb0 100644 (file)
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+       if (!cpu_vtable[cpu])
+               cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+       return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+       init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+       return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
        int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
        if (!smp_ops.smp_boot_secondary)
                return -ENOSYS;
 
+       ret = secondary_biglittle_prepare(cpu);
+       if (ret)
+               return ret;
+
        /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
        struct mm_struct *mm = &init_mm;
        unsigned int cpu;
 
+       secondary_biglittle_init();
+
        /*
         * The identity mapping is uncached (strongly ordered), so
         * switch away from it before attempting any exclusive accesses.
index 0bc5bd2665df8dd1433a78f93819727ae473b1f0..2cc9fe4c3a9110fc08cfd03e80b01b207dba86e5 100644 (file)
@@ -759,7 +759,9 @@ static struct davinci_id da830_ids[] = {
 };
 
 static struct davinci_gpio_platform_data da830_gpio_platform_data = {
-       .ngpio = 128,
+       .no_auto_base   = true,
+       .base           = 0,
+       .ngpio          = 128,
 };
 
 int __init da830_register_gpio(void)
index 4528bbf0c86187b91413c74242a5d82cfed4de01..e7b78df2bfefbcfd08120d2c6ec08633995472ef 100644 (file)
@@ -719,7 +719,9 @@ int __init da850_register_vpif_capture(struct vpif_capture_config
 }
 
 static struct davinci_gpio_platform_data da850_gpio_platform_data = {
-       .ngpio = 144,
+       .no_auto_base   = true,
+       .base           = 0,
+       .ngpio          = 144,
 };
 
 int __init da850_register_gpio(void)
index 1fd3619f6a09f1311eeb8153ab627cedd80d3149..cf78da5ab0548a15d9825e857482b9314502f7d6 100644 (file)
@@ -701,6 +701,46 @@ static struct resource da8xx_gpio_resources[] = {
        },
        { /* interrupt */
                .start  = IRQ_DA8XX_GPIO0,
+               .end    = IRQ_DA8XX_GPIO0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO1,
+               .end    = IRQ_DA8XX_GPIO1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO2,
+               .end    = IRQ_DA8XX_GPIO2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO3,
+               .end    = IRQ_DA8XX_GPIO3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO4,
+               .end    = IRQ_DA8XX_GPIO4,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO5,
+               .end    = IRQ_DA8XX_GPIO5,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO6,
+               .end    = IRQ_DA8XX_GPIO6,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO7,
+               .end    = IRQ_DA8XX_GPIO7,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DA8XX_GPIO8,
                .end    = IRQ_DA8XX_GPIO8,
                .flags  = IORESOURCE_IRQ,
        },
index 9f7d38d12c8886134a0b4d149b6593bf228efc4a..4c6e0bef4509277fa065fbddc66ac3ee38f32ef4 100644 (file)
@@ -548,12 +548,44 @@ static struct resource dm355_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_DM355_GPIOBNK0,
+               .end    = IRQ_DM355_GPIOBNK0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK1,
+               .end    = IRQ_DM355_GPIOBNK1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK2,
+               .end    = IRQ_DM355_GPIOBNK2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK3,
+               .end    = IRQ_DM355_GPIOBNK3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK4,
+               .end    = IRQ_DM355_GPIOBNK4,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK5,
+               .end    = IRQ_DM355_GPIOBNK5,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM355_GPIOBNK6,
                .end    = IRQ_DM355_GPIOBNK6,
                .flags  = IORESOURCE_IRQ,
        },
 };
 
 static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
+       .no_auto_base   = true,
+       .base           = 0,
        .ngpio          = 104,
 };
 
index abcf2a5ed89b5e4780430911b1ef584711cf0142..01fb2b0c82de3d5840410e921b154089017ffe3b 100644 (file)
@@ -267,12 +267,49 @@ static struct resource dm365_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_DM365_GPIO0,
+               .end    = IRQ_DM365_GPIO0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO1,
+               .end    = IRQ_DM365_GPIO1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO2,
+               .end    = IRQ_DM365_GPIO2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO3,
+               .end    = IRQ_DM365_GPIO3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO4,
+               .end    = IRQ_DM365_GPIO4,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO5,
+               .end    = IRQ_DM365_GPIO5,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO6,
+               .end    = IRQ_DM365_GPIO6,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM365_GPIO7,
                .end    = IRQ_DM365_GPIO7,
                .flags  = IORESOURCE_IRQ,
        },
 };
 
 static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
+       .no_auto_base   = true,
+       .base           = 0,
        .ngpio          = 104,
        .gpio_unbanked  = 8,
 };
index 0720da7809a693eee06c22a80b0449a1cad06e17..38f92b7d413ef65e7d48a300760e04ae83b283b3 100644 (file)
@@ -492,12 +492,34 @@ static struct resource dm644_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_GPIOBNK0,
+               .end    = IRQ_GPIOBNK0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK1,
+               .end    = IRQ_GPIOBNK1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK2,
+               .end    = IRQ_GPIOBNK2,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK3,
+               .end    = IRQ_GPIOBNK3,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_GPIOBNK4,
                .end    = IRQ_GPIOBNK4,
                .flags  = IORESOURCE_IRQ,
        },
 };
 
 static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
+       .no_auto_base   = true,
+       .base           = 0,
        .ngpio          = 71,
 };
 
index 6bd2ed069d0d7491a28b5af9665838e030e41ed3..7dc54b2a610f4f4bb1cae9a3956712fa19d69e59 100644 (file)
@@ -442,12 +442,24 @@ static struct resource dm646x_gpio_resources[] = {
        },
        {       /* interrupt */
                .start  = IRQ_DM646X_GPIOBNK0,
+               .end    = IRQ_DM646X_GPIOBNK0,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM646X_GPIOBNK1,
+               .end    = IRQ_DM646X_GPIOBNK1,
+               .flags  = IORESOURCE_IRQ,
+       },
+       {
+               .start  = IRQ_DM646X_GPIOBNK2,
                .end    = IRQ_DM646X_GPIOBNK2,
                .flags  = IORESOURCE_IRQ,
        },
 };
 
 static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
+       .no_auto_base   = true,
+       .base           = 0,
        .ngpio          = 43,
 };
 
index 243a108a940b46c9c0d9b13d2802beb11f84d2d9..fd0053e47a151179db824e1aac6fe1526d20327e 100644 (file)
@@ -110,7 +110,7 @@ int __init imx6sx_cpuidle_init(void)
         * except for power up sw2iso which need to be
         * larger than LDO ramp up time.
         */
-       imx_gpc_set_arm_power_up_timing(2, 1);
+       imx_gpc_set_arm_power_up_timing(0xf, 1);
        imx_gpc_set_arm_power_down_timing(1, 1);
 
        return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
index 446edaeb78a71d07a8c719732455589ffa67b49e..a96abcf521b4b095a13658e51f409884d89b35b5 100644 (file)
@@ -44,10 +44,12 @@ static inline int cpu_is_pxa910(void)
 #define cpu_is_pxa910()        (0)
 #endif
 
-#ifdef CONFIG_CPU_MMP2
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
 static inline int cpu_is_mmp2(void)
 {
-       return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+       return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+               (((mmp_chip_id & 0xfff) == 0x410) ||
+                ((mmp_chip_id & 0xfff) == 0x610));
 }
 #else
 #define cpu_is_mmp2()  (0)
index af318d958fd2a7c9796ad2384c8c122ca04358e2..17886744dbe694b1598524b72333374603a6b658 100644 (file)
@@ -750,6 +750,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
        struct modem_private_data *priv = port->private_data;
        int ret;
 
+       if (!priv)
+               return;
+
        if (IS_ERR(priv->regulator))
                return;
 
@@ -773,7 +776,7 @@ static struct plat_serial8250_port ams_delta_modem_ports[] = {
        {
                .membase        = IOMEM(MODEM_VIRT),
                .mapbase        = MODEM_PHYS,
-               .irq            = -EINVAL, /* changed later */
+               .irq            = IRQ_NOTCONNECTED, /* changed later */
                .flags          = UPF_BOOT_AUTOCONF,
                .irqflags       = IRQF_TRIGGER_RISING,
                .iotype         = UPIO_MEM,
@@ -864,8 +867,7 @@ static int __init modem_nreset_init(void)
 
 
 /*
- * This function expects MODEM IRQ number already assigned to the port
- * and fails if it's not.
+ * This function expects the MODEM IRQ number to already be assigned to the port.
  * The MODEM device requires its RESET# pin kept high during probe.
  * That requirement can be fulfilled in several ways:
  * - with a descriptor of already functional modem_nreset regulator
@@ -888,9 +890,6 @@ static int __init ams_delta_modem_init(void)
        if (!machine_is_ams_delta())
                return -ENODEV;
 
-       if (ams_delta_modem_ports[0].irq < 0)
-               return ams_delta_modem_ports[0].irq;
-
        omap_cfg_reg(M14_1510_GPIO2);
 
        /* Initialize the modem_nreset regulator consumer before use */
index 9500b6e2738019a4fb53e50c8150a2972ca8c391..f86b72d1d59e51f4af15319df87ee61141b4fd02 100644 (file)
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
 
        return 0;
 }
-#else
-static inline int omapdss_init_fbdev(void)
+
+static const char * const omapdss_compat_names[] __initconst = {
+       "ti,omap2-dss",
+       "ti,omap3-dss",
+       "ti,omap4-dss",
+       "ti,omap5-dss",
+       "ti,dra7-dss",
+};
+
+static struct device_node * __init omapdss_find_dss_of_node(void)
 {
-       return 0;
+       struct device_node *node;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
+               node = of_find_compatible_node(NULL, NULL,
+                       omapdss_compat_names[i]);
+               if (node)
+                       return node;
+       }
+
+       return NULL;
 }
+
+static int __init omapdss_init_of(void)
+{
+       int r;
+       struct device_node *node;
+       struct platform_device *pdev;
+
+       /* only create dss helper devices if dss is enabled in the .dts */
+
+       node = omapdss_find_dss_of_node();
+       if (!node)
+               return 0;
+
+       if (!of_device_is_available(node))
+               return 0;
+
+       pdev = of_find_device_by_node(node);
+
+       if (!pdev) {
+               pr_err("Unable to find DSS platform device\n");
+               return -ENODEV;
+       }
+
+       r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+       if (r) {
+               pr_err("Unable to populate DSS submodule devices\n");
+               return r;
+       }
+
+       return omapdss_init_fbdev();
+}
+omap_device_initcall(omapdss_init_of);
 #endif /* CONFIG_FB_OMAP2 */
 
 static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
 
        return r;
 }
-
-static const char * const omapdss_compat_names[] __initconst = {
-       "ti,omap2-dss",
-       "ti,omap3-dss",
-       "ti,omap4-dss",
-       "ti,omap5-dss",
-       "ti,dra7-dss",
-};
-
-static struct device_node * __init omapdss_find_dss_of_node(void)
-{
-       struct device_node *node;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
-               node = of_find_compatible_node(NULL, NULL,
-                       omapdss_compat_names[i]);
-               if (node)
-                       return node;
-       }
-
-       return NULL;
-}
-
-static int __init omapdss_init_of(void)
-{
-       int r;
-       struct device_node *node;
-       struct platform_device *pdev;
-
-       /* only create dss helper devices if dss is enabled in the .dts */
-
-       node = omapdss_find_dss_of_node();
-       if (!node)
-               return 0;
-
-       if (!of_device_is_available(node))
-               return 0;
-
-       pdev = of_find_device_by_node(node);
-
-       if (!pdev) {
-               pr_err("Unable to find DSS platform device\n");
-               return -ENODEV;
-       }
-
-       r = of_platform_populate(node, NULL, NULL, &pdev->dev);
-       if (r) {
-               pr_err("Unable to populate DSS submodule devices\n");
-               return r;
-       }
-
-       return omapdss_init_fbdev();
-}
-omap_device_initcall(omapdss_init_of);
index 7b95729e83594d330e4f0d5e3205a05b4d0751e1..38a1be6c3694f2922280c6a74cfcb792b1e4a8b6 100644 (file)
@@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
  * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
  * omap44xx_prm_reconfigure_io_chain() must be called.  No return value.
  */
-static void __init omap44xx_prm_enable_io_wakeup(void)
+static void omap44xx_prm_enable_io_wakeup(void)
 {
        s32 inst = omap4_prmst_get_prm_dev_inst();
 
index 215df435bfb9881f347d59f90aa0a98765d304b4..2149b47a0c5ace25958929ca44692df779950fbf 100644 (file)
@@ -360,14 +360,16 @@ v7_dma_inv_range:
        ALT_UP(W(nop))
 #endif
        mcrne   p15, 0, r0, c7, c14, 1          @ clean & invalidate D / U line
+       addne   r0, r0, r2
 
        tst     r1, r3
        bic     r1, r1, r3
        mcrne   p15, 0, r1, c7, c14, 1          @ clean & invalidate D / U line
-1:
-       mcr     p15, 0, r0, c7, c6, 1           @ invalidate D / U line
-       add     r0, r0, r2
        cmp     r0, r1
+1:
+       mcrlo   p15, 0, r0, c7, c6, 1           @ invalidate D / U line
+       addlo   r0, r0, r2
+       cmplo   r0, r1
        blo     1b
        dsb     st
        ret     lr
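
Rendered in C, the reordered assembly above turns the invalidate loop from test-after-op into test-before-op, so the line one past the range, already cleaned and invalidated at the unaligned edges, is never invalidated behind the caller's back. A runnable sketch with the cache operations replaced by prints (LINE is an assumed line size):

#include <stdio.h>

#define LINE 64UL  /* assumed cache-line size, for illustration */

static void clean_and_inv_line(unsigned long a) { printf("c+i %#lx\n", a); }
static void inv_line(unsigned long a)           { printf("inv %#lx\n", a); }

static void dma_inv_range(unsigned long start, unsigned long end)
{
        unsigned long start_off = start & (LINE - 1);
        unsigned long end_off   = end & (LINE - 1);

        start &= ~(LINE - 1);
        end   &= ~(LINE - 1);
        if (start_off) {                /* mcrne + the new addne     */
                clean_and_inv_line(start);
                start += LINE;          /* skip the line just handled */
        }
        if (end_off)                    /* mcrne on the tail line    */
                clean_and_inv_line(end);

        while (start < end) {           /* cmp first: mcrlo/addlo/cmplo */
                inv_line(start);
                start += LINE;
        }
}

int main(void)
{
        dma_inv_range(0x1010, 0x10f0);
        return 0;
}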
index 788486e830d3e644bbf4c608af6c75e64b5bb84a..32aa2a2aa260cb59eb10557f2c4159588e350b3c 100644 (file)
 /*
  * dcimvac: Invalidate data cache line by MVA to PoC
  */
-.macro dcimvac, rt, tmp
-       v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC
+.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
+.macro dcimvac\c, rt, tmp
+       v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
 .endm
+.endr
 
 /*
  * dccmvau: Clean data cache line by MVA to PoU
@@ -369,14 +371,16 @@ v7m_dma_inv_range:
        tst     r0, r3
        bic     r0, r0, r3
        dccimvacne r0, r3
+       addne   r0, r0, r2
        subne   r3, r2, #1      @ restore r3, corrupted by v7m's dccimvac
        tst     r1, r3
        bic     r1, r1, r3
        dccimvacne r1, r3
-1:
-       dcimvac r0, r3
-       add     r0, r0, r2
        cmp     r0, r1
+1:
+       dcimvaclo r0, r3
+       addlo   r0, r0, r2
+       cmplo   r0, r1
        blo     1b
        dsb     st
        ret     lr
index 661fe48ab78da175732920d87046ec7460bc5d8f..78de138aa66dc48ccdfc223dc75dd017961feb28 100644 (file)
@@ -829,7 +829,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs)
 {
-       int ret;
+       int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_pfn(dev, dma_addr);
index 81d0efb055c66080e976f9504c69866f7699b1a6..19516fbc2c55a65c761094bf3a67c8cd69568183 100644 (file)
        .endm
 
 .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.Little with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .section ".rodata"
+#endif
        .type   \name\()_processor_functions, #object
        .align 2
 ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions)
        .endif
 
        .size   \name\()_processor_functions, . - \name\()_processor_functions
+#if 1 // defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+       .previous
+#endif
 .endm
 
 .macro define_cache_functions name:req
index 5544b82a2e7a553d015e23d77a9017682dd91f11..9a07916af8dd27dd021781c06451340ce6d03032 100644 (file)
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
        case ARM_CPU_PART_CORTEX_A17:
        case ARM_CPU_PART_CORTEX_A73:
        case ARM_CPU_PART_CORTEX_A75:
-               if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-                       goto bl_error;
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_bpiall;
                spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
        case ARM_CPU_PART_CORTEX_A15:
        case ARM_CPU_PART_BRAHMA_B15:
-               if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-                       goto bl_error;
                per_cpu(harden_branch_predictor_fn, cpu) =
                        harden_branch_predictor_iciallu;
                spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
-                       if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-                               goto bl_error;
                        per_cpu(harden_branch_predictor_fn, cpu) =
                                call_hvc_arch_workaround_1;
-                       processor.switch_mm = cpu_v7_hvc_switch_mm;
+                       cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
                        spectre_v2_method = "hypervisor";
                        break;
 
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
                                          ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                        if ((int)res.a0 != 0)
                                break;
-                       if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-                               goto bl_error;
                        per_cpu(harden_branch_predictor_fn, cpu) =
                                call_smc_arch_workaround_1;
-                       processor.switch_mm = cpu_v7_smc_switch_mm;
+                       cpu_do_switch_mm = cpu_v7_smc_switch_mm;
                        spectre_v2_method = "firmware";
                        break;
 
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
        if (spectre_v2_method)
                pr_info("CPU%u: Spectre v2: using %s workaround\n",
                        smp_processor_id(), spectre_v2_method);
-       return;
-
-bl_error:
-       pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-               cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)
index 6fe52819e0148c6f3f04b11c75e278cd0b04a1f9..339eb17c9808e2c04a043485e42e5d29a49de347 100644 (file)
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm)
        hvc     #0
        ldmfd   sp!, {r0 - r3}
        b       cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+ENDPROC(cpu_v7_hvc_switch_mm)
 #endif
 ENTRY(cpu_v7_iciallu_switch_mm)
        mov     r3, #0
index 5b4ff9373c894515ee211a3ce47f745e021a7ed6..8a6880d528b6f1175bd275b7ed0946b6029718bd 100644 (file)
@@ -28,10 +28,15 @@ void __init orion_mpp_conf(unsigned int *mpp_list, unsigned int variant_mask,
                           unsigned int mpp_max, void __iomem *dev_bus)
 {
        unsigned int mpp_nr_regs = (1 + mpp_max/8);
-       u32 mpp_ctrl[mpp_nr_regs];
+       u32 mpp_ctrl[8];
        int i;
 
        printk(KERN_DEBUG "initial MPP regs:");
+       if (mpp_nr_regs > ARRAY_SIZE(mpp_ctrl)) {
+               printk(KERN_ERR "orion_mpp_conf: invalid mpp_max\n");
+               return;
+       }
+
        for (i = 0; i < mpp_nr_regs; i++) {
                mpp_ctrl[i] = readl(mpp_ctrl_addr(i, dev_bus));
                printk(" %08x", mpp_ctrl[i]);
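
This hunk follows the kernel-wide VLA-removal pattern: a worst-case fixed array guarded by an explicit bounds check replaces the on-stack variable-length array. The same pattern in a self-contained form (sizes and register values are made up for illustration):

#include <stdio.h>

#define MAX_MPP_REGS 8          /* assumed worst case, as in the hunk */

static int read_mpp_regs(unsigned int nr_regs)
{
        unsigned int mpp_ctrl[MAX_MPP_REGS];    /* was: mpp_ctrl[nr_regs] */
        unsigned int i;

        if (!nr_regs || nr_regs > MAX_MPP_REGS) /* reject, don't overflow */
                return -1;

        for (i = 0; i < nr_regs; i++)
                mpp_ctrl[i] = i * 0x11;         /* stand-in for readl()  */
        printf("read %u regs, last=%#x\n", nr_regs, mpp_ctrl[nr_regs - 1]);
        return 0;
}

int main(void)
{
        return read_mpp_regs(7);
}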
index b2aa9b32bff2b5e9d2e6d102a4cd58f6cf8c5676..2c118a6ab358736e8227214b081fce343b48b29f 100644 (file)
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
        }
 
        /* Copy arch-dep-instance from template. */
-       memcpy(code, &optprobe_template_entry,
+       memcpy(code, (unsigned char *)optprobe_template_entry,
                        TMPL_END_IDX * sizeof(kprobe_opcode_t));
 
        /* Adjust buffer according to instruction. */
index aff6e6eadc700f08241668e66c1235ea825217ce..ee7b07938dd59311f47fe5b385bcc27bfa4878f8 100644 (file)
@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
         */
        ufp_exc->fpexc = hwstate->fpexc;
        ufp_exc->fpinst = hwstate->fpinst;
-       ufp_exc->fpinst2 = ufp_exc->fpinst2;
+       ufp_exc->fpinst2 = hwstate->fpinst2;
 
        /* Ensure that VFP is disabled. */
        vfp_flush_hwstate(thread);
index 787d7850e0643d80197e29bcfbfedf6d40ab46eb..ea2ab0330e3a14f67deabada3b8903cca6fcea3b 100644 (file)
@@ -497,6 +497,24 @@ config ARM64_ERRATUM_1188873
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_1286807
+       bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation"
+       default y
+       select ARM64_WORKAROUND_REPEAT_TLBI
+       help
+         This option adds a workaround for ARM Cortex-A76 erratum 1286807
+
+         On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
+         address for a cacheable mapping of a location is being
+         accessed by a core while another core is remapping the virtual
+         address to a new physical page using the recommended
+         break-before-make sequence, then under very rare circumstances
+         TLBI+DSB completes before a read using the translation being
+         invalidated has been observed by other observers. The
+         workaround repeats the TLBI+DSB operation.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
@@ -566,9 +584,16 @@ config QCOM_FALKOR_ERRATUM_1003
          is unchanged. Work around the erratum by invalidating the walk cache
          entries for the trampoline before entering the kernel proper.
 
+config ARM64_WORKAROUND_REPEAT_TLBI
+       bool
+       help
+         Enable the repeat TLBI workaround for Falkor erratum 1009 and
+         Cortex-A76 erratum 1286807.
+
 config QCOM_FALKOR_ERRATUM_1009
        bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
        default y
+       select ARM64_WORKAROUND_REPEAT_TLBI
        help
          On Falkor v1, the CPU may prematurely complete a DSB following a
          TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation
index b4e994cd3a421d2b0ebe988b227b670039ba6bc5..6cb9fc7e9382d7f48f1b9d98f00be6b8d9df1f4a 100644 (file)
@@ -134,6 +134,7 @@ vdso_install:
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
 
+ifeq ($(KBUILD_EXTMOD),)
 # We need to generate vdso-offsets.h before compiling certain files in kernel/.
 # In order to do that, we should use the archprepare target, but we can't since
 # asm-offsets.h is included in some files used to generate vdso-offsets.h, and
@@ -143,6 +144,7 @@ archclean:
 prepare: vdso_prepare
 vdso_prepare: prepare0
        $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h
+endif
 
 define archhelp
   echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
index 8253a1a9e9857112f43c24d85c5c411e653376dd..fef7351e9f677da62cd0c50e8c424a3590dd7b51 100644 (file)
                        clock-names = "stmmaceth";
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
+                       snps,multicast-filter-bins = <256>;
                        status = "disabled";
                };
 
                        clock-names = "stmmaceth";
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
+                       snps,multicast-filter-bins = <256>;
                        status = "disabled";
                };
 
                        clock-names = "stmmaceth";
                        tx-fifo-depth = <16384>;
                        rx-fifo-depth = <16384>;
+                       snps,multicast-filter-bins = <256>;
                        status = "disabled";
                };
 
index 64632c8738887804df7a81d5f2b5715280bfffb6..01ea662afba876638c1d44fc0d89f3e52ad4e37e 100644 (file)
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x000>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu1: cpu@1 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x001>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu2: cpu@100 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x100>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
                cpu3: cpu@101 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a72", "arm,armv8";
                        reg = <0x101>;
                        enable-method = "psci";
-                       cpu-idle-states = <&CPU_SLEEP_0>;
                };
        };
 };
index 073610ac0a53e8dcd1786b4b8c8c59b7b955ef50..7d94c1fa592a064d2d42709ce9d6a198ef008eb6 100644 (file)
                method = "smc";
        };
 
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               idle_states {
-                       entry_method = "arm,pcsi";
-
-                       CPU_SLEEP_0: cpu-sleep-0 {
-                               compatible = "arm,idle-state";
-                               local-timer-stop;
-                               arm,psci-suspend-param = <0x0010000>;
-                               entry-latency-us = <80>;
-                               exit-latency-us  = <160>;
-                               min-residency-us = <320>;
-                       };
-
-                       CLUSTER_SLEEP_0: cluster-sleep-0 {
-                               compatible = "arm,idle-state";
-                               local-timer-stop;
-                               arm,psci-suspend-param = <0x1010000>;
-                               entry-latency-us = <500>;
-                               exit-latency-us = <1000>;
-                               min-residency-us = <2500>;
-                       };
-               };
-       };
-
        ap806 {
                #address-cells = <2>;
                #size-cells = <2>;
index 5d6005c9b097529522b9112d3682d636b633f61b..710c5c3d87d30ef7dcb1657bb2081883f8b04aa0 100644 (file)
        model = "Bananapi BPI-R64";
        compatible = "bananapi,bpi-r64", "mediatek,mt7622";
 
+       aliases {
+               serial0 = &uart0;
+       };
+
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+               stdout-path = "serial0:115200n8";
+               bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
        };
 
        cpus {
index dcad0869b84ca01dc76e2924506583daf1c65c54..3f783348c66a690f3acce45ff76bd5da7c14f48a 100644 (file)
        model = "MediaTek MT7622 RFB1 board";
        compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
 
+       aliases {
+               serial0 = &uart0;
+       };
+
        chosen {
-               bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+               stdout-path = "serial0:115200n8";
+               bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
        };
 
        cpus {
index fe0c875f1d9513538e5a18c74f641557d89675c8..14a1028ca3a64bd54bd21608655dcf1895d25fa1 100644 (file)
                #reset-cells = <1>;
        };
 
-       timer: timer@10004000 {
-               compatible = "mediatek,mt7622-timer",
-                            "mediatek,mt6577-timer";
-               reg = <0 0x10004000 0 0x80>;
-               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_LOW>;
-               clocks = <&infracfg CLK_INFRA_APXGPT_PD>,
-                        <&topckgen CLK_TOP_RTC>;
-               clock-names = "system-clk", "rtc-clk";
-       };
-
        scpsys: scpsys@10006000 {
                compatible = "mediatek,mt7622-scpsys",
                             "syscon";
index b4276da1fb0d98d7441bb4ac7161d90fb914e8a9..11fd1fe8bdb5209793c09afccff1e7893d2e4e00 100644 (file)
                };
        };
 };
+
+&tlmm {
+       gpio-reserved-ranges = <0 4>, <81 4>;
+};
index eedfaf8922e2afbc4d50b91dc13ebdc486033e1a..b3def035817758fb4baf710684818d3fbeec0f42 100644 (file)
        };
 };
 
+&gcc {
+       protected-clocks = <GCC_QSPI_CORE_CLK>,
+                          <GCC_QSPI_CORE_CLK_SRC>,
+                          <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+};
+
 &i2c10 {
        status = "okay";
        clock-frequency = <400000>;
        status = "okay";
 };
 
+&tlmm {
+       gpio-reserved-ranges = <0 4>, <81 4>;
+};
+
 &uart9 {
        status = "okay";
 };
index b5f2273caca4ded1e6bc0cfe3a5e52b97a3fd854..a79c8d369e0b48c4ddb3448bf4d2498acb676504 100644 (file)
                        clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x35>, <&dmac1 0x34>,
                               <&dmac2 0x35>, <&dmac2 0x34>;
-                       dma-names = "tx", "rx";
+                       dma-names = "tx", "rx", "tx", "rx";
                        power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
                        resets = <&cpg 518>;
                        status = "disabled";
index fe2e2c051cc93fc0668a3d3e59a3b13432ef4766..5a7012be0d6ad953198c035df5626f2ff8ce0fe9 100644 (file)
@@ -15,7 +15,7 @@
 
        aliases {
                serial0 = &scif0;
-               ethernet0 = &avb;
+               ethernet0 = &gether;
        };
 
        chosen {
        };
 };
 
-&avb {
-       pinctrl-0 = <&avb_pins>;
-       pinctrl-names = "default";
-
-       phy-mode = "rgmii-id";
-       phy-handle = <&phy0>;
-       renesas,no-ether-link;
-       status = "okay";
-
-       phy0: ethernet-phy@0 {
-               rxc-skew-ps = <1500>;
-               reg = <0>;
-               interrupt-parent = <&gpio1>;
-               interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
-       };
-};
-
 &canfd {
        pinctrl-0 = <&canfd0_pins>;
        pinctrl-names = "default";
        clock-frequency = <32768>;
 };
 
+&gether {
+       pinctrl-0 = <&gether_pins>;
+       pinctrl-names = "default";
+
+       phy-mode = "rgmii-id";
+       phy-handle = <&phy0>;
+       renesas,no-ether-link;
+       status = "okay";
+
+       phy0: ethernet-phy@0 {
+               rxc-skew-ps = <1500>;
+               reg = <0>;
+               interrupt-parent = <&gpio4>;
+               interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+       };
+};
+
 &i2c0 {
        pinctrl-0 = <&i2c0_pins>;
        pinctrl-names = "default";
 };
 
 &pfc {
-       avb_pins: avb {
-               groups = "avb_mdio", "avb_rgmii";
-               function = "avb";
-       };
-
        canfd0_pins: canfd0 {
                groups = "canfd0_data_a";
                function = "canfd0";
        };
 
+       gether_pins: gether {
+               groups = "gether_mdio_a", "gether_rgmii",
+                        "gether_txcrefclk", "gether_txcrefclk_mega";
+               function = "gether";
+       };
+
        i2c0_pins: i2c0 {
                groups = "i2c0";
                function = "i2c0";
index 2dceeea29b8351fa35358600d43aac4516fce8a2..1e6a71066c163fd7bd2493b286e83a0596942930 100644 (file)
 };
 
 &pcie0 {
-       ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>;
+       ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>;
        num-lanes = <4>;
        pinctrl-names = "default";
        pinctrl-0 = <&pcie_clkreqn_cpm>;
index 6c8c4ab044aaf7368a13acf41a4104b9aef233f9..56abbb08c133b5ca25499b4364159facb94438be 100644 (file)
                regulator-always-on;
                vin-supply = <&vcc_sys>;
        };
-
-       vdd_log: vdd-log {
-               compatible = "pwm-regulator";
-               pwms = <&pwm2 0 25000 0>;
-               regulator-name = "vdd_log";
-               regulator-min-microvolt = <800000>;
-               regulator-max-microvolt = <1400000>;
-               regulator-always-on;
-               regulator-boot-on;
-               vin-supply = <&vcc_sys>;
-       };
-
 };
 
 &cpu_l0 {
index affc3c3093532ebdb06d74326571e822ffe59e9f..8d7b47f9dfbf4e43f15b821c92649b594788103a 100644 (file)
@@ -36,7 +36,7 @@
 
        wkup_uart0: serial@42300000 {
                compatible = "ti,am654-uart";
-               reg = <0x00 0x42300000 0x00 0x100>;
+               reg = <0x42300000 0x100>;
                reg-shift = <2>;
                reg-io-width = <4>;
                interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>;
index 3cb995606e605badbc58d977201eb10801ec3fdc..c9a57d11330b85eeb5b965ab67e2e5b8b95e3a46 100644 (file)
@@ -308,6 +308,9 @@ CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_SERIAL_MVEBU_UART=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_VIRTIO_CONSOLE=y
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
 CONFIG_TCG_TPM=y
 CONFIG_TCG_TIS_I2C_INFINEON=y
 CONFIG_I2C_CHARDEV=y
index caa955f10e19509adf568e4a44bc357f2340b562..fac54fb050d00e856823817ed381182d40b39187 100644 (file)
@@ -56,6 +56,19 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 {
        return is_compat_task();
 }
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+                                              const char *name)
+{
+       /*
+        * Since all syscall functions have __arm64_ prefix, we must skip it.
+        * However, as we described above, we decided to ignore compat
+        * syscalls, so we don't care about __arm64_compat_ prefix here.
+        */
+       return !strcmp(sym + 8, name);
+}
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* __ASM_FTRACE_H */
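
A userspace rendering of the match above shows why skipping eight bytes is enough: every arm64 syscall entry point carries the "__arm64_" prefix, which is exactly eight characters.

#include <stdio.h>
#include <string.h>

static int arch_syscall_match_sym_name(const char *sym, const char *name)
{
        return !strcmp(sym + 8, name);  /* 8 == strlen("__arm64_") */
}

int main(void)
{
        /* Prints 1: "__arm64_sys_openat" matches the generic "sys_openat". */
        printf("%d\n", arch_syscall_match_sym_name("__arm64_sys_openat",
                                                   "sys_openat"));
        return 0;
}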
index 9234013e759e56a9ebd5c326cab49bd7c66df323..21a81b59a0ccd5419be92ec6e661a3e05e5820ff 100644 (file)
@@ -96,6 +96,7 @@ static inline unsigned long __percpu_##op(void *ptr,                  \
                : [val] "Ir" (val));                                    \
                break;                                                  \
        default:                                                        \
+               ret = 0;                                                \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
@@ -125,6 +126,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
                ret = READ_ONCE(*(u64 *)ptr);
                break;
        default:
+               ret = 0;
                BUILD_BUG();
        }
 
@@ -194,6 +196,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
                : [val] "r" (val));
                break;
        default:
+               ret = 0;
                BUILD_BUG();
        }
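
The added ret = 0 lines only placate the compiler: BUILD_BUG() guarantees the default case cannot survive a correct build, yet flow analysis still sees ret escaping the switch uninitialized. A userspace analogue of the pattern, with assert(0) standing in for BUILD_BUG():

#include <assert.h>
#include <stdio.h>

static unsigned long percpu_read_demo(const void *ptr, int size)
{
        unsigned long ret;

        switch (size) {
        case 4:
                ret = *(const unsigned int *)ptr;
                break;
        case 8:
                ret = *(const unsigned long *)ptr;
                break;
        default:
                ret = 0;        /* silences -Wmaybe-uninitialized */
                assert(0);      /* stand-in for BUILD_BUG() */
        }
        return ret;
}

int main(void)
{
        unsigned int v = 42;
        printf("%lu\n", percpu_read_demo(&v, sizeof(v)));
        return 0;
}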
 
index 3e2091708b8e51f04b90e8d6b14c586dd54afeab..6b0d4dff50125e49522212cb7e6db1a778da539d 100644 (file)
 #define KERNEL_DS      UL(-1)
 #define USER_DS                (TASK_SIZE_64 - 1)
 
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN   0
+
 #ifndef __ASSEMBLY__
 #ifdef __KERNEL__
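
For context, generic driver code pads receive buffers by NET_IP_ALIGN (2 by default) so the IP header lands on a 4-byte boundary; defining it as 0 makes that padding vanish on arm64. A sketch of the usual driver-side pattern (not from this patch; standard netdev API assumed):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative driver pattern: generic code sizes and pads RX skbs
 * with NET_IP_ALIGN; with the definition above it is 0 on arm64, so
 * the skb_reserve() below compiles away and buffers stay aligned for
 * DMA rather than for the IP header. */
static struct sk_buff *rx_alloc(struct net_device *dev, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

        if (skb)
                skb_reserve(skb, NET_IP_ALIGN); /* no-op when 0 */
        return skb;
}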
 
index 0c909c4a932ff3da741fbda7c16cf6a3780d6107..842fb9572661063bd0db34b27e14d4829093208d 100644 (file)
                         SCTLR_ELx_SA     | SCTLR_ELx_I    | SCTLR_ELx_WXN | \
                         SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
 
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
 #error "Inconsistent SCTLR_EL2 set/clear bits"
 #endif
 
                         SCTLR_EL1_UMA | SCTLR_ELx_WXN     | ENDIAN_CLEAR_EL1 |\
                         SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI  | SCTLR_EL1_RES0)
 
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
 #error "Inconsistent SCTLR_EL1 set/clear bits"
 #endif
 
index c3c0387aee18f2aaa2b1be745eeee6c340da2d5f..5dfd23897dea918f4cb670096934661dd0674a5f 100644 (file)
                   ALTERNATIVE("nop\n                   nop",                  \
                               "dsb ish\n               tlbi " #op,            \
                               ARM64_WORKAROUND_REPEAT_TLBI,                   \
-                              CONFIG_QCOM_FALKOR_ERRATUM_1009)                \
+                              CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)            \
                            : : )
 
 #define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"                           \
                   ALTERNATIVE("nop\n                   nop",                  \
                               "dsb ish\n               tlbi " #op ", %0",     \
                               ARM64_WORKAROUND_REPEAT_TLBI,                   \
-                              CONFIG_QCOM_FALKOR_ERRATUM_1009)                \
+                              CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)            \
                            : : "r" (arg))
 
 #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
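
Concretely, on CPUs where ARM64_WORKAROUND_REPEAT_TLBI applies, the alternatives framework rewrites the two nops above into a barrier plus a second invalidate. A sketch of what a patched __TLBI_1(vae1is, addr) then amounts to (illustrative; privileged arm64 code only):

/* Illustrative expansion only; the real macro uses ALTERNATIVE() so
 * unaffected CPUs keep executing two nops in place of the repeat. */
static inline void tlbi_vae1is_repeat(unsigned long arg)
{
        asm volatile("tlbi vae1is, %0\n"
                     "dsb  ish\n"            /* wait for the first TLBI */
                     "tlbi vae1is, %0"       /* then issue it again */
                     : : "r" (arg));
}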
index a509e35132d225a4eef28af288969abab47ef9b3..6ad715d67df897ced58d65b7d133e81276d979c2 100644 (file)
@@ -570,6 +570,20 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
 
 #endif
 
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
+
+static const struct midr_range arm64_repeat_tlbi_cpus[] = {
+#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+       MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1286807
+       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+#endif
+       {},
+};
+
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #if    defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
@@ -695,11 +709,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .matches = is_kryo_midr,
        },
 #endif
-#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
+#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
-               .desc = "Qualcomm Technologies Falkor erratum 1009",
+               .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
-               ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
+               ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
index af50064dea51ad23c7c47cbe4786839fbf8603fa..aec5ecb85737edbe274b4f1c8a9082ac4a6947e1 100644 (file)
@@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .cpu_enable = cpu_enable_hw_dbm,
        },
 #endif
-#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "CRC32 instructions",
                .capability = ARM64_HAS_CRC32,
@@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
                .min_field_value = 1,
        },
+#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypassing Safe (SSBS)",
                .capability = ARM64_SSBS,
index f46d57c31443062c626e6062f4925d6206ee832b..6b5037ed15b288872d7956f58f576ab4c47424f4 100644 (file)
@@ -58,7 +58,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 /**
  * elfcorehdr_read - read from ELF core header
  * @buf: buffer where the data is placed
- * @csize: number of bytes to read
+ * @count: number of bytes to read
  * @ppos: address in the memory
  *
  * This function reads @count bytes from elf core header which exists
index 50986e388d2b27e92f6984914af4ce756ea0ee46..57e962290df3a0aee4aaeddbb6a8c9369b5c7931 100644 (file)
@@ -216,8 +216,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 {
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;
-       struct ftrace_graph_ent trace;
-       int err;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
@@ -229,18 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
         */
        old = *parent;
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace))
-               return;
-
-       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
-                                      frame_pointer, NULL);
-       if (err == -EBUSY)
-               return;
-       else
+       if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
                *parent = return_hooker;
 }
 
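This ftrace conversion repeats for microblaze, mips, nds32, parisc and powerpc below: the per-arch bookkeeping moves into a common function_graph_enter() helper. Reconstructed from the code deleted here (a sketch, not the helper's literal body in kernel/trace/), it centralizes roughly:

    int function_graph_enter(unsigned long ret, unsigned long func,
                             unsigned long frame_pointer, unsigned long *retp)
    {
            struct ftrace_graph_ent trace;

            trace.func = func;
            trace.depth = current->curr_ret_stack + 1;

            /* only trace if the calling function expects to */
            if (!ftrace_graph_entry(&trace))
                    return -EBUSY;

            return ftrace_push_return_trace(ret, func, &trace.depth,
                                            frame_pointer, retp);
    }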
index 6b2686d54411fdc0a92e3d4cda2dd38f5b21d40b..29cdc99688f335075dcfa71ed0b387d9ca548538 100644 (file)
@@ -214,7 +214,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
        }
 
        memcpy((void *)dst, src_start, length);
-       flush_icache_range(dst, dst + length);
+       __flush_icache_range(dst, dst + length);
 
        pgdp = pgd_offset_raw(allocator(mask), dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
index 9b65132e789a5572917b7577244b008793f6ff79..2a5b338b254240c8af9d552632214df144a1215a 100644 (file)
@@ -23,7 +23,9 @@
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 #include <linux/sched/debug.h>
+#include <linux/set_memory.h>
 #include <linux/stringify.h>
+#include <linux/vmalloc.h>
 #include <asm/traps.h>
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
@@ -42,10 +44,21 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 static void __kprobes
 post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
 
+static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+{
+       void *addrs[1];
+       u32 insns[1];
+
+       addrs[0] = addr;
+       insns[0] = opcode;
+
+       return aarch64_insn_patch_text(addrs, insns, 1);
+}
+
 static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
 {
        /* prepare insn slot */
-       p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
+       patch_text(p->ainsn.api.insn, p->opcode);
 
        flush_icache_range((uintptr_t) (p->ainsn.api.insn),
                           (uintptr_t) (p->ainsn.api.insn) +
@@ -118,15 +131,15 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        return 0;
 }
 
-static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
+void *alloc_insn_page(void)
 {
-       void *addrs[1];
-       u32 insns[1];
+       void *page;
 
-       addrs[0] = (void *)addr;
-       insns[0] = (u32)opcode;
+       page = vmalloc_exec(PAGE_SIZE);
+       if (page)
+               set_memory_ro((unsigned long)page, 1);
 
-       return aarch64_insn_patch_text(addrs, insns, 1);
+       return page;
 }
 
 /* arm kprobe: install breakpoint in text */
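With alloc_insn_page() now returning pages made read-only via set_memory_ro(), the single-step slot can no longer be written with a plain store, which is why arch_prepare_ss_slot() switches to the patching API:

    /* old, would fault on the now read-only slot page:
     *      p->ainsn.api.insn[0] = cpu_to_le32(p->opcode);
     * new, routed through the instruction patcher: */
    patch_text(p->ainsn.api.insn, p->opcode);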
index ce99c58cd1f1d2081355a7f4420072a31b43ca71..d9a4c2d6dd8b8b8031e6b552067690797eed6b6e 100644 (file)
@@ -497,25 +497,3 @@ void arch_setup_new_exec(void)
 {
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
 }
-
-#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-void __used stackleak_check_alloca(unsigned long size)
-{
-       unsigned long stack_left;
-       unsigned long current_sp = current_stack_pointer;
-       struct stack_info info;
-
-       BUG_ON(!on_accessible_stack(current, current_sp, &info));
-
-       stack_left = current_sp - info.low;
-
-       /*
-        * There's a good chance we're almost out of stack space if this
-        * is true. Using panic() over BUG() is more likely to give
-        * reliable debugging output.
-        */
-       if (size >= stack_left)
-               panic("alloca() over the kernel stack boundary\n");
-}
-EXPORT_SYMBOL(stackleak_check_alloca);
-#endif
index 953e316521fcaa34fcbe26a9ca8ca7de6b9f51e9..f4fc1e0544b73c5c3785ee35a027ec6cb60623dd 100644 (file)
@@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p)
        arm64_memblock_init();
 
        paging_init();
+       efi_apply_persistent_mem_reservations();
 
        acpi_table_upgrade();
 
index 3a703e5d4e3237f9844d09e871ef1eaa62b781cc..a3ac262848451ae49535c37a6997a211b0f5e914 100644 (file)
@@ -160,6 +160,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
        __dma_unmap_area(phys_to_virt(paddr), size, dir);
 }
 
+#ifdef CONFIG_IOMMU_DMA
 static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
 {
@@ -188,6 +189,7 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
 
        return ret;
 }
+#endif /* CONFIG_IOMMU_DMA */
 
 static int __init atomic_pool_init(void)
 {
index 9d9582cac6c40cad483d431682a178c67c445b45..9b432d9fcada8dac8e7b1041437387f29785b2af 100644 (file)
@@ -483,8 +483,6 @@ void __init arm64_memblock_init(void)
        high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
        dma_contiguous_reserve(arm64_dma_phys_limit);
-
-       memblock_allow_resize();
 }
 
 void __init bootmem_init(void)
index 394b8d554def4c3372425ed5088ee1116ef9898e..d1d6601b385d9214ceadd17ba51057ff4e023177 100644 (file)
@@ -659,6 +659,8 @@ void __init paging_init(void)
 
        memblock_free(__pa_symbol(init_pg_dir),
                      __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+       memblock_allow_resize();
 }
 
 /*
index a6fdaea07c6339cf2754d582765747ee5d8b2ff5..89198017e8e681268504235331471b38b7e945b9 100644 (file)
@@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx)
  * >0 - successfully JITed a 16-byte eBPF instruction.
  * <0 - failed to JIT.
  */
-static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
+static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+                     bool extra_pass)
 {
        const u8 code = insn->code;
        const u8 dst = bpf2a64[insn->dst_reg];
@@ -625,12 +626,19 @@ emit_cond_jmp:
        case BPF_JMP | BPF_CALL:
        {
                const u8 r0 = bpf2a64[BPF_REG_0];
-               const u64 func = (u64)__bpf_call_base + imm;
+               bool func_addr_fixed;
+               u64 func_addr;
+               int ret;
 
-               if (ctx->prog->is_func)
-                       emit_addr_mov_i64(tmp, func, ctx);
+               ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+                                           &func_addr, &func_addr_fixed);
+               if (ret < 0)
+                       return ret;
+               if (func_addr_fixed)
+                       /* We can use optimized emission here. */
+                       emit_a64_mov_i64(tmp, func_addr, ctx);
                else
-                       emit_a64_mov_i64(tmp, func, ctx);
+                       emit_addr_mov_i64(tmp, func_addr, ctx);
                emit(A64_BLR(tmp), ctx);
                emit(A64_MOV(1, r0, A64_R(0)), ctx);
                break;
@@ -753,7 +761,7 @@ emit_cond_jmp:
        return 0;
 }
 
-static int build_body(struct jit_ctx *ctx)
+static int build_body(struct jit_ctx *ctx, bool extra_pass)
 {
        const struct bpf_prog *prog = ctx->prog;
        int i;
@@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx)
                const struct bpf_insn *insn = &prog->insnsi[i];
                int ret;
 
-               ret = build_insn(insn, ctx);
+               ret = build_insn(insn, ctx, extra_pass);
                if (ret > 0) {
                        i++;
                        if (ctx->image == NULL)
@@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        /* 1. Initial fake pass to compute ctx->idx. */
 
        /* Fake pass to fill in ctx->offset. */
-       if (build_body(&ctx)) {
+       if (build_body(&ctx, extra_pass)) {
                prog = orig_prog;
                goto out_off;
        }
@@ -888,7 +896,7 @@ skip_init_ctx:
 
        build_prologue(&ctx, was_classic);
 
-       if (build_body(&ctx)) {
+       if (build_body(&ctx, extra_pass)) {
                bpf_jit_binary_free(header);
                prog = orig_prog;
                goto out_off;
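The extra_pass plumbing matters because a BPF-to-BPF call target has no final address on the first pass. One reading of the split above (the rationale is inferred, not quoted from the commit): a fixed helper address can use the shortest mov-immediate sequence, while a still-moving subprogram address needs the constant-length emission so the image size computed in the fake pass stays valid when extra_pass rewrites it:

    if (func_addr_fixed)
            emit_a64_mov_i64(tmp, func_addr, ctx);  /* shortest encoding */
    else
            emit_addr_mov_i64(tmp, func_addr, ctx); /* fixed length, re-patchable */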
index 48cf6ff9df4a36885db042048ce2aa8761c58a58..22a162cd99e8112b37681afd8934c3c3eb490f99 100644 (file)
@@ -1,9 +1 @@
-menu "C-SKY Debug Options"
-config CSKY_BUILTIN_DTB
-       string "Use kernel builtin dtb"
-       help
-         User could define the dtb instead of the one which is passed from
-         bootloader.
-         Sometimes for debug, we want to use a built-in dtb and then we needn't
-         modify bootloader at all.
-endmenu
+# dummy file, do not delete
index 67a4ae1fba2ba4601f689404e52d5ec3dd901ce1..c639fc167895d7a2f00909bf079e5ea2e6b0558c 100644 (file)
@@ -65,26 +65,15 @@ libs-y += arch/csky/lib/ \
        $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
 
 boot := arch/csky/boot
-ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""'
 core-y += $(boot)/dts/
-endif
 
 all: zImage
 
-
-dtbs: scripts
-       $(Q)$(MAKE) $(build)=$(boot)/dts
-
-%.dtb %.dtb.S %.dtb.o: scripts
-       $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
-
-zImage Image uImage: vmlinux dtbs
+zImage Image uImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
-       $(Q)$(MAKE) $(clean)=$(boot)/dts
-       rm -rf arch/csky/include/generated
 
 define archhelp
   echo  '* zImage       - Compressed kernel image (arch/$(ARCH)/boot/zImage)'
index 305e81a5e91e1e5a0d622f2f399e3511fe61ca07..c57ad3c880bfb933c227fa32141b57b938be3706 100644 (file)
@@ -1,13 +1,3 @@
 dtstree        := $(srctree)/$(src)
 
-ifneq '$(CONFIG_CSKY_BUILTIN_DTB)' '""'
-builtindtb-y := $(patsubst "%",%,$(CONFIG_CSKY_BUILTIN_DTB))
-dtb-y += $(builtindtb-y).dtb
-obj-y += $(builtindtb-y).dtb.o
-.SECONDARY: $(obj)/$(builtindtb-y).dtb.S
-else
 dtb-y := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
-endif
-
-always += $(dtb-y)
-clean-files += *.dtb *.dtb.S
index c410aa4fff1a19efcf5234835409762dd72163b6..b2905c0485a72a177c00c69fceea5be092624cdb 100644 (file)
@@ -16,7 +16,7 @@
 
 static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
 {
-       pgd &= ~(1<<31);
+       pgd -= PAGE_OFFSET;
        pgd += PHYS_OFFSET;
        pgd |= 1;
        setup_pgd(pgd, kernel);
@@ -29,7 +29,7 @@ static inline void tlbmiss_handler_setup_pgd(unsigned long pgd, bool kernel)
 
 static inline unsigned long tlb_get_pgd(void)
 {
-       return ((get_pgd()|(1<<31)) - PHYS_OFFSET) & ~1;
+       return ((get_pgd() - PHYS_OFFSET) & ~1) + PAGE_OFFSET;
 }
 
 #define cpu_context(cpu, mm)   ((mm)->context.asid[cpu])
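The csky conversion replaces hard-wired bit-31 arithmetic with PAGE_OFFSET/PHYS_OFFSET arithmetic. With the common PAGE_OFFSET of 0x80000000 both forms agree, but only the new one survives a different layout; a worked example (addresses invented for illustration):

    unsigned long virt     = 0x80123000UL;         /* pgd virtual address */
    unsigned long phys_old = virt & ~(1UL << 31);  /* 0x00123000 */
    unsigned long phys_new = virt - 0x80000000UL;  /* 0x00123000, same here */
    /* with PAGE_OFFSET = 0xc0000000, only phys_new stays correct */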
index ebef7f40aabbe26a5fa384294bf05819f949dd30..c5c253cb9bd63ad85177e3e0f00321385362da7e 100644 (file)
@@ -59,7 +59,9 @@ extern struct node_cpuid_s node_cpuid[NR_CPUS];
  */
 
 extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
-#define node_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
+#define slit_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)])
+extern int __node_distance(int from, int to);
+#define node_distance(from,to) __node_distance(from, to)
 
 extern int paddr_to_nid(unsigned long paddr);
 
index 1dacbf5e9e09a5a71f9e21bd5fd88eed8199e823..41eb281709da1c334226fc652d7184888e48d327 100644 (file)
@@ -578,8 +578,8 @@ void __init acpi_numa_fixup(void)
        if (!slit_table) {
                for (i = 0; i < MAX_NUMNODES; i++)
                        for (j = 0; j < MAX_NUMNODES; j++)
-                               node_distance(i, j) = i == j ? LOCAL_DISTANCE :
-                                                       REMOTE_DISTANCE;
+                               slit_distance(i, j) = i == j ?
+                                       LOCAL_DISTANCE : REMOTE_DISTANCE;
                return;
        }
 
@@ -592,7 +592,7 @@ void __init acpi_numa_fixup(void)
                        if (!pxm_bit_test(j))
                                continue;
                        node_to = pxm_to_node(j);
-                       node_distance(node_from, node_to) =
+                       slit_distance(node_from, node_to) =
                            slit_table->entry[i * slit_table->locality_count + j];
                }
        }
index 3861d6e32d5ff910615305ef691b4a30fd1028f2..a03803506b0c041fde49ca23bfae160ba6499bd9 100644 (file)
@@ -36,6 +36,12 @@ struct node_cpuid_s node_cpuid[NR_CPUS] =
  */
 u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES];
 
+int __node_distance(int from, int to)
+{
+       return slit_distance(from, to);
+}
+EXPORT_SYMBOL(__node_distance);
+
 /* Identify which cnode a physical address resides on */
 int
 paddr_to_nid(unsigned long paddr)
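After the split, boot code keeps writing the table through the slit_distance() lvalue macro while node_distance() becomes a call to the exported __node_distance(), so modules that use it now link. Usage sketch:

    slit_distance(0, 1) = 20;        /* boot-time SLIT fill (lvalue macro) */
    int d = node_distance(0, 1);     /* -> __node_distance(0, 1), d == 20 */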
index 6181e4134483c26aa1a34d55e4b316ddad98f5f5..fe3ddd73a0ccb9e4fec24425164cc8c6c7f477bc 100644 (file)
  */
 #ifdef CONFIG_SUN3
 #define PTRS_PER_PTE   16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   2048
 #elif defined(CONFIG_COLDFIRE)
 #define PTRS_PER_PTE   512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define PTRS_PER_PMD   1
 #define PTRS_PER_PGD   1024
 #else
index f64ebb9c9a413535c105e3235eb50469d51b5697..e14b6621c933e47e1f87db0114f895b39f5450ef 100644 (file)
@@ -63,7 +63,7 @@ extern int mem_init_done;
 
 #include <asm-generic/4level-fixup.h>
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
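Here and in the m68k, nds32 and parisc hunks, __PAGETABLE_PMD_FOLDED gains an explicit value of 1. That keeps every #ifdef working and additionally allows arithmetic #if tests, which an empty expansion cannot satisfy; presumably that is the point of the change:

    #define FOLDED_EMPTY
    #define FOLDED_ONE   1

    #if FOLDED_ONE          /* fine: the expression evaluates to 1 */
    #endif
    /* "#if FOLDED_EMPTY" would not compile: the expression is empty */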
index d57563c58a26be43672098d9895d9107c945e8d8..224eea40e1ee805fa15d56f33e16b248fbaec30a 100644 (file)
@@ -22,8 +22,7 @@
 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
        unsigned long old;
-       int faulted, err;
-       struct ftrace_graph_ent trace;
+       int faulted;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
 
@@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                return;
        }
 
-       err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL);
-       if (err == -EBUSY) {
+       if (function_graph_enter(old, self_addr, 0, NULL))
                *parent = old;
-               return;
-       }
-
-       trace.func = self_addr;
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
-               *parent = old;
-       }
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
index 15a84cfd07191f95dcaa83744f76feaae3a37af2..68410490e12fdc2497104c3eeb7cdf64a8e84b8e 100644 (file)
@@ -128,7 +128,7 @@ cflags-y += -ffreestanding
 # clang's output will be based upon the build machine. So for clang we simply
 # unconditionally specify -EB or -EL as appropriate.
 #
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += -EB
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -EL
 else
index 75108ec669ebc881c6949962ef61f6368c4a814a..6c79e8a16a2681f01cf4ffb0a702a8414499bf7b 100644 (file)
@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
 void (*cvmx_override_ipd_port_setup) (int ipd_port);
 
 /* Port count per interface */
-static int interface_port_count[5];
+static int interface_port_count[9];
 
 /**
  * Return the number of interfaces the chip has. Each interface
index 490b12af103c1285043ecfec912b1839c5586f06..c52d0efacd1466f0320a025d3519ffb8ba212a09 100644 (file)
@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_STAGING=y
 CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_RAS=y
 CONFIG_EXT4_FS=y
index 0170602a1e4e3f920b0b3834e0df26203d81c5a5..6cf8ffb5367ec3fb725aac26c701d0ae5d81923c 100644 (file)
@@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
 #ifdef CONFIG_64BIT
        case 4: case 5: case 6: case 7:
 #ifdef CONFIG_MIPS32_O32
-               if (test_thread_flag(TIF_32BIT_REGS))
+               if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
                        return get_user(*arg, (int *)usp + n);
                else
 #endif
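The small-looking fix matters whenever the syscall arguments being fetched belong to a traced task rather than the caller; the two accessors differ only in whose flags they read:

    test_thread_flag(TIF_32BIT_REGS);            /* flags of current */
    test_tsk_thread_flag(task, TIF_32BIT_REGS);  /* flags of the target task */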
index 7f3dfdbc3657e6705b6a797c6e1dfa565fa3bff9..b122cbb4aad184c5dddd56d4990c7924ed9ad563 100644 (file)
@@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
                           unsigned long fp)
 {
        unsigned long old_parent_ra;
-       struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
            &return_to_handler;
        int faulted, insns;
@@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
        if (unlikely(faulted))
                goto out;
 
-       if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp,
-                                    NULL) == -EBUSY) {
-               *parent_ra_addr = old_parent_ra;
-               return;
-       }
-
        /*
         * Get the recorded ip of the current mcount calling site in the
         * __mcount_loc section, which will be used to filter the function
@@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
         */
 
        insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
-       trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
+       self_ra -= (MCOUNT_INSN_SIZE * insns);
 
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
+       if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
                *parent_ra_addr = old_parent_ra;
-       }
        return;
 out:
        ftrace_graph_stop();
index ea09ed6a80a9f2dc0aa625e2fcec0b34e2c6acea..8c6c48ed786a1527c22ba5b46bcdad70029e5865 100644 (file)
@@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
 
        /* call board setup routine */
        plat_mem_setup();
+       memblock_set_bottom_up(true);
 
        /*
         * Make sure all kernel memory is in the maps.  The "UP" and
index 0f852e1b589193d43f9125cfbfa303d6b47c6fed..15e103c6d799ebd90ad3f55a954c1d40329d5bac 100644 (file)
@@ -2260,10 +2260,8 @@ void __init trap_init(void)
                unsigned long size = 0x200 + VECTORSPACING*64;
                phys_addr_t ebase_pa;
 
-               memblock_set_bottom_up(true);
                ebase = (unsigned long)
                        memblock_alloc_from(size, 1 << fls(size), 0);
-               memblock_set_bottom_up(false);
 
                /*
                 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2307,6 +2305,7 @@ void __init trap_init(void)
        if (board_ebase_setup)
                board_ebase_setup();
        per_cpu_trap_init(true);
+       memblock_set_bottom_up(false);
 
        /*
         * Copy the generic exception handlers to their final destination.
index 622761878cd11bd54e53affa43e5504c0aadc9c0..60bf0a1cb75719d731b60f8b2fe2954074dea77b 100644 (file)
@@ -231,6 +231,8 @@ static __init void prom_meminit(void)
                        cpumask_clear(&__node_data[(node)]->cpumask);
                }
        }
+       max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
        for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
                node = cpu / loongson_sysconf.cores_per_node;
                if (node >= num_online_nodes())
@@ -248,19 +250,9 @@ static __init void prom_meminit(void)
 
 void __init paging_init(void)
 {
-       unsigned node;
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
        pagetable_init();
-
-       for_each_online_node(node) {
-               unsigned long  start_pfn, end_pfn;
-
-               get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-               if (end_pfn > max_low_pfn)
-                       max_low_pfn = end_pfn;
-       }
 #ifdef CONFIG_ZONE_DMA32
        zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
index e6c9485cadcffc7e0ecba01326ca3b777363edb4..cb38461391cb78c714535d2536b5cb4eed1bddad 100644 (file)
@@ -50,7 +50,7 @@ void *arch_dma_alloc(struct device *dev, size_t size,
        void *ret;
 
        ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
-       if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+       if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = (void *)UNCAC_ADDR(ret);
        }
index 41b71c4352c25216095d9c197e391796be10be85..c1ce6f43642bc8ee93b189bbaa86dec9cf1d9cb3 100644 (file)
@@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
 };
 static struct rt2880_pmx_func nd_sd_grp[] = {
        FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
-       FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
+       FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
 };
 
 static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
index d8b8444d679527e3843ea1de091be70926355f34..813d13f92957ed00715ac5914d62dc09bcba1861 100644 (file)
@@ -435,6 +435,7 @@ void __init prom_meminit(void)
 
        mlreset();
        szmem();
+       max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
 
        for (node = 0; node < MAX_COMPACT_NODES; node++) {
                if (node_online(node)) {
@@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
 void __init paging_init(void)
 {
        unsigned long zones_size[MAX_NR_ZONES] = {0, };
-       unsigned node;
 
        pagetable_init();
-
-       for_each_online_node(node) {
-               unsigned long start_pfn, end_pfn;
-
-               get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-               if (end_pfn > max_low_pfn)
-                       max_low_pfn = end_pfn;
-       }
        zones_size[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(zones_size);
 }
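Both NUMA platforms (Loongson above and SGI IP27 here) now take max_low_pfn straight from the memblock end instead of scanning per-node pfn ranges. PHYS_PFN() is a plain page shift, so for example:

    #define PAGE_SHIFT 12                         /* 4 KiB pages, assumed */
    #define PHYS_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
    /* memblock_end_of_DRAM() == 0x40000000 (1 GiB) -> max_low_pfn == 0x40000 */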
index 34605ca214984c7257507fa2fb5925eb48bd9f92..58a0315ad743d5bcf965814fd17431f3f2f2bf03 100644 (file)
@@ -10,7 +10,7 @@ ccflags-vdso := \
        $(filter -march=%,$(KBUILD_CFLAGS)) \
        -D__VDSO__
 
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
 ccflags-vdso += $(filter --target=%,$(KBUILD_CFLAGS))
 endif
 
index d3e19a55cf530046795f7c2836fbc13dc3b823fb..9f52db930c004ecc5c6de013721e06d7b4bf52a3 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef _ASMNDS32_PGTABLE_H
 #define _ASMNDS32_PGTABLE_H
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #include <asm-generic/4level-fixup.h>
 #include <asm-generic/sizes.h>
 
index a0a9679ad5dee8a9d08810556cc204dedd127c08..8a41372551ff3cbca4bcf0a94e1bea400dcb4c01 100644 (file)
@@ -211,29 +211,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
 {
        unsigned long return_hooker = (unsigned long)&return_to_handler;
-       struct ftrace_graph_ent trace;
        unsigned long old;
-       int err;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
        old = *parent;
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace))
-               return;
-
-       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
-                                      frame_pointer, NULL);
-
-       if (err == -EBUSY)
-               return;
-
-       *parent = return_hooker;
+       if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+               *parent = return_hooker;
 }
 
 noinline void ftrace_graph_caller(void)
index d047a09d660f003de3b3059984de4d9974ae646d..1085385e1f06a433ccf840fae55b8f76f613e97a 100644 (file)
@@ -71,6 +71,13 @@ ifdef CONFIG_MLONGCALLS
 KBUILD_CFLAGS_KERNEL += -mlong-calls
 endif
 
+# Without this, "ld -r" results in .text sections that are too big (> 0x40000)
+# for branches to reach stubs. And multiple .text sections trigger a warning
+# when creating the sysfs module information section.
+ifndef CONFIG_64BIT
+KBUILD_CFLAGS_MODULE += -ffunction-sections
+endif
+
 # select which processor to optimise for
 cflags-$(CONFIG_PA7000)                += -march=1.1 -mschedule=7100
 cflags-$(CONFIG_PA7200)                += -march=1.1 -mschedule=7200
index b941ac7d4e70b35181351565136a9c25e7ee66f0..c7bb74e22436079de3d9f6153e98fe47cf8a9df4 100644 (file)
@@ -111,7 +111,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define BITS_PER_PMD   (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
 #else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 #define BITS_PER_PMD   0
 #endif
 #define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
index 16aec9ba2580a6dd3b3b9bfe03d29099815d2412..8a63515f03bfe3931930d094a479060815832fe6 100644 (file)
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
        volatile unsigned int *a;
 
        a = __ldcw_align(x);
-       /* Release with ordered store. */
-       __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+       mb();
+       *a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
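This hunk and the three syscall.S hunks below revert the ordered-store unlock to an explicit barrier followed by a plain store. In C11 terms the release now looks like this (an analogue of mb()/sync plus *a = 1, not the kernel's code):

    #include <stdatomic.h>

    static atomic_uint lock_word;

    static void unlock_sketch(void)
    {
            atomic_thread_fence(memory_order_release);  /* the mb()/sync */
            atomic_store_explicit(&lock_word, 1, memory_order_relaxed);
    }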
index 6fa8535d3cceb55de7ecf1051fcb2c812dcb1f51..e46a4157a8948862697755439496a16e5acb29f4 100644 (file)
@@ -30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
                                        unsigned long self_addr)
 {
        unsigned long old;
-       struct ftrace_graph_ent trace;
        extern int parisc_return_to_handler;
 
        if (unlikely(ftrace_graph_is_dead()))
@@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent,
 
        old = *parent;
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace))
-               return;
-
-        if (ftrace_push_return_trace(old, self_addr, &trace.depth,
-                                    0, NULL) == -EBUSY)
-                return;
-
-       /* activate parisc_return_to_handler() as return point */
-       *parent = (unsigned long) &parisc_return_to_handler;
+       if (!function_graph_enter(old, self_addr, 0, NULL))
+               /* activate parisc_return_to_handler() as return point */
+               *parent = (unsigned long) &parisc_return_to_handler;
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
index 9505c317818df77cb1e67ea39fa3b43110d32d16..a9bc90dc4ae75e4e0489a297ef64c645a9cc7557 100644 (file)
@@ -640,7 +640,8 @@ cas_action:
        sub,<>  %r28, %r25, %r0
 2:     stw     %r24, 0(%r26)
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        /* Clear thread register indicator */
        stw     %r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
 3:             
        /* Error occurred on load or store */
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
        stw     %r0, 4(%sr2,%r20)
 #endif
@@ -855,7 +857,8 @@ cas2_action:
 
 cas2_end:
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
        /* Enable interrupts */
        ssm     PSW_SM_I, %r0
        /* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
 22:
        /* Error occurred on load or store */
        /* Free lock */
-       stw,ma  %r20, 0(%sr2,%r20)
+       sync
+       stw     %r20, 0(%sr2,%r20)
        ssm     PSW_SM_I, %r0
        ldo     1(%r0),%r28
        b       lws_exit
index 2d51b2bd4aa132992f2f28be908b0571a79ae4d3..8be31261aec83190a65927854d214d613244c164 100644 (file)
@@ -930,10 +930,6 @@ config FSL_GTM
        help
          Freescale General-purpose Timers support
 
-# Yes MCA RS/6000s exist but Linux-PPC does not currently support any
-config MCA
-       bool
-
 # Platforms that want PCI turned on unconditionally just do select PCI
 # in their config node.  Platforms that want to choose at config
 # time should select PPC_PCI_CHOICE
@@ -944,7 +940,6 @@ config PCI
        bool "PCI support" if PPC_PCI_CHOICE
        default y if !40x && !CPM2 && !PPC_8xx && !PPC_83xx \
                && !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
-       default PCI_QSPAN if PPC_8xx
        select GENERIC_PCI_IOMAP
        help
          Find out whether your system includes a PCI bus. PCI is the name of
@@ -958,14 +953,6 @@ config PCI_DOMAINS
 config PCI_SYSCALL
        def_bool PCI
 
-config PCI_QSPAN
-       bool "QSpan PCI"
-       depends on PPC_8xx
-       select PPC_I8259
-       help
-         Say Y here if you have a system based on a Motorola 8xx-series
-         embedded processor with a QSPAN PCI interface, otherwise say N.
-
 config PCI_8260
        bool
        depends on PCI && 8260
index 17be664dafa2f2166d12025c8ee536d49c593b14..8a2ce14d68d077b2d62771070b40ac931d8a975a 100644 (file)
@@ -96,7 +96,7 @@ aflags-$(CONFIG_CPU_BIG_ENDIAN)               += $(call cc-option,-mabi=elfv1)
 aflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += -mabi=elfv2
 endif
 
-ifneq ($(cc-name),clang)
+ifndef CONFIG_CC_IS_CLANG
   cflags-$(CONFIG_CPU_LITTLE_ENDIAN)   += -mno-strict-align
 endif
 
@@ -175,7 +175,7 @@ endif
 # Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
-ifneq ($(cc-name),clang)
+ifndef CONFIG_CC_IS_CLANG
 CC_FLAGS_FTRACE        += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
 endif
 endif
index 55c0210a771d1f6e45bdd85a174d31efc2c99acd..092a400740f84ecfe11344fae29e7fb0927477d0 100644 (file)
                };
 
                ethernet@f0000 {
-                       phy-handle = <&xg_cs4315_phy1>;
+                       phy-handle = <&xg_cs4315_phy2>;
                        phy-connection-type = "xgmii";
                };
 
                ethernet@f2000 {
-                       phy-handle = <&xg_cs4315_phy2>;
+                       phy-handle = <&xg_cs4315_phy1>;
                        phy-connection-type = "xgmii";
                };
 
index 5b037f51741df177cfb26ea11dd1a1e9bb093290..3aa300afbbca4a4c3e07b4079a3b4b43d1ff7def 100644 (file)
@@ -72,7 +72,7 @@
                #address-cells = <1>;
                #size-cells = <1>;
                device_type = "soc";
-               ranges = <0x0 0xff000000 0x4000>;
+               ranges = <0x0 0xff000000 0x28000>;
                bus-frequency = <0>;
 
                // Temporary -- will go away once kernel uses ranges for get_immrbase().
                                #size-cells = <0>;
                        };
                };
+
+               crypto@20000 {
+                       compatible = "fsl,sec1.2", "fsl,sec1.0";
+                       reg = <0x20000 0x8000>;
+                       interrupts = <1 1>;
+                       interrupt-parent = <&PIC>;
+                       fsl,num-channels = <1>;
+                       fsl,channel-fifo-len = <24>;
+                       fsl,exec-units-mask = <0x4c>;
+                       fsl,descriptor-types-mask = <0x05000154>;
+               };
        };
 
        chosen {
index 31733a95bbd052bda1038f8160b127181d3cffc2..3d5acd2b113a2d64b12f5f8a1487483c2cc24159 100644 (file)
@@ -36,6 +36,11 @@ int raw_patch_instruction(unsigned int *addr, unsigned int instr);
 int patch_instruction_site(s32 *addr, unsigned int instr);
 int patch_branch_site(s32 *site, unsigned long target, int flags);
 
+static inline unsigned long patch_site_addr(s32 *site)
+{
+       return (unsigned long)site + *site;
+}
+
 int instr_is_relative_branch(unsigned int instr);
 int instr_is_relative_link_branch(unsigned int instr);
 int instr_is_branch_to_addr(const unsigned int *instr, unsigned long addr);
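A patch site stores a signed 32-bit offset from itself to the instruction it names, so resolving one is a single addition. Worked example (addresses invented for illustration):

    s32 *site = (s32 *)0xc0001000;                  /* assume *site == 0x100 */
    unsigned long insn_addr = patch_site_addr(site);
    /* insn_addr == 0xc0001000 + 0x100 == 0xc0001100 */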
index 3ef40b703c4ab86e7daf3982ae4781de8988b32f..e746becd9d6ff29c65ab0109fb82dd945a046f6d 100644 (file)
@@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * their hooks, a bitfield is reserved for use by the platform near the
  * top of MMIO addresses (not PIO, those have to cope the hard way).
  *
- * This bit field is 12 bits and is at the top of the IO virtual
- * addresses PCI_IO_INDIRECT_TOKEN_MASK.
+ * The highest addresses in the kernel virtual space are:
  *
- * The kernel virtual space is thus:
+ *  d0003fffffffffff   # with Hash MMU
+ *  c00fffffffffffff   # with Radix MMU
  *
- *  0xD000000000000000         : vmalloc
- *  0xD000080000000000         : PCI PHB IO space
- *  0xD000080080000000         : ioremap
- *  0xD0000fffffffffff         : end of ioremap region
- *
- * Since the top 4 bits are reserved as the region ID, we use thus
- * the next 12 bits and keep 4 bits available for the future if the
- * virtual address space is ever to be extended.
+ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
+ * that can be used for the field.
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
@@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  */
 
 #ifdef CONFIG_PPC_INDIRECT_MMIO
-#define PCI_IO_IND_TOKEN_MASK  0x0fff000000000000ul
-#define PCI_IO_IND_TOKEN_SHIFT 48
+#define PCI_IO_IND_TOKEN_SHIFT 52
+#define PCI_IO_IND_TOKEN_MASK  (0xfful << PCI_IO_IND_TOKEN_SHIFT)
 #define PCI_FIX_ADDR(addr)                                             \
        ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
 #define PCI_GET_ADDR_TOKEN(addr)                                       \
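Moving the token up to bit 52 shrinks the field from 12 bits to 8 but keeps it clear of the enlarged kernel regions listed above; the new mask works out to:

    #define PCI_IO_IND_TOKEN_SHIFT 52
    #define PCI_IO_IND_TOKEN_MASK  (0xfful << PCI_IO_IND_TOKEN_SHIFT)
    /* == 0x0ff0000000000000: bits 52-59 carry the token, leaving the top
     * four bits (60-63) for the hash region ID as described above */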
index 4f547752ae79595086c9ad55a44281ea68b52505..fa05aa566ece422971533399c8921ada1af35aec 100644 (file)
  * respectively NA for All or X for Supervisor and no access for User.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
- * When that bit is not set access is done iaw "all user"
- * which means no access iaw page rules.
- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
- * 0x => No access => 11 (all accesses performed as user iaw page definition)
- * 10 => No user => 01 (all accesses performed according to page definition)
- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
  * We define all 16 groups so that all other bits of APG can take any value
  */
-#ifdef CONFIG_SWAP
-#define MI_APG_INIT    0xf4f4f4f4
-#else
 #define MI_APG_INIT    0x44444444
-#endif
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MI_RPN is written, bits in
  * Supervisor and no access for user and NA for ALL.
  * Then we use the APG to say whether accesses are according to Page rules or
  * "all Supervisor" rules (Access to all)
- * We also use the 2nd APG bit for _PAGE_ACCESSED when having SWAP:
- * When that bit is not set access is done iaw "all user"
- * which means no access iaw page rules.
- * Therefore, we define 4 APG groups. lsb is _PMD_USER, 2nd is _PAGE_ACCESSED
- * 0x => No access => 11 (all accesses performed as user iaw page definition)
- * 10 => No user => 01 (all accesses performed according to page definition)
- * 11 => User => 00 (all accesses performed as supervisor iaw page definition)
+ * Therefore, we define 2 APG groups. lsb is _PMD_USER
+ * 0 => No user => 01 (all accesses performed according to page definition)
+ * 1 => User => 00 (all accesses performed as supervisor iaw page definition)
  * We define all 16 groups so that all other bits of APG can take any value
  */
-#ifdef CONFIG_SWAP
-#define MD_APG_INIT    0xf4f4f4f4
-#else
 #define MD_APG_INIT    0x44444444
-#endif
 
 /* The effective page number register.  When read, contains the information
  * about the last instruction TLB miss.  When MD_RPN is written, bits in
  */
 #define SPRN_M_TW      799
 
-/* APGs */
-#define M_APG0         0x00000000
-#define M_APG1         0x00000020
-#define M_APG2         0x00000040
-#define M_APG3         0x00000060
-
 #ifdef CONFIG_PPC_MM_SLICES
 #include <asm/nohash/32/slice.h>
 #define SLICE_ARRAY_SIZE       (1 << (32 - SLICE_LOW_SHIFT - 1))
@@ -251,6 +229,15 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
        BUG();
 }
 
+/* patch sites */
+extern s32 patch__itlbmiss_linmem_top;
+extern s32 patch__dtlbmiss_linmem_top, patch__dtlbmiss_immr_jmp;
+extern s32 patch__fixupdar_linmem_top;
+
+extern s32 patch__itlbmiss_exit_1, patch__itlbmiss_exit_2;
+extern s32 patch__dtlbmiss_exit_1, patch__dtlbmiss_exit_2, patch__dtlbmiss_exit_3;
+extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC_4K_PAGES)
index 6093bc8f74e518bf225c014c25521c8a515ba013..a6e9e314c7077044c0bb58590c95dddce4be8ed1 100644 (file)
                                        __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)   stringify_in_c(.long PPC_INST_SLBFEE | \
                                        __PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE |  \
+                                              ___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)                stringify_in_c(.long PPC_INST_ICBT | \
                                       __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
 /* PASemi instructions */
index f73886a1a7f51714da637c0f9c81a8dfd1107b7f..0b8a735b6d85f08512143b539c5ee5329598c48c 100644 (file)
@@ -54,6 +54,7 @@ struct pt_regs
 
 #ifdef CONFIG_PPC64
        unsigned long ppr;
+       unsigned long __pad;    /* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
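The pad exists because ppr left the ppc64 pt_regs at an odd number of 8-byte words. A compile-time check in the same spirit as the BUILD_BUG_ON added to alloc_stack() further down could read:

    _Static_assert(sizeof(struct pt_regs) % 16 == 0,
                   "interrupt stack frame loses 16-byte alignment");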
index bb38dd67d47ddba7d730eb57b5d0cf1aac30093a..1b06add4f092adb5811be46bc4b6011e5537c591 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <asm/page.h>
 #include <linux/time.h>
+#include <linux/cpumask.h>
 
 /*
  * Definitions for talking to the RTAS on CHRP machines.
index 134a573a9f2d0c61f7bf0d789826b6bdccb989f0..3b67b9533c82fe1cfee4f279cdfd1a3897f873d7 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/export.h>
+#include <asm/code-patching-asm.h>
 
 #if CONFIG_TASK_SIZE <= 0x80000000 && CONFIG_PAGE_OFFSET >= 0x80000000
 /* By simply checking Address >= 0x80000000, we know if its a kernel address */
@@ -318,8 +319,8 @@ InstructionTLBMiss:
        cmpli   cr0, r11, PAGE_OFFSET@h
 #ifndef CONFIG_PIN_TLB_TEXT
        /* It is assumed that kernel code fits into the first 8M page */
-_ENTRY(ITLBMiss_cmp)
-       cmpli   cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+0:     cmpli   cr7, r11, (PAGE_OFFSET + 0x0800000)@h
+       patch_site      0b, patch__itlbmiss_linmem_top
 #endif
 #endif
 #endif
@@ -353,13 +354,14 @@ _ENTRY(ITLBMiss_cmp)
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
        mtcr    r12
 #endif
-
-#ifdef CONFIG_SWAP
-       rlwinm  r11, r10, 31, _PAGE_ACCESSED >> 1
-#endif
        /* Load the MI_TWC with the attributes for this "segment." */
        mtspr   SPRN_MI_TWC, r11        /* Set segment attributes */
 
+#ifdef CONFIG_SWAP
+       rlwinm  r11, r10, 32-5, _PAGE_PRESENT
+       and     r11, r11, r10
+       rlwimi  r10, r11, 0, _PAGE_PRESENT
+#endif
        li      r11, RPN_PATTERN | 0x200
        /* The Linux PTE won't go exactly into the MMU TLB.
         * Software indicator bits 20 and 23 must be clear.
@@ -372,16 +374,17 @@ _ENTRY(ITLBMiss_cmp)
        mtspr   SPRN_MI_RPN, r10        /* Update TLB entry */
 
        /* Restore registers */
-_ENTRY(itlb_miss_exit_1)
-       mfspr   r10, SPRN_SPRG_SCRATCH0
+0:     mfspr   r10, SPRN_SPRG_SCRATCH0
        mfspr   r11, SPRN_SPRG_SCRATCH1
 #if defined(ITLB_MISS_KERNEL) || defined(CONFIG_HUGETLB_PAGE)
        mfspr   r12, SPRN_SPRG_SCRATCH2
 #endif
        rfi
+       patch_site      0b, patch__itlbmiss_exit_1
+
 #ifdef CONFIG_PERF_EVENTS
-_ENTRY(itlb_miss_perf)
-       lis     r10, (itlb_miss_counter - PAGE_OFFSET)@ha
+       patch_site      0f, patch__itlbmiss_perf
+0:     lis     r10, (itlb_miss_counter - PAGE_OFFSET)@ha
        lwz     r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
        addi    r11, r11, 1
        stw     r11, (itlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -435,11 +438,11 @@ DataStoreTLBMiss:
 #ifndef CONFIG_PIN_TLB_IMMR
        cmpli   cr0, r11, VIRT_IMMR_BASE@h
 #endif
-_ENTRY(DTLBMiss_cmp)
-       cmpli   cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+0:     cmpli   cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+       patch_site      0b, patch__dtlbmiss_linmem_top
 #ifndef CONFIG_PIN_TLB_IMMR
-_ENTRY(DTLBMiss_jmp)
-       beq-    DTLBMissIMMR
+0:     beq-    DTLBMissIMMR
+       patch_site      0b, patch__dtlbmiss_immr_jmp
 #endif
        blt     cr7, DTLBMissLinear
        lis     r11, (swapper_pg_dir-PAGE_OFFSET)@ha
@@ -470,14 +473,22 @@ _ENTRY(DTLBMiss_jmp)
         * above.
         */
        rlwimi  r11, r10, 0, _PAGE_GUARDED
-#ifdef CONFIG_SWAP
-       /* _PAGE_ACCESSED has to be set. We use second APG bit for that, 0
-        * on that bit will represent a Non Access group
-        */
-       rlwinm  r11, r10, 31, _PAGE_ACCESSED >> 1
-#endif
        mtspr   SPRN_MD_TWC, r11
 
+       /* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set.
+        * We also need to know if the insn is a load/store, so:
+        * clear _PAGE_PRESENT so the access will trap into DTLB
+        * Error with the store bit set accordingly.
+        */
+       /* PRESENT=0x1, ACCESSED=0x20
+        * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+        * r10 = (r10 & ~PRESENT) | r11;
+        */
+#ifdef CONFIG_SWAP
+       rlwinm  r11, r10, 32-5, _PAGE_PRESENT
+       and     r11, r11, r10
+       rlwimi  r10, r11, 0, _PAGE_PRESENT
+#endif
        /* The Linux PTE won't go exactly into the MMU TLB.
         * Software indicator bits 24, 25, 26, and 27 must be
         * set.  All other Linux PTE bits control the behavior
@@ -489,14 +500,16 @@ _ENTRY(DTLBMiss_jmp)
 
        /* Restore registers */
        mtspr   SPRN_DAR, r11   /* Tag DAR */
-_ENTRY(dtlb_miss_exit_1)
-       mfspr   r10, SPRN_SPRG_SCRATCH0
+
+0:     mfspr   r10, SPRN_SPRG_SCRATCH0
        mfspr   r11, SPRN_SPRG_SCRATCH1
        mfspr   r12, SPRN_SPRG_SCRATCH2
        rfi
+       patch_site      0b, patch__dtlbmiss_exit_1
+
 #ifdef CONFIG_PERF_EVENTS
-_ENTRY(dtlb_miss_perf)
-       lis     r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
+       patch_site      0f, patch__dtlbmiss_perf
+0:     lis     r10, (dtlb_miss_counter - PAGE_OFFSET)@ha
        lwz     r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
        addi    r11, r11, 1
        stw     r11, (dtlb_miss_counter - PAGE_OFFSET)@l(r10)
@@ -637,8 +650,8 @@ InstructionBreakpoint:
  */
 DTLBMissIMMR:
        mtcr    r12
-       /* Set 512k byte guarded page and mark it valid and accessed */
-       li      r10, MD_PS512K | MD_GUARDED | MD_SVALID | M_APG2
+       /* Set 512k byte guarded page and mark it valid */
+       li      r10, MD_PS512K | MD_GUARDED | MD_SVALID
        mtspr   SPRN_MD_TWC, r10
        mfspr   r10, SPRN_IMMR                  /* Get current IMMR */
        rlwinm  r10, r10, 0, 0xfff80000         /* Get 512 kbytes boundary */
@@ -648,16 +661,17 @@ DTLBMissIMMR:
 
        li      r11, RPN_PATTERN
        mtspr   SPRN_DAR, r11   /* Tag DAR */
-_ENTRY(dtlb_miss_exit_2)
-       mfspr   r10, SPRN_SPRG_SCRATCH0
+
+0:     mfspr   r10, SPRN_SPRG_SCRATCH0
        mfspr   r11, SPRN_SPRG_SCRATCH1
        mfspr   r12, SPRN_SPRG_SCRATCH2
        rfi
+       patch_site      0b, patch__dtlbmiss_exit_2
 
 DTLBMissLinear:
        mtcr    r12
-       /* Set 8M byte page and mark it valid and accessed */
-       li      r11, MD_PS8MEG | MD_SVALID | M_APG2
+       /* Set 8M byte page and mark it valid */
+       li      r11, MD_PS8MEG | MD_SVALID
        mtspr   SPRN_MD_TWC, r11
        rlwinm  r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
        ori     r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
@@ -666,28 +680,29 @@ DTLBMissLinear:
 
        li      r11, RPN_PATTERN
        mtspr   SPRN_DAR, r11   /* Tag DAR */
-_ENTRY(dtlb_miss_exit_3)
-       mfspr   r10, SPRN_SPRG_SCRATCH0
+
+0:     mfspr   r10, SPRN_SPRG_SCRATCH0
        mfspr   r11, SPRN_SPRG_SCRATCH1
        mfspr   r12, SPRN_SPRG_SCRATCH2
        rfi
+       patch_site      0b, patch__dtlbmiss_exit_3
 
 #ifndef CONFIG_PIN_TLB_TEXT
 ITLBMissLinear:
        mtcr    r12
-       /* Set 8M byte page and mark it valid,accessed */
-       li      r11, MI_PS8MEG | MI_SVALID | M_APG2
+       /* Set 8M byte page and mark it valid */
+       li      r11, MI_PS8MEG | MI_SVALID
        mtspr   SPRN_MI_TWC, r11
        rlwinm  r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
        ori     r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
                          _PAGE_PRESENT
        mtspr   SPRN_MI_RPN, r10        /* Update TLB entry */
 
-_ENTRY(itlb_miss_exit_2)
-       mfspr   r10, SPRN_SPRG_SCRATCH0
+0:     mfspr   r10, SPRN_SPRG_SCRATCH0
        mfspr   r11, SPRN_SPRG_SCRATCH1
        mfspr   r12, SPRN_SPRG_SCRATCH2
        rfi
+       patch_site      0b, patch__itlbmiss_exit_2
 #endif
 
 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
@@ -705,8 +720,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
        mfspr   r11, SPRN_M_TW  /* Get level 1 table */
        blt+    3f
        rlwinm  r11, r10, 16, 0xfff8
-_ENTRY(FixupDAR_cmp)
-       cmpli   cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+
+0:     cmpli   cr7, r11, (PAGE_OFFSET + 0x1800000)@h
+       patch_site      0b, patch__fixupdar_linmem_top
+
        /* create physical page address from effective address */
        tophys(r11, r10)
        blt-    cr7, 201f
@@ -960,7 +977,7 @@ initial_mmu:
        ori     r8, r8, MI_EVALID       /* Mark it valid */
        mtspr   SPRN_MI_EPN, r8
        li      r8, MI_PS8MEG /* Set 8M byte page */
-       ori     r8, r8, MI_SVALID | M_APG2      /* Make it valid, APG 2 */
+       ori     r8, r8, MI_SVALID       /* Make it valid */
        mtspr   SPRN_MI_TWC, r8
        li      r8, MI_BOOTINIT         /* Create RPN for address 0 */
        mtspr   SPRN_MI_RPN, r8         /* Store TLB entry */
@@ -987,7 +1004,7 @@ initial_mmu:
        ori     r8, r8, MD_EVALID       /* Mark it valid */
        mtspr   SPRN_MD_EPN, r8
        li      r8, MD_PS512K | MD_GUARDED      /* Set 512k byte page */
-       ori     r8, r8, MD_SVALID | M_APG2      /* Make it valid and accessed */
+       ori     r8, r8, MD_SVALID       /* Make it valid */
        mtspr   SPRN_MD_TWC, r8
        mr      r8, r9                  /* Create paddr for TLB */
        ori     r8, r8, MI_BOOTINIT|0x2 /* Inhibit cache -- Cort */
index 4d5322cfad25c7b543cc2e80c1f355f2d75030fc..96f34730010fe3f5f778400a14a7a470d4d38142 100644 (file)
@@ -590,12 +590,11 @@ void flush_all_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                BUG_ON(tsk != current);
-               save_all(tsk);
-
 #ifdef CONFIG_SPE
                if (tsk->thread.regs->msr & MSR_SPE)
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
 #endif
+               save_all(tsk);
 
                preempt_enable();
        }
index 2a51e4cc8246d35d18d8ddd54b258af02dda47c4..236c1151a3a77057013313ed5da588673f5f3419 100644 (file)
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
        unsigned long pa;
 
+       BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
        pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
                                        early_cpu_to_node(cpu), MEMBLOCK_NONE);
        if (!pa) {
index 4bf051d3e21e70a61f51136c63c22c1c03c9faae..b65c8a34ad6efb718da71a2556e2db9db6bb7cf2 100644 (file)
@@ -950,7 +950,6 @@ int ftrace_disable_ftrace_graph_caller(void)
  */
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 {
-       struct ftrace_graph_ent trace;
        unsigned long return_hooker;
 
        if (unlikely(ftrace_graph_is_dead()))
@@ -961,18 +960,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 
        return_hooker = ppc_function_entry(return_to_handler);
 
-       trace.func = ip;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace))
-               goto out;
-
-       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
-                                    NULL) == -EBUSY)
-               goto out;
-
-       parent = return_hooker;
+       if (!function_graph_enter(parent, ip, 0, NULL))
+               parent = return_hooker;
 out:
        return parent;
 }
index bf8def2159c31e3e921394464e1491a5097f23b4..a56f8413758ab1d796328093eb754d3b6715f4eb 100644 (file)
@@ -983,6 +983,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
                ret = kvmhv_enter_nested_guest(vcpu);
                if (ret == H_INTERRUPT) {
                        kvmppc_set_gpr(vcpu, 3, 0);
+                       vcpu->arch.hcall_needed = 0;
                        return -EINTR;
                }
                break;
@@ -2337,8 +2338,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
                kvmppc_core_prepare_to_enter(vcpu);
                return;
        }
-       dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
-                  / tb_ticks_per_sec;
+       dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
        hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
        vcpu->arch.timer_running = 1;
 }
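The open-coded conversion multiplied the tick delta by NSEC_PER_SEC before dividing, which wraps a u64 once the delta passes about 1.8e10 ticks, roughly 36 seconds at an assumed 512 MHz timebase; tb_to_ns() scales with a multiply/shift instead. For example:

    u64 ticks = 60 * 512000000ULL;   /* one minute at 512 MHz (assumed) */
    /* old: ticks * NSEC_PER_SEC == 3.07e19 > U64_MAX (~1.84e19) -> wraps
     * new: dec_nsec = tb_to_ns(ticks) stays exact */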
index fa888bfc347e6e6e10055cd7b2e36a6c5c4ecf2b..9f5b8c01c4e165a969d3317be6b5c5b9ae71b2f9 100644 (file)
@@ -61,11 +61,10 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 
        dec_time = vcpu->arch.dec;
        /*
-        * Guest timebase ticks at the same frequency as host decrementer.
-        * So use the host decrementer calculations for decrementer emulation.
+        * Guest timebase ticks at the same frequency as host timebase.
+        * So use the host timebase calculations for decrementer emulation.
         */
-       dec_time = dec_time << decrementer_clockevent.shift;
-       do_div(dec_time, decrementer_clockevent.mult);
+       dec_time = tb_to_ns(dec_time);
        dec_nsec = do_div(dec_time, NSEC_PER_SEC);
        hrtimer_start(&vcpu->arch.dec_timer,
                ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
index 491b0f715d6bc2c345850645f2dbcd4700f6f182..ea1d7c80831900c4403443b8d836cd462998bf22 100644 (file)
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
 
 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>
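The same tail is applied to trace_booke.h, trace_hv.h and trace_pr.h below. Because define_trace.h consumes whatever TRACE_INCLUDE_PATH/FILE happen to be defined at inclusion time, each header now resets them in the unprotected part instead of trusting values set before the include guard:

    /* pattern for the tail of each trace header: */
    #undef TRACE_INCLUDE_PATH
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_PATH .
    #define TRACE_INCLUDE_FILE trace        /* this header's own basename */
    #include <trace/define_trace.h>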
index ac640e81fdc5f43709858ad8b3dd5ec2eee58a8f..3837842986aa46ee4ac80f4759d1051d9221c87c 100644 (file)
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
 
 #define kvm_trace_symbol_exit \
        {0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>
index bcfe8a987f6a977e65f2e9c7a02962a7099c2a66..8a1e3b0047f190e53a64dfe57c9c88f9ac11d617 100644 (file)
@@ -9,8 +9,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
 
 #define kvm_trace_symbol_hcall \
        {H_REMOVE,                      "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>
index 2f9a8829552b946ee8a308a2e069ab6a5c9bb1ba..46a46d328fbf2237dd203d3c33d54dbe0db129b1 100644 (file)
@@ -8,8 +8,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
 
 TRACE_EVENT(kvm_book3s_reenter,
        TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>
index 36484a2ef9158e39b8a0fb46f84e7d070753f090..01b7f5107c3a32d0b4d4e627b10ecd0fbd380bec 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/memblock.h>
+#include <linux/mmu_context.h>
 #include <asm/fixmap.h>
 #include <asm/code-patching.h>
 
@@ -79,7 +80,7 @@ void __init MMU_init_hw(void)
        for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
                mtspr(SPRN_MD_CTR, ctr | (i << 8));
                mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
-               mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID | M_APG2);
+               mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
                mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
                addr += LARGE_PAGE_SIZE_8M;
                mem -= LARGE_PAGE_SIZE_8M;
@@ -97,22 +98,13 @@ static void __init mmu_mapin_immr(void)
                map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
 }
 
-/* Address of instructions to patch */
-#ifndef CONFIG_PIN_TLB_IMMR
-extern unsigned int DTLBMiss_jmp;
-#endif
-extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
-#ifndef CONFIG_PIN_TLB_TEXT
-extern unsigned int ITLBMiss_cmp;
-#endif
-
-static void __init mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
+static void __init mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
 {
-       unsigned int instr = *addr;
+       unsigned int instr = *(unsigned int *)patch_site_addr(site);
 
        instr &= 0xffff0000;
        instr |= (unsigned long)__va(mapped) >> 16;
-       patch_instruction(addr, instr);
+       patch_instruction_site(site, instr);
 }
 
 unsigned long __init mmu_mapin_ram(unsigned long top)
@@ -123,17 +115,17 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
                mapped = 0;
                mmu_mapin_immr();
 #ifndef CONFIG_PIN_TLB_IMMR
-               patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
+               patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
 #endif
 #ifndef CONFIG_PIN_TLB_TEXT
-               mmu_patch_cmp_limit(&ITLBMiss_cmp, 0);
+               mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
 #endif
        } else {
                mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
        }
 
-       mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
-       mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
+       mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
+       mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
 
        /* If the size of RAM is not an exact power of two, we may not
         * have covered RAM in its entirety with 8 MiB
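mmu_patch_cmp_limit() keeps the opcode and register fields of the cmpli at the site and swaps in the high 16 bits of the new limit. For instance, with 24 MiB mapped and PAGE_OFFSET 0xc0000000 (the usual ppc32 value, assumed here), __va(mapped) is 0xc1800000 and the immediate becomes 0xc180:

    unsigned int instr = *(unsigned int *)patch_site_addr(site);
    instr &= 0xffff0000;            /* keep "cmpli cr7, r11, ..." */
    instr |= 0xc1800000UL >> 16;    /* new immediate: 0xc180 */
    patch_instruction_site(site, instr);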
index 3a048e98a13231b6ce8ec239d91e98fe1ed2d3a9..ce28ae5ca08033ff36ee157e9b83be3a61d44f52 100644 (file)
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
 
        switch (rc) {
        case H_FUNCTION:
-               printk(KERN_INFO
+               printk_once(KERN_INFO
                        "VPHN is not supported. Disabling polling...\n");
                stop_topology_update();
                break;
index c3fdf2969d9faec5cacac62dfd9416011f9b17eb..bc3914d54e26ef8c400c65c92b8c359c171f8207 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
 #include <asm/paca.h>
+#include <asm/ppc-opcode.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
 #include <asm/smp.h>
@@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
        return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
 }
 
-static void assert_slb_exists(unsigned long ea)
+static void assert_slb_presence(bool present, unsigned long ea)
 {
 #ifdef CONFIG_DEBUG_VM
        unsigned long tmp;
 
        WARN_ON_ONCE(mfmsr() & MSR_EE);
 
-       asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-       WARN_ON(tmp == 0);
-#endif
-}
-
-static void assert_slb_notexists(unsigned long ea)
-{
-#ifdef CONFIG_DEBUG_VM
-       unsigned long tmp;
+       if (!cpu_has_feature(CPU_FTR_ARCH_206))
+               return;
 
-       WARN_ON_ONCE(mfmsr() & MSR_EE);
+       asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
 
-       asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
-       WARN_ON(tmp != 0);
+       WARN_ON(present == (tmp == 0));
 #endif
 }
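
The merged helper folds both former assertions into one predicate: slbfee. leaves a nonzero result when an entry translates the address, so tmp == 0 means "absent", and the WARN fires exactly when the observed state contradicts the expected one. A self-contained check of that predicate (plain C, illustrative only):

#include <assert.h>
#include <stdbool.h>

/* Mirror of the warning condition above: fire when the observation
 * (tmp == 0 meaning "no SLB entry found") contradicts `present`. */
static bool slb_warn_fires(bool present, unsigned long tmp)
{
	return present == (tmp == 0);
}

int main(void)
{
	assert(!slb_warn_fires(true,  1));  /* expected present, found   */
	assert( slb_warn_fires(true,  0));  /* expected present, missing */
	assert(!slb_warn_fires(false, 0));  /* expected absent,  missing */
	assert( slb_warn_fires(false, 1));  /* expected absent,  found   */
	return 0;
}
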
 
@@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
         */
        slb_shadow_update(ea, ssize, flags, index);
 
-       assert_slb_notexists(ea);
+       assert_slb_presence(false, ea);
        asm volatile("slbmte  %0,%1" :
                     : "r" (mk_vsid_data(ea, ssize, flags)),
                       "r" (mk_esid_data(ea, ssize, index))
@@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
                       "r" (be64_to_cpu(p->save_area[index].esid)));
        }
 
-       assert_slb_exists(local_paca->kstack);
+       assert_slb_presence(true, local_paca->kstack);
 }
 
 /*
@@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
                     :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
                        "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
                     : "memory");
-       assert_slb_exists(get_paca()->kstack);
+       assert_slb_presence(true, get_paca()->kstack);
 
        get_paca()->slb_cache_ptr = 0;
 
@@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                                ea = (unsigned long)
                                        get_paca()->slb_cache[i] << SID_SHIFT;
                                /*
-                                * Could assert_slb_exists here, but hypervisor
-                                * or machine check could have come in and
-                                * removed the entry at this point.
+                                * Could assert_slb_presence(true) here, but
+                                * hypervisor or machine check could have come
+                                * in and removed the entry at this point.
                                 */
 
                                slbie_data = ea;
@@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
         * User preloads should add isync afterwards in case the kernel
         * accesses user memory before it returns to userspace with rfid.
         */
-       assert_slb_notexists(ea);
+       assert_slb_presence(false, ea);
        asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
 
        barrier();
@@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
                        return -EFAULT;
 
                if (ea < H_VMALLOC_END)
-                       flags = get_paca()->vmalloc_sllp;
+                       flags = local_paca->vmalloc_sllp;
                else
                        flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
        } else {
index 50b129785aeeead06f8d131a64d30a6ccda2576e..9393e231cbc2813e1f24c4484c62bb87fbf24a53 100644 (file)
@@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
        PPC_BLR();
 }
 
-static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
+static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
+                                      u64 func)
+{
+#ifdef PPC64_ELF_ABI_v1
+       /* func points to the function descriptor */
+       PPC_LI64(b2p[TMP_REG_2], func);
+       /* Load actual entry point from function descriptor */
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
+       /* ... and move it to LR */
+       PPC_MTLR(b2p[TMP_REG_1]);
+       /*
+        * Load TOC from function descriptor at offset 8.
+        * We can clobber r2 since we get called through a
+        * function pointer (so the caller will save/restore r2)
+        * and since we don't use a TOC ourselves.
+        */
+       PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
+#else
+       /* We can clobber r12 */
+       PPC_FUNC_ADDR(12, func);
+       PPC_MTLR(12);
+#endif
+       PPC_BLRL();
+}
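
For context on the _hlp path: under the big-endian ELFv1 ABI a function symbol resolves to a descriptor rather than to code, which is why the sequence above loads the entry point from offset 0 and the TOC pointer from offset 8. A sketch of that layout (field names are illustrative; the ABI fixes only the offsets):

/* ELFv1 function descriptor assumed by the loads above. */
struct func_desc_sketch {
	unsigned long entry;        /* +0:  code address, moved to LR   */
	unsigned long toc;          /* +8:  TOC pointer, loaded into r2 */
	unsigned long environment;  /* +16: unused for C functions      */
};
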
+
+static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx,
+                                      u64 func)
 {
        unsigned int i, ctx_idx = ctx->idx;
 
@@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 {
        const struct bpf_insn *insn = fp->insnsi;
        int flen = fp->len;
-       int i;
+       int i, ret;
 
        /* Start of epilogue code - will only be valid 2nd pass onwards */
        u32 exit_addr = addrs[flen];
@@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                u32 src_reg = b2p[insn[i].src_reg];
                s16 off = insn[i].off;
                s32 imm = insn[i].imm;
+               bool func_addr_fixed;
+               u64 func_addr;
                u64 imm64;
-               u8 *func;
                u32 true_cond;
                u32 tmp_idx;
 
@@ -711,23 +738,15 @@ emit_clear:
                case BPF_JMP | BPF_CALL:
                        ctx->seen |= SEEN_FUNC;
 
-                       /* bpf function call */
-                       if (insn[i].src_reg == BPF_PSEUDO_CALL)
-                               if (!extra_pass)
-                                       func = NULL;
-                               else if (fp->aux->func && off < fp->aux->func_cnt)
-                                       /* use the subprog id from the off
-                                        * field to lookup the callee address
-                                        */
-                                       func = (u8 *) fp->aux->func[off]->bpf_func;
-                               else
-                                       return -EINVAL;
-                       /* kernel helper call */
-                       else
-                               func = (u8 *) __bpf_call_base + imm;
-
-                       bpf_jit_emit_func_call(image, ctx, (u64)func);
+                       ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+                                                   &func_addr, &func_addr_fixed);
+                       if (ret < 0)
+                               return ret;
 
+                       if (func_addr_fixed)
+                               bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
+                       else
+                               bpf_jit_emit_func_call_rel(image, ctx, func_addr);
                        /* move return value from r3 to BPF_REG_0 */
                        PPC_MR(b2p[BPF_REG_0], 3);
                        break;
@@ -872,6 +891,55 @@ cond_branch:
        return 0;
 }
 
+/* Fix the branch target addresses for subprog calls */
+static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
+                                      struct codegen_context *ctx, u32 *addrs)
+{
+       const struct bpf_insn *insn = fp->insnsi;
+       bool func_addr_fixed;
+       u64 func_addr;
+       u32 tmp_idx;
+       int i, ret;
+
+       for (i = 0; i < fp->len; i++) {
+               /*
+                * During the extra pass, only the branch target addresses for
+                * the subprog calls need to be fixed. All other instructions
+                * can be left untouched.
+                *
+                * The JITed image length does not change because we already
+                * ensure that the JITed instruction sequences for these
+                * calls are of fixed length by padding them with NOPs.
+                */
+               if (insn[i].code == (BPF_JMP | BPF_CALL) &&
+                   insn[i].src_reg == BPF_PSEUDO_CALL) {
+                       ret = bpf_jit_get_func_addr(fp, &insn[i], true,
+                                                   &func_addr,
+                                                   &func_addr_fixed);
+                       if (ret < 0)
+                               return ret;
+
+                       /*
+                        * Save ctx->idx as this would currently point to the
+                        * end of the JITed image and set it to the offset of
+                        * the instruction sequence corresponding to the
+                        * subprog call temporarily.
+                        */
+                       tmp_idx = ctx->idx;
+                       ctx->idx = addrs[i] / 4;
+                       bpf_jit_emit_func_call_rel(image, ctx, func_addr);
+
+                       /*
+                        * Restore ctx->idx here. This is safe as the length
+                        * of the JITed sequence remains unchanged.
+                        */
+                       ctx->idx = tmp_idx;
+               }
+       }
+
+       return 0;
+}
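
The in-place rewrite only works because the call sequences were emitted at a fixed length in the first place. A hedged sketch of that emission strategy: emit_addr_load() and the slot count are hypothetical stand-ins, while 0x60000000 is the powerpc NOP encoding (ori 0,0,0):

#define NOP_INSN       0x60000000u  /* powerpc nop: ori 0,0,0 */
#define CALL_SEQ_SLOTS 5            /* hypothetical fixed length */

/* Hypothetical emitter: loads `addr` in 1..CALL_SEQ_SLOTS insns. */
extern void emit_addr_load(unsigned int *image, int *idx,
			   unsigned long addr);

/* Pad the variable-length load out to a constant slot count so a
 * later pass can rewrite it in place without moving any offsets. */
static void emit_fixed_len_call(unsigned int *image, int *idx,
				unsigned long addr)
{
	int start = *idx;

	emit_addr_load(image, idx, addr);
	while (*idx - start < CALL_SEQ_SLOTS)
		image[(*idx)++] = NOP_INSN;
}
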
+
 struct powerpc64_jit_data {
        struct bpf_binary_header *header;
        u32 *addrs;
@@ -970,6 +1038,22 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 skip_init_ctx:
        code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
 
+       if (extra_pass) {
+               /*
+                * Do not touch the prologue and epilogue as they will remain
+                * unchanged. Only fix the branch target address for subprog
+                * calls in the body.
+                *
+                * This does not change the offsets and lengths of the subprog
+                * call instruction sequences and hence, the size of the JITed
+                * image as well.
+                */
+               bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
+
+               /* There is no need to perform the usual passes. */
+               goto skip_codegen_passes;
+       }
+
        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
@@ -983,6 +1067,7 @@ skip_init_ctx:
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }
 
+skip_codegen_passes:
        if (bpf_jit_enable > 1)
                /*
                 * Note that we output the base address of the code_base
index 6c0020d1c5614c6610faa359ef87254e3a4c8107..e38f74e9e7a4aec85166bdcd0eeefe529c731bc9 100644 (file)
@@ -31,9 +31,6 @@
 
 extern unsigned long itlb_miss_counter, dtlb_miss_counter;
 extern atomic_t instruction_counter;
-extern unsigned int itlb_miss_perf, dtlb_miss_perf;
-extern unsigned int itlb_miss_exit_1, itlb_miss_exit_2;
-extern unsigned int dtlb_miss_exit_1, dtlb_miss_exit_2, dtlb_miss_exit_3;
 
 static atomic_t insn_ctr_ref;
 static atomic_t itlb_miss_ref;
@@ -103,22 +100,22 @@ static int mpc8xx_pmu_add(struct perf_event *event, int flags)
                break;
        case PERF_8xx_ID_ITLB_LOAD_MISS:
                if (atomic_inc_return(&itlb_miss_ref) == 1) {
-                       unsigned long target = (unsigned long)&itlb_miss_perf;
+                       unsigned long target = patch_site_addr(&patch__itlbmiss_perf);
 
-                       patch_branch(&itlb_miss_exit_1, target, 0);
+                       patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
 #ifndef CONFIG_PIN_TLB_TEXT
-                       patch_branch(&itlb_miss_exit_2, target, 0);
+                       patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
 #endif
                }
                val = itlb_miss_counter;
                break;
        case PERF_8xx_ID_DTLB_LOAD_MISS:
                if (atomic_inc_return(&dtlb_miss_ref) == 1) {
-                       unsigned long target = (unsigned long)&dtlb_miss_perf;
+                       unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);
 
-                       patch_branch(&dtlb_miss_exit_1, target, 0);
-                       patch_branch(&dtlb_miss_exit_2, target, 0);
-                       patch_branch(&dtlb_miss_exit_3, target, 0);
+                       patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
+                       patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
+                       patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
                }
                val = dtlb_miss_counter;
                break;
@@ -180,17 +177,17 @@ static void mpc8xx_pmu_del(struct perf_event *event, int flags)
                break;
        case PERF_8xx_ID_ITLB_LOAD_MISS:
                if (atomic_dec_return(&itlb_miss_ref) == 0) {
-                       patch_instruction(&itlb_miss_exit_1, insn);
+                       patch_instruction_site(&patch__itlbmiss_exit_1, insn);
 #ifndef CONFIG_PIN_TLB_TEXT
-                       patch_instruction(&itlb_miss_exit_2, insn);
+                       patch_instruction_site(&patch__itlbmiss_exit_2, insn);
 #endif
                }
                break;
        case PERF_8xx_ID_DTLB_LOAD_MISS:
                if (atomic_dec_return(&dtlb_miss_ref) == 0) {
-                       patch_instruction(&dtlb_miss_exit_1, insn);
-                       patch_instruction(&dtlb_miss_exit_2, insn);
-                       patch_instruction(&dtlb_miss_exit_3, insn);
+                       patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
+                       patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
+                       patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
                }
                break;
        }
index 2a9d66254ffc58c92b9d86f663719d646734f342..5326ece361204054992c503b3540465bc6bda512 100644 (file)
@@ -29,6 +29,7 @@ config KILAUEA
        select 405EX
        select PPC40x_SIMPLE
        select PPC4xx_PCI_EXPRESS
+       select PCI
        select PCI_MSI
        select PPC4xx_MSI
        help
index f024efd5a4c2061b6989beaa4d4cda98f5e65f9e..9a85d350b1b6c7b36418b661d4fb81ccc742fa58 100644 (file)
@@ -21,6 +21,7 @@ config BLUESTONE
        depends on 44x
        select PPC44x_SIMPLE
        select APM821xx
+       select PCI
        select PCI_MSI
        select PPC4xx_MSI
        select PPC4xx_PCI_EXPRESS
@@ -200,6 +201,7 @@ config AKEBONO
        select SWIOTLB
        select 476FPE
        select PPC4xx_PCI_EXPRESS
+       select PCI
        select PCI_MSI
        select PPC4xx_HSTA_MSI
        select I2C
index 6f60e09319223015f5ebd36e3aa1fd9f66078520..75b9352529818899e99a978a4d85beeae535db90 100644 (file)
@@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 }
 EXPORT_SYMBOL(pnv_pci_get_npu_dev);
 
-#define NPU_DMA_OP_UNSUPPORTED()                                       \
-       dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
-               __func__)
-
-static void *dma_npu_alloc(struct device *dev, size_t size,
-                          dma_addr_t *dma_handle, gfp_t flag,
-                          unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return NULL;
-}
-
-static void dma_npu_free(struct device *dev, size_t size,
-                        void *vaddr, dma_addr_t dma_handle,
-                        unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-}
-
-static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
-                                  unsigned long offset, size_t size,
-                                  enum dma_data_direction direction,
-                                  unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
-                         int nelems, enum dma_data_direction direction,
-                         unsigned long attrs)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static int dma_npu_dma_supported(struct device *dev, u64 mask)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static u64 dma_npu_get_required_mask(struct device *dev)
-{
-       NPU_DMA_OP_UNSUPPORTED();
-       return 0;
-}
-
-static const struct dma_map_ops dma_npu_ops = {
-       .map_page               = dma_npu_map_page,
-       .map_sg                 = dma_npu_map_sg,
-       .alloc                  = dma_npu_alloc,
-       .free                   = dma_npu_free,
-       .dma_supported          = dma_npu_dma_supported,
-       .get_required_mask      = dma_npu_get_required_mask,
-};
-
 /*
  * Returns the PE associated with the PCI device of the given
  * NPU. Returns the linked pci device if pci_dev != NULL.
@@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
        rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
 
        /*
-        * We don't initialise npu_pe->tce32_table as we always use
-        * dma_npu_ops which are nops.
+        * NVLink devices use the same TCE table configuration as
+        * their parent device, so drivers shouldn't be doing DMA
+        * operations directly on these devices.
         */
-       set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+       set_dma_ops(&npe->pdev->dev, NULL);
 }
 
 /*
index 8bd590af488a1a1c379476a77dae5eb3809c44e3..794487313cc8d252af91e3ed113e51c624cff3d9 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 #include <asm/lppaca.h>
 #include <asm/hvcall.h>
 #include <asm/firmware.h>
@@ -36,6 +37,7 @@
 #include <asm/vio.h>
 #include <asm/mmu.h>
 #include <asm/machdep.h>
+#include <asm/drmem.h>
 
 #include "pseries.h"
 
@@ -433,6 +435,16 @@ static void parse_em_data(struct seq_file *m)
                seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }
 
+static void maxmem_data(struct seq_file *m)
+{
+       unsigned long maxmem = 0;
+
+       maxmem += drmem_info->n_lmbs * drmem_info->lmb_size;
+       maxmem += hugetlb_total_pages() * PAGE_SIZE;
+
+       seq_printf(m, "MaxMem=%ld\n", maxmem);
+}
+
 static int pseries_lparcfg_data(struct seq_file *m, void *v)
 {
        int partition_potential_processors;
@@ -491,6 +503,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
        seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 #endif
        parse_em_data(m);
+       maxmem_data(m);
 
        return 0;
 }
index 69e7fb47bcaa3e39c81f017214e388e450b2ac2d..878f9c1d36150c80a021413f23ac36577ef881c0 100644 (file)
@@ -11,6 +11,12 @@ UBSAN_SANITIZE := n
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
 
+ifdef CONFIG_CC_IS_CLANG
+# clang stores addresses on the stack causing the frame size to blow
+# out. See https://github.com/ClangBuiltLinux/linux/issues/252
+KBUILD_CFLAGS += -Wframe-larger-than=4096
+endif
+
 ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
 
 obj-y                  += xmon.o nonstdio.o spr_access.o
index d10146197533affd63c3e7392ccd73a4d7ba2e27..4b594f2e4f7ebd9eb791f6c2709f4023d3aa76c6 100644 (file)
@@ -71,10 +71,27 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
 # arch specific predefines for sparse
 CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
 
+# Default target when executing plain make
+boot           := arch/riscv/boot
+KBUILD_IMAGE   := $(boot)/Image.gz
+
 head-y := arch/riscv/kernel/head.o
 
 core-y += arch/riscv/kernel/ arch/riscv/mm/
 
 libs-y += arch/riscv/lib/
 
-all: vmlinux
+PHONY += vdso_install
+vdso_install:
+       $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
+all: Image.gz
+
+Image: vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: Image
+       $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+zinstall install:
+       $(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
new file mode 100644 (file)
index 0000000..8dab0bb
--- /dev/null
@@ -0,0 +1,2 @@
+Image
+Image.gz
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
new file mode 100644 (file)
index 0000000..0990a9f
--- /dev/null
@@ -0,0 +1,33 @@
+#
+# arch/riscv/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2018, Anup Patel.
+# Author: Anup Patel <anup@brainfault.org>
+#
+# Based on the ia64 and arm64 boot/Makefile.
+#
+
+OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+targets := Image
+
+$(obj)/Image: vmlinux FORCE
+       $(call if_changed,objcopy)
+
+$(obj)/Image.gz: $(obj)/Image FORCE
+       $(call if_changed,gzip)
+
+install:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(obj)/Image System.map "$(INSTALL_PATH)"
+
+zinstall:
+       $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+       $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
new file mode 100644 (file)
index 0000000..18c3915
--- /dev/null
@@ -0,0 +1,60 @@
+#!/bin/sh
+#
+# arch/riscv/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the RISC-V Linux port
+#
+# Arguments:
+#   $1 - kernel version
+#   $2 - kernel image file
+#   $3 - kernel map file
+#   $4 - default install path (blank if root directory)
+#
+
+verify () {
+       if [ ! -f "$1" ]; then
+               echo ""                                                   1>&2
+               echo " *** Missing file: $1"                              1>&2
+               echo ' *** You need to run "make" before "make install".' 1>&2
+               echo ""                                                   1>&2
+               exit 1
+       fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+if [ "$(basename $2)" = "Image.gz" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
index 36473d7dbaac4a602096414e20d2cad2ed296358..ef4f15df9adf03c091bf621f900413680a2829f2 100644 (file)
@@ -1,6 +1,3 @@
-CONFIG_SMP=y
-CONFIG_PCI=y
-CONFIG_PCIE_XILINX=y
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_IKCONFIG=y
@@ -11,10 +8,15 @@ CONFIG_CFS_BANDWIDTH=y
 CONFIG_CGROUP_BPF=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
-CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BPF_SYSCALL=y
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -59,6 +61,7 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_SIFIVE_PLIC=y
 CONFIG_RAS=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -72,8 +75,6 @@ CONFIG_NFS_V4=y
 CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
-# CONFIG_RCU_TRACE is not set
 CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_SIFIVE_PLIC=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_RCU_TRACE is not set
index 349df33808c4231d155d2e6018e19cc4cb19cb4d..cd2af4b013e3826e3b43f44565a9b6c1c6ae7b70 100644 (file)
@@ -8,6 +8,7 @@
 
 #define MODULE_ARCH_VERMAGIC    "riscv"
 
+struct module;
 u64 module_emit_got_entry(struct module *mod, u64 val);
 u64 module_emit_plt_entry(struct module *mod, u64 val);
 
index 2c5df945d43c9abfdfd197a61d8c92ce20e48133..bbe1862e8f80cd404164f03a485a89f253f9fa45 100644 (file)
@@ -56,8 +56,8 @@ struct pt_regs {
        unsigned long sstatus;
        unsigned long sbadaddr;
        unsigned long scause;
-        /* a0 value before the syscall */
-        unsigned long orig_a0;
+       /* a0 value before the syscall */
+       unsigned long orig_a0;
 };
 
 #ifdef CONFIG_64BIT
index 473cfc84e412f3827703caaadffa34a8983c978d..8c3e3e3c8be1204b67076985a9b54e80217707b1 100644 (file)
@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
 static inline unsigned long
 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-       return __asm_copy_to_user(to, from, n);
+       return __asm_copy_from_user(to, from, n);
 }
 
 static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-       return __asm_copy_from_user(to, from, n);
+       return __asm_copy_to_user(to, from, n);
 }
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
index eff7aa9aa1637851aadd871807644560bef5c412..fef96f117b4def3fe9a99ec9ca8b26d2278e087c 100644 (file)
 
 /*
  * There is explicitly no include guard here because this file is expected to
- * be included multiple times.  See uapi/asm/syscalls.h for more info.
+ * be included multiple times.
  */
 
-#define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SYS_CLONE
+
 #include <uapi/asm/unistd.h>
-#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/syscalls.h
deleted file mode 100644 (file)
index 206dc4b..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2017-2018 SiFive
- */
-
-/*
- * There is explicitly no include guard here because this file is expected to
- * be included multiple times in order to define the syscall macros via
- * __SYSCALL.
- */
-
-/*
- * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
- * having a direct 'fence.i' instruction available to userspace (which we
- * can't trap!), that's not actually viable when running on Linux because the
- * kernel might schedule a process on another hart.  There is no way for
- * userspace to handle this without invoking the kernel (as it doesn't know the
- * thread->hart mappings), so we've defined a RISC-V specific system call to
- * flush the instruction cache.
- *
- * __NR_riscv_flush_icache is defined to flush the instruction cache over an
- * address range, with the flush applying to either all threads or just the
- * caller.  We don't currently do anything with the address range, that's just
- * in there for forwards compatibility.
- */
-#ifndef __NR_riscv_flush_icache
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-#endif
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
new file mode 100644 (file)
index 0000000..1f3bd3e
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
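
A hedged userspace sketch of the syscall this header exposes, assuming the uapi header is installed as <asm/unistd.h>; the (start, end, flags) convention follows the comment above, with flags == 0 meaning the flush applies to all threads:

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>

static void flush_icache(void *start, void *end)
{
	/* flags == 0: the flush is made visible to every thread */
	syscall(__NR_riscv_flush_icache, start, end, 0UL);
}
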
index 3a5a2ee31547b2ca1f3ec0f0cf613ff3448e85e3..b4a7d4427fbb9430e02cdc1855729a9eba90c815 100644 (file)
@@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
 
 static void print_isa(struct seq_file *f, const char *orig_isa)
 {
-       static const char *ext = "mafdc";
+       static const char *ext = "mafdcsu";
        const char *isa = orig_isa;
        const char *e;
 
@@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
        /*
         * Check the rest of the ISA string for valid extensions, printing those
         * we find.  RISC-V ISA strings define an order, so we only print the
-        * extension bits when they're in order.
+        * extension bits when they're in order. Hide the supervisor (S)
+        * extension from userspace as it's not accessible from there.
         */
        for (e = ext; *e != '\0'; ++e) {
                if (isa[0] == e[0]) {
-                       seq_write(f, isa, 1);
+                       if (isa[0] != 's')
+                               seq_write(f, isa, 1);
+
                        isa++;
                }
        }
index 1157b6b52d259fa79edf0d44f589022174aff18c..c433f6d3dd64f0b4eec0a567854d1cfca59c650f 100644 (file)
@@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
 {
        unsigned long return_hooker = (unsigned long)&return_to_handler;
        unsigned long old;
-       struct ftrace_graph_ent trace;
        int err;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
@@ -144,17 +143,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
         */
        old = *parent;
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       if (!ftrace_graph_entry(&trace))
-               return;
-
-       err = ftrace_push_return_trace(old, self_addr, &trace.depth,
-                                      frame_pointer, parent);
-       if (err == -EBUSY)
-               return;
-       *parent = return_hooker;
+       if (!function_graph_enter(old, self_addr, frame_pointer, parent))
+               *parent = return_hooker;
 }
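
This hunk, like the matching s390, sh and sparc conversions further down, replaces the open-coded sequence (fill a ftrace_graph_ent, ask ftrace_graph_entry() whether to trace, push the return address) with the common function_graph_enter(), which returns 0 on success. A simplified sketch of what the helper consolidates, using the callee signatures visible in the removed code; the real implementation also maintains depth accounting:

/* Simplified sketch only; error unwinding is more involved. */
int function_graph_enter_sketch(unsigned long ret, unsigned long func,
				unsigned long frame_pointer,
				unsigned long *retp)
{
	struct ftrace_graph_ent trace = { .func = func };

	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		return -EBUSY;

	return ftrace_push_return_trace(ret, func, &trace.depth,
					frame_pointer, retp);
}
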
 
 #ifdef CONFIG_DYNAMIC_FTRACE
index 711190d473d41f47dd52f9fc4720df720ebf57be..fe884cd69abd8f0d7b3fe3a9e20b781937a32502 100644 (file)
@@ -44,6 +44,16 @@ ENTRY(_start)
        amoadd.w a3, a2, (a3)
        bnez a3, .Lsecondary_start
 
+       /* Clear BSS for flat non-ELF images */
+       la a3, __bss_start
+       la a4, __bss_stop
+       ble a4, a3, clear_bss_done
+clear_bss:
+       REG_S zero, (a3)
+       add a3, a3, RISCV_SZPTR
+       blt a3, a4, clear_bss
+clear_bss_done:
+
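
A C rendering of the loop just added, for readers who don't speak RISC-V assembly; __bss_start and __bss_stop come from the linker script, and the stores run word by word exactly like the REG_S loop:

extern unsigned long __bss_start[], __bss_stop[];

/* Zero the BSS when the boot flow (e.g. a flat non-ELF image) has
 * not already done so. Illustrative equivalent of the asm above. */
static void clear_bss_sketch(void)
{
	unsigned long *p;

	for (p = __bss_start; p < __bss_stop; p++)
		*p = 0;
}
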
        /* Save hart ID and DTB physical address */
        mv s0, a0
        mv s1, a1
index 3303ed2cd4193f82c51730a992d6c875b361ff80..7dd308129b40f1862ab04dc1e12c790bf7c111fe 100644 (file)
@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
        if (v != (u32)v) {
                pr_err("%s: value %016llx out of range for 32-bit field\n",
-                      me->name, v);
+                      me->name, (long long)v);
                return -EINVAL;
        }
        *location = v;
@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
        if (offset != (s32)offset) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
        if (IS_ENABLED(CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
        } else {
                pr_err(
                  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                } else {
                        pr_err(
                          "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                         me->name, v, location);
+                         me->name, (long long)v, location);
                        return -EINVAL;
                }
        }
@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
        if (offset != fill_v) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-                 me->name, v, location);
+                 me->name, (long long)v, location);
                return -EINVAL;
        }
 
index ece84991609ca56d2d3549d1a2dde2139a943028..65df1dfdc30385be7a9a149034a0ba66a5bbbd8e 100644 (file)
@@ -74,7 +74,7 @@ SECTIONS
                *(.sbss*)
        }
 
-       BSS_SECTION(0, 0, 0)
+       BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
 
        EXCEPTION_TABLE(0x10)
        NOTES
index 5739bd05d289e5034b5d9faee2baebb1be1251ed..4e2e600f7d5384074fff062628cbb04f9113f7ab 100644 (file)
@@ -3,6 +3,6 @@ lib-y   += memcpy.o
 lib-y  += memset.o
 lib-y  += uaccess.o
 
-lib-(CONFIG_64BIT) += tishift.o
+lib-$(CONFIG_64BIT) += tishift.o
 
 lib-$(CONFIG_32BIT) += udivdi3.o
index 0b33577932c3bd9c552c62cfe473979987c97313..e21053e5e0da2a06c3ba78e9967e55837ecaddc0 100644 (file)
@@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
 KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
 UTS_MACHINE    := s390x
-STACK_SIZE     := $(if $(CONFIG_KASAN),32768,16384)
+STACK_SIZE     := $(if $(CONFIG_KASAN),65536,16384)
 CHECKFLAGS     += -D__s390__ -D__s390x__
 
 export LD_BFD
index 593039620487a6cdad8e076272b8e97cacff0153..b1bdd15e3429f39d50b0c8e73896c5539a4cfc5e 100644 (file)
@@ -22,10 +22,10 @@ OBJCOPYFLAGS :=
 OBJECTS := $(addprefix $(obj)/,$(obj-y))
 
 LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
        $(call if_changed,ld)
 
-OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
 $(obj)/info.bin: vmlinux FORCE
        $(call if_changed,objcopy)
 
@@ -46,17 +46,17 @@ suffix-$(CONFIG_KERNEL_LZMA)  := .lzma
 suffix-$(CONFIG_KERNEL_LZO)  := .lzo
 suffix-$(CONFIG_KERNEL_XZ)  := .xz
 
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
        $(call if_changed,xzkern)
 
 OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
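
Every rule touched above gains a FORCE prerequisite for the same reason: $(call if_changed,...) re-runs its command when the saved command line differs, and that comparison only happens if the rule fires on every build. The general kbuild shape, with an illustrative target name:

# List the target so kbuild reads the saved command in .foo.bin.cmd,
# and depend on FORCE so if_changed gets to compare command lines.
targets += foo.bin

$(obj)/foo.bin: $(obj)/foo.elf FORCE
	$(call if_changed,objcopy)
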
index 259d1698ac50a468021e17a6a2fbe93526f520f2..c69cb04b7a5948e56535a145cb788de06fa4bed8 100644 (file)
@@ -64,6 +64,8 @@ CONFIG_NUMA=y
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -84,9 +86,11 @@ CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -161,8 +165,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
 CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
@@ -365,6 +367,8 @@ CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
@@ -461,6 +465,7 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -486,9 +491,12 @@ CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -615,7 +623,6 @@ CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
@@ -727,3 +734,4 @@ CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
 CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
index 37fd60c20e22dec8cd8452baaf89135debccf735..32f539dc9c19240d589a5cb62fb51e0a30d9baf5 100644 (file)
@@ -65,6 +65,8 @@ CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -82,9 +84,11 @@ CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
 CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
 CONFIG_CRASH_DUMP=y
 CONFIG_BINFMT_MISC=m
 CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=m
@@ -159,8 +163,6 @@ CONFIG_NF_CONNTRACK_TFTP=m
 CONFIG_NF_CT_NETLINK=m
 CONFIG_NF_CT_NETLINK_TIMEOUT=m
 CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
@@ -362,6 +364,8 @@ CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
 CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
@@ -458,6 +462,7 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
@@ -483,9 +488,12 @@ CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
 CONFIG_VIRTIO_PCI=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -666,3 +674,4 @@ CONFIG_APPLDATA_BASE=y
 CONFIG_KVM=m
 CONFIG_KVM_S390_UCONTROL=y
 CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
index 7cb6a52f727dafc6c994423b0db21ccafec4993a..4d58a92b5d979f15e3469240c47a8e6f5fc4c189 100644 (file)
@@ -26,14 +26,23 @@ CONFIG_CGROUP_CPUACCT=y
 CONFIG_CGROUP_PERF=y
 CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
 # CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
 CONFIG_OPROFILE=y
 CONFIG_KPROBES=y
 CONFIG_JUMP_LABEL=y
@@ -44,11 +53,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_NR_CPUS=256
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
+CONFIG_BINFMT_MISC=m
 CONFIG_MEMORY_HOTPLUG=y
 CONFIG_MEMORY_HOTREMOVE=y
 CONFIG_KSM=y
@@ -60,9 +65,6 @@ CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
 CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -98,6 +100,7 @@ CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
 CONFIG_BLK_DEV_SD=y
 CONFIG_CHR_DEV_ST=y
 CONFIG_BLK_DEV_SR=y
@@ -131,6 +134,7 @@ CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AURORA is not set
 # CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_NET_VENDOR_SOCIONEXT is not set
@@ -157,33 +161,6 @@ CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_LOCK_STAT=y
-CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
@@ -193,6 +170,7 @@ CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_CTS=m
 CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
 CONFIG_CRYPTO_CMAC=m
@@ -231,7 +209,6 @@ CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
-CONFIG_ZCRYPT_MULTIDEVNODES=y
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
@@ -247,4 +224,30 @@ CONFIG_CRC7=m
 # CONFIG_XZ_DEC_ARM is not set
 # CONFIG_XZ_DEC_ARMTHUMB is not set
 # CONFIG_XZ_DEC_SPARC is not set
-CONFIG_CMM=m
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_S390_PTDUMP=y
index dbd689d556ce5dd9368392a1e0676c18163acc3c..ccbb53e2202404b85aae86e883d3e64405d2d305 100644 (file)
@@ -46,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk,
                mm->context.asce_limit = STACK_TOP_MAX;
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
-               /* pgd_alloc() did not account this pud */
-               mm_inc_nr_puds(mm);
                break;
        case -PAGE_SIZE:
                /* forked 5-level task, set new asce with new_mm->pgd */
@@ -63,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk,
                /* forked 2-level compat task, set new asce with new mm->pgd */
                mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
-               /* pgd_alloc() did not account this pmd */
-               mm_inc_nr_pmds(mm);
-               mm_inc_nr_puds(mm);
        }
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
index f0f9bcf94c03749b0f0030d9de5765cff1597d37..5ee733720a5716b2308210d497f9c8ab73485cfa 100644 (file)
@@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
 
 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
 {
-       if (mm->context.asce_limit <= _REGION3_SIZE)
+       if (mm_pmd_folded(mm))
                return _SEGMENT_ENTRY_EMPTY;
-       if (mm->context.asce_limit <= _REGION2_SIZE)
+       if (mm_pud_folded(mm))
                return _REGION3_ENTRY_EMPTY;
-       if (mm->context.asce_limit <= _REGION1_SIZE)
+       if (mm_p4d_folded(mm))
                return _REGION2_ENTRY_EMPTY;
        return _REGION1_ENTRY_EMPTY;
 }
index 411d435e7a7d2a5a8c650c812017d66f9738710a..063732414dfbb5076c431d13e694e239e878ebef 100644 (file)
@@ -493,6 +493,24 @@ static inline int is_module_addr(void *addr)
                                   _REGION_ENTRY_PROTECT | \
                                   _REGION_ENTRY_NOEXEC)
 
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+       return mm->context.asce_limit <= _REGION1_SIZE;
+}
+#define mm_p4d_folded(mm) mm_p4d_folded(mm)
+
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+       return mm->context.asce_limit <= _REGION2_SIZE;
+}
+#define mm_pud_folded(mm) mm_pud_folded(mm)
+
+static inline bool mm_pmd_folded(struct mm_struct *mm)
+{
+       return mm->context.asce_limit <= _REGION3_SIZE;
+}
+#define mm_pmd_folded(mm) mm_pmd_folded(mm)
+
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
index 302795c47c06c299b732ed73de7b057a71b3805c..81038ab357ce955682b713f0c4241611ba5f931f 100644 (file)
@@ -236,7 +236,7 @@ static inline unsigned long current_stack_pointer(void)
        return sp;
 }
 
-static __no_sanitize_address_or_inline unsigned short stap(void)
+static __no_kasan_or_inline unsigned short stap(void)
 {
        unsigned short cpu_address;
 
@@ -330,7 +330,7 @@ static inline void __load_psw(psw_t psw)
  * Set PSW mask to specified value, while leaving the
  * PSW addr pointing to the next instruction.
  */
-static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
+static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
 {
        unsigned long addr;
        psw_t psw;
index 27248f42a03c4561a9e1481fbea205b3b866f928..ce4e17c9aad6fa266d306676df4e7cdc69eb7df0 100644 (file)
@@ -14,7 +14,7 @@
  * General size of kernel stacks
  */
 #ifdef CONFIG_KASAN
-#define THREAD_SIZE_ORDER 3
+#define THREAD_SIZE_ORDER 4
 #else
 #define THREAD_SIZE_ORDER 2
 #endif
index 457b7ba0fbb66de24fd82219e18a51ad2663221f..b31c779cf58176ad3bf91ee816053cbcf40b3476 100644 (file)
@@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                unsigned long address)
 {
-       if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
+       if (mm_pmd_folded(tlb->mm))
                return;
        pgtable_pmd_page_dtor(virt_to_page(pmd));
        tlb_remove_table(tlb, pmd);
@@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
                                unsigned long address)
 {
-       if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
+       if (mm_p4d_folded(tlb->mm))
                return;
        tlb_remove_table(tlb, p4d);
 }
@@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
 static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                unsigned long address)
 {
-       if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
+       if (mm_pud_folded(tlb->mm))
                return;
        tlb_remove_table(tlb, pud);
 }
index 724fba4d09d2df3a35c372224ddc944c9def3ace..39191a0feed1cdedd692e68826b3b19db581cbbb 100644 (file)
@@ -236,10 +236,10 @@ ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
        lghi    %r4,__TASK_stack
        lghi    %r1,__TASK_thread
-       lg      %r5,0(%r4,%r3)                  # start of kernel stack of next
+       llill   %r5,STACK_INIT
        stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
-       lgr     %r15,%r5
-       aghi    %r15,STACK_INIT                 # end of kernel stack of next
+       lg      %r15,0(%r4,%r3)                 # start of kernel stack of next
+       agr     %r15,%r5                        # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
        lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
index 84be7f02d0c2157029cb2868231b67c7603bbf42..39b13d71a8fe6dc2979e8a8320ae62b675b8ee9b 100644 (file)
@@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init);
  */
 unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
 {
-       struct ftrace_graph_ent trace;
-
        if (unlikely(ftrace_graph_is_dead()))
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
        ip -= MCOUNT_INSN_SIZE;
-       trace.func = ip;
-       trace.depth = current->curr_ret_stack + 1;
-       /* Only trace if the calling function expects to. */
-       if (!ftrace_graph_entry(&trace))
-               goto out;
-       if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
-                                    NULL) == -EBUSY)
-               goto out;
-       parent = (unsigned long) return_to_handler;
+       if (!function_graph_enter(parent, ip, 0, NULL))
+               parent = (unsigned long) return_to_handler;
 out:
        return parent;
 }
index cc085e2d2ce9907690fbe0912dd301ab44e8171d..d5523adeddbf4dc0d9b92962bb39328d474cba22 100644 (file)
@@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event)
                break;
 
        case PERF_TYPE_HARDWARE:
+               if (is_sampling_event(event))   /* No sampling support */
+                       return -ENOENT;
                ev = attr->config;
                /* Count user space (problem-state) only */
                if (!attr->exclude_user && attr->exclude_kernel) {
@@ -373,7 +375,7 @@ static int __hw_perf_event_init(struct perf_event *event)
                return -ENOENT;
 
        if (ev > PERF_CPUM_CF_MAX_CTR)
-               return -EINVAL;
+               return -ENOENT;
 
        /* Obtain the counter set to which the specified counter belongs */
        set = get_counter_set(ev);
index 7bf604ff50a1bd082024c85fb5d32e06cca9c4f8..bfabeb1889cc0cca5c6859cb36bbbeb15b662049 100644 (file)
@@ -1842,10 +1842,30 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
 CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
 
-static struct attribute *cpumsf_pmu_events_attr[] = {
-       CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
-       NULL,
-       NULL,
+/* Attribute list for CPU_SF.
+ *
+ * The availability depends on the CPU_MF sampling facility authorization
+ * for basic + diagnostic samples. This is determined at initialization
+ * time by the sampling facility device driver.
+ * If the authorization for basic samples is turned off, it should
+ * also be turned off for diagnostic sampling.
+ *
+ * During initialization, the device driver checks the authorization
+ * level for diagnostic sampling and installs the attribute
+ * file for diagnostic sampling if necessary.
+ *
+ * For now, install a placeholder to reference all possible attributes:
+ * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
+ * Add another entry for the final NULL pointer.
+ */
+enum {
+       SF_CYCLES_BASIC_ATTR_IDX = 0,
+       SF_CYCLES_BASIC_DIAG_ATTR_IDX,
+       SF_CYCLES_ATTR_MAX
+};
+
+static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
+       [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
 };
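
The array above uses the common "fixed slots plus NULL sentinel" shape: sized for every attribute that could exist, with the mandatory slot set statically through a designated initializer, the optional slot filled at init time, and the trailing element left NULL as the list terminator. A minimal standalone illustration:

#include <stdio.h>

enum { MANDATORY_IDX, OPTIONAL_IDX, ATTR_MAX };

/* One slot per possible entry plus the NULL terminator; unset
 * slots are zero-initialized, i.e. already NULL. */
static const char *attrs[ATTR_MAX + 1] = {
	[MANDATORY_IDX] = "cycles-basic",
};

int main(void)
{
	int authorized = 1;  /* stand-in for the si.ad check above */
	const char **a;

	if (authorized)
		attrs[OPTIONAL_IDX] = "cycles-basic-diag";

	for (a = attrs; *a; a++)
		puts(*a);
	return 0;
}
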
 
 PMU_FORMAT_ATTR(event, "config:0-63");
@@ -2040,7 +2060,10 @@ static int __init init_cpum_sampling_pmu(void)
 
        if (si.ad) {
                sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
-               cpumsf_pmu_events_attr[1] =
+               /* Sampling of diagnostic data is authorized;
+                * install the event into the attribute list of the
+                * PMU device.
+                */
+               cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
                        CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
        }
 
index eb8aebea3ea7bd7a6967136b6cb9aee3e25473aa..e76309fbbcb3b6e23af21350f98f2b555502b978 100644 (file)
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
 $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
        $(call if_changed,vdso32ld)
 
 # strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
        $(call if_changed_dep,vdso32as)
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
 
index a22b2cf86eec985d7f3bf32da11f5f0c220c28e7..f849ac61c5da02ee8b764bc3c01fc44c16137e04 100644 (file)
@@ -37,7 +37,7 @@ KASAN_SANITIZE := n
 $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
 
 # link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
        $(call if_changed,vdso64ld)
 
 # strip rule for the .so file
@@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 # assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
        $(call if_changed_dep,vdso64as)
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
 quiet_cmd_vdso64as = VDSO64A $@
       cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
 
index 21eb7407d51bac8e71f3743defba1f7de5291e3d..8429ab07971575394622444ea6be40eb85b37f62 100644 (file)
@@ -154,14 +154,14 @@ SECTIONS
         * uncompressed image info used by the decompressor
         * it should match struct vmlinux_info
         */
-       .vmlinux.info 0 : {
+       .vmlinux.info 0 (INFO) : {
                QUAD(_stext)                                    /* default_lma */
                QUAD(startup_continue)                          /* entry */
                QUAD(__bss_start - _stext)                      /* image_size */
                QUAD(__bss_stop - __bss_start)                  /* bss_size */
                QUAD(__boot_data_start)                         /* bootdata_off */
                QUAD(__boot_data_end - __boot_data_start)       /* bootdata_size */
-       }
+       } :NONE
 
        /* Debugging sections.  */
        STABS_DEBUG
index 76d89ee8b428837fc6c32f962d0104787caa29a3..6791562779eeca0f6eb6559336a19329de72faff 100644 (file)
@@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
                        mm->context.asce_limit = _REGION1_SIZE;
                        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
                                _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+                       mm_inc_nr_puds(mm);
                } else {
                        crst_table_init(table, _REGION1_ENTRY_EMPTY);
                        pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
@@ -130,6 +131,7 @@ void crst_table_downgrade(struct mm_struct *mm)
        }
 
        pgd = mm->pgd;
+       mm_dec_nr_pmds(mm);
        mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
        mm->context.asce_limit = _REGION3_SIZE;
        mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
index ae0d9e889534cd880f750845fb58d919080e9325..d31bde0870d894bdc2cd3a3006d966924d1d5c1c 100644 (file)
@@ -53,6 +53,7 @@ int __node_distance(int a, int b)
 {
        return mode->distance ? mode->distance(a, b) : 0;
 }
+EXPORT_SYMBOL(__node_distance);
 
 int numa_debug_enabled;
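Exporting __node_distance() makes the node_distance() topology helper usable from modules on s390. A minimal consumer sketch, assuming a GPL module built against this tree (the probe body is illustrative):

#include <linux/module.h>
#include <linux/topology.h>

static int __init nd_demo_init(void)
{
        /* node_distance() resolves to __node_distance(), which the
         * export above now makes visible to modules. */
        pr_info("node distance (0,0): %d\n", node_distance(0, 0));
        return 0;
}

static void __exit nd_demo_exit(void)
{
}

module_init(nd_demo_init);
module_exit(nd_demo_exit);
MODULE_LICENSE("GPL");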
 
index 96dd9f7da2506d061fa499c5992a8e7eb4a426bf..1b04270e5460e8d77decb68a5c15f329227706ea 100644 (file)
@@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void)
 void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 {
        unsigned long old;
-       int faulted, err;
-       struct ftrace_graph_ent trace;
+       int faulted;
        unsigned long return_hooker = (unsigned long)&return_to_handler;
 
        if (unlikely(ftrace_graph_is_dead()))
@@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                return;
        }
 
-       err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL);
-       if (err == -EBUSY) {
+       if (function_graph_enter(old, self_addr, 0, NULL))
                __raw_writel(old, parent);
-               return;
-       }
-
-       trace.func = self_addr;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
-               current->curr_ret_stack--;
-               __raw_writel(old, parent);
-       }
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
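
The deleted open-coded sequence — push the return address, call the entry callback, undo on rejection — is exactly what the new generic function_graph_enter() helper encapsulates, so each architecture stops duplicating it. A toy userspace model of that control flow; the hooks are stubs, and the real helper's internal ordering may differ:

#include <stdio.h>

/* Stubs standing in for the tracer hooks. */
static int graph_entry_allowed(unsigned long fn) { (void)fn; return 1; }
static int push_return_trace(unsigned long ret)  { (void)ret; return 0; }

/* Returns 0 when the return address should be redirected to
 * return_to_handler, nonzero when it must be left untouched. */
static int toy_function_graph_enter(unsigned long ret, unsigned long fn)
{
        if (!graph_entry_allowed(fn))
                return -1;              /* tracer declined this function */
        if (push_return_trace(ret))
                return -1;              /* shadow return stack is full */
        return 0;
}

int main(void)
{
        if (toy_function_graph_enter(0x1000, 0x2000) == 0)
                puts("return redirected to return_to_handler");
        return 0;
}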
index 915dda4ae41205ca51d5aa0d8ac4ee4c777ba3e8..684b84ce397f711c8ebf7857d9f4c826d4eb09ca 100644 (file)
@@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent,
                                    unsigned long frame_pointer)
 {
        unsigned long return_hooker = (unsigned long) &return_to_handler;
-       struct ftrace_graph_ent trace;
 
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return parent + 8UL;
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace))
-               return parent + 8UL;
-
-       if (ftrace_push_return_trace(parent, self_addr, &trace.depth,
-                                    frame_pointer, NULL) == -EBUSY)
+       if (function_graph_enter(parent, self_addr, frame_pointer, NULL))
                return parent + 8UL;
 
        return return_hooker;
index 40d008b0bd3e98e43d07469dbf73d4c358507b29..05eb016fc41be2856632ddebce30599567d365ae 100644 (file)
@@ -108,10 +108,9 @@ int iommu_table_init(struct iommu *iommu, int tsbsize,
        /* Allocate and initialize the free area map.  */
        sz = num_tsb_entries / 8;
        sz = (sz + 7UL) & ~7UL;
-       iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
+       iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
        if (!iommu->tbl.map)
                return -ENOMEM;
-       memset(iommu->tbl.map, 0, sz);
 
        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
                            (tlb_type != hypervisor ? iommu_flushall : NULL),
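
The same two-step idiom in userspace terms, for comparison: allocate-then-clear collapsed into a single zeroing allocation (calloc() here, kzalloc_node() in the kernel):

#include <stdlib.h>
#include <string.h>

/* Before: allocate, then clear by hand. */
unsigned long *alloc_map_old(size_t sz)
{
        unsigned long *map = malloc(sz);

        if (map)
                memset(map, 0, sz);
        return map;
}

/* After: ask the allocator for zeroed memory in one call. */
unsigned long *alloc_map_new(size_t sz)
{
        return calloc(1, sz);
}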
index 67b3e6b3ce5d7cf8b417d361c5bbaadce92cc1e0..47c871394ccb1602d59bca5a3459a7e088df98e0 100644 (file)
@@ -1849,16 +1849,12 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 {
        u64 saved_fault_address = current_thread_info()->fault_address;
        u8 saved_fault_code = get_thread_fault_code();
-       mm_segment_t old_fs;
 
        perf_callchain_store(entry, regs->tpc);
 
        if (!current->mm)
                return;
 
-       old_fs = get_fs();
-       set_fs(USER_DS);
-
        flushw_user();
 
        pagefault_disable();
@@ -1870,7 +1866,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
 
        pagefault_enable();
 
-       set_fs(old_fs);
        set_thread_fault_code(saved_fault_code);
        current_thread_info()->fault_address = saved_fault_address;
 }
index 4c5b3fcbed94c376a2a44bb0c4fdaab139b3a169..e800ce13cc6e5bb2646e10ad0cdd0c71e9a80d44 100644 (file)
@@ -683,6 +683,7 @@ void do_signal32(struct pt_regs * regs)
                                regs->tpc -= 4;
                                regs->tnpc -= 4;
                                pt_regs_clear_syscall(regs);
+                               /* fall through */
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->tpc -= 4;
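
The /* fall through */ comment is not decorative: with -Wimplicit-fallthrough (gcc 7+), an unannotated fall through draws a warning, and this comment is one of the annotations the compiler accepts. A minimal standalone illustration, with simplified stand-ins for the restart cases:

#include <stdio.h>

static void handle_restart(int err)
{
        switch (err) {
        case 1:                 /* stand-in for the first restart case */
                puts("load the restart_syscall number");
                /* fall through */
        case 2:                 /* stand-in for ERESTART_RESTARTBLOCK */
                puts("rewind the program counter");
                break;
        default:
                break;
        }
}

int main(void)
{
        handle_restart(1);
        return 0;
}

The two identical annotations in the following signal_32.c and signal.c hunks serve the same purpose.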
index 5665261cee37f2330ab7dbd08ecce91593301a64..83953780ca016c4944e2284014c50eb661357aa4 100644 (file)
@@ -508,6 +508,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                                regs->pc -= 4;
                                regs->npc -= 4;
                                pt_regs_clear_syscall(regs);
+                               /* fall through */
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->pc -= 4;
index e9de1803a22e004adbb3d6b541ad552f71626e7d..ca70787efd8e05de3d4ec8a3f9a8040bfb4c4844 100644 (file)
@@ -533,6 +533,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
                                regs->tpc -= 4;
                                regs->tnpc -= 4;
                                pt_regs_clear_syscall(regs);
+                               /* fall through */
                        case ERESTART_RESTARTBLOCK:
                                regs->u_regs[UREG_G1] = __NR_restart_syscall;
                                regs->tpc -= 4;
index bb68c805b891855e18af6397ce534f74d5550a4d..ff9389a1c9f3f68c5acaa32123d65c5fedbf9846 100644 (file)
@@ -47,9 +47,9 @@ sys_call_table32:
        .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
 /*130*/        .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
        .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
-/*140*/        .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit
+/*140*/        .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
        .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
-/*150*/        .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+/*150*/        .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
        .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
 /*160*/        .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
        .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr
index 222785af550b46736676808b6e00d8d8cef9a286..5fda4f7bf15d176fbb913435ea0a25c510c8df5a 100644 (file)
@@ -791,7 +791,7 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src,
 }
 
 /* Just skip the save instruction and the ctx register move.  */
-#define BPF_TAILCALL_PROLOGUE_SKIP     16
+#define BPF_TAILCALL_PROLOGUE_SKIP     32
 #define BPF_TAILCALL_CNT_SP_OFF                (STACK_BIAS + 128)
 
 static void build_prologue(struct jit_ctx *ctx)
@@ -824,9 +824,15 @@ static void build_prologue(struct jit_ctx *ctx)
                const u8 vfp = bpf2sparc[BPF_REG_FP];
 
                emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx);
+       } else {
+               emit_nop(ctx);
        }
 
        emit_reg_move(I0, O0, ctx);
+       emit_reg_move(I1, O1, ctx);
+       emit_reg_move(I2, O2, ctx);
+       emit_reg_move(I3, O3, ctx);
+       emit_reg_move(I4, O4, ctx);
        /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */
 }
 
@@ -1270,6 +1276,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                u32 opcode = 0, rs2;
 
+               if (insn->dst_reg == BPF_REG_FP)
+                       ctx->saw_frame_pointer = true;
+
                ctx->tmp_2_used = true;
                emit_loadimm(imm, tmp2, ctx);
 
@@ -1308,6 +1317,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                const u8 tmp = bpf2sparc[TMP_REG_1];
                u32 opcode = 0, rs2;
 
+               if (insn->dst_reg == BPF_REG_FP)
+                       ctx->saw_frame_pointer = true;
+
                switch (BPF_SIZE(code)) {
                case BPF_W:
                        opcode = ST32;
@@ -1340,6 +1352,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+               if (insn->dst_reg == BPF_REG_FP)
+                       ctx->saw_frame_pointer = true;
+
                ctx->tmp_1_used = true;
                ctx->tmp_2_used = true;
                ctx->tmp_3_used = true;
@@ -1360,6 +1375,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                const u8 tmp2 = bpf2sparc[TMP_REG_2];
                const u8 tmp3 = bpf2sparc[TMP_REG_3];
 
+               if (insn->dst_reg == BPF_REG_FP)
+                       ctx->saw_frame_pointer = true;
+
                ctx->tmp_1_used = true;
                ctx->tmp_2_used = true;
                ctx->tmp_3_used = true;
@@ -1425,12 +1443,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
        struct bpf_prog *tmp, *orig_prog = prog;
        struct sparc64_jit_data *jit_data;
        struct bpf_binary_header *header;
+       u32 prev_image_size, image_size;
        bool tmp_blinded = false;
        bool extra_pass = false;
        struct jit_ctx ctx;
-       u32 image_size;
        u8 *image_ptr;
-       int pass;
+       int pass, i;
 
        if (!prog->jit_requested)
                return orig_prog;
@@ -1461,61 +1479,82 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                header = jit_data->header;
                extra_pass = true;
                image_size = sizeof(u32) * ctx.idx;
+               prev_image_size = image_size;
+               pass = 1;
                goto skip_init_ctx;
        }
 
        memset(&ctx, 0, sizeof(ctx));
        ctx.prog = prog;
 
-       ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
+       ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL);
        if (ctx.offset == NULL) {
                prog = orig_prog;
                goto out_off;
        }
 
-       /* Fake pass to detect features used, and get an accurate assessment
-        * of what the final image size will be.
+       /* Longest sequence emitted is for bswap32, 12 instructions.  Pre-cook
+        * the offset array so that we converge faster.
         */
-       if (build_body(&ctx)) {
-               prog = orig_prog;
-               goto out_off;
-       }
-       build_prologue(&ctx);
-       build_epilogue(&ctx);
-
-       /* Now we know the actual image size. */
-       image_size = sizeof(u32) * ctx.idx;
-       header = bpf_jit_binary_alloc(image_size, &image_ptr,
-                                     sizeof(u32), jit_fill_hole);
-       if (header == NULL) {
-               prog = orig_prog;
-               goto out_off;
-       }
+       for (i = 0; i < prog->len; i++)
+               ctx.offset[i] = i * (12 * 4);
 
-       ctx.image = (u32 *)image_ptr;
-skip_init_ctx:
-       for (pass = 1; pass < 3; pass++) {
+       prev_image_size = ~0U;
+       for (pass = 1; pass < 40; pass++) {
                ctx.idx = 0;
 
                build_prologue(&ctx);
-
                if (build_body(&ctx)) {
-                       bpf_jit_binary_free(header);
                        prog = orig_prog;
                        goto out_off;
                }
-
                build_epilogue(&ctx);
 
                if (bpf_jit_enable > 1)
-                       pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass,
-                               image_size - (ctx.idx * 4),
+                       pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass,
+                               ctx.idx * 4,
                                ctx.tmp_1_used ? '1' : ' ',
                                ctx.tmp_2_used ? '2' : ' ',
                                ctx.tmp_3_used ? '3' : ' ',
                                ctx.saw_frame_pointer ? 'F' : ' ',
                                ctx.saw_call ? 'C' : ' ',
                                ctx.saw_tail_call ? 'T' : ' ');
+
+               if (ctx.idx * 4 == prev_image_size)
+                       break;
+               prev_image_size = ctx.idx * 4;
+               cond_resched();
+       }
+
+       /* Now we know the actual image size. */
+       image_size = sizeof(u32) * ctx.idx;
+       header = bpf_jit_binary_alloc(image_size, &image_ptr,
+                                     sizeof(u32), jit_fill_hole);
+       if (header == NULL) {
+               prog = orig_prog;
+               goto out_off;
+       }
+
+       ctx.image = (u32 *)image_ptr;
+skip_init_ctx:
+       ctx.idx = 0;
+
+       build_prologue(&ctx);
+
+       if (build_body(&ctx)) {
+               bpf_jit_binary_free(header);
+               prog = orig_prog;
+               goto out_off;
+       }
+
+       build_epilogue(&ctx);
+
+       if (ctx.idx * 4 != prev_image_size) {
+               pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n",
+                      prev_image_size, ctx.idx * 4);
+               bpf_jit_binary_free(header);
+               prog = orig_prog;
+               goto out_off;
        }
 
        if (bpf_jit_enable > 1)
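
The restructured JIT replaces the single up-front sizing pass with an iterative scheme: seed every BPF instruction's offset with the 12-instruction (48-byte) worst case, re-run code generation until the emitted size is identical across two consecutive passes (capped at 40), and only then allocate the final image — bailing out loudly if the final emit still disagrees. A self-contained toy of that relaxation loop; the long/short encoding model is invented, and only the convergence structure mirrors the patch:

#include <stdio.h>

#define NINSN           8
#define MAX_PASSES      40

int main(void)
{
        unsigned int offset[NINSN];
        unsigned int size = NINSN * 12, prev = ~0u;

        /* pessimistic seed, as in the patch: worst case per instruction */
        for (int i = 0; i < NINSN; i++)
                offset[i] = i * 12;

        for (int pass = 1; pass < MAX_PASSES && size != prev; pass++) {
                unsigned int idx = 0;

                prev = size;
                for (int i = 0; i < NINSN; i++) {
                        /* distance from this insn to the end of the image */
                        unsigned int dist = size - offset[i];

                        offset[i] = idx;
                        idx += (dist > 8) ? 2 : 1;      /* long vs short form */
                }
                size = idx;
                printf("pass %d: size = %u\n", pass, size);
        }
        return 0;
}

Each pass shrinks branches whose targets moved closer, which in turn moves later offsets, until no size changes between passes.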
index 74c002ddc0ce74868286b77f43dfa6885e6c3e70..28c40624bcb6f0e9b15030037d6f199b46c5fa0f 100644 (file)
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
                io_req->fds[0] = dev->cow.fd;
        else
                io_req->fds[0] = dev->fd;
+       io_req->error = 0;
 
        if (req_op(req) == REQ_OP_FLUSH) {
                io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
                io_req->cow_offset = -1;
                io_req->offset = off;
                io_req->length = bvec->bv_len;
-               io_req->error = 0;
                io_req->sector_mask = 0;
-
                io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
                io_req->offsets[0] = 0;
                io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
 {
+       struct ubd *ubd_dev = hctx->queue->queuedata;
        struct request *req = bd->rq;
        int ret = 0;
 
        blk_mq_start_request(req);
 
+       spin_lock_irq(&ubd_dev->lock);
+
        if (req_op(req) == REQ_OP_FLUSH) {
                ret = ubd_queue_one_vec(hctx, req, 0, NULL);
        } else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
                }
        }
 out:
-       if (ret < 0) {
+       spin_unlock_irq(&ubd_dev->lock);
+
+       if (ret < 0)
                blk_mq_requeue_request(req, true);
-       }
+
        return BLK_STS_OK;
 }
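
Two things change in this submission path: per-request setup now runs under the per-device lock, and a transient failure requeues the request instead of completing it with an error. A sketch of that shape — struct my_dev and submit_one() are hypothetical placeholders, not the driver's real names:

#include <linux/blk-mq.h>
#include <linux/spinlock.h>

struct my_dev {                         /* hypothetical device */
        spinlock_t lock;
};

static int submit_one(struct my_dev *dev, struct request *req)
{
        /* hypothetical: fill in the per-request descriptor, kick hw */
        (void)dev; (void)req;
        return 0;
}

static blk_status_t queue_rq_serialized(struct my_dev *dev,
                                        struct request *req)
{
        int ret;

        blk_mq_start_request(req);

        spin_lock_irq(&dev->lock);      /* serialize per-device I/O state */
        ret = submit_one(dev, req);
        spin_unlock_irq(&dev->lock);

        if (ret < 0)
                blk_mq_requeue_request(req, true);      /* retry later */

        return BLK_STS_OK;
}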
 
index c51c989c19c08da99155d354cc11558c1cdb36d4..8689e794a43c8432e083555ddffd6d548fa80d6a 100644 (file)
@@ -129,6 +129,7 @@ config X86
        select HAVE_ARCH_PREL32_RELOCATIONS
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_THREAD_STRUCT_WHITELIST
+       select HAVE_ARCH_STACKLEAK
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64
@@ -443,10 +444,6 @@ config RETPOLINE
          branches. Requires a compiler with -mindirect-branch=thunk-extern
          support for full protection. The kernel may run slower.
 
-         Without compiler support, at least indirect branches in assembler
-         code are eliminated. Since this includes the syscall entry path,
-         it is not entirely pointless.
-
 config INTEL_RDT
        bool "Intel Resource Director Technology support"
        depends on X86 && CPU_SUP_INTEL
@@ -524,7 +521,6 @@ config X86_VSMP
        bool "ScaleMP vSMP"
        select HYPERVISOR_GUEST
        select PARAVIRT
-       select PARAVIRT_XXL
        depends on X86_64 && PCI
        depends on X86_EXTENDED_PLATFORM
        depends on SMP
@@ -1004,13 +1000,7 @@ config NR_CPUS
          to the kernel image.
 
 config SCHED_SMT
-       bool "SMT (Hyperthreading) scheduler support"
-       depends on SMP
-       ---help---
-         SMT scheduler support improves the CPU scheduler's decision making
-         when dealing with Intel Pentium 4 chips with HyperThreading at a
-         cost of slightly increased overhead in some places. If unsure say
-         N here.
+       def_bool y if SMP
 
 config SCHED_MC
        def_bool y
index 5b562e4640099086493bc0fa6d46da88a0780f09..75ef499a66e2b81c82fb6abb9bb4bd9a64521e73 100644 (file)
@@ -213,8 +213,6 @@ ifdef CONFIG_X86_64
 KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
 endif
 
-# Speed up the build
-KBUILD_CFLAGS += -pipe
 # Workaround for a gcc prerelease that unfortunately was shipped in a SUSE release
 KBUILD_CFLAGS += -Wno-sign-compare
 #
@@ -222,9 +220,7 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 
 # Avoid indirect branches in kernel to deal with Spectre
 ifdef CONFIG_RETPOLINE
-ifneq ($(RETPOLINE_CFLAGS),)
-  KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE
-endif
+  KBUILD_CFLAGS += $(RETPOLINE_CFLAGS)
 endif
 
 archscripts: scripts_basic
@@ -239,7 +235,7 @@ archheaders:
 archmacros:
        $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
 
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
 export ASM_MACRO_FLAGS
 KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
 
@@ -308,6 +304,13 @@ ifndef CC_HAVE_ASM_GOTO
        @echo Compiler lacks asm-goto support.
        @exit 1
 endif
+ifdef CONFIG_RETPOLINE
+ifeq ($(RETPOLINE_CFLAGS),)
+       @echo "You are building kernel with non-retpoline compiler." >&2
+       @echo "Please update your compiler." >&2
+       @false
+endif
+endif
 
 archclean:
        $(Q)rm -rf $(objtree)/arch/i386
index 8b4c5e0011572f7a0a7636d769863bfbe3beb620..544ac4fafd112a8b32802e0c4c5a4392b71bd67c 100644 (file)
@@ -1,3 +1,4 @@
+
 /* -----------------------------------------------------------------------
  *
  *   Copyright 2011 Intel Corporation; author Matt Fleming
@@ -634,37 +635,54 @@ static efi_status_t alloc_e820ext(u32 nr_desc, struct setup_data **e820ext,
        return status;
 }
 
+static efi_status_t allocate_e820(struct boot_params *params,
+                                 struct setup_data **e820ext,
+                                 u32 *e820ext_size)
+{
+       unsigned long map_size, desc_size, buff_size;
+       struct efi_boot_memmap boot_map;
+       efi_memory_desc_t *map;
+       efi_status_t status;
+       __u32 nr_desc;
+
+       boot_map.map            = &map;
+       boot_map.map_size       = &map_size;
+       boot_map.desc_size      = &desc_size;
+       boot_map.desc_ver       = NULL;
+       boot_map.key_ptr        = NULL;
+       boot_map.buff_size      = &buff_size;
+
+       status = efi_get_memory_map(sys_table, &boot_map);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       nr_desc = buff_size / desc_size;
+
+       if (nr_desc > ARRAY_SIZE(params->e820_table)) {
+               u32 nr_e820ext = nr_desc - ARRAY_SIZE(params->e820_table);
+
+               status = alloc_e820ext(nr_e820ext, e820ext, e820ext_size);
+               if (status != EFI_SUCCESS)
+                       return status;
+       }
+
+       return EFI_SUCCESS;
+}
+
 struct exit_boot_struct {
        struct boot_params      *boot_params;
        struct efi_info         *efi;
-       struct setup_data       *e820ext;
-       __u32                   e820ext_size;
 };
 
 static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
                                   struct efi_boot_memmap *map,
                                   void *priv)
 {
-       static bool first = true;
        const char *signature;
        __u32 nr_desc;
        efi_status_t status;
        struct exit_boot_struct *p = priv;
 
-       if (first) {
-               nr_desc = *map->buff_size / *map->desc_size;
-               if (nr_desc > ARRAY_SIZE(p->boot_params->e820_table)) {
-                       u32 nr_e820ext = nr_desc -
-                                       ARRAY_SIZE(p->boot_params->e820_table);
-
-                       status = alloc_e820ext(nr_e820ext, &p->e820ext,
-                                              &p->e820ext_size);
-                       if (status != EFI_SUCCESS)
-                               return status;
-               }
-               first = false;
-       }
-
        signature = efi_is_64bit() ? EFI64_LOADER_SIGNATURE
                                   : EFI32_LOADER_SIGNATURE;
        memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
@@ -687,8 +705,8 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
 {
        unsigned long map_sz, key, desc_size, buff_size;
        efi_memory_desc_t *mem_map;
-       struct setup_data *e820ext;
-       __u32 e820ext_size;
+       struct setup_data *e820ext = NULL;
+       __u32 e820ext_size = 0;
        efi_status_t status;
        __u32 desc_version;
        struct efi_boot_memmap map;
@@ -702,8 +720,10 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
        map.buff_size           = &buff_size;
        priv.boot_params        = boot_params;
        priv.efi                = &boot_params->efi_info;
-       priv.e820ext            = NULL;
-       priv.e820ext_size       = 0;
+
+       status = allocate_e820(boot_params, &e820ext, &e820ext_size);
+       if (status != EFI_SUCCESS)
+               return status;
 
        /* Might as well exit boot services now */
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -711,9 +731,6 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle)
        if (status != EFI_SUCCESS)
                return status;
 
-       e820ext                 = priv.e820ext;
-       e820ext_size            = priv.e820ext_size;
-
        /* Historic? */
        boot_params->alt_mem_k  = 32 * 1024;
 
index 8f0c4c9fc90433d83e6c82b7ba14d55370d3d267..51079fc9298fc0f1a255fca2d2702a4f8b14fb84 100644 (file)
@@ -113,7 +113,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
 {
        int err;
 
-       memset(&cpu.flags, 0, sizeof cpu.flags);
+       memset(&cpu.flags, 0, sizeof(cpu.flags));
        cpu.level = 3;
 
        if (has_eflag(X86_EFLAGS_AC))
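
This hunk (and the several that follow) is a pure style conversion: the operator form "sizeof x" becomes the function-like "sizeof(x)". Both are valid C; the parenthesized spelling is simply the kernel's preferred one. A compile-and-run check:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char buf[32];

        /* "sizeof buf" is legal C, but kernel style asks for the
         * function-like form: sizeof(buf), sizeof(*ptr). */
        memset(buf, 0, sizeof(buf));
        printf("%zu\n", sizeof(buf));
        return 0;
}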
index b25c53527a9409490ff4077ae3f12d96b53a669d..023bf1c3de8b7a1d1d5b22c9dd1c9312ed07ad70 100644 (file)
@@ -50,7 +50,7 @@ static void parse_earlyprintk(void)
        int pos = 0;
        int port = 0;
 
-       if (cmdline_find_option("earlyprintk", arg, sizeof arg) > 0) {
+       if (cmdline_find_option("earlyprintk", arg, sizeof(arg)) > 0) {
                char *e;
 
                if (!strncmp(arg, "serial", 6)) {
@@ -124,7 +124,7 @@ static void parse_console_uart8250(void)
         * console=uart8250,io,0x3f8,115200n8
         * need to make sure it is the last console!
         */
-       if (cmdline_find_option("console", optstr, sizeof optstr) <= 0)
+       if (cmdline_find_option("console", optstr, sizeof(optstr)) <= 0)
                return;
 
        options = optstr;
index 223e42527077d26c818d7e0ec7259d98717dbd61..6c176b6a42ad0c7f51baaeb55f6dff7d0fa533af 100644 (file)
@@ -76,7 +76,7 @@ static int get_edd_info(u8 devno, struct edd_info *ei)
 {
        struct biosregs ireg, oreg;
 
-       memset(ei, 0, sizeof *ei);
+       memset(ei, 0, sizeof(*ei));
 
        /* Check Extensions Present */
 
@@ -133,7 +133,7 @@ void query_edd(void)
        struct edd_info ei, *edp;
        u32 *mbrptr;
 
-       if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) {
+       if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) {
                if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
                        do_edd = 1;
                        do_mbr = 0;
@@ -166,7 +166,7 @@ void query_edd(void)
                 */
                if (!get_edd_info(devno, &ei)
                    && boot_params.eddbuf_entries < EDDMAXNR) {
-                       memcpy(edp, &ei, sizeof ei);
+                       memcpy(edp, &ei, sizeof(ei));
                        edp++;
                        boot_params.eddbuf_entries++;
                }
index 4c881c850125c674145e4627078c91ce4a453f0e..850b8762e889656c43da518520c5bae60e945e3d 100644 (file)
@@ -300,7 +300,7 @@ _start:
        # Part 2 of the header, from the old setup.S
 
                .ascii  "HdrS"          # header signature
-               .word   0x020e          # header version number (>= 0x0105
+               .word   0x020d          # header version number (>= 0x0105
                                        # or else old loadlin-1.5 will fail)
                .globl realmode_swtch
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
@@ -558,10 +558,6 @@ pref_address:              .quad LOAD_PHYSICAL_ADDR        # preferred load addr
 init_size:             .long INIT_SIZE         # kernel initialization size
 handover_offset:       .long 0                 # Filled in by build.c
 
-acpi_rsdp_addr:                .quad 0                 # 64-bit physical pointer to the
-                                               # ACPI RSDP table, added with
-                                               # version 2.14
-
 # End of setup header #####################################################
 
        .section ".entrytext", "ax"
index 9bcea386db65e3bbce1db97a8b7254249fd89ed7..73532543d68924c40374f61c94453bb16e816e25 100644 (file)
@@ -36,8 +36,8 @@ static void copy_boot_params(void)
        const struct old_cmdline * const oldcmd =
                (const struct old_cmdline *)OLD_CL_ADDRESS;
 
-       BUILD_BUG_ON(sizeof boot_params != 4096);
-       memcpy(&boot_params.hdr, &hdr, sizeof hdr);
+       BUILD_BUG_ON(sizeof(boot_params) != 4096);
+       memcpy(&boot_params.hdr, &hdr, sizeof(hdr));
 
        if (!boot_params.hdr.cmd_line_ptr &&
            oldcmd->cl_magic == OLD_CL_MAGIC) {
index d9c28c87e4771ffadf68091800b50142417389ba..7df2b28207be6c589d2478d130c924ebdd0073d7 100644 (file)
@@ -26,7 +26,7 @@ static int detect_memory_e820(void)
 
        initregs(&ireg);
        ireg.ax  = 0xe820;
-       ireg.cx  = sizeof buf;
+       ireg.cx  = sizeof(buf);
        ireg.edx = SMAP;
        ireg.di  = (size_t)&buf;
 
index c0fb356a3092e55f9f70aba1eea1bd8db74d77ae..2fe3616ba16138e51db4d6db47a225456b1c0e13 100644 (file)
@@ -21,7 +21,7 @@
 
 void initregs(struct biosregs *reg)
 {
-       memset(reg, 0, sizeof *reg);
+       memset(reg, 0, sizeof(*reg));
        reg->eflags |= X86_EFLAGS_CF;
        reg->ds = ds();
        reg->es = ds();
index ba3e100654db0239622a3f23f5d9d64855ebffd0..3ecc11a9c44040153521cdbfeafc37714999f5bc 100644 (file)
@@ -62,7 +62,7 @@ static int vesa_probe(void)
                if (mode & ~0x1ff)
                        continue;
 
-               memset(&vminfo, 0, sizeof vminfo); /* Just in case... */
+               memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */
 
                ireg.ax = 0x4f01;
                ireg.cx = mode;
@@ -109,7 +109,7 @@ static int vesa_set_mode(struct mode_info *mode)
        int is_graphic;
        u16 vesa_mode = mode->mode - VIDEO_FIRST_VESA;
 
-       memset(&vminfo, 0, sizeof vminfo); /* Just in case... */
+       memset(&vminfo, 0, sizeof(vminfo)); /* Just in case... */
 
        initregs(&ireg);
        ireg.ax = 0x4f01;
@@ -241,7 +241,7 @@ void vesa_store_edid(void)
        struct biosregs ireg, oreg;
 
        /* Apparently used as a nonsense token... */
-       memset(&boot_params.edid_info, 0x13, sizeof boot_params.edid_info);
+       memset(&boot_params.edid_info, 0x13, sizeof(boot_params.edid_info));
 
        if (vginfo.version < 0x0200)
                return;         /* EDID requires VBE 2.0+ */
index 77780e386e9b224ef8ec5421af644abc65d1eebd..ac89b6624a4053b4eeea6b16d18ed5f083fba2d4 100644 (file)
@@ -115,7 +115,7 @@ static unsigned int get_entry(void)
                } else if ((key >= '0' && key <= '9') ||
                           (key >= 'A' && key <= 'Z') ||
                           (key >= 'a' && key <= 'z')) {
-                       if (len < sizeof entry_buf) {
+                       if (len < sizeof(entry_buf)) {
                                entry_buf[len++] = key;
                                putchar(key);
                        }
index 708b46a54578d8722fc1c9fa07e58d74f7ff49d8..25e5a6bda8c3a971609dff93919ccab27d6a3aa9 100644 (file)
@@ -329,8 +329,22 @@ For 32-bit we have the following conventions - kernel is built with
 
 #endif
 
+.macro STACKLEAK_ERASE_NOCLOBBER
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+       PUSH_AND_CLEAR_REGS
+       call stackleak_erase
+       POP_REGS
+#endif
+.endm
+
 #endif /* CONFIG_X86_64 */
 
+.macro STACKLEAK_ERASE
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+       call stackleak_erase
+#endif
+.endm
+
 /*
  * This does 'call enter_from_user_mode' unless we can avoid it based on
  * kernel config or using the static jump infrastructure.
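
STACKLEAK_ERASE expands to a call to stackleak_erase() on the exit-to-userspace paths; the _NOCLOBBER variant brackets the call with register save/restore because it runs where live registers must survive. Conceptually, the erase finds how deep the task dirtied its stack and re-poisons that region. A toy userspace model of the poison-scan-and-clear idea — the real routine in kernel/stackleak.c tracks a low watermark and stack direction, so this is only the gist:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define POISON 0xCCCCCCCCu
#define DEPTH  64

static uint32_t stack[DEPTH];

/* Skip the still-poisoned (untouched) region, then re-poison
 * everything the task actually dirtied. */
static void erase_used_stack(void)
{
        size_t i = 0;

        while (i < DEPTH && stack[i] == POISON)
                i++;
        for (; i < DEPTH; i++)
                stack[i] = POISON;
}

int main(void)
{
        for (size_t i = 0; i < DEPTH; i++)
                stack[i] = POISON;
        memset(&stack[40], 0xAA, 24 * sizeof(uint32_t)); /* simulate usage */
        erase_used_stack();
        printf("stack re-poisoned: %s\n",
               stack[50] == POISON ? "yes" : "no");
        return 0;
}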
index 687e47f8a796621d4effcac9a055965969a81dc2..d309f30cf7af84e67ac38910eff4256da9c25a11 100644 (file)
@@ -46,6 +46,8 @@
 #include <asm/frame.h>
 #include <asm/nospec-branch.h>
 
+#include "calling.h"
+
        .section .entry.text, "ax"
 
 /*
@@ -712,6 +714,7 @@ ENTRY(ret_from_fork)
        /* When we fork, we trace the syscall return in the child, too. */
        movl    %esp, %eax
        call    syscall_return_slowpath
+       STACKLEAK_ERASE
        jmp     restore_all
 
        /* kernel thread */
@@ -886,6 +889,8 @@ ENTRY(entry_SYSENTER_32)
        ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
                    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
 
+       STACKLEAK_ERASE
+
 /* Opportunistic SYSEXIT */
        TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
 
@@ -997,6 +1002,8 @@ ENTRY(entry_INT80_32)
        call    do_int80_syscall_32
 .Lsyscall_32_done:
 
+       STACKLEAK_ERASE
+
 restore_all:
        TRACE_IRQS_IRET
        SWITCH_TO_ENTRY_STACK
index 4d7a2d9d44cfec5928b902cef1bca9bca29093a6..1f0efdb7b6294daba3e315be0b990ba8296b3fea 100644 (file)
@@ -266,6 +266,8 @@ syscall_return_via_sysret:
         * We are on the trampoline stack.  All regs except RDI are live.
         * We can do future final exit work right here.
         */
+       STACKLEAK_ERASE_NOCLOBBER
+
        SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 
        popq    %rdi
@@ -564,6 +566,7 @@ ENTRY(interrupt_entry)
 
        ret
 END(interrupt_entry)
+_ASM_NOKPROBE(interrupt_entry)
 
 
 /* Interrupt entry/exit. */
@@ -625,6 +628,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
         * We are on the trampoline stack.  All regs except RDI are live.
         * We can do future final exit work right here.
         */
+       STACKLEAK_ERASE_NOCLOBBER
 
        SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
 
@@ -763,6 +767,7 @@ native_irq_return_ldt:
        jmp     native_irq_return_iret
 #endif
 END(common_interrupt)
+_ASM_NOKPROBE(common_interrupt)
 
 /*
  * APIC interrupts.
@@ -777,6 +782,7 @@ ENTRY(\sym)
        call    \do_sym /* rdi points to pt_regs */
        jmp     ret_from_intr
 END(\sym)
+_ASM_NOKPROBE(\sym)
 .endm
 
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
@@ -957,6 +963,7 @@ ENTRY(\sym)
 
        jmp     error_exit
        .endif
+_ASM_NOKPROBE(\sym)
 END(\sym)
 .endm
 
index 7d0df78db727296d1c4451e3a930033669f47aa3..8eaf8952c408cd619124f9696b4888fae2f529ad 100644 (file)
@@ -261,6 +261,11 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
 
        /* Opportunistic SYSRET */
 sysret32_from_system_call:
+       /*
+        * We are not going to return to userspace from the trampoline
+        * stack. So let's erase the thread stack right now.
+        */
+       STACKLEAK_ERASE
        TRACE_IRQS_ON                   /* User mode traces as IRQs on. */
        movq    RBX(%rsp), %rbx         /* pt_regs->rbx */
        movq    RBP(%rsp), %rbp         /* pt_regs->rbp */
index 141d415a8c8098e9bd9747c94ee84e4de843c9f8..0624bf2266fd76d2852ce005acb2f9d67dbe6b8f 100644 (file)
@@ -47,7 +47,7 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 CPPFLAGS_vdso.lds += -P -C
 
 VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
-                       -z max-page-size=4096 -z common-page-size=4096
+                       -z max-page-size=4096
 
 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
        $(call if_changed,vdso)
@@ -98,7 +98,7 @@ CFLAGS_REMOVE_vvar.o = -pg
 
 CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
 VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
-                          -z max-page-size=4096 -z common-page-size=4096
+                          -z max-page-size=4096
 
 # x32-rebranded versions
 vobjx32s-y := $(vobjs-y:.o=-x32.o)
index 106911b603bd95b355ddcf5d16028c4374c17035..374a19712e2009a0cbcb0c3048d4489cf5c2f9d9 100644 (file)
@@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event *event)
        if (config == -1LL)
                return -EINVAL;
 
-       /*
-        * Branch tracing:
-        */
-       if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !attr->freq && hwc->sample_period == 1) {
-               /* BTS is not supported by this architecture. */
-               if (!x86_pmu.bts_active)
-                       return -EOPNOTSUPP;
-
-               /* BTS is currently only allowed for user-mode. */
-               if (!attr->exclude_kernel)
-                       return -EOPNOTSUPP;
-
-               /* disallow bts if conflicting events are present */
-               if (x86_add_exclusive(x86_lbr_exclusive_lbr))
-                       return -EBUSY;
-
-               event->destroy = hw_perf_lbr_event_destroy;
-       }
-
        hwc->config |= config;
 
        return 0;
index 0fb8659b20d8d76fd974406873df8416359ab157..ecc3e34ca955f720579aef805afe26f2b49a9e57 100644 (file)
@@ -2306,14 +2306,18 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
        return handled;
 }
 
-static bool disable_counter_freezing;
+static bool disable_counter_freezing = true;
 static int __init intel_perf_counter_freezing_setup(char *s)
 {
-       disable_counter_freezing = true;
-       pr_info("Intel PMU Counter freezing feature disabled\n");
+       bool res;
+
+       if (kstrtobool(s, &res))
+               return -EINVAL;
+
+       disable_counter_freezing = !res;
        return 1;
 }
-__setup("disable_counter_freezing", intel_perf_counter_freezing_setup);
+__setup("perf_v4_pmi=", intel_perf_counter_freezing_setup);
 
 /*
  * Simplified handler for Arch Perfmon v4:
@@ -2470,16 +2474,7 @@ done:
 static struct event_constraint *
 intel_bts_constraints(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-       unsigned int hw_event, bts_event;
-
-       if (event->attr.freq)
-               return NULL;
-
-       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
-       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
-
-       if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
+       if (unlikely(intel_pmu_has_bts(event)))
                return &bts_constraint;
 
        return NULL;
@@ -3098,10 +3093,51 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
        return flags;
 }
 
+static int intel_pmu_bts_config(struct perf_event *event)
+{
+       struct perf_event_attr *attr = &event->attr;
+
+       if (unlikely(intel_pmu_has_bts(event))) {
+               /* BTS is not supported by this architecture. */
+               if (!x86_pmu.bts_active)
+                       return -EOPNOTSUPP;
+
+               /* BTS is currently only allowed for user-mode. */
+               if (!attr->exclude_kernel)
+                       return -EOPNOTSUPP;
+
+               /* BTS is not allowed for precise events. */
+               if (attr->precise_ip)
+                       return -EOPNOTSUPP;
+
+               /* disallow bts if conflicting events are present */
+               if (x86_add_exclusive(x86_lbr_exclusive_lbr))
+                       return -EBUSY;
+
+               event->destroy = hw_perf_lbr_event_destroy;
+       }
+
+       return 0;
+}
+
+static int core_pmu_hw_config(struct perf_event *event)
+{
+       int ret = x86_pmu_hw_config(event);
+
+       if (ret)
+               return ret;
+
+       return intel_pmu_bts_config(event);
+}
+
 static int intel_pmu_hw_config(struct perf_event *event)
 {
        int ret = x86_pmu_hw_config(event);
 
+       if (ret)
+               return ret;
+
+       ret = intel_pmu_bts_config(event);
        if (ret)
                return ret;
 
@@ -3127,7 +3163,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
                /*
                 * BTS is set up earlier in this path, so don't account twice
                 */
-               if (!intel_pmu_has_bts(event)) {
+               if (!unlikely(intel_pmu_has_bts(event))) {
                        /* disallow lbr if conflicting events are present */
                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                                return -EBUSY;
@@ -3596,7 +3632,7 @@ static __initconst const struct x86_pmu core_pmu = {
        .enable_all             = core_pmu_enable_all,
        .enable                 = core_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
-       .hw_config              = x86_pmu_hw_config,
+       .hw_config              = core_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
@@ -4535,7 +4571,7 @@ __init int intel_pmu_init(void)
                }
        }
 
-       snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name);
+       snprintf(pmu_name_str, sizeof(pmu_name_str), "%s", name);
 
        if (version >= 2 && extra_attr) {
                x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
index e17ab885b1e928d17a671eb96f12cf21905bdb93..cb46d602a6b8bd17eb458f84778019b56b15a93c 100644 (file)
@@ -129,8 +129,15 @@ struct intel_uncore_box {
        struct intel_uncore_extra_reg shared_regs[0];
 };
 
-#define UNCORE_BOX_FLAG_INITIATED      0
-#define UNCORE_BOX_FLAG_CTL_OFFS8      1 /* event config registers are 8-byte apart */
+/* CFL uncore 8th cbox MSRs */
+#define CFL_UNC_CBO_7_PERFEVTSEL0              0xf70
+#define CFL_UNC_CBO_7_PER_CTR0                 0xf76
+
+#define UNCORE_BOX_FLAG_INITIATED              0
+/* event config registers are 8 bytes apart */
+#define UNCORE_BOX_FLAG_CTL_OFFS8              1
+/* the CFL 8th CBOX has a different MSR space */
+#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS     2
 
 struct uncore_event_desc {
        struct kobj_attribute attr;
@@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
 static inline
 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 {
-       return box->pmu->type->event_ctl +
-               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-               uncore_msr_box_offset(box);
+       if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+               return CFL_UNC_CBO_7_PERFEVTSEL0 +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+       } else {
+               return box->pmu->type->event_ctl +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+                      uncore_msr_box_offset(box);
+       }
 }
 
 static inline
 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 {
-       return box->pmu->type->perf_ctr +
-               (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-               uncore_msr_box_offset(box);
+       if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+               return CFL_UNC_CBO_7_PER_CTR0 +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+       } else {
+               return box->pmu->type->perf_ctr +
+                      (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+                      uncore_msr_box_offset(box);
+       }
 }
 
 static inline
index 8527c3e1038b78d868743274c35368ab318649ca..2593b0d7aeee6089413d980618ce07c35d84c101 100644 (file)
 #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
 #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
 #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
+#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC  0x590c
+#define PCI_DEVICE_ID_INTEL_KBL_U_IMC  0x5904
+#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
+#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
+#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
+#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
+#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
+#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
+#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
+#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC       0x3e0f
+#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC       0x3e1f
+#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC       0x3ec2
+#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC       0x3e30
+#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC       0x3e18
+#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC       0x3ec6
+#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC       0x3e31
+#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC       0x3e33
+#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC       0x3eca
+#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC       0x3e32
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
                wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
                        SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
        }
+
+       /* The 8th CBOX has a different MSR space */
+       if (box->pmu->pmu_idx == 7)
+               __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
 }
 
 static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
@@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = {
 static struct intel_uncore_type skl_uncore_cbox = {
        .name           = "cbox",
        .num_counters   = 4,
-       .num_boxes      = 5,
+       .num_boxes      = 8,
        .perf_ctr_bits  = 44,
        .fixed_ctr_bits = 48,
        .perf_ctr       = SNB_UNC_CBO_0_PER_CTR0,
@@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
                .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
        },
-
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
        { /* end: all zeroes */ },
 };
 
@@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core H Quad Core */
        IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Dual Core */
        IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core S Quad Core */
+       IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core Y */
+       IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U */
+       IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core U Quad Core */
+       IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Dual Core */
+       IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver),  /* 7th Gen Core S Quad Core */
+       IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 2 Cores */
+       IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core U 4 Cores */
+       IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 4 Cores */
+       IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core H 6 Cores */
+       IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 2 Cores Desktop */
+       IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Desktop */
+       IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Desktop */
+       IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Desktop */
+       IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Work Station */
+       IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Work Station */
+       IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Work Station */
+       IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 4 Cores Server */
+       IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 6 Cores Server */
+       IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver),  /* 8th Gen Core S 8 Cores Server */
        {  /* end marker */ }
 };
 
index adae087cecdda0b6b1aeb78bb51fdb55a61696ed..78d7b7031bfccb8ec2dbcd7de6e54a29fd1365ce 100644 (file)
@@ -859,11 +859,16 @@ static inline int amd_pmu_init(void)
 
 static inline bool intel_pmu_has_bts(struct perf_event *event)
 {
-       if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
-           !event->attr.freq && event->hw.sample_period == 1)
-               return true;
+       struct hw_perf_event *hwc = &event->hw;
+       unsigned int hw_event, bts_event;
+
+       if (event->attr.freq)
+               return false;
+
+       hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
+       bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
 
-       return false;
+       return hw_event == bts_event && hwc->sample_period == 1;
 }
 
 int intel_pmu_save_and_restart(struct perf_event *event);
index a07ffd23e4dd67d3e182bd803eb868eaef1bcdf5..f6f6ef436599a6dbd7ecb8c86da4d69dab0a9763 100644 (file)
@@ -36,6 +36,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
         */
        if (boot_params->sentinel) {
                /* fields in boot_params are left uninitialized, clear them */
+               boot_params->acpi_rsdp_addr = 0;
                memset(&boot_params->ext_ramdisk_image, 0,
                       (char *)&boot_params->efi_info -
                        (char *)&boot_params->ext_ramdisk_image);
index fab4df16a3c43737b27368e3b67926a767bfa663..22c4dfe6599230378a3c9ac2ac6816a352390060 100644 (file)
@@ -217,11 +217,18 @@ static inline bool in_x32_syscall(void)
        return false;
 }
 
-static inline bool in_compat_syscall(void)
+static inline bool in_32bit_syscall(void)
 {
        return in_ia32_syscall() || in_x32_syscall();
 }
+
+#ifdef CONFIG_COMPAT
+static inline bool in_compat_syscall(void)
+{
+       return in_32bit_syscall();
+}
 #define in_compat_syscall in_compat_syscall    /* override the generic impl */
+#endif
 
 struct compat_siginfo;
 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
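
With in_compat_syscall() now confined to CONFIG_COMPAT builds, code that only cares about the user ABI word size can call in_32bit_syscall() unconditionally. A usage sketch — the helper and the hint values below are illustrative, not taken from the tree:

#include <asm/compat.h>

/* Hypothetical helper: pick a mapping hint based purely on the user
 * ABI word size, valid whether or not CONFIG_COMPAT is set. */
static unsigned long mmap_hint(void)
{
        if (in_32bit_syscall())
                return 0x40000000UL;            /* illustrative 32-bit hint */
        return 0x7f0000000000UL;                /* illustrative 64-bit hint */
}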
index 89a048c2faec7f8a818d1a461ccd7fa67eca0fd9..28c4a502b4197cce9ae968deb8ea2fe7797e8da4 100644 (file)
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
 #define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
+#define X86_FEATURE_MOVDIRI            (16*32+27) /* MOVDIRI instruction */
+#define X86_FEATURE_MOVDIR64B          (16*32+28) /* MOVDIR64B instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
index 5f7290e6e954e9428294d5bf732929918d7868f5..69dcdf195b6112b691616e2512f8a4ecca4796a1 100644 (file)
@@ -226,7 +226,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
                     "3: movl $-2,%[err]\n\t"                           \
                     "jmp 2b\n\t"                                       \
                     ".popsection\n\t"                                  \
-                    _ASM_EXTABLE_UA(1b, 3b)                            \
+                    _ASM_EXTABLE(1b, 3b)                               \
                     : [err] "=r" (err)                                 \
                     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)    \
                     : "memory")
index c18ed65287d5eda607c009bf747fc67f81743dfc..cf350639e76d1312a9c75f0ab21dfb31e0014afe 100644 (file)
@@ -76,9 +76,7 @@ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name
 #define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
 static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 {
-       if (in_compat_syscall())
-               return true;
-       return false;
+       return in_32bit_syscall();
 }
 #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
 #endif /* !COMPILE_OFFSETS */
index 55e51ff7e421f80b9145036b9356aa1a63ff21ce..fbda5a917c5b772dca40724bb2c24c189c38be91 100644 (file)
@@ -1094,7 +1094,8 @@ struct kvm_x86_ops {
        bool (*has_wbinvd_exit)(void);
 
        u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
-       void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+       /* Returns actual tsc_offset set in active VMCS */
+       u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
index 4da9b1c58d287bbdda427e31dd67a1653b519043..c1a812bd5a27d770da1076c5b22ca9dc7dd66762 100644 (file)
@@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am
 
 int mce_available(struct cpuinfo_x86 *c);
 bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
 
 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);
index 0d6271cce198dcd1ac0108ac9a4ea803a6e8b2dc..1d0a7778e16317cab0087c46a1c30a8754e3ec8e 100644 (file)
@@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
                                      : "cc");
        }
 #endif
-               return hv_status;
+       return hv_status;
 }
 
 /*
index 80f4a4f38c79ca4c9fe10f26c9748f4804d3b395..c8f73efb4eceb82391bf908f1f8f292586be5925 100644 (file)
 
 #define MSR_IA32_SPEC_CTRL             0x00000048 /* Speculation Control */
 #define SPEC_CTRL_IBRS                 (1 << 0)   /* Indirect Branch Restricted Speculation */
-#define SPEC_CTRL_STIBP                        (1 << 1)   /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_STIBP_SHIFT          1          /* Single Thread Indirect Branch Predictor (STIBP) bit */
+#define SPEC_CTRL_STIBP                        (1 << SPEC_CTRL_STIBP_SHIFT)    /* STIBP mask */
 #define SPEC_CTRL_SSBD_SHIFT           2          /* Speculative Store Bypass Disable bit */
-#define SPEC_CTRL_SSBD                 (1 << SPEC_CTRL_SSBD_SHIFT)   /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_SSBD                 (1 << SPEC_CTRL_SSBD_SHIFT)     /* Speculative Store Bypass Disable */
 
 #define MSR_IA32_PRED_CMD              0x00000049 /* Prediction Command */
 #define PRED_CMD_IBPB                  (1 << 0)   /* Indirect Branch Prediction Barrier */
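
Spelling out SPEC_CTRL_STIBP_SHIFT next to the mask mirrors the existing SSBD pair; keeping the bit position as a named shift is what lets per-task flag bits be turned into MSR bits with plain shifts elsewhere in the series. A tiny standalone check of the pairing (the MSR composition below is illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define SPEC_CTRL_STIBP_SHIFT   1
#define SPEC_CTRL_STIBP         (1UL << SPEC_CTRL_STIBP_SHIFT)
#define SPEC_CTRL_SSBD_SHIFT    2
#define SPEC_CTRL_SSBD          (1UL << SPEC_CTRL_SSBD_SHIFT)

int main(void)
{
        /* compose an IA32_SPEC_CTRL value from the named bits */
        uint64_t msr = SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;

        printf("IA32_SPEC_CTRL = 0x%llx\n", (unsigned long long)msr);
        return 0;
}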
index 80dc144224955135098f0890f1dfc93a6bd3eaab..032b6009baab4a96d04d57112f2f48e1182fb8c2 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef _ASM_X86_NOSPEC_BRANCH_H_
 #define _ASM_X86_NOSPEC_BRANCH_H_
 
+#include <linux/static_key.h>
+
 #include <asm/alternative.h>
 #include <asm/alternative-asm.h>
 #include <asm/cpufeatures.h>
        _ASM_PTR " 999b\n\t"                                    \
        ".popsection\n\t"
 
-#if defined(CONFIG_X86_64) && defined(RETPOLINE)
+#ifdef CONFIG_RETPOLINE
+#ifdef CONFIG_X86_64
 
 /*
- * Since the inline asm uses the %V modifier which is only in newer GCC,
- * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE.
+ * Inline asm uses the %V modifier, which is only available in newer
+ * GCC; CONFIG_RETPOLINE being defined guarantees such a compiler.
  */
 # define CALL_NOSPEC                                           \
        ANNOTATE_NOSPEC_ALTERNATIVE                             \
        X86_FEATURE_RETPOLINE_AMD)
 # define THUNK_TARGET(addr) [thunk_target] "r" (addr)
 
-#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
+#else /* CONFIG_X86_32 */
 /*
  * For i386 we use the original ret-equivalent retpoline, because
  * otherwise we'll run out of registers. We don't care about CET
        X86_FEATURE_RETPOLINE_AMD)
 
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
+#endif
 #else /* No retpoline for C / inline asm */
 # define CALL_NOSPEC "call *%[thunk_target]\n"
 # define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
 /* The Spectre V2 mitigation variants */
 enum spectre_v2_mitigation {
        SPECTRE_V2_NONE,
-       SPECTRE_V2_RETPOLINE_MINIMAL,
-       SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
        SPECTRE_V2_RETPOLINE_GENERIC,
        SPECTRE_V2_RETPOLINE_AMD,
        SPECTRE_V2_IBRS_ENHANCED,
 };
 
+/* The indirect branch speculation control variants */
+enum spectre_v2_user_mitigation {
+       SPECTRE_V2_USER_NONE,
+       SPECTRE_V2_USER_STRICT,
+       SPECTRE_V2_USER_PRCTL,
+       SPECTRE_V2_USER_SECCOMP,
+};
+
 /* The Speculative Store Bypass disable variants */
 enum ssb_mitigation {
        SPEC_STORE_BYPASS_NONE,
@@ -303,6 +313,10 @@ do {                                                                       \
        preempt_enable();                                               \
 } while (0)
 
+DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
 #endif /* __ASSEMBLY__ */
 
 /*
index cd0cf1c568b4cef2fcc5b16c4ebcf374ee9add0d..8f657286d599a9577dca86b46b8199c9c547a661 100644 (file)
 
 /*
  * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272).  The gap is to allow a space for a
- * hypervisor to fit.  Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap leaves space for the LDT remap used by PTI (1 pgd slot) and for
+ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
  */
-#define __PAGE_OFFSET_BASE_L5  _AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4  _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5  _AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4  _AC(0xffff888000000000, UL)
 
 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
 #define __PAGE_OFFSET           page_offset_base
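
The new bases can be re-derived by hand: start at the most negative canonical kernel address for the paging mode and add 17 PGD slots. Assuming the usual shifts (PGDIR_SHIFT is 39 under 4-level paging and 48 under 5-level), a standalone check:

	#include <assert.h>

	int main(void)
	{
		/* 4-level: one pgd slot spans 1UL << 39 bytes (512 GiB). */
		unsigned long l4 = 0xffff800000000000UL + 17 * (1UL << 39);
		/* 5-level: one pgd slot spans 1UL << 48 bytes (256 TiB). */
		unsigned long l5 = 0xff00000000000000UL + 17 * (1UL << 48);

		assert(l4 == 0xffff888000000000UL);
		assert(l5 == 0xff11000000000000UL);
		return 0;
	}
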
index fba54ca23b2a9f8be320f81cf3cc17a8cf550a0e..26942ad63830407255afc9e6de77267056a97135 100644 (file)
@@ -361,7 +361,6 @@ extern struct paravirt_patch_template pv_ops;
        __visible extern const char start_##ops##_##name[], end_##ops##_##name[];       \
        asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
 
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_default(u8 type, void *insnbuf,
                                unsigned long addr, unsigned len);
@@ -651,7 +650,6 @@ void paravirt_leave_lazy_mmu(void);
 void paravirt_flush_lazy_mmu(void);
 
 void _paravirt_nop(void);
-u32 _paravirt_ident_32(u32);
 u64 _paravirt_ident_64(u64);
 
 #define paravirt_nop   ((void *)_paravirt_nop)
index 04edd2d58211a78e3261993bd8d0e088e3b4c4ef..84bd9bdc1987faa634cd1daad7dbfe94d586a82b 100644 (file)
@@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d;
  */
 #define MAXMEM                 (1UL << MAX_PHYSMEM_BITS)
 
-#define LDT_PGD_ENTRY_L4       -3UL
-#define LDT_PGD_ENTRY_L5       -112UL
-#define LDT_PGD_ENTRY          (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY          -240UL
 #define LDT_BASE_ADDR          (LDT_PGD_ENTRY << PGDIR_SHIFT)
 #define LDT_END_ADDR           (LDT_BASE_ADDR + PGDIR_SIZE)
 
index 87623c6b13db5c735bfe80d377f678f1a5f1b893..bd5ac6cc37db5f87c92cc3013138dffa8b2a0302 100644 (file)
 #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
 static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
 {
-       u32 val = 0;
-
-       if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
-                            "I", _Q_PENDING_OFFSET))
-               val |= _Q_PENDING_VAL;
+       u32 val;
 
+       /*
+        * We can't use GEN_BINARY_RMWcc() inside an if() stmt because the
+        * combination of asm goto and CONFIG_PROFILE_ALL_BRANCHES=y results
+        * in a label inside a statement expression, which GCC doesn't like.
+        */
+       val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+                              "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
        val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
 
        return val;
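
The multiply is a branchless boolean-to-mask conversion: GEN_BINARY_RMWcc() evaluates to 0 or 1, and multiplying by _Q_PENDING_VAL yields either 0 or the pending mask with no conditional. The same idiom in standalone form (0x100 is an illustrative stand-in mirroring the kernel's 1 << _Q_PENDING_OFFSET):

	#include <assert.h>

	#define PENDING_VAL 0x100	/* illustrative stand-in for _Q_PENDING_VAL */

	static unsigned int bool_to_pending(int bit_was_set)
	{
		/* 0 -> 0, 1 -> PENDING_VAL, without a branch */
		return bit_was_set * PENDING_VAL;
	}

	int main(void)
	{
		assert(bool_to_pending(0) == 0);
		assert(bool_to_pending(1) == 0x100);
		return 0;
	}
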
index ae7c2c5cd7f0e2e9f2becb438a1366461f5725c6..5393babc05989ebc0cbcbbb21251f2c241e3df04 100644 (file)
@@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
        return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline u64 stibp_tif_to_spec_ctrl(u64 tifn)
+{
+       BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+       return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
 {
        BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
        return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
 }
 
+static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+       BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT);
+       return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);
+}
+
 static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
 {
        return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
@@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void);
 static inline void speculative_store_bypass_ht_init(void) { }
 #endif
 
-extern void speculative_store_bypass_update(unsigned long tif);
-
-static inline void speculative_store_bypass_update_current(void)
-{
-       speculative_store_bypass_update(current_thread_info()->flags);
-}
+extern void speculation_ctrl_update(unsigned long tif);
+extern void speculation_ctrl_update_current(void);
 
 #endif
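
The conversion helpers depend on the TIF bit sitting above its SPEC_CTRL counterpart so a single right shift moves it into place; the BUILD_BUG_ON()s enforce exactly that. With TIF_SPEC_IB = 9 and SPEC_CTRL_STIBP_SHIFT = 1 (both visible elsewhere in this series), the shift distance is 8. A standalone check of the arithmetic:

	#include <assert.h>

	#define SPEC_CTRL_STIBP_SHIFT	1
	#define SPEC_CTRL_STIBP		(1UL << SPEC_CTRL_STIBP_SHIFT)
	#define TIF_SPEC_IB		9
	#define _TIF_SPEC_IB		(1UL << TIF_SPEC_IB)

	int main(void)
	{
		unsigned long tifn = _TIF_SPEC_IB;	/* STIBP requested */
		unsigned long msr_bits =
			(tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT);

		assert(msr_bits == SPEC_CTRL_STIBP);	/* 0x200 >> 8 == 0x2 */
		return 0;
	}
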
index 36bd243843d6dc9b281a7986d71aaf2cf0041be8..7cf1a270d89101822da3c9390f4e1f112258939e 100644 (file)
@@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 
 __visible struct task_struct *__switch_to(struct task_struct *prev,
                                          struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-                     struct tss_struct *tss);
 
 /* This runs on the previous thread's stack. */
 static inline void prepare_switch_to(struct task_struct *next)
index 2ff2a30a264f4c5f02a01b3b87e4148e8992dc5a..82b73b75d67ca23fd605578ec2da8336eccd08c8 100644 (file)
@@ -79,10 +79,12 @@ struct thread_info {
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
-#define TIF_SSBD                       5       /* Reduced data speculation */
+#define TIF_SSBD               5       /* Speculative store bypass disable */
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
+#define TIF_SPEC_IB            9       /* Indirect branch speculation mitigation */
+#define TIF_SPEC_FORCE_UPDATE  10      /* Force speculation MSR update in context switch */
 #define TIF_USER_RETURN_NOTIFY 11      /* notify kernel of userspace return */
 #define TIF_UPROBE             12      /* breakpointed or singlestepping */
 #define TIF_PATCH_PENDING      13      /* pending live patching update */
@@ -110,6 +112,8 @@ struct thread_info {
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
+#define _TIF_SPEC_IB           (1 << TIF_SPEC_IB)
+#define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
 #define _TIF_USER_RETURN_NOTIFY        (1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE            (1 << TIF_UPROBE)
 #define _TIF_PATCH_PENDING     (1 << TIF_PATCH_PENDING)
@@ -145,8 +149,18 @@ struct thread_info {
         _TIF_FSCHECK)
 
 /* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW                                                        \
-       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
+#define _TIF_WORK_CTXSW_BASE                                           \
+       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|         \
+        _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
+
+/*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+ */
+#ifdef CONFIG_SMP
+# define _TIF_WORK_CTXSW       (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
+#else
+# define _TIF_WORK_CTXSW       (_TIF_WORK_CTXSW_BASE)
+#endif
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
index 323a313947e01a6bfc2cb19b2d47ca16f1b9d8d3..f4204bf377fcf72d597f1d0e438a3f85a8c54127 100644 (file)
@@ -169,10 +169,14 @@ struct tlb_state {
 
 #define LOADED_MM_SWITCHING ((struct mm_struct *)1)
 
+       /* Last user mm for optimizing IBPB */
+       union {
+               struct mm_struct        *last_user_mm;
+               unsigned long           last_user_mm_ibpb;
+       };
+
        u16 loaded_mm_asid;
        u16 next_asid;
-       /* last user mm's ctx id */
-       u64 last_ctx_id;
 
        /*
         * We can be in one of several states:
@@ -453,6 +457,12 @@ static inline void __native_flush_tlb_one_user(unsigned long addr)
  */
 static inline void __flush_tlb_all(void)
 {
+       /*
+        * Catch callers running with preemption enabled on a PGE-capable
+        * CPU, who would otherwise not trigger the warning in
+        * __native_flush_tlb().
+        */
+       VM_WARN_ON_ONCE(preemptible());
+
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                __flush_tlb_global();
        } else {
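
The union above works because struct mm_struct pointers are at least word-aligned, so the low bits of last_user_mm_ibpb are free to carry state alongside the mm identity. A hedged sketch of the encoding the companion switch_mm() change presumably uses; the flag name and bit position are assumptions for illustration:

	/* Sketch only: LAST_USER_MM_IBPB is an assumed name and bit. */
	#define LAST_USER_MM_IBPB	0x1UL

	static unsigned long mm_mangle_ibpb(struct task_struct *next)
	{
		unsigned long ibpb = test_tsk_thread_flag(next, TIF_SPEC_IB) ?
				     LAST_USER_MM_IBPB : 0;

		/* Pointer bits identify the mm, bit 0 records the IBPB request. */
		return (unsigned long)next->mm | ibpb;
	}
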
index 0f842104862c3b063cf806566736274d9df3faea..b85a7c54c6a13b51f27b26f13a2dd52f148547a7 100644 (file)
@@ -303,6 +303,4 @@ extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
 extern bool x86_pnpbios_disabled(void);
 
-void x86_verify_bootdata_version(void);
-
 #endif
index 123e669bf363d375820ba3ab2ce981f01aa4329e..790ce08e41f20f4b16a9c085204ea877d6db5e5e 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/mm.h>
 #include <linux/device.h>
 
-#include <linux/uaccess.h>
+#include <asm/extable.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
@@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
  */
 static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
 {
-       return __put_user(val, (unsigned long __user *)addr);
+       int ret = 0;
+
+       asm volatile("1: mov %[val], %[ptr]\n"
+                    "2:\n"
+                    ".section .fixup, \"ax\"\n"
+                    "3: sub $1, %[ret]\n"
+                    "   jmp 2b\n"
+                    ".previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+                    : [ret] "+r" (ret), [ptr] "=m" (*addr)
+                    : [val] "r" (val));
+
+       return ret;
 }
 
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+                                     unsigned long *val)
 {
-       return __get_user(*val, (unsigned long __user *)addr);
+       int ret = 0;
+       unsigned long rval = ~0ul;
+
+       asm volatile("1: mov %[ptr], %[rval]\n"
+                    "2:\n"
+                    ".section .fixup, \"ax\"\n"
+                    "3: sub $1, %[ret]\n"
+                    "   jmp 2b\n"
+                    ".previous\n"
+                    _ASM_EXTABLE(1b, 3b)
+                    : [ret] "+r" (ret), [rval] "+r" (rval)
+                    : [ptr] "m" (*addr));
+       *val = rval;
+
+       return ret;
 }
 
 #ifdef CONFIG_XEN_PV
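
Callers of the reworked accessors treat a negative return as a faulted access; the fixup path subtracts 1 from ret and, for reads, leaves the ~0 preload in place. A minimal usage sketch with a hypothetical table name:

	/* Usage sketch: entries in some_shared_table may be unmapped. */
	unsigned long val;

	if (xen_safe_read_ulong(&some_shared_table[idx], &val) < 0)
		val = ~0UL;	/* faulted; *val was already primed with ~0 */
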
index 22f89d040dddce756a53fc94c2bb1fbcf3abcb4b..60733f137e9a292f2e4fe0f9c783f4a3f1d103f2 100644 (file)
@@ -16,9 +16,6 @@
 #define RAMDISK_PROMPT_FLAG            0x8000
 #define RAMDISK_LOAD_FLAG              0x4000
 
-/* version flags */
-#define VERSION_WRITTEN        0x8000
-
 /* loadflags */
 #define LOADED_HIGH    (1<<0)
 #define KASLR_FLAG     (1<<1)
@@ -89,7 +86,6 @@ struct setup_header {
        __u64   pref_address;
        __u32   init_size;
        __u32   handover_offset;
-       __u64   acpi_rsdp_addr;
 } __attribute__((packed));
 
 struct sys_desc_table {
@@ -159,7 +155,8 @@ struct boot_params {
        __u8  _pad2[4];                                 /* 0x054 */
        __u64  tboot_addr;                              /* 0x058 */
        struct ist_info ist_info;                       /* 0x060 */
-       __u8  _pad3[16];                                /* 0x070 */
+       __u64 acpi_rsdp_addr;                           /* 0x070 */
+       __u8  _pad3[8];                                 /* 0x078 */
        __u8  hd0_info[16];     /* obsolete! */         /* 0x080 */
        __u8  hd1_info[16];     /* obsolete! */         /* 0x090 */
        struct sys_desc_table sys_desc_table; /* obsolete! */   /* 0x0a0 */
index 92c76bf97ad828436405ce27398d48ce25132f65..06635fbca81c0359c170bc1ef2266e499f526983 100644 (file)
@@ -1776,5 +1776,5 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
 
 u64 x86_default_get_root_pointer(void)
 {
-       return boot_params.hdr.acpi_rsdp_addr;
+       return boot_params.acpi_rsdp_addr;
 }
index c37e66e493bff6775fae88d00c1f991d97f8f908..500278f5308ee2b1ccb7263e42861dbed76f44df 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/nospec.h>
 #include <linux/prctl.h>
+#include <linux/sched/smt.h>
 
 #include <asm/spec-ctrl.h>
 #include <asm/cmdline.h>
@@ -53,6 +54,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 u64 __ro_after_init x86_amd_ls_cfg_base;
 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
 
+/* Control conditional STIBP in switch_to() */
+DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
+/* Control conditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
+/* Control unconditional IBPB in switch_mm() */
+DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -123,31 +131,6 @@ void __init check_bugs(void)
 #endif
 }
 
-/* The kernel command line selection */
-enum spectre_v2_mitigation_cmd {
-       SPECTRE_V2_CMD_NONE,
-       SPECTRE_V2_CMD_AUTO,
-       SPECTRE_V2_CMD_FORCE,
-       SPECTRE_V2_CMD_RETPOLINE,
-       SPECTRE_V2_CMD_RETPOLINE_GENERIC,
-       SPECTRE_V2_CMD_RETPOLINE_AMD,
-};
-
-static const char *spectre_v2_strings[] = {
-       [SPECTRE_V2_NONE]                       = "Vulnerable",
-       [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
-       [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
-       [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
-       [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
-       [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
-};
-
-#undef pr_fmt
-#define pr_fmt(fmt)     "Spectre V2 : " fmt
-
-static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
-       SPECTRE_V2_NONE;
-
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
@@ -169,6 +152,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
+               /* Conditional STIBP enabled? */
+               if (static_branch_unlikely(&switch_to_cond_stibp))
+                       hostval |= stibp_tif_to_spec_ctrl(ti->flags);
+
                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
@@ -202,7 +189,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);
 
-               speculative_store_bypass_update(tif);
+               speculation_ctrl_update(tif);
        }
 }
 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
@@ -217,6 +204,15 @@ static void x86_amd_ssb_disable(void)
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)     "Spectre V2 : " fmt
+
+static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+       SPECTRE_V2_NONE;
+
+static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+       SPECTRE_V2_USER_NONE;
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
 
@@ -238,67 +234,217 @@ static inline const char *spectre_v2_module_string(void)
 static inline const char *spectre_v2_module_string(void) { return ""; }
 #endif
 
-static void __init spec2_print_if_insecure(const char *reason)
+static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
-       if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-               pr_info("%s selected on command line.\n", reason);
+       int len = strlen(opt);
+
+       return len == arglen && !strncmp(arg, opt, len);
 }
 
-static void __init spec2_print_if_secure(const char *reason)
+/* The kernel command line selection for spectre v2 */
+enum spectre_v2_mitigation_cmd {
+       SPECTRE_V2_CMD_NONE,
+       SPECTRE_V2_CMD_AUTO,
+       SPECTRE_V2_CMD_FORCE,
+       SPECTRE_V2_CMD_RETPOLINE,
+       SPECTRE_V2_CMD_RETPOLINE_GENERIC,
+       SPECTRE_V2_CMD_RETPOLINE_AMD,
+};
+
+enum spectre_v2_user_cmd {
+       SPECTRE_V2_USER_CMD_NONE,
+       SPECTRE_V2_USER_CMD_AUTO,
+       SPECTRE_V2_USER_CMD_FORCE,
+       SPECTRE_V2_USER_CMD_PRCTL,
+       SPECTRE_V2_USER_CMD_PRCTL_IBPB,
+       SPECTRE_V2_USER_CMD_SECCOMP,
+       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
+};
+
+static const char * const spectre_v2_user_strings[] = {
+       [SPECTRE_V2_USER_NONE]          = "User space: Vulnerable",
+       [SPECTRE_V2_USER_STRICT]        = "User space: Mitigation: STIBP protection",
+       [SPECTRE_V2_USER_PRCTL]         = "User space: Mitigation: STIBP via prctl",
+       [SPECTRE_V2_USER_SECCOMP]       = "User space: Mitigation: STIBP via seccomp and prctl",
+};
+
+static const struct {
+       const char                      *option;
+       enum spectre_v2_user_cmd        cmd;
+       bool                            secure;
+} v2_user_options[] __initdata = {
+       { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
+       { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
+       { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
+       { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
+       { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
+       { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
+       { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
+};
+
+static void __init spec_v2_user_print_cond(const char *reason, bool secure)
 {
-       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
-               pr_info("%s selected on command line.\n", reason);
+       if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+               pr_info("spectre_v2_user=%s forced on command line.\n", reason);
 }
 
-static inline bool retp_compiler(void)
+static enum spectre_v2_user_cmd __init
+spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
 {
-       return __is_defined(RETPOLINE);
+       char arg[20];
+       int ret, i;
+
+       switch (v2_cmd) {
+       case SPECTRE_V2_CMD_NONE:
+               return SPECTRE_V2_USER_CMD_NONE;
+       case SPECTRE_V2_CMD_FORCE:
+               return SPECTRE_V2_USER_CMD_FORCE;
+       default:
+               break;
+       }
+
+       ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
+                                 arg, sizeof(arg));
+       if (ret < 0)
+               return SPECTRE_V2_USER_CMD_AUTO;
+
+       for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
+               if (match_option(arg, ret, v2_user_options[i].option)) {
+                       spec_v2_user_print_cond(v2_user_options[i].option,
+                                               v2_user_options[i].secure);
+                       return v2_user_options[i].cmd;
+               }
+       }
+
+       pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
+       return SPECTRE_V2_USER_CMD_AUTO;
 }
 
-static inline bool match_option(const char *arg, int arglen, const char *opt)
+static void __init
+spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
 {
-       int len = strlen(opt);
+       enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
+       bool smt_possible = IS_ENABLED(CONFIG_SMP);
+       enum spectre_v2_user_cmd cmd;
 
-       return len == arglen && !strncmp(arg, opt, len);
+       if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
+               return;
+
+       if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
+           cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+               smt_possible = false;
+
+       cmd = spectre_v2_parse_user_cmdline(v2_cmd);
+       switch (cmd) {
+       case SPECTRE_V2_USER_CMD_NONE:
+               goto set_mode;
+       case SPECTRE_V2_USER_CMD_FORCE:
+               mode = SPECTRE_V2_USER_STRICT;
+               break;
+       case SPECTRE_V2_USER_CMD_PRCTL:
+       case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+               mode = SPECTRE_V2_USER_PRCTL;
+               break;
+       case SPECTRE_V2_USER_CMD_AUTO:
+       case SPECTRE_V2_USER_CMD_SECCOMP:
+       case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+               if (IS_ENABLED(CONFIG_SECCOMP))
+                       mode = SPECTRE_V2_USER_SECCOMP;
+               else
+                       mode = SPECTRE_V2_USER_PRCTL;
+               break;
+       }
+
+       /* Initialize Indirect Branch Prediction Barrier */
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
+               setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+
+               switch (cmd) {
+               case SPECTRE_V2_USER_CMD_FORCE:
+               case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
+               case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
+                       static_branch_enable(&switch_mm_always_ibpb);
+                       break;
+               case SPECTRE_V2_USER_CMD_PRCTL:
+               case SPECTRE_V2_USER_CMD_AUTO:
+               case SPECTRE_V2_USER_CMD_SECCOMP:
+                       static_branch_enable(&switch_mm_cond_ibpb);
+                       break;
+               default:
+                       break;
+               }
+
+               pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
+                       static_key_enabled(&switch_mm_always_ibpb) ?
+                       "always-on" : "conditional");
+       }
+
+       /* If enhanced IBRS is enabled, no STIBP is required */
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return;
+
+       /*
+        * If SMT is not possible or STIBP is not available, clear the
+        * STIBP mode.
+        */
+       if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
+               mode = SPECTRE_V2_USER_NONE;
+set_mode:
+       spectre_v2_user = mode;
+       /* Only print the STIBP mode when SMT possible */
+       if (smt_possible)
+               pr_info("%s\n", spectre_v2_user_strings[mode]);
 }
 
+static const char * const spectre_v2_strings[] = {
+       [SPECTRE_V2_NONE]                       = "Vulnerable",
+       [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
+       [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
+       [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
+};
+
 static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
-} mitigation_options[] = {
-       { "off",               SPECTRE_V2_CMD_NONE,              false },
-       { "on",                SPECTRE_V2_CMD_FORCE,             true },
-       { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
-       { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
-       { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
-       { "auto",              SPECTRE_V2_CMD_AUTO,              false },
+} mitigation_options[] __initdata = {
+       { "off",                SPECTRE_V2_CMD_NONE,              false },
+       { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
+       { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
+       { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
+       { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
+       { "auto",               SPECTRE_V2_CMD_AUTO,              false },
 };
 
+static void __init spec_v2_print_cond(const char *reason, bool secure)
+{
+       if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
+               pr_info("%s selected on command line.\n", reason);
+}
+
 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
 {
+       enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
        char arg[20];
        int ret, i;
-       enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
 
        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
-       else {
-               ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
-               if (ret < 0)
-                       return SPECTRE_V2_CMD_AUTO;
 
-               for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
-                       if (!match_option(arg, ret, mitigation_options[i].option))
-                               continue;
-                       cmd = mitigation_options[i].cmd;
-                       break;
-               }
+       ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
+       if (ret < 0)
+               return SPECTRE_V2_CMD_AUTO;
 
-               if (i >= ARRAY_SIZE(mitigation_options)) {
-                       pr_err("unknown option (%s). Switching to AUTO select\n", arg);
-                       return SPECTRE_V2_CMD_AUTO;
-               }
+       for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
+               if (!match_option(arg, ret, mitigation_options[i].option))
+                       continue;
+               cmd = mitigation_options[i].cmd;
+               break;
+       }
+
+       if (i >= ARRAY_SIZE(mitigation_options)) {
+               pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+               return SPECTRE_V2_CMD_AUTO;
        }
 
        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
@@ -316,54 +462,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
                return SPECTRE_V2_CMD_AUTO;
        }
 
-       if (mitigation_options[i].secure)
-               spec2_print_if_secure(mitigation_options[i].option);
-       else
-               spec2_print_if_insecure(mitigation_options[i].option);
-
+       spec_v2_print_cond(mitigation_options[i].option,
+                          mitigation_options[i].secure);
        return cmd;
 }
 
-static bool stibp_needed(void)
-{
-       if (spectre_v2_enabled == SPECTRE_V2_NONE)
-               return false;
-
-       if (!boot_cpu_has(X86_FEATURE_STIBP))
-               return false;
-
-       return true;
-}
-
-static void update_stibp_msr(void *info)
-{
-       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
-}
-
-void arch_smt_update(void)
-{
-       u64 mask;
-
-       if (!stibp_needed())
-               return;
-
-       mutex_lock(&spec_ctrl_mutex);
-       mask = x86_spec_ctrl_base;
-       if (cpu_smt_control == CPU_SMT_ENABLED)
-               mask |= SPEC_CTRL_STIBP;
-       else
-               mask &= ~SPEC_CTRL_STIBP;
-
-       if (mask != x86_spec_ctrl_base) {
-               pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
-                               cpu_smt_control == CPU_SMT_ENABLED ?
-                               "Enabling" : "Disabling");
-               x86_spec_ctrl_base = mask;
-               on_each_cpu(update_stibp_msr, NULL, 1);
-       }
-       mutex_unlock(&spec_ctrl_mutex);
-}
-
 static void __init spectre_v2_select_mitigation(void)
 {
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -417,14 +520,12 @@ retpoline_auto:
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
-               mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
-                                        SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
+               mode = SPECTRE_V2_RETPOLINE_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
-               mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
-                                        SPECTRE_V2_RETPOLINE_MINIMAL;
+               mode = SPECTRE_V2_RETPOLINE_GENERIC;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }
 
@@ -443,12 +544,6 @@ specv2_set_mode:
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
-       /* Initialize Indirect Branch Prediction Barrier if supported */
-       if (boot_cpu_has(X86_FEATURE_IBPB)) {
-               setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
-               pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
-       }
-
        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so, enable restricted
@@ -465,10 +560,67 @@ specv2_set_mode:
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
 
+       /* Set up IBPB and STIBP depending on the general spectre V2 command */
+       spectre_v2_user_select_mitigation(cmd);
+
        /* Enable STIBP if appropriate */
        arch_smt_update();
 }
 
+static void update_stibp_msr(void * __unused)
+{
+       wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+/* Update x86_spec_ctrl_base in case SMT state changed. */
+static void update_stibp_strict(void)
+{
+       u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
+
+       if (sched_smt_active())
+               mask |= SPEC_CTRL_STIBP;
+
+       if (mask == x86_spec_ctrl_base)
+               return;
+
+       pr_info("Update user space SMT mitigation: STIBP %s\n",
+               mask & SPEC_CTRL_STIBP ? "always-on" : "off");
+       x86_spec_ctrl_base = mask;
+       on_each_cpu(update_stibp_msr, NULL, 1);
+}
+
+/* Update the static key controlling the evaluation of TIF_SPEC_IB */
+static void update_indir_branch_cond(void)
+{
+       if (sched_smt_active())
+               static_branch_enable(&switch_to_cond_stibp);
+       else
+               static_branch_disable(&switch_to_cond_stibp);
+}
+
+void arch_smt_update(void)
+{
+       /* Enhanced IBRS implies STIBP. No update required. */
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return;
+
+       mutex_lock(&spec_ctrl_mutex);
+
+       switch (spectre_v2_user) {
+       case SPECTRE_V2_USER_NONE:
+               break;
+       case SPECTRE_V2_USER_STRICT:
+               update_stibp_strict();
+               break;
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               update_indir_branch_cond();
+               break;
+       }
+
+       mutex_unlock(&spec_ctrl_mutex);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)    "Speculative Store Bypass: " fmt
 
@@ -483,7 +635,7 @@ enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_SECCOMP,
 };
 
-static const char *ssb_strings[] = {
+static const char * const ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
@@ -493,7 +645,7 @@ static const char *ssb_strings[] = {
 static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
-} ssb_mitigation_options[] = {
+} ssb_mitigation_options[]  __initdata = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
@@ -604,10 +756,25 @@ static void ssb_select_mitigation(void)
 #undef pr_fmt
 #define pr_fmt(fmt)     "Speculation prctl: " fmt
 
-static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+static void task_update_spec_tif(struct task_struct *tsk)
 {
-       bool update;
+       /* Force the update of the real TIF bits */
+       set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
 
+       /*
+        * Immediately update the speculation control MSRs for the current
+        * task, but for a non-current task delay setting the CPU
+        * mitigation until it is scheduled next.
+        *
+        * This can only happen for SECCOMP mitigation. For PRCTL it's
+        * always the current task.
+        */
+       if (tsk == current)
+               speculation_ctrl_update_current();
+}
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;
@@ -618,28 +785,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
-               update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+               task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
-               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
-               update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+               task_update_spec_tif(task);
                break;
        default:
                return -ERANGE;
        }
+       return 0;
+}
 
-       /*
-        * If being set on non-current task, delay setting the CPU
-        * mitigation until it is next scheduled.
-        */
-       if (task == current && update)
-               speculative_store_bypass_update_current();
-
+static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+       switch (ctrl) {
+       case PR_SPEC_ENABLE:
+               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+                       return 0;
+               /*
+                * Indirect branch speculation is always disabled in strict
+                * mode.
+                */
+               if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+                       return -EPERM;
+               task_clear_spec_ib_disable(task);
+               task_update_spec_tif(task);
+               break;
+       case PR_SPEC_DISABLE:
+       case PR_SPEC_FORCE_DISABLE:
+               /*
+                * Indirect branch speculation is always allowed when
+                * mitigation is force disabled.
+                */
+               if (spectre_v2_user == SPECTRE_V2_USER_NONE)
+                       return -EPERM;
+               if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
+                       return 0;
+               task_set_spec_ib_disable(task);
+               if (ctrl == PR_SPEC_FORCE_DISABLE)
+                       task_set_spec_ib_force_disable(task);
+               task_update_spec_tif(task);
+               break;
+       default:
+               return -ERANGE;
+       }
        return 0;
 }
 
@@ -649,6 +844,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
+       case PR_SPEC_INDIRECT_BRANCH:
+               return ib_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
@@ -659,6 +856,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
 {
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+       if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+               ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
 }
 #endif
 
@@ -681,11 +880,35 @@ static int ssb_prctl_get(struct task_struct *task)
        }
 }
 
+static int ib_prctl_get(struct task_struct *task)
+{
+       if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+               return PR_SPEC_NOT_AFFECTED;
+
+       switch (spectre_v2_user) {
+       case SPECTRE_V2_USER_NONE:
+               return PR_SPEC_ENABLE;
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               if (task_spec_ib_force_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+               if (task_spec_ib_disable(task))
+                       return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+               return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+       case SPECTRE_V2_USER_STRICT:
+               return PR_SPEC_DISABLE;
+       default:
+               return PR_SPEC_NOT_AFFECTED;
+       }
+}
+
 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
 {
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
+       case PR_SPEC_INDIRECT_BRANCH:
+               return ib_prctl_get(task);
        default:
                return -ENODEV;
        }
@@ -823,7 +1046,7 @@ early_param("l1tf", l1tf_cmdline);
 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
 
 #if IS_ENABLED(CONFIG_KVM_INTEL)
-static const char *l1tf_vmx_states[] = {
+static const char * const l1tf_vmx_states[] = {
        [VMENTER_L1D_FLUSH_AUTO]                = "auto",
        [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
        [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
@@ -839,13 +1062,14 @@ static ssize_t l1tf_show_state(char *buf)
 
        if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
            (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
-            cpu_smt_control == CPU_SMT_ENABLED))
+            sched_smt_active())) {
                return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
                               l1tf_vmx_states[l1tf_vmx_mitigation]);
+       }
 
        return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
                       l1tf_vmx_states[l1tf_vmx_mitigation],
-                      cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
+                      sched_smt_active() ? "vulnerable" : "disabled");
 }
 #else
 static ssize_t l1tf_show_state(char *buf)
@@ -854,11 +1078,39 @@ static ssize_t l1tf_show_state(char *buf)
 }
 #endif
 
+static char *stibp_state(void)
+{
+       if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
+               return "";
+
+       switch (spectre_v2_user) {
+       case SPECTRE_V2_USER_NONE:
+               return ", STIBP: disabled";
+       case SPECTRE_V2_USER_STRICT:
+               return ", STIBP: forced";
+       case SPECTRE_V2_USER_PRCTL:
+       case SPECTRE_V2_USER_SECCOMP:
+               if (static_key_enabled(&switch_to_cond_stibp))
+                       return ", STIBP: conditional";
+       }
+       return "";
+}
+
+static char *ibpb_state(void)
+{
+       if (boot_cpu_has(X86_FEATURE_IBPB)) {
+               if (static_key_enabled(&switch_mm_always_ibpb))
+                       return ", IBPB: always-on";
+               if (static_key_enabled(&switch_mm_cond_ibpb))
+                       return ", IBPB: conditional";
+               return ", IBPB: disabled";
+       }
+       return "";
+}
+
 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
 {
-       int ret;
-
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");
 
@@ -876,13 +1128,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");
 
        case X86_BUG_SPECTRE_V2:
-               ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
-                              boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+               return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+                              ibpb_state(),
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
-                              (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
+                              stibp_state(),
                               boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
                               spectre_v2_module_string());
-               return ret;
 
        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
index cbbd57ae06ee2af4b1028c95462209a2c348306f..ffb181f959d2b221759b6c85deded0930f2ff3cc 100644 (file)
@@ -1074,7 +1074,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 #endif
        c->x86_cache_alignment = c->x86_clflush_size;
 
-       memset(&c->x86_capability, 0, sizeof c->x86_capability);
+       memset(&c->x86_capability, 0, sizeof(c->x86_capability));
        c->extended_cpuid_level = 0;
 
        if (!have_cpuid_p())
@@ -1317,7 +1317,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
        c->x86_virt_bits = 32;
 #endif
        c->x86_cache_alignment = c->x86_clflush_size;
-       memset(&c->x86_capability, 0, sizeof c->x86_capability);
+       memset(&c->x86_capability, 0, sizeof(c->x86_capability));
 
        generic_identify(c);
 
index 8cb3c02980cfa72f9d6c810f84f080565c296400..36d2696c9563e88a8e354068d7e8a43d636371d3 100644 (file)
@@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs)
  * be somewhat complicated (e.g. segment offset would require an instruction
  * parser). So only support physical addresses up to page granularity for now.
  */
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
 {
        if (!(m->status & MCI_STATUS_ADDRV))
                return 0;
@@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m)
 
        return 1;
 }
+EXPORT_SYMBOL_GPL(mce_usable_address);
 
 bool mce_is_memory_error(struct mce *m)
 {
@@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m)
 }
 EXPORT_SYMBOL_GPL(mce_is_memory_error);
 
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
 {
        if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
                return false;
@@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m)
 
        return true;
 }
+EXPORT_SYMBOL_GPL(mce_is_correctable);
 
 static bool cec_add_mce(struct mce *m)
 {
@@ -2215,7 +2217,7 @@ static int mce_device_create(unsigned int cpu)
        if (dev)
                return 0;
 
-       dev = kzalloc(sizeof *dev, GFP_KERNEL);
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
        dev->id  = cpu;
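
With the two helpers exported, code outside the core machine check handler (an EDAC driver or MCE notifier, say) can classify an error before acting on it. A hedged usage sketch:

	/* Sketch of a notifier-style consumer of the newly exported helpers. */
	static void example_handle_mce(struct mce *m)
	{
		if (!mce_is_correctable(m))
			return;	/* leave uncorrected errors to the core handler */

		if (mce_is_memory_error(m) && mce_usable_address(m))
			pr_info("correctable memory error at %#llx\n", m->addr);
	}
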
index dd33c357548f11c0ac21c367d0edc20b34671218..e12454e21b8a5decc64af5e9143d2b469d6ecfbb 100644 (file)
@@ -56,7 +56,7 @@
 /* Threshold LVT offset is at MSR0xC0000410[15:12] */
 #define SMCA_THR_LVT_OFF       0xF000
 
-static bool thresholding_en;
+static bool thresholding_irq_en;
 
 static const char * const th_names[] = {
        "load_store",
@@ -534,9 +534,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
 
 set_offset:
        offset = setup_APIC_mce_threshold(offset, new);
-
-       if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
-               mce_threshold_vector = amd_threshold_interrupt;
+       if (offset == new)
+               thresholding_irq_en = true;
 
 done:
        mce_threshold_block_init(&b, offset);
@@ -1357,9 +1356,6 @@ int mce_threshold_remove_device(unsigned int cpu)
 {
        unsigned int bank;
 
-       if (!thresholding_en)
-               return 0;
-
        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                if (!(per_cpu(bank_map, cpu) & (1 << bank)))
                        continue;
@@ -1377,9 +1373,6 @@ int mce_threshold_create_device(unsigned int cpu)
        struct threshold_bank **bp;
        int err = 0;
 
-       if (!thresholding_en)
-               return 0;
-
        bp = per_cpu(threshold_banks, cpu);
        if (bp)
                return 0;
@@ -1408,9 +1401,6 @@ static __init int threshold_init_device(void)
 {
        unsigned lcpu = 0;
 
-       if (mce_threshold_vector == amd_threshold_interrupt)
-               thresholding_en = true;
-
        /* to hit CPUs online before the notifier is up */
        for_each_online_cpu(lcpu) {
                int err = mce_threshold_create_device(lcpu);
@@ -1419,6 +1409,9 @@ static __init int threshold_init_device(void)
                        return err;
        }
 
+       if (thresholding_irq_en)
+               mce_threshold_vector = amd_threshold_interrupt;
+
        return 0;
 }
 /*
index b9bc8a1a584e39590e7beecdafb47773015794f7..2637ff09d6a0da6b7e25cd3942ecb05650fd51e5 100644 (file)
@@ -666,8 +666,8 @@ static ssize_t pf_show(struct device *dev,
 }
 
 static DEVICE_ATTR_WO(reload);
-static DEVICE_ATTR(version, 0400, version_show, NULL);
-static DEVICE_ATTR(processor_flags, 0400, pf_show, NULL);
+static DEVICE_ATTR(version, 0444, version_show, NULL);
+static DEVICE_ATTR(processor_flags, 0444, pf_show, NULL);
 
 static struct attribute *mc_default_attrs[] = {
        &dev_attr_version.attr,
index 1c72f3819eb123d8fb7271a0c49cf94aceae5ee3..e81a2db42df7ba0d6fb28d9b0b2fcf0340bbd585 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/kexec.h>
+#include <linux/i8253.h>
 #include <asm/processor.h>
 #include <asm/hypervisor.h>
 #include <asm/hyperv-tlfs.h>
@@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void)
        if (efi_enabled(EFI_BOOT))
                x86_platform.get_nmi_reason = hv_get_nmi_reason;
 
+       /*
+        * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+        * counter register during PIT shutdown restarts the PIT. So it
+        * continues to interrupt at 18.2 Hz. Setting i8253_clear_counter
+        * to false tells pit_shutdown() not to zero the counter, so that
+        * the PIT really is shut down. Generation 2 VMs don't have a PIT,
+        * and setting this value has no effect.
+        */
+       i8253_clear_counter_on_shutdown = false;
+
 #if IS_ENABLED(CONFIG_HYPERV)
        /*
         * Setup the hook to get control post apic initialization.
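
The flag set above is consumed on the clockevents side. A hedged sketch of the shutdown path it gates; the exact register sequence is illustrative, see the i8253 driver change elsewhere in this series for the real code:

	/* Sketch of the gated shutdown, not verbatim driver code. */
	static int pit_shutdown_sketch(struct clock_event_device *evt)
	{
		raw_spin_lock(&i8253_lock);

		outb_p(0x30, PIT_MODE);		/* mode 0, counter 0 */

		if (i8253_clear_counter_on_shutdown) {
			/* On Hyper-V this zeroing would restart the PIT. */
			outb_p(0, PIT_CH0);
			outb_p(0, PIT_CH0);
		}

		raw_spin_unlock(&i8253_lock);
		return 0;
	}
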
index e12ee86906c6250faa05b13ae9bb9c3dc545558f..86e277f8daf420b1d6b1f82992cc536776a50865 100644 (file)
@@ -798,7 +798,7 @@ static void generic_set_all(void)
        local_irq_restore(flags);
 
        /* Use the atomic bitops to update the global mask */
-       for (count = 0; count < sizeof mask * 8; ++count) {
+       for (count = 0; count < sizeof(mask) * 8; ++count) {
                if (mask & 0x01)
                        set_bit(count, &smp_changes_mask);
                mask >>= 1;
index 40eee6cc412484470daba013f2a197439163707a..2e173d47b450d4febbb9e2028f153bc91382b915 100644 (file)
@@ -174,12 +174,12 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
        case MTRRIOC_SET_PAGE_ENTRY:
        case MTRRIOC_DEL_PAGE_ENTRY:
        case MTRRIOC_KILL_PAGE_ENTRY:
-               if (copy_from_user(&sentry, arg, sizeof sentry))
+               if (copy_from_user(&sentry, arg, sizeof(sentry)))
                        return -EFAULT;
                break;
        case MTRRIOC_GET_ENTRY:
        case MTRRIOC_GET_PAGE_ENTRY:
-               if (copy_from_user(&gentry, arg, sizeof gentry))
+               if (copy_from_user(&gentry, arg, sizeof(gentry)))
                        return -EFAULT;
                break;
 #ifdef CONFIG_COMPAT
@@ -332,7 +332,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
        switch (cmd) {
        case MTRRIOC_GET_ENTRY:
        case MTRRIOC_GET_PAGE_ENTRY:
-               if (copy_to_user(arg, &gentry, sizeof gentry))
+               if (copy_to_user(arg, &gentry, sizeof(gentry)))
                        err = -EFAULT;
                break;
 #ifdef CONFIG_COMPAT
index d9ab49bed8afce9fb2eef06d457709debcf073ba..0eda91f8eeacee4d2511e60383728d22789a948f 100644 (file)
@@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s)
 }
 early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
 
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
 {
        unsigned long long ns;
 
index 5e801c8c8ce7cfaf191505fc9feaae5414f7270b..374a52fa529694f7399ad59e0b4ec2c1d598c636 100644 (file)
@@ -213,8 +213,9 @@ static unsigned int mem32_serial_in(unsigned long addr, int offset)
  * early_pci_serial_init()
  *
  * This function is invoked when the early_printk param starts with "pciserial"
- * The rest of the param should be ",B:D.F,baud" where B, D & F describe the
- * location of a PCI device that must be a UART device.
+ * The rest of the param should be "[force],B:D.F,baud", where B, D & F describe
+ * the location of a PCI device that must be a UART device. "force" is optional
+ * and allows the use of a UART device even if its PCI class code is wrong.
  */
 static __init void early_pci_serial_init(char *s)
 {
@@ -224,17 +225,23 @@ static __init void early_pci_serial_init(char *s)
        u32 classcode, bar0;
        u16 cmdreg;
        char *e;
+       int force = 0;
 
-
-       /*
-        * First, part the param to get the BDF values
-        */
        if (*s == ',')
                ++s;
 
        if (*s == 0)
                return;
 
+       /* Force the use of a UART device with a wrong class code */
+       if (!strncmp(s, "force,", 6)) {
+               force = 1;
+               s += 6;
+       }
+
+       /*
+        * Parse the param to get the BDF values
+        */
        bus = (u8)simple_strtoul(s, &e, 16);
        s = e;
        if (*s != ':')
@@ -253,7 +260,7 @@ static __init void early_pci_serial_init(char *s)
                s++;
 
        /*
-        * Second, find the device from the BDF
+        * Find the device from the BDF
         */
        cmdreg = read_pci_config(bus, slot, func, PCI_COMMAND);
        classcode = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
@@ -264,8 +271,10 @@ static __init void early_pci_serial_init(char *s)
         */
        if (((classcode >> 16 != PCI_CLASS_COMMUNICATION_MODEM) &&
             (classcode >> 16 != PCI_CLASS_COMMUNICATION_SERIAL)) ||
-          (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */
-               return;
+          (((classcode >> 8) & 0xff) != 0x02)) /* 16550 I/F at BAR0 */ {
+               if (!force)
+                       return;
+       }
 
        /*
         * Determine if it is IO or memory mapped
@@ -289,7 +298,7 @@ static __init void early_pci_serial_init(char *s)
        }
 
        /*
-        * Lastly, initialize the hardware
+        * Initialize the hardware
         */
        if (*s) {
                if (strcmp(s, "nocfg") == 0)
index 61a949d84dfa52aff8572bfd88a3e6dc43556222..d99a8ee9e185e00be294bace7596bddd1dcc02d1 100644 (file)
@@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                        sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
                }
 
+               local_bh_disable();
                fpu->initialized = 1;
-               preempt_disable();
                fpu__restore(fpu);
-               preempt_enable();
+               local_bh_enable();
 
                return err;
        } else {
index 01ebcb6f263e39accb3f8e53ab7eb5372e0726f7..7ee8067cbf45c7a6d7739962bad43b8caadc6f08 100644 (file)
@@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 {
        unsigned long old;
        int faulted;
-       struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
 
@@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                return;
        }
 
-       trace.func = self_addr;
-       trace.depth = current->curr_ret_stack + 1;
-
-       /* Only trace if the calling function expects to */
-       if (!ftrace_graph_entry(&trace)) {
+       if (function_graph_enter(old, self_addr, frame_pointer, parent))
                *parent = old;
-               return;
-       }
-
-       if (ftrace_push_return_trace(old, self_addr, &trace.depth,
-                                    frame_pointer, parent) == -EBUSY) {
-               *parent = old;
-               return;
-       }
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
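
The removed open-coded sequence is exactly what the new function_graph_enter() helper centralizes: build the ftrace_graph_ent, consult the entry filter, push the return trace, and let a nonzero return tell the arch stub to restore the original return address. A sketch reconstructed from the lines removed above; the real core helper may differ in detail:

	int function_graph_enter_sketch(unsigned long ret, unsigned long func,
					unsigned long frame_pointer,
					unsigned long *retp)
	{
		struct ftrace_graph_ent trace;

		trace.func = func;
		trace.depth = current->curr_ret_stack + 1;

		/* Only trace if the calling function expects to. */
		if (!ftrace_graph_entry(&trace))
			return -EBUSY;

		if (ftrace_push_return_trace(ret, func, &trace.depth,
					     frame_pointer, retp) == -EBUSY)
			return -EBUSY;

		return 0;
	}
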
index 76fa3b8365985783332b99186b10cdc9a15dd169..ec6fefbfd3c0454b009a2a7a04a76edc44b8c5e8 100644 (file)
@@ -37,7 +37,6 @@ asmlinkage __visible void __init i386_start_kernel(void)
        cr4_init_shadow();
 
        sanitize_boot_params(&boot_params);
-       x86_verify_bootdata_version();
 
        x86_early_init_platform_quirks();
 
index 5dc377dc9d7b5a25027e2d6358696bff2f56d635..16b1cbd3a61e272c38a7fbbc983d852074ea9450 100644 (file)
@@ -385,7 +385,7 @@ static void __init copy_bootdata(char *real_mode_data)
         */
        sme_map_bootdata(real_mode_data);
 
-       memcpy(&boot_params, real_mode_data, sizeof boot_params);
+       memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
@@ -457,8 +457,6 @@ void __init x86_64_start_reservations(char *real_mode_data)
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));
 
-       x86_verify_bootdata_version();
-
        x86_early_init_platform_quirks();
 
        switch (boot_params.hdr.hardware_subarch) {
index 40b16b2706560e409dfe57a9817d2c1832a4a89e..6adf6e6c2933945598b32a530098b3dc1b2be690 100644 (file)
@@ -189,7 +189,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
        int len = 0, ret;
 
        while (len < RELATIVEJUMP_SIZE) {
-               ret = __copy_instruction(dest + len, src + len, real, &insn);
+               ret = __copy_instruction(dest + len, src + len, real + len, &insn);
                if (!ret || !can_boost(&insn, src + len))
                        return -EINVAL;
                len += ret;
index ab18e0884dc6fdfb6e403760921b3dc87d7ff592..6135ae8ce0364772f5cc72f73b4bb8f2ad3a8d9e 100644 (file)
@@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm)
 /*
  * If PTI is enabled, this maps the LDT into the kernelmode and
  * usermode tables for the given mm.
- *
- * There is no corresponding unmap function.  Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
  */
 static int
 map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
@@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        unsigned long va;
        bool is_vmalloc;
        spinlock_t *ptl;
-       pgd_t *pgd;
-       int i;
+       int i, nr_pages;
 
        if (!static_cpu_has(X86_FEATURE_PTI))
                return 0;
@@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        /* Check if the current mappings are sane */
        sanity_check_ldt_mapping(mm);
 
-       /*
-        * Did we already have the top level entry allocated?  We can't
-        * use pgd_none() for this because it doens't do anything on
-        * 4-level page table kernels.
-        */
-       pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
        is_vmalloc = is_vmalloc_addr(ldt->entries);
 
-       for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+       nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+       for (i = 0; i < nr_pages; i++) {
                unsigned long offset = i << PAGE_SHIFT;
                const void *src = (char *)ldt->entries + offset;
                unsigned long pfn;
@@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
        /* Propagate LDT mapping to the user page-table */
        map_ldt_struct_to_user(mm);
 
-       va = (unsigned long)ldt_slot_va(slot);
-       flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
        ldt->slot = slot;
        return 0;
 }
 
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+       unsigned long va;
+       int i, nr_pages;
+
+       if (!ldt)
+               return;
+
+       /* LDT map/unmap is only required for PTI */
+       if (!static_cpu_has(X86_FEATURE_PTI))
+               return;
+
+       nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+       for (i = 0; i < nr_pages; i++) {
+               unsigned long offset = i << PAGE_SHIFT;
+               spinlock_t *ptl;
+               pte_t *ptep;
+
+               va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+               ptep = get_locked_pte(mm, va, &ptl);
+               pte_clear(mm, va, ptep);
+               pte_unmap_unlock(ptep, ptl);
+       }
+
+       va = (unsigned long)ldt_slot_va(ldt->slot);
+       flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
 #else /* !CONFIG_PAGE_TABLE_ISOLATION */
 
 static int
@@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
 {
        return 0;
 }
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
 #endif /* CONFIG_PAGE_TABLE_ISOLATION */
 
 static void free_ldt_pgtables(struct mm_struct *mm)
@@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
        }
 
        install_ldt(mm, new_ldt);
+       unmap_ldt_struct(mm, old_ldt);
        free_ldt_struct(old_ldt);
        error = 0;
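
With PTI, the LDT now gets an explicit unmap when it is replaced, instead of leaving stale PTEs behind until the slot is reused. Both map_ldt_struct() and unmap_ldt_struct() size their loops with the same DIV_ROUND_UP() computation; a small runnable demo of that page-count arithmetic (the entry counts are illustrative, not the kernel's LDT limits):

    /* Runnable demo of the shared page-count computation. */
    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define PAGE_SIZE      4096
    #define LDT_ENTRY_SIZE 8

    int main(void)
    {
        for (int nr_entries = 1; nr_entries <= 8192; nr_entries *= 8) {
            int nr_pages = DIV_ROUND_UP(nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
            printf("%5d entries -> %2d page(s) to map or unmap\n",
                   nr_entries, nr_pages);
        }
        return 0;
    }
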
 
index ef688804f80d33088fef15448996a97f69e2b193..4588414e2561ccc9d0d883c01fc400f2e9bdd1bf 100644 (file)
@@ -115,14 +115,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
                        err = -EBADF;
                        break;
                }
-               if (copy_from_user(&regs, uregs, sizeof regs)) {
+               if (copy_from_user(&regs, uregs, sizeof(regs))) {
                        err = -EFAULT;
                        break;
                }
                err = rdmsr_safe_regs_on_cpu(cpu, regs);
                if (err)
                        break;
-               if (copy_to_user(uregs, &regs, sizeof regs))
+               if (copy_to_user(uregs, &regs, sizeof(regs)))
                        err = -EFAULT;
                break;
 
@@ -131,14 +131,14 @@ static long msr_ioctl(struct file *file, unsigned int ioc, unsigned long arg)
                        err = -EBADF;
                        break;
                }
-               if (copy_from_user(&regs, uregs, sizeof regs)) {
+               if (copy_from_user(&regs, uregs, sizeof(regs))) {
                        err = -EFAULT;
                        break;
                }
                err = wrmsr_safe_regs_on_cpu(cpu, regs);
                if (err)
                        break;
-               if (copy_to_user(uregs, &regs, sizeof regs))
+               if (copy_to_user(uregs, &regs, sizeof(regs)))
                        err = -EFAULT;
                break;
 
index e4d4df37922a3c7eb91adc67903d1a2bf59ba032..c0e0101133f352ba6a8ac8369eef15a3e5301be3 100644 (file)
@@ -56,17 +56,6 @@ asm (".pushsection .entry.text, \"ax\"\n"
      ".type _paravirt_nop, @function\n\t"
      ".popsection");
 
-/* identity function, which can be inlined */
-u32 notrace _paravirt_ident_32(u32 x)
-{
-       return x;
-}
-
-u64 notrace _paravirt_ident_64(u64 x)
-{
-       return x;
-}
-
 void __init default_banner(void)
 {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -102,6 +91,12 @@ static unsigned paravirt_patch_call(void *insnbuf, const void *target,
 }
 
 #ifdef CONFIG_PARAVIRT_XXL
+/* identity function, which can be inlined */
+u64 notrace _paravirt_ident_64(u64 x)
+{
+       return x;
+}
+
 static unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                                   unsigned long addr, unsigned len)
 {
@@ -146,13 +141,11 @@ unsigned paravirt_patch_default(u8 type, void *insnbuf,
        else if (opfunc == _paravirt_nop)
                ret = 0;
 
+#ifdef CONFIG_PARAVIRT_XXL
        /* identity functions just return their single argument */
-       else if (opfunc == _paravirt_ident_32)
-               ret = paravirt_patch_ident_32(insnbuf, len);
        else if (opfunc == _paravirt_ident_64)
                ret = paravirt_patch_ident_64(insnbuf, len);
 
-#ifdef CONFIG_PARAVIRT_XXL
        else if (type == PARAVIRT_PATCH(cpu.iret) ||
                 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
@@ -309,13 +302,8 @@ struct pv_info pv_info = {
 #endif
 };
 
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
-/* 32-bit pagetable entries */
-#define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
-#else
 /* 64-bit pagetable entries */
 #define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
-#endif
 
 struct paravirt_patch_template pv_ops = {
        /* Init ops. */
@@ -483,5 +471,5 @@ NOKPROBE_SYMBOL(native_set_debugreg);
 NOKPROBE_SYMBOL(native_load_idt);
 #endif
 
-EXPORT_SYMBOL_GPL(pv_ops);
+EXPORT_SYMBOL(pv_ops);
 EXPORT_SYMBOL_GPL(pv_info);
index 6368c22fa1fa3b438627d39a63cd738106cb6074..de138d3912e45972b432832793a015b39886c2bc 100644 (file)
@@ -10,24 +10,18 @@ DEF_NATIVE(cpu, iret, "iret");
 DEF_NATIVE(mmu, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(mmu, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(mmu, read_cr3, "mov %cr3, %eax");
-#endif
-
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
-
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
-{
-       /* arg in %eax, return in %eax */
-       return 0;
-}
 
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 {
        /* arg in %edx:%eax, return in %edx:%eax */
        return 0;
 }
+#endif
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%eax)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+#endif
 
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
index 7ca9cb726f4d669e110f7de0ba0086f5e6f0526d..9d9e04b310773789f8894a4900f6e1c6d0ab2e6d 100644 (file)
@@ -15,27 +15,19 @@ DEF_NATIVE(cpu, wbinvd, "wbinvd");
 
 DEF_NATIVE(cpu, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(cpu, swapgs, "swapgs");
-#endif
-
-DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
-#if defined(CONFIG_PARAVIRT_SPINLOCKS)
-DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
-DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
-#endif
-
-unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
-{
-       return paravirt_patch_insns(insnbuf, len,
-                                   start__mov32, end__mov32);
-}
-
 unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
 {
        return paravirt_patch_insns(insnbuf, len,
                                    start__mov64, end__mov64);
 }
+#endif
+
+#if defined(CONFIG_PARAVIRT_SPINLOCKS)
+DEF_NATIVE(lock, queued_spin_unlock, "movb $0, (%rdi)");
+DEF_NATIVE(lock, vcpu_is_preempted, "xor %eax, %eax");
+#endif
 
 extern bool pv_is_native_spin_unlock(void);
 extern bool pv_is_native_vcpu_is_preempted(void);
index c93fcfdf1673418a352c8eb1085504d8c4ee4ffd..7d31192296a87d09104ab605540ce4264d56f6bf 100644 (file)
@@ -40,6 +40,8 @@
 #include <asm/prctl.h>
 #include <asm/spec-ctrl.h>
 
+#include "process.h"
+
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -252,11 +254,12 @@ void arch_setup_new_exec(void)
                enable_cpuid();
 }
 
-static inline void switch_to_bitmap(struct tss_struct *tss,
-                                   struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
                                    struct thread_struct *next,
                                    unsigned long tifp, unsigned long tifn)
 {
+       struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
        if (tifn & _TIF_IO_BITMAP) {
                /*
                 * Copy the relevant range of the IO bitmap.
@@ -395,32 +398,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
        wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
 }
 
-static __always_inline void intel_set_ssb_state(unsigned long tifn)
+/*
+ * Update the MSRs managing speculation control during context switch.
+ *
+ * tifp: Previous task's thread flags
+ * tifn: Next task's thread flags
+ */
+static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+                                                     unsigned long tifn)
 {
-       u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+       unsigned long tif_diff = tifp ^ tifn;
+       u64 msr = x86_spec_ctrl_base;
+       bool updmsr = false;
+
+       /*
+        * If TIF_SSBD is different, select the proper mitigation
+        * method. Note that if SSBD mitigation is disabled or permanently
+        * enabled this branch can't be taken because nothing can set
+        * TIF_SSBD.
+        */
+       if (tif_diff & _TIF_SSBD) {
+               if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
+                       amd_set_ssb_virt_state(tifn);
+               } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+                       amd_set_core_ssb_state(tifn);
+               } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+                          static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+                       msr |= ssbd_tif_to_spec_ctrl(tifn);
+                       updmsr  = true;
+               }
+       }
+
+       /*
+        * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+        * otherwise avoid the MSR write.
+        */
+       if (IS_ENABLED(CONFIG_SMP) &&
+           static_branch_unlikely(&switch_to_cond_stibp)) {
+               updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+               msr |= stibp_tif_to_spec_ctrl(tifn);
+       }
 
-       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+       if (updmsr)
+               wrmsrl(MSR_IA32_SPEC_CTRL, msr);
 }
 
-static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
 {
-       if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
-               amd_set_ssb_virt_state(tifn);
-       else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
-               amd_set_core_ssb_state(tifn);
-       else
-               intel_set_ssb_state(tifn);
+       if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
+               if (task_spec_ssb_disable(tsk))
+                       set_tsk_thread_flag(tsk, TIF_SSBD);
+               else
+                       clear_tsk_thread_flag(tsk, TIF_SSBD);
+
+               if (task_spec_ib_disable(tsk))
+                       set_tsk_thread_flag(tsk, TIF_SPEC_IB);
+               else
+                       clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
+       }
+       /* Return the updated threadinfo flags */
+       return task_thread_info(tsk)->flags;
 }
 
-void speculative_store_bypass_update(unsigned long tif)
+void speculation_ctrl_update(unsigned long tif)
 {
+       /* Forced update. Make sure all relevant TIF flags are different */
        preempt_disable();
-       __speculative_store_bypass_update(tif);
+       __speculation_ctrl_update(~tif, tif);
        preempt_enable();
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-                     struct tss_struct *tss)
+/* Called from seccomp/prctl update */
+void speculation_ctrl_update_current(void)
+{
+       preempt_disable();
+       speculation_ctrl_update(speculation_ctrl_update_tif(current));
+       preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev, *next;
        unsigned long tifp, tifn;
@@ -430,7 +486,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
        tifn = READ_ONCE(task_thread_info(next_p)->flags);
        tifp = READ_ONCE(task_thread_info(prev_p)->flags);
-       switch_to_bitmap(tss, prev, next, tifp, tifn);
+       switch_to_bitmap(prev, next, tifp, tifn);
 
        propagate_user_return_notify(prev_p, next_p);
 
@@ -451,8 +507,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
 
-       if ((tifp ^ tifn) & _TIF_SSBD)
-               __speculative_store_bypass_update(tifn);
+       if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
+               __speculation_ctrl_update(tifp, tifn);
+       } else {
+               speculation_ctrl_update_tif(prev_p);
+               tifn = speculation_ctrl_update_tif(next_p);
+
+               /* Enforce MSR update to ensure consistent state */
+               __speculation_ctrl_update(~tifn, tifn);
+       }
 }
 
 /*
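
speculation_ctrl_update() passes ~tif as the "previous" flags, so the XOR inside __speculation_ctrl_update() sees every bit as changed and all mitigation branches are (re)evaluated. A tiny demo of that force-update trick, with made-up flag bit positions:

    /* Demo of the force-update trick; flag bits are illustrative. */
    #include <stdio.h>

    #define TIF_SSBD    (1UL << 0)
    #define TIF_SPEC_IB (1UL << 1)

    int main(void)
    {
        unsigned long tifn = TIF_SPEC_IB;   /* next task's flags       */
        unsigned long tifp = ~tifn;         /* forced: every bit flips */
        unsigned long tif_diff = tifp ^ tifn;

        printf("SSBD branch taken:    %d\n", (tif_diff & TIF_SSBD) != 0);
        printf("SPEC_IB branch taken: %d\n", (tif_diff & TIF_SPEC_IB) != 0);
        return 0;
    }
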
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644 (file)
index 0000000..898e97c
--- /dev/null
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Code shared between 32 and 64 bit
+
+#include <asm/spec-ctrl.h>
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+/*
+ * This needs to be inline to optimize for the common case where no extra
+ * work needs to be done.
+ */
+static inline void switch_to_extra(struct task_struct *prev,
+                                  struct task_struct *next)
+{
+       unsigned long next_tif = task_thread_info(next)->flags;
+       unsigned long prev_tif = task_thread_info(prev)->flags;
+
+       if (IS_ENABLED(CONFIG_SMP)) {
+               /*
+                * Avoid __switch_to_xtra() invocation when conditional
+                * STIBP is disabled and the only different bit is
+                * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
+                * in the TIF_WORK_CTXSW masks.
+                */
+               if (!static_branch_likely(&switch_to_cond_stibp)) {
+                       prev_tif &= ~_TIF_SPEC_IB;
+                       next_tif &= ~_TIF_SPEC_IB;
+               }
+       }
+
+       /*
+        * __switch_to_xtra() handles debug registers, i/o bitmaps,
+        * speculation mitigations etc.
+        */
+       if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
+                    prev_tif & _TIF_WORK_CTXSW_PREV))
+               __switch_to_xtra(prev, next);
+}
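
The fast-path filter in the new header masks TIF_SPEC_IB out of both snapshots when conditional STIBP is disabled, so that bit alone can no longer push the switch into __switch_to_xtra(). A compact userspace sketch of the idea (flag values and the reduced work mask are illustrative):

    /* Sketch of the fast-path filter; mask and flags are illustrative. */
    #include <stdio.h>

    #define _TIF_SPEC_IB    (1UL << 1)
    #define _TIF_WORK_CTXSW (_TIF_SPEC_IB /* | debug, io-bitmap bits... */)

    int main(void)
    {
        unsigned long prev_tif = _TIF_SPEC_IB, next_tif = 0;
        int cond_stibp = 0;                 /* static branch disabled */

        if (!cond_stibp) {
            prev_tif &= ~_TIF_SPEC_IB;      /* bit can no longer force */
            next_tif &= ~_TIF_SPEC_IB;      /* the slow path           */
        }
        printf("slow path taken: %d\n",
               ((prev_tif | next_tif) & _TIF_WORK_CTXSW) != 0);
        return 0;
    }
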
index 5046a3c9dec2feaa6761e38c9947e90ad4030efb..d3e593eb189f0ba8545d960c287c9ebb224888ed 100644 (file)
@@ -59,6 +59,8 @@
 #include <asm/intel_rdt_sched.h>
 #include <asm/proto.h>
 
+#include "process.h"
+
 void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
@@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);
 
-       /*
-        * Now maybe handle debug registers and/or IO bitmaps
-        */
-       if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
-                    task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
-               __switch_to_xtra(prev_p, next_p, tss);
+       switch_to_extra(prev_p, next_p);
 
        /*
         * Leave lazy mode, flushing any hypercalls made here.
index 31b4755369f084575f6b3a0ec30b340392106f70..bbfbf017065c387c76f3f7c6394f34816fb7a4f5 100644 (file)
@@ -60,6 +60,8 @@
 #include <asm/unistd_32_ia32.h>
 #endif
 
+#include "process.h"
+
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
 {
@@ -553,7 +555,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);
@@ -617,12 +618,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /* Reload sp0. */
        update_task_stack(next_p);
 
-       /*
-        * Now maybe reload the debug registers and handle I/O bitmaps
-        */
-       if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
-                    task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
-               __switch_to_xtra(prev_p, next_p, tss);
+       switch_to_extra(prev_p, next_p);
 
 #ifdef CONFIG_XEN_PV
        /*
@@ -701,10 +697,10 @@ static void __set_personality_x32(void)
                current->mm->context.ia32_compat = TIF_X32;
        current->personality &= ~READ_IMPLIES_EXEC;
        /*
-        * in_compat_syscall() uses the presence of the x32 syscall bit
+        * in_32bit_syscall() uses the presence of the x32 syscall bit
         * flag to determine compat status.  The x86 mmap() code relies on
         * the syscall bitness so set x32 syscall bit right here to make
-        * in_compat_syscall() work during exec().
+        * in_32bit_syscall() work during exec().
         *
         * Pretend to come from an x32 execve.
         */
index b74e7bfed6ab40826b782f495324e2782963abb4..d494b9bfe618cbc974b0d154bb401fa9dc9b6080 100644 (file)
@@ -1280,23 +1280,6 @@ void __init setup_arch(char **cmdline_p)
        unwind_init();
 }
 
-/*
- * From boot protocol 2.14 onwards we expect the bootloader to set the
- * version to "0x8000 | <used version>". In case we find a version >= 2.14
- * without the 0x8000 we assume the boot loader supports 2.13 only and
- * reset the version accordingly. The 0x8000 flag is removed in any case.
- */
-void __init x86_verify_bootdata_version(void)
-{
-       if (boot_params.hdr.version & VERSION_WRITTEN)
-               boot_params.hdr.version &= ~VERSION_WRITTEN;
-       else if (boot_params.hdr.version >= 0x020e)
-               boot_params.hdr.version = 0x020d;
-
-       if (boot_params.hdr.version < 0x020e)
-               boot_params.hdr.acpi_rsdp_addr = 0;
-}
-
 #ifdef CONFIG_X86_32
 
 static struct resource video_ram_resource = {
index 6a78d4b36a7974ad4d4b4d75500c566529e4d227..f7476ce23b6e0f03cea661324f21f7a89e31295f 100644 (file)
@@ -105,7 +105,7 @@ out:
 static void find_start_end(unsigned long addr, unsigned long flags,
                unsigned long *begin, unsigned long *end)
 {
-       if (!in_compat_syscall() && (flags & MAP_32BIT)) {
+       if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
                /* This is usually needed to map code in the small
                   model, so it needs to be in the first 31bit. Limit
                   it to that.  This means we need to move the
@@ -122,7 +122,7 @@ static void find_start_end(unsigned long addr, unsigned long flags,
        }
 
        *begin  = get_mmap_base(1);
-       if (in_compat_syscall())
+       if (in_32bit_syscall())
                *end = task_size_32bit();
        else
                *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
@@ -193,7 +193,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                return addr;
 
        /* for MAP_32BIT mappings we force the legacy mmap base */
-       if (!in_compat_syscall() && (flags & MAP_32BIT))
+       if (!in_32bit_syscall() && (flags & MAP_32BIT))
                goto bottomup;
 
        /* requesting a specific address */
@@ -217,9 +217,10 @@ get_unmapped_area:
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         *
-        * !in_compat_syscall() check to avoid high addresses for x32.
+        * !in_32bit_syscall() check to avoid high addresses for x32
+        * (and make it a no-op on native i386).
         */
-       if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+       if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
 
        info.align_mask = 0;
index 8f6dcd88202e89d3c7630d034f6d044a712a9fb1..9b7c4ca8f0a7358ce19741de1c875a75e8f52f8e 100644 (file)
@@ -306,7 +306,7 @@ __visible void __noreturn handle_stack_overflow(const char *message,
        die(message, regs, 0);
 
        /* Be absolutely certain we don't return. */
-       panic(message);
+       panic("%s", message);
 }
 #endif
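
The panic() fix above is the classic format-string hardening: a runtime message must be passed as data, never as the format itself, or any '%' it happens to contain gets interpreted. A minimal demo with printf() standing in for panic():

    /* printf() stands in for panic(); the message is untrusted data. */
    #include <stdio.h>

    int main(void)
    {
        const char *message = "corrupt stack at %p (100%% fatal)";

        /* printf(message); would misinterpret %p and read a bogus vararg */
        printf("%s\n", message);   /* safe: message treated as plain data */
        return 0;
    }
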
 
index 1eae5af491c278367630f805c48d78cc32f29cbb..891a75dbc131323b70e9776998cc0b437b4fe033 100644 (file)
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
-       unsigned long flags = native_save_fl();
-
-       if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
-               flags &= ~X86_EFLAGS_IF;
-       return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
-       if (flags & X86_EFLAGS_IF)
-               flags &= ~X86_EFLAGS_AC;
-       else
-               flags |= X86_EFLAGS_AC;
-       native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
-       unsigned long flags = native_save_fl();
-
-       native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
-       unsigned long flags = native_save_fl();
-
-       native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
-                                 unsigned long addr, unsigned len)
-{
-       switch (type) {
-       case PARAVIRT_PATCH(irq.irq_enable):
-       case PARAVIRT_PATCH(irq.irq_disable):
-       case PARAVIRT_PATCH(irq.save_fl):
-       case PARAVIRT_PATCH(irq.restore_fl):
-               return paravirt_patch_default(type, ibuf, addr, len);
-       default:
-               return native_patch(type, ibuf, addr, len);
-       }
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
 {
        void __iomem *address;
        unsigned int cap, ctl, cfg;
@@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void)
        }
 #endif
 
-       if (cap & ctl & (1 << 4)) {
-               /* Setup irq ops and turn on vSMP  IRQ fastpath handling */
-               pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
-               pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
-               pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
-               pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
-               pv_ops.init.patch = vsmp_patch;
-               ctl &= ~(1 << 4);
-       }
        writel(ctl, address + 4);
        ctl = readl(address + 4);
        pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
 
        early_iounmap(address, 8);
 }
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
 static int is_vsmp = -1;
 
 static void __init detect_vsmp_box(void)
@@ -164,11 +91,14 @@ static int is_vsmp_box(void)
 {
        return 0;
 }
+static void __init set_vsmp_ctl(void)
+{
+}
 #endif
 
 static void __init vsmp_cap_cpus(void)
 {
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
        void __iomem *address;
        unsigned int cfg, topology, node_shift, maxcpus;
 
@@ -221,6 +151,6 @@ void __init vsmp_init(void)
 
        vsmp_cap_cpus();
 
-       set_vsmp_pv_ops();
+       set_vsmp_ctl();
        return;
 }
index 34edf198708f76883d5fa099bfe3683dbf52c753..78e430f4e15cfa3b745882aa5de21fdc14f3277d 100644 (file)
@@ -1509,7 +1509,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
                return emulate_gp(ctxt, index << 3 | 0x2);
 
        addr = dt.address + index * 8;
-       return linear_read_system(ctxt, addr, desc, sizeof *desc);
+       return linear_read_system(ctxt, addr, desc, sizeof(*desc));
 }
 
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
@@ -1522,7 +1522,7 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                struct desc_struct desc;
                u16 sel;
 
-               memset (dt, 0, sizeof *dt);
+               memset(dt, 0, sizeof(*dt));
                if (!ops->get_segment(ctxt, &sel, &desc, &base3,
                                      VCPU_SREG_LDTR))
                        return;
@@ -1586,7 +1586,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       return linear_write_system(ctxt, addr, desc, sizeof *desc);
+       return linear_write_system(ctxt, addr, desc, sizeof(*desc));
 }
 
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
@@ -1604,7 +1604,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        u16 dummy;
        u32 base3 = 0;
 
-       memset(&seg_desc, 0, sizeof seg_desc);
+       memset(&seg_desc, 0, sizeof(seg_desc));
 
        if (ctxt->mode == X86EMUL_MODE_REAL) {
                /* set real mode segment descriptor (keep limit etc. for
@@ -3075,17 +3075,17 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
        int ret;
        u32 new_tss_base = get_desc_base(new_desc);
 
-       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
        save_state_to_tss16(ctxt, &tss_seg);
 
-       ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
@@ -3094,7 +3094,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 
                ret = linear_write_system(ctxt, new_tss_base,
                                          &tss_seg.prev_task_link,
-                                         sizeof tss_seg.prev_task_link);
+                                         sizeof(tss_seg.prev_task_link));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
@@ -3216,7 +3216,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
        u32 eip_offset = offsetof(struct tss_segment_32, eip);
        u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
 
-       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
+       ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof(tss_seg));
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
@@ -3228,7 +3228,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
-       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
+       ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof(tss_seg));
        if (ret != X86EMUL_CONTINUE)
                return ret;
 
@@ -3237,7 +3237,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 
                ret = linear_write_system(ctxt, new_tss_base,
                                          &tss_seg.prev_task_link,
-                                         sizeof tss_seg.prev_task_link);
+                                         sizeof(tss_seg.prev_task_link));
                if (ret != X86EMUL_CONTINUE)
                        return ret;
        }
index 3cd227ff807fadd27284e6bc81ef2c6f8a60f3d1..c4533d05c214b9e5d776217b5f9e6cec885d31ef 100644 (file)
@@ -55,7 +55,7 @@
 #define PRIo64 "o"
 
 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
 
 /* 14 is the version for Xeon and Pentium 8.4.8 */
 #define APIC_VERSION                   (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
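
Defining the disabled debug macro as do {} while (0) rather than as nothing keeps the expansion a single statement that still demands a trailing semicolon, avoiding empty-body warnings and dangling-else surprises in callers. A small demo of the pattern:

    /* Why the empty macro becomes do {} while (0). */
    #include <stdio.h>

    #define apic_debug(fmt, ...) do {} while (0)  /* one statement, wants ';' */

    int main(void)
    {
        int apic_enabled = 1;

        if (apic_enabled)
            apic_debug("enabled\n");    /* not a bare ';' body, so no
                                         * -Wempty-body warning and no
                                         * dangling-else ambiguity */
        else
            puts("apic off");
        return 0;
    }
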
@@ -576,6 +576,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);
 
+       if (unlikely(!map)) {
+               count = -EOPNOTSUPP;
+               goto out;
+       }
+
        if (min > map->max_apic_id)
                goto out;
        /* Bits above cluster_size are masked in the caller.  */
@@ -2409,7 +2414,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
        r = kvm_apic_state_fixup(vcpu, s, true);
        if (r)
                return r;
-       memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
+       memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));
 
        recalculate_apic_map(vcpu->kvm);
        kvm_apic_set_version(vcpu);
index cf5f572f230520b4f00694b0afe8ec6738e89868..7c03c0f35444ff9a112a2f3406a894477eaddc2c 100644 (file)
@@ -5074,9 +5074,9 @@ static bool need_remote_flush(u64 old, u64 new)
 }
 
 static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
-                                   const u8 *new, int *bytes)
+                                   int *bytes)
 {
-       u64 gentry;
+       u64 gentry = 0;
        int r;
 
        /*
@@ -5088,22 +5088,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
                /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
                *gpa &= ~(gpa_t)7;
                *bytes = 8;
-               r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
-               if (r)
-                       gentry = 0;
-               new = (const u8 *)&gentry;
        }
 
-       switch (*bytes) {
-       case 4:
-               gentry = *(const u32 *)new;
-               break;
-       case 8:
-               gentry = *(const u64 *)new;
-               break;
-       default:
-               gentry = 0;
-               break;
+       if (*bytes == 4 || *bytes == 8) {
+               r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
+               if (r)
+                       gentry = 0;
        }
 
        return gentry;
@@ -5207,8 +5197,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
        pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 
-       gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
-
        /*
         * No need to care whether the memory allocation is successful
         * or not since pte prefetch is skipped if it does not have
@@ -5217,6 +5205,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        mmu_topup_memory_caches(vcpu);
 
        spin_lock(&vcpu->kvm->mmu_lock);
+
+       gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
+
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
index 0e21ccc46792f6bcc6665ff63979f23979aa1829..cc6467b35a85f6cec9300011cfa0c464574ed5d3 100644 (file)
@@ -1446,7 +1446,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
        return vcpu->arch.tsc_offset;
 }
 
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;
@@ -1464,6 +1464,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+       return svm->vmcb->control.tsc_offset;
 }
 
 static void avic_init_vmcb(struct vcpu_svm *svm)
@@ -1664,20 +1665,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
 static int avic_init_access_page(struct kvm_vcpu *vcpu)
 {
        struct kvm *kvm = vcpu->kvm;
-       int ret;
+       int ret = 0;
 
+       mutex_lock(&kvm->slots_lock);
        if (kvm->arch.apic_access_page_done)
-               return 0;
+               goto out;
 
-       ret = x86_set_memory_region(kvm,
-                                   APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
-                                   APIC_DEFAULT_PHYS_BASE,
-                                   PAGE_SIZE);
+       ret = __x86_set_memory_region(kvm,
+                                     APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+                                     APIC_DEFAULT_PHYS_BASE,
+                                     PAGE_SIZE);
        if (ret)
-               return ret;
+               goto out;
 
        kvm->arch.apic_access_page_done = true;
-       return 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return ret;
 }
 
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
@@ -2189,21 +2193,31 @@ out:
        return ERR_PTR(err);
 }
 
+static void svm_clear_current_vmcb(struct vmcb *vmcb)
+{
+       int i;
+
+       for_each_online_cpu(i)
+               cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
+}
+
 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       /*
+        * The vmcb page can be recycled, causing a false negative in
+        * svm_vcpu_load(). So, ensure that no logical CPU has this
+        * vmcb page recorded as its current vmcb.
+        */
+       svm_clear_current_vmcb(svm->vmcb);
+
        __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
        __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        __free_page(virt_to_page(svm->nested.hsave));
        __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
-       /*
-        * The vmcb page can be recycled, causing a false negative in
-        * svm_vcpu_load(). So do a full IBPB now.
-        */
-       indirect_branch_prediction_barrier();
 }
 
 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
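
The new svm_clear_current_vmcb() replaces the unconditional IBPB that used to sit in svm_free_vcpu() with targeted invalidation: a compare-and-swap clears only the per-CPU cache slots that still point at the vmcb being freed. A userspace sketch of that pattern, modeling the per-CPU pointers as a plain array:

    /* Targeted-invalidation sketch; per-CPU slots modeled as an array. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS 4

    int main(void)
    {
        int vmcb = 42, other = 7;
        _Atomic(int *) current_vmcb[NR_CPUS] = { &vmcb, &other, &vmcb, NULL };

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            int *expected = &vmcb;  /* clear only slots caching &vmcb */
            atomic_compare_exchange_strong(&current_vmcb[cpu], &expected,
                                           (int *)NULL);
        }
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
            printf("cpu%d: %p\n", cpu, (void *)current_vmcb[cpu]);
        return 0;
    }

Slots caching unrelated vmcbs are left alone, which is why the CAS form is preferable to a blanket clear.
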
@@ -7149,7 +7163,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .has_wbinvd_exit = svm_has_wbinvd_exit,
 
        .read_l1_tsc_offset = svm_read_l1_tsc_offset,
-       .write_tsc_offset = svm_write_tsc_offset,
+       .write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
        .set_tdp_cr3 = set_tdp_cr3,
 
index 4555077d69ce204148facb207f46ddf882483231..02edd9960e9d94cf8cbac80ea1bfccc5673f3089 100644 (file)
@@ -174,6 +174,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
  * refer SDM volume 3b section 21.6.13 & 22.1.3.
  */
 static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
+module_param(ple_gap, uint, 0444);
 
 static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, uint, 0444);
@@ -984,6 +985,7 @@ struct vcpu_vmx {
        struct shared_msr_entry *guest_msrs;
        int                   nmsrs;
        int                   save_nmsrs;
+       bool                  guest_msrs_dirty;
        unsigned long         host_idt_base;
 #ifdef CONFIG_X86_64
        u64                   msr_host_kernel_gs_base;
@@ -1306,7 +1308,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
                                            u16 error_code);
 static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
                                                          u32 msr, int type);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
@@ -1610,12 +1612,6 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       /* We don't support disabling the feature for simplicity. */
-       if (vmx->nested.enlightened_vmcs_enabled)
-               return 0;
-
-       vmx->nested.enlightened_vmcs_enabled = true;
-
        /*
         * vmcs_version represents the range of supported Enlightened VMCS
         * versions: lower 8 bits is the minimal version, higher 8 bits is the
@@ -1625,6 +1621,12 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
        if (vmcs_version)
                *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
 
+       /* We don't support disabling the feature for simplicity. */
+       if (vmx->nested.enlightened_vmcs_enabled)
+               return 0;
+
+       vmx->nested.enlightened_vmcs_enabled = true;
+
        vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
        vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
        vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
@@ -2897,6 +2899,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 
        vmx->req_immediate_exit = false;
 
+       /*
+        * Note that guest MSRs to be saved/restored can also be changed
+        * when guest state is loaded. This happens when the guest
+        * transitions to/from long mode by setting MSR_EFER.LMA.
+        */
+       if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) {
+               vmx->guest_msrs_dirty = false;
+               for (i = 0; i < vmx->save_nmsrs; ++i)
+                       kvm_set_shared_msr(vmx->guest_msrs[i].index,
+                                          vmx->guest_msrs[i].data,
+                                          vmx->guest_msrs[i].mask);
+
+       }
+
        if (vmx->loaded_cpu_state)
                return;
 
@@ -2957,11 +2973,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
                vmcs_writel(HOST_GS_BASE, gs_base);
                host_state->gs_base = gs_base;
        }
-
-       for (i = 0; i < vmx->save_nmsrs; ++i)
-               kvm_set_shared_msr(vmx->guest_msrs[i].index,
-                                  vmx->guest_msrs[i].data,
-                                  vmx->guest_msrs[i].mask);
 }
 
 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
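
The guest_msrs_dirty flag added above implements lazy MSR writeback: setup_msrs() only marks the shared-MSR set stale, and the next switch to guest state writes it out exactly once, covering the case where the set changes (e.g. an EFER.LMA toggle) while state is already loaded. A self-contained sketch of the dirty-flag pattern (stubs, not the KVM API):

    /* Dirty-flag sketch; stubs, not the KVM API. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool loaded_cpu_state, guest_msrs_dirty;

    static void setup_msrs(void)            /* e.g. guest toggled EFER.LMA */
    {
        guest_msrs_dirty = true;
    }

    static void prepare_switch_to_guest(void)
    {
        if (!loaded_cpu_state || guest_msrs_dirty) {
            guest_msrs_dirty = false;
            puts("writing shared guest MSRs"); /* kvm_set_shared_msr() loop */
        }
        loaded_cpu_state = true;
    }

    int main(void)
    {
        prepare_switch_to_guest();  /* first entry: writes the MSRs */
        prepare_switch_to_guest();  /* clean: skips the writes      */
        setup_msrs();               /* MSR set changed while loaded */
        prepare_switch_to_guest();  /* dirty again: writes the MSRs */
        return 0;
    }
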
@@ -3436,6 +3447,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                move_msr_up(vmx, index, save_nmsrs++);
 
        vmx->save_nmsrs = save_nmsrs;
+       vmx->guest_msrs_dirty = true;
 
        if (cpu_has_vmx_msr_bitmap())
                vmx_update_msr_bitmap(&vmx->vcpu);
@@ -3452,11 +3464,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
        return vcpu->arch.tsc_offset;
 }
 
-/*
- * writes 'offset' into guest's timestamp counter offset register
- */
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
+       u64 active_offset = offset;
        if (is_guest_mode(vcpu)) {
                /*
                 * We're here if L1 chose not to trap WRMSR to TSC. According
@@ -3464,17 +3474,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
                 * set for L2 remains unchanged, and still needs to be added
                 * to the newly set TSC to get L2's TSC.
                 */
-               struct vmcs12 *vmcs12;
-               /* recalculate vmcs02.TSC_OFFSET: */
-               vmcs12 = get_vmcs12(vcpu);
-               vmcs_write64(TSC_OFFSET, offset +
-                       (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
-                        vmcs12->tsc_offset : 0));
+               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+               if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
+                       active_offset += vmcs12->tsc_offset;
        } else {
                trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                           vmcs_read64(TSC_OFFSET), offset);
-               vmcs_write64(TSC_OFFSET, offset);
        }
+
+       vmcs_write64(TSC_OFFSET, active_offset);
+       return active_offset;
 }
 
 /*
@@ -5944,7 +5953,7 @@ static void free_vpid(int vpid)
        spin_unlock(&vmx_vpid_lock);
 }
 
-static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
                                                          u32 msr, int type)
 {
        int f = sizeof(unsigned long);
@@ -5982,7 +5991,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit
        }
 }
 
-static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
                                                         u32 msr, int type)
 {
        int f = sizeof(unsigned long);
@@ -6020,7 +6029,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm
        }
 }
 
-static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
                                                      u32 msr, int type, bool value)
 {
        if (value)
@@ -8664,8 +8673,6 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
        struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
        struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
 
-       vmcs12->hdr.revision_id = evmcs->revision_id;
-
        /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
        vmcs12->tpr_threshold = evmcs->tpr_threshold;
        vmcs12->guest_rip = evmcs->guest_rip;
@@ -9369,7 +9376,30 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
 
                vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
 
-               if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) {
+               /*
+                * Currently, KVM only supports eVMCS version 1
+                * (== KVM_EVMCS_VERSION), so the guest is expected to set the
+                * first u32 field of the eVMCS, which specifies the eVMCS
+                * VersionNumber, to this value.
+                *
+                * The guest should learn the eVMCS versions supported by the
+                * host by examining CPUID.0x4000000A.EAX[0:15]. The host
+                * userspace VMM is expected to set this CPUID leaf according
+                * to the value returned in vmcs_version from
+                * nested_enable_evmcs().
+                *
+                * However, it turns out that Microsoft Hyper-V fails to comply
+                * with its own invented interface: when Hyper-V uses eVMCS, it
+                * just sets the first u32 field of the eVMCS to the revision_id
+                * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
+                * number in use, which should be one of the supported versions
+                * specified in CPUID.0x4000000A.EAX[0:15].
+                *
+                * To work around this Hyper-V bug, we accept either a supported
+                * eVMCS version or the VMCS12 revision_id as valid values for
+                * the first u32 field of the eVMCS.
+                */
+               if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
+                   (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
                        nested_release_evmcs(vcpu);
                        return 0;
                }
@@ -9390,9 +9420,11 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
                 * present in struct hv_enlightened_vmcs, ...). Make sure there
                 * are no leftovers.
                 */
-               if (from_launch)
-                       memset(vmx->nested.cached_vmcs12, 0,
-                              sizeof(*vmx->nested.cached_vmcs12));
+               if (from_launch) {
+                       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+                       memset(vmcs12, 0, sizeof(*vmcs12));
+                       vmcs12->hdr.revision_id = VMCS12_REVISION;
+               }
 
        }
        return 1;
@@ -15062,7 +15094,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
        .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
-       .write_tsc_offset = vmx_write_tsc_offset,
+       .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
 
        .set_tdp_cr3 = vmx_set_cr3,
 
index 66d66d77caee5c7b761ae4eda2f0caf4643987e1..d02937760c3ba8adc6de37ed4b39db9a926f320d 100644 (file)
@@ -1665,8 +1665,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-       kvm_x86_ops->write_tsc_offset(vcpu, offset);
-       vcpu->arch.tsc_offset = offset;
+       vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
 }
 
 static inline bool kvm_check_tsc_unstable(void)
@@ -1794,7 +1793,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
                                           s64 adjustment)
 {
-       kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
+       u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+       kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
 }
 
 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -2924,7 +2924,7 @@ static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
        unsigned size;
 
        r = -EFAULT;
-       if (copy_from_user(&msrs, user_msrs, sizeof msrs))
+       if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
                goto out;
 
        r = -E2BIG;
@@ -3091,11 +3091,11 @@ long kvm_arch_dev_ioctl(struct file *filp,
                unsigned n;
 
                r = -EFAULT;
-               if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
+               if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
                        goto out;
                n = msr_list.nmsrs;
                msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
-               if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
+               if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
                        goto out;
                r = -E2BIG;
                if (n < msr_list.nmsrs)
@@ -3117,7 +3117,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
                struct kvm_cpuid2 cpuid;
 
                r = -EFAULT;
-               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+               if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
 
                r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
@@ -3126,7 +3126,7 @@ long kvm_arch_dev_ioctl(struct file *filp,
                        goto out;
 
                r = -EFAULT;
-               if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+               if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
                        goto out;
                r = 0;
                break;
@@ -3894,7 +3894,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_interrupt irq;
 
                r = -EFAULT;
-               if (copy_from_user(&irq, argp, sizeof irq))
+               if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                break;
@@ -3912,7 +3912,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid cpuid;
 
                r = -EFAULT;
-               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+               if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
                break;
@@ -3922,7 +3922,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid2 cpuid;
 
                r = -EFAULT;
-               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+               if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
                r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
                                              cpuid_arg->entries);
@@ -3933,14 +3933,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_cpuid2 cpuid;
 
                r = -EFAULT;
-               if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+               if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
                        goto out;
                r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
                                              cpuid_arg->entries);
                if (r)
                        goto out;
                r = -EFAULT;
-               if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+               if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
                        goto out;
                r = 0;
                break;
@@ -3961,13 +3961,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_tpr_access_ctl tac;
 
                r = -EFAULT;
-               if (copy_from_user(&tac, argp, sizeof tac))
+               if (copy_from_user(&tac, argp, sizeof(tac)))
                        goto out;
                r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
                if (r)
                        goto out;
                r = -EFAULT;
-               if (copy_to_user(argp, &tac, sizeof tac))
+               if (copy_to_user(argp, &tac, sizeof(tac)))
                        goto out;
                r = 0;
                break;
@@ -3980,7 +3980,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (!lapic_in_kernel(vcpu))
                        goto out;
                r = -EFAULT;
-               if (copy_from_user(&va, argp, sizeof va))
+               if (copy_from_user(&va, argp, sizeof(va)))
                        goto out;
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
@@ -3991,7 +3991,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                u64 mcg_cap;
 
                r = -EFAULT;
-               if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
+               if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
                        goto out;
                r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
                break;
@@ -4000,7 +4000,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                struct kvm_x86_mce mce;
 
                r = -EFAULT;
-               if (copy_from_user(&mce, argp, sizeof mce))
+               if (copy_from_user(&mce, argp, sizeof(mce)))
                        goto out;
                r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
                break;
@@ -4536,7 +4536,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
                if (kvm->created_vcpus)
                        goto set_identity_unlock;
                r = -EFAULT;
-               if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
+               if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
                        goto set_identity_unlock;
                r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
 set_identity_unlock:
@@ -4620,7 +4620,7 @@ set_identity_unlock:
                if (r)
                        goto get_irqchip_out;
                r = -EFAULT;
-               if (copy_to_user(argp, chip, sizeof *chip))
+               if (copy_to_user(argp, chip, sizeof(*chip)))
                        goto get_irqchip_out;
                r = 0;
        get_irqchip_out:
@@ -4666,7 +4666,7 @@ set_identity_unlock:
        }
        case KVM_SET_PIT: {
                r = -EFAULT;
-               if (copy_from_user(&u.ps, argp, sizeof u.ps))
+               if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
                        goto out;
                r = -ENXIO;
                if (!kvm->arch.vpit)
@@ -6918,6 +6918,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
        clock_pairing.nsec = ts.tv_nsec;
        clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
        clock_pairing.flags = 0;
+       memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
 
        ret = 0;
        if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
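
The added memset() closes a potential infoleak: the struct is built on the stack and copied to the guest wholesale, so any pad bytes left uninitialized would carry kernel stack contents. A userspace sketch of the pattern (the demo struct mirrors the shape of the real one but is not the UAPI definition):

    /* Demo struct mirroring the shape of the real one; not the UAPI. */
    #include <stdio.h>
    #include <string.h>

    struct clock_pairing_demo {
        long long sec;
        long long nsec;
        unsigned long long tsc;
        unsigned flags;
        unsigned pad[9];     /* copied out verbatim with the rest */
    };

    int main(void)
    {
        struct clock_pairing_demo cp;       /* uninitialized stack bytes */

        cp.sec = 1; cp.nsec = 2; cp.tsc = 3; cp.flags = 0;
        memset(&cp.pad, 0, sizeof(cp.pad)); /* effect of the added line */
        printf("pad[0]=%u pad[8]=%u\n", cp.pad[0], cp.pad[8]);
        return 0;
    }
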
@@ -7455,7 +7456,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        else {
                if (vcpu->arch.apicv_active)
                        kvm_x86_ops->sync_pir_to_irr(vcpu);
-               kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
+               if (ioapic_in_kernel(vcpu->kvm))
+                       kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
 
        if (is_guest_mode(vcpu))
@@ -8205,7 +8207,7 @@ static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
        sregs->efer = vcpu->arch.efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
-       memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
+       memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap));
 
        if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
                set_bit(vcpu->arch.interrupt.nr,
@@ -8509,7 +8511,7 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        fpu->last_opcode = fxsave->fop;
        fpu->last_ip = fxsave->rip;
        fpu->last_dp = fxsave->rdp;
-       memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
+       memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
 
        vcpu_put(vcpu);
        return 0;
@@ -8530,7 +8532,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        fxsave->fop = fpu->last_opcode;
        fxsave->rip = fpu->last_ip;
        fxsave->rdp = fpu->last_dp;
-       memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
+       memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
 
        vcpu_put(vcpu);
        return 0;
index 00b296617ca436c3cea79edcbb0a94d034ee52a3..92e4c4b85bbaadec13e54fb614e8a5dc369e2ff5 100644 (file)
@@ -92,7 +92,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         */
-       info.high_limit = in_compat_syscall() ?
+       info.high_limit = in_32bit_syscall() ?
                task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);
 
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
@@ -116,7 +116,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         */
-       if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
+       if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
 
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
index 1e95d57760cf79becf81c012df564e42ae7122e2..db316571452145f50832ba56ff7fb49214d4ae02 100644 (file)
@@ -166,7 +166,7 @@ unsigned long get_mmap_base(int is_legacy)
        struct mm_struct *mm = current->mm;
 
 #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
-       if (in_compat_syscall()) {
+       if (in_32bit_syscall()) {
                return is_legacy ? mm->mmap_compat_legacy_base
                                 : mm->mmap_compat_base;
        }
index a80fdd7fb40f3b25791b2ab816ef068eb2f09170..abffa0be80da17b7996a578f8d22af14dfebe326 100644 (file)
@@ -399,9 +399,17 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
                n = simple_strtoul(emu_cmdline, &emu_cmdline, 0);
                ret = -1;
                for_each_node_mask(i, physnode_mask) {
+                       /*
+                        * We pass in blk[0] because
+                        * numa_remove_memblk_from(), called by
+                        * emu_setup_memblk(), deletes entry 0 and then
+                        * moves every remaining entry up in the pi.blk
+                        * array. Therefore we should always be looking
+                        * at blk[0].
+                        */
                        ret = split_nodes_size_interleave_uniform(&ei, &pi,
-                                       pi.blk[i].start, pi.blk[i].end, 0,
-                                       n, &pi.blk[i], nid);
+                                       pi.blk[0].start, pi.blk[0].end, 0,
+                                       n, &pi.blk[0], nid);
                        if (ret < 0)
                                break;
                        if (ret < n) {
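
The blk[i] to blk[0] change is about iterating over a self-compacting array: as the new comment explains, numa_remove_memblk_from() deletes entry 0 and shifts the remaining entries down, so indexing with the loop variable skips entries and can read past the live ones. A standalone demonstration with a toy array, not the real numa_meminfo structures:

    #include <stdio.h>
    #include <string.h>

    static int blk[5] = { 10, 20, 30, 40, 50 };  /* toy stand-in array */
    static int nr_blk = 5;

    /* Mimics numa_remove_memblk_from(): drop entry 0, shift the rest up. */
    static void remove_first(void)
    {
            memmove(&blk[0], &blk[1], (nr_blk - 1) * sizeof(blk[0]));
            nr_blk--;
    }

    int main(void)
    {
            /* Correct: the next element is always blk[0], because every
             * iteration compacts the array. Indexing with a loop counter
             * here would visit 10, 30, 50 and miss 20 and 40. */
            while (nr_blk > 0) {
                    printf("processing %d\n", blk[0]);
                    remove_first();
            }
            return 0;
    }
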
index f799076e3d577065e8c73c12c6cddb0c5d6f6cfe..db7a1008223886d398c531d8d34720cd0265d17d 100644 (file)
@@ -2309,9 +2309,13 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 
        /*
         * We should perform an IPI and flush all tlbs,
-        * but that can deadlock->flush only current cpu:
+        * but that can deadlock -> flush only the current CPU.
+        * Preemption needs to be disabled around __flush_tlb_all() due to
+        * the CR3 reload in __native_flush_tlb().
         */
+       preempt_disable();
        __flush_tlb_all();
+       preempt_enable();
 
        arch_flush_lazy_mmu_mode();
 }
index bddd6b3cee1de51ac8321974b827d208dbab1831..03b6b4c2238daa6c5a97d6eb10c32fb825a9a077 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
-#include <linux/ptrace.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
  *     Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is
+ * stored in cpu_tlb_state.last_user_mm_ibpb.
+ */
+#define LAST_USER_MM_IBPB      0x1UL
+
 /*
  * We get here when we do something requiring a TLB invalidation
  * but could not go invalidate all of the contexts.  We do the
@@ -181,17 +186,87 @@ static void sync_current_stack_to_mm(struct mm_struct *mm)
        }
 }
 
-static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
+static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next)
+{
+       unsigned long next_tif = task_thread_info(next)->flags;
+       unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB;
+
+       return (unsigned long)next->mm | ibpb;
+}
+
+static void cond_ibpb(struct task_struct *next)
 {
+       if (!next || !next->mm)
+               return;
+
        /*
-        * Check if the current (previous) task has access to the memory
-        * of the @tsk (next) task. If access is denied, make sure to
-        * issue a IBPB to stop user->user Spectre-v2 attacks.
-        *
-        * Note: __ptrace_may_access() returns 0 or -ERRNO.
+        * Both the conditional and the always-on IBPB mode use the mm
+        * pointer to avoid the IBPB when switching between tasks of the
+        * same process. Using the mm pointer instead of mm->context.ctx_id
+        * opens a hypothetical hole vs. mm_struct reuse, which is more or
+        * less impossible for an attacker to control. Aside from that, it
+        * would only affect the first schedule, so the theoretically
+        * exposed data is not really interesting.
         */
-       return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id &&
-               ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB));
+       if (static_branch_likely(&switch_mm_cond_ibpb)) {
+               unsigned long prev_mm, next_mm;
+
+               /*
+                * This is a bit more complex than the always mode because
+                * it has to handle two cases:
+                *
+                * 1) Switch from a user space task (potential attacker)
+                *    which has TIF_SPEC_IB set to a user space task
+                *    (potential victim) which has TIF_SPEC_IB not set.
+                *
+                * 2) Switch from a user space task (potential attacker)
+                *    which has TIF_SPEC_IB not set to a user space task
+                *    (potential victim) which has TIF_SPEC_IB set.
+                *
+                * This could be done by unconditionally issuing IBPB when
+                * a task which has TIF_SPEC_IB set is either scheduled in
+                * or out. Though that results in two flushes when:
+                *
+                * - the same user space task is scheduled out and later
+                *   scheduled in again and only a kernel thread ran in
+                *   between.
+                *
+                * - a user space task belonging to the same process is
+                *   scheduled in after a kernel thread ran in between.
+                *
+                * - a user space task belonging to the same process is
+                *   scheduled in immediately.
+                *
+                * Optimize this with reasonably small overhead for the
+                * above cases. Mangle the TIF_SPEC_IB bit into the mm
+                * pointer of the incoming task which is stored in
+                * cpu_tlbstate.last_user_mm_ibpb for comparison.
+                */
+               next_mm = mm_mangle_tif_spec_ib(next);
+               prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb);
+
+               /*
+                * Issue IBPB only if the mm's are different and one or
+                * both have the IBPB bit set.
+                */
+               if (next_mm != prev_mm &&
+                   (next_mm | prev_mm) & LAST_USER_MM_IBPB)
+                       indirect_branch_prediction_barrier();
+
+               this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm);
+       }
+
+       if (static_branch_unlikely(&switch_mm_always_ibpb)) {
+               /*
+                * Only flush when switching to a user space task with a
+                * different context than the user space task which ran
+                * last on this CPU.
+                */
+               if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) {
+                       indirect_branch_prediction_barrier();
+                       this_cpu_write(cpu_tlbstate.last_user_mm, next->mm);
+               }
+       }
 }
 
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
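
The heart of cond_ibpb() is a pointer-tagging trick: mm_struct pointers are word-aligned, so bit 0 (LAST_USER_MM_IBPB) is always zero and can carry the TIF_SPEC_IB state, and a single comparison of the mangled values then detects both "different mm" and "IBPB bit set on either side". A userspace sketch of the same idea with toy mm objects; in the kernel the bit comes from the incoming task's thread flags:

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_BIT 0x1UL               /* plays the role of LAST_USER_MM_IBPB */

    struct mm { int dummy; };           /* static/heap objects: bit 0 is free */

    static uintptr_t mangle(const struct mm *mm, int wants_barrier)
    {
            return (uintptr_t)mm | (wants_barrier ? TAG_BIT : 0);
    }

    int main(void)
    {
            static struct mm a, b;
            uintptr_t prev = mangle(&a, 0);
            uintptr_t next[3] = {
                    mangle(&a, 0),      /* same mm, no flag  -> skip */
                    mangle(&b, 0),      /* new mm, no flags  -> skip */
                    mangle(&b, 1),      /* new mm, flag set  -> barrier */
            };

            for (int i = 0; i < 3; i++) {
                    /* One compare covers "mm changed" and "bit set on
                     * either side", exactly like cond_ibpb() above. */
                    if (next[i] != prev && ((next[i] | prev) & TAG_BIT))
                            printf("case %d: would issue barrier\n", i);
                    else
                            printf("case %d: no barrier\n", i);
                    prev = next[i];
            }
            return 0;
    }
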
@@ -292,22 +367,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                new_asid = prev_asid;
                need_flush = true;
        } else {
-               u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
-
                /*
                 * Avoid user/user BTB poisoning by flushing the branch
                 * predictor when switching between processes. This stops
                 * one process from doing Spectre-v2 attacks on another.
-                *
-                * As an optimization, flush indirect branches only when
-                * switching into a processes that can't be ptrace by the
-                * current one (as in such case, attacker has much more
-                * convenient way how to tamper with the next process than
-                * branch buffer poisoning).
                 */
-               if (static_cpu_has(X86_FEATURE_USE_IBPB) &&
-                               ibpb_needed(tsk, last_ctx_id))
-                       indirect_branch_prediction_barrier();
+               cond_ibpb(tsk);
 
                if (IS_ENABLED(CONFIG_VMAP_STACK)) {
                        /*
@@ -365,14 +430,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0);
        }
 
-       /*
-        * Record last user mm's context id, so we can avoid
-        * flushing branch buffer with IBPB if we switch back
-        * to the same user.
-        */
-       if (next != &init_mm)
-               this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
-
        /* Make sure we write CR3 before loaded_mm. */
        barrier();
 
@@ -441,7 +498,7 @@ void initialize_tlbstate_and_flush(void)
        write_cr3(build_cr3(mm->pgd, 0));
 
        /* Reinitialize tlbstate. */
-       this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id);
+       this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB);
        this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0);
        this_cpu_write(cpu_tlbstate.next_asid, 1);
        this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id);
index 7476b3b097e1e94dc0c305c6e9ef18a4e41633b3..7138bc7a265c016359e372d8274f3a4f6f068215 100644 (file)
@@ -183,7 +183,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
                        num--;
                }
 
-               if (efi_x >= si->lfb_width) {
+               if (efi_x + font->width > si->lfb_width) {
                        efi_x = 0;
                        efi_y += font->height;
                }
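
The changed condition fixes an off-by-one-glyph wrap test: efi_x >= lfb_width wraps only once the cursor has already passed the edge, so a glyph starting just inside the edge is drawn partially off-screen first; efi_x + font->width > lfb_width wraps before drawing any glyph that cannot fully fit. A toy cursor model of the two checks, with made-up widths:

    #include <stdio.h>

    #define LFB_WIDTH  100              /* made-up framebuffer width */
    #define FONT_WIDTH 8                /* made-up glyph width */

    /* Draw a run of glyphs, wrapping with the selected test; returns how
     * many glyphs ended up partially past the right edge. */
    static int draw(int wrap_before_overflow)
    {
            int x = 0, spilled = 0;

            for (int i = 0; i < 20; i++) {
                    if (wrap_before_overflow ? (x + FONT_WIDTH > LFB_WIDTH)
                                             : (x >= LFB_WIDTH))
                            x = 0;      /* "efi_x = 0; efi_y += height;" */
                    if (x + FONT_WIDTH > LFB_WIDTH)
                            spilled++;  /* this glyph crosses the edge */
                    x += FONT_WIDTH;
            }
            return spilled;
    }

    int main(void)
    {
            printf("old check, spilled glyphs: %d\n", draw(0));
            printf("new check, spilled glyphs: %d\n", draw(1));
            return 0;
    }
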
index 0b08067c45f3daa6a62b71c1cc9b4663f2c003a1..b629f6992d9f65dd6ec587ece3e808843ffd748f 100644 (file)
@@ -130,7 +130,7 @@ static void regex_init(int use_real_mode)
                              REG_EXTENDED|REG_NOSUB);
 
                if (err) {
-                       regerror(err, &sym_regex_c[i], errbuf, sizeof errbuf);
+                       regerror(err, &sym_regex_c[i], errbuf, sizeof(errbuf));
                        die("%s", errbuf);
                }
         }
@@ -405,7 +405,7 @@ static void read_shdrs(FILE *fp)
        }
        for (i = 0; i < ehdr.e_shnum; i++) {
                struct section *sec = &secs[i];
-               if (fread(&shdr, sizeof shdr, 1, fp) != 1)
+               if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
                        die("Cannot read ELF section headers %d/%d: %s\n",
                            i, ehdr.e_shnum, strerror(errno));
                sec->shdr.sh_name      = elf_word_to_cpu(shdr.sh_name);
index 413f3519d9a12ea4f66c14720c92e918c306c345..c907b20d49935d5bb15b4f5bc602918d9b815341 100644 (file)
@@ -194,7 +194,7 @@ extern unsigned long um_vdso_addr;
 
 typedef unsigned long elf_greg_t;
 
-#define ELF_NGREG (sizeof (struct user_regs_struct) / sizeof(elf_greg_t))
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
 typedef struct user_i387_struct elf_fpregset_t;
index e996e8e744cbab90ecaca0ea17f07048465e208b..750f46ad018a0e0e500bf5dd60da2f7901a54f89 100644 (file)
@@ -10,7 +10,6 @@
 #include <xen/xen.h>
 #include <xen/features.h>
 #include <xen/page.h>
-#include <xen/interface/memory.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -346,80 +345,3 @@ void xen_arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(xen_arch_unregister_cpu);
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-void __init arch_xen_balloon_init(struct resource *hostmem_resource)
-{
-       struct xen_memory_map memmap;
-       int rc;
-       unsigned int i, last_guest_ram;
-       phys_addr_t max_addr = PFN_PHYS(max_pfn);
-       struct e820_table *xen_e820_table;
-       const struct e820_entry *entry;
-       struct resource *res;
-
-       if (!xen_initial_domain())
-               return;
-
-       xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL);
-       if (!xen_e820_table)
-               return;
-
-       memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries);
-       set_xen_guest_handle(memmap.buffer, xen_e820_table->entries);
-       rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
-       if (rc) {
-               pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc);
-               goto out;
-       }
-
-       last_guest_ram = 0;
-       for (i = 0; i < memmap.nr_entries; i++) {
-               if (xen_e820_table->entries[i].addr >= max_addr)
-                       break;
-               if (xen_e820_table->entries[i].type == E820_TYPE_RAM)
-                       last_guest_ram = i;
-       }
-
-       entry = &xen_e820_table->entries[last_guest_ram];
-       if (max_addr >= entry->addr + entry->size)
-               goto out; /* No unallocated host RAM. */
-
-       hostmem_resource->start = max_addr;
-       hostmem_resource->end = entry->addr + entry->size;
-
-       /*
-        * Mark non-RAM regions between the end of dom0 RAM and end of host RAM
-        * as unavailable. The rest of that region can be used for hotplug-based
-        * ballooning.
-        */
-       for (; i < memmap.nr_entries; i++) {
-               entry = &xen_e820_table->entries[i];
-
-               if (entry->type == E820_TYPE_RAM)
-                       continue;
-
-               if (entry->addr >= hostmem_resource->end)
-                       break;
-
-               res = kzalloc(sizeof(*res), GFP_KERNEL);
-               if (!res)
-                       goto out;
-
-               res->name = "Unavailable host RAM";
-               res->start = entry->addr;
-               res->end = (entry->addr + entry->size < hostmem_resource->end) ?
-                           entry->addr + entry->size : hostmem_resource->end;
-               rc = insert_resource(hostmem_resource, res);
-               if (rc) {
-                       pr_warn("%s: Can't insert [%llx - %llx) (%d)\n",
-                               __func__, res->start, res->end, rc);
-                       kfree(res);
-                       goto  out;
-               }
-       }
-
- out:
-       kfree(xen_e820_table);
-}
-#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */
index 0d7b3ae4960bb0cc424cdc853cdf2f35a73835ae..a5d7ed12533707f8714e066cd4be4c30f880988d 100644 (file)
@@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        init_top_pgt[0] = __pgd(0);
 
        /* Pre-constructed entries are in pfn, so convert to mfn */
-       /* L4[272] -> level3_ident_pgt  */
+       /* L4[273] -> level3_ident_pgt  */
        /* L4[511] -> level3_kernel_pgt */
        convert_pfn_mfn(init_top_pgt);
 
@@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        addr[0] = (unsigned long)pgd;
        addr[1] = (unsigned long)l3;
        addr[2] = (unsigned long)l2;
-       /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
-        * Both L4[272][0] and L4[511][510] have entries that point to the same
+       /* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
+        * Both L4[273][0] and L4[511][510] have entries that point to the same
         * L2 (PMD) tables. Meaning that if you modify it in __va space
         * it will be also modified in the __ka space! (But if you just
         * modify the PMD table to point to other PTE's or none, then you
index 2bce7958ce8b3e129634a79cadbbdd098c9fc77e..0766a08bdf458472f16db3dc38477d73abcb0699 100644 (file)
@@ -69,6 +69,11 @@ void xen_mc_flush(void)
 
        trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
 
+#if MC_DEBUG
+       memcpy(b->debug, b->entries,
+              b->mcidx * sizeof(struct multicall_entry));
+#endif
+
        switch (b->mcidx) {
        case 0:
                /* no-op */
@@ -87,32 +92,34 @@ void xen_mc_flush(void)
                break;
 
        default:
-#if MC_DEBUG
-               memcpy(b->debug, b->entries,
-                      b->mcidx * sizeof(struct multicall_entry));
-#endif
-
                if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
                        BUG();
                for (i = 0; i < b->mcidx; i++)
                        if (b->entries[i].result < 0)
                                ret++;
+       }
 
+       if (WARN_ON(ret)) {
+               pr_err("%d of %d multicall(s) failed: cpu %d\n",
+                      ret, b->mcidx, smp_processor_id());
+               for (i = 0; i < b->mcidx; i++) {
+                       if (b->entries[i].result < 0) {
 #if MC_DEBUG
-               if (ret) {
-                       printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
-                              ret, smp_processor_id());
-                       dump_stack();
-                       for (i = 0; i < b->mcidx; i++) {
-                               printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
-                                      i+1, b->mcidx,
+                               pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n",
+                                      i + 1,
                                       b->debug[i].op,
                                       b->debug[i].args[0],
                                       b->entries[i].result,
                                       b->caller[i]);
+#else
+                               pr_err("  call %2d: op=%lu arg=[%lx] result=%ld\n",
+                                      i + 1,
+                                      b->entries[i].op,
+                                      b->entries[i].args[0],
+                                      b->entries[i].result);
+#endif
                        }
                }
-#endif
        }
 
        b->mcidx = 0;
@@ -126,8 +133,6 @@ void xen_mc_flush(void)
        b->cbidx = 0;
 
        local_irq_restore(flags);
-
-       WARN_ON(ret);
 }
 
 struct multicall_space __xen_mc_entry(size_t args)
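
This restructuring is more than a pr_err() conversion: the MC_DEBUG snapshot is now taken before the switch, so it also covers the single- and double-entry fast paths, and the failure report moves out of the #if MC_DEBUG block to before the point where the buffer is reset, so failed entries are still valid when dumped and failures are reported on non-debug builds too. A compact userspace model of the batch-with-per-entry-results shape, using a toy entry type and a fake hypercall:

    #include <stdio.h>

    struct entry { long op; long result; };

    static struct {
            struct entry e[8];
            int idx;
    } b;                                /* toy multicall batch buffer */

    /* Fake batched hypercall: "fail" every odd op with -22 (EINVAL). */
    static void fake_multicall(struct entry *e, int n)
    {
            for (int i = 0; i < n; i++)
                    e[i].result = (e[i].op & 1) ? -22 : 0;
    }

    static void flush(void)
    {
            int ret = 0;

            fake_multicall(b.e, b.idx);
            for (int i = 0; i < b.idx; i++)
                    if (b.e[i].result < 0)
                            ret++;

            /* Report while the batch is still intact... */
            if (ret) {
                    fprintf(stderr, "%d of %d call(s) failed\n", ret, b.idx);
                    for (int i = 0; i < b.idx; i++)
                            if (b.e[i].result < 0)
                                    fprintf(stderr, "  call %d: op=%ld result=%ld\n",
                                            i + 1, b.e[i].op, b.e[i].result);
            }

            b.idx = 0;                  /* ...and only then reset it */
    }

    int main(void)
    {
            b.e[b.idx++] = (struct entry){ .op = 2 };
            b.e[b.idx++] = (struct entry){ .op = 3 };
            b.e[b.idx++] = (struct entry){ .op = 5 };
            flush();
            return 0;
    }
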
index b06731705529b1e4c339bc21db8de0a565acf6d8..055e37e43541ed17d11cf4a194085b7fc3a3192c 100644 (file)
@@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
        /*
         * The interface requires atomic updates on p2m elements.
-        * xen_safe_write_ulong() is using __put_user which does an atomic
-        * store via asm().
+        * xen_safe_write_ulong() is using an atomic store via asm().
         */
        if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
                return true;
index 1163e33121fb39af673743b0eda9de5007629c63..075ed47993bbf5c1bf9b760511268613c035d8c1 100644 (file)
@@ -808,6 +808,7 @@ char * __init xen_memory_setup(void)
        addr = xen_e820_table.entries[0].addr;
        size = xen_e820_table.entries[0].size;
        while (i < xen_e820_table.nr_entries) {
+               bool discard = false;
 
                chunk_size = size;
                type = xen_e820_table.entries[i].type;
@@ -823,10 +824,11 @@ char * __init xen_memory_setup(void)
                                xen_add_extra_mem(pfn_s, n_pfns);
                                xen_max_p2m_pfn = pfn_s + n_pfns;
                        } else
-                               type = E820_TYPE_UNUSABLE;
+                               discard = true;
                }
 
-               xen_align_and_add_e820_region(addr, chunk_size, type);
+               if (!discard)
+                       xen_align_and_add_e820_region(addr, chunk_size, type);
 
                addr += chunk_size;
                size -= chunk_size;
index 441c8826216982a4fb9532b68d68a37cbb0d3e05..3776122c87cce16efb8da63de8c32c78696e52d1 100644 (file)
@@ -3,24 +3,21 @@
  * Split spinlock implementation out into its own file, so it can be
  * compiled in a FTRACE-compatible way.
  */
-#include <linux/kernel_stat.h>
+#include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/debugfs.h>
-#include <linux/log2.h>
-#include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
 
-#include <xen/interface/xen.h>
 #include <xen/events.h>
 
 #include "xen-ops.h"
-#include "debugfs.h"
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +36,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-       unsigned long flags;
        int irq = __this_cpu_read(lock_kicker_irq);
+       atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
        /* If kicker interrupts not initialized yet, just spin */
        if (irq == -1 || in_nmi())
                return;
 
-       /* Guard against reentry. */
-       local_irq_save(flags);
+       /* Detect reentry. */
+       atomic_inc(nest_cnt);
 
-       /* If irq pending already clear it. */
-       if (xen_test_irq_pending(irq)) {
+       /* If an irq is already pending and this is not a nested call, clear it. */
+       if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
                xen_clear_irq_pending(irq);
        } else if (READ_ONCE(*byte) == val) {
                /* Block until irq becomes pending (or a spurious wakeup) */
                xen_poll_irq(irq);
        }
 
-       local_irq_restore(flags);
+       atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
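
Replacing local_irq_save() with a nesting counter changes the guard from preventing reentry to detecting it: the function may now be reentered, for example from an interrupt, and only the outermost invocation is allowed to consume the pending event, while nested calls fall through to polling. A standalone sketch of that detect-reentry shape with C11 atomics; the event handling is a toy, and the kernel's counter is per-CPU rather than global:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int nest_cnt;
    static atomic_int event_pending = 1;  /* pretend an event is pending */

    static void qlock_wait(void)
    {
            atomic_fetch_add(&nest_cnt, 1);

            /* Only the outermost caller may consume the pending event;
             * a nested call must leave it for the outer level. */
            if (atomic_load(&nest_cnt) == 1 &&
                atomic_exchange(&event_pending, 0))
                    printf("outer call cleared the pending event\n");
            else
                    printf("nested call or nothing pending: would poll\n");

            atomic_fetch_sub(&nest_cnt, 1);
    }

    int main(void)
    {
            qlock_wait();               /* outermost: clears the event */
            qlock_wait();               /* nothing pending: would poll */
            return 0;
    }
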
index 60c141af222bc5e05c426e6c0edf976e93c036d0..d29b7365da8d9facd71a887a071b34bbb68ec5fc 100644 (file)
@@ -1,7 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-config ZONE_DMA
-       def_bool y
-
 config XTENSA
        def_bool y
        select ARCH_HAS_SG_CHAIN
index dc9e0ba7122cad1e982ac33eb5c9d60d9a1db48a..294846117fc2c5527e297ccd50eb55c31c3f3228 100644 (file)
@@ -33,7 +33,7 @@ uImage: $(obj)/uImage
 boot-elf boot-redboot: $(addprefix $(obj)/,$(subdir-y))
        $(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
 
-OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
+OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
 
 vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
index be9bfd9aa865beb554b2b6e7ce92010cf276b927..34a23016dd1442f5c95d445f13276d97c772072d 100644 (file)
 # error Linux requires the Xtensa Windowed Registers Option.
 #endif
 
-#define ARCH_SLAB_MINALIGN     XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
 
 /*
  * User space process size: 1 GB.
index 67904f55f1884f52893b3a99b1be785a48dc69da..120dd746a14751f3de4317a35921b2037d8c7cdd 100644 (file)
@@ -94,14 +94,14 @@ int main(void)
        DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
        DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
 #if XTENSA_HAVE_COPROCESSORS
-       DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
-       DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+       DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+       DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+       DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+       DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+       DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+       DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+       DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+       DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
 #endif
        DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
        DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
index 2f76118ecf6230ff01fe0e43221269da7b208f46..9053a5622d2c3435faefe4950f953b7923e8332f 100644 (file)
@@ -88,9 +88,12 @@ _SetupMMU:
        initialize_mmu
 #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
        rsr     a2, excsave1
-       movi    a3, 0x08000000
+       movi    a3, XCHAL_KSEG_PADDR
+       bltu    a2, a3, 1f
+       sub     a2, a2, a3
+       movi    a3, XCHAL_KSEG_SIZE
        bgeu    a2, a3, 1f
-       movi    a3, 0xd0000000
+       movi    a3, XCHAL_KSEG_CACHED_VADDR
        add     a2, a2, a3
        wsr     a2, excsave1
 1:
index 483dcfb6e681d7d483ef8ebfb948d91b7ee8f1fd..4bb68133a72af93ad00e82d08abb7d26a783b86d 100644 (file)
@@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti)
 
 void coprocessor_flush_all(struct thread_info *ti)
 {
-       unsigned long cpenable;
+       unsigned long cpenable, old_cpenable;
        int i;
 
        preempt_disable();
 
+       RSR_CPENABLE(old_cpenable);
        cpenable = ti->cpenable;
+       WSR_CPENABLE(cpenable);
 
        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
                        coprocessor_flush(ti, i);
                cpenable >>= 1;
        }
+       WSR_CPENABLE(old_cpenable);
 
        preempt_enable();
 }
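
The fix saves the live CPENABLE value, writes the thread's cpenable mask so the coprocessors being flushed are actually enabled in hardware while coprocessor_flush() runs, then restores the saved value. A generic save/set/operate/restore sketch; a plain variable stands in for the special register, since RSR/WSR are Xtensa instructions:

    #include <stdio.h>

    static unsigned long hw_cpenable;   /* stands in for the CPENABLE register */

    #define RSR(v)  ((v) = hw_cpenable)
    #define WSR(v)  (hw_cpenable = (v))

    static void flush_coprocessors(unsigned long thread_mask)
    {
            unsigned long old_cpenable, mask = thread_mask;

            RSR(old_cpenable);          /* remember the live state */
            WSR(thread_mask);           /* enable what the flush touches */

            for (int i = 0; mask; i++, mask >>= 1)
                    if (mask & 1)
                            printf("flushing coprocessor %d\n", i);

            WSR(old_cpenable);          /* put the register back */
    }

    int main(void)
    {
            hw_cpenable = 0x1;          /* only CP0 currently enabled */
            flush_coprocessors(0x5);    /* thread owns CP0 and CP2 */
            printf("restored cpenable = %#lx\n", hw_cpenable);
            return 0;
    }
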
index c0845cb1cbb9944ed7deaee9bffa3685171574c3..d9541be0605ad52793e5d59a11b2a8dda7f975a7 100644 (file)
@@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs)
 }
 
 
+#if XTENSA_HAVE_COPROCESSORS
+#define CP_OFFSETS(cp) \
+       { \
+               .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \
+               .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \
+               .sz = sizeof(xtregs_ ## cp ## _t), \
+       }
+
+static const struct {
+       size_t elf_xtregs_offset;
+       size_t ti_offset;
+       size_t sz;
+} cp_offsets[] = {
+       CP_OFFSETS(cp0),
+       CP_OFFSETS(cp1),
+       CP_OFFSETS(cp2),
+       CP_OFFSETS(cp3),
+       CP_OFFSETS(cp4),
+       CP_OFFSETS(cp5),
+       CP_OFFSETS(cp6),
+       CP_OFFSETS(cp7),
+};
+#endif
+
 static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
 {
        struct pt_regs *regs = task_pt_regs(child);
        struct thread_info *ti = task_thread_info(child);
        elf_xtregs_t __user *xtregs = uregs;
        int ret = 0;
+       int i __maybe_unused;
 
        if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
                return -EIO;
@@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
 #if XTENSA_HAVE_COPROCESSORS
        /* Flush all coprocessor registers to memory. */
        coprocessor_flush_all(ti);
-       ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
-                             sizeof(xtregs_coprocessor_t));
+
+       for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+               ret |= __copy_to_user((char __user *)xtregs +
+                                     cp_offsets[i].elf_xtregs_offset,
+                                     (const char *)ti +
+                                     cp_offsets[i].ti_offset,
+                                     cp_offsets[i].sz);
 #endif
        ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
                              sizeof(xtregs->opt));
@@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
        struct pt_regs *regs = task_pt_regs(child);
        elf_xtregs_t *xtregs = uregs;
        int ret = 0;
+       int i __maybe_unused;
 
        if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t)))
                return -EFAULT;
@@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
        coprocessor_flush_all(ti);
        coprocessor_release_all(ti);
 
-       ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
-                               sizeof(xtregs_coprocessor_t));
+       for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i)
+               ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset,
+                                       (const char __user *)xtregs +
+                                       cp_offsets[i].elf_xtregs_offset,
+                                       cp_offsets[i].sz);
 #endif
        ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
                                sizeof(xtregs->opt));
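
The old single block copy of xtregs_cp silently assumed that the per-coprocessor layout of struct thread_info matches elf_xtregs_t byte for byte; the CP_OFFSETS table instead pairs an offsetof() into each structure with an explicit size, so every coprocessor's registers are copied individually. A self-contained sketch of that table-driven copy, using toy structs whose layouts deliberately differ:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct src { int a; char gap[4]; int b; };  /* toy layouts that do */
    struct dst { int b; int a; };               /* not match byte for byte */

    #define FIELD(name) \
            { offsetof(struct src, name), offsetof(struct dst, name), \
              sizeof(int) }

    static const struct { size_t s_off, d_off, sz; } tbl[] = {
            FIELD(a),
            FIELD(b),
    };

    int main(void)
    {
            struct src s = { .a = 1, .b = 2 };
            struct dst d;

            /* One copy per field pair, like the __copy_to_user() loop. */
            for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
                    memcpy((char *)&d + tbl[i].d_off,
                           (const char *)&s + tbl[i].s_off, tbl[i].sz);

            printf("d.a=%d d.b=%d\n", d.a, d.b);
            return 0;
    }
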
index b727b18a68acd9ff639d9da228fc70d94dc657ff..b80a430453b1cb7d76b8598db8eb0b06f039df8d 100644 (file)
@@ -131,6 +131,7 @@ SECTIONS
   .fixup   : { *(.fixup) }
 
   EXCEPTION_TABLE(16)
+  NOTES
   /* Data section */
 
   _sdata = .;
@@ -296,38 +297,11 @@ SECTIONS
 
   _end = .;
 
-  .xt.lit : { *(.xt.lit) }
-  .xt.prop : { *(.xt.prop) }
-
-  .debug  0 :  { *(.debug) }
-  .line  0 :  { *(.line) }
-  .debug_srcinfo  0 :  { *(.debug_srcinfo) }
-  .debug_sfnames  0 :  { *(.debug_sfnames) }
-  .debug_aranges  0 :  { *(.debug_aranges) }
-  .debug_pubnames  0 :  { *(.debug_pubnames) }
-  .debug_info  0 :  { *(.debug_info) }
-  .debug_abbrev  0 :  { *(.debug_abbrev) }
-  .debug_line  0 :  { *(.debug_line) }
-  .debug_frame  0 :  { *(.debug_frame) }
-  .debug_str  0 :  { *(.debug_str) }
-  .debug_loc  0 :  { *(.debug_loc) }
-  .debug_macinfo  0 :  { *(.debug_macinfo) }
-  .debug_weaknames  0 :  { *(.debug_weaknames) }
-  .debug_funcnames  0 :  { *(.debug_funcnames) }
-  .debug_typenames  0 :  { *(.debug_typenames) }
-  .debug_varnames  0 :  { *(.debug_varnames) }
-
-  .xt.insn 0 :
-  {
-    *(.xt.insn)
-    *(.gnu.linkonce.x*)
-  }
+  DWARF_DEBUG
 
-  .xt.lit 0 :
-  {
-    *(.xt.lit)
-    *(.gnu.linkonce.p*)
-  }
+  .xt.prop 0 : { KEEP(*(.xt.prop .xt.prop.* .gnu.linkonce.prop.*)) }
+  .xt.insn 0 : { KEEP(*(.xt.insn .xt.insn.* .gnu.linkonce.x*)) }
+  .xt.lit  0 : { KEEP(*(.xt.lit  .xt.lit.*  .gnu.linkonce.p*)) }
 
   /* Sections to be discarded */
   DISCARDS
index 9750a48f491b19c087b8b1013b4614eaf61e4cac..30a48bba4a47372b81ce5f0e6e40aec8ca564dcd 100644 (file)
@@ -71,7 +71,7 @@ void __init zones_init(void)
 {
        /* All pages are DMA-able, so we put them all in the DMA zone. */
        unsigned long zones_size[MAX_NR_ZONES] = {
-               [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
+               [ZONE_NORMAL] = max_low_pfn - ARCH_PFN_OFFSET,
 #ifdef CONFIG_HIGHMEM
                [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
 #endif
index d9a7916ff0ab6474a6f4abac2873a6685ad4d467..9fe5952d117d553f12f32055fde8683c554b06a8 100644 (file)
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
        uint64_t serial_nr;
 
        rcu_read_lock();
-       serial_nr = __bio_blkcg(bio)->css.serial_nr;
+       serial_nr = bio_blkcg(bio)->css.serial_nr;
 
        /*
         * Check whether blkcg has changed.  The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
        if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
                goto out;
 
-       bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
+       bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
        /*
         * Update blkg_path for bfq_log_* functions. We cache this
         * path, and update it here, for the following
index 6075100f03a50a73da838b19891b923d0ad422a7..97337214bec4286afa212cf5f271ce26b578d399 100644 (file)
@@ -638,7 +638,7 @@ static bool bfq_varied_queue_weights_or_active_groups(struct bfq_data *bfqd)
                 bfqd->queue_weights_tree.rb_node->rb_right)
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
               ) ||
-               (bfqd->num_active_groups > 0
+               (bfqd->num_groups_with_pending_reqs > 0
 #endif
               );
 }
@@ -802,7 +802,21 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
                         */
                        break;
                }
-               bfqd->num_active_groups--;
+
+               /*
+                * The decrement of num_groups_with_pending_reqs is
+                * not performed immediately upon the deactivation of
+                * the entity, but is delayed until the first leaf
+                * descendant bfqq of the entity has all of its
+                * pending requests completed. The following
+                * instructions perform this delayed decrement, if
+                * needed. See the comments on
+                * num_groups_with_pending_reqs for details.
+                */
+               if (entity->in_groups_with_pending_reqs) {
+                       entity->in_groups_with_pending_reqs = false;
+                       bfqd->num_groups_with_pending_reqs--;
+               }
        }
 }
 
@@ -3529,27 +3543,44 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
         * fact, if there are active groups, then, for condition (i)
         * to become false, it is enough that an active group contains
         * more active processes or sub-groups than some other active
-        * group. We address this issue with the following bi-modal
-        * behavior, implemented in the function
+        * group. More precisely, for condition (i) to hold because of
+        * such a group, it is not even necessary that the group is
+        * (still) active: it is sufficient that, even if the group
+        * has become inactive, some of its descendant processes still
+        * have some request already dispatched but still waiting for
+        * completion. In fact, requests still have to be guaranteed
+        * their share of the throughput even after being
+        * dispatched. In this respect, it is easy to show that, if a
+        * group frequently becomes inactive while still having
+        * in-flight requests, and if, when this happens, the group is
+        * not considered in the calculation of whether the scenario
+        * is asymmetric, then the group may fail to be guaranteed its
+        * fair share of the throughput (basically because idling may
+        * not be performed for the descendant processes of the group,
+        * but it had to be).  We address this issue with the
+        * following bi-modal behavior, implemented in the function
         * bfq_symmetric_scenario().
         *
-        * If there are active groups, then the scenario is tagged as
+        * If there are groups with requests waiting for completion
+        * (as commented above, some of these groups may even be
+        * already inactive), then the scenario is tagged as
         * asymmetric, conservatively, without checking any of the
         * conditions (i) and (ii). So the device is idled for bfqq.
         * This behavior matches also the fact that groups are created
-        * exactly if controlling I/O (to preserve bandwidth and
-        * latency guarantees) is a primary concern.
+        * exactly if controlling I/O is a primary concern (to
+        * preserve bandwidth and latency guarantees).
         *
-        * On the opposite end, if there are no active groups, then
-        * only condition (i) is actually controlled, i.e., provided
-        * that condition (i) holds, idling is not performed,
-        * regardless of whether condition (ii) holds. In other words,
-        * only if condition (i) does not hold, then idling is
-        * allowed, and the device tends to be prevented from queueing
-        * many requests, possibly of several processes. Since there
-        * are no active groups, then, to control condition (i) it is
-        * enough to check whether all active queues have the same
-        * weight.
+        * On the opposite end, if there are no groups with requests
+        * waiting for completion, then only condition (i) is actually
+        * controlled, i.e., provided that condition (i) holds, idling
+        * is not performed, regardless of whether condition (ii)
+        * holds. In other words, only if condition (i) does not hold,
+        * then idling is allowed, and the device tends to be
+        * prevented from queueing many requests, possibly of several
+        * processes. Since there are no groups with requests waiting
+        * for completion, to control condition (i) it is enough
+        * to check just whether all the queues with requests waiting
+        * for completion also have the same weight.
         *
         * Not checking condition (ii) evidently exposes bfqq to the
         * risk of getting less throughput than its fair share.
@@ -3607,10 +3638,11 @@ static bool bfq_better_to_idle(struct bfq_queue *bfqq)
         * bfqq is weight-raised is checked explicitly here. More
         * precisely, the compound condition below takes into account
         * also the fact that, even if bfqq is being weight-raised,
-        * the scenario is still symmetric if all active queues happen
-        * to be weight-raised. Actually, we should be even more
-        * precise here, and differentiate between interactive weight
-        * raising and soft real-time weight raising.
+        * the scenario is still symmetric if all queues with requests
+        * waiting for completion happen to be
+        * weight-raised. Actually, we should be even more precise
+        * here, and differentiate between interactive weight raising
+        * and soft real-time weight raising.
         *
         * As a side note, it is worth considering that the above
         * device-idling countermeasures may however fail in the
@@ -4384,7 +4416,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
 
        rcu_read_lock();
 
-       bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
+       bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
        if (!bfqg) {
                bfqq = &bfqd->oom_bfqq;
                goto out;
@@ -5417,7 +5449,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
        bfqd->idle_slice_timer.function = bfq_idle_slice_timer;
 
        bfqd->queue_weights_tree = RB_ROOT;
-       bfqd->num_active_groups = 0;
+       bfqd->num_groups_with_pending_reqs = 0;
 
        INIT_LIST_HEAD(&bfqd->active_list);
        INIT_LIST_HEAD(&bfqd->idle_list);
index 77651d817ecd36fe59827f2aa55f9c4ec5ffb979..0b02bf302de07706fbfdc5b5ef66da2545e01eee 100644 (file)
@@ -196,6 +196,9 @@ struct bfq_entity {
 
        /* flag, set to request a weight, ioprio or ioprio_class change  */
        int prio_changed;
+
+       /* flag, set if the entity is counted in groups_with_pending_reqs */
+       bool in_groups_with_pending_reqs;
 };
 
 struct bfq_group;
@@ -448,10 +451,54 @@ struct bfq_data {
         * bfq_weights_tree_[add|remove] for further details).
         */
        struct rb_root queue_weights_tree;
+
        /*
-        * number of groups with requests still waiting for completion
+        * Number of groups with at least one descendant process that
+        * has at least one request waiting for completion. Note that
+        * this accounts for also requests already dispatched, but not
+        * yet completed. Therefore this number of groups may differ
+        * (be larger) than the number of active groups, as a group is
+        * considered active only if its corresponding entity has
+        * descendant queues with at least one request queued. This
+        * number is used to decide whether a scenario is symmetric.
+        * For a detailed explanation see comments on the computation
+        * of the variable asymmetric_scenario in the function
+        * bfq_better_to_idle().
+        *
+        * However, it is hard to compute this number exactly, for
+        * groups with multiple descendant processes. Consider a group
+        * that is inactive, i.e., that has no descendant process with
+        * pending I/O inside BFQ queues. Then suppose that
+        * num_groups_with_pending_reqs is still accounting for this
+        * group, because the group has descendant processes with some
+        * I/O request still in flight. num_groups_with_pending_reqs
+        * should be decremented when the in-flight request of the
+        * last descendant process is finally completed (assuming that
+        * nothing else has changed for the group in the meantime, in
+        * terms of composition of the group and active/inactive state of child
+        * groups and processes). To accomplish this, an additional
+        * pending-request counter must be added to entities, and must
+        * be updated correctly. To avoid this additional field and operations,
+        * we resort to the following tradeoff between simplicity and
+        * accuracy: for an inactive group that is still counted in
+        * num_groups_with_pending_reqs, we decrement
+        * num_groups_with_pending_reqs when the first descendant
+        * process of the group remains with no request waiting for
+        * completion.
+        *
+        * Even this simpler decrement strategy requires a little
+        * carefulness: to avoid multiple decrements, we flag a group,
+        * more precisely an entity representing a group, as still
+        * counted in num_groups_with_pending_reqs when it becomes
+        * inactive. Then, when the first descendant queue of the
+        * entity remains with no request waiting for completion,
+        * num_groups_with_pending_reqs is decremented, and this flag
+        * is reset. After this flag is reset for the entity,
+        * num_groups_with_pending_reqs won't be decremented any
+        * longer in case a new descendant queue of the entity remains
+        * with no request waiting for completion.
         */
-       unsigned int num_active_groups;
+       unsigned int num_groups_with_pending_reqs;
 
        /*
         * Number of bfq_queues containing requests (including the
index 4b0d5fb6916005571d4d4b9885e5a24d194e7a7d..63e0f12be7c98fe7770eb9f1e817f5319690c392 100644 (file)
@@ -1012,7 +1012,10 @@ static void __bfq_activate_entity(struct bfq_entity *entity,
                        container_of(entity, struct bfq_group, entity);
                struct bfq_data *bfqd = bfqg->bfqd;
 
-               bfqd->num_active_groups++;
+               if (!entity->in_groups_with_pending_reqs) {
+                       entity->in_groups_with_pending_reqs = true;
+                       bfqd->num_groups_with_pending_reqs++;
+               }
        }
 #endif
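
The in_groups_with_pending_reqs flag exists purely to keep the new counter balanced: an entity can be activated and deactivated repeatedly while its requests drain, so the counter must be incremented only on the first activation and decremented exactly once, when the last pending request is gone. A minimal counter-with-guard-flag sketch, with a toy entity type:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int num_groups_with_pending_reqs;

    struct entity {                     /* toy stand-in for bfq_entity */
            bool counted;               /* in_groups_with_pending_reqs */
    };

    static void activate(struct entity *e)
    {
            if (!e->counted) {          /* count each group at most once */
                    e->counted = true;
                    num_groups_with_pending_reqs++;
            }
    }

    static void last_request_completed(struct entity *e)
    {
            if (e->counted) {           /* decrement exactly once */
                    e->counted = false;
                    num_groups_with_pending_reqs--;
            }
    }

    int main(void)
    {
            struct entity g = { false };

            activate(&g);
            activate(&g);               /* re-activation: no double count */
            printf("pending groups: %u\n", num_groups_with_pending_reqs);
            last_request_completed(&g);
            last_request_completed(&g); /* idempotent */
            printf("pending groups: %u\n", num_groups_with_pending_reqs);
            return 0;
    }
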
 
index bbfeb4ee2892fcbd9d51de450c41fab7dc466ce5..4f4d9884443b63a8f002ddd754ea467f9a0e4c16 100644 (file)
@@ -605,13 +605,12 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
        if (bio_flagged(bio_src, BIO_THROTTLED))
                bio_set_flag(bio, BIO_THROTTLED);
        bio->bi_opf = bio_src->bi_opf;
+       bio->bi_ioprio = bio_src->bi_ioprio;
        bio->bi_write_hint = bio_src->bi_write_hint;
        bio->bi_iter = bio_src->bi_iter;
        bio->bi_io_vec = bio_src->bi_io_vec;
 
-       bio_clone_blkg_association(bio, bio_src);
-
-       blkcg_bio_issue_init(bio);
+       bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
 
@@ -1256,12 +1255,13 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
        /*
         * success
         */
-       if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
+       if ((iov_iter_rw(iter) == WRITE && (!map_data || !map_data->null_mapped)) ||
            (map_data && map_data->from_user)) {
                ret = bio_copy_from_iter(bio, iter);
                if (ret)
                        goto cleanup;
        } else {
+               zero_fill_bio(bio);
                iov_iter_advance(iter, bio->bi_iter.bi_size);
        }
 
@@ -1956,151 +1956,69 @@ EXPORT_SYMBOL(bioset_init_from_src);
 
 #ifdef CONFIG_BLK_CGROUP
 
-/**
- * bio_associate_blkg - associate a bio with the a blkg
- * @bio: target bio
- * @blkg: the blkg to associate
- *
- * This tries to associate @bio with the specified blkg.  Association failure
- * is handled by walking up the blkg tree.  Therefore, the blkg associated can
- * be anything between @blkg and the root_blkg.  This situation only happens
- * when a cgroup is dying and then the remaining bios will spill to the closest
- * alive blkg.
- *
- * A reference will be taken on the @blkg and will be released when @bio is
- * freed.
- */
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
-{
-       if (unlikely(bio->bi_blkg))
-               return -EBUSY;
-       bio->bi_blkg = blkg_tryget_closest(blkg);
-       return 0;
-}
-
-/**
- * __bio_associate_blkg_from_css - internal blkg association function
- *
- * This in the core association function that all association paths rely on.
- * A blkg reference is taken which is released upon freeing of the bio.
- */
-static int __bio_associate_blkg_from_css(struct bio *bio,
-                                        struct cgroup_subsys_state *css)
-{
-       struct request_queue *q = bio->bi_disk->queue;
-       struct blkcg_gq *blkg;
-       int ret;
-
-       rcu_read_lock();
-
-       if (!css || !css->parent)
-               blkg = q->root_blkg;
-       else
-               blkg = blkg_lookup_create(css_to_blkcg(css), q);
-
-       ret = bio_associate_blkg(bio, blkg);
-
-       rcu_read_unlock();
-       return ret;
-}
-
-/**
- * bio_associate_blkg_from_css - associate a bio with a specified css
- * @bio: target bio
- * @css: target css
- *
- * Associate @bio with the blkg found by combining the css's blkg and the
- * request_queue of the @bio.  This falls back to the queue's root_blkg if
- * the association fails with the css.
- */
-int bio_associate_blkg_from_css(struct bio *bio,
-                               struct cgroup_subsys_state *css)
-{
-       if (unlikely(bio->bi_blkg))
-               return -EBUSY;
-       return __bio_associate_blkg_from_css(bio, css);
-}
-EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
-
 #ifdef CONFIG_MEMCG
 /**
- * bio_associate_blkg_from_page - associate a bio with the page's blkg
+ * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
  * @bio: target bio
  * @page: the page to lookup the blkcg from
  *
- * Associate @bio with the blkg from @page's owning memcg and the respective
- * request_queue.  If cgroup_e_css returns NULL, fall back to the queue's
- * root_blkg.
- *
- * Note: this must be called after bio has an associated device.
+ * Associate @bio with the blkcg from @page's owning memcg.  This works like
+ * every other associate function with respect to references.
  */
-int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
 {
-       struct cgroup_subsys_state *css;
-       int ret;
+       struct cgroup_subsys_state *blkcg_css;
 
-       if (unlikely(bio->bi_blkg))
+       if (unlikely(bio->bi_css))
                return -EBUSY;
        if (!page->mem_cgroup)
                return 0;
-
-       rcu_read_lock();
-
-       css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
-
-       ret = __bio_associate_blkg_from_css(bio, css);
-
-       rcu_read_unlock();
-       return ret;
+       blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
+                                    &io_cgrp_subsys);
+       bio->bi_css = blkcg_css;
+       return 0;
 }
 #endif /* CONFIG_MEMCG */
 
 /**
- * bio_associate_create_blkg - associate a bio with a blkg from q
- * @q: request_queue where bio is going
+ * bio_associate_blkcg - associate a bio with the specified blkcg
  * @bio: target bio
+ * @blkcg_css: css of the blkcg to associate
+ *
+ * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
+ * treat @bio as if it were issued by a task which belongs to the blkcg.
  *
- * Associate @bio with the blkg found from the bio's css and the request_queue.
- * If one is not found, bio_lookup_blkg creates the blkg.  This falls back to
- * the queue's root_blkg if association fails.
+ * This function takes an extra reference of @blkcg_css which will be put
+ * when @bio is released.  The caller must own @bio and is responsible for
+ * synchronizing calls to this function.
  */
-int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
 {
-       struct cgroup_subsys_state *css;
-       int ret = 0;
-
-       /* someone has already associated this bio with a blkg */
-       if (bio->bi_blkg)
-               return ret;
-
-       rcu_read_lock();
-
-       css = blkcg_css();
-
-       ret = __bio_associate_blkg_from_css(bio, css);
-
-       rcu_read_unlock();
-       return ret;
+       if (unlikely(bio->bi_css))
+               return -EBUSY;
+       css_get(blkcg_css);
+       bio->bi_css = blkcg_css;
+       return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
 
 /**
- * bio_reassociate_blkg - reassociate a bio with a blkg from q
- * @q: request_queue where bio is going
+ * bio_associate_blkg - associate a bio with the specified blkg
  * @bio: target bio
+ * @blkg: the blkg to associate
  *
- * When submitting a bio, multiple recursive calls to make_request() may occur.
- * This causes the initial associate done in blkcg_bio_issue_check() to be
- * incorrect and reference the prior request_queue.  This performs reassociation
- * when this situation happens.
+ * Associate @bio with the blkg specified by @blkg.  This is the queue-specific
+ * blkcg information associated with the @bio; a reference will be taken on the
+ * @blkg and released when the bio is freed.
  */
-int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 {
-       if (bio->bi_blkg) {
-               blkg_put(bio->bi_blkg);
-               bio->bi_blkg = NULL;
-       }
-
-       return bio_associate_create_blkg(q, bio);
+       if (unlikely(bio->bi_blkg))
+               return -EBUSY;
+       if (!blkg_try_get(blkg))
+               return -ENODEV;
+       bio->bi_blkg = blkg;
+       return 0;
 }
 
 /**
@@ -2113,6 +2031,10 @@ void bio_disassociate_task(struct bio *bio)
                put_io_context(bio->bi_ioc);
                bio->bi_ioc = NULL;
        }
+       if (bio->bi_css) {
+               css_put(bio->bi_css);
+               bio->bi_css = NULL;
+       }
        if (bio->bi_blkg) {
                blkg_put(bio->bi_blkg);
                bio->bi_blkg = NULL;
@@ -2120,16 +2042,16 @@ void bio_disassociate_task(struct bio *bio)
 }
 
 /**
- * bio_clone_blkg_association - clone blkg association from src to dst bio
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
  * @dst: destination bio
  * @src: source bio
  */
-void bio_clone_blkg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
 {
-       if (src->bi_blkg)
-               bio_associate_blkg(dst, src->bi_blkg);
+       if (src->bi_css)
+               WARN_ON(bio_associate_blkcg(dst, src->bi_css));
 }
-EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
 #endif /* CONFIG_BLK_CGROUP */
 
 static void __init biovec_init_slabs(void)
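
The association helpers this file reverts to all follow one ownership rule: associating a bio takes a reference on the css (or blkg), cloning an association takes its own reference, and bio_disassociate_task() drops whatever the bio holds. A toy refcount model of that rule; css_get()/css_put() here are miniature stand-ins, not the kernel implementations:

    #include <assert.h>
    #include <stdio.h>

    struct css { int refcnt; };         /* toy refcounted object */
    struct bio { struct css *bi_css; };

    static void css_get(struct css *c) { c->refcnt++; }
    static void css_put(struct css *c) { c->refcnt--; }

    static int bio_associate(struct bio *bio, struct css *c)
    {
            if (bio->bi_css)
                    return -1;          /* like -EBUSY: already associated */
            css_get(c);                 /* the association owns one ref */
            bio->bi_css = c;
            return 0;
    }

    static void bio_clone_association(struct bio *dst, struct bio *src)
    {
            if (src->bi_css)
                    bio_associate(dst, src->bi_css); /* takes its own ref */
    }

    static void bio_disassociate(struct bio *bio)
    {
            if (bio->bi_css) {
                    css_put(bio->bi_css);
                    bio->bi_css = NULL;
            }
    }

    int main(void)
    {
            struct css c = { 1 };       /* one ref held by the "cgroup" */
            struct bio a = { 0 }, b = { 0 };

            bio_associate(&a, &c);
            bio_clone_association(&b, &a);
            assert(c.refcnt == 3);
            bio_disassociate(&a);
            bio_disassociate(&b);
            assert(c.refcnt == 1);      /* balanced again */
            printf("refcnt back to %d\n", c.refcnt);
            return 0;
    }
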
index 992da5592c6ed14208116b794975c75c2b3986a1..c630e02836a80d7d406778208c659aebda8fcf06 100644 (file)
@@ -84,37 +84,6 @@ static void blkg_free(struct blkcg_gq *blkg)
        kfree(blkg);
 }
 
-static void __blkg_release(struct rcu_head *rcu)
-{
-       struct blkcg_gq *blkg = container_of(rcu, struct blkcg_gq, rcu_head);
-
-       percpu_ref_exit(&blkg->refcnt);
-
-       /* release the blkcg and parent blkg refs this blkg has been holding */
-       css_put(&blkg->blkcg->css);
-       if (blkg->parent)
-               blkg_put(blkg->parent);
-
-       wb_congested_put(blkg->wb_congested);
-
-       blkg_free(blkg);
-}
-
-/*
- * A group is RCU protected, but having an rcu lock does not mean that one
- * can access all the fields of blkg and assume these are valid.  For
- * example, don't try to follow throtl_data and request queue links.
- *
- * Having a reference to blkg under an rcu allows accesses to only values
- * local to groups like group stats and group rate limits.
- */
-static void blkg_release(struct percpu_ref *ref)
-{
-       struct blkcg_gq *blkg = container_of(ref, struct blkcg_gq, refcnt);
-
-       call_rcu(&blkg->rcu_head, __blkg_release);
-}
-
 /**
  * blkg_alloc - allocate a blkg
  * @blkcg: block cgroup the new blkg is associated with
@@ -141,6 +110,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
+       atomic_set(&blkg->refcnt, 1);
 
        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
@@ -247,11 +217,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                blkg_get(blkg->parent);
        }
 
-       ret = percpu_ref_init(&blkg->refcnt, blkg_release, 0,
-                             GFP_NOWAIT | __GFP_NOWARN);
-       if (ret)
-               goto err_cancel_ref;
-
        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
@@ -284,8 +249,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        blkg_put(blkg);
        return ERR_PTR(ret);
 
-err_cancel_ref:
-       percpu_ref_exit(&blkg->refcnt);
 err_put_congested:
        wb_congested_put(wb_congested);
 err_put_css:
@@ -296,7 +259,7 @@ err_free_blkg:
 }
 
 /**
- * __blkg_lookup_create - lookup blkg, try to create one if not there
+ * blkg_lookup_create - lookup blkg, try to create one if not there
  * @blkcg: blkcg of interest
  * @q: request_queue of interest
  *
@@ -305,11 +268,12 @@ err_free_blkg:
  * that all non-root blkg's have access to the parent blkg.  This function
  * should be called under RCU read lock and @q->queue_lock.
  *
- * Returns the blkg or the closest blkg if blkg_create fails as it walks
- * down from root.
+ * Returns pointer to the looked up or created blkg on success, ERR_PTR()
+ * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
+ * dead and bypassing, returns ERR_PTR(-EBUSY).
  */
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-                                     struct request_queue *q)
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+                                   struct request_queue *q)
 {
        struct blkcg_gq *blkg;
 
@@ -321,7 +285,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
-               return q->root_blkg;
+               return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
 
        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
@@ -329,58 +293,23 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 
        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
-        * non-root blkgs have access to their parents.  Returns the closest
-        * blkg to the intended blkg should blkg_create() fail.
+        * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);
-               struct blkcg_gq *ret_blkg = q->root_blkg;
-
-               while (parent) {
-                       blkg = __blkg_lookup(parent, q, false);
-                       if (blkg) {
-                               /* remember closest blkg */
-                               ret_blkg = blkg;
-                               break;
-                       }
+
+               while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }
 
                blkg = blkg_create(pos, q, NULL);
-               if (IS_ERR(blkg))
-                       return ret_blkg;
-               if (pos == blkcg)
+               if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
 }
 
-/**
- * blkg_lookup_create - find or create a blkg
- * @blkcg: target block cgroup
- * @q: target request_queue
- *
- * This looks up or creates the blkg representing the unique pair
- * of the blkcg and the request_queue.
- */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-                                   struct request_queue *q)
-{
-       struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
-       unsigned long flags;
-
-       if (unlikely(!blkg)) {
-               spin_lock_irqsave(q->queue_lock, flags);
-
-               blkg = __blkg_lookup_create(blkcg, q);
-
-               spin_unlock_irqrestore(q->queue_lock, flags);
-       }
-
-       return blkg;
-}
-
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
        struct blkcg *blkcg = blkg->blkcg;
@@ -424,7 +353,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
-       percpu_ref_kill(&blkg->refcnt);
+       blkg_put(blkg);
 }
 
 /**
@@ -451,6 +380,29 @@ static void blkg_destroy_all(struct request_queue *q)
        q->root_rl.blkg = NULL;
 }
 
+/*
+ * A group is RCU protected, but having an rcu lock does not mean that one
+ * can access all the fields of blkg and assume these are valid.  For
+ * example, don't try to follow throtl_data and request queue links.
+ *
+ * Having a reference to blkg under an rcu allows accesses to only values
+ * local to groups like group stats and group rate limits.
+ */
+void __blkg_release_rcu(struct rcu_head *rcu_head)
+{
+       struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
+
+       /* release the blkcg and parent blkg refs this blkg has been holding */
+       css_put(&blkg->blkcg->css);
+       if (blkg->parent)
+               blkg_put(blkg->parent);
+
+       wb_congested_put(blkg->wb_congested);
+
+       blkg_free(blkg);
+}
+EXPORT_SYMBOL_GPL(__blkg_release_rcu);
+
 /*
  * The next function used by blk_queue_for_each_rl().  It's a bit tricky
  * because the root blkg uses @q->root_rl instead of its own rl.
@@ -1796,7 +1748,8 @@ void blkcg_maybe_throttle_current(void)
        blkg = blkg_lookup(blkcg, q);
        if (!blkg)
                goto out;
-       if (!blkg_tryget(blkg))
+       blkg = blkg_try_get(blkg);
+       if (!blkg)
                goto out;
        rcu_read_unlock();
 
index bc6ea87d10e02cffcaedec7cc9d4567d88cdd6b6..deb56932f8c46e9cb0fe0950000b8da1922addfc 100644 (file)
@@ -785,6 +785,9 @@ void blk_cleanup_queue(struct request_queue *q)
         * prevent that q->request_fn() gets invoked after draining finished.
         */
        blk_freeze_queue(q);
+
+       rq_qos_exit(q);
+
        spin_lock_irq(lock);
        queue_flag_set(QUEUE_FLAG_DEAD, q);
        spin_unlock_irq(lock);
@@ -795,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
         * dispatch may still be in-progress since we dispatch requests
         * from more than one contexts.
         *
-        * No need to quiesce queue if it isn't initialized yet since
-        * blk_freeze_queue() should be enough for cases of passthrough
-        * request.
+        * We rely on the driver to deal with the race in case queue
+        * initialization isn't done yet.
         */
        if (q->mq_ops && blk_queue_init_done(q))
                blk_mq_quiesce_queue(q);
@@ -2432,7 +2434,6 @@ blk_qc_t generic_make_request(struct bio *bio)
                        if (q)
                                blk_queue_exit(q);
                        q = bio->bi_disk->queue;
-                       bio_reassociate_blkg(q, bio);
                        flags = 0;
                        if (bio->bi_opf & REQ_NOWAIT)
                                flags = BLK_MQ_REQ_NOWAIT;
index 28f80d22752858a2b1fcdfefb5f079469ec480ee..38c35c32aff2dcf3fc0e9ac294a649f0be4a1cb1 100644 (file)
@@ -482,12 +482,34 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
                                     spinlock_t *lock)
 {
        struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
-       struct blkcg_gq *blkg = bio->bi_blkg;
+       struct blkcg *blkcg;
+       struct blkcg_gq *blkg;
+       struct request_queue *q = rqos->q;
        bool issue_as_root = bio_issue_as_root_blkg(bio);
 
        if (!blk_iolatency_enabled(blkiolat))
                return;
 
+       rcu_read_lock();
+       blkcg = bio_blkcg(bio);
+       bio_associate_blkcg(bio, &blkcg->css);
+       blkg = blkg_lookup(blkcg, q);
+       if (unlikely(!blkg)) {
+               if (!lock)
+                       spin_lock_irq(q->queue_lock);
+               blkg = blkg_lookup_create(blkcg, q);
+               if (IS_ERR(blkg))
+                       blkg = NULL;
+               if (!lock)
+                       spin_unlock_irq(q->queue_lock);
+       }
+       if (!blkg)
+               goto out;
+
+       bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+       bio_associate_blkg(bio, blkg);
+out:
+       rcu_read_unlock();
        while (blkg && blkg->parent) {
                struct iolatency_grp *iolat = blkg_to_lat(blkg);
                if (!iolat) {
@@ -708,7 +730,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
                 * We could be exiting, don't access the pd unless we have a
                 * ref on the blkg.
                 */
-               if (!blkg_tryget(blkg))
+               if (!blkg_try_get(blkg))
                        continue;
 
                iolat = blkg_to_lat(blkg);
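
blkg_lookup_create() now reports failure through the returned pointer itself, which is why the caller above checks IS_ERR(). For illustration, a self-contained userspace re-creation of that kernel convention:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* Error codes live in the top MAX_ERRNO values of the address space,
     * so one pointer can carry either a valid result or an errno.
     */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *blkg = ERR_PTR(-16);      /* -EBUSY: queue is bypassing */

            if (IS_ERR(blkg))
                    printf("lookup/create failed: %ld\n", PTR_ERR(blkg));
            return 0;
    }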
index 76f867ea9a9b92fdfa921843a6a0ffe2c4297087..5f2c429d437847447bc329a00c11a91f58a28edf 100644 (file)
@@ -51,16 +51,14 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
 
-       while (nr_sects) {
-               unsigned int req_sects = nr_sects;
-               sector_t end_sect;
+       if (!nr_sects)
+               return -EINVAL;
 
-               if (!req_sects)
-                       goto fail;
-               if (req_sects > UINT_MAX >> 9)
-                       req_sects = UINT_MAX >> 9;
+       while (nr_sects) {
+               sector_t req_sects = min_t(sector_t, nr_sects,
+                               bio_allowed_max_sectors(q));
 
-               end_sect = sector + req_sects;
+               WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
 
                bio = blk_next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
@@ -68,8 +66,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                bio_set_op_attrs(bio, op, 0);
 
                bio->bi_iter.bi_size = req_sects << 9;
+               sector += req_sects;
                nr_sects -= req_sects;
-               sector = end_sect;
 
                /*
                 * We can loop for a long time in here, if someone does
@@ -82,14 +80,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
        *biop = bio;
        return 0;
-
-fail:
-       if (bio) {
-               submit_bio_wait(bio);
-               bio_put(bio);
-       }
-       *biop = NULL;
-       return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
 
@@ -161,7 +151,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                return -EOPNOTSUPP;
 
        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
-       max_write_same_sectors = UINT_MAX >> 9;
+       max_write_same_sectors = bio_allowed_max_sectors(q);
 
        while (nr_sects) {
                bio = blk_next_bio(bio, 1, gfp_mask);
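
The rewritten loop above caps each discard bio at bio_allowed_max_sectors(), i.e. UINT_MAX rounded down to the logical block size, expressed in 512-byte sectors, so bi_size can never overflow. A userspace sketch of the same chunking arithmetic (the 20 GiB figure is an arbitrary example):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t allowed_max_sectors(unsigned int lbs)
    {
            /* round_down(UINT_MAX, lbs) >> 9, as in the kernel helper */
            return (uint64_t)(UINT32_MAX - (UINT32_MAX % lbs)) >> 9;
    }

    int main(void)
    {
            uint64_t nr_sects = 20ULL << 21;        /* 20 GiB in sectors */
            uint64_t max = allowed_max_sectors(4096);
            uint64_t sector = 0;

            while (nr_sects) {
                    uint64_t req = nr_sects < max ? nr_sects : max;

                    printf("discard at %llu, %llu sectors\n",
                           (unsigned long long)sector,
                           (unsigned long long)req);
                    sector += req;
                    nr_sects -= req;
            }
            return 0;
    }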
index 42a46744c11b45e4970bbe8a918fcf8b29d895d8..7695034f4b87fa382237435e0b614ac086c58ee8 100644 (file)
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
-       if (pb.bv_offset)
+       if (pb.bv_offset & queue_virt_boundary(q))
                return true;
 
        /*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
        /* Zero-sector (unknown) and one-sector granularities are the same.  */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-       max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+       max_discard_sectors = min(q->limits.max_discard_sectors,
+                       bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;
 
        if (unlikely(!max_discard_sectors)) {
@@ -714,6 +715,31 @@ static void blk_account_io_merge(struct request *req)
                part_stat_unlock();
        }
 }
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver treats every bio as a range
+ * and sends them to the controller together. The ranges need not be
+ * contiguous.
+ * Otherwise, the bios/requests are handled the same as others, which
+ * must be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+       if (req_op(req) == REQ_OP_DISCARD &&
+           queue_max_discard_segments(req->q) > 1)
+               return true;
+       return false;
+}
+
+enum elv_merge blk_try_req_merge(struct request *req, struct request *next)
+{
+       if (blk_discard_mergable(req))
+               return ELEVATOR_DISCARD_MERGE;
+       else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
+               return ELEVATOR_BACK_MERGE;
+
+       return ELEVATOR_NO_MERGE;
+}
 
 /*
  * For non-mq, this has to be called with the request spinlock acquired.
@@ -731,12 +757,6 @@ static struct request *attempt_merge(struct request_queue *q,
        if (req_op(req) != req_op(next))
                return NULL;
 
-       /*
-        * not contiguous
-        */
-       if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
-               return NULL;
-
        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
@@ -760,11 +780,19 @@ static struct request *attempt_merge(struct request_queue *q,
         * counts here. Handle DISCARDs separately, as they
         * have separate settings.
         */
-       if (req_op(req) == REQ_OP_DISCARD) {
+
+       switch (blk_try_req_merge(req, next)) {
+       case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
-       } else if (!ll_merge_requests_fn(q, req, next))
+               break;
+       case ELEVATOR_BACK_MERGE:
+               if (!ll_merge_requests_fn(q, req, next))
+                       return NULL;
+               break;
+       default:
                return NULL;
+       }
 
        /*
         * If failfast settings disagree or any of the two is already
@@ -792,7 +820,7 @@ static struct request *attempt_merge(struct request_queue *q,
 
        req->__data_len += blk_rq_bytes(next);
 
-       if (req_op(req) != REQ_OP_DISCARD)
+       if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);
 
        /*
@@ -888,8 +916,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 
 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
 {
-       if (req_op(rq) == REQ_OP_DISCARD &&
-           queue_max_discard_segments(rq->q) > 1)
+       if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
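
The new blk_discard_mergable()/blk_try_req_merge() pair encodes the rule from the comment above: only a queue that accepts multi-range discards may merge non-contiguous requests. A toy model with simplified stand-in fields:

    #include <stdio.h>
    #include <stdbool.h>

    struct rq {
            unsigned long long pos; /* start sector */
            unsigned int sectors;   /* length in sectors */
            bool discard;
    };

    static const char *try_merge(const struct rq *a, const struct rq *b,
                                 unsigned int max_discard_segments)
    {
            if (a->discard && max_discard_segments > 1)
                    return "DISCARD_MERGE";         /* ranges may be sparse */
            if (a->pos + a->sectors == b->pos)
                    return "BACK_MERGE";            /* must be contiguous */
            return "NO_MERGE";
    }

    int main(void)
    {
            struct rq a = { .pos = 0, .sectors = 8, .discard = true };
            struct rq b = { .pos = 64, .sectors = 8, .discard = true };

            printf("%s\n", try_merge(&a, &b, 256)); /* DISCARD_MERGE */
            printf("%s\n", try_merge(&a, &b, 1));   /* NO_MERGE */
            return 0;
    }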
index 3f91c6e5b17a95876f2c9c9ccf7cc481f159c8cc..6a7566244de30bb72b6d779c38c9f0b8d65fd616 100644 (file)
@@ -1764,7 +1764,7 @@ insert:
        if (bypass_insert)
                return BLK_STS_RESOURCE;
 
-       blk_mq_sched_insert_request(rq, false, run_queue, false);
+       blk_mq_request_bypass_insert(rq, run_queue);
        return BLK_STS_OK;
 }
 
@@ -1780,7 +1780,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 
        ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
        if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
-               blk_mq_sched_insert_request(rq, false, true, false);
+               blk_mq_request_bypass_insert(rq, true);
        else if (ret != BLK_STS_OK)
                blk_mq_end_request(rq, ret);
 
@@ -1815,7 +1815,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
                if (ret != BLK_STS_OK) {
                        if (ret == BLK_STS_RESOURCE ||
                                        ret == BLK_STS_DEV_RESOURCE) {
-                               list_add(&rq->queuelist, list);
+                               blk_mq_request_bypass_insert(rq,
+                                                       list_empty(list));
                                break;
                        }
                        blk_mq_end_request(rq, ret);
index 0641533597f1b2cc389e6579a986d6df17115646..844a454a7b3a60a0c3186b589ab12daad0e9b3b8 100644 (file)
@@ -1007,8 +1007,6 @@ void blk_unregister_queue(struct gendisk *disk)
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
 
-       rq_qos_exit(q);
-
        mutex_lock(&q->sysfs_lock);
        if (q->request_fn || (q->mq_ops && q->elevator))
                elv_unregister_queue(q);
index 4bda70e8db48a9150880dc04a8a1f3fcb8844ac6..db1a3a2ae00617fbe1e4804bbfd327e37ce55737 100644 (file)
@@ -2115,11 +2115,21 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 }
 #endif
 
+static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
+{
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+       /* fallback to root_blkg if we fail to get a blkg ref */
+       if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
+               bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
+       bio_issue_init(&bio->bi_issue, bio_sectors(bio));
+#endif
+}
+
 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                    struct bio *bio)
 {
        struct throtl_qnode *qn = NULL;
-       struct throtl_grp *tg = blkg_to_tg(blkg);
+       struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
        struct throtl_service_queue *sq;
        bool rw = bio_data_dir(bio);
        bool throttled = false;
@@ -2138,6 +2148,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
        if (unlikely(blk_queue_bypass(q)))
                goto out_unlock;
 
+       blk_throtl_assoc_bio(tg, bio);
        blk_throtl_update_idletime(tg);
 
        sq = &tg->service_queue;
index a1841b8ff12963a883047780762229f923989f01..0089fefdf771d7082ee05ca97504005090a26025 100644 (file)
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
                struct bio_vec *bprv, unsigned int offset)
 {
-       return offset ||
+       return (offset & queue_virt_boundary(q)) ||
                ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
@@ -395,6 +395,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
        return rq->__deadline & ~0x1UL;
 }
 
+/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int'; meanwhile it has to be aligned to the
+ * logical block size, which is the minimum unit accepted by the hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+       return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
 /*
  * Internal io_context interface
  */
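
The __bvec_gap_to_prev() fix above only flags a gap when an offset actually crosses the device's virtual boundary mask, instead of whenever it is non-zero. A small userspace illustration, where 0x1ff models a 512-byte boundary:

    #include <stdio.h>
    #include <stdbool.h>

    static bool gap_to_prev(unsigned long mask, unsigned int prev_off,
                            unsigned int prev_len, unsigned int off)
    {
            return (off & mask) || ((prev_off + prev_len) & mask);
    }

    int main(void)
    {
            unsigned long mask = 0x1ff;     /* 512-byte virt boundary */

            /* boundary-aligned offset: no longer treated as a gap */
            printf("%d\n", gap_to_prev(mask, 0, 512, 1024));        /* 0 */
            /* offset inside a boundary window: still a gap */
            printf("%d\n", gap_to_prev(mask, 0, 512, 256));         /* 1 */
            return 0;
    }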
index cf49fe02f65cd017eb2132fd3475ba29a5d3cf75..559c55bda040e2da3d2ec1bc66dacb6e7f02b829 100644 (file)
@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
                return NULL;
        bio->bi_disk            = bio_src->bi_disk;
        bio->bi_opf             = bio_src->bi_opf;
+       bio->bi_ioprio          = bio_src->bi_ioprio;
        bio->bi_write_hint      = bio_src->bi_write_hint;
        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
@@ -276,9 +277,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
                }
        }
 
-       bio_clone_blkg_association(bio, bio_src);
-
-       blkcg_bio_issue_init(bio);
+       bio_clone_blkcg_association(bio, bio_src);
 
        return bio;
 }
index 6a3d87dd3c1ac42abf04223ea14eb673e7c5ec5c..ed41aa978c4abc66cd2845f4c18177c97387a881 100644 (file)
@@ -3759,7 +3759,7 @@ static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
        uint64_t serial_nr;
 
        rcu_read_lock();
-       serial_nr = __bio_blkcg(bio)->css.serial_nr;
+       serial_nr = bio_blkcg(bio)->css.serial_nr;
        rcu_read_unlock();
 
        /*
@@ -3824,7 +3824,7 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
        struct cfq_group *cfqg;
 
        rcu_read_lock();
-       cfqg = cfq_lookup_cfqg(cfqd, __bio_blkcg(bio));
+       cfqg = cfq_lookup_cfqg(cfqd, bio_blkcg(bio));
        if (!cfqg) {
                cfqq = &cfqd->oom_cfqq;
                goto out;
index f7a235db56aaa78ee8cfa05ef391ff0e09302aba..05c91eb10ca1fd97fde1dd0593d975b99b798e1c 100644 (file)
@@ -1812,7 +1812,7 @@ config CRYPTO_USER_API_AEAD
          cipher algorithms.
 
 config CRYPTO_STATS
-       bool "Crypto usage statistics for User-space"
+       bool
        help
          This option enables the gathering of crypto stats.
          This will collect:
index f3702e533ff41044694625aad813abc58b8af4dd..be70ca6c85d31e89329b032ce7445aa1c2b85f32 100644 (file)
@@ -21,6 +21,18 @@ config ASYMMETRIC_PUBLIC_KEY_SUBTYPE
          appropriate hash algorithms (such as SHA-1) must be available.
          ENOPKG will be reported if the requisite algorithm is unavailable.
 
+config ASYMMETRIC_TPM_KEY_SUBTYPE
+       tristate "Asymmetric TPM backed private key subtype"
+       depends on TCG_TPM
+       depends on TRUSTED_KEYS
+       select CRYPTO_HMAC
+       select CRYPTO_SHA1
+       select CRYPTO_HASH_INFO
+       help
+         This option provides support for TPM backed private key type handling.
+         Operations such as sign, verify, encrypt, decrypt are performed by
+         the TPM after the private key is loaded.
+
 config X509_CERTIFICATE_PARSER
        tristate "X.509 certificate parser"
        depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
@@ -31,6 +43,25 @@ config X509_CERTIFICATE_PARSER
          data and provides the ability to instantiate a crypto key from a
          public key packet found inside the certificate.
 
+config PKCS8_PRIVATE_KEY_PARSER
+       tristate "PKCS#8 private key parser"
+       depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+       select ASN1
+       select OID_REGISTRY
+       help
+         This option provides support for parsing PKCS#8 format blobs for
+         private key data and provides the ability to instantiate a crypto key
+         from that data.
+
+config TPM_KEY_PARSER
+       tristate "TPM private key parser"
+       depends on ASYMMETRIC_TPM_KEY_SUBTYPE
+       select ASN1
+       help
+         This option provides support for parsing TPM format blobs for
+         private key data and provides the ability to instantiate a crypto key
+         from that data.
+
 config PKCS7_MESSAGE_PARSER
        tristate "PKCS#7 message parser"
        depends on X509_CERTIFICATE_PARSER
index d4b2e1b2dc650837ae98489799a3431ee1ce70d4..28b91adba2aed35f830e6c6f7d356004f1d5248c 100644 (file)
@@ -11,6 +11,7 @@ asymmetric_keys-y := \
        signature.o
 
 obj-$(CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE) += public_key.o
+obj-$(CONFIG_ASYMMETRIC_TPM_KEY_SUBTYPE) += asym_tpm.o
 
 #
 # X.509 Certificate handling
@@ -29,6 +30,19 @@ $(obj)/x509_cert_parser.o: \
 $(obj)/x509.asn1.o: $(obj)/x509.asn1.c $(obj)/x509.asn1.h
 $(obj)/x509_akid.asn1.o: $(obj)/x509_akid.asn1.c $(obj)/x509_akid.asn1.h
 
+#
+# PKCS#8 private key handling
+#
+obj-$(CONFIG_PKCS8_PRIVATE_KEY_PARSER) += pkcs8_key_parser.o
+pkcs8_key_parser-y := \
+       pkcs8.asn1.o \
+       pkcs8_parser.o
+
+$(obj)/pkcs8_parser.o: $(obj)/pkcs8.asn1.h
+$(obj)/pkcs8.asn1.o: $(obj)/pkcs8.asn1.c $(obj)/pkcs8.asn1.h
+
+clean-files    += pkcs8.asn1.c pkcs8.asn1.h
+
 #
 # PKCS#7 message handling
 #
@@ -61,3 +75,14 @@ verify_signed_pefile-y := \
 
 $(obj)/mscode_parser.o: $(obj)/mscode.asn1.h
 $(obj)/mscode.asn1.o: $(obj)/mscode.asn1.c $(obj)/mscode.asn1.h
+
+#
+# TPM private key parsing
+#
+obj-$(CONFIG_TPM_KEY_PARSER) += tpm_key_parser.o
+tpm_key_parser-y := \
+       tpm.asn1.o \
+       tpm_parser.o
+
+$(obj)/tpm_parser.o: $(obj)/tpm.asn1.h
+$(obj)/tpm.asn1.o: $(obj)/tpm.asn1.c $(obj)/tpm.asn1.h
diff --git a/crypto/asymmetric_keys/asym_tpm.c b/crypto/asymmetric_keys/asym_tpm.c
new file mode 100644 (file)
index 0000000..5d4c270
--- /dev/null
@@ -0,0 +1,988 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "ASYM-TPM: "fmt
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/scatterlist.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+#include <crypto/akcipher.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <asm/unaligned.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/trusted.h>
+#include <crypto/asym_tpm_subtype.h>
+#include <crypto/public_key.h>
+
+#define TPM_ORD_FLUSHSPECIFIC  186
+#define TPM_ORD_LOADKEY2       65
+#define TPM_ORD_UNBIND         30
+#define TPM_ORD_SIGN           60
+#define TPM_LOADKEY2_SIZE              59
+#define TPM_FLUSHSPECIFIC_SIZE         18
+#define TPM_UNBIND_SIZE                        63
+#define TPM_SIGN_SIZE                  63
+
+#define TPM_RT_KEY                      0x00000001
+
+/*
+ * Load a TPM key from the blob provided by userspace
+ */
+static int tpm_loadkey2(struct tpm_buf *tb,
+                       uint32_t keyhandle, unsigned char *keyauth,
+                       const unsigned char *keyblob, int keybloblen,
+                       uint32_t *newhandle)
+{
+       unsigned char nonceodd[TPM_NONCE_SIZE];
+       unsigned char enonce[TPM_NONCE_SIZE];
+       unsigned char authdata[SHA1_DIGEST_SIZE];
+       uint32_t authhandle = 0;
+       unsigned char cont = 0;
+       uint32_t ordinal;
+       int ret;
+
+       ordinal = htonl(TPM_ORD_LOADKEY2);
+
+       /* session for loading the key */
+       ret = oiap(tb, &authhandle, enonce);
+       if (ret < 0) {
+               pr_info("oiap failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* generate odd nonce */
+       ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+       if (ret < 0) {
+               pr_info("tpm_get_random failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* calculate authorization HMAC value */
+       ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce,
+                          nonceodd, cont, sizeof(uint32_t), &ordinal,
+                          keybloblen, keyblob, 0, 0);
+       if (ret < 0)
+               return ret;
+
+       /* build the request buffer */
+       INIT_BUF(tb);
+       store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+       store32(tb, TPM_LOADKEY2_SIZE + keybloblen);
+       store32(tb, TPM_ORD_LOADKEY2);
+       store32(tb, keyhandle);
+       storebytes(tb, keyblob, keybloblen);
+       store32(tb, authhandle);
+       storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+       store8(tb, cont);
+       storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+
+       ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+       if (ret < 0) {
+               pr_info("authhmac failed (%d)\n", ret);
+               return ret;
+       }
+
+       ret = TSS_checkhmac1(tb->data, ordinal, nonceodd, keyauth,
+                            SHA1_DIGEST_SIZE, 0, 0);
+       if (ret < 0) {
+               pr_info("TSS_checkhmac1 failed (%d)\n", ret);
+               return ret;
+       }
+
+       *newhandle = LOAD32(tb->data, TPM_DATA_OFFSET);
+       return 0;
+}
+
+/*
+ * Execute the FlushSpecific TPM command
+ */
+static int tpm_flushspecific(struct tpm_buf *tb, uint32_t handle)
+{
+       INIT_BUF(tb);
+       store16(tb, TPM_TAG_RQU_COMMAND);
+       store32(tb, TPM_FLUSHSPECIFIC_SIZE);
+       store32(tb, TPM_ORD_FLUSHSPECIFIC);
+       store32(tb, handle);
+       store32(tb, TPM_RT_KEY);
+
+       return trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+}
+
+/*
+ * Decrypt a blob provided by userspace using a specific key handle.
+ * The handle is a well-known handle or one previously loaded by e.g. LoadKey2.
+ */
+static int tpm_unbind(struct tpm_buf *tb,
+                       uint32_t keyhandle, unsigned char *keyauth,
+                       const unsigned char *blob, uint32_t bloblen,
+                       void *out, uint32_t outlen)
+{
+       unsigned char nonceodd[TPM_NONCE_SIZE];
+       unsigned char enonce[TPM_NONCE_SIZE];
+       unsigned char authdata[SHA1_DIGEST_SIZE];
+       uint32_t authhandle = 0;
+       unsigned char cont = 0;
+       uint32_t ordinal;
+       uint32_t datalen;
+       int ret;
+
+       ordinal = htonl(TPM_ORD_UNBIND);
+       datalen = htonl(bloblen);
+
+       /* session for loading the key */
+       ret = oiap(tb, &authhandle, enonce);
+       if (ret < 0) {
+               pr_info("oiap failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* generate odd nonce */
+       ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+       if (ret < 0) {
+               pr_info("tpm_get_random failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* calculate authorization HMAC value */
+       ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce,
+                          nonceodd, cont, sizeof(uint32_t), &ordinal,
+                          sizeof(uint32_t), &datalen,
+                          bloblen, blob, 0, 0);
+       if (ret < 0)
+               return ret;
+
+       /* build the request buffer */
+       INIT_BUF(tb);
+       store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+       store32(tb, TPM_UNBIND_SIZE + bloblen);
+       store32(tb, TPM_ORD_UNBIND);
+       store32(tb, keyhandle);
+       store32(tb, bloblen);
+       storebytes(tb, blob, bloblen);
+       store32(tb, authhandle);
+       storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+       store8(tb, cont);
+       storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+
+       ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+       if (ret < 0) {
+               pr_info("authhmac failed (%d)\n", ret);
+               return ret;
+       }
+
+       datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+
+       ret = TSS_checkhmac1(tb->data, ordinal, nonceodd,
+                            keyauth, SHA1_DIGEST_SIZE,
+                            sizeof(uint32_t), TPM_DATA_OFFSET,
+                            datalen, TPM_DATA_OFFSET + sizeof(uint32_t),
+                            0, 0);
+       if (ret < 0) {
+               pr_info("TSS_checkhmac1 failed (%d)\n", ret);
+               return ret;
+       }
+
+       memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t),
+              min(outlen, datalen));
+
+       return datalen;
+}
+
+/*
+ * Sign a blob provided by userspace (that has had the hash function applied)
+ * using a specific key handle.  The handle is assumed to have been previously
+ * loaded by e.g. LoadKey2.
+ *
+ * Note that the key signature scheme of the used key should be set to
+ * TPM_SS_RSASSAPKCS1v15_DER.  This allows the hashed input to be of any size
+ * up to key_length_in_bytes - 11 and not be limited to size 20 like the
+ * TPM_SS_RSASSAPKCS1v15_SHA1 signature scheme.
+ */
+static int tpm_sign(struct tpm_buf *tb,
+                   uint32_t keyhandle, unsigned char *keyauth,
+                   const unsigned char *blob, uint32_t bloblen,
+                   void *out, uint32_t outlen)
+{
+       unsigned char nonceodd[TPM_NONCE_SIZE];
+       unsigned char enonce[TPM_NONCE_SIZE];
+       unsigned char authdata[SHA1_DIGEST_SIZE];
+       uint32_t authhandle = 0;
+       unsigned char cont = 0;
+       uint32_t ordinal;
+       uint32_t datalen;
+       int ret;
+
+       ordinal = htonl(TPM_ORD_SIGN);
+       datalen = htonl(bloblen);
+
+       /* session for loading the key */
+       ret = oiap(tb, &authhandle, enonce);
+       if (ret < 0) {
+               pr_info("oiap failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* generate odd nonce */
+       ret = tpm_get_random(NULL, nonceodd, TPM_NONCE_SIZE);
+       if (ret < 0) {
+               pr_info("tpm_get_random failed (%d)\n", ret);
+               return ret;
+       }
+
+       /* calculate authorization HMAC value */
+       ret = TSS_authhmac(authdata, keyauth, SHA1_DIGEST_SIZE, enonce,
+                          nonceodd, cont, sizeof(uint32_t), &ordinal,
+                          sizeof(uint32_t), &datalen,
+                          bloblen, blob, 0, 0);
+       if (ret < 0)
+               return ret;
+
+       /* build the request buffer */
+       INIT_BUF(tb);
+       store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+       store32(tb, TPM_SIGN_SIZE + bloblen);
+       store32(tb, TPM_ORD_SIGN);
+       store32(tb, keyhandle);
+       store32(tb, bloblen);
+       storebytes(tb, blob, bloblen);
+       store32(tb, authhandle);
+       storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+       store8(tb, cont);
+       storebytes(tb, authdata, SHA1_DIGEST_SIZE);
+
+       ret = trusted_tpm_send(tb->data, MAX_BUF_SIZE);
+       if (ret < 0) {
+               pr_info("authhmac failed (%d)\n", ret);
+               return ret;
+       }
+
+       datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+
+       ret = TSS_checkhmac1(tb->data, ordinal, nonceodd,
+                            keyauth, SHA1_DIGEST_SIZE,
+                            sizeof(uint32_t), TPM_DATA_OFFSET,
+                            datalen, TPM_DATA_OFFSET + sizeof(uint32_t),
+                            0, 0);
+       if (ret < 0) {
+               pr_info("TSS_checkhmac1 failed (%d)\n", ret);
+               return ret;
+       }
+
+       memcpy(out, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t),
+              min(datalen, outlen));
+
+       return datalen;
+}
+
+/*
+ * Maximum buffer size for the BER/DER encoded public key.  The public key
+ * is of the form SEQUENCE { INTEGER n, INTEGER e } where n is at most a
+ * 2048-bit modulus and e is usually 65537.
+ * The encoding overhead is:
+ * - max 4 bytes for SEQUENCE
+ *   - max 4 bytes for INTEGER n type/length
+ *     - 257 bytes of n
+ *   - max 2 bytes for INTEGER e type/length
+ *     - 3 bytes of e
+ */
+#define PUB_KEY_BUF_SIZE (4 + 4 + 257 + 2 + 3)
+
+/*
+ * Provide a part of a description of the key for /proc/keys.
+ */
+static void asym_tpm_describe(const struct key *asymmetric_key,
+                             struct seq_file *m)
+{
+       struct tpm_key *tk = asymmetric_key->payload.data[asym_crypto];
+
+       if (!tk)
+               return;
+
+       seq_puts(m, "TPM1.2/Blob");
+}
+
+static void asym_tpm_destroy(void *payload0, void *payload3)
+{
+       struct tpm_key *tk = payload0;
+
+       if (!tk)
+               return;
+
+       kfree(tk->blob);
+       tk->blob_len = 0;
+
+       kfree(tk);
+}
+
+/* How many bytes will it take to encode the length */
+static inline uint32_t definite_length(uint32_t len)
+{
+       if (len <= 127)
+               return 1;
+       if (len <= 255)
+               return 2;
+       return 3;
+}
+
+static inline uint8_t *encode_tag_length(uint8_t *buf, uint8_t tag,
+                                        uint32_t len)
+{
+       *buf++ = tag;
+
+       if (len <= 127) {
+               buf[0] = len;
+               return buf + 1;
+       }
+
+       if (len <= 255) {
+               buf[0] = 0x81;
+               buf[1] = len;
+               return buf + 2;
+       }
+
+       buf[0] = 0x82;
+       put_unaligned_be16(len, buf + 1);
+       return buf + 3;
+}
+
+static uint32_t derive_pub_key(const void *pub_key, uint32_t len, uint8_t *buf)
+{
+       uint8_t *cur = buf;
+       uint32_t n_len = definite_length(len) + 1 + len + 1;
+       uint32_t e_len = definite_length(3) + 1 + 3;
+       uint8_t e[3] = { 0x01, 0x00, 0x01 };
+
+       /* SEQUENCE */
+       cur = encode_tag_length(cur, 0x30, n_len + e_len);
+       /* INTEGER n */
+       cur = encode_tag_length(cur, 0x02, len + 1);
+       cur[0] = 0x00;
+       memcpy(cur + 1, pub_key, len);
+       cur += len + 1;
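+       /* INTEGER e */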
+       cur = encode_tag_length(cur, 0x02, sizeof(e));
+       memcpy(cur, e, sizeof(e));
+       cur += sizeof(e);
+
+       return cur - buf;
+}
+
+/*
+ * Determine the crypto algorithm name.
+ */
+static int determine_akcipher(const char *encoding, const char *hash_algo,
+                             char alg_name[CRYPTO_MAX_ALG_NAME])
+{
+       if (strcmp(encoding, "pkcs1") == 0) {
+               if (!hash_algo) {
+                       strcpy(alg_name, "pkcs1pad(rsa)");
+                       return 0;
+               }
+
+               if (snprintf(alg_name, CRYPTO_MAX_ALG_NAME, "pkcs1pad(rsa,%s)",
+                            hash_algo) >= CRYPTO_MAX_ALG_NAME)
+                       return -EINVAL;
+
+               return 0;
+       }
+
+       if (strcmp(encoding, "raw") == 0) {
+               strcpy(alg_name, "rsa");
+               return 0;
+       }
+
+       return -ENOPKG;
+}
+
+/*
+ * Query information about a key.
+ */
+static int tpm_key_query(const struct kernel_pkey_params *params,
+                        struct kernel_pkey_query *info)
+{
+       struct tpm_key *tk = params->key->payload.data[asym_crypto];
+       int ret;
+       char alg_name[CRYPTO_MAX_ALG_NAME];
+       struct crypto_akcipher *tfm;
+       uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
+       uint32_t der_pub_key_len;
+       int len;
+
+       /* TPM only works on private keys; public key ops are done in software */
+       ret = determine_akcipher(params->encoding, params->hash_algo, alg_name);
+       if (ret < 0)
+               return ret;
+
+       tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
+                                        der_pub_key);
+
+       ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
+       if (ret < 0)
+               goto error_free_tfm;
+
+       len = crypto_akcipher_maxsize(tfm);
+
+       info->key_size = tk->key_len;
+       info->max_data_size = tk->key_len / 8;
+       info->max_sig_size = len;
+       info->max_enc_size = len;
+       info->max_dec_size = tk->key_len / 8;
+
+       info->supported_ops = KEYCTL_SUPPORTS_ENCRYPT |
+                             KEYCTL_SUPPORTS_DECRYPT |
+                             KEYCTL_SUPPORTS_VERIFY |
+                             KEYCTL_SUPPORTS_SIGN;
+
+       ret = 0;
+error_free_tfm:
+       crypto_free_akcipher(tfm);
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       return ret;
+}
+
+/*
+ * Encryption operation is performed with the public key.  Hence it is done
+ * in software.
+ */
+static int tpm_key_encrypt(struct tpm_key *tk,
+                          struct kernel_pkey_params *params,
+                          const void *in, void *out)
+{
+       char alg_name[CRYPTO_MAX_ALG_NAME];
+       struct crypto_akcipher *tfm;
+       struct akcipher_request *req;
+       struct crypto_wait cwait;
+       struct scatterlist in_sg, out_sg;
+       uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
+       uint32_t der_pub_key_len;
+       int ret;
+
+       pr_devel("==>%s()\n", __func__);
+
+       ret = determine_akcipher(params->encoding, params->hash_algo, alg_name);
+       if (ret < 0)
+               return ret;
+
+       tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
+                                        der_pub_key);
+
+       ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
+       if (ret < 0)
+               goto error_free_tfm;
+
+       /* set_pub_key left ret at 0, so flag allocation failure explicitly */
+       ret = -ENOMEM;
+       req = akcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req)
+               goto error_free_tfm;
+
+       sg_init_one(&in_sg, in, params->in_len);
+       sg_init_one(&out_sg, out, params->out_len);
+       akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
+                                  params->out_len);
+       crypto_init_wait(&cwait);
+       akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                                     CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     crypto_req_done, &cwait);
+
+       ret = crypto_akcipher_encrypt(req);
+       ret = crypto_wait_req(ret, &cwait);
+
+       if (ret == 0)
+               ret = req->dst_len;
+
+       akcipher_request_free(req);
+error_free_tfm:
+       crypto_free_akcipher(tfm);
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       return ret;
+}
+
+/*
+ * Decryption operation is performed with the private key in the TPM.
+ */
+static int tpm_key_decrypt(struct tpm_key *tk,
+                          struct kernel_pkey_params *params,
+                          const void *in, void *out)
+{
+       struct tpm_buf *tb;
+       uint32_t keyhandle;
+       uint8_t srkauth[SHA1_DIGEST_SIZE];
+       uint8_t keyauth[SHA1_DIGEST_SIZE];
+       int r;
+
+       pr_devel("==>%s()\n", __func__);
+
+       if (params->hash_algo)
+               return -ENOPKG;
+
+       if (strcmp(params->encoding, "pkcs1"))
+               return -ENOPKG;
+
+       tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+       if (!tb)
+               return -ENOMEM;
+
+       /* TODO: Handle a non-all zero SRK authorization */
+       memset(srkauth, 0, sizeof(srkauth));
+
+       r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+                               tk->blob, tk->blob_len, &keyhandle);
+       if (r < 0) {
+               pr_devel("loadkey2 failed (%d)\n", r);
+               goto error;
+       }
+
+       /* TODO: Handle a non-all zero key authorization */
+       memset(keyauth, 0, sizeof(keyauth));
+
+       r = tpm_unbind(tb, keyhandle, keyauth,
+                      in, params->in_len, out, params->out_len);
+       if (r < 0)
+               pr_devel("tpm_unbind failed (%d)\n", r);
+
+       if (tpm_flushspecific(tb, keyhandle) < 0)
+               pr_devel("flushspecific failed (%d)\n", r);
+
+error:
+       kzfree(tb);
+       pr_devel("<==%s() = %d\n", __func__, r);
+       return r;
+}
+
+/*
+ * Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
+ */
+static const u8 digest_info_md5[] = {
+       0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
+       0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
+       0x05, 0x00, 0x04, 0x10
+};
+
+static const u8 digest_info_sha1[] = {
+       0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+       0x2b, 0x0e, 0x03, 0x02, 0x1a,
+       0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 digest_info_rmd160[] = {
+       0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
+       0x2b, 0x24, 0x03, 0x02, 0x01,
+       0x05, 0x00, 0x04, 0x14
+};
+
+static const u8 digest_info_sha224[] = {
+       0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
+       0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
+       0x05, 0x00, 0x04, 0x1c
+};
+
+static const u8 digest_info_sha256[] = {
+       0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
+       0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
+       0x05, 0x00, 0x04, 0x20
+};
+
+static const u8 digest_info_sha384[] = {
+       0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
+       0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
+       0x05, 0x00, 0x04, 0x30
+};
+
+static const u8 digest_info_sha512[] = {
+       0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
+       0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
+       0x05, 0x00, 0x04, 0x40
+};
+
+static const struct asn1_template {
+       const char      *name;
+       const u8        *data;
+       size_t          size;
+} asn1_templates[] = {
+#define _(X) { #X, digest_info_##X, sizeof(digest_info_##X) }
+       _(md5),
+       _(sha1),
+       _(rmd160),
+       _(sha256),
+       _(sha384),
+       _(sha512),
+       _(sha224),
+       { NULL }
+#undef _
+};
+
+static const struct asn1_template *lookup_asn1(const char *name)
+{
+       const struct asn1_template *p;
+
+       for (p = asn1_templates; p->name; p++)
+               if (strcmp(name, p->name) == 0)
+                       return p;
+       return NULL;
+}
+
+/*
+ * Sign operation is performed with the private key in the TPM.
+ */
+static int tpm_key_sign(struct tpm_key *tk,
+                       struct kernel_pkey_params *params,
+                       const void *in, void *out)
+{
+       struct tpm_buf *tb;
+       uint32_t keyhandle;
+       uint8_t srkauth[SHA1_DIGEST_SIZE];
+       uint8_t keyauth[SHA1_DIGEST_SIZE];
+       void *asn1_wrapped = NULL;
+       uint32_t in_len = params->in_len;
+       int r;
+
+       pr_devel("==>%s()\n", __func__);
+
+       if (strcmp(params->encoding, "pkcs1"))
+               return -ENOPKG;
+
+       if (params->hash_algo) {
+               const struct asn1_template *asn1 =
+                                               lookup_asn1(params->hash_algo);
+
+               if (!asn1)
+                       return -ENOPKG;
+
+               /* request enough space for the ASN.1 template + input hash */
+               asn1_wrapped = kzalloc(in_len + asn1->size, GFP_KERNEL);
+               if (!asn1_wrapped)
+                       return -ENOMEM;
+
+               /* Copy ASN.1 template, then the input */
+               memcpy(asn1_wrapped, asn1->data, asn1->size);
+               memcpy(asn1_wrapped + asn1->size, in, in_len);
+
+               in = asn1_wrapped;
+               in_len += asn1->size;
+       }
+
+       if (in_len > tk->key_len / 8 - 11) {
+               r = -EOVERFLOW;
+               goto error_free_asn1_wrapped;
+       }
+
+       r = -ENOMEM;
+       tb = kzalloc(sizeof(*tb), GFP_KERNEL);
+       if (!tb)
+               goto error_free_asn1_wrapped;
+
+       /* TODO: Handle a non-all zero SRK authorization */
+       memset(srkauth, 0, sizeof(srkauth));
+
+       r = tpm_loadkey2(tb, SRKHANDLE, srkauth,
+                        tk->blob, tk->blob_len, &keyhandle);
+       if (r < 0) {
+               pr_devel("loadkey2 failed (%d)\n", r);
+               goto error_free_tb;
+       }
+
+       /* TODO: Handle a non-all zero key authorization */
+       memset(keyauth, 0, sizeof(keyauth));
+
+       r = tpm_sign(tb, keyhandle, keyauth, in, in_len, out, params->out_len);
+       if (r < 0)
+               pr_devel("tpm_sign failed (%d)\n", r);
+
+       if (tpm_flushspecific(tb, keyhandle) < 0)
+               pr_devel("flushspecific failed (%d)\n", r);
+
+error_free_tb:
+       kzfree(tb);
+error_free_asn1_wrapped:
+       kfree(asn1_wrapped);
+       pr_devel("<==%s() = %d\n", __func__, r);
+       return r;
+}
+
+/*
+ * Do encryption, decryption and signing ops.
+ */
+static int tpm_key_eds_op(struct kernel_pkey_params *params,
+                         const void *in, void *out)
+{
+       struct tpm_key *tk = params->key->payload.data[asym_crypto];
+       int ret = -EOPNOTSUPP;
+
+       /* Perform the encryption calculation. */
+       switch (params->op) {
+       case kernel_pkey_encrypt:
+               ret = tpm_key_encrypt(tk, params, in, out);
+               break;
+       case kernel_pkey_decrypt:
+               ret = tpm_key_decrypt(tk, params, in, out);
+               break;
+       case kernel_pkey_sign:
+               ret = tpm_key_sign(tk, params, in, out);
+               break;
+       default:
+               BUG();
+       }
+
+       return ret;
+}
+
+/*
+ * Verify a signature using a public key.
+ */
+static int tpm_key_verify_signature(const struct key *key,
+                                   const struct public_key_signature *sig)
+{
+       const struct tpm_key *tk = key->payload.data[asym_crypto];
+       struct crypto_wait cwait;
+       struct crypto_akcipher *tfm;
+       struct akcipher_request *req;
+       struct scatterlist sig_sg, digest_sg;
+       char alg_name[CRYPTO_MAX_ALG_NAME];
+       uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
+       uint32_t der_pub_key_len;
+       void *output;
+       unsigned int outlen;
+       int ret;
+
+       pr_devel("==>%s()\n", __func__);
+
+       BUG_ON(!tk);
+       BUG_ON(!sig);
+       BUG_ON(!sig->s);
+
+       if (!sig->digest)
+               return -ENOPKG;
+
+       ret = determine_akcipher(sig->encoding, sig->hash_algo, alg_name);
+       if (ret < 0)
+               return ret;
+
+       tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
+                                        der_pub_key);
+
+       ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
+       if (ret < 0)
+               goto error_free_tfm;
+
+       ret = -ENOMEM;
+       req = akcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req)
+               goto error_free_tfm;
+
+       ret = -ENOMEM;
+       outlen = crypto_akcipher_maxsize(tfm);
+       output = kmalloc(outlen, GFP_KERNEL);
+       if (!output)
+               goto error_free_req;
+
+       sg_init_one(&sig_sg, sig->s, sig->s_size);
+       sg_init_one(&digest_sg, output, outlen);
+       akcipher_request_set_crypt(req, &sig_sg, &digest_sg, sig->s_size,
+                                  outlen);
+       crypto_init_wait(&cwait);
+       akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                                     CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     crypto_req_done, &cwait);
+
+       /* Perform the verification calculation.  This doesn't actually do the
+        * verification, but rather calculates the hash expected by the
+        * signature and returns that to us.
+        */
+       ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
+       if (ret)
+               goto out_free_output;
+
+       /* Do the actual verification step. */
+       if (req->dst_len != sig->digest_size ||
+           memcmp(sig->digest, output, sig->digest_size) != 0)
+               ret = -EKEYREJECTED;
+
+out_free_output:
+       kfree(output);
+error_free_req:
+       akcipher_request_free(req);
+error_free_tfm:
+       crypto_free_akcipher(tfm);
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       if (WARN_ON_ONCE(ret > 0))
+               ret = -EINVAL;
+       return ret;
+}
+
+/*
+ * Parse enough information out of TPM_KEY structure:
+ * TPM_STRUCT_VER -> 4 bytes
+ * TPM_KEY_USAGE -> 2 bytes
+ * TPM_KEY_FLAGS -> 4 bytes
+ * TPM_AUTH_DATA_USAGE -> 1 byte
+ * TPM_KEY_PARMS -> variable
+ * UINT32 PCRInfoSize -> 4 bytes
+ * BYTE* -> PCRInfoSize bytes
+ * TPM_STORE_PUBKEY
+ * UINT32 encDataSize;
+ * BYTE* -> encDataSize;
+ *
+ * TPM_KEY_PARMS:
+ * TPM_ALGORITHM_ID -> 4 bytes
+ * TPM_ENC_SCHEME -> 2 bytes
+ * TPM_SIG_SCHEME -> 2 bytes
+ * UINT32 parmSize -> 4 bytes
+ * BYTE* -> variable
+ */
+static int extract_key_parameters(struct tpm_key *tk)
+{
+       const void *cur = tk->blob;
+       uint32_t len = tk->blob_len;
+       const void *pub_key;
+       uint32_t sz;
+       uint32_t key_len;
+
+       if (len < 11)
+               return -EBADMSG;
+
+       /* Ensure this is a legacy key */
+       if (get_unaligned_be16(cur + 4) != 0x0015)
+               return -EBADMSG;
+
+       /* Skip to TPM_KEY_PARMS */
+       cur += 11;
+       len -= 11;
+
+       if (len < 12)
+               return -EBADMSG;
+
+       /* Make sure this is an RSA key */
+       if (get_unaligned_be32(cur) != 0x00000001)
+               return -EBADMSG;
+
+       /* Make sure this is TPM_ES_RSAESPKCSv15 encoding scheme */
+       if (get_unaligned_be16(cur + 4) != 0x0002)
+               return -EBADMSG;
+
+       /* Make sure this is TPM_SS_RSASSAPKCS1v15_DER signature scheme */
+       if (get_unaligned_be16(cur + 6) != 0x0003)
+               return -EBADMSG;
+
+       sz = get_unaligned_be32(cur + 8);
+       if (len < sz + 12)
+               return -EBADMSG;
+
+       /* Move to TPM_RSA_KEY_PARMS */
+       len -= 12;
+       cur += 12;
+
+       /* Grab the RSA key length */
+       key_len = get_unaligned_be32(cur);
+
+       switch (key_len) {
+       case 512:
+       case 1024:
+       case 1536:
+       case 2048:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Move just past TPM_KEY_PARMS */
+       cur += sz;
+       len -= sz;
+
+       if (len < 4)
+               return -EBADMSG;
+
+       sz = get_unaligned_be32(cur);
+       if (len < 4 + sz)
+               return -EBADMSG;
+
+       /* Move to TPM_STORE_PUBKEY */
+       cur += 4 + sz;
+       len -= 4 + sz;
+
+       /* Grab the size of the public key; it should match the key size */
+       sz = get_unaligned_be32(cur);
+       if (sz > 256)
+               return -EINVAL;
+
+       pub_key = cur + 4;
+
+       tk->key_len = key_len;
+       tk->pub_key = pub_key;
+       tk->pub_key_len = sz;
+
+       return 0;
+}
+
+/* Given the blob, parse it and prepare it for use with the TPM */
+struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len)
+{
+       int r;
+       struct tpm_key *tk;
+
+       r = tpm_is_tpm2(NULL);
+       if (r < 0)
+               goto error;
+
+       /* We don't support TPM2 yet */
+       if (r > 0) {
+               r = -ENODEV;
+               goto error;
+       }
+
+       r = -ENOMEM;
+       tk = kzalloc(sizeof(struct tpm_key), GFP_KERNEL);
+       if (!tk)
+               goto error;
+
+       tk->blob = kmemdup(blob, blob_len, GFP_KERNEL);
+       if (!tk->blob)
+               goto error_memdup;
+
+       tk->blob_len = blob_len;
+
+       r = extract_key_parameters(tk);
+       if (r < 0)
+               goto error_extract;
+
+       return tk;
+
+error_extract:
+       kfree(tk->blob);
+       tk->blob_len = 0;
+error_memdup:
+       kfree(tk);
+error:
+       return ERR_PTR(r);
+}
+EXPORT_SYMBOL_GPL(tpm_key_create);
+
+/*
+ * TPM-based asymmetric key subtype
+ */
+struct asymmetric_key_subtype asym_tpm_subtype = {
+       .owner                  = THIS_MODULE,
+       .name                   = "asym_tpm",
+       .name_len               = sizeof("asym_tpm") - 1,
+       .describe               = asym_tpm_describe,
+       .destroy                = asym_tpm_destroy,
+       .query                  = tpm_key_query,
+       .eds_op                 = tpm_key_eds_op,
+       .verify_signature       = tpm_key_verify_signature,
+};
+EXPORT_SYMBOL_GPL(asym_tpm_subtype);
+
+MODULE_DESCRIPTION("TPM based asymmetric key subtype");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
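
The derive_pub_key() path in this file hand-rolls the DER SEQUENCE { INTEGER n, INTEGER e } that crypto_akcipher_set_pub_key() expects. A self-contained userspace re-creation of that encoding, where the modulus bytes are dummies:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint8_t *encode_tag_length(uint8_t *buf, uint8_t tag, uint32_t len)
    {
            *buf++ = tag;
            if (len <= 127) {
                    *buf++ = len;
            } else if (len <= 255) {
                    *buf++ = 0x81;
                    *buf++ = len;
            } else {
                    *buf++ = 0x82;
                    *buf++ = len >> 8;
                    *buf++ = len & 0xff;
            }
            return buf;
    }

    int main(void)
    {
            uint8_t n[256] = { 0xc0 };              /* dummy 2048-bit modulus */
            const uint8_t e[3] = { 0x01, 0x00, 0x01 };      /* 65537 */
            uint8_t der[4 + 4 + 257 + 2 + 3];       /* PUB_KEY_BUF_SIZE */
            uint8_t *cur = der;
            uint32_t n_len = 3 + 1 + sizeof(n) + 1; /* hdr + 0x00 + n */
            uint32_t e_len = 1 + 1 + sizeof(e);

            cur = encode_tag_length(cur, 0x30, n_len + e_len);  /* SEQUENCE */
            cur = encode_tag_length(cur, 0x02, sizeof(n) + 1);  /* INTEGER n */
            *cur++ = 0x00;                  /* keep n positive in DER */
            memcpy(cur, n, sizeof(n));
            cur += sizeof(n);
            cur = encode_tag_length(cur, 0x02, sizeof(e));      /* INTEGER e */
            memcpy(cur, e, sizeof(e));
            cur += sizeof(e);

            printf("encoded %ld bytes\n", (long)(cur - der));   /* 270 */
            return 0;
    }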
index ca8e9ac34ce621613d29de02ba051eba593e7fc6..7be1ccf4fa9f2234c290e9bcffef773f176354aa 100644 (file)
@@ -16,3 +16,6 @@ extern struct asymmetric_key_id *asymmetric_key_hex_to_key_id(const char *id);
 extern int __asymmetric_key_hex_to_key_id(const char *id,
                                          struct asymmetric_key_id *match_id,
                                          size_t hexlen);
+
+extern int asymmetric_key_eds_op(struct kernel_pkey_params *params,
+                                const void *in, void *out);
index 26539e9a8bda41c37a664490e037f2365da7f15c..69a0788a7de5d08eddc6ad82451f2515ed94f6d4 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/ctype.h>
 #include <keys/system_keyring.h>
+#include <keys/user-type.h>
 #include "asymmetric_keys.h"
 
 MODULE_LICENSE("GPL");
@@ -538,6 +539,45 @@ out:
        return ret;
 }
 
+int asymmetric_key_eds_op(struct kernel_pkey_params *params,
+                         const void *in, void *out)
+{
+       const struct asymmetric_key_subtype *subtype;
+       struct key *key = params->key;
+       int ret;
+
+       pr_devel("==>%s()\n", __func__);
+
+       if (key->type != &key_type_asymmetric)
+               return -EINVAL;
+       subtype = asymmetric_key_subtype(key);
+       if (!subtype ||
+           !key->payload.data[0])
+               return -EINVAL;
+       if (!subtype->eds_op)
+               return -ENOTSUPP;
+
+       ret = subtype->eds_op(params, in, out);
+
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       return ret;
+}
+
+static int asymmetric_key_verify_signature(struct kernel_pkey_params *params,
+                                          const void *in, const void *in2)
+{
+       struct public_key_signature sig = {
+               .s_size         = params->in2_len,
+               .digest_size    = params->in_len,
+               .encoding       = params->encoding,
+               .hash_algo      = params->hash_algo,
+               .digest         = (void *)in,
+               .s              = (void *)in2,
+       };
+
+       return verify_signature(params->key, &sig);
+}
+
 struct key_type key_type_asymmetric = {
        .name                   = "asymmetric",
        .preparse               = asymmetric_key_preparse,
@@ -548,6 +588,9 @@ struct key_type key_type_asymmetric = {
        .destroy                = asymmetric_key_destroy,
        .describe               = asymmetric_key_describe,
        .lookup_restriction     = asymmetric_lookup_restriction,
+       .asym_query             = query_asymmetric_key,
+       .asym_eds_op            = asymmetric_key_eds_op,
+       .asym_verify_signature  = asymmetric_key_verify_signature,
 };
 EXPORT_SYMBOL_GPL(key_type_asymmetric);
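
With asym_query and asym_eds_op wired into the key type above, userspace can reach these hooks through the KEYCTL_PKEY_* keyctls. A hedged usage sketch, assuming uapi headers that provide KEYCTL_PKEY_QUERY and struct keyctl_pkey_query (added in 4.20); the key serial is a placeholder:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/keyctl.h>

    int main(void)
    {
            long key = 0x12345678;          /* placeholder key serial */
            struct keyctl_pkey_query q;

            if (syscall(SYS_keyctl, KEYCTL_PKEY_QUERY, key, 0UL,
                        "enc=pkcs1", &q) < 0) {
                    perror("KEYCTL_PKEY_QUERY");
                    return 1;
            }
            printf("key_size=%u max_sig_size=%u\n",
                   q.key_size, q.max_sig_size);
            return 0;
    }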
 
index 0f134162cef4b5f89c016db315df1b52de18ef16..f0d56e1a8b7e2b4971004959261b0d5c18cf0a6b 100644 (file)
@@ -271,6 +271,7 @@ int pkcs7_sig_note_pkey_algo(void *context, size_t hdrlen,
        switch (ctx->last_oid) {
        case OID_rsaEncryption:
                ctx->sinfo->sig->pkey_algo = "rsa";
+               ctx->sinfo->sig->encoding = "pkcs1";
                break;
        default:
                printk("Unsupported pkey algo: %u\n", ctx->last_oid);
diff --git a/crypto/asymmetric_keys/pkcs8.asn1 b/crypto/asymmetric_keys/pkcs8.asn1
new file mode 100644 (file)
index 0000000..702c41a
--- /dev/null
@@ -0,0 +1,24 @@
+--
+-- This is the unencrypted variant
+--
+PrivateKeyInfo ::= SEQUENCE {
+       version                 Version,
+       privateKeyAlgorithm     PrivateKeyAlgorithmIdentifier,
+       privateKey              PrivateKey,
+       attributes              [0] IMPLICIT Attributes OPTIONAL
+}
+
+Version ::= INTEGER  ({ pkcs8_note_version })
+
+PrivateKeyAlgorithmIdentifier ::= AlgorithmIdentifier ({ pkcs8_note_algo })
+
+PrivateKey ::= OCTET STRING ({ pkcs8_note_key })
+
+Attributes ::= SET OF Attribute
+
+Attribute ::= ANY
+
+AlgorithmIdentifier ::= SEQUENCE {
+       algorithm   OBJECT IDENTIFIER ({ pkcs8_note_OID }),
+       parameters  ANY OPTIONAL
+}
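
For reference, the DER header of an unencrypted PKCS#8 RSA blob that this grammar accepts begins as follows (sketched as a C comment; the xx/yy length bytes depend on the key):

    /*
     *  30 82 xx xx                SEQUENCE (PrivateKeyInfo)
     *     02 01 00                INTEGER 0 (Version)
     *     30 0d                   SEQUENCE (AlgorithmIdentifier)
     *        06 09 2a 86 48 86 f7 0d 01 01 01
     *                             OID 1.2.840.113549.1.1.1 (rsaEncryption)
     *        05 00                NULL (parameters)
     *     04 82 yy yy             OCTET STRING (PrivateKey = RSAPrivateKey)
     */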
diff --git a/crypto/asymmetric_keys/pkcs8_parser.c b/crypto/asymmetric_keys/pkcs8_parser.c
new file mode 100644 (file)
index 0000000..5f6a7ec
--- /dev/null
@@ -0,0 +1,184 @@
+/* PKCS#8 Private Key parser [RFC 5208].
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "PKCS8: "fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/oid_registry.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/asymmetric-parser.h>
+#include <crypto/public_key.h>
+#include "pkcs8.asn1.h"
+
+struct pkcs8_parse_context {
+       struct public_key *pub;
+       unsigned long   data;                   /* Start of data */
+       enum OID        last_oid;               /* Last OID encountered */
+       enum OID        algo_oid;               /* Algorithm OID */
+       u32             key_size;
+       const void      *key;
+};
+
+/*
+ * Note an OID when we find one for later processing when we know how to
+ * interpret it.
+ */
+int pkcs8_note_OID(void *context, size_t hdrlen,
+                  unsigned char tag,
+                  const void *value, size_t vlen)
+{
+       struct pkcs8_parse_context *ctx = context;
+
+       ctx->last_oid = look_up_OID(value, vlen);
+       if (ctx->last_oid == OID__NR) {
+               char buffer[50];
+
+               sprint_oid(value, vlen, buffer, sizeof(buffer));
+               pr_info("Unknown OID: [%lu] %s\n",
+                       (unsigned long)value - ctx->data, buffer);
+       }
+       return 0;
+}
+
+/*
+ * Note the version number of the ASN.1 blob.
+ */
+int pkcs8_note_version(void *context, size_t hdrlen,
+                      unsigned char tag,
+                      const void *value, size_t vlen)
+{
+       if (vlen != 1 || ((const u8 *)value)[0] != 0) {
+               pr_warn("Unsupported PKCS#8 version\n");
+               return -EBADMSG;
+       }
+       return 0;
+}
+
+/*
+ * Note the public algorithm.
+ */
+int pkcs8_note_algo(void *context, size_t hdrlen,
+                   unsigned char tag,
+                   const void *value, size_t vlen)
+{
+       struct pkcs8_parse_context *ctx = context;
+
+       if (ctx->last_oid != OID_rsaEncryption)
+               return -ENOPKG;
+
+       ctx->pub->pkey_algo = "rsa";
+       return 0;
+}
+
+/*
+ * Note the key data of the ASN.1 blob.
+ */
+int pkcs8_note_key(void *context, size_t hdrlen,
+                  unsigned char tag,
+                  const void *value, size_t vlen)
+{
+       struct pkcs8_parse_context *ctx = context;
+
+       ctx->key = value;
+       ctx->key_size = vlen;
+       return 0;
+}
+
+/*
+ * Parse a PKCS#8 private key blob.
+ */
+static struct public_key *pkcs8_parse(const void *data, size_t datalen)
+{
+       struct pkcs8_parse_context ctx;
+       struct public_key *pub;
+       long ret;
+
+       memset(&ctx, 0, sizeof(ctx));
+
+       ret = -ENOMEM;
+       ctx.pub = kzalloc(sizeof(struct public_key), GFP_KERNEL);
+       if (!ctx.pub)
+               goto error;
+
+       ctx.data = (unsigned long)data;
+
+       /* Attempt to decode the private key */
+       ret = asn1_ber_decoder(&pkcs8_decoder, &ctx, data, datalen);
+       if (ret < 0)
+               goto error_decode;
+
+       ret = -ENOMEM;
+       pub = ctx.pub;
+       pub->key = kmemdup(ctx.key, ctx.key_size, GFP_KERNEL);
+       if (!pub->key)
+               goto error_decode;
+
+       pub->keylen = ctx.key_size;
+       pub->key_is_private = true;
+       return pub;
+
+error_decode:
+       kfree(ctx.pub);
+error:
+       return ERR_PTR(ret);
+}
+
+/*
+ * Attempt to parse a data blob for a key as a PKCS#8 private key.
+ */
+static int pkcs8_key_preparse(struct key_preparsed_payload *prep)
+{
+       struct public_key *pub;
+
+       pub = pkcs8_parse(prep->data, prep->datalen);
+       if (IS_ERR(pub))
+               return PTR_ERR(pub);
+
+       pr_devel("Cert Key Algo: %s\n", pub->pkey_algo);
+       pub->id_type = "PKCS8";
+
+       /* We're pinning the module by being linked against it */
+       __module_get(public_key_subtype.owner);
+       prep->payload.data[asym_subtype] = &public_key_subtype;
+       prep->payload.data[asym_key_ids] = NULL;
+       prep->payload.data[asym_crypto] = pub;
+       prep->payload.data[asym_auth] = NULL;
+       prep->quotalen = 100;
+       return 0;
+}
+
+static struct asymmetric_key_parser pkcs8_key_parser = {
+       .owner  = THIS_MODULE,
+       .name   = "pkcs8",
+       .parse  = pkcs8_key_preparse,
+};
+
+/*
+ * Module stuff
+ */
+static int __init pkcs8_key_init(void)
+{
+       return register_asymmetric_key_parser(&pkcs8_key_parser);
+}
+
+static void __exit pkcs8_key_exit(void)
+{
+       unregister_asymmetric_key_parser(&pkcs8_key_parser);
+}
+
+module_init(pkcs8_key_init);
+module_exit(pkcs8_key_exit);
+
+MODULE_DESCRIPTION("PKCS#8 certificate parser");
+MODULE_LICENSE("GPL");
index e929fe1e4106c7dfaff7c2bcf3186952f449b764..f5d85b47fcc6d23be7315f5d1d245538649f00e7 100644 (file)
@@ -59,6 +59,165 @@ static void public_key_destroy(void *payload0, void *payload3)
        public_key_signature_free(payload3);
 }
 
+/*
+ * Determine the crypto algorithm name.
+ */
+static
+int software_key_determine_akcipher(const char *encoding,
+                                   const char *hash_algo,
+                                   const struct public_key *pkey,
+                                   char alg_name[CRYPTO_MAX_ALG_NAME])
+{
+       int n;
+
+       if (strcmp(encoding, "pkcs1") == 0) {
+               /* The data wangled by the RSA algorithm is typically padded
+                * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447
+                * sec 8.2].
+                */
+               if (!hash_algo)
+                       n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
+                                    "pkcs1pad(%s)",
+                                    pkey->pkey_algo);
+               else
+                       n = snprintf(alg_name, CRYPTO_MAX_ALG_NAME,
+                                    "pkcs1pad(%s,%s)",
+                                    pkey->pkey_algo, hash_algo);
+               return n >= CRYPTO_MAX_ALG_NAME ? -EINVAL : 0;
+       }
+
+       if (strcmp(encoding, "raw") == 0) {
+               strcpy(alg_name, pkey->pkey_algo);
+               return 0;
+       }
+
+       return -ENOPKG;
+}
+
+/*
+ * Query information about a key.
+ */
+static int software_key_query(const struct kernel_pkey_params *params,
+                             struct kernel_pkey_query *info)
+{
+       struct crypto_akcipher *tfm;
+       struct public_key *pkey = params->key->payload.data[asym_crypto];
+       char alg_name[CRYPTO_MAX_ALG_NAME];
+       int ret, len;
+
+       ret = software_key_determine_akcipher(params->encoding,
+                                             params->hash_algo,
+                                             pkey, alg_name);
+       if (ret < 0)
+               return ret;
+
+       tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       if (pkey->key_is_private)
+               ret = crypto_akcipher_set_priv_key(tfm,
+                                                  pkey->key, pkey->keylen);
+       else
+               ret = crypto_akcipher_set_pub_key(tfm,
+                                                 pkey->key, pkey->keylen);
+       if (ret < 0)
+               goto error_free_tfm;
+
+       len = crypto_akcipher_maxsize(tfm);
+       info->key_size = len * 8;
+       info->max_data_size = len;
+       info->max_sig_size = len;
+       info->max_enc_size = len;
+       info->max_dec_size = len;
+       info->supported_ops = (KEYCTL_SUPPORTS_ENCRYPT |
+                              KEYCTL_SUPPORTS_VERIFY);
+       if (pkey->key_is_private)
+               info->supported_ops |= (KEYCTL_SUPPORTS_DECRYPT |
+                                       KEYCTL_SUPPORTS_SIGN);
+       ret = 0;
+
+error_free_tfm:
+       crypto_free_akcipher(tfm);
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       return ret;
+}
+
+/*
+ * Do encryption, decryption and signing ops.
+ */
+static int software_key_eds_op(struct kernel_pkey_params *params,
+                              const void *in, void *out)
+{
+       const struct public_key *pkey = params->key->payload.data[asym_crypto];
+       struct akcipher_request *req;
+       struct crypto_akcipher *tfm;
+       struct crypto_wait cwait;
+       struct scatterlist in_sg, out_sg;
+       char alg_name[CRYPTO_MAX_ALG_NAME];
+       int ret;
+
+       pr_devel("==>%s()\n", __func__);
+
+       ret = software_key_determine_akcipher(params->encoding,
+                                             params->hash_algo,
+                                             pkey, alg_name);
+       if (ret < 0)
+               return ret;
+
+       tfm = crypto_alloc_akcipher(alg_name, 0, 0);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       req = akcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req)
+               goto error_free_tfm;
+
+       if (pkey->key_is_private)
+               ret = crypto_akcipher_set_priv_key(tfm,
+                                                  pkey->key, pkey->keylen);
+       else
+               ret = crypto_akcipher_set_pub_key(tfm,
+                                                 pkey->key, pkey->keylen);
+       if (ret)
+               goto error_free_req;
+
+       sg_init_one(&in_sg, in, params->in_len);
+       sg_init_one(&out_sg, out, params->out_len);
+       akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
+                                  params->out_len);
+       crypto_init_wait(&cwait);
+       akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
+                                     CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     crypto_req_done, &cwait);
+
+       /* Perform the encryption calculation. */
+       switch (params->op) {
+       case kernel_pkey_encrypt:
+               ret = crypto_akcipher_encrypt(req);
+               break;
+       case kernel_pkey_decrypt:
+               ret = crypto_akcipher_decrypt(req);
+               break;
+       case kernel_pkey_sign:
+               ret = crypto_akcipher_sign(req);
+               break;
+       default:
+               BUG();
+       }
+
+       ret = crypto_wait_req(ret, &cwait);
+       if (ret == 0)
+               ret = req->dst_len;
+
+error_free_req:
+       akcipher_request_free(req);
+error_free_tfm:
+       crypto_free_akcipher(tfm);
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       return ret;
+}
+
 /*
  * Verify a signature using a public key.
  */
@@ -69,8 +228,7 @@ int public_key_verify_signature(const struct public_key *pkey,
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
        struct scatterlist sig_sg, digest_sg;
-       const char *alg_name;
-       char alg_name_buf[CRYPTO_MAX_ALG_NAME];
+       char alg_name[CRYPTO_MAX_ALG_NAME];
        void *output;
        unsigned int outlen;
        int ret;
@@ -81,21 +239,11 @@ int public_key_verify_signature(const struct public_key *pkey,
        BUG_ON(!sig);
        BUG_ON(!sig->s);
 
-       if (!sig->digest)
-               return -ENOPKG;
-
-       alg_name = sig->pkey_algo;
-       if (strcmp(sig->pkey_algo, "rsa") == 0) {
-               /* The data wangled by the RSA algorithm is typically padded
-                * and encoded in some manner, such as EMSA-PKCS1-1_5 [RFC3447
-                * sec 8.2].
-                */
-               if (snprintf(alg_name_buf, CRYPTO_MAX_ALG_NAME,
-                            "pkcs1pad(rsa,%s)", sig->hash_algo
-                            ) >= CRYPTO_MAX_ALG_NAME)
-                       return -EINVAL;
-               alg_name = alg_name_buf;
-       }
+       ret = software_key_determine_akcipher(sig->encoding,
+                                             sig->hash_algo,
+                                             pkey, alg_name);
+       if (ret < 0)
+               return ret;
 
        tfm = crypto_alloc_akcipher(alg_name, 0, 0);
        if (IS_ERR(tfm))
@@ -106,7 +254,12 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (!req)
                goto error_free_tfm;
 
-       ret = crypto_akcipher_set_pub_key(tfm, pkey->key, pkey->keylen);
+       if (pkey->key_is_private)
+               ret = crypto_akcipher_set_priv_key(tfm,
+                                                  pkey->key, pkey->keylen);
+       else
+               ret = crypto_akcipher_set_pub_key(tfm,
+                                                 pkey->key, pkey->keylen);
        if (ret)
                goto error_free_req;
 
@@ -167,6 +320,8 @@ struct asymmetric_key_subtype public_key_subtype = {
        .name_len               = sizeof("public_key") - 1,
        .describe               = public_key_describe,
        .destroy                = public_key_destroy,
+       .query                  = software_key_query,
+       .eds_op                 = software_key_eds_op,
        .verify_signature       = public_key_verify_signature_2,
 };
 EXPORT_SYMBOL_GPL(public_key_subtype);
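
For reference, software_key_determine_akcipher() reduces to a crypto API
template name that is then fed to crypto_alloc_akcipher(); a few illustrative
mappings (a sketch, assuming pkey_algo is "rsa"):

    /* encoding "pkcs1", hash_algo "sha256"  ->  "pkcs1pad(rsa,sha256)"
     * encoding "pkcs1", hash_algo NULL      ->  "pkcs1pad(rsa)"
     * encoding "raw"                        ->  "rsa"
     */
    struct crypto_akcipher *tfm;

    tfm = crypto_alloc_akcipher("pkcs1pad(rsa,sha256)", 0, 0);

The hash-less "pkcs1pad(rsa)" form only instantiates once the rsa-pkcs1pad
template change later in this series makes the digest parameter optional.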
index 28198314bc39f4f4a27da38565619f6e262a7fd4..ad95a58c664275a9d561548fa79e6a027a74db0c 100644 (file)
@@ -16,7 +16,9 @@
 #include <linux/export.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/keyctl.h>
 #include <crypto/public_key.h>
+#include <keys/user-type.h>
 #include "asymmetric_keys.h"
 
 /*
@@ -36,6 +38,99 @@ void public_key_signature_free(struct public_key_signature *sig)
 }
 EXPORT_SYMBOL_GPL(public_key_signature_free);
 
+/**
+ * query_asymmetric_key - Get information about an asymmetric key.
+ * @params: Various parameters.
+ * @info: Where to put the information.
+ */
+int query_asymmetric_key(const struct kernel_pkey_params *params,
+                        struct kernel_pkey_query *info)
+{
+       const struct asymmetric_key_subtype *subtype;
+       struct key *key = params->key;
+       int ret;
+
+       pr_devel("==>%s()\n", __func__);
+
+       if (key->type != &key_type_asymmetric)
+               return -EINVAL;
+       subtype = asymmetric_key_subtype(key);
+       if (!subtype ||
+           !key->payload.data[0])
+               return -EINVAL;
+       if (!subtype->query)
+               return -ENOTSUPP;
+
+       ret = subtype->query(params, info);
+
+       pr_devel("<==%s() = %d\n", __func__, ret);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(query_asymmetric_key);
+
+/**
+ * encrypt_blob - Encrypt data using an asymmetric key
+ * @params: Various parameters
+ * @data: Data blob to be encrypted, length params->data_len
+ * @enc: Encrypted data buffer, length params->enc_len
+ *
+ * Encrypt the specified data blob using the private key specified by
+ * params->key.  The encrypted data is wrapped in an encoding if
+ * params->encoding is specified (eg. "pkcs1").
+ *
+ * Returns the length of the data placed in the encrypted data buffer or an
+ * error.
+ */
+int encrypt_blob(struct kernel_pkey_params *params,
+                const void *data, void *enc)
+{
+       params->op = kernel_pkey_encrypt;
+       return asymmetric_key_eds_op(params, data, enc);
+}
+EXPORT_SYMBOL_GPL(encrypt_blob);
+
+/**
+ * decrypt_blob - Decrypt data using an asymmetric key
+ * @params: Various parameters
+ * @enc: Encrypted data to be decrypted, length params->enc_len
+ * @data: Decrypted data buffer, length params->data_len
+ *
+ * Decrypt the specified data blob using the private key specified by
+ * params->key.  The decrypted data is wrapped in an encoding if
+ * params->encoding is specified (eg. "pkcs1").
+ *
+ * Returns the length of the data placed in the decrypted data buffer or an
+ * error.
+ */
+int decrypt_blob(struct kernel_pkey_params *params,
+                const void *enc, void *data)
+{
+       params->op = kernel_pkey_decrypt;
+       return asymmetric_key_eds_op(params, enc, data);
+}
+EXPORT_SYMBOL_GPL(decrypt_blob);
+
+/**
+ * create_signature - Sign some data using an asymmetric key
+ * @params: Various parameters
+ * @data: Data blob to be signed, length params->data_len
+ * @enc: Signature buffer, length params->enc_len
+ *
+ * Sign the specified data blob using the private key specified by params->key.
+ * The signature is wrapped in an encoding if params->encoding is specified
+ * (eg. "pkcs1").  If the encoding needs to know the digest type, this can be
+ * passed through params->hash_algo (eg. "sha1").
+ *
+ * Returns the length of the data placed in the signature buffer or an error.
+ */
+int create_signature(struct kernel_pkey_params *params,
+                    const void *data, void *enc)
+{
+       params->op = kernel_pkey_sign;
+       return asymmetric_key_eds_op(params, data, enc);
+}
+EXPORT_SYMBOL_GPL(create_signature);
+
 /**
  * verify_signature - Initiate the use of an asymmetric key to verify a signature
  * @key: The asymmetric key to verify against
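
Taken together, an in-kernel user might drive these wrappers roughly as
follows (a sketch only: error handling is trimmed, and key, digest, digest_len
and sig_buf are assumed to be set up by the caller):

    struct kernel_pkey_query info;
    struct kernel_pkey_params params = {
            .key            = key,
            .encoding       = "pkcs1",
            .hash_algo      = "sha256",
    };
    int len;

    if (query_asymmetric_key(&params, &info) == 0 &&
        (info.supported_ops & KEYCTL_SUPPORTS_SIGN)) {
            params.in_len  = digest_len;            /* digest to sign */
            params.out_len = info.max_sig_size;
            len = create_signature(&params, digest, sig_buf);
    }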
diff --git a/crypto/asymmetric_keys/tpm.asn1 b/crypto/asymmetric_keys/tpm.asn1
new file mode 100644 (file)
index 0000000..d7f1942
--- /dev/null
@@ -0,0 +1,5 @@
+--
+-- Unencrypted TPM Blob.  For details of the format, see:
+-- http://david.woodhou.se/draft-woodhouse-cert-best-practice.html#I-D.mavrogiannopoulos-tpmuri
+--
+PrivateKeyInfo ::= OCTET STRING ({ tpm_note_key })
diff --git a/crypto/asymmetric_keys/tpm_parser.c b/crypto/asymmetric_keys/tpm_parser.c
new file mode 100644 (file)
index 0000000..96405d8
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) "TPM-PARSER: "fmt
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <keys/asymmetric-subtype.h>
+#include <keys/asymmetric-parser.h>
+#include <crypto/asym_tpm_subtype.h>
+#include "tpm.asn1.h"
+
+struct tpm_parse_context {
+       const void      *blob;
+       u32             blob_len;
+};
+
+/*
+ * Note the key data of the ASN.1 blob.
+ */
+int tpm_note_key(void *context, size_t hdrlen,
+                  unsigned char tag,
+                  const void *value, size_t vlen)
+{
+       struct tpm_parse_context *ctx = context;
+
+       ctx->blob = value;
+       ctx->blob_len = vlen;
+
+       return 0;
+}
+
+/*
+ * Parse a TPM-encrypted private key blob.
+ */
+static struct tpm_key *tpm_parse(const void *data, size_t datalen)
+{
+       struct tpm_parse_context ctx;
+       long ret;
+
+       memset(&ctx, 0, sizeof(ctx));
+
+       /* Attempt to decode the private key */
+       ret = asn1_ber_decoder(&tpm_decoder, &ctx, data, datalen);
+       if (ret < 0)
+               goto error;
+
+       return tpm_key_create(ctx.blob, ctx.blob_len);
+
+error:
+       return ERR_PTR(ret);
+}
+
+/*
+ * Attempt to parse a data blob for a key as a TPM private key blob.
+ */
+static int tpm_key_preparse(struct key_preparsed_payload *prep)
+{
+       struct tpm_key *tk;
+
+       /*
+        * TPM 1.2 keys are max 2048 bits long, so assume the blob is no
+        * more than 4x that
+        */
+       if (prep->datalen > 256 * 4)
+               return -EMSGSIZE;
+
+       tk = tpm_parse(prep->data, prep->datalen);
+
+       if (IS_ERR(tk))
+               return PTR_ERR(tk);
+
+       /* We're pinning the module by being linked against it */
+       __module_get(asym_tpm_subtype.owner);
+       prep->payload.data[asym_subtype] = &asym_tpm_subtype;
+       prep->payload.data[asym_key_ids] = NULL;
+       prep->payload.data[asym_crypto] = tk;
+       prep->payload.data[asym_auth] = NULL;
+       prep->quotalen = 100;
+       return 0;
+}
+
+static struct asymmetric_key_parser tpm_key_parser = {
+       .owner  = THIS_MODULE,
+       .name   = "tpm_parser",
+       .parse  = tpm_key_preparse,
+};
+
+static int __init tpm_key_init(void)
+{
+       return register_asymmetric_key_parser(&tpm_key_parser);
+}
+
+static void __exit tpm_key_exit(void)
+{
+       unregister_asymmetric_key_parser(&tpm_key_parser);
+}
+
+module_init(tpm_key_init);
+module_exit(tpm_key_exit);
+
+MODULE_DESCRIPTION("TPM private key-blob parser");
+MODULE_LICENSE("GPL v2");
index b6cabac4b62ba6b920cb5947c56db5839711bcc7..991f4d735a4ef1d89083f15cae8efa77f82060d6 100644 (file)
@@ -199,35 +199,32 @@ int x509_note_pkey_algo(void *context, size_t hdrlen,
 
        case OID_md4WithRSAEncryption:
                ctx->cert->sig->hash_algo = "md4";
-               ctx->cert->sig->pkey_algo = "rsa";
-               break;
+               goto rsa_pkcs1;
 
        case OID_sha1WithRSAEncryption:
                ctx->cert->sig->hash_algo = "sha1";
-               ctx->cert->sig->pkey_algo = "rsa";
-               break;
+               goto rsa_pkcs1;
 
        case OID_sha256WithRSAEncryption:
                ctx->cert->sig->hash_algo = "sha256";
-               ctx->cert->sig->pkey_algo = "rsa";
-               break;
+               goto rsa_pkcs1;
 
        case OID_sha384WithRSAEncryption:
                ctx->cert->sig->hash_algo = "sha384";
-               ctx->cert->sig->pkey_algo = "rsa";
-               break;
+               goto rsa_pkcs1;
 
        case OID_sha512WithRSAEncryption:
                ctx->cert->sig->hash_algo = "sha512";
-               ctx->cert->sig->pkey_algo = "rsa";
-               break;
+               goto rsa_pkcs1;
 
        case OID_sha224WithRSAEncryption:
                ctx->cert->sig->hash_algo = "sha224";
-               ctx->cert->sig->pkey_algo = "rsa";
-               break;
+               goto rsa_pkcs1;
        }
 
+rsa_pkcs1:
+       ctx->cert->sig->pkey_algo = "rsa";
+       ctx->cert->sig->encoding = "pkcs1";
        ctx->algo_oid = ctx->last_oid;
        return 0;
 }
index b761b1f9c6ca161c8eb3a9340ab50b69374671bc..dd5f332fd5668985c9e904b35ad56d47b34ed383 100644 (file)
@@ -140,9 +140,8 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
-       crypto_mod_put(alg);
        if (err)
-               goto err_free_inst;
+               goto err_put_alg;
 
        err = crypto_inst_setname(skcipher_crypto_instance(inst), "cbc", alg);
        if (err)
@@ -174,12 +173,15 @@ static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
+       crypto_mod_put(alg);
 
 out:
        return err;
 
 err_drop_spawn:
        crypto_drop_spawn(spawn);
+err_put_alg:
+       crypto_mod_put(alg);
 err_free_inst:
        kfree(inst);
        goto out;
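
The cbc, cfb and pcbc hunks in this series all fix the same use-after-free:
crypto_mod_put(alg) dropped the algorithm reference immediately after
crypto_init_spawn(), yet alg->cra_name and friends are still dereferenced by
crypto_inst_setname() and the initialisation that follows. The fix holds the
reference until registration has succeeded and adds an err_put_alg label so
every failure path drops it exactly once. Schematically:

    /* before: alg could be freed here ... */
    crypto_mod_put(alg);
    err = crypto_inst_setname(inst, "cbc", alg);    /* ... yet still read */

    /* after: the reference outlives every use of alg */
    err = skcipher_register_instance(tmpl, inst);
    if (err)
            goto err_drop_spawn;
    crypto_mod_put(alg);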
index a0d68c09e1b9c53dd9eb4fb9bd08238d24b70d44..20987d0e09d89ceafbd51d3b61b321dc58a56862 100644 (file)
@@ -286,9 +286,8 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
-       crypto_mod_put(alg);
        if (err)
-               goto err_free_inst;
+               goto err_put_alg;
 
        err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg);
        if (err)
@@ -317,12 +316,15 @@ static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb)
        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
+       crypto_mod_put(alg);
 
 out:
        return err;
 
 err_drop_spawn:
        crypto_drop_spawn(spawn);
+err_put_alg:
+       crypto_mod_put(alg);
 err_free_inst:
        kfree(inst);
        goto out;
index e41f6cc33fff49f2b35ad52504742ba79c70c3b8..784748dbb19f0c58482ad18c761c7de121d41928 100644 (file)
@@ -84,7 +84,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_cipher rcipher;
 
-       strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+       strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
        rcipher.blocksize = alg->cra_blocksize;
        rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -103,7 +103,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_comp rcomp;
 
-       strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+       strncpy(rcomp.type, "compression", sizeof(rcomp.type));
        if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
                    sizeof(struct crypto_report_comp), &rcomp))
                goto nla_put_failure;
@@ -117,7 +117,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_acomp racomp;
 
-       strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+       strncpy(racomp.type, "acomp", sizeof(racomp.type));
 
        if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
                    sizeof(struct crypto_report_acomp), &racomp))
@@ -132,7 +132,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_akcipher rakcipher;
 
-       strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+       strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
 
        if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
                    sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -147,7 +147,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 {
        struct crypto_report_kpp rkpp;
 
-       strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+       strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
        if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
                    sizeof(struct crypto_report_kpp), &rkpp))
@@ -161,10 +161,10 @@ nla_put_failure:
 static int crypto_report_one(struct crypto_alg *alg,
                             struct crypto_user_alg *ualg, struct sk_buff *skb)
 {
-       strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
-       strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+       strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+       strncpy(ualg->cru_driver_name, alg->cra_driver_name,
                sizeof(ualg->cru_driver_name));
-       strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+       strncpy(ualg->cru_module_name, module_name(alg->cra_module),
                sizeof(ualg->cru_module_name));
 
        ualg->cru_type = 0;
@@ -177,7 +177,7 @@ static int crypto_report_one(struct crypto_alg *alg,
        if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
                struct crypto_report_larval rl;
 
-               strlcpy(rl.type, "larval", sizeof(rl.type));
+               strncpy(rl.type, "larval", sizeof(rl.type));
                if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
                            sizeof(struct crypto_report_larval), &rl))
                        goto nla_put_failure;
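
These structures are copied in full into a netlink skb, so every byte of the
fixed-size name fields reaches userspace. strlcpy() stops writing after the
terminating NUL, leaving the tail of the field as uninitialised stack memory;
strncpy() zero-fills the remainder, which is exactly what is wanted here. (The
crypto_user_stat hunks below add memset() of whole structures for the same
reason, which also covers padding holes that string copies cannot reach.) A
condensed illustration:

    char type[64];

    strlcpy(type, "cipher", sizeof(type));  /* bytes 7..63 left as-is */
    strncpy(type, "cipher", sizeof(type));  /* bytes 7..63 zero-filled */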
index 021ad06bbb628b5bc199ccded03c44429bae061c..1dfaa0ccd555b5bd3246822365114fa69c7e5ae1 100644 (file)
@@ -37,6 +37,8 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&raead, 0, sizeof(raead));
+
        strncpy(raead.type, "aead", sizeof(raead.type));
 
        v32 = atomic_read(&alg->encrypt_cnt);
@@ -65,6 +67,8 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rcipher, 0, sizeof(rcipher));
+
        strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
        v32 = atomic_read(&alg->encrypt_cnt);
@@ -93,6 +97,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rcomp, 0, sizeof(rcomp));
+
        strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
        v32 = atomic_read(&alg->compress_cnt);
        rcomp.stat_compress_cnt = v32;
@@ -120,6 +126,8 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&racomp, 0, sizeof(racomp));
+
        strlcpy(racomp.type, "acomp", sizeof(racomp.type));
        v32 = atomic_read(&alg->compress_cnt);
        racomp.stat_compress_cnt = v32;
@@ -147,6 +155,8 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rakcipher, 0, sizeof(rakcipher));
+
        strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
        v32 = atomic_read(&alg->encrypt_cnt);
        rakcipher.stat_encrypt_cnt = v32;
@@ -177,6 +187,8 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
        struct crypto_stat rkpp;
        u32 v;
 
+       memset(&rkpp, 0, sizeof(rkpp));
+
        strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
        v = atomic_read(&alg->setsecret_cnt);
@@ -203,6 +215,8 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rhash, 0, sizeof(rhash));
+
        strncpy(rhash.type, "ahash", sizeof(rhash.type));
 
        v32 = atomic_read(&alg->hash_cnt);
@@ -227,6 +241,8 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rhash, 0, sizeof(rhash));
+
        strncpy(rhash.type, "shash", sizeof(rhash.type));
 
        v32 = atomic_read(&alg->hash_cnt);
@@ -251,6 +267,8 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
        u64 v64;
        u32 v32;
 
+       memset(&rrng, 0, sizeof(rrng));
+
        strncpy(rrng.type, "rng", sizeof(rrng.type));
 
        v32 = atomic_read(&alg->generate_cnt);
@@ -275,6 +293,8 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
                                 struct crypto_user_alg *ualg,
                                 struct sk_buff *skb)
 {
+       memset(ualg, 0, sizeof(*ualg));
+
        strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
        strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
                sizeof(ualg->cru_driver_name));
@@ -291,6 +311,7 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
        if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
                struct crypto_stat rl;
 
+               memset(&rl, 0, sizeof(rl));
                strlcpy(rl.type, "larval", sizeof(rl.type));
                if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
                            sizeof(struct crypto_stat), &rl))
index ef802f6e964218f06d00b035fc66d960cd4ce700..8aa10144407c04f936061c51d823d932660e2caa 100644 (file)
@@ -244,9 +244,8 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        spawn = skcipher_instance_ctx(inst);
        err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
                                CRYPTO_ALG_TYPE_MASK);
-       crypto_mod_put(alg);
        if (err)
-               goto err_free_inst;
+               goto err_put_alg;
 
        err = crypto_inst_setname(skcipher_crypto_instance(inst), "pcbc", alg);
        if (err)
@@ -275,12 +274,15 @@ static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;
+       crypto_mod_put(alg);
 
 out:
        return err;
 
 err_drop_spawn:
        crypto_drop_spawn(spawn);
+err_put_alg:
+       crypto_mod_put(alg);
 err_free_inst:
        kfree(inst);
        goto out;
index 812476e4682138225fd46fd2745062d5e1d55105..cfc04e15fd97506a6110c5845673db731a0cf757 100644 (file)
@@ -392,7 +392,8 @@ static int pkcs1pad_sign(struct akcipher_request *req)
        if (!ctx->key_size)
                return -EINVAL;
 
-       digest_size = digest_info->size;
+       if (digest_info)
+               digest_size = digest_info->size;
 
        if (req->src_len + digest_size > ctx->key_size - 11)
                return -EOVERFLOW;
@@ -412,8 +413,9 @@ static int pkcs1pad_sign(struct akcipher_request *req)
        memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
        req_ctx->in_buf[ps_end] = 0x00;
 
-       memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
-              digest_info->size);
+       if (digest_info)
+               memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
+                      digest_info->size);
 
        pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
                        ctx->key_size - 1 - req->src_len, req->src);
@@ -475,10 +477,13 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
                goto done;
        pos++;
 
-       if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
-               goto done;
+       if (digest_info) {
+               if (crypto_memneq(out_buf + pos, digest_info->data,
+                                 digest_info->size))
+                       goto done;
 
-       pos += digest_info->size;
+               pos += digest_info->size;
+       }
 
        err = 0;
 
@@ -608,11 +613,14 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 
        hash_name = crypto_attr_alg_name(tb[2]);
        if (IS_ERR(hash_name))
-               return PTR_ERR(hash_name);
+               hash_name = NULL;
 
-       digest_info = rsa_lookup_asn1(hash_name);
-       if (!digest_info)
-               return -EINVAL;
+       if (hash_name) {
+               digest_info = rsa_lookup_asn1(hash_name);
+               if (!digest_info)
+                       return -EINVAL;
+       } else {
+               digest_info = NULL;
+       }
 
        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
@@ -632,14 +640,29 @@ static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
 
        err = -ENAMETOOLONG;
 
-       if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
-                    "pkcs1pad(%s,%s)", rsa_alg->base.cra_name, hash_name) >=
-           CRYPTO_MAX_ALG_NAME ||
-           snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-                    "pkcs1pad(%s,%s)",
-                    rsa_alg->base.cra_driver_name, hash_name) >=
-           CRYPTO_MAX_ALG_NAME)
-               goto out_drop_alg;
+       if (!hash_name) {
+               if (snprintf(inst->alg.base.cra_name,
+                            CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+                            rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto out_drop_alg;
+
+               if (snprintf(inst->alg.base.cra_driver_name,
+                            CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
+                            rsa_alg->base.cra_driver_name) >=
+                            CRYPTO_MAX_ALG_NAME)
+                       goto out_drop_alg;
+       } else {
+               if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
+                            "pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
+                            hash_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto out_drop_alg;
+
+               if (snprintf(inst->alg.base.cra_driver_name,
+                            CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
+                            rsa_alg->base.cra_driver_name,
+                            hash_name) >= CRYPTO_MAX_ALG_NAME)
+                       goto out_drop_alg;
+       }
 
        inst->alg.base.cra_flags = rsa_alg->base.cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
index ea7240be3001ba245c12d3214c11a7c7e6a8a1fd..78e8d037ae2b342d94ff837d6c9de82b6a4a1090 100644 (file)
@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
 
        ctx->cryptd_tfm = cryptd_tfm;
 
-       reqsize = sizeof(struct skcipher_request);
-       reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+       reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
+       reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
+       reqsize += sizeof(struct skcipher_request);
 
        crypto_skcipher_set_reqsize(tfm, reqsize);
 
index 8f3a444c6ea9233a2c0cd116e387a71c65d2360f..7cea769c37df55b50c55a7e8751a05057b5348cd 100644 (file)
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
 
 config XPOWER_PMIC_OPREGION
        bool "ACPI operation region support for XPower AXP288 PMIC"
-       depends on MFD_AXP20X_I2C && IOSF_MBI
+       depends on MFD_AXP20X_I2C && IOSF_MBI=y
        help
          This config adds ACPI operation region support for XPower AXP288 PMIC.
 
index eaa60c94205a82f685190a5c0790d6ca91df69cb..1f32caa87686e2369b0f4841447d5ab5093b993d 100644 (file)
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
        {"PNP0200",  0},        /* AT DMA Controller */
        {"ACPI0009", 0},        /* IOxAPIC */
        {"ACPI000A", 0},        /* IOAPIC */
+       {"SMB0001",  0},        /* ACPI SMBUS virtual device */
        {"", 0},
 };
 
index 0d42f30e5b25d93b33f4566a72658f0ccabf7254..9920fac6413ffb94cce8d5eb146dbb15df5283a4 100644 (file)
@@ -244,7 +244,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
 {
        acpi_status status;
        u32 buffer_length;
-       u32 data_length;
        void *buffer;
        union acpi_operand_object *buffer_desc;
        u32 function;
@@ -282,14 +281,12 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
        case ACPI_ADR_SPACE_SMBUS:
 
                buffer_length = ACPI_SMBUS_BUFFER_SIZE;
-               data_length = ACPI_SMBUS_DATA_SIZE;
                function = ACPI_WRITE | (obj_desc->field.attribute << 16);
                break;
 
        case ACPI_ADR_SPACE_IPMI:
 
                buffer_length = ACPI_IPMI_BUFFER_SIZE;
-               data_length = ACPI_IPMI_DATA_SIZE;
                function = ACPI_WRITE;
                break;
 
@@ -310,7 +307,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
                /* Add header length to get the full size of the buffer */
 
                buffer_length += ACPI_SERIAL_HEADER_SIZE;
-               data_length = source_desc->buffer.pointer[1];
                function = ACPI_WRITE | (accessor_type << 16);
                break;
 
@@ -318,20 +314,6 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
                return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
        }
 
-#if 0
-       OBSOLETE ?
-           /* Check for possible buffer overflow */
-           if (data_length > source_desc->buffer.length) {
-               ACPI_ERROR((AE_INFO,
-                           "Length in buffer header (%u)(%u) is greater than "
-                           "the physical buffer length (%u) and will overflow",
-                           data_length, buffer_length,
-                           source_desc->buffer.length));
-
-               return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
-       }
-#endif
-
        /* Create the transfer/bidirectional/return buffer */
 
        buffer_desc = acpi_ut_create_buffer_object(buffer_length);
@@ -342,7 +324,8 @@ acpi_ex_write_serial_bus(union acpi_operand_object *source_desc,
        /* Copy the input buffer data to the transfer buffer */
 
        buffer = buffer_desc->buffer.pointer;
-       memcpy(buffer, source_desc->buffer.pointer, data_length);
+       memcpy(buffer, source_desc->buffer.pointer,
+              min(buffer_length, source_desc->buffer.length));
 
        /* Lock entire transaction if requested */
 
index 2a361e22d38d062e73d2ad4db2e5fb0c15c6913a..70f4e80b9246a16f62e9459925ef1efd9905d9f5 100644 (file)
@@ -700,7 +700,7 @@ static void iort_set_device_domain(struct device *dev,
  */
 static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
 {
-       struct acpi_iort_node *node, *msi_parent;
+       struct acpi_iort_node *node, *msi_parent = NULL;
        struct fwnode_handle *iort_fwnode;
        struct acpi_iort_its_group *its;
        int i;
index a7c2673ffd36e8a8287a40182fae189e6b71688e..824ae985ad93bebacbb70010e2e2c632a3d143a8 100644 (file)
@@ -126,6 +126,7 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
 
        return 0;
 }
+EXPORT_SYMBOL(acpi_device_get_power);
 
 static int acpi_dev_pm_explicit_set(struct acpi_device *adev, int state)
 {
index f8c638f3c946d904fd0d9ea0aa3301bca2bafd14..5912d30020c7100025dbab0b8cccf09697d1580a 100644 (file)
@@ -1308,7 +1308,7 @@ static ssize_t scrub_store(struct device *dev,
        if (nd_desc) {
                struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
 
-               rc = acpi_nfit_ars_rescan(acpi_desc, 0);
+               rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
        }
        device_unlock(dev);
        if (rc)
@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
                return rc;
 
        if (ars_status_process_records(acpi_desc))
-               return -ENOMEM;
+               dev_err(acpi_desc->dev, "Failed to process ARS records\n");
 
-       return 0;
+       return rc;
 }
 
 static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
                struct nvdimm *nvdimm, unsigned int cmd)
 {
        struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
-       struct nfit_spa *nfit_spa;
-       int rc = 0;
 
        if (nvdimm)
                return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
         * just needs guarantees that any ARS it initiates are not
         * interrupted by any intervening start requests from userspace.
         */
-       mutex_lock(&acpi_desc->init_mutex);
-       list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
-               if (acpi_desc->scrub_spa
-                               || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
-                               || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
-                       rc = -EBUSY;
-                       break;
-               }
-       mutex_unlock(&acpi_desc->init_mutex);
+       if (work_busy(&acpi_desc->dwork.work))
+               return -EBUSY;
 
-       return rc;
+       return 0;
 }
 
 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
index e9626bf6ca2960a2398aeeefc0f4c9e814e60c1b..d6c1b10f6c2542a8cfbbac6dae31246cd35134f7 100644 (file)
@@ -25,8 +25,12 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val,
        struct acpi_nfit_desc *acpi_desc;
        struct nfit_spa *nfit_spa;
 
-       /* We only care about memory errors */
-       if (!mce_is_memory_error(mce))
+       /* We only care about uncorrectable memory errors */
+       if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+               return NOTIFY_DONE;
+
+       /* Verify the address reported in the MCE is valid. */
+       if (!mce_usable_address(mce))
                return NOTIFY_DONE;
 
        /*
index cb30a524d16d8a9846caceba2a7aa2c9aba6e4ae..9f1000d2a40c791925430d30e316e3e45a000245 100644 (file)
@@ -2974,7 +2974,6 @@ static void binder_transaction(struct binder_proc *proc,
                t->buffer = NULL;
                goto err_binder_alloc_buf_failed;
        }
-       t->buffer->allow_user_free = 0;
        t->buffer->debug_id = t->debug_id;
        t->buffer->transaction = t;
        t->buffer->target_node = target_node;
@@ -3510,14 +3509,18 @@ static int binder_thread_write(struct binder_proc *proc,
 
                        buffer = binder_alloc_prepare_to_free(&proc->alloc,
                                                              data_ptr);
-                       if (buffer == NULL) {
-                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
-                                       proc->pid, thread->pid, (u64)data_ptr);
-                               break;
-                       }
-                       if (!buffer->allow_user_free) {
-                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
-                                       proc->pid, thread->pid, (u64)data_ptr);
+                       if (IS_ERR_OR_NULL(buffer)) {
+                               if (PTR_ERR(buffer) == -EPERM) {
+                                       binder_user_error(
+                                               "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
+                                               proc->pid, thread->pid,
+                                               (u64)data_ptr);
+                               } else {
+                                       binder_user_error(
+                                               "%d:%d BC_FREE_BUFFER u%016llx no match\n",
+                                               proc->pid, thread->pid,
+                                               (u64)data_ptr);
+                               }
                                break;
                        }
                        binder_debug(BINDER_DEBUG_FREE_BUFFER,
index 64fd96eada31f42e5677a72de837fafa2987165b..030c98f35cca73aaf5168f559e259e658b5d05ad 100644 (file)
@@ -151,16 +151,12 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                else {
                        /*
                         * Guard against user threads attempting to
-                        * free the buffer twice
+                        * free the buffer when in use by kernel or
+                        * after it's already been freed.
                         */
-                       if (buffer->free_in_progress) {
-                               binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
-                                                  "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
-                                                  alloc->pid, current->pid,
-                                                  (u64)user_ptr);
-                               return NULL;
-                       }
-                       buffer->free_in_progress = 1;
+                       if (!buffer->allow_user_free)
+                               return ERR_PTR(-EPERM);
+                       buffer->allow_user_free = 0;
                        return buffer;
                }
        }
@@ -500,7 +496,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 
        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
-       buffer->free_in_progress = 0;
+       buffer->allow_user_free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got %pK\n",
index 9ef64e56385667a53abeab2f41f67b198a8ce86e..fb3238c74c8a8671282cf9a428febaf182880821 100644 (file)
@@ -50,8 +50,7 @@ struct binder_buffer {
        unsigned free:1;
        unsigned allow_user_free:1;
        unsigned async_transaction:1;
-       unsigned free_in_progress:1;
-       unsigned debug_id:28;
+       unsigned debug_id:29;
 
        struct binder_transaction *transaction;
 
index 6e594644cb1d360dabbdf3a4b68851a45c5bfb0f..b8c3f9e6af8994820c30b40889154f87511014e0 100644 (file)
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        /* These specific Samsung models/firmware-revs do not handle LPM well */
        { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
        { "SAMSUNG SSD PM830 mSATA *",  "CXM13D1Q", ATA_HORKAGE_NOLPM, },
-       { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+       { "SAMSUNG MZ7TD256HAFV-000L9", NULL,       ATA_HORKAGE_NOLPM, },
 
        /* devices that don't properly handle queued TRIM commands */
        { "Micron_M500IT_*",            "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4602,6 +4602,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "SSD*INTEL*",                 NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Samsung*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "SAMSUNG*SSD*",               NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
+       { "SAMSUNG*MZ7KM*",             NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "ST[1248][0248]0[FH]*",       NULL,   ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
        /*
index 10ecb232245db8c617ee808966db432ece834358..4b1ff5bc256a3032191f090226ffb4c5d0286ae9 100644 (file)
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Renesas R-Car SATA driver
  *
  * Author: Vladimir Barinov <source@cogentembedded.com>
  * Copyright (C) 2013-2015 Cogent Embedded, Inc.
  * Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
  */
 
 #include <linux/kernel.h>
index 4e46dc9e41ad01142ffdcfe1c38921df9db0ebda..11e1663bdc4dee0e2cfd7cd9ba61783d00277bbf 100644 (file)
@@ -1410,7 +1410,7 @@ static int init_q(struct fs_dev *dev, struct queue *txq, int queue,
 
        func_enter ();
 
-       fs_dprintk (FS_DEBUG_INIT, "Inititing queue at %x: %d entries:\n", 
+       fs_dprintk (FS_DEBUG_INIT, "Initializing queue at %x: %d entries:\n",
                    queue, nentries);
 
        p = aligned_kmalloc (sz, GFP_KERNEL, 0x10);
@@ -1443,7 +1443,7 @@ static int init_fp(struct fs_dev *dev, struct freepool *fp, int queue,
 {
        func_enter ();
 
-       fs_dprintk (FS_DEBUG_INIT, "Inititing free pool at %x:\n", queue);
+       fs_dprintk (FS_DEBUG_INIT, "Initializing free pool at %x:\n", queue);
 
        write_fs (dev, FP_CNF(queue), (bufsize * RBFP_RBS) | RBFP_RBSVAL | RBFP_CME);
        write_fs (dev, FP_SA(queue),  0);
index 3b25a643058c9dde38511d646da8700e53c46837..21b9b2f2470a26d1f2d1c2d5eb4237fe3902af82 100644 (file)
@@ -155,10 +155,9 @@ struct logical_input {
                        int release_data;
                } std;
                struct {        /* valid when type == INPUT_TYPE_KBD */
-                       /* strings can be non null-terminated */
-                       char press_str[sizeof(void *) + sizeof(int)];
-                       char repeat_str[sizeof(void *) + sizeof(int)];
-                       char release_str[sizeof(void *) + sizeof(int)];
+                       char press_str[sizeof(void *) + sizeof(int)] __nonstring;
+                       char repeat_str[sizeof(void *) + sizeof(int)] __nonstring;
+                       char release_str[sizeof(void *) + sizeof(int)] __nonstring;
                } kbd;
        } u;
 };
index 4aaf00d2098b432f6056cc2391621ccb2d78a1dc..e038e2b3b7ea4a7d53d2d7c189f9dbac269ea228 100644 (file)
@@ -26,8 +26,14 @@ struct devres_node {
 
 struct devres {
        struct devres_node              node;
-       /* -- 3 pointers */
-       unsigned long long              data[]; /* guarantee ull alignment */
+       /*
+        * Some archs want to perform DMA into kmalloc caches
+        * and need a guaranteed alignment larger than
+        * the alignment of a 64-bit integer.
+        * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+        * buffer alignment as if it was allocated by plain kmalloc().
+        */
+       u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
 };
 
 struct devres_group {
index df8103dd40ac2d1f6e5c29ef7790160a3335b35b..c18586fccb6f2b25c3d0ef535144deeabf640006 100644 (file)
@@ -396,15 +396,14 @@ static struct brd_device *brd_alloc(int i)
        disk->first_minor       = i * max_part;
        disk->fops              = &brd_fops;
        disk->private_data      = brd;
-       disk->queue             = brd->brd_queue;
        disk->flags             = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "ram%d", i);
        set_capacity(disk, rd_size * 2);
-       disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
+       brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
 
        /* Tell the block layer that this is not a rotational device */
-       blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
-       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
+       blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, brd->brd_queue);
 
        return brd;
 
@@ -436,6 +435,7 @@ static struct brd_device *brd_init_one(int i, bool *new)
 
        brd = brd_alloc(i);
        if (brd) {
+               brd->brd_disk->queue = brd->brd_queue;
                add_disk(brd->brd_disk);
                list_add_tail(&brd->brd_list, &brd_devices);
        }
@@ -503,8 +503,14 @@ static int __init brd_init(void)
 
        /* point of no return */
 
-       list_for_each_entry(brd, &brd_devices, brd_list)
+       list_for_each_entry(brd, &brd_devices, brd_list) {
+               /*
+                * Associate with the queue just before adding the disk,
+                * to avoid messing up the failure path.
+                */
+               brd->brd_disk->queue = brd->brd_queue;
                add_disk(brd->brd_disk);
+       }
 
        blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
                                  THIS_MODULE, brd_probe, NULL, NULL);
index 55fd104f1ed4b91cf36b0d6cb1c8b9270443507a..fa8204214ac027adf660db960d4297d3f0cca7bb 100644 (file)
@@ -1856,7 +1856,7 @@ int drbd_send(struct drbd_connection *connection, struct socket *sock,
 
        /* THINK  if (signal_pending) return ... ? */
 
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+       iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
 
        if (sock == connection->data.socket) {
                rcu_read_lock();
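
The iov_iter_kvec()/iov_iter_bvec() hunks here and in the loop and nbd changes
below track an iov_iter API change: the iterator type (ITER_KVEC, ITER_BVEC)
is now implied by the constructor, so the second argument carries only the
data direction. Schematically:

    /* old: direction ORed with the iterator type */
    iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
    /* new: the constructor already implies ITER_KVEC */
    iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);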
index fc67fd853375c033a253753c02b8a3a8c23df4b2..61c392752fe4bbfeba5b1b404bf64cff7e9d8b00 100644 (file)
@@ -516,7 +516,7 @@ static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flag
        struct msghdr msg = {
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, size);
+       iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
        return sock_recvmsg(sock, &msg, msg.msg_flags);
 }
 
index a8cfa011c28483ef389ee161b5ca86af71eac13e..fb23578e9a416703648154b7371f05bbe3f5ceb8 100644 (file)
@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
        bio.bi_end_io = floppy_rb0_cb;
        bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
+       init_completion(&cbdata.complete);
+
        submit_bio(&bio);
        process_fd_request();
 
-       init_completion(&cbdata.complete);
        wait_for_completion(&cbdata.complete);
 
        __free_page(page);
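
The floppy hunk closes an initialisation-order race: submit_bio() can
complete, and with it floppy_rb0_cb() signal cbdata.complete, before
init_completion() had run on it. A completion must be initialised before any
asynchronous work that may signal it is started (submit_async_work() below is
a hypothetical stand-in):

    struct completion done;

    init_completion(&done);         /* before anything can complete it */
    submit_async_work(&done);       /* hypothetical async submission */
    wait_for_completion(&done);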
index abad6d15f956343ff86ad45d0f40ff4c7faae50b..cb0cc868507620513d3de7658ed1f6999ceaa965 100644 (file)
@@ -77,7 +77,6 @@
 #include <linux/falloc.h>
 #include <linux/uio.h>
 #include <linux/ioprio.h>
-#include <linux/blk-cgroup.h>
 
 #include "loop.h"
 
@@ -269,7 +268,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
        struct iov_iter i;
        ssize_t bw;
 
-       iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len);
+       iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);
 
        file_start_write(file);
        bw = vfs_iter_write(file, &i, ppos, 0);
@@ -347,7 +346,7 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
        ssize_t len;
 
        rq_for_each_segment(bvec, rq, iter) {
-               iov_iter_bvec(&i, ITER_BVEC, &bvec, 1, bvec.bv_len);
+               iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
                len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
                if (len < 0)
                        return len;
@@ -388,7 +387,7 @@ static int lo_read_transfer(struct loop_device *lo, struct request *rq,
                b.bv_offset = 0;
                b.bv_len = bvec.bv_len;
 
-               iov_iter_bvec(&i, ITER_BVEC, &b, 1, b.bv_len);
+               iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
                len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
                if (len < 0) {
                        ret = len;
@@ -555,8 +554,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
        }
        atomic_set(&cmd->ref, 2);
 
-       iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
-                     segments, blk_rq_bytes(rq));
+       iov_iter_bvec(&iter, rw, bvec, segments, blk_rq_bytes(rq));
        iter.iov_offset = offset;
 
        cmd->iocb.ki_pos = pos;
@@ -1761,8 +1759,8 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        /* always use the first bio's css */
 #ifdef CONFIG_BLK_CGROUP
-       if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
-               cmd->css = &bio_blkcg(rq->bio)->css;
+       if (cmd->use_aio && rq->bio && rq->bio->bi_css) {
+               cmd->css = rq->bio->bi_css;
                css_get(cmd->css);
        } else
 #endif
index dfc8de6ce5254872a8b96ec364d7cd3136f4f7d7..a7daa8acbab3a53feda3e1e92dcc51d86a32ba71 100644 (file)
@@ -1942,8 +1942,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
                                dev_warn(&dd->pdev->dev,
                                        "data movement but "
                                        "sect_count is 0\n");
-                                       err = -EINVAL;
-                                       goto abort;
+                               err = -EINVAL;
+                               goto abort;
                        }
                }
        }
index 14a51254c3db7f19c94cdab62e1d9e192c7ae02f..4d4d6129ff6627f1249cade3101d2927d7db5a25 100644 (file)
@@ -473,7 +473,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        u32 nbd_cmd_flags = 0;
        int sent = nsock->sent, skip = 0;
 
-       iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+       iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
 
        switch (req_op(req)) {
        case REQ_OP_DISCARD:
@@ -564,8 +564,7 @@ send_pages:
 
                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                req, bvec.bv_len);
-                       iov_iter_bvec(&from, ITER_BVEC | WRITE,
-                                     &bvec, 1, bvec.bv_len);
+                       iov_iter_bvec(&from, WRITE, &bvec, 1, bvec.bv_len);
                        if (skip) {
                                if (skip >= iov_iter_count(&from)) {
                                        skip -= iov_iter_count(&from);
@@ -624,7 +623,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
        int ret = 0;
 
        reply.magic = 0;
-       iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
+       iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
        if (result <= 0) {
                if (!nbd_disconnected(config))
@@ -678,8 +677,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                struct bio_vec bvec;
 
                rq_for_each_segment(bvec, req, iter) {
-                       iov_iter_bvec(&to, ITER_BVEC | READ,
-                                     &bvec, 1, bvec.bv_len);
+                       iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
                        result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
                        if (result <= 0) {
                                dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
@@ -1073,7 +1071,7 @@ static void send_disconnects(struct nbd_device *nbd)
        for (i = 0; i < config->num_connections; i++) {
                struct nbd_sock *nsock = config->socks[i];
 
-               iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
+               iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
                mutex_lock(&nsock->tx_lock);
                ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
                if (ret <= 0)
index 56452cabce5b587cb7309f9ba24640bbf0ba05da..0ed4b200fa5855e10a142b6f6ce237901cf749ec 100644 (file)
@@ -1919,6 +1919,7 @@ static int negotiate_mq(struct blkfront_info *info)
                              GFP_KERNEL);
        if (!info->rinfo) {
                xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+               info->nr_rings = 0;
                return -ENOMEM;
        }
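
Zeroing nr_rings on the failure path keeps the ring count consistent with the failed allocation, so any later teardown loop runs zero times instead of dereferencing the NULL info->rinfo array. Illustrative sketch (free_one_ring() is a stand-in for the driver's per-ring teardown):

    for (i = 0; i < info->nr_rings; i++)     /* nr_rings == 0: never entered */
            free_one_ring(&info->rinfo[i]);  /* would oops on a NULL array */
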
 
index ef0ca9414f371bc3275b7afce529029e10b49f68..ff83e899df71fca602aadba443fdce73308eb9c5 100644 (file)
@@ -210,6 +210,7 @@ static int of_fixed_factor_clk_remove(struct platform_device *pdev)
 {
        struct clk *clk = platform_get_drvdata(pdev);
 
+       of_clk_del_provider(pdev->dev.of_node);
        clk_unregister_fixed_factor(clk);
 
        return 0;
index c981159b02c0f09c604a78005f26103c75962e9c..792735d7e46ea0faf3299f710813df3f98cd3834 100644 (file)
@@ -325,6 +325,7 @@ static struct clk_regmap axg_fclk_div2 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div2_div" },
                .num_parents = 1,
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
@@ -349,6 +350,18 @@ static struct clk_regmap axg_fclk_div3 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div3_div" },
                .num_parents = 1,
+               /*
+                * FIXME:
+                * This clock, like fdiv2, is used by the SCPI firmware and
+                * is required for the platform to operate correctly.
+                * Until the following conditions are met, we need this
+                * clock to be marked as critical:
+                * a) The SCPI generic driver claims and enables all the
+                *    clocks it needs
+                * b) CCF has a clock hand-off mechanism to make sure the
+                *    clock stays on until the proper driver comes along
+                */
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index 9309cfaaa464ebd5f3e7d26e174c3c8449e16208..4ada9668fd49c2596de2667aebccd841ee673bb5 100644 (file)
@@ -506,6 +506,18 @@ static struct clk_regmap gxbb_fclk_div3 = {
                .ops = &clk_regmap_gate_ops,
                .parent_names = (const char *[]){ "fclk_div3_div" },
                .num_parents = 1,
+               /*
+                * FIXME:
+                * This clock, like fdiv2, is used by the SCPI firmware and
+                * is required for the platform to operate correctly.
+                * Until the following conditions are met, we need this
+                * clock to be marked as critical:
+                * a) The SCPI generic driver claims and enables all the
+                *    clocks it needs
+                * b) CCF has a clock hand-off mechanism to make sure the
+                *    clock stays on until the proper driver comes along
+                */
+               .flags = CLK_IS_CRITICAL,
        },
 };
 
index ad8d483a35cd5c16d3cbae4019dd345e3c7bb69b..ca7d37e2c7be6bb6aba2a7d4e1459d8c4369fcca 100644 (file)
@@ -183,7 +183,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
                pr_err("CLK %d has invalid pointer %p\n", id, clk);
                return;
        }
-       if (id > unit->nr_clks) {
+       if (id >= unit->nr_clks) {
                pr_err("CLK %d is invalid\n", id);
                return;
        }
index 9781b1bf599884d6ae37b06dea03453dc8caac35..9235a331b588068ac717ed66eb0c3db5b89f6f9f 100644 (file)
@@ -200,11 +200,11 @@ static struct clk_hw *cp110_of_clk_get(struct of_phandle_args *clkspec,
        unsigned int idx = clkspec->args[1];
 
        if (type == CP110_CLK_TYPE_CORE) {
-               if (idx > CP110_MAX_CORE_CLOCKS)
+               if (idx >= CP110_MAX_CORE_CLOCKS)
                        return ERR_PTR(-EINVAL);
                return clk_data->hws[idx];
        } else if (type == CP110_CLK_TYPE_GATABLE) {
-               if (idx > CP110_MAX_GATABLE_CLOCKS)
+               if (idx >= CP110_MAX_GATABLE_CLOCKS)
                        return ERR_PTR(-EINVAL);
                return clk_data->hws[CP110_MAX_CORE_CLOCKS + idx];
        }
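
The mmp and cp110 hunks above, and the zynqmp one below, all fix the same off-by-one: a table of count entries has valid indices 0..count-1, so index == count must be rejected as well. Minimal sketch (names are illustrative):

    static struct clk_hw *clk_table_get(struct clk_hw **hws,
                                        unsigned int count, unsigned int idx)
    {
            if (idx >= count)        /* '>' would let idx == count through */
                    return ERR_PTR(-EINVAL);
            return hws[idx];
    }
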
index db9b2471ac401fbb12c02ffb537c64fe718f430d..0a48ed56833b4b554a14c3d71b71edc50a72f348 100644 (file)
@@ -191,6 +191,22 @@ int qcom_cc_register_sleep_clk(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(qcom_cc_register_sleep_clk);
 
+/* Drop 'protected-clocks' from the list of clocks to register */
+static void qcom_cc_drop_protected(struct device *dev, struct qcom_cc *cc)
+{
+       struct device_node *np = dev->of_node;
+       struct property *prop;
+       const __be32 *p;
+       u32 i;
+
+       of_property_for_each_u32(np, "protected-clocks", prop, p, i) {
+               if (i >= cc->num_rclks)
+                       continue;
+
+               cc->rclks[i] = NULL;
+       }
+}
+
 static struct clk_hw *qcom_cc_clk_hw_get(struct of_phandle_args *clkspec,
                                         void *data)
 {
@@ -251,6 +267,8 @@ int qcom_cc_really_probe(struct platform_device *pdev,
        cc->rclks = rclks;
        cc->num_rclks = num_clks;
 
+       qcom_cc_drop_protected(dev, cc);
+
        for (i = 0; i < num_clks; i++) {
                if (!rclks[i])
                        continue;
index e4ca6a45f31397324d4f79378b59036a38218641..ef1b267cb058a4a03f0ead86218ee165653fd737 100644 (file)
@@ -265,7 +265,7 @@ static struct clk_fixed_factor cxo = {
        .div = 1,
        .hw.init = &(struct clk_init_data){
                .name = "cxo",
-               .parent_names = (const char *[]){ "xo_board" },
+               .parent_names = (const char *[]){ "xo-board" },
                .num_parents = 1,
                .ops = &clk_fixed_factor_ops,
        },
index 9d7d297f0ea8d9b0fbc5c5e5a02c172b0ae14064..f65cc0ff76abdb630b0694eb7c3babcc2c46ff71 100644 (file)
@@ -128,7 +128,7 @@ static const struct zynqmp_eemi_ops *eemi_ops;
  */
 static inline int zynqmp_is_valid_clock(u32 clk_id)
 {
-       if (clk_id > clock_max_idx)
+       if (clk_id >= clock_max_idx)
                return -ENODEV;
 
        return clock[clk_id].valid;
@@ -279,6 +279,9 @@ struct clk_hw *zynqmp_clk_register_fixed_factor(const char *name, u32 clk_id,
        qdata.arg1 = clk_id;
 
        ret = eemi_ops->query_data(qdata, ret_payload);
+       if (ret)
+               return ERR_PTR(ret);
+
        mult = ret_payload[1];
        div = ret_payload[2];
 
index a11f4ba98b05c57d08b211ac933f93fcf7cb4616..55c77e44bb2db3e439fd727d8cffcb5d8a279d3e 100644 (file)
@@ -620,4 +620,22 @@ config RISCV_TIMER
          is accessed via both the SBI and the rdcycle instruction.  This is
          required for all RISC-V systems.
 
+config CSKY_MP_TIMER
+       bool "SMP Timer for the C-SKY platform" if COMPILE_TEST
+       depends on CSKY
+       select TIMER_OF
+       help
+         Say yes here to enable the C-SKY SMP timer driver used on C-SKY
+         SMP systems.
+         csky,mptimer is not limited to SMP systems; it can also be used
+         on single-core systems. It is not an MMIO block; it is accessed
+         through the mtcr/mfcr instructions.
+
+config GX6605S_TIMER
+       bool "Gx6605s SOC system timer driver" if COMPILE_TEST
+       depends on CSKY
+       select CLKSRC_MMIO
+       select TIMER_OF
+       help
+         This option enables support for the timer on the gx6605s SoC.
+
 endmenu
index e33b21d3f9d8b360305e309ba729457e025b25c2..dd913810456886d1bcf5aacb8da7b8c445d20411 100644 (file)
@@ -79,3 +79,5 @@ obj-$(CONFIG_CLKSRC_ST_LPC)           += clksrc_st_lpc.o
 obj-$(CONFIG_X86_NUMACHIP)             += numachip.o
 obj-$(CONFIG_ATCPIT100_TIMER)          += timer-atcpit100.o
 obj-$(CONFIG_RISCV_TIMER)              += riscv_timer.o
+obj-$(CONFIG_CSKY_MP_TIMER)            += timer-mp-csky.o
+obj-$(CONFIG_GX6605S_TIMER)            += timer-gx6605s.o
index 9c38895542f4abb5bff8c487ff22701e008443a2..d4350bb10b83a26aa1c9a56555ff8a20e949148a 100644 (file)
 DEFINE_RAW_SPINLOCK(i8253_lock);
 EXPORT_SYMBOL(i8253_lock);
 
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform-specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
 #ifdef CONFIG_CLKSRC_I8253
 /*
  * Since the PIT overflows every tick, it's not very useful
@@ -109,8 +116,11 @@ static int pit_shutdown(struct clock_event_device *evt)
        raw_spin_lock(&i8253_lock);
 
        outb_p(0x30, PIT_MODE);
-       outb_p(0, PIT_CH0);
-       outb_p(0, PIT_CH0);
+
+       if (i8253_clear_counter_on_shutdown) {
+               outb_p(0, PIT_CH0);
+               outb_p(0, PIT_CH0);
+       }
 
        raw_spin_unlock(&i8253_lock);
        return 0;
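
A platform affected by the quirk opts out by clearing the new flag; because it is __ro_after_init, that must happen during early boot, before init memory is sealed. A hypothetical sketch (the function name is illustrative, not part of the patch):

    static void __init my_platform_pit_setup(void)
    {
            /* writing zero to the counter restarts the PIT on this
             * hardware, so leave the counter alone on shutdown */
            i8253_clear_counter_on_shutdown = false;
    }
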
diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c
new file mode 100644 (file)
index 0000000..80d0939
--- /dev/null
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
+
+#include "timer-of.h"
+
+#define CLKSRC_OFFSET  0x40
+
+#define TIMER_STATUS   0x00
+#define TIMER_VALUE    0x04
+#define TIMER_CONTRL   0x10
+#define TIMER_CONFIG   0x20
+#define TIMER_DIV      0x24
+#define TIMER_INI      0x28
+
+#define GX6605S_STATUS_CLR     BIT(0)
+#define GX6605S_CONTRL_RST     BIT(0)
+#define GX6605S_CONTRL_START   BIT(1)
+#define GX6605S_CONFIG_EN      BIT(0)
+#define GX6605S_CONFIG_IRQ_EN  BIT(1)
+
+static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev)
+{
+       struct clock_event_device *ce = dev;
+       void __iomem *base = timer_of_base(to_timer_of(ce));
+
+       writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS);
+
+       ce->event_handler(ce);
+
+       return IRQ_HANDLED;
+}
+
+static int gx6605s_timer_set_oneshot(struct clock_event_device *ce)
+{
+       void __iomem *base = timer_of_base(to_timer_of(ce));
+
+       /* reset and stop counter */
+       writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL);
+
+       /* enable with irq and start */
+       writel_relaxed(GX6605S_CONFIG_EN | GX6605S_CONFIG_IRQ_EN,
+                      base + TIMER_CONFIG);
+
+       return 0;
+}
+
+static int gx6605s_timer_set_next_event(unsigned long delta,
+                                       struct clock_event_device *ce)
+{
+       void __iomem *base = timer_of_base(to_timer_of(ce));
+
+       /* use reset to pause timer */
+       writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL);
+
+       /* config next timeout value */
+       writel_relaxed(ULONG_MAX - delta, base + TIMER_INI);
+       writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL);
+
+       return 0;
+}
+
+static int gx6605s_timer_shutdown(struct clock_event_device *ce)
+{
+       void __iomem *base = timer_of_base(to_timer_of(ce));
+
+       writel_relaxed(0, base + TIMER_CONTRL);
+       writel_relaxed(0, base + TIMER_CONFIG);
+
+       return 0;
+}
+
+static struct timer_of to = {
+       .flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
+       .clkevt = {
+               .rating                 = 300,
+               .features               = CLOCK_EVT_FEAT_DYNIRQ |
+                                         CLOCK_EVT_FEAT_ONESHOT,
+               .set_state_shutdown     = gx6605s_timer_shutdown,
+               .set_state_oneshot      = gx6605s_timer_set_oneshot,
+               .set_next_event         = gx6605s_timer_set_next_event,
+               .cpumask                = cpu_possible_mask,
+       },
+       .of_irq = {
+               .handler                = gx6605s_timer_interrupt,
+               .flags                  = IRQF_TIMER | IRQF_IRQPOLL,
+       },
+};
+
+static u64 notrace gx6605s_sched_clock_read(void)
+{
+       void __iomem *base;
+
+       base = timer_of_base(&to) + CLKSRC_OFFSET;
+
+       return (u64)readl_relaxed(base + TIMER_VALUE);
+}
+
+static void gx6605s_clkevt_init(void __iomem *base)
+{
+       writel_relaxed(0, base + TIMER_DIV);
+       writel_relaxed(0, base + TIMER_CONFIG);
+
+       clockevents_config_and_register(&to.clkevt, timer_of_rate(&to), 2,
+                                       ULONG_MAX);
+}
+
+static int gx6605s_clksrc_init(void __iomem *base)
+{
+       writel_relaxed(0, base + TIMER_DIV);
+       writel_relaxed(0, base + TIMER_INI);
+
+       writel_relaxed(GX6605S_CONTRL_RST, base + TIMER_CONTRL);
+
+       writel_relaxed(GX6605S_CONFIG_EN, base + TIMER_CONFIG);
+
+       writel_relaxed(GX6605S_CONTRL_START, base + TIMER_CONTRL);
+
+       sched_clock_register(gx6605s_sched_clock_read, 32, timer_of_rate(&to));
+
+       return clocksource_mmio_init(base + TIMER_VALUE, "gx6605s",
+                       timer_of_rate(&to), 200, 32, clocksource_mmio_readl_up);
+}
+
+static int __init gx6605s_timer_init(struct device_node *np)
+{
+       int ret;
+
+       /*
+        * This driver is for the NationalChip gx6605s SoC, which contains
+        * two identical timers. One is used as the clock event device and
+        * the other as the clock source.
+        *
+        * The timers are accessed through MMIO, so the register base
+        * address must be provided in the device tree.
+        *
+        * Each timer is a 32-bit count-up counter whose interrupt fires
+        * on overflow, so set_next_event programs ULONG_MAX - delta into
+        * the TIMER_INI register.
+        *
+        * The counter at offset 0x0  is the clock event.
+        * The counter at offset 0x40 is the clock source.
+        * They are identical in hardware; the driver just uses them
+        * differently.
+        */
+       ret = timer_of_init(np, &to);
+       if (ret)
+               return ret;
+
+       gx6605s_clkevt_init(timer_of_base(&to));
+
+       return gx6605s_clksrc_init(timer_of_base(&to) + CLKSRC_OFFSET);
+}
+TIMER_OF_DECLARE(csky_gx6605s_timer, "csky,gx6605s-timer", gx6605s_timer_init);
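
The count-up arithmetic in this driver is worth spelling out: the counter runs upward and interrupts on 32-bit overflow, so loading ULONG_MAX - delta leaves roughly delta input ticks until the wrap (whether the event lands on reaching the maximum or on the wrap itself is a hardware detail). A sketch, assuming the 32-bit unsigned long of C-SKY (the helper name is illustrative):

    /* value to program for an event about 'delta' ticks away */
    static unsigned long gx6605s_load_value(unsigned long delta)
    {
            return ULONG_MAX - delta;    /* counts up, wraps after ~delta ticks */
    }
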
diff --git a/drivers/clocksource/timer-mp-csky.c b/drivers/clocksource/timer-mp-csky.c
new file mode 100644 (file)
index 0000000..a8acc43
--- /dev/null
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/sched_clock.h>
+#include <linux/cpu.h>
+#include <linux/of_irq.h>
+#include <asm/reg_ops.h>
+
+#include "timer-of.h"
+
+#define PTIM_CCVR      "cr<3, 14>"
+#define PTIM_CTLR      "cr<0, 14>"
+#define PTIM_LVR       "cr<6, 14>"
+#define PTIM_TSR       "cr<1, 14>"
+
+static int csky_mptimer_irq;
+
+static int csky_mptimer_set_next_event(unsigned long delta,
+                                      struct clock_event_device *ce)
+{
+       mtcr(PTIM_LVR, delta);
+
+       return 0;
+}
+
+static int csky_mptimer_shutdown(struct clock_event_device *ce)
+{
+       mtcr(PTIM_CTLR, 0);
+
+       return 0;
+}
+
+static int csky_mptimer_oneshot(struct clock_event_device *ce)
+{
+       mtcr(PTIM_CTLR, 1);
+
+       return 0;
+}
+
+static int csky_mptimer_oneshot_stopped(struct clock_event_device *ce)
+{
+       mtcr(PTIM_CTLR, 0);
+
+       return 0;
+}
+
+static DEFINE_PER_CPU(struct timer_of, csky_to) = {
+       .flags                                  = TIMER_OF_CLOCK,
+       .clkevt = {
+               .rating                         = 300,
+               .features                       = CLOCK_EVT_FEAT_PERCPU |
+                                                 CLOCK_EVT_FEAT_ONESHOT,
+               .set_state_shutdown             = csky_mptimer_shutdown,
+               .set_state_oneshot              = csky_mptimer_oneshot,
+               .set_state_oneshot_stopped      = csky_mptimer_oneshot_stopped,
+               .set_next_event                 = csky_mptimer_set_next_event,
+       },
+};
+
+static irqreturn_t csky_timer_interrupt(int irq, void *dev)
+{
+       struct timer_of *to = this_cpu_ptr(&csky_to);
+
+       mtcr(PTIM_TSR, 0);
+
+       to->clkevt.event_handler(&to->clkevt);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * per-cpu clock event device setup/teardown
+ */
+static int csky_mptimer_starting_cpu(unsigned int cpu)
+{
+       struct timer_of *to = per_cpu_ptr(&csky_to, cpu);
+
+       to->clkevt.cpumask = cpumask_of(cpu);
+
+       clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+                                       2, ULONG_MAX);
+
+       enable_percpu_irq(csky_mptimer_irq, 0);
+
+       return 0;
+}
+
+static int csky_mptimer_dying_cpu(unsigned int cpu)
+{
+       disable_percpu_irq(csky_mptimer_irq);
+
+       return 0;
+}
+
+/*
+ * clock source
+ */
+static u64 sched_clock_read(void)
+{
+       return (u64)mfcr(PTIM_CCVR);
+}
+
+static u64 clksrc_read(struct clocksource *c)
+{
+       return (u64)mfcr(PTIM_CCVR);
+}
+
+struct clocksource csky_clocksource = {
+       .name   = "csky",
+       .rating = 400,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+       .read   = clksrc_read,
+};
+
+static int __init csky_mptimer_init(struct device_node *np)
+{
+       int ret, cpu, cpu_rollback;
+       struct timer_of *to = NULL;
+
+       /*
+        * csky,mptimer is designed for C-SKY SMP systems; every core has
+        * its own private irq and registers for the clock event device
+        * and the clock source.
+        *
+        * The registers are accessed with the mfcr/mtcr CPU instructions
+        * rather than through an MMIO mapping, so no register address is
+        * needed in the device tree; only the clock and the irq number
+        * are.
+        *
+        * The mptimer's private irq number is the same on every core,
+        * which is why request_percpu_irq() is used below.
+        */
+       csky_mptimer_irq = irq_of_parse_and_map(np, 0);
+       if (csky_mptimer_irq <= 0)
+               return -EINVAL;
+
+       ret = request_percpu_irq(csky_mptimer_irq, csky_timer_interrupt,
+                                "csky_mp_timer", &csky_to);
+       if (ret)
+               return -EINVAL;
+
+       for_each_possible_cpu(cpu) {
+               to = per_cpu_ptr(&csky_to, cpu);
+               ret = timer_of_init(np, to);
+               if (ret)
+                       goto rollback;
+       }
+
+       clocksource_register_hz(&csky_clocksource, timer_of_rate(to));
+       sched_clock_register(sched_clock_read, 32, timer_of_rate(to));
+
+       ret = cpuhp_setup_state(CPUHP_AP_CSKY_TIMER_STARTING,
+                               "clockevents/csky/timer:starting",
+                               csky_mptimer_starting_cpu,
+                               csky_mptimer_dying_cpu);
+       if (ret)
+               return -EINVAL;
+
+       return 0;
+
+rollback:
+       for_each_possible_cpu(cpu_rollback) {
+               if (cpu_rollback == cpu)
+                       break;
+
+               to = per_cpu_ptr(&csky_to, cpu_rollback);
+               timer_of_cleanup(to);
+       }
+       return -EINVAL;
+}
+TIMER_OF_DECLARE(csky_mptimer, "csky,mptimer", csky_mptimer_init);
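
The rollback label above is the standard partial-unwind idiom for per-cpu initialization: walk the CPUs again and clean up only those initialized before the failing one. Generic sketch ('state' and cleanup_one() are illustrative):

    for_each_possible_cpu(cpu_rollback) {
            if (cpu_rollback == cpu)    /* 'cpu' is where init failed */
                    break;
            cleanup_one(per_cpu_ptr(&state, cpu_rollback));
    }
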
index 8cfee0ab804b43e2dc90e9f55b241a7aa17de363..d8c3595e90236e5f9d87ca9b5f55a7cbdb76ccdc 100644 (file)
@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
        /* Ensure the arm clock divider is what we expect */
        ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
        if (ret) {
+               int ret1;
+
                dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-               regulator_set_voltage_tol(arm_reg, volt_old, 0);
+               ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+               if (ret1)
+                       dev_warn(cpu_dev,
+                                "failed to restore vddarm voltage: %d\n", ret1);
                return ret;
        }
 
index 3f0e2a14895a03dc0c4bc1d01c032d0f50a4b4ae..22b53bf268179ad8c2e873dd9637af61adf2f54d 100644 (file)
@@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
        {},
 };
 
+static const struct of_device_id *ti_cpufreq_match_node(void)
+{
+       struct device_node *np;
+       const struct of_device_id *match;
+
+       np = of_find_node_by_path("/");
+       match = of_match_node(ti_cpufreq_of_match, np);
+       of_node_put(np);
+
+       return match;
+}
+
 static int ti_cpufreq_probe(struct platform_device *pdev)
 {
        u32 version[VERSION_COUNT];
-       struct device_node *np;
        const struct of_device_id *match;
        struct opp_table *ti_opp_table;
        struct ti_cpufreq_data *opp_data;
        const char * const reg_names[] = {"vdd", "vbb"};
        int ret;
 
-       np = of_find_node_by_path("/");
-       match = of_match_node(ti_cpufreq_of_match, np);
-       of_node_put(np);
+       match = dev_get_platdata(&pdev->dev);
        if (!match)
                return -ENODEV;
 
@@ -290,7 +299,14 @@ fail_put_node:
 
 static int ti_cpufreq_init(void)
 {
-       platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+       const struct of_device_id *match;
+
+       /* Check to ensure we are on a compatible platform */
+       match = ti_cpufreq_match_node();
+       if (match)
+               platform_device_register_data(NULL, "ti-cpufreq", -1, match,
+                                             sizeof(*match));
+
        return 0;
 }
 module_init(ti_cpufreq_init);
index 073557f433eb1be630a7f64b8cfc9930771eff2a..3a407a3ef22b4c5a53046452c75c3784e0b77d9d 100644 (file)
@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
 {
        int ret;
        struct cpuidle_driver *drv;
-       struct cpuidle_device *dev;
 
        drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
        if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
                goto out_kfree_drv;
        }
 
-       ret = cpuidle_register_driver(drv);
-       if (ret) {
-               if (ret != -EBUSY)
-                       pr_err("Failed to register cpuidle driver\n");
-               goto out_kfree_drv;
-       }
-
        /*
         * Call arch CPU operations in order to initialize
         * idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
        ret = arm_cpuidle_init(cpu);
 
        /*
-        * Skip the cpuidle device initialization if the reported
+        * Allow the initialization to continue for other CPUs, if the reported
         * failure is a HW misconfiguration/breakage (-ENXIO).
         */
-       if (ret == -ENXIO)
-               return 0;
-
        if (ret) {
                pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-               goto out_unregister_drv;
-       }
-
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev) {
-               ret = -ENOMEM;
-               goto out_unregister_drv;
+               ret = ret == -ENXIO ? 0 : ret;
+               goto out_kfree_drv;
        }
-       dev->cpu = cpu;
 
-       ret = cpuidle_register_device(dev);
-       if (ret) {
-               pr_err("Failed to register cpuidle device for CPU %d\n",
-                      cpu);
-               goto out_kfree_dev;
-       }
+       ret = cpuidle_register(drv, NULL);
+       if (ret)
+               goto out_kfree_drv;
 
        return 0;
 
-out_kfree_dev:
-       kfree(dev);
-out_unregister_drv:
-       cpuidle_unregister_driver(drv);
 out_kfree_drv:
        kfree(drv);
        return ret;
@@ -178,9 +154,7 @@ out_fail:
        while (--cpu >= 0) {
                dev = per_cpu(cpuidle_devices, cpu);
                drv = cpuidle_get_cpu_driver(dev);
-               cpuidle_unregister_device(dev);
-               cpuidle_unregister_driver(drv);
-               kfree(dev);
+               cpuidle_unregister(drv);
                kfree(drv);
        }
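
The cpuidle-arm rework replaces the open-coded cpuidle_register_driver() plus per-cpu cpuidle_register_device() sequence with cpuidle_register(), which allocates and registers the per-cpu device internally; cpuidle_unregister() undoes both. Condensed lifecycle sketch:

    ret = cpuidle_register(drv, NULL);   /* NULL: no coupled-cpus mask */
    if (ret)
            kfree(drv);                  /* only the driver copy to free */
    /* ... and on teardown ... */
    cpuidle_unregister(drv);
    kfree(drv);
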
 
index f7d6d690116ee8f32bada36c6b25520976c219c5..cdc4f9a171d986625352319d76ccf243e417410a 100644 (file)
@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
        int *splits_in_nents;
        int *splits_out_nents = NULL;
        struct sec_request_el *el, *temp;
+       bool split = skreq->src != skreq->dst;
 
        mutex_init(&sec_req->lock);
        sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
        if (ret)
                goto err_free_split_sizes;
 
-       if (skreq->src != skreq->dst) {
+       if (split) {
                sec_req->len_out = sg_nents(skreq->dst);
                ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
                                           &splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
                                               split_sizes[i],
                                               skreq->src != skreq->dst,
                                               splits_in[i], splits_in_nents[i],
-                                              splits_out[i],
-                                              splits_out_nents[i], info);
+                                              split ? splits_out[i] : NULL,
+                                              split ? splits_out_nents[i] : 0,
+                                              info);
                if (IS_ERR(el)) {
                        ret = PTR_ERR(el);
                        goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
         * more refined but this is unlikely to happen so no need.
         */
 
-       /* Cleanup - all elements in pointer arrays have been coppied */
-       kfree(splits_in_nents);
-       kfree(splits_in);
-       kfree(splits_out_nents);
-       kfree(splits_out);
-       kfree(split_sizes);
-
        /* Grab a big lock for a long time to avoid concurrency issues */
        mutex_lock(&queue->queuelock);
 
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
             (!queue->havesoftqueue ||
              kfifo_avail(&queue->softqueue) > steps)) ||
            !list_empty(&ctx->backlog)) {
+               ret = -EBUSY;
                if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        list_add_tail(&sec_req->backlog_head, &ctx->backlog);
                        mutex_unlock(&queue->queuelock);
-                       return -EBUSY;
+                       goto out;
                }
 
-               ret = -EBUSY;
                mutex_unlock(&queue->queuelock);
                goto err_free_elements;
        }
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
        if (ret)
                goto err_free_elements;
 
-       return -EINPROGRESS;
+       ret = -EINPROGRESS;
+out:
+       /* Cleanup - all elements in pointer arrays have been copied */
+       kfree(splits_in_nents);
+       kfree(splits_in);
+       kfree(splits_out_nents);
+       kfree(splits_out);
+       kfree(split_sizes);
+       return ret;
 
 err_free_elements:
        list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ err_free_elements:
                                 crypto_skcipher_ivsize(atfm),
                                 DMA_BIDIRECTIONAL);
 err_unmap_out_sg:
-       if (skreq->src != skreq->dst)
+       if (split)
                sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
                                    splits_out_nents, sec_req->len_out,
                                    info->dev);
index 5b44ef226904f9be2530b992c3d06ed338dca53c..fc359ca4503d127fda5f9f631f95d98c4a1d671a 100644 (file)
@@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
        exp_info.ops  = &udmabuf_ops;
        exp_info.size = ubuf->pagecount << PAGE_SHIFT;
        exp_info.priv = ubuf;
+       exp_info.flags = O_RDWR;
 
        buf = dma_buf_export(&exp_info);
        if (IS_ERR(buf)) {
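
Setting exp_info.flags matters because dma_buf_export() turns those flags into the open flags of the backing file; without O_RDWR the exported fd is effectively read-only and writable mappings fail. Hypothetical userspace sketch (size and buf_fd are illustrative):

    /* succeeds only because the dma-buf file is now opened O_RDWR */
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
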
index 7cbac6e8c113fefd3eba8b9a568e34047097c478..01d936c9fe899cc0a1d9eeed60eefc94294ec2f5 100644 (file)
@@ -1641,6 +1641,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
        atchan->descs_allocated = 0;
        atchan->status = 0;
 
+       /*
+        * Free the atslave allocated in at_dma_xlate(). It is tied to the
+        * channel's lifetime rather than the platform device's, which is
+        * why it is allocated with plain kzalloc() there instead of
+        * devm_kzalloc().
+        */
+       kfree(chan->private);
+       chan->private = NULL;
+
        dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
 }
 
@@ -1675,7 +1681,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
 
-       atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+       atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
        if (!atslave)
                return NULL;
 
@@ -2000,6 +2006,8 @@ static int at_dma_remove(struct platform_device *pdev)
        struct resource         *io;
 
        at_dma_off(atdma);
+       if (pdev->dev.of_node)
+               of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&atdma->dma_common);
 
        dma_pool_destroy(atdma->memset_pool);
index d0c3e50b39fbd8ab5f15583113ab0b090af2da70..1fc488e90f363ae76c7901e2070fa3c1b77e8ce2 100644 (file)
@@ -1059,12 +1059,12 @@ static void dwc_issue_pending(struct dma_chan *chan)
 /*
  * Program FIFO size of channels.
  *
- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
+ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
  * slice the FIFO into equal parts between the channels.
  */
 static void idma32_fifo_partition(struct dw_dma *dw)
 {
-       u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
+       u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
                    IDMA32C_FP_UPDATE;
        u64 fifo_partition = 0;
 
@@ -1077,7 +1077,7 @@ static void idma32_fifo_partition(struct dw_dma *dw)
        /* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
        fifo_partition |= value << 32;
 
-       /* Program FIFO Partition registers - 128 bytes for each channel */
+       /* Program FIFO Partition registers - 64 bytes per channel */
        idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
        idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
 }
index b4ec2d20e66167786939ae867de0d93378ae4054..cb1b44d78a1f23ea19ad6a53982e997f7d9f25f9 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -33,6 +32,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
+#include <linux/workqueue.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
@@ -376,7 +376,7 @@ struct sdma_channel {
        u32                             shp_addr, per_addr;
        enum dma_status                 status;
        struct imx_dma_data             data;
-       struct dma_pool                 *bd_pool;
+       struct work_struct              terminate_worker;
 };
 
 #define IMX_DMA_SG_LOOP                BIT(0)
@@ -1027,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
 
        return 0;
 }
-
-static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+static void sdma_channel_terminate_work(struct work_struct *work)
 {
-       struct sdma_channel *sdmac = to_sdma_chan(chan);
+       struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
+                                                 terminate_worker);
        unsigned long flags;
        LIST_HEAD(head);
 
-       sdma_disable_channel(chan);
-       spin_lock_irqsave(&sdmac->vc.lock, flags);
-       vchan_get_all_descriptors(&sdmac->vc, &head);
-       sdmac->desc = NULL;
-       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-       vchan_dma_desc_free_list(&sdmac->vc, &head);
-
        /*
         * According to the NXP R&D team, a delay of one BD SDMA transfer
         * time (1 ms at most) should be added after the channel enable
         * bit is cleared, to ensure the SDMA core has really stopped
         * after SDMA clients call .device_terminate_all.
         */
-       mdelay(1);
+       usleep_range(1000, 2000);
+
+       spin_lock_irqsave(&sdmac->vc.lock, flags);
+       vchan_get_all_descriptors(&sdmac->vc, &head);
+       sdmac->desc = NULL;
+       spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+       vchan_dma_desc_free_list(&sdmac->vc, &head);
+}
+
+static int sdma_disable_channel_async(struct dma_chan *chan)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+       sdma_disable_channel(chan);
+
+       if (sdmac->desc)
+               schedule_work(&sdmac->terminate_worker);
 
        return 0;
 }
 
+static void sdma_channel_synchronize(struct dma_chan *chan)
+{
+       struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+       vchan_synchronize(&sdmac->vc);
+
+       flush_work(&sdmac->terminate_worker);
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
        struct sdma_engine *sdma = sdmac->sdma;
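
With termination deferred to a worker, the driver also gains a device_synchronize hook, and DMA clients that recycle buffers must use the matching two-step dmaengine API. Hedged client-side sketch:

    dmaengine_terminate_async(chan);   /* schedules sdma's terminate_worker */
    dmaengine_synchronize(chan);       /* flush_work() via device_synchronize */
    /* only now is it safe to reuse buffers of in-flight descriptors */
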
@@ -1192,10 +1210,11 @@ out:
 
 static int sdma_alloc_bd(struct sdma_desc *desc)
 {
+       u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
        int ret = 0;
 
-       desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
-                                 &desc->bd_phys);
+       desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
+                                       GFP_NOWAIT);
        if (!desc->bd) {
                ret = -ENOMEM;
                goto out;
@@ -1206,7 +1225,9 @@ out:
 
 static void sdma_free_bd(struct sdma_desc *desc)
 {
-       dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+       u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
+
+       dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
 }
 
 static void sdma_desc_free(struct virt_dma_desc *vd)
@@ -1272,10 +1293,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
        if (ret)
                goto disable_clk_ahb;
 
-       sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
-                               sizeof(struct sdma_buffer_descriptor),
-                               32, 0);
-
        return 0;
 
 disable_clk_ahb:
@@ -1290,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
 
-       sdma_disable_channel_with_delay(chan);
+       sdma_disable_channel_async(chan);
+
+       sdma_channel_synchronize(chan);
 
        if (sdmac->event_id0)
                sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1304,9 +1323,6 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
        clk_disable(sdma->clk_ipg);
        clk_disable(sdma->clk_ahb);
-
-       dma_pool_destroy(sdmac->bd_pool);
-       sdmac->bd_pool = NULL;
 }
 
 static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
@@ -1999,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
 
                sdmac->channel = i;
                sdmac->vc.desc_free = sdma_desc_free;
+               INIT_WORK(&sdmac->terminate_worker,
+                               sdma_channel_terminate_work);
                /*
                 * Add the channel to the DMAC list. Do not add channel 0 though
                 * because we need it internally in the SDMA driver. This also means
@@ -2050,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
        sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
        sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
        sdma->dma_device.device_config = sdma_config;
-       sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
+       sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+       sdma->dma_device.device_synchronize = sdma_channel_synchronize;
        sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
        sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
        sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
index 1497da3677109c78949cb91adf0907a30eb9b32f..e507ec36c0d3dfa107ccba439551390b9ace1add 100644 (file)
@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_chan *chan)
 
        desc_phys = lower_32_bits(c->desc_phys);
        desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
-       if (!cdd->chan_busy[desc_num])
+       if (!cdd->chan_busy[desc_num]) {
+               struct cppi41_channel *cc, *_ct;
+
+               /*
+                * The channel might still be on the pending list if
+                * cppi41_dma_issue_pending() is called after
+                * cppi41_runtime_suspend().
+                */
+               list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
+                       if (cc != c)
+                               continue;
+                       list_del(&cc->node);
+                       break;
+               }
                return 0;
+       }
 
        ret = cppi41_tear_down_chan(c);
        if (ret)
index df9467eef32a0e4b67090e56e1a5c0260a0f4037..41c9ccdd20d65658f461991ab4e8bc74d0e6fa4a 100644 (file)
@@ -234,6 +234,7 @@ config EDAC_SKX
        depends on PCI && X86_64 && X86_MCE_INTEL && PCI_MMCONFIG
        depends on ACPI_NFIT || !ACPI_NFIT # if ACPI_NFIT=m, EDAC_SKX can't be y
        select DMI
+       select ACPI_ADXL if ACPI
        help
          Support for error detection and correction on the Intel
          Skylake server Integrated Memory Controllers. If your
index dd209e0dd9abb2ca72c0c2b45a5548088852d5c9..a99ea61dad321dddad4ab28bea15ce593ae0c24f 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/bitmap.h>
 #include <linux/math64.h>
 #include <linux/mod_devicetable.h>
+#include <linux/adxl.h>
 #include <acpi/nfit.h>
 #include <asm/cpu_device_id.h>
 #include <asm/intel-family.h>
@@ -35,6 +36,7 @@
 #include "edac_module.h"
 
 #define EDAC_MOD_STR    "skx_edac"
+#define MSG_SIZE       1024
 
 /*
  * Debug macros
 static LIST_HEAD(skx_edac_list);
 
 static u64 skx_tolm, skx_tohm;
+static char *skx_msg;
+static unsigned int nvdimm_count;
+
+enum {
+       INDEX_SOCKET,
+       INDEX_MEMCTRL,
+       INDEX_CHANNEL,
+       INDEX_DIMM,
+       INDEX_MAX
+};
+
+static const char * const component_names[] = {
+       [INDEX_SOCKET]  = "ProcessorSocketId",
+       [INDEX_MEMCTRL] = "MemoryControllerId",
+       [INDEX_CHANNEL] = "ChannelId",
+       [INDEX_DIMM]    = "DimmSlotId",
+};
+
+static int component_indices[ARRAY_SIZE(component_names)];
+static int adxl_component_count;
+static const char * const *adxl_component_names;
+static u64 *adxl_values;
+static char *adxl_msg;
 
 #define NUM_IMC                        2       /* memory controllers per socket */
 #define NUM_CHANNELS           3       /* channels per memory controller */
@@ -393,6 +418,8 @@ static int get_nvdimm_info(struct dimm_info *dimm, struct skx_imc *imc,
        u16 flags;
        u64 size = 0;
 
+       nvdimm_count++;
+
        dev_handle = ACPI_NFIT_BUILD_DEVICE_HANDLE(dimmno, chan, imc->lmc,
                                                   imc->src_id, 0);
 
@@ -941,12 +968,46 @@ static void teardown_skx_debug(void)
 }
 #endif /*CONFIG_EDAC_DEBUG*/
 
+static bool skx_adxl_decode(struct decoded_addr *res)
+{
+       int i, len = 0;
+
+       if (res->addr >= skx_tohm || (res->addr >= skx_tolm &&
+                                     res->addr < BIT_ULL(32))) {
+               edac_dbg(0, "Address 0x%llx out of range\n", res->addr);
+               return false;
+       }
+
+       if (adxl_decode(res->addr, adxl_values)) {
+               edac_dbg(0, "Failed to decode 0x%llx\n", res->addr);
+               return false;
+       }
+
+       res->socket  = (int)adxl_values[component_indices[INDEX_SOCKET]];
+       res->imc     = (int)adxl_values[component_indices[INDEX_MEMCTRL]];
+       res->channel = (int)adxl_values[component_indices[INDEX_CHANNEL]];
+       res->dimm    = (int)adxl_values[component_indices[INDEX_DIMM]];
+
+       for (i = 0; i < adxl_component_count; i++) {
+               if (adxl_values[i] == ~0x0ull)
+                       continue;
+
+               len += snprintf(adxl_msg + len, MSG_SIZE - len, " %s:0x%llx",
+                               adxl_component_names[i], adxl_values[i]);
+               if (MSG_SIZE - len <= 0)
+                       break;
+       }
+
+       return true;
+}
+
 static void skx_mce_output_error(struct mem_ctl_info *mci,
                                 const struct mce *m,
                                 struct decoded_addr *res)
 {
        enum hw_event_mc_err_type tp_event;
-       char *type, *optype, msg[256];
+       char *type, *optype;
        bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
        bool overflow = GET_BITFIELD(m->status, 62, 62);
        bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1007,22 +1068,47 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
                        break;
                }
        }
+       if (adxl_component_count) {
+               snprintf(skx_msg, MSG_SIZE, "%s%s err_code:%04x:%04x %s",
+                        overflow ? " OVERFLOW" : "",
+                        (uncorrected_error && recoverable) ? " recoverable" : "",
+                        mscod, errcode, adxl_msg);
+       } else {
+               snprintf(skx_msg, MSG_SIZE,
+                        "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
+                        overflow ? " OVERFLOW" : "",
+                        (uncorrected_error && recoverable) ? " recoverable" : "",
+                        mscod, errcode,
+                        res->socket, res->imc, res->rank,
+                        res->bank_group, res->bank_address, res->row, res->column);
+       }
 
-       snprintf(msg, sizeof(msg),
-                "%s%s err_code:%04x:%04x socket:%d imc:%d rank:%d bg:%d ba:%d row:%x col:%x",
-                overflow ? " OVERFLOW" : "",
-                (uncorrected_error && recoverable) ? " recoverable" : "",
-                mscod, errcode,
-                res->socket, res->imc, res->rank,
-                res->bank_group, res->bank_address, res->row, res->column);
-
-       edac_dbg(0, "%s\n", msg);
+       edac_dbg(0, "%s\n", skx_msg);
 
        /* Call the helper to output message */
        edac_mc_handle_error(tp_event, mci, core_err_cnt,
                             m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
                             res->channel, res->dimm, -1,
-                            optype, msg);
+                            optype, skx_msg);
+}
+
+static struct mem_ctl_info *get_mci(int src_id, int lmc)
+{
+       struct skx_dev *d;
+
+       if (lmc > NUM_IMC - 1) {
+               skx_printk(KERN_ERR, "Bad lmc %d\n", lmc);
+               return NULL;
+       }
+
+       list_for_each_entry(d, &skx_edac_list, list) {
+               if (d->imc[0].src_id == src_id)
+                       return d->imc[lmc].mci;
+       }
+
+       skx_printk(KERN_ERR, "No mci for src_id %d lmc %d\n", src_id, lmc);
+
+       return NULL;
 }
 
 static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
@@ -1040,10 +1126,23 @@ static int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
        if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
                return NOTIFY_DONE;
 
+       memset(&res, 0, sizeof(res));
        res.addr = mce->addr;
-       if (!skx_decode(&res))
+
+       if (adxl_component_count) {
+               if (!skx_adxl_decode(&res))
+                       return NOTIFY_DONE;
+
+               mci = get_mci(res.socket, res.imc);
+       } else {
+               if (!skx_decode(&res))
+                       return NOTIFY_DONE;
+
+               mci = res.dev->imc[res.imc].mci;
+       }
+
+       if (!mci)
                return NOTIFY_DONE;
-       mci = res.dev->imc[res.imc].mci;
 
        if (mce->mcgstatus & MCG_STATUS_MCIP)
                type = "Exception";
@@ -1094,6 +1193,62 @@ static void skx_remove(void)
        }
 }
 
+static void __init skx_adxl_get(void)
+{
+       const char * const *names;
+       int i, j;
+
+       names = adxl_get_component_names();
+       if (!names) {
+               skx_printk(KERN_NOTICE, "No firmware support for address translation.");
+               skx_printk(KERN_CONT, " Only decoding DDR4 address!\n");
+               return;
+       }
+
+       for (i = 0; i < INDEX_MAX; i++) {
+               for (j = 0; names[j]; j++) {
+                       if (!strcmp(component_names[i], names[j])) {
+                               component_indices[i] = j;
+                               break;
+                       }
+               }
+
+               if (!names[j])
+                       goto err;
+       }
+
+       adxl_component_names = names;
+       while (*names++)
+               adxl_component_count++;
+
+       adxl_values = kcalloc(adxl_component_count, sizeof(*adxl_values),
+                             GFP_KERNEL);
+       if (!adxl_values) {
+               adxl_component_count = 0;
+               return;
+       }
+
+       adxl_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
+       if (!adxl_msg) {
+               adxl_component_count = 0;
+               kfree(adxl_values);
+       }
+
+       return;
+err:
+       skx_printk(KERN_ERR, "'%s' is not matched from DSM parameters: ",
+                  component_names[i]);
+       for (j = 0; names[j]; j++)
+               skx_printk(KERN_CONT, "%s ", names[j]);
+       skx_printk(KERN_CONT, "\n");
+}
+
+static void __exit skx_adxl_put(void)
+{
+       kfree(adxl_values);
+       kfree(adxl_msg);
+}
+
 /*
  * skx_init:
  *     make sure we are running on the correct cpu model
@@ -1158,6 +1313,15 @@ static int __init skx_init(void)
                }
        }
 
+       skx_msg = kzalloc(MSG_SIZE, GFP_KERNEL);
+       if (!skx_msg) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+
+       if (nvdimm_count)
+               skx_adxl_get();
+
        /* Ensure that the OPSTATE is set correctly for POLL or NMI */
        opstate_init();
 
@@ -1176,6 +1340,9 @@ static void __exit skx_exit(void)
        edac_dbg(2, "\n");
        mce_unregister_decode_chain(&skx_mce_dec);
        skx_remove();
+       if (nvdimm_count)
+               skx_adxl_put();
+       kfree(skx_msg);
        teardown_skx_debug();
 }
 
index 388a929baf95d1e1107ab4b0ab20516dea3cfdf7..1a6a77df8a5e8aea45f3cbc2bac9c5d0883b0edb 100644 (file)
@@ -265,6 +265,10 @@ void __init efi_init(void)
                                    (params.mmap & ~PAGE_MASK)));
 
        init_screen_info();
+
+       /* ARM does not permit early mappings to persist across paging_init() */
+       if (IS_ENABLED(CONFIG_ARM))
+               efi_memmap_unmap();
 }
 
 static int __init register_gop_device(void)
index 922cfb813109a3c14a88a0bb054a09534e813ec9..a00934d263c519a9d476a57a5bb2388c6041b810 100644 (file)
@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
 {
        u64 mapsize;
 
-       if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
+       if (!efi_enabled(EFI_BOOT)) {
                pr_info("EFI services will not be available.\n");
                return 0;
        }
index 249eb70691b0f5e7567cf4fc3bbb8dda9df571cf..415849bab2339a4168d9fff236b8302a02adc284 100644 (file)
@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 
                early_memunmap(tbl, sizeof(*tbl));
        }
+       return 0;
+}
 
+int __init efi_apply_persistent_mem_reservations(void)
+{
        if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
                unsigned long prsv = efi.mem_reserve;
 
@@ -963,36 +967,59 @@ bool efi_is_table_address(unsigned long phys_addr)
 }
 
 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
 
-int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+static int __init efi_memreserve_map_root(void)
 {
-       struct linux_efi_memreserve *rsv, *parent;
-
        if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
                return -ENODEV;
 
-       rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
-       if (!rsv)
+       efi_memreserve_root = memremap(efi.mem_reserve,
+                                      sizeof(*efi_memreserve_root),
+                                      MEMREMAP_WB);
+       if (WARN_ON_ONCE(!efi_memreserve_root))
                return -ENOMEM;
+       return 0;
+}
 
-       parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
-       if (!parent) {
-               kfree(rsv);
-               return -ENOMEM;
+int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
+{
+       struct linux_efi_memreserve *rsv;
+       int rc;
+
+       if (efi_memreserve_root == (void *)ULONG_MAX)
+               return -ENODEV;
+
+       if (!efi_memreserve_root) {
+               rc = efi_memreserve_map_root();
+               if (rc)
+                       return rc;
        }
 
+       rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
+       if (!rsv)
+               return -ENOMEM;
+
        rsv->base = addr;
        rsv->size = size;
 
        spin_lock(&efi_mem_reserve_persistent_lock);
-       rsv->next = parent->next;
-       parent->next = __pa(rsv);
+       rsv->next = efi_memreserve_root->next;
+       efi_memreserve_root->next = __pa(rsv);
        spin_unlock(&efi_mem_reserve_persistent_lock);
 
-       memunmap(parent);
+       return 0;
+}
 
+static int __init efi_memreserve_root_init(void)
+{
+       if (efi_memreserve_root)
+               return 0;
+       if (efi_memreserve_map_root())
+               efi_memreserve_root = (void *)ULONG_MAX;
        return 0;
 }
+early_initcall(efi_memreserve_root_init);
 
 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,
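
efi_memreserve_root follows a three-state lazy-init convention: NULL means not mapped yet, a valid pointer means usable, and (void *)ULONG_MAX marks a failed attempt so callers stop retrying. The allocation also moves to GFP_ATOMIC, presumably because callers may not be able to sleep. Condensed sketch of the convention:

    if (efi_memreserve_root == (void *)ULONG_MAX)
            return -ENODEV;                 /* an earlier mapping failed */
    if (!efi_memreserve_root && efi_memreserve_map_root())
            return -ENOMEM;                 /* first attempt, failed just now */
    /* efi_memreserve_root is valid from here on */
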
index 3e626fd9bd4e1fafe6e0f4da4597ed66a978bd08..8061667a6765aeb6752a50c134edf60414e7d652 100644 (file)
@@ -229,14 +229,6 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
        return 0;
 }
 
-static inline bool is_compat(void)
-{
-       if (IS_ENABLED(CONFIG_COMPAT) && in_compat_syscall())
-               return true;
-
-       return false;
-}
-
 static void
 copy_out_compat(struct efi_variable *dst, struct compat_efi_variable *src)
 {
@@ -263,7 +255,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        u8 *data;
        int err;
 
-       if (is_compat()) {
+       if (in_compat_syscall()) {
                struct compat_efi_variable *compat;
 
                if (count != sizeof(*compat))
@@ -324,7 +316,7 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
                             &entry->var.DataSize, entry->var.Data))
                return -EIO;
 
-       if (is_compat()) {
+       if (in_compat_syscall()) {
                compat = (struct compat_efi_variable *)buf;
 
                size = sizeof(*compat);
@@ -418,7 +410,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
        struct compat_efi_variable *compat = (struct compat_efi_variable *)buf;
        struct efi_variable *new_var = (struct efi_variable *)buf;
        struct efivar_entry *new_entry;
-       bool need_compat = is_compat();
+       bool need_compat = in_compat_syscall();
        efi_char16_t *name;
        unsigned long size;
        u32 attributes;
@@ -495,7 +487,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
 
-       if (is_compat()) {
+       if (in_compat_syscall()) {
                if (count != sizeof(*compat))
                        return -EINVAL;
 
index 30ac0c975f8a1cc6bd544ba90991a4330ea05cd9..3d36142cf81208d408cab75e28edf957fa865c9e 100644 (file)
@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
        efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
        efi_status_t status;
 
+       if (IS_ENABLED(CONFIG_ARM))
+               return;
+
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
                                (void **)&rsv);
        if (status != EFI_SUCCESS) {
index 8830fa601e45d9a1b1094419cd1ec66f41a25e49..0c0d2312f4a8ad27f6e852bc82d5f2b6c0124e64 100644 (file)
@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
                        return efi_status;
                }
        }
+
+       /* shrink the FDT back to its minimum size */
+       fdt_pack(fdt);
+
        return EFI_SUCCESS;
 
 fdt_set_fail:
index fa2904fb841fe459a6562b9fea237c5213fa97be..38b686c67b177da4875b9174c0f50ca165c6b2d4 100644 (file)
@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
 
 void __init efi_memmap_unmap(void)
 {
+       if (!efi_enabled(EFI_MEMMAP))
+               return;
+
        if (!efi.memmap.late) {
                unsigned long size;
 
index a19d845bdb06748907972b652415594e18a095f9..8903b9ccfc2b8da6cdc5341fef8619744d904c88 100644 (file)
@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
        }                                                               \
                                                                        \
        init_completion(&efi_rts_work.efi_rts_comp);                    \
-       INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);            \
+       INIT_WORK(&efi_rts_work.work, efi_call_rts);                    \
        efi_rts_work.arg1 = _arg1;                                      \
        efi_rts_work.arg2 = _arg2;                                      \
        efi_rts_work.arg3 = _arg3;                                      \
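
efi_rts_work is a file-scope object (see the hunk header above), so the on-stack initializer was the wrong variant: INIT_WORK_ONSTACK() registers a debug-objects/lockdep key for work items with stack lifetime and must be paired with destroy_work_on_stack(). A sketch of when each form applies (submit_stack_work() is a hypothetical helper):

    static struct work_struct global_work;    /* static storage: INIT_WORK() */

    static void submit_stack_work(work_func_t fn)
    {
            struct work_struct w;             /* stack storage */

            INIT_WORK_ONSTACK(&w, fn);
            schedule_work(&w);
            flush_work(&w);                   /* must complete before w goes away */
            destroy_work_on_stack(&w);
    }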
index af3a20dd5aa4a504524c0bd36f932ed328903c2c..99c99a5d57fe26d573310f7f30b50e7bf11cecf7 100644 (file)
@@ -46,6 +46,7 @@ config FSI_MASTER_AST_CF
        tristate "FSI master based on Aspeed ColdFire coprocessor"
        depends on GPIOLIB
        depends on GPIO_ASPEED
+       select GENERIC_ALLOCATOR
        ---help---
        This option enables an FSI master using the AST2400 and AST2500 GPIO
        lines driven by the internal ColdFire coprocessor. This requires
index ae861342626e3516527167b18d96a7f5ac522169..d92f5b87c251e1248e20fd44708a4d80f759bc00 100644 (file)
@@ -638,7 +638,7 @@ static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
        }
         ffdc_iov.iov_base = ffdc;
        ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
-        iov_iter_kvec(&ffdc_iter, WRITE | ITER_KVEC, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
+        iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
        cmd[0] = cpu_to_be32(2);
        cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
        rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
@@ -735,7 +735,7 @@ int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
        rbytes = (*resp_len) * sizeof(__be32);
        resp_iov.iov_base = response;
        resp_iov.iov_len = rbytes;
-        iov_iter_kvec(&resp_iter, WRITE | ITER_KVEC, &resp_iov, 1, rbytes);
+        iov_iter_kvec(&resp_iter, WRITE, &resp_iov, 1, rbytes);
 
        /* Perform the command */
        mutex_lock(&sbefifo->lock);
index df94021dd9d12bc32b18873076151d3fccbae5c7..81dc01ac2351fcac14e3d5be1978a4876e751375 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/fs.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
-#include <linux/cdev.h>
 #include <linux/list.h>
 
 #include <uapi/linux/fsi.h>
index b01ba4438501a959de7796dc2eff67a6126d88e3..31e891f00175c635a9ee92c7e0f090eb135fc29b 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched.h>
 #include <linux/serdev.h>
 #include <linux/slab.h>
 
@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
        int ret;
 
        /* write is only buffered synchronously */
-       ret = serdev_device_write(serdev, buf, count, 0);
+       ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
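
The final argument of serdev_device_write() is how long to wait for room in the controller's output buffer: with 0, a write issued while the buffer is full gives up immediately, so large raw writes could fail part-way, while MAX_SCHEDULE_TIMEOUT (from linux/sched.h, hence the new include) sleeps until everything has been buffered. An illustrative helper, not part of the driver:

    static int gnss_write_all(struct serdev_device *serdev,
                              const unsigned char *buf, size_t count)
    {
            int ret;

            /* Block until the whole message is handed to serdev. */
            ret = serdev_device_write(serdev, buf, count,
                                      MAX_SCHEDULE_TIMEOUT);
            return ret < 0 ? ret : 0;
    }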
 
index 79cb98950013bbb60f4ff4126cd562453441045a..2c22836d3ffd5f060ae2cde22dbd462ef977ee0b 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+#include <linux/sched.h>
 #include <linux/serdev.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
        int ret;
 
        /* write is only buffered synchronously */
-       ret = serdev_device_write(serdev, buf, count, 0);
+       ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
 
@@ -167,7 +168,7 @@ static int sirf_set_active(struct sirf_data *data, bool active)
        else
                timeout = SIRF_HIBERNATE_TIMEOUT;
 
-       while (retries-- > 0) {
+       do {
                sirf_pulse_on_off(data);
                ret = sirf_wait_for_power_state(data, active, timeout);
                if (ret < 0) {
@@ -178,9 +179,9 @@ static int sirf_set_active(struct sirf_data *data, bool active)
                }
 
                break;
-       }
+       } while (retries--);
 
-       if (retries == 0)
+       if (retries < 0)
                return -ETIMEDOUT;
 
        return 0;
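
The loop rewrite above fixes an off-by-one: with while (retries-- > 0), succeeding on the final attempt leaves retries at 0, which the old "if (retries == 0)" test then misreported as a timeout. A standalone userspace sketch of the corrected accounting, with try_once() standing in for the pulse-and-wait sequence:

    #include <stdio.h>

    /* Succeeds only on the last allowed attempt. */
    static int try_once(int attempt) { return attempt == 2; }

    int main(void)
    {
            int retries = 2;        /* up to 3 attempts in the do/while form */
            int attempt = 0;

            do {
                    if (try_once(attempt++))
                            break;                  /* success */
            } while (retries--);

            /* retries only goes negative when every attempt failed */
            if (retries < 0)
                    puts("timed out");
            else
                    printf("ok after %d attempts\n", attempt);
            return 0;
    }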
index 5c1564fcc24ea68808e8f553889b09ac6e5e2d9f..bdb29e51b4176390a5614985e96021780fd8111c 100644 (file)
@@ -258,7 +258,7 @@ static int davinci_gpio_probe(struct platform_device *pdev)
        chips->chip.set = davinci_gpio_set;
 
        chips->chip.ngpio = ngpio;
-       chips->chip.base = -1;
+       chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
 
 #ifdef CONFIG_OF_GPIO
        chips->chip.of_gpio_n_cells = 2;
index 8269cffc2967f772ba2da14e566fd136bb68da56..6a50f9f59c901b6d38069a67b5b9f7881e8a43b6 100644 (file)
@@ -35,8 +35,8 @@
 #define gpio_mockup_err(...)   pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
 
 enum {
-       GPIO_MOCKUP_DIR_OUT = 0,
-       GPIO_MOCKUP_DIR_IN = 1,
+       GPIO_MOCKUP_DIR_IN = 0,
+       GPIO_MOCKUP_DIR_OUT = 1,
 };
 
 /*
@@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
 {
        struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
 
-       return chip->lines[offset].dir;
+       return !chip->lines[offset].dir;
 }
 
 static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
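
The swap above brings the mock driver in line with the gpiolib convention that a chip's .get_direction() callback returns 1 for an input line and 0 for an output (matching GPIOF_DIR_IN and GPIOF_DIR_OUT); with the enum now encoding IN as 0, the accessor has to negate the stored value. The contract, sketched with a hypothetical example_chip:

    /* 1 = input, 0 = output, as gpiolib expects */
    static int example_get_direction(struct gpio_chip *gc, unsigned int offset)
    {
            struct example_chip *chip = gpiochip_get_data(gc);

            return chip->lines[offset].dir == GPIO_MOCKUP_DIR_IN ? 1 : 0;
    }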
index bfe4c5c9f41cef3a9c3a483e5283e015d840bc91..e9600b556f397babf8c472ceb2b012f2de97b42a 100644 (file)
@@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
        if (pxa_gpio_has_pinctrl()) {
                ret = pinctrl_gpio_direction_input(chip->base + offset);
-               if (!ret)
-                       return 0;
+               if (ret)
+                       return ret;
        }
 
        spin_lock_irqsave(&gpio_lock, flags);
index 230e41562462b27fdf5d11874b3de8c34c107707..a2cbb474901c224bebae335cd0789273a78955ae 100644 (file)
@@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
        gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
        if (!gdev->descs) {
                status = -ENOMEM;
-               goto err_free_gdev;
+               goto err_free_ida;
        }
 
        if (chip->ngpio == 0) {
@@ -1427,8 +1427,9 @@ err_free_label:
        kfree_const(gdev->label);
 err_free_descs:
        kfree(gdev->descs);
-err_free_gdev:
+err_free_ida:
        ida_simple_remove(&gpio_ida, gdev->id);
+err_free_gdev:
        /* failures here can mean systems won't boot... */
        pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
               gdev->base, gdev->base + gdev->ngpio - 1,
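
The label shuffle above restores the invariant of kernel-style error unwinding: each err_* label undoes exactly the steps completed before the corresponding goto, in reverse order, so a failed descs allocation now releases the IDA id it had already claimed instead of leaking it. A generic sketch, with claim_id()/alloc_descs()/release_id() as placeholder helpers:

    int example_setup(struct example_dev *dev)
    {
            int err;

            dev->id = claim_id();               /* step 1 (hypothetical) */
            if (dev->id < 0)
                    return dev->id;

            dev->descs = alloc_descs();         /* step 2 (hypothetical) */
            if (!dev->descs) {
                    err = -ENOMEM;
                    goto err_release_id;        /* undo step 1 only */
            }

            return 0;

    err_release_id:
            release_id(dev->id);
            return err;
    }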
index d0102cfc8efbd1825df74c84ed26a0d0042a7e25..b0fc116296cb3bff55f6b95b432fbc018a29bbde 100644 (file)
@@ -151,6 +151,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
 extern struct amdgpu_mgpu_info mgpu_info;
 
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -232,7 +233,7 @@ enum amdgpu_kiq_irq {
 
 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 20
+#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
 
 int amdgpu_device_ip_set_clockgating_state(void *dev,
                                           enum amd_ip_block_type block_type,
index 297a5490ad8c0be64157364419ca6e2f5b1eab1a..0a4fba196b843e4fe27b48b94f478478d1281749 100644 (file)
@@ -135,7 +135,8 @@ static int acp_poweroff(struct generic_pm_domain *genpd)
         * 2. power off the acp tiles
         * 3. check and enter ulv state
         */
-               if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+               if (adev->powerplay.pp_funcs &&
+                       adev->powerplay.pp_funcs->set_powergating_by_smu)
                        amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true);
        }
        return 0;
@@ -517,7 +518,8 @@ static int acp_set_powergating_state(void *handle,
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = state == AMD_PG_STATE_GATE ? true : false;
 
-       if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+       if (adev->powerplay.pp_funcs &&
+               adev->powerplay.pp_funcs->set_powergating_by_smu)
                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable);
 
        return 0;
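
This NULL check is the recurring theme of the amdgpu hunks that follow: adev->powerplay.pp_funcs can legitimately be NULL (for instance when the powerplay layer is never brought up for the ASIC), so both the function table and the individual hook must be tested before the call. The pattern, reduced to its core:

    if (adev->powerplay.pp_funcs &&
        adev->powerplay.pp_funcs->set_powergating_by_smu)
            amdgpu_dpm_set_powergating_by_smu(adev, block_type, gate);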
index c31a8849e9f87705ed3abac2ac23bfcbc7303d85..1580ec60b89f753ce2e12018b2a07eb936802d2e 100644 (file)
@@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-       amdgpu_dpm_switch_power_profile(adev,
-                                       PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+       if (adev->powerplay.pp_funcs &&
+           adev->powerplay.pp_funcs->switch_power_profile)
+               amdgpu_dpm_switch_power_profile(adev,
+                                               PP_SMC_POWER_PROFILE_COMPUTE,
+                                               !idle);
 }
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
index f9b54236102d58421d179f230d6968a3dbd39d04..95f4c4139fc60a078d651b8164d11b0befcf766b 100644 (file)
@@ -39,6 +39,7 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
        [AMDGPU_HW_IP_UVD_ENC]  =       1,
        [AMDGPU_HW_IP_VCN_DEC]  =       1,
        [AMDGPU_HW_IP_VCN_ENC]  =       1,
+       [AMDGPU_HW_IP_VCN_JPEG] =       1,
 };
 
 static int amdgput_ctx_total_num_entities(void)
index 1e4dd09a50726646cf117a480ddabfa5f2aecc2c..30bc345d6fdf0d5827c2aa737d284da92787ea70 100644 (file)
@@ -1493,8 +1493,6 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
        }
 
        adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
-       if (amdgpu_sriov_vf(adev))
-               adev->powerplay.pp_feature &= ~PP_GFXOFF_MASK;
 
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1600,7 +1598,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
                }
        }
 
-       if (adev->powerplay.pp_funcs->load_firmware) {
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
                r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
                if (r) {
                        pr_err("firmware loading failed\n");
@@ -3341,7 +3339,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 
                kthread_park(ring->sched.thread);
 
-               if (job && job->base.sched == &ring->sched)
+               if (job && job->base.sched != &ring->sched)
                        continue;
 
                drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
index 6748cd7fc129b0e7b83966da865f674c676c04e1..686a26de50f91e816471548bf3c1a0fc3f86db86 100644 (file)
@@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
                                         "dither",
                                         amdgpu_dither_enum_list, sz);
 
+       if (amdgpu_device_has_dc_support(adev)) {
+               adev->mode_info.max_bpc_property =
+                       drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
+               if (!adev->mode_info.max_bpc_property)
+                       return -ENOMEM;
+       }
+
        return 0;
 }
 
index 28781414d71c85e4dc5657e7cbfb237a39f5a3c3..8de55f7f1a3a3922b4a1ac2d17cf12cdd35d1fd6 100644 (file)
@@ -114,8 +114,8 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+/* OverDrive (bit 14), gfxoff (bit 15) and stutter mode (bit 17) disabled by default */
+uint amdgpu_pp_feature_mask = 0xfffd3fff;
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default */
+uint amdgpu_dc_feature_mask = 0;
+
 struct amdgpu_mgpu_info mgpu_info = {
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
 };
@@ -631,6 +634,14 @@ module_param(halt_if_hws_hang, int, 0644);
 MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
 #endif
 
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override the set of enabled display features. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef  CONFIG_DRM_AMDGPU_SI
        {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
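
The new mask is consumed bit by bit on the display-core side; per the amd_shared.h hunk further down, bit 0 is DC_FBC_MASK, so booting with amdgpu.dcfeaturemask=0x1 turns frame-buffer compression on. A sketch of the consuming side, mirroring the amdgpu_dm hunk later in this diff:

    /* During DM init: translate mask bits into dc_config flags. */
    if (amdgpu_dc_feature_mask & DC_FBC_MASK)       /* bit 0 */
            init_data.flags.fbc_support = true;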
index 790fd5408ddff2dbb6aa349c988b40ebfaf82536..1a656b8657f736fa0385aba0c54c6548d72af819 100644 (file)
@@ -392,7 +392,7 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
        if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
                return;
 
-       if (!adev->powerplay.pp_funcs->set_powergating_by_smu)
+       if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
                return;
 
 
index 81732a84c2ab090af4e2f834e2223c0eeabe2bcc..8f3d44e5e78785a18089204113636553de40064d 100644 (file)
@@ -467,9 +467,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
        if (!info->return_size || !info->return_pointer)
                return -EINVAL;
 
-       /* Ensure IB tests are run on ring */
-       flush_delayed_work(&adev->late_init_work);
-
        switch (info->query) {
        case AMDGPU_INFO_ACCEL_WORKING:
                ui32 = adev->accel_working;
@@ -950,6 +947,9 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        struct amdgpu_fpriv *fpriv;
        int r, pasid;
 
+       /* Ensure IB tests are run on ring */
+       flush_delayed_work(&adev->late_init_work);
+
        file_priv->driver_priv = NULL;
 
        r = pm_runtime_get_sync(dev->dev);
index b9e9e8b02fb756a0d7291c605353d59cd228826e..d1b4d9b6aae0d1743f77dc2373d0c9159d03f937 100644 (file)
@@ -339,6 +339,8 @@ struct amdgpu_mode_info {
        struct drm_property *audio_property;
        /* FMT dithering */
        struct drm_property *dither_property;
+       /* maximum number of bits per channel for monitor color */
+       struct drm_property *max_bpc_property;
        /* hardcoded DFP edid from BIOS */
        struct edid *bios_hardcoded_edid;
        int bios_hardcoded_edid_size;
index 94055a485e01300e5106fa261b6ce51b636360ab..59cc678de8c1570642afc2d488f63fbc179a1e99 100644 (file)
@@ -704,7 +704,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                return ret;
 
        if (adev->powerplay.pp_funcs->force_clock_level)
-               amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+               ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+
+       if (ret)
+               return -EINVAL;
 
        return count;
 }
@@ -737,7 +740,10 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
                return ret;
 
        if (adev->powerplay.pp_funcs->force_clock_level)
-               amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+               ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
+
+       if (ret)
+               return -EINVAL;
 
        return count;
 }
@@ -770,7 +776,10 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
                return ret;
 
        if (adev->powerplay.pp_funcs->force_clock_level)
-               amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+               ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
+
+       if (ret)
+               return -EINVAL;
 
        return count;
 }
index 6904d794d60a7a5c06057f74cf930db06a01e195..0877ff9a959445ad77a263f2ba1105ec5e795822 100644 (file)
@@ -181,7 +181,7 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
 
        if (level == adev->vm_manager.root_level)
                /* For the root directory */
-               return round_up(adev->vm_manager.max_pfn, 1 << shift) >> shift;
+               return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift;
        else if (level != AMDGPU_VM_PTB)
                /* Everything in between */
                return 512;
@@ -542,7 +542,8 @@ static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
                                   struct amdgpu_vm_pt_cursor *cursor)
 {
        amdgpu_vm_pt_next(adev, cursor);
-       while (amdgpu_vm_pt_descendant(adev, cursor));
+       if (cursor->pfn != ~0ll)
+               while (amdgpu_vm_pt_descendant(adev, cursor));
 }
 
 /**
@@ -1631,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                        continue;
                }
 
-               /* First check if the entry is already handled */
-               if (cursor.pfn < frag_start) {
-                       cursor.entry->huge = true;
-                       amdgpu_vm_pt_next(adev, &cursor);
-                       continue;
-               }
-
                /* If it isn't already handled it can't be a huge page */
                if (cursor.entry->huge) {
                        /* Add the entry to the relocated list to update it. */
@@ -1662,9 +1656,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                        if (!amdgpu_vm_pt_descendant(adev, &cursor))
                                return -ENOENT;
                        continue;
-               } else if (frag >= parent_shift) {
+               } else if (frag >= parent_shift &&
+                          cursor.level - 1 != adev->vm_manager.root_level) {
                        /* If the fragment size is even larger than the parent
-                        * shift we should go up one level and check it again.
+                        * shift we should go up one level and check it again
+                        * unless one level up is the root level.
                         */
                        if (!amdgpu_vm_pt_ancestor(&cursor))
                                return -ENOENT;
@@ -1672,10 +1668,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                }
 
                /* Looks good so far, calculate parameters for the update */
-               incr = AMDGPU_GPU_PAGE_SIZE << shift;
+               incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
                mask = amdgpu_vm_entries_mask(adev, cursor.level);
                pe_start = ((cursor.pfn >> shift) & mask) * 8;
-               entry_end = (mask + 1) << shift;
+               entry_end = (uint64_t)(mask + 1) << shift;
                entry_end += cursor.pfn & ~(entry_end - 1);
                entry_end = min(entry_end, end);
 
@@ -1688,7 +1684,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                                              flags | AMDGPU_PTE_FRAG(frag));
 
                        pe_start += nptes * 8;
-                       dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+                       dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
 
                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
@@ -1700,8 +1696,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
                        }
                } while (frag_start < entry_end);
 
-               if (frag >= shift)
+               if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+                       /* Mark all child entries as huge */
+                       while (cursor.pfn < frag_start) {
+                               cursor.entry->huge = true;
+                               amdgpu_vm_pt_next(adev, &cursor);
+                       }
+
+               } else if (frag >= shift) {
+                       /* or just move on to the next on the same level. */
                        amdgpu_vm_pt_next(adev, &cursor);
+               }
        }
 
        return 0;
@@ -3234,8 +3239,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        }
        rbtree_postorder_for_each_entry_safe(mapping, tmp,
                                             &vm->va.rb_root, rb) {
+               /* Don't remove the mapping here; we don't want to trigger a
+                * rebalance, and the tree is about to be destroyed anyway.
+                */
                list_del(&mapping->list);
-               amdgpu_vm_it_remove(mapping, &vm->va);
                kfree(mapping);
        }
        list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
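
Several of the amdgpu_vm changes above are 64-bit promotions: on page-table levels with large shifts, expressions such as (mask + 1) << shift were evaluated in 32 bits and wrapped before being widened. A standalone sketch of the failure mode (the values are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t mask = 511;    /* 512 entries on this level */
            unsigned shift = 30;    /* address bits covered below it */

            /* 32-bit shift wraps to 0, widened too late */
            uint64_t wrong = (uint64_t)((mask + 1) << shift);
            /* widen first, then shift: 512 GiB as intended */
            uint64_t right = (uint64_t)(mask + 1) << shift;

            printf("wrong=%llu right=%llu\n",
                   (unsigned long long)wrong, (unsigned long long)right);
            return 0;
    }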
index 3d0f277a6523f80a4e2ee7e66c94c494b94448bc..617b0c8908a375aa0d132af1868f3eaf9e2067b1 100644 (file)
@@ -4815,8 +4815,10 @@ static int gfx_v8_0_kcq_resume(struct amdgpu_device *adev)
        if (r)
                goto done;
 
-       /* Test KCQs */
-       for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+       /* Test KCQs - reversing the order of rings seems to fix ring test failure
+        * after GPU reset
+        */
+       for (i = adev->gfx.num_compute_rings - 1; i >= 0; i--) {
                ring = &adev->gfx.compute_ring[i];
                ring->ready = true;
                r = amdgpu_ring_test_ring(ring);
index 6d7baf59d6e11e947c83ef34d716c5a546d6460f..21363b2b2ee5729e7807b9046aa2872438044ae1 100644 (file)
@@ -2440,12 +2440,13 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
 #endif
 
        WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
+       udelay(50);
 
        /* carrizo do enable cp interrupt after cp inited */
-       if (!(adev->flags & AMD_IS_APU))
+       if (!(adev->flags & AMD_IS_APU)) {
                gfx_v9_0_enable_gui_idle_interrupt(adev, true);
-
-       udelay(50);
+               udelay(50);
+       }
 
 #ifdef AMDGPU_RLC_DEBUG_RETRY
        /* RLC_GPM_GENERAL_6 : RLC Ucode version */
index ceb7847b504f70fe73435e5b81b4ee4da5588421..bfa317ad20a956017273a7c1fe7ca2decd6491e1 100644 (file)
@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-                    min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+                    min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max((adev->gmc.vram_end >> 18) + 0x1,
+                            max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+                            max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
index e1c2b4e9c7b23a10ac3b1b2b5375d2bf84eae9c2..73ad02aea2b2e802f0dbce340d70877bdc3b6ca1 100644 (file)
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
 MODULE_FIRMWARE("amdgpu/verde_mc.bin");
 MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
 MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
index 1d3265c97b704b5a403cca7721818ac91dad6c4c..747c068379dc79b5408525dda7d2215b9b128043 100644 (file)
@@ -56,6 +56,9 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -224,13 +227,39 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
                chip_name = "tonga";
                break;
        case CHIP_POLARIS11:
-               chip_name = "polaris11";
+               if (((adev->pdev->device == 0x67ef) &&
+                    ((adev->pdev->revision == 0xe0) ||
+                     (adev->pdev->revision == 0xe5))) ||
+                   ((adev->pdev->device == 0x67ff) &&
+                    ((adev->pdev->revision == 0xcf) ||
+                     (adev->pdev->revision == 0xef) ||
+                     (adev->pdev->revision == 0xff))))
+                       chip_name = "polaris11_k";
+               else if ((adev->pdev->device == 0x67ef) &&
+                        (adev->pdev->revision == 0xe2))
+                       chip_name = "polaris11_k";
+               else
+                       chip_name = "polaris11";
                break;
        case CHIP_POLARIS10:
-               chip_name = "polaris10";
+               if ((adev->pdev->device == 0x67df) &&
+                   ((adev->pdev->revision == 0xe1) ||
+                    (adev->pdev->revision == 0xf7)))
+                       chip_name = "polaris10_k";
+               else
+                       chip_name = "polaris10";
                break;
        case CHIP_POLARIS12:
-               chip_name = "polaris12";
+               if (((adev->pdev->device == 0x6987) &&
+                    ((adev->pdev->revision == 0xc0) ||
+                     (adev->pdev->revision == 0xc3))) ||
+                   ((adev->pdev->device == 0x6981) &&
+                    ((adev->pdev->revision == 0x00) ||
+                     (adev->pdev->revision == 0x01) ||
+                     (adev->pdev->revision == 0x10))))
+                       chip_name = "polaris12_k";
+               else
+                       chip_name = "polaris12";
                break;
        case CHIP_FIJI:
        case CHIP_CARRIZO:
@@ -337,7 +366,7 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
        const struct mc_firmware_header_v1_0 *hdr;
        const __le32 *fw_data = NULL;
        const __le32 *io_mc_regs = NULL;
-       u32 data, vbios_version;
+       u32 data;
        int i, ucode_size, regs_size;
 
        /* Skip MC ucode loading on SR-IOV capable boards.
@@ -348,13 +377,6 @@ static int gmc_v8_0_polaris_mc_load_microcode(struct amdgpu_device *adev)
        if (amdgpu_sriov_bios(adev))
                return 0;
 
-       WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
-       data = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
-       vbios_version = data & 0xf;
-
-       if (vbios_version == 0)
-               return 0;
-
        if (!adev->gmc.fw)
                return -EINVAL;
 
index 14649f8475f3f68cfe9ebd2a816864cae1318e0f..a0db67adc34cee3d1ee13ca97d8b333ff36dfdc6 100644 (file)
@@ -90,7 +90,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
        /* Program the system aperture low logical page number. */
        WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-                    min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+                    min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
        if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
                /*
@@ -100,11 +100,11 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
                 * to get rid of the VM fault and hardware hang.
                 */
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max((adev->gmc.vram_end >> 18) + 0x1,
+                            max((adev->gmc.fb_end >> 18) + 0x1,
                                 adev->gmc.agp_end >> 18));
        else
                WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-                            max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+                            max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
        /* Set default page address. */
        value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -280,7 +280,7 @@ void mmhub_v1_0_update_power_gating(struct amdgpu_device *adev,
                return;
 
        if (enable && adev->pg_flags & AMD_PG_SUPPORT_MMHUB) {
-               if (adev->powerplay.pp_funcs->set_powergating_by_smu)
+               if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu)
                        amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GMC, true);
 
        }
index 04fa3d972636bb9878191ec9789f36a9684f6b04..7a8c9172d30a946fd91d147f8c73267a51b1fb08 100644 (file)
@@ -1366,7 +1366,8 @@ static int sdma_v4_0_hw_init(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+       if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs &&
+                       adev->powerplay.pp_funcs->set_powergating_by_smu)
                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false);
 
        sdma_v4_0_init_golden_registers(adev);
@@ -1386,7 +1387,8 @@ static int sdma_v4_0_hw_fini(void *handle)
        sdma_v4_0_ctx_switch_enable(adev, false);
        sdma_v4_0_enable(adev, false);
 
-       if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu)
+       if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs
+                       && adev->powerplay.pp_funcs->set_powergating_by_smu)
                amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
 
        return 0;
index bf5e6a413dee6e9b5de53f62ad7547ecb5e5b23e..4cc0dcb1a1875bfc559affd1f55e268a25e6282e 100644 (file)
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL                                                             0x01ba
 #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX                                                    0
 
+/* for Vega20 register name change */
+#define mmHDP_MEM_POWER_CTRL   0x00d4
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L
+#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK   0x00000002L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK  0x00010000L
+#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK    0x00020000L
+#define mmHDP_MEM_POWER_CTRL_BASE_IDX  0
 /*
  * Indirect registers accessor
  */
@@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable
 {
        uint32_t def, data;
 
-       def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+       if (adev->asic_type == CHIP_VEGA20) {
+               def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));
 
-       if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
-               data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
-       else
-               data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+               if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+                       data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
+               else
+                       data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
+                               HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);
 
-       if (def != data)
-               WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+               if (def != data)
+                       WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
+       } else {
+               def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
+
+               if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+                       data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+               else
+                       data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
+
+               if (def != data)
+                       WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
+       }
 }
 
 static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
index eae90922fdbe0f4356be31c4fd16eeb6846409a2..322e09b5b44894183d2c8aab92b1319f634a4fd5 100644 (file)
@@ -48,6 +48,7 @@ static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_jpeg_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev);
 static void vcn_v1_0_jpeg_ring_set_patch_ring(struct amdgpu_ring *ring, uint32_t ptr);
+static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -222,7 +223,7 @@ static int vcn_v1_0_hw_fini(void *handle)
        struct amdgpu_ring *ring = &adev->vcn.ring_dec;
 
        if (RREG32_SOC15(VCN, 0, mmUVD_STATUS))
-               vcn_v1_0_stop(adev);
+               vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
 
        ring->ready = false;
 
index a99f71797aa359f83217887dd4dcf531d639d45e..a0fda6f9252a52979b5c90569d48b4212f4ea27a 100644 (file)
@@ -129,7 +129,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
        else
                wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
-       WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+       WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFFFF);
 
        /* set rptr, wptr to 0 */
        WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
index 2d4473557b0d23210782ff72397b47bd7f9c94ef..d13fc4fcb51790859f03aefb14f4bd90067c8fd8 100644 (file)
@@ -49,6 +49,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
                adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
                adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
                adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+               adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
        }
        return 0;
 }
index e224f23e22155918a742bbd13e45131edc9463b0..5a6edf65c9eaebd958104d4d0dd8216281ffaccc 100644 (file)
@@ -429,6 +429,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
            adev->asic_type < CHIP_RAVEN)
                init_data.flags.gpu_vm_support = true;
 
+       if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+               init_data.flags.fbc_support = true;
+
        /* Display Core create. */
        adev->dm.dc = dc_create(&init_data);
 
@@ -2355,8 +2358,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
 static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector)
 {
+       struct dm_connector_state *dm_conn_state =
+               to_dm_connector_state(connector->state);
        uint32_t bpc = connector->display_info.bpc;
 
+       /* TODO: Remove this when there's support for max_bpc in drm */
+       if (dm_conn_state && bpc > dm_conn_state->max_bpc)
+               /* Round down to nearest even number. */
+               bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1);
+
        switch (bpc) {
        case 0:
                /*
@@ -2544,9 +2554,9 @@ static void fill_audio_info(struct audio_info *audio_info,
 
        cea_revision = drm_connector->display_info.cea_rev;
 
-       strncpy(audio_info->display_name,
+       strscpy(audio_info->display_name,
                edid_caps->display_name,
-               AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
+               AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
 
        if (cea_revision >= 3) {
                audio_info->mode_count = edid_caps->audio_mode_count;
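
strncpy() neither guarantees NUL termination when the source fills the buffer nor reports truncation, which is why the old call had to pass size - 1 and rely on a pre-zeroed destination; strscpy() always terminates and returns the copied length or -E2BIG. A sketch of the calling convention:

    char name[AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS];
    ssize_t n;

    n = strscpy(name, edid_caps->display_name, sizeof(name));
    if (n == -E2BIG)
            pr_debug("display name truncated\n"); /* name stays NUL-terminated */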
@@ -2700,18 +2710,11 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        drm_connector = &aconnector->base;
 
        if (!aconnector->dc_sink) {
-               /*
-                * Create dc_sink when necessary to MST
-                * Don't apply fake_sink to MST
-                */
-               if (aconnector->mst_port) {
-                       dm_dp_mst_dc_sink_create(drm_connector);
-                       return stream;
+               if (!aconnector->mst_port) {
+                       sink = create_fake_sink(aconnector);
+                       if (!sink)
+                               return stream;
                }
-
-               sink = create_fake_sink(aconnector);
-               if (!sink)
-                       return stream;
        } else {
                sink = aconnector->dc_sink;
        }
@@ -2947,6 +2950,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
        } else if (property == adev->mode_info.underscan_property) {
                dm_new_state->underscan_enable = val;
                ret = 0;
+       } else if (property == adev->mode_info.max_bpc_property) {
+               dm_new_state->max_bpc = val;
+               ret = 0;
        }
 
        return ret;
@@ -2989,6 +2995,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
        } else if (property == adev->mode_info.underscan_property) {
                *val = dm_state->underscan_enable;
                ret = 0;
+       } else if (property == adev->mode_info.max_bpc_property) {
+               *val = dm_state->max_bpc;
+               ret = 0;
        }
        return ret;
 }
@@ -3033,6 +3042,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
                state->underscan_enable = false;
                state->underscan_hborder = 0;
                state->underscan_vborder = 0;
+               state->max_bpc = 8;
 
                __drm_atomic_helper_connector_reset(connector, &state->base);
        }
@@ -3054,6 +3064,7 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
 
        new_state->freesync_capable = state->freesync_capable;
        new_state->freesync_enable = state->freesync_enable;
+       new_state->max_bpc = state->max_bpc;
 
        return &new_state->base;
 }
@@ -3301,7 +3312,7 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
 static const struct drm_plane_funcs dm_plane_funcs = {
        .update_plane   = drm_atomic_helper_update_plane,
        .disable_plane  = drm_atomic_helper_disable_plane,
-       .destroy        = drm_plane_cleanup,
+       .destroy        = drm_primary_helper_destroy,
        .reset = dm_drm_plane_reset,
        .atomic_duplicate_state = dm_drm_plane_duplicate_state,
        .atomic_destroy_state = dm_drm_plane_destroy_state,
@@ -3641,7 +3652,7 @@ amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
        mode->hdisplay = hdisplay;
        mode->vdisplay = vdisplay;
        mode->type &= ~DRM_MODE_TYPE_PREFERRED;
-       strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+       strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
 
        return mode;
 
@@ -3799,6 +3810,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        drm_object_attach_property(&aconnector->base.base,
                                adev->mode_info.underscan_vborder_property,
                                0);
+       drm_object_attach_property(&aconnector->base.base,
+                               adev->mode_info.max_bpc_property,
+                               0);
 
 }
 
index 978b34a5011ce508055064658b556ef5082097e7..6e069d777ab22d0e733bb7bc1c62d3d3370cc265 100644 (file)
@@ -160,8 +160,6 @@ struct amdgpu_dm_connector {
        struct mutex hpd_lock;
 
        bool fake_enable;
-
-       bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -206,6 +204,7 @@ struct dm_connector_state {
        enum amdgpu_rmx_type scaling;
        uint8_t underscan_vborder;
        uint8_t underscan_hborder;
+       uint8_t max_bpc;
        bool underscan_enable;
        bool freesync_enable;
        bool freesync_capable;
index 03601d717fed90708463fca143a6de50d6b750b1..1b0d209d836764ee16c4b9159100a52a6753f7b2 100644 (file)
@@ -205,40 +205,6 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property
 };
 
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       struct dc_sink *dc_sink;
-       struct dc_sink_init_data init_params = {
-                       .link = aconnector->dc_link,
-                       .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
-       /* FIXME none of this is safe. we shouldn't touch aconnector here in
-        * atomic_check
-        */
-
-       /*
-        * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
-        */
-       if (!aconnector->port || !aconnector->port->aux.ddc.algo)
-               return;
-
-       ASSERT(aconnector->edid);
-
-       dc_sink = dc_link_add_remote_sink(
-               aconnector->dc_link,
-               (uint8_t *)aconnector->edid,
-               (aconnector->edid->extensions + 1) * EDID_LENGTH,
-               &init_params);
-
-       dc_sink->priv = aconnector;
-       aconnector->dc_sink = dc_sink;
-
-       if (aconnector->dc_sink)
-               amdgpu_dm_update_freesync_caps(
-                               connector, aconnector->edid);
-}
-
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
@@ -319,12 +285,7 @@ dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder;
        struct drm_encoder *encoder;
-       const struct drm_connector_helper_funcs *connector_funcs =
-               connector->base.helper_private;
-       struct drm_encoder *enc_master =
-               connector_funcs->best_encoder(&connector->base);
 
-       DRM_DEBUG_KMS("enc master is %p\n", enc_master);
        amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
        if (!amdgpu_encoder)
                return NULL;
@@ -354,25 +315,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector;
        struct drm_connector *connector;
-       struct drm_connector_list_iter conn_iter;
-
-       drm_connector_list_iter_begin(dev, &conn_iter);
-       drm_for_each_connector_iter(connector, &conn_iter) {
-               aconnector = to_amdgpu_dm_connector(connector);
-               if (aconnector->mst_port == master
-                               && !aconnector->port) {
-                       DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
-                                               aconnector, connector->base.id, aconnector->mst_port);
-
-                       aconnector->port = port;
-                       drm_connector_set_path_property(connector, pathprop);
-
-                       drm_connector_list_iter_end(&conn_iter);
-                       aconnector->mst_connected = true;
-                       return &aconnector->base;
-               }
-       }
-       drm_connector_list_iter_end(&conn_iter);
 
        aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
        if (!aconnector)
@@ -400,10 +342,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                master->connector_id);
 
        aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
+       drm_connector_attach_encoder(&aconnector->base,
+                                    &aconnector->mst_encoder->base);
 
-       /*
-        * TODO: understand why this one is needed
-        */
        drm_object_attach_property(
                &connector->base,
                dev->mode_config.path_property,
@@ -421,8 +362,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
         */
        amdgpu_dm_connector_funcs_reset(connector);
 
-       aconnector->mst_connected = true;
-
        DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
                        aconnector, connector->base.id, aconnector->mst_port);
 
@@ -434,6 +373,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                                        struct drm_connector *connector)
 {
+       struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+       struct drm_device *dev = master->base.dev;
+       struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -447,7 +389,10 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                aconnector->dc_sink = NULL;
        }
 
-       aconnector->mst_connected = false;
+       drm_connector_unregister(connector);
+       if (adev->mode_info.rfbdev)
+               drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+       drm_connector_put(connector);
 }
 
 static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -458,18 +403,10 @@ static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
        drm_kms_helper_hotplug_event(dev);
 }
 
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
-       mutex_lock(&connector->dev->mode_config.mutex);
-       drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
-       mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
        struct drm_device *dev = connector->dev;
        struct amdgpu_device *adev = dev->dev_private;
-       struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
        if (adev->mode_info.rfbdev)
                drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
@@ -477,9 +414,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
                DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
 
        drm_connector_register(connector);
-
-       if (aconnector->mst_connected)
-               dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
index 8cf51da26657e29e72062b34aeed7e5d827f9e21..2da851b40042aee9b79eb2c666d45c0f5061fee0 100644 (file)
@@ -31,6 +31,5 @@ struct amdgpu_dm_connector;
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                       struct amdgpu_dm_connector *aconnector);
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
index 0fab64a2a9150f723422f8e3600174866b03cc1b..12001a006b2d8e1d0b5f3734c189e9faf23d94e5 100644 (file)
@@ -101,7 +101,7 @@ bool dm_pp_apply_display_requirements(
                        adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
                }
 
-               if (adev->powerplay.pp_funcs->display_configuration_change)
+               if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
                        adev->powerplay.pp_funcs->display_configuration_change(
                                adev->powerplay.pp_handle,
                                &adev->pm.pm_display_cfg);
@@ -304,7 +304,7 @@ bool dm_pp_get_clock_levels_by_type(
        struct amd_pp_simple_clock_info validation_clks = { 0 };
        uint32_t i;
 
-       if (adev->powerplay.pp_funcs->get_clock_by_type) {
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
                if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
                        dc_to_pp_clock_type(clk_type), &pp_clks)) {
                /* Error in pplib. Provide default values. */
@@ -315,7 +315,7 @@ bool dm_pp_get_clock_levels_by_type(
 
        pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
 
-       if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
                if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
                                                pp_handle, &validation_clks)) {
                        /* Error in pplib. Provide default values. */
@@ -398,6 +398,9 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
        struct pp_clock_levels_with_voltage pp_clk_info = {0};
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
+       if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
+               return false;
+
        if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
                                                     dc_to_pp_clock_type(clk_type),
                                                     &pp_clk_info))
@@ -438,7 +441,7 @@ bool dm_pp_apply_clock_for_voltage_request(
        if (!pp_clock_request.clock_type)
                return false;
 
-       if (adev->powerplay.pp_funcs->display_clock_voltage_request)
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
                ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
                        adev->powerplay.pp_handle,
                        &pp_clock_request);
@@ -455,7 +458,7 @@ bool dm_pp_get_static_clocks(
        struct amd_pp_clock_info pp_clk_info = {0};
        int ret = 0;
 
-       if (adev->powerplay.pp_funcs->get_current_clocks)
+       if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
                ret = adev->powerplay.pp_funcs->get_current_clocks(
                        adev->powerplay.pp_handle,
                        &pp_clk_info);
@@ -505,6 +508,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
        wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
        wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
 
+       if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
+               return;
+
        for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
                if (ranges->reader_wm_sets[i].wm_inst > 3)
                        wm_dce_clocks[i].wm_set_id = WM_SET_A;
index fb04a4ad141fdb68f68a747f6c4474a15e7da8a2..5da2186b3615ff97b3bc54a68b3849ad6c85856f 100644 (file)
@@ -1722,7 +1722,7 @@ static void write_i2c_retimer_setting(
                i2c_success = i2c_write(pipe_ctx, slave_address,
                                buffer, sizeof(buffer));
                RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-                       offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+                       offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
                        slave_address, buffer[0], buffer[1], i2c_success?1:0);
                if (!i2c_success)
                        /* Write failure */
@@ -1734,7 +1734,7 @@ static void write_i2c_retimer_setting(
                i2c_success = i2c_write(pipe_ctx, slave_address,
                                buffer, sizeof(buffer));
                RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
-                       offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+                       offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
                        slave_address, buffer[0], buffer[1], i2c_success?1:0);
                if (!i2c_success)
                        /* Write failure */
index 199527171100b0ed7cbd34aae7468e0989efc9a6..b57fa61b3034a14869a2cee91423b7f7e0fa11e0 100644 (file)
@@ -169,6 +169,7 @@ struct link_training_settings;
 struct dc_config {
        bool gpu_vm_support;
        bool disable_disp_pll_sharing;
+       bool fbc_support;
 };
 
 enum visual_confirm {
index b75ede5f84f76837960463387a90ca35aa7ac62a..a6bcb90e8419af401bbe4650dc1515f32681cda8 100644 (file)
@@ -1736,7 +1736,12 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx,
        if (events->force_trigger)
                value |= 0x1;
 
-       value |= 0x84;
+       if (num_pipes) {
+               struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+               if (dc->fbc_compressor)
+                       value |= 0x84;
+       }
 
        for (i = 0; i < num_pipes; i++)
                pipe_ctx[i]->stream_res.tg->funcs->
@@ -2507,6 +2512,8 @@ static void pplib_apply_display_requirements(
                        dc,
                        context->bw.dce.sclk_khz);
 
+       pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz;
+
        pp_display_cfg->min_engine_clock_deep_sleep_khz
                        = context->bw.dce.sclk_deep_sleep_khz;
 
index de190935f0a456000cbabbdcc723b7c1a43667b7..7c9fd9052ee233f2c91d5a2c5cd4c91765603335 100644 (file)
@@ -568,7 +568,7 @@ static struct input_pixel_processor *dce110_ipp_create(
 
 static const struct encoder_feature_support link_enc_feature = {
                .max_hdmi_deep_color = COLOR_DEPTH_121212,
-               .max_hdmi_pixel_clock = 594000,
+               .max_hdmi_pixel_clock = 300000,
                .flags.bits.IS_HBR2_CAPABLE = true,
                .flags.bits.IS_TPS3_CAPABLE = true
 };
@@ -1362,7 +1362,8 @@ static bool construct(
                pool->base.sw_i2cs[i] = NULL;
        }
 
-       dc->fbc_compressor = dce110_compressor_create(ctx);
+       if (dc->config.fbc_support)
+               dc->fbc_compressor = dce110_compressor_create(ctx);
 
        if (!underlay_create(ctx, &pool->base))
                goto res_create_fail;
index a407892905af29661a70ad75a6c76c5d502163c3..c0d9f332baedc10d71e701b5f19e254d8b7af40d 100644 (file)
@@ -40,8 +40,6 @@
 #define LITTLEENDIAN_CPU
 #endif
 
-#undef READ
-#undef WRITE
 #undef FRAME_SIZE
 
 #define dm_output_to_console(fmt, ...) DRM_DEBUG_KMS(fmt, ##__VA_ARGS__)
index 2083c308007cde72412f7ad7291dc85fe882e532..470d7b89071a40163dc039af84a4bc345852cfcf 100644 (file)
@@ -133,6 +133,10 @@ enum PP_FEATURE_MASK {
        PP_AVFS_MASK = 0x40000,
 };
 
+enum DC_FEATURE_MASK {
+       DC_FBC_MASK = 0x1,
+};
+
 /**
  * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
  */
index d2e7c0fa96c2f7263f367cf67597993aa9ebcbf5..8eb0bb241210bdffe3ff4f3e280bed4856a1c810 100644 (file)
@@ -1325,7 +1325,7 @@ struct atom_smu_info_v3_3 {
   struct   atom_common_table_header  table_header;
   uint8_t  smuip_min_ver;
   uint8_t  smuip_max_ver;
-  uint8_t  smu_rsd1;
+  uint8_t  waflclk_ss_mode;
   uint8_t  gpuclk_ss_mode;
   uint16_t sclk_ss_percentage;
   uint16_t sclk_ss_rate_10hz;
@@ -1355,7 +1355,10 @@ struct atom_smu_info_v3_3 {
   uint32_t syspll3_1_vco_freq_10khz;
   uint32_t bootup_fclk_10khz;
   uint32_t bootup_waflclk_10khz;
-  uint32_t reserved[3];
+  uint32_t smu_info_caps;
+  uint16_t waflclk_ss_percentage;    // in unit of 0.001%
+  uint16_t smuinitoffset;
+  uint32_t reserved;
 };
 
 /*
index e8964cae6b93dba0c3d183dcc5383fa12734fca3..d6aa1d414320bf1d63bb84ffb490ac8e8b6417e6 100644 (file)
@@ -723,11 +723,14 @@ static int pp_dpm_force_clock_level(void *handle,
                pr_info("%s was not implemented.\n", __func__);
                return 0;
        }
+
+       if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+               pr_info("force clock level is for dpm manual mode only.\n");
+               return -EINVAL;
+       }
+
        mutex_lock(&hwmgr->smu_lock);
-       if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)
-               ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
-       else
-               ret = -EINVAL;
+       ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
        mutex_unlock(&hwmgr->smu_lock);
        return ret;
 }
@@ -963,6 +966,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 static int pp_set_power_limit(void *handle, uint32_t limit)
 {
        struct pp_hwmgr *hwmgr = handle;
+       uint32_t max_power_limit;
 
        if (!hwmgr || !hwmgr->pm_en)
                return -EINVAL;
@@ -975,7 +979,13 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
        if (limit == 0)
                limit = hwmgr->default_power_limit;
 
-       if (limit > hwmgr->default_power_limit)
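+       /* With overdrive enabled, allow up to TDPODLimit percent above the default limit. */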
+       max_power_limit = hwmgr->default_power_limit;
+       if (hwmgr->od_enabled) {
+               max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
+               max_power_limit /= 100;
+       }
+
+       if (limit > max_power_limit)
                return -EINVAL;
 
        mutex_lock(&hwmgr->smu_lock);
@@ -994,8 +1004,13 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
 
        mutex_lock(&hwmgr->smu_lock);
 
-       if (default_limit)
+       if (default_limit) {
                *limit = hwmgr->default_power_limit;
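+               /* Report the overdrive-scaled ceiling as the default when OD is enabled. */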
+               if (hwmgr->od_enabled) {
+                       *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
+                       *limit /= 100;
+               }
+       }
        else
                *limit = hwmgr->power_limit;
 
@@ -1303,12 +1318,12 @@ static int pp_enable_mgpu_fan_boost(void *handle)
 {
        struct pp_hwmgr *hwmgr = handle;
 
-       if (!hwmgr || !hwmgr->pm_en)
+       if (!hwmgr)
                return -EINVAL;
 
-       if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) {
+       if (!hwmgr->pm_en ||
+            hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
                return 0;
-       }
 
        mutex_lock(&hwmgr->smu_lock);
        hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
index 85119c2bdcc8ff2e2bbd54b53e204b095decb2d4..a2a7e0e94aa6b704b015122d413c06d6045b1d00 100644 (file)
@@ -80,7 +80,9 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
        PHM_FUNC_CHECK(hwmgr);
        adev = hwmgr->adev;
 
-       if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
+       /* Skip for suspend/resume case */
+       if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)
+               && adev->in_suspend) {
                pr_info("dpm has been enabled\n");
                return 0;
        }
index 47ac9236973947fb369e5b3652900e9d64ab0d93..0173d04800245b44f88f1f80e8dc3cb50df3bd2c 100644 (file)
@@ -352,6 +352,9 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id,
 
        switch (task_id) {
        case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+               ret = phm_pre_display_configuration_changed(hwmgr);
+               if (ret)
+                       return ret;
                ret = phm_set_cpu_power_state(hwmgr);
                if (ret)
                        return ret;
index 91ffb7bc4ee72512f9a31aebbce9eaec3939d9d7..56437866d1206c163f36593e2764bfb6bfd96170 100644 (file)
@@ -265,8 +265,6 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
        if (skip)
                return 0;
 
-       phm_pre_display_configuration_changed(hwmgr);
-
        phm_display_configuration_changed(hwmgr);
 
        if (hwmgr->ps)
index 6c99cbf51c08fd035fa3da585c06b8b7074bdb29..b61a01f552840d39a0ce7113de3d6e035d84354f 100644 (file)
@@ -3588,9 +3588,12 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
                        break;
        }
 
-       if (i >= sclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
-       else {
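+       /* Requested sclk is above every table entry: flag an OD update and stretch the top level. */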
+       if (i >= sclk_table->count) {
+               if (sclk > sclk_table->dpm_levels[i-1].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+                       sclk_table->dpm_levels[i-1].value = sclk;
+               }
+       } else {
        /* TODO: Check SCLK in DAL's minimum clocks
         * in case DeepSleep divider update is required.
         */
@@ -3605,9 +3608,12 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons
                        break;
        }
 
-       if (i >= mclk_table->count)
-               data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
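+       /* Same OD handling for an out-of-table mclk. */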
+       if (i >= mclk_table->count) {
+               if (mclk > mclk_table->dpm_levels[i-1].value) {
+                       data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+                       mclk_table->dpm_levels[i-1].value = mclk;
+               }
+       }
 
        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
                data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
@@ -4523,12 +4529,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
        struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
        struct smu7_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.sclk_table);
-       int value;
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
+                       [golden_sclk_table->count - 1].value;
 
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
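+       /* OD percentage: delta of the current top level over the golden one, rounded up. */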
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -4565,12 +4571,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
        struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
        struct smu7_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mclk_table);
-       int value;
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
+                       [golden_mclk_table->count - 1].value;
 
-       value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
index 4714b5b598255b1cad2790b76011f02e98afd704..101c09b212ade5690299c823f27aea1ae65cae4a 100644 (file)
@@ -713,20 +713,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
                table->WatermarkRow[1][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
-                       100);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[1][i].WmSetting = (uint8_t)
                                wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
        }
@@ -734,20 +734,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
        for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
                table->WatermarkRow[0][i].MinClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MaxClock =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MinUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].MaxUclk =
                        cpu_to_le16((uint16_t)
-                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
-                       1000);
+                       (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                       1000));
                table->WatermarkRow[0][i].WmSetting = (uint8_t)
                                wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
index 419a1d77d661e3708fded47180ce2b532592a087..79c86247d0ac0324f2282a3fc2ef46006cefc209 100644 (file)
@@ -1333,7 +1333,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
        if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.memoryClock =
                                        dpm_table->dpm_levels[dpm_table->count-1].value;
-
        vega10_init_dpm_state(&(dpm_table->dpm_state));
 
        data->dpm_table.eclk_table.count = 0;
@@ -3249,6 +3248,41 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
 {
        struct vega10_hwmgr *data = hwmgr->backend;
+       const struct phm_set_power_state_input *states =
+                       (const struct phm_set_power_state_input *)input;
+       const struct vega10_power_state *vega10_ps =
+                       cast_const_phw_vega10_power_state(states->pnew_state);
+       struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
+       uint32_t sclk = vega10_ps->performance_levels
+                       [vega10_ps->performance_level_count - 1].gfx_clock;
+       struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
+       uint32_t mclk = vega10_ps->performance_levels
+                       [vega10_ps->performance_level_count - 1].mem_clock;
+       uint32_t i;
+
+       for (i = 0; i < sclk_table->count; i++) {
+               if (sclk == sclk_table->dpm_levels[i].value)
+                       break;
+       }
+
+       if (i >= sclk_table->count) {
+               if (sclk > sclk_table->dpm_levels[i-1].value) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+                       sclk_table->dpm_levels[i-1].value = sclk;
+               }
+       }
+
+       for (i = 0; i < mclk_table->count; i++) {
+               if (mclk == mclk_table->dpm_levels[i].value)
+                       break;
+       }
+
+       if (i >= mclk_table->count) {
+               if (mclk > mclk_table->dpm_levels[i-1].value) {
+                       data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+                       mclk_table->dpm_levels[i-1].value = mclk;
+               }
+       }
 
        if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
                data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
@@ -4492,15 +4526,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
        struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
        struct vega10_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.gfx_table);
-       int value;
-
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels
-                       [golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
                        [golden_sclk_table->count - 1].value;
 
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
+
        return value;
 }
 
@@ -4529,11 +4561,13 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
 
        if (vega10_ps->performance_levels
                        [vega10_ps->performance_level_count - 1].gfx_clock >
-                       hwmgr->platform_descriptor.overdriveLimit.engineClock)
+                       hwmgr->platform_descriptor.overdriveLimit.engineClock) {
                vega10_ps->performance_levels
                [vega10_ps->performance_level_count - 1].gfx_clock =
                                hwmgr->platform_descriptor.overdriveLimit.engineClock;
-
+               pr_warn("max sclk supported by vbios is %d\n",
+                               hwmgr->platform_descriptor.overdriveLimit.engineClock);
+       }
        return 0;
 }
 
@@ -4543,16 +4577,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
        struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
        struct vega10_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mem_table);
-       int value;
-
-       value = (mclk_table->dpm_levels
-                       [mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels
-                       [golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
                        [golden_mclk_table->count - 1].value;
 
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
+
        return value;
 }
 
@@ -4581,10 +4612,13 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
 
        if (vega10_ps->performance_levels
                        [vega10_ps->performance_level_count - 1].mem_clock >
-                       hwmgr->platform_descriptor.overdriveLimit.memoryClock)
+                       hwmgr->platform_descriptor.overdriveLimit.memoryClock) {
                vega10_ps->performance_levels
                [vega10_ps->performance_level_count - 1].mem_clock =
                                hwmgr->platform_descriptor.overdriveLimit.memoryClock;
+               pr_warn("max mclk supported by vbios is %d\n",
+                               hwmgr->platform_descriptor.overdriveLimit.memoryClock);
+       }
 
        return 0;
 }
index 9600e2f226e98e2be4d647d839bd98aef883de30..54364444ecd121dd611c30148f4e292a8ee18e1a 100644 (file)
@@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr)
        struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
        struct vega12_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.gfx_table);
-       int value;
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
+                       [golden_sclk_table->count - 1].value;
 
-       value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
-                       100 /
-                       golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr)
        struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
        struct vega12_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mem_table);
-       int value;
-
-       value = (mclk_table->dpm_levels
-                       [mclk_table->count - 1].value -
-                       golden_mclk_table->dpm_levels
-                       [golden_mclk_table->count - 1].value) *
-                       100 /
-                       golden_mclk_table->dpm_levels
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
                        [golden_mclk_table->count - 1].value;
 
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
+
        return value;
 }
 
@@ -2356,6 +2353,13 @@ static int vega12_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable)
                return vega12_disable_gfx_off(hwmgr);
 }
 
+static int vega12_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+                               PHM_PerformanceLevelDesignation designation, uint32_t index,
+                               PHM_PerformanceLevel *level)
+{
+       return 0;
+}
+
 static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
        .backend_init = vega12_hwmgr_backend_init,
        .backend_fini = vega12_hwmgr_backend_fini,
@@ -2406,6 +2410,7 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
        .register_irq_handlers = smu9_register_irq_handlers,
        .start_thermal_controller = vega12_start_thermal_controller,
        .powergate_gfx = vega12_gfx_off_control,
+       .get_performance_level = vega12_get_performance_level,
 };
 
 int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
index b4dbbb7c334ce04c9760f0114825cbdc0ba5ee88..3367dd30cdd0d1c8c8482afb436885383a736103 100644 (file)
@@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
        data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
 
-       data->registry_data.disallowed_features = 0x0;
+       /*
+        * Disable the following features for now:
+        *   GFXCLK DS
+        *   SOCCLK DS
+        *   LCLK DS
+        *   DCEFCLK DS
+        *   FCLK DS
+        *   MP1CLK DS
+        *   MP0CLK DS
+        */
+       data->registry_data.disallowed_features = 0xE0041C00;
        data->registry_data.od_state_in_dc_support = 0;
        data->registry_data.thermal_support = 1;
        data->registry_data.skip_baco_hardware = 0;
@@ -120,6 +130,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->registry_data.disable_auto_wattman = 1;
        data->registry_data.auto_wattman_debug = 0;
        data->registry_data.auto_wattman_sample_period = 100;
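+       /* IEEE-754 single-precision bit pattern for 0.925f, the default FCLK/GFXCLK ratio. */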
+       data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
        data->registry_data.auto_wattman_threshold = 50;
        data->registry_data.gfxoff_controlled_by_driver = 1;
        data->gfxoff_allowed = false;
@@ -829,6 +840,28 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
        return 0;
 }
 
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+       struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
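+       /* With UCLK DPM enabled, request fast uclk switching from the SMU. */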
+       if (data->smu_features[GNLD_DPM_UCLK].enabled)
+               return smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_SetUclkFastSwitch,
+                       1);
+
+       return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+       struct vega20_hwmgr *data =
+                       (struct vega20_hwmgr *)(hwmgr->backend);
+
+       return smum_send_msg_to_smc_with_parameter(hwmgr,
+                       PPSMC_MSG_SetFclkGfxClkRatio,
+                       data->registry_data.fclk_gfxclk_ratio);
+}
+
 static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
 {
        struct vega20_hwmgr *data =
@@ -1290,12 +1323,13 @@ static int vega20_get_sclk_od(
                        &(data->dpm_table.gfx_table);
        struct vega20_single_dpm_table *golden_sclk_table =
                        &(data->golden_dpm_table.gfx_table);
-       int value;
+       int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
+       int golden_value = golden_sclk_table->dpm_levels
+                       [golden_sclk_table->count - 1].value;
 
        /* od percentage */
-       value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value -
-               golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100,
-               golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value);
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -1335,12 +1369,13 @@ static int vega20_get_mclk_od(
                        &(data->dpm_table.mem_table);
        struct vega20_single_dpm_table *golden_mclk_table =
                        &(data->golden_dpm_table.mem_table);
-       int value;
+       int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
+       int golden_value = golden_mclk_table->dpm_levels
+                       [golden_mclk_table->count - 1].value;
 
        /* od percentage */
-       value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value -
-               golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100,
-               golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value);
+       value -= golden_value;
+       value = DIV_ROUND_UP(value * 100, golden_value);
 
        return value;
 }
@@ -1532,6 +1567,16 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
                        "[EnableDPMTasks] Failed to enable all smu features!",
                        return result);
 
+       result = vega20_notify_smc_display_change(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "[EnableDPMTasks] Failed to notify smc display change!",
+                       return result);
+
+       result = vega20_send_clock_ratio(hwmgr);
+       PP_ASSERT_WITH_CODE(!result,
+                       "[EnableDPMTasks] Failed to send clock ratio!",
+                       return result);
+
        /* Initialize UVD/VCE powergating state */
        vega20_init_powergate_state(hwmgr);
 
@@ -1615,14 +1660,15 @@ static uint32_t vega20_find_highest_dpm_level(
        return i;
 }
 
-static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
 {
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
        uint32_t min_freq;
        int ret = 0;
 
-       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+          (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
                min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1631,7 +1677,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+          (feature_mask & FEATURE_DPM_UCLK_MASK)) {
                min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
                                        hwmgr, PPSMC_MSG_SetSoftMinByFreq,
@@ -1647,7 +1694,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+       if (data->smu_features[GNLD_DPM_UVD].enabled &&
+          (feature_mask & FEATURE_DPM_UVD_MASK)) {
                min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1665,7 +1713,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+       if (data->smu_features[GNLD_DPM_VCE].enabled &&
+          (feature_mask & FEATURE_DPM_VCE_MASK)) {
                min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1675,7 +1724,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+          (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
                min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1688,14 +1738,15 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
        return ret;
 }
 
-static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
+static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
 {
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
        uint32_t max_freq;
        int ret = 0;
 
-       if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
+          (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
                max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1705,7 +1756,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_UCLK].enabled &&
+          (feature_mask & FEATURE_DPM_UCLK_MASK)) {
                max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1715,7 +1767,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_UVD].enabled) {
+       if (data->smu_features[GNLD_DPM_UVD].enabled &&
+          (feature_mask & FEATURE_DPM_UVD_MASK)) {
                max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1732,7 +1785,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_VCE].enabled) {
+       if (data->smu_features[GNLD_DPM_VCE].enabled &&
+          (feature_mask & FEATURE_DPM_VCE_MASK)) {
                max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1742,7 +1796,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
                                        return ret);
        }
 
-       if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+       if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
+          (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
                max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
 
                PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
@@ -1875,38 +1930,20 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
        return ret;
 }
 
-static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx_freq)
-{
-       uint32_t gfx_clk = 0;
-       int ret = 0;
-
-       *gfx_freq = 0;
-
-       PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0,
-                       "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
-                       return ret);
-       gfx_clk = smum_get_argument(hwmgr);
-
-       *gfx_freq = gfx_clk * 100;
-
-       return 0;
-}
-
-static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_freq)
+static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
+               PPCLK_e clk_id, uint32_t *clk_freq)
 {
-       uint32_t mem_clk = 0;
        int ret = 0;
 
-       *mclk_freq = 0;
+       *clk_freq = 0;
 
        PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0,
-                       "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
+                       PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+                       "[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
                        return ret);
-       mem_clk = smum_get_argument(hwmgr);
+       *clk_freq = smum_get_argument(hwmgr);
 
-       *mclk_freq = mem_clk * 100;
+       *clk_freq = *clk_freq * 100;
 
        return 0;
 }
@@ -1937,12 +1974,16 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
 
        switch (idx) {
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               ret = vega20_get_current_gfx_clk_freq(hwmgr, (uint32_t *)value);
+               ret = vega20_get_current_clk_freq(hwmgr,
+                               PPCLK_GFXCLK,
+                               (uint32_t *)value);
                if (!ret)
                        *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               ret = vega20_get_current_mclk_freq(hwmgr, (uint32_t *)value);
+               ret = vega20_get_current_clk_freq(hwmgr,
+                               PPCLK_UCLK,
+                               (uint32_t *)value);
                if (!ret)
                        *size = 4;
                break;
@@ -1986,19 +2027,6 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
        return ret;
 }
 
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
-               bool has_disp)
-{
-       struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
-       if (data->smu_features[GNLD_DPM_UCLK].enabled)
-               return smum_send_msg_to_smc_with_parameter(hwmgr,
-                       PPSMC_MSG_SetUclkFastSwitch,
-                       has_disp ? 1 : 0);
-
-       return 0;
-}
-
 int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
                struct pp_display_clock_request *clock_req)
 {
@@ -2012,7 +2040,6 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
        if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
                switch (clk_type) {
                case amd_pp_dcef_clock:
-                       clk_freq = clock_req->clock_freq_in_khz / 100;
                        clk_select = PPCLK_DCEFCLK;
                        break;
                case amd_pp_disp_clock:
@@ -2041,29 +2068,31 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
        return result;
 }
 
+static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
+                               PHM_PerformanceLevelDesignation designation, uint32_t index,
+                               PHM_PerformanceLevel *level)
+{
+       return 0;
+}
+
 static int vega20_notify_smc_display_config_after_ps_adjustment(
                struct pp_hwmgr *hwmgr)
 {
        struct vega20_hwmgr *data =
                        (struct vega20_hwmgr *)(hwmgr->backend);
+       struct vega20_single_dpm_table *dpm_table =
+                       &data->dpm_table.mem_table;
        struct PP_Clocks min_clocks = {0};
        struct pp_display_clock_request clock_req;
        int ret = 0;
 
-       if ((hwmgr->display_config->num_display > 1) &&
-            !hwmgr->display_config->multi_monitor_in_sync &&
-            !hwmgr->display_config->nb_pstate_switch_disable)
-               vega20_notify_smc_display_change(hwmgr, false);
-       else
-               vega20_notify_smc_display_change(hwmgr, true);
-
        min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
        min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
 
        if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
                clock_req.clock_type = amd_pp_dcef_clock;
-               clock_req.clock_freq_in_khz = min_clocks.dcefClock;
+               clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
                if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
                        if (data->smu_features[GNLD_DS_DCEFCLK].supported)
                                PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
@@ -2076,6 +2105,15 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
                }
        }
 
+       if (data->smu_features[GNLD_DPM_UCLK].enabled) {
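+               /* Displays need a memory-clock floor; pin the UCLK hard minimum to it. */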
+               dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
+               PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_SetHardMinByFreq,
+                               (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
+                               "[SetHardMinFreq] Set hard min uclk failed!",
+                               return ret);
+       }
+
        return 0;
 }
 
@@ -2098,12 +2136,12 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
                data->dpm_table.mem_table.dpm_state.soft_max_level =
                data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
-       ret = vega20_upload_dpm_min_level(hwmgr);
+       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to highest!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr);
+       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2130,12 +2168,12 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
                data->dpm_table.mem_table.dpm_state.soft_max_level =
                data->dpm_table.mem_table.dpm_levels[soft_level].value;
 
-       ret = vega20_upload_dpm_min_level(hwmgr);
+       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to highest!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr);
+       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2148,12 +2186,12 @@ static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
 {
        int ret = 0;
 
-       ret = vega20_upload_dpm_min_level(hwmgr);
+       ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload DPM Bootup Levels!",
                        return ret);
 
-       ret = vega20_upload_dpm_max_level(hwmgr);
+       ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF);
        PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload DPM Max Levels!",
                        return ret);
@@ -2211,12 +2249,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
                data->dpm_table.gfx_table.dpm_state.soft_max_level =
                        data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
 
-               ret = vega20_upload_dpm_min_level(hwmgr);
+               ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to lowest!",
                        return ret);
 
-               ret = vega20_upload_dpm_max_level(hwmgr);
+               ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2231,12 +2269,12 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
                data->dpm_table.mem_table.dpm_state.soft_max_level =
                        data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
 
-               ret = vega20_upload_dpm_min_level(hwmgr);
+               ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload boot level to lowest!",
                        return ret);
 
-               ret = vega20_upload_dpm_max_level(hwmgr);
+               ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
                PP_ASSERT_WITH_CODE(!ret,
                        "Failed to upload dpm max level to highest!",
                        return ret);
@@ -2353,7 +2391,7 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
 
        for (i = 0; i < count; i++) {
                clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
                clocks->data[i].latency_in_us = 0;
        }
 
@@ -2383,7 +2421,7 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
        for (i = 0; i < count; i++) {
                clocks->data[i].clocks_in_khz =
                        data->mclk_latency_table.entries[i].frequency =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
                clocks->data[i].latency_in_us =
                        data->mclk_latency_table.entries[i].latency =
                        vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -2408,7 +2446,7 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
 
        for (i = 0; i < count; i++) {
                clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
                clocks->data[i].latency_in_us = 0;
        }
 
@@ -2431,7 +2469,7 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
 
        for (i = 0; i < count; i++) {
                clocks->data[i].clocks_in_khz =
-                       dpm_table->dpm_levels[i].value * 100;
+                       dpm_table->dpm_levels[i].value * 1000;
                clocks->data[i].latency_in_us = 0;
        }
 
@@ -2582,11 +2620,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
                                return -EINVAL;
                        }
 
-                       if (input_clk < clocks.data[0].clocks_in_khz / 100 ||
+                       if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
                            input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
                                pr_info("clock freq %d is not within allowed range [%d - %d]\n",
                                        input_clk,
-                                       clocks.data[0].clocks_in_khz / 100,
+                                       clocks.data[0].clocks_in_khz / 1000,
                                        od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
                                return -EINVAL;
                        }
@@ -2726,7 +2764,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
        switch (type) {
        case PP_SCLK:
-               ret = vega20_get_current_gfx_clk_freq(hwmgr, &now);
+               ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
                PP_ASSERT_WITH_CODE(!ret,
                                "Attempt to get current gfx clk Failed!",
                                return ret);
@@ -2738,12 +2776,12 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
                for (i = 0; i < clocks.num_levels; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
-                               i, clocks.data[i].clocks_in_khz / 100,
+                               i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now) ? "*" : "");
                break;
 
        case PP_MCLK:
-               ret = vega20_get_current_mclk_freq(hwmgr, &now);
+               ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
                PP_ASSERT_WITH_CODE(!ret,
                                "Attempt to get current mclk freq Failed!",
                                return ret);
@@ -2755,7 +2793,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
 
                for (i = 0; i < clocks.num_levels; i++)
                        size += sprintf(buf + size, "%d: %uMhz %s\n",
-                               i, clocks.data[i].clocks_in_khz / 100,
+                               i, clocks.data[i].clocks_in_khz / 1000,
                                (clocks.data[i].clocks_in_khz == now) ? "*" : "");
                break;
 
@@ -2820,7 +2858,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        return ret);
 
                        size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
-                               clocks.data[0].clocks_in_khz / 100,
+                               clocks.data[0].clocks_in_khz / 1000,
                                od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
                }
 
@@ -3476,6 +3514,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
                vega20_set_watermarks_for_clocks_ranges,
        .display_clock_voltage_request =
                vega20_display_clock_voltage_request,
+       .get_performance_level =
+               vega20_get_performance_level,
        /* UMD pstate, profile related */
        .force_dpm_level =
                vega20_dpm_force_dpm_level,
index 56fe6a0d42e804f956846faad473071335b3b887..25faaa5c5b10cbc5fcd720d7b15546a826641682 100644 (file)
@@ -328,6 +328,7 @@ struct vega20_registry_data {
        uint8_t   disable_auto_wattman;
        uint32_t  auto_wattman_debug;
        uint32_t  auto_wattman_sample_period;
+       uint32_t  fclk_gfxclk_ratio;
        uint8_t   auto_wattman_threshold;
        uint8_t   log_avfs_param;
        uint8_t   enable_enginess;
index e5f7f82300659f18999a29442166442be071010a..97f8a1a970c37e124c8e5b07727f7ce6e32e8849 100644 (file)
@@ -642,8 +642,14 @@ static int check_powerplay_tables(
                "Unsupported PPTable format!", return -1);
        PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0,
                "Invalid PowerPlay Table!", return -1);
-       PP_ASSERT_WITH_CODE(powerplay_table->smcPPTable.Version == PPTABLE_V20_SMU_VERSION,
-               "Unmatch PPTable version, vbios update may be needed!", return -1);
+
+       if (powerplay_table->smcPPTable.Version != PPTABLE_V20_SMU_VERSION) {
+               pr_info("Unmatch PPTable version: "
+                       "pptable from VBIOS is V%d while driver supported is V%d!",
+                       powerplay_table->smcPPTable.Version,
+                       PPTABLE_V20_SMU_VERSION);
+               return -EINVAL;
+       }
 
        //dump_pptable(&powerplay_table->smcPPTable);
 
@@ -716,10 +722,6 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
                "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!",
                return -1);
 
-       memset(ppsmc_pptable->Padding32,
-                       0,
-                       sizeof(struct atom_smc_dpm_info_v4_4) -
-                       sizeof(struct atom_common_table_header));
        ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
        ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
 
@@ -778,22 +780,19 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
        ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
        ppsmc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
 
-       if ((smc_dpm_table->table_header.format_revision == 4) &&
-           (smc_dpm_table->table_header.content_revision == 4)) {
-               for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
-                       ppsmc_pptable->I2cControllers[i].Enabled =
-                               smc_dpm_table->i2ccontrollers[i].enabled;
-                       ppsmc_pptable->I2cControllers[i].SlaveAddress =
-                               smc_dpm_table->i2ccontrollers[i].slaveaddress;
-                       ppsmc_pptable->I2cControllers[i].ControllerPort =
-                               smc_dpm_table->i2ccontrollers[i].controllerport;
-                       ppsmc_pptable->I2cControllers[i].ThermalThrottler =
-                               smc_dpm_table->i2ccontrollers[i].thermalthrottler;
-                       ppsmc_pptable->I2cControllers[i].I2cProtocol =
-                               smc_dpm_table->i2ccontrollers[i].i2cprotocol;
-                       ppsmc_pptable->I2cControllers[i].I2cSpeed =
-                               smc_dpm_table->i2ccontrollers[i].i2cspeed;
-               }
+       for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+               ppsmc_pptable->I2cControllers[i].Enabled =
+                       smc_dpm_table->i2ccontrollers[i].enabled;
+               ppsmc_pptable->I2cControllers[i].SlaveAddress =
+                       smc_dpm_table->i2ccontrollers[i].slaveaddress;
+               ppsmc_pptable->I2cControllers[i].ControllerPort =
+                       smc_dpm_table->i2ccontrollers[i].controllerport;
+               ppsmc_pptable->I2cControllers[i].ThermalThrottler =
+                       smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+               ppsmc_pptable->I2cControllers[i].I2cProtocol =
+                       smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+               ppsmc_pptable->I2cControllers[i].I2cSpeed =
+                       smc_dpm_table->i2ccontrollers[i].i2cspeed;
        }
 
        return 0;
@@ -882,15 +881,10 @@ static int init_powerplay_table_information(
        if (pptable_information->smc_pptable == NULL)
                return -ENOMEM;
 
-       if (powerplay_table->smcPPTable.Version <= 2)
-               memcpy(pptable_information->smc_pptable,
-                               &(powerplay_table->smcPPTable),
-                               sizeof(PPTable_t) -
-                               sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT);
-       else
-               memcpy(pptable_information->smc_pptable,
-                               &(powerplay_table->smcPPTable),
-                               sizeof(PPTable_t));
+       memcpy(pptable_information->smc_pptable,
+                       &(powerplay_table->smcPPTable),
+                       sizeof(PPTable_t));
 
        result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
 
index 2998a49960ede1d47975675afa8c1a290d0461c9..63d5cf69154967b90aa696de2ae5c1d407bd579f 100644 (file)
@@ -29,7 +29,7 @@
 // any structure is changed in this file
 #define SMU11_DRIVER_IF_VERSION 0x12
 
-#define PPTABLE_V20_SMU_VERSION 2
+#define PPTABLE_V20_SMU_VERSION 3
 
 #define NUM_GFXCLK_DPM_LEVELS  16
 #define NUM_VCLK_DPM_LEVELS    8
index 45d64a81e94539fe403087cd2827b2992e59edca..4f63a736ea0e7371b6f09b26ea8cc55ec6b9bdd0 100644 (file)
 #define PPSMC_MSG_SetSystemVirtualDramAddrHigh   0x4B
 #define PPSMC_MSG_SetSystemVirtualDramAddrLow    0x4C
 #define PPSMC_MSG_WaflTest                       0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio             0x4E
+// Unused ID 0x4F to 0x50
 #define PPSMC_MSG_AllowGfxOff                    0x51
 #define PPSMC_MSG_DisallowGfxOff                 0x52
 #define PPSMC_MSG_GetPptLimit                    0x53
index f836d30fdd4428b166591676f69f588f43f852fb..09b844ec3eabae4f09f8c0d10ed84d53fcc75ee2 100644 (file)
@@ -71,7 +71,11 @@ static int smu8_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
        result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
                                        SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
        if (result != 0) {
+               /* Read the last message to SMU, to report actual cause */
+               uint32_t val = cgs_read_register(hwmgr->device,
+                                                mmSMU_MP1_SRBM2P_MSG_0);
                pr_err("smu8_send_msg_to_smc_async (0x%04x) failed\n", msg);
+               pr_err("SMU still servicing msg (0x%04x)\n", val);
                return result;
        }
 
index 69dab82a37714853b5dfdb74dbe479f7d6c10fb1..bf589c53b908d66789679df6f4098c883150fa87 100644 (file)
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+{
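+       /* Evict any firmware framebuffer (e.g. efifb) still claiming this device's aperture. */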
+       struct apertures_struct *ap;
+       bool primary = false;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       ap->ranges[0].base = pci_resource_start(pdev, 0);
+       ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
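+       /* A shadowed VGA ROM (IORESOURCE_ROM_SHADOW) marks the primary boot display on x86. */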
+       primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+       drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
+       kfree(ap);
+}
+
 static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
+       ast_kick_out_firmware_fb(pdev);
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
index 0cd827e11fa20d8af7f038cecb8ea12465014523..de26df0c6044de127422999c669eeb5d68304a66 100644 (file)
@@ -263,6 +263,7 @@ static void ast_fbdev_destroy(struct drm_device *dev,
 {
        struct ast_framebuffer *afb = &afbdev->afb;
 
+       drm_crtc_force_disable_all(dev);
        drm_fb_helper_unregister_fbi(&afbdev->helper);
 
        if (afb->obj) {
index dac355812adcbdcea7d68a40ff44bca90b0e0dcf..373700c05a00f9f3890d48fee79317edf77cedd8 100644 (file)
@@ -583,7 +583,8 @@ void ast_driver_unload(struct drm_device *dev)
        drm_mode_config_cleanup(dev);
 
        ast_mm_fini(ast);
-       pci_iounmap(dev->pdev, ast->ioregs);
+       if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
+               pci_iounmap(dev->pdev, ast->ioregs);
        pci_iounmap(dev->pdev, ast->regs);
        kfree(ast);
 }
index 5e77d456d9bb9434040107a69536815a270c7865..8bb355d5d43d80169fbfeb73954008b772616e15 100644 (file)
@@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
        }
        ast_bo_unreserve(bo);
 
+       ast_set_offset_reg(crtc);
        ast_set_start_address_crt1(crtc, (u32)gpu_addr);
 
        return 0;
@@ -972,9 +973,21 @@ static int get_clock(void *i2c_priv)
 {
        struct ast_i2c_chan *i2c = i2c_priv;
        struct ast_private *ast = i2c->dev->dev_private;
-       uint32_t val;
+       uint32_t val, val2, count, pass;
+
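+       /* Debounce the bit-banged clock line: require five consecutive identical reads (bounded by 0x10000 tries). */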
+       count = 0;
+       pass = 0;
+       val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+       do {
+               val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+               if (val == val2) {
+                       pass++;
+               } else {
+                       pass = 0;
+                       val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+               }
+       } while ((pass < 5) && (count++ < 0x10000));
 
-       val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
        return val & 1 ? 1 : 0;
 }
 
@@ -982,9 +995,21 @@ static int get_data(void *i2c_priv)
 {
        struct ast_i2c_chan *i2c = i2c_priv;
        struct ast_private *ast = i2c->dev->dev_private;
-       uint32_t val;
+       uint32_t val, val2, count, pass;
+
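+       /* Same five-read debounce as get_clock, applied to the data bit. */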
+       count = 0;
+       pass = 0;
+       val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+       do {
+               val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+               if (val == val2) {
+                       pass++;
+               } else {
+                       pass = 0;
+                       val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+               }
+       } while ((pass < 5) && (count++ < 0x10000));
 
-       val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
        return val & 1 ? 1 : 0;
 }
 
@@ -997,7 +1022,7 @@ static void set_clock(void *i2c_priv, int clock)
 
        for (i = 0; i < 0x10000; i++) {
                ujcrb7 = ((clock & 0x01) ? 0 : 1);
-               ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+               ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
                jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
                if (ujcrb7 == jtemp)
                        break;
@@ -1013,7 +1038,7 @@ static void set_data(void *i2c_priv, int data)
 
        for (i = 0; i < 0x10000; i++) {
                ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
-               ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+               ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
                jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
                if (ujcrb7 == jtemp)
                        break;
@@ -1254,7 +1279,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
        ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
 
        /* dummy write to fire HWC */
-       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+       ast_show_cursor(crtc);
 
        return 0;
 }
index f8a931cf3665e8bac6a02017760c33d790d530a0..10243965ee7c0219737cb6a4e3fa85b728cd1fe4 100644 (file)
@@ -54,7 +54,7 @@
 #define SN_AUX_ADDR_7_0_REG                    0x76
 #define SN_AUX_LENGTH_REG                      0x77
 #define SN_AUX_CMD_REG                         0x78
-#define  AUX_CMD_SEND                          BIT(1)
+#define  AUX_CMD_SEND                          BIT(0)
 #define  AUX_CMD_REQ(x)                                ((x) << 4)
 #define SN_AUX_RDATA_REG(x)                    (0x79 + (x))
 #define SN_SSC_CONFIG_REG                      0x93
@@ -458,18 +458,6 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
        unsigned int val;
        int ret;
 
-       /*
-        * FIXME:
-        * This 70ms was found necessary by experimentation. If it's not
-        * present, link training fails. It seems like it can go anywhere from
-        * pre_enable() up to semi-auto link training initiation below.
-        *
-        * Neither the datasheet for the bridge nor the panel tested mention a
-        * delay of this magnitude in the timing requirements. So for now, add
-        * the mystery delay until someone figures out a better fix.
-        */
-       msleep(70);
-
        /* DSI_A lane config */
        val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
        regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
@@ -536,7 +524,22 @@ static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
        /* configure bridge ref_clk */
        ti_sn_bridge_set_refclk_freq(pdata);
 
-       /* in case drm_panel is connected then HPD is not supported */
+       /*
+        * HPD on this bridge chip is a bit useless.  This is an eDP bridge
+        * so the HPD is an internal signal that's only there to signal that
+        * the panel is done powering up.  ...but the bridge chip debounces
+        * this signal by between 100 ms and 400 ms (depending on process,
+        * voltage, and temperature--I measured it at about 200 ms).  One
+        * particular panel asserted HPD 84 ms after it was powered on, meaning
+        * that we saw HPD 284 ms after power on.  ...but the same panel said
+        * that instead of looking at HPD you could just hardcode a delay of
+        * 200 ms.  We'll assume that the panel driver will have the hardcoded
+        * delay in its prepare and always disable HPD.
+        *
+        * If HPD somehow makes sense on some future panel we'll have to
+        * change this to be conditional on someone specifying that HPD should
+        * be used.
+        */
        regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
                           HPD_DISABLE);
 
index 701cb334e1ea314917488b6ecde68402fb462d87..d8b526b7932c3db06e1ff7cad3f2485ddfe52cee 100644 (file)
@@ -308,6 +308,26 @@ update_connector_routing(struct drm_atomic_state *state,
                return 0;
        }
 
+       crtc_state = drm_atomic_get_new_crtc_state(state,
+                                                  new_connector_state->crtc);
+       /*
+        * For compatibility with legacy users, we want to make sure that
+        * we allow DPMS On->Off modesets on unregistered connectors. Modesets
+        * which would result in anything else must be considered invalid, to
+        * avoid turning on new displays on dead connectors.
+        *
+        * Since the connector can be unregistered at any point during an
+        * atomic check or commit, this is racy. But that's OK: all we care
+        * about is ensuring that userspace can't do anything but shut off the
+        * display on a connector that was destroyed after it's been notified,
+        * not before.
+        */
+       if (drm_connector_is_unregistered(connector) && crtc_state->active) {
+               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
+                                connector->base.id, connector->name);
+               return -EINVAL;
+       }
+
        funcs = connector->helper_private;
 
        if (funcs->atomic_best_encoder)
@@ -352,7 +372,6 @@ update_connector_routing(struct drm_atomic_state *state,
 
        set_best_encoder(state, new_connector_state, new_encoder);
 
-       crtc_state = drm_atomic_get_new_crtc_state(state, new_connector_state->crtc);
        crtc_state->connectors_changed = true;
 
        DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d:%s]\n",
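The check added above effectively reduces what userspace may do with an unregistered connector to one transition. A condensed sketch of the resulting policy:

/* update_connector_routing() policy sketch:
 *   unregistered && crtc_state->active  -> -EINVAL (no new scanout)
 *   unregistered && !crtc_state->active -> allowed (DPMS On->Off)
 *   registered                          -> normal routing below
 */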
index d9c0f75739054d88f9533a47684ec4c6f9fb30bf..1669c42c40ed3537cb9240de83a1128e38b816c5 100644 (file)
@@ -142,6 +142,7 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
 
        lockdep_assert_held_once(&dev->master_mutex);
 
+       WARN_ON(fpriv->is_master);
        old_master = fpriv->master;
        fpriv->master = drm_master_create(dev);
        if (!fpriv->master) {
@@ -170,6 +171,7 @@ out_err:
        /* drop references and restore old master on failure */
        drm_master_put(&fpriv->master);
        fpriv->master = old_master;
+       fpriv->is_master = 0;
 
        return ret;
 }
index 1e40e5decbe91a4f709305d5259b5b7c0425e617..4943cef178beb7675ab46a0d3100e33ae836bd0e 100644 (file)
@@ -379,7 +379,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
        /* The connector should have been removed from userspace long before
         * it is finally destroyed.
         */
-       if (WARN_ON(connector->registered))
+       if (WARN_ON(connector->registration_state ==
+                   DRM_CONNECTOR_REGISTERED))
                drm_connector_unregister(connector);
 
        if (connector->tile_group) {
@@ -436,7 +437,7 @@ int drm_connector_register(struct drm_connector *connector)
                return 0;
 
        mutex_lock(&connector->mutex);
-       if (connector->registered)
+       if (connector->registration_state != DRM_CONNECTOR_INITIALIZING)
                goto unlock;
 
        ret = drm_sysfs_connector_add(connector);
@@ -456,7 +457,7 @@ int drm_connector_register(struct drm_connector *connector)
 
        drm_mode_object_register(connector->dev, &connector->base);
 
-       connector->registered = true;
+       connector->registration_state = DRM_CONNECTOR_REGISTERED;
        goto unlock;
 
 err_debugfs:
@@ -478,7 +479,7 @@ EXPORT_SYMBOL(drm_connector_register);
 void drm_connector_unregister(struct drm_connector *connector)
 {
        mutex_lock(&connector->mutex);
-       if (!connector->registered) {
+       if (connector->registration_state != DRM_CONNECTOR_REGISTERED) {
                mutex_unlock(&connector->mutex);
                return;
        }
@@ -489,7 +490,7 @@ void drm_connector_unregister(struct drm_connector *connector)
        drm_sysfs_connector_remove(connector);
        drm_debugfs_connector_remove(connector);
 
-       connector->registered = false;
+       connector->registration_state = DRM_CONNECTOR_UNREGISTERED;
        mutex_unlock(&connector->mutex);
 }
 EXPORT_SYMBOL(drm_connector_unregister);
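A bool can only distinguish two states, so drm_connector_cleanup() could not tell a connector that was never registered from one that had already been unregistered. The hunks above switch to a tri-state; a sketch of its assumed shape (the enum itself lives in drm_connector.h and is not part of this hunk):

enum drm_connector_registration_state {
	DRM_CONNECTOR_INITIALIZING,	/* created, not yet visible to userspace */
	DRM_CONNECTOR_REGISTERED,	/* sysfs/debugfs entries are live */
	DRM_CONNECTOR_UNREGISTERED,	/* removed; only teardown is allowed */
};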
index 5ff1d79b86c4a532917e819cfed3c4c702a3ea07..0e0df398222d1e0220b4388d0513bf73f66164ae 100644 (file)
@@ -1275,6 +1275,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
        mutex_lock(&mgr->lock);
        mstb = mgr->mst_primary;
 
+       if (!mstb)
+               goto out;
+
        for (i = 0; i < lct - 1; i++) {
                int shift = (i % 2) ? 0 : 4;
                int port_num = (rad[i / 2] >> shift) & 0xf;
index ff0bfc65a8c1dbbbbe99ac77aeb2e122cdaf3026..b506e3622b08f64d5e1bb6d9ece2c716580883a8 100644 (file)
@@ -122,6 +122,9 @@ static const struct edid_quirk {
        /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
        { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
 
+       /* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
+       { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+
        /* Belinea 10 15 55 */
        { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
        { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
index a502f3e519fdb668c685f392d518361e1e0a1ef2..9d64f874f965be1a74970997f3be5ec07df2139c 100644 (file)
@@ -71,7 +71,7 @@ MODULE_PARM_DESC(drm_fbdev_overalloc,
 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
 static bool drm_leak_fbdev_smem = false;
 module_param_unsafe(drm_leak_fbdev_smem, bool, 0600);
-MODULE_PARM_DESC(fbdev_emulation,
+MODULE_PARM_DESC(drm_leak_fbdev_smem,
                 "Allow unsafe leaking fbdev physical smem address [default=false]");
 #endif
 
@@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
        mutex_lock(&fb_helper->lock);
        drm_connector_list_iter_begin(dev, &conn_iter);
        drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+                       continue;
+
                ret = __drm_fb_helper_add_one_connector(fb_helper, connector);
                if (ret)
                        goto fail;
index 90a1c846fc25aada95b51178060a17d20aa5ed47..8aaa5e86a979ce0985c822201643768200e4a2e3 100644 (file)
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
 
 /**
  * drm_driver_legacy_fb_format - compute drm fourcc code from legacy description
+ * @dev: DRM device
  * @bpp: bits per pixels
  * @depth: bit depth per pixel
- * @native: use host native byte order
  *
  * Computes a drm fourcc pixel format code for the given @bpp/@depth values.
  * Unlike drm_mode_legacy_fb_format() this looks at the drivers mode_config,
index 0c4eb4a9ab31f79efff7d6902c542d9a6f69ddd9..51e06defc8d8a0ea7fd2eb770a85dbce94d2b3b7 100644 (file)
@@ -104,6 +104,8 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
 int drm_sysfs_connector_add(struct drm_connector *connector);
 void drm_sysfs_connector_remove(struct drm_connector *connector);
 
+void drm_sysfs_lease_event(struct drm_device *dev);
+
 /* drm_gem.c */
 int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
index 24a177ea54176d91b289da8471cbffce3d4ee120..c61680ad962d9ef3189b476fbec507eac8a2a459 100644 (file)
@@ -296,7 +296,7 @@ void drm_lease_destroy(struct drm_master *master)
 
        if (master->lessor) {
                /* Tell the master to check the lessee list */
-               drm_sysfs_hotplug_event(dev);
+               drm_sysfs_lease_event(dev);
                drm_master_put(&master->lessor);
        }
 
index b3c1daad1169b806271691c7e373900ef6b27e5e..ecb7b33002bb27de0af599702a354e7c241cd6ed 100644 (file)
@@ -301,6 +301,16 @@ void drm_sysfs_connector_remove(struct drm_connector *connector)
        connector->kdev = NULL;
 }
 
+void drm_sysfs_lease_event(struct drm_device *dev)
+{
+       char *event_string = "LEASE=1";
+       char *envp[] = { event_string, NULL };
+
+       DRM_DEBUG("generating lease event\n");
+
+       kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
+}
+
 /**
  * drm_sysfs_hotplug_event - generate a DRM uevent
  * @dev: DRM device
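Emitting LEASE=1 instead of the generic hotplug uevent lets userspace tell lease teardown apart from a real connector change. A hedged userspace sketch (libudev) of how a compositor might watch for it; the property check is the only part tied to the kernel change above:

#include <libudev.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		/* lease-destroy uevents carry LEASE=1; ordinary hotplug
		 * uevents carry HOTPLUG=1 instead */
		if (udev_device_get_property_value(dev, "LEASE"))
			printf("lease change on %s\n",
			       udev_device_get_sysname(dev));
		udev_device_unref(dev);
	}
}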
index e7c3ed6c9a2e10ddcd7665e851a1bffb9ff0247f..9b476368aa313efd7c33945aeadd2b4c481c70d2 100644 (file)
@@ -93,7 +93,7 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
-       if (fence_completed(gpu, submit->out_fence->seqno))
+       if (dma_fence_is_signaled(submit->out_fence))
                return;
 
        /*
index 94529aa8233922b71cc36011fff305280651be53..aef487dd873153d77fd602726ab6bd92256ab593 100644 (file)
@@ -164,13 +164,6 @@ static u32 decon_get_frame_count(struct decon_context *ctx, bool end)
        return frm;
 }
 
-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
-{
-       struct decon_context *ctx = crtc->ctx;
-
-       return decon_get_frame_count(ctx, false);
-}
-
 static void decon_setup_trigger(struct decon_context *ctx)
 {
        if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
@@ -536,7 +529,6 @@ static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .disable                = decon_disable,
        .enable_vblank          = decon_enable_vblank,
        .disable_vblank         = decon_disable_vblank,
-       .get_vblank_counter     = decon_get_vblank_counter,
        .atomic_begin           = decon_atomic_begin,
        .update_plane           = decon_update_plane,
        .disable_plane          = decon_disable_plane,
@@ -554,7 +546,6 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
        int ret;
 
        ctx->drm_dev = drm_dev;
-       drm_dev->max_vblank_count = 0xffffffff;
 
        for (win = ctx->first_win; win < WINDOWS_NR; win++) {
                ctx->configs[win].pixel_formats = decon_formats;
index eea90251808fa2e58398fdcb1cac01d160307320..2696289ecc78f204fb504f24c4f897694acb41df 100644 (file)
@@ -162,16 +162,6 @@ static void exynos_drm_crtc_disable_vblank(struct drm_crtc *crtc)
                exynos_crtc->ops->disable_vblank(exynos_crtc);
 }
 
-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
-{
-       struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
-       if (exynos_crtc->ops->get_vblank_counter)
-               return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
-
-       return 0;
-}
-
 static const struct drm_crtc_funcs exynos_crtc_funcs = {
        .set_config     = drm_atomic_helper_set_config,
        .page_flip      = drm_atomic_helper_page_flip,
@@ -181,7 +171,6 @@ static const struct drm_crtc_funcs exynos_crtc_funcs = {
        .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank = exynos_drm_crtc_enable_vblank,
        .disable_vblank = exynos_drm_crtc_disable_vblank,
-       .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
 };
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
index ec9604f1272b50d12b44a9f759693dc4a8eaebf6..5e61e707f95555da181969aa71254fc71ffa4098 100644 (file)
@@ -135,7 +135,6 @@ struct exynos_drm_crtc_ops {
        void (*disable)(struct exynos_drm_crtc *crtc);
        int (*enable_vblank)(struct exynos_drm_crtc *crtc);
        void (*disable_vblank)(struct exynos_drm_crtc *crtc);
-       u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
        enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
                const struct drm_display_mode *mode);
        bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
index 07af7758066db47c866a86a2be8fdfe5386421a5..d81e62ae286aea79d39757ecb2233608b905d75f 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_mipi_dsi.h>
 #include <drm/drm_panel.h>
 #include <drm/drm_atomic_helper.h>
@@ -1474,12 +1475,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
 {
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
        struct drm_connector *connector = &dsi->connector;
+       struct drm_device *drm = encoder->dev;
        int ret;
 
        connector->polled = DRM_CONNECTOR_POLL_HPD;
 
-       ret = drm_connector_init(encoder->dev, connector,
-                                &exynos_dsi_connector_funcs,
+       ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
                                 DRM_MODE_CONNECTOR_DSI);
        if (ret) {
                DRM_ERROR("Failed to initialize connector with drm\n");
@@ -1489,7 +1490,12 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
        connector->status = connector_status_disconnected;
        drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
        drm_connector_attach_encoder(connector, encoder);
+       if (!drm->registered)
+               return 0;
 
+       connector->funcs->reset(connector);
+       drm_fb_helper_add_one_connector(drm->fb_helper, connector);
+       drm_connector_register(connector);
        return 0;
 }
 
@@ -1527,7 +1533,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
                }
 
                dsi->panel = of_drm_find_panel(device->dev.of_node);
-               if (dsi->panel) {
+               if (IS_ERR(dsi->panel)) {
+                       dsi->panel = NULL;
+               } else {
                        drm_panel_attach(dsi->panel, &dsi->connector);
                        dsi->connector.status = connector_status_connected;
                }
index 918dd2c822098444c6708761baded3ef95420025..01d182289efa38fd75a83cd399b02bbfd1e590c0 100644 (file)
@@ -192,7 +192,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
        struct drm_fb_helper *helper;
        int ret;
 
-       if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+       if (!dev->mode_config.num_crtc)
                return 0;
 
        fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
index fe754022e356b033c2fa62af06c28f3612197df5..359d37d5c958c6b258053ba62804762f75d00087 100644 (file)
@@ -61,10 +61,12 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
        }
 
        mutex_lock(&dev_priv->drm.struct_mutex);
+       mmio_hw_access_pre(dev_priv);
        ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
                                  size, I915_GTT_PAGE_SIZE,
                                  I915_COLOR_UNEVICTABLE,
                                  start, end, flags);
+       mmio_hw_access_post(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (ret)
                gvt_err("fail to alloc %s gm space from host\n",
index 2402395a068da2fc5e83ba988aa76afccfe09f03..c7103dd2d8d571fde462f173dcc67efc0973cc69 100644 (file)
@@ -1905,7 +1905,6 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
                vgpu_free_mm(mm);
                return ERR_PTR(-ENOMEM);
        }
-       mm->ggtt_mm.last_partial_off = -1UL;
 
        return mm;
 }
@@ -1930,7 +1929,6 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
-               mm->ggtt_mm.last_partial_off = -1UL;
        }
 
        vgpu_free_mm(mm);
@@ -2168,6 +2166,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        struct intel_gvt_gtt_entry e, m;
        dma_addr_t dma_addr;
        int ret;
+       struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+       bool partial_update = false;
 
        if (bytes != 4 && bytes != 8)
                return -EINVAL;
@@ -2178,68 +2178,57 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        if (!vgpu_gmadr_is_valid(vgpu, gma))
                return 0;
 
-       ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+       e.type = GTT_TYPE_GGTT_PTE;
        memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
                        bytes);
 
        /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
-        * write, we assume the two 4 bytes writes are consecutive.
-        * Otherwise, we abort and report error
+        * write, save the first 4 bytes in a list and update the virtual
+        * PTE. Only update the shadow PTE once the second 4-byte write arrives.
         */
        if (bytes < info->gtt_entry_size) {
-               if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
-                       /* the first partial part*/
-                       ggtt_mm->ggtt_mm.last_partial_off = off;
-                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
-                       return 0;
-               } else if ((g_gtt_index ==
-                               (ggtt_mm->ggtt_mm.last_partial_off >>
-                               info->gtt_entry_size_shift)) &&
-                       (off != ggtt_mm->ggtt_mm.last_partial_off)) {
-                       /* the second partial part */
-
-                       int last_off = ggtt_mm->ggtt_mm.last_partial_off &
-                               (info->gtt_entry_size - 1);
-
-                       memcpy((void *)&e.val64 + last_off,
-                               (void *)&ggtt_mm->ggtt_mm.last_partial_data +
-                               last_off, bytes);
-
-                       ggtt_mm->ggtt_mm.last_partial_off = -1UL;
-               } else {
-                       int last_offset;
-
-                       gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
-                                       ggtt_mm->ggtt_mm.last_partial_off, off,
-                                       bytes, info->gtt_entry_size);
-
-                       /* set host ggtt entry to scratch page and clear
-                        * virtual ggtt entry as not present for last
-                        * partially write offset
-                        */
-                       last_offset = ggtt_mm->ggtt_mm.last_partial_off &
-                                       (~(info->gtt_entry_size - 1));
-
-                       ggtt_get_host_entry(ggtt_mm, &m, last_offset);
-                       ggtt_invalidate_pte(vgpu, &m);
-                       ops->set_pfn(&m, gvt->gtt.scratch_mfn);
-                       ops->clear_present(&m);
-                       ggtt_set_host_entry(ggtt_mm, &m, last_offset);
-                       ggtt_invalidate(gvt->dev_priv);
-
-                       ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
-                       ops->clear_present(&e);
-                       ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
-                       ggtt_mm->ggtt_mm.last_partial_off = off;
-                       ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+               bool found = false;
+
+               list_for_each_entry_safe(pos, n,
+                               &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+                       if (g_gtt_index == pos->offset >>
+                                       info->gtt_entry_size_shift) {
+                               if (off != pos->offset) {
+                                       /* the second partial part */
+                                       int last_off = pos->offset &
+                                               (info->gtt_entry_size - 1);
+
+                                       memcpy((void *)&e.val64 + last_off,
+                                               (void *)&pos->data + last_off,
+                                               bytes);
+
+                                       list_del(&pos->list);
+                                       kfree(pos);
+                                       found = true;
+                                       break;
+                               }
+
+                               /* update of the first partial part */
+                               pos->data = e.val64;
+                               ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+                               return 0;
+                       }
+               }
 
-                       return 0;
+               if (!found) {
+                       /* the first partial part */
+                       partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+                       if (!partial_pte)
+                               return -ENOMEM;
+                       partial_pte->offset = off;
+                       partial_pte->data = e.val64;
+                       list_add_tail(&partial_pte->list,
+                               &ggtt_mm->ggtt_mm.partial_pte_list);
+                       partial_update = true;
                }
        }
 
-       if (ops->test_present(&e)) {
+       if (!partial_update && (ops->test_present(&e))) {
                gfn = ops->get_pfn(&e);
                m = e;
 
@@ -2263,16 +2252,18 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
                } else
                        ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
        } else {
-               ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
-               ggtt_invalidate_pte(vgpu, &m);
                ops->set_pfn(&m, gvt->gtt.scratch_mfn);
                ops->clear_present(&m);
        }
 
 out:
+       ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+       ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+       ggtt_invalidate_pte(vgpu, &e);
+
        ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
        ggtt_invalidate(gvt->dev_priv);
-       ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
        return 0;
 }
 
@@ -2430,6 +2421,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 
        intel_vgpu_reset_ggtt(vgpu, false);
 
+       INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
        return create_scratch_page_tree(vgpu);
 }
 
@@ -2454,6 +2447,15 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 
 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
 {
+       struct intel_gvt_partial_pte *pos, *next;
+
+       list_for_each_entry_safe(pos, next,
+                                &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
+                                list) {
+               gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+                       pos->offset, pos->data);
+               kfree(pos);
+       }
        intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
        vgpu->gtt.ggtt_mm = NULL;
 }
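Replacing the single last_partial_off/last_partial_data pair with a list means any number of GGTT entries can have a half-written PTE outstanding at once. A worked example of the sequence, assuming 8-byte entries (gtt_entry_size_shift == 3):

/* write 4 bytes at off 0x1000 (index 0x200, low half):
 *   no list entry for index 0x200 -> save {0x1000, data} on
 *   partial_pte_list, update only the virtual PTE, return.
 *
 * write 4 bytes at off 0x1004 (same index, high half):
 *   list entry found with a different offset -> merge the saved low
 *   half into e.val64, free the list entry, and only now shadow the
 *   complete 64-bit entry into the host GGTT.
 *
 * rewriting off 0x1000 before 0x1004 arrives simply refreshes the
 * saved data for that entry instead of erroring out as before.
 */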
index 7a9b36176efb7fca7198527512f8873ad21248cb..d8cb04cc946dff3e19466ff387089db96c226d53 100644 (file)
@@ -35,7 +35,6 @@
 #define _GVT_GTT_H_
 
 #define I915_GTT_PAGE_SHIFT         12
-#define I915_GTT_PAGE_MASK             (~(I915_GTT_PAGE_SIZE - 1))
 
 struct intel_vgpu_mm;
 
@@ -133,6 +132,12 @@ enum intel_gvt_mm_type {
 
 #define GVT_RING_CTX_NR_PDPS   GEN8_3LVL_PDPES
 
+struct intel_gvt_partial_pte {
+       unsigned long offset;
+       u64 data;
+       struct list_head list;
+};
+
 struct intel_vgpu_mm {
        enum intel_gvt_mm_type type;
        struct intel_vgpu *vgpu;
@@ -157,8 +162,7 @@ struct intel_vgpu_mm {
                } ppgtt_mm;
                struct {
                        void *virtual_ggtt;
-                       unsigned long last_partial_off;
-                       u64 last_partial_data;
+                       struct list_head partial_pte_list;
                } ggtt_mm;
        };
 };
index 90f50f67909a090d72b4cee84077d0b530a4969b..aa280bb071254547fd3d810494bd488d4edbcd44 100644 (file)
@@ -1609,7 +1609,7 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
        vgpu_vreg(vgpu, offset) = 0;
@@ -2607,6 +2607,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
        MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
        MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+       MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+       MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
        return 0;
 }
 
@@ -3205,9 +3208,6 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
        MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
 
-       MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-       MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
        MMIO_D(RC6_CTX_BASE, D_BXT);
 
        MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
index 10e63eea5492916f676011c98ab1751e9d02dac1..d6e02c15ef97d995fd4ec2ab203c15e5ff8ab245 100644 (file)
@@ -131,7 +131,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
        {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
 
        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
-       {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+       {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
 
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
@@ -158,6 +158,8 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
        int ring_id, i;
 
        for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
+               if (!HAS_ENGINE(dev_priv, ring_id))
+                       continue;
                offset.reg = regs[ring_id];
                for (i = 0; i < GEN9_MOCS_SIZE; i++) {
                        gen9_render_mocs.control_table[ring_id][i] =
index 44e2c0f5ec502bc1a6c27007c77d56df89019ce3..ffdbbac4400eaf7d86390a3ff105a18ef36645ea 100644 (file)
@@ -1175,8 +1175,6 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
                return -EINVAL;
        }
 
-       dram_info->valid_dimm = true;
-
        /*
         * If any of the channel is single rank channel, worst case output
         * will be same as if single rank memory, so consider single rank
@@ -1193,8 +1191,7 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
                return -EINVAL;
        }
 
-       if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
-               dram_info->is_16gb_dimm = true;
+       dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
 
        dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
                                                                       val_ch1,
@@ -1314,7 +1311,6 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
                return -EINVAL;
        }
 
-       dram_info->valid_dimm = true;
        dram_info->valid = true;
        return 0;
 }
@@ -1327,12 +1323,17 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
        int ret;
 
        dram_info->valid = false;
-       dram_info->valid_dimm = false;
-       dram_info->is_16gb_dimm = false;
        dram_info->rank = I915_DRAM_RANK_INVALID;
        dram_info->bandwidth_kbps = 0;
        dram_info->num_channels = 0;
 
+       /*
+        * Assume 16Gb DIMMs are present until proven otherwise.
+        * This is only used for the level 0 watermark latency
+        * w/a which does not apply to bxt/glk.
+        */
+       dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
        if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
                return;
 
index 8624b4bdc242dd7cbd77d527eb3b84fe59a0777f..9102571e9692d1540ad987ed31c4ec735dd80cf5 100644 (file)
@@ -1948,7 +1948,6 @@ struct drm_i915_private {
 
        struct dram_info {
                bool valid;
-               bool valid_dimm;
                bool is_16gb_dimm;
                u8 num_channels;
                enum dram_rank {
index 09187286d34627df882e4ede753db7e40da41934..d4fac09095f862aed3131243957059de2df4f6b0 100644 (file)
@@ -460,7 +460,7 @@ eb_validate_vma(struct i915_execbuffer *eb,
         * any non-page-aligned or non-canonical addresses.
         */
        if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
-                    entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+                    entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
                return -EINVAL;
 
        /* pad_to_size was once a reserved field, so sanitize it */
@@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma,
                else if (gen >= 4)
                        len = 4;
                else
-                       len = 3;
+                       len = 6;
 
                batch = reloc_gpu(eb, vma, len);
                if (IS_ERR(batch))
@@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma,
                        *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                        *batch++ = addr;
                        *batch++ = target_offset;
+
+                       /* And again for good measure (blb/pnv) */
+                       *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+                       *batch++ = addr;
+                       *batch++ = target_offset;
                }
 
                goto out;
index 56c7f86373112b96212fc7952a5968854f9bdbb1..07999fe09ad231a037bc73379b5fc27aaf484b94 100644 (file)
@@ -1757,7 +1757,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *base, struct seq_file *m)
                        if (i == 4)
                                continue;
 
-                       seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+                       seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
                                   pde, pte,
                                   (pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
                        for (i = 0; i < 4; i++) {
@@ -3413,6 +3413,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
                ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
                if (ggtt->vm.clear_range != nop_clear_range)
                        ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+
+               /* Prevent recursively calling stop_machine() and deadlocks. */
+               dev_info(dev_priv->drm.dev,
+                        "Disabling error capture for VT-d workaround\n");
+               i915_disable_error_state(dev_priv, -ENODEV);
        }
 
        ggtt->invalidate = gen6_ggtt_invalidate;
index 7e2af5f4f39bcbb5ec355257d41decea7b45d019..28039290655cb7d5cf11f2c94260ba88c669e169 100644 (file)
 #include "i915_selftest.h"
 #include "i915_timeline.h"
 
-#define I915_GTT_PAGE_SIZE_4K BIT(12)
-#define I915_GTT_PAGE_SIZE_64K BIT(16)
-#define I915_GTT_PAGE_SIZE_2M BIT(21)
+#define I915_GTT_PAGE_SIZE_4K  BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M  BIT_ULL(21)
 
 #define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
 #define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
 
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
 #define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
 
 #define I915_FENCE_REG_NONE -1
@@ -659,20 +661,20 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
                        u64 start, u64 end, unsigned int flags);
 
 /* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK           BIT(0)
-#define PIN_MAPPABLE           BIT(1)
-#define PIN_ZONE_4G            BIT(2)
-#define PIN_NONFAULT           BIT(3)
-#define PIN_NOEVICT            BIT(4)
-
-#define PIN_MBZ                        BIT(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL             BIT(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER               BIT(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE             BIT(8)
-
-#define PIN_HIGH               BIT(9)
-#define PIN_OFFSET_BIAS                BIT(10)
-#define PIN_OFFSET_FIXED       BIT(11)
+#define PIN_NONBLOCK           BIT_ULL(0)
+#define PIN_MAPPABLE           BIT_ULL(1)
+#define PIN_ZONE_4G            BIT_ULL(2)
+#define PIN_NONFAULT           BIT_ULL(3)
+#define PIN_NOEVICT            BIT_ULL(4)
+
+#define PIN_MBZ                        BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL             BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER               BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE             BIT_ULL(8)
+
+#define PIN_HIGH               BIT_ULL(9)
+#define PIN_OFFSET_BIAS                BIT_ULL(10)
+#define PIN_OFFSET_FIXED       BIT_ULL(11)
 #define PIN_OFFSET_MASK                (-I915_GTT_PAGE_SIZE)
 
 #endif
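The PIN_* values are combined into u64 flag words, but BIT(n) expands to (1UL << (n)), which is only 32 bits wide on 32-bit kernels; BIT_ULL(n) keeps the whole namespace 64-bit. A short illustration of the distinction (sketch, not from the diff):

/* BIT(4)     == (1UL  << 4) -> unsigned long, 32-bit on i386
 * BIT_ULL(4) == (1ULL << 4) -> unsigned long long, always 64-bit
 *
 * so a mixed expression such as
 *   u64 flags = PIN_OFFSET_FIXED | (offset & PIN_OFFSET_MASK);
 * now keeps every operand 64-bit wide instead of silently promoting
 * 32-bit constants.
 */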
index 8762d17b66591e2afc8fc9647bef2784145f2c19..3eb33e000d6f00f3ae4b3fdbc8f37f38e97b4d83 100644 (file)
@@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
                return 0;
        }
 
+       if (IS_ERR(error))
+               return PTR_ERR(error);
+
        if (*error->error_msg)
                err_printf(m, "%s\n", error->error_msg);
        err_printf(m, "Kernel: " UTS_RELEASE "\n");
@@ -1859,6 +1862,7 @@ void i915_capture_error_state(struct drm_i915_private *i915,
        error = i915_capture_gpu_state(i915);
        if (!error) {
                DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+               i915_disable_error_state(i915, -ENOMEM);
                return;
        }
 
@@ -1914,5 +1918,14 @@ void i915_reset_error_state(struct drm_i915_private *i915)
        i915->gpu_error.first_error = NULL;
        spin_unlock_irq(&i915->gpu_error.lock);
 
-       i915_gpu_state_put(error);
+       if (!IS_ERR(error))
+               i915_gpu_state_put(error);
+}
+
+void i915_disable_error_state(struct drm_i915_private *i915, int err)
+{
+       spin_lock_irq(&i915->gpu_error.lock);
+       if (!i915->gpu_error.first_error)
+               i915->gpu_error.first_error = ERR_PTR(err);
+       spin_unlock_irq(&i915->gpu_error.lock);
 }
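first_error now doubles as a tri-state slot, which is why the readers above were taught to cope with ERR_PTR values. A condensed sketch of the convention:

/* i915->gpu_error.first_error:
 *   NULL           - nothing captured yet
 *   valid pointer  - a captured i915_gpu_state
 *   ERR_PTR(-E...) - capture disabled (-ENODEV for the VT-d w/a,
 *                    -ENOMEM when allocation failed)
 *
 * so readers check IS_ERR() before taking a reference:
 *   error = i915_first_error_state(i915);
 *   if (IS_ERR(error))
 *           return PTR_ERR(error);
 */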
index 8710fb18ed746cface7e9a7b2d6d6ac7cd06b2b4..3ec89a504de52331ade6a9452a844527d84ec515 100644 (file)
@@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
 
 struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
 void i915_reset_error_state(struct drm_i915_private *i915);
+void i915_disable_error_state(struct drm_i915_private *i915, int err);
 
 #else
 
@@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
 static inline struct i915_gpu_state *
 i915_first_error_state(struct drm_i915_private *i915)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 static inline void i915_reset_error_state(struct drm_i915_private *i915)
 {
 }
 
+static inline void i915_disable_error_state(struct drm_i915_private *i915,
+                                           int err)
+{
+}
+
 #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
 
 #endif /* _I915_GPU_ERROR_H_ */
index 7c491ea3d052aaccfc5eab69e2ea8f6b31f6813e..e31c27e45734ef19ae3764b5894da3320f4b2991 100644 (file)
@@ -2095,8 +2095,12 @@ enum i915_power_well_id {
 
 /* ICL PHY DFLEX registers */
 #define PORT_TX_DFLEXDPMLE1            _MMIO(0x1638C0)
-#define   DFLEXDPMLE1_DPMLETC_MASK(n)  (0xf << (4 * (n)))
-#define   DFLEXDPMLE1_DPMLETC(n, x)    ((x) << (4 * (n)))
+#define   DFLEXDPMLE1_DPMLETC_MASK(tc_port)    (0xf << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML0(tc_port)     (1 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML1_0(tc_port)   (3 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML3(tc_port)     (8 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML3_2(tc_port)   (12 << (4 * (tc_port)))
+#define   DFLEXDPMLE1_DPMLETC_ML3_0(tc_port)   (15 << (4 * (tc_port)))
 
 /* BXT PHY Ref registers */
 #define _PORT_REF_DW3_A                        0x16218C
@@ -4593,12 +4597,12 @@ enum {
 
 #define  DRM_DIP_ENABLE                        (1 << 28)
 #define  PSR_VSC_BIT_7_SET             (1 << 27)
-#define  VSC_SELECT_MASK               (0x3 << 26)
-#define  VSC_SELECT_SHIFT              26
-#define  VSC_DIP_HW_HEA_DATA           (0 << 26)
-#define  VSC_DIP_HW_HEA_SW_DATA                (1 << 26)
-#define  VSC_DIP_HW_DATA_SW_HEA                (2 << 26)
-#define  VSC_DIP_SW_HEA_DATA           (3 << 26)
+#define  VSC_SELECT_MASK               (0x3 << 25)
+#define  VSC_SELECT_SHIFT              25
+#define  VSC_DIP_HW_HEA_DATA           (0 << 25)
+#define  VSC_DIP_HW_HEA_SW_DATA                (1 << 25)
+#define  VSC_DIP_HW_DATA_SW_HEA                (2 << 25)
+#define  VSC_DIP_SW_HEA_DATA           (3 << 25)
 #define  VDIP_ENABLE_PPS               (1 << 24)
 
 /* Panel power sequencing */
index 769f3f5866611174cbabeca5e4d1fbb0711b9b86..ee3ca2de983b96ea52ffda2c794963f2b23d705d 100644 (file)
@@ -144,6 +144,9 @@ static const struct {
 /* HDMI N/CTS table */
 #define TMDS_297M 297000
 #define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
 static const struct {
        int sample_rate;
        int clock;
@@ -164,6 +167,20 @@ static const struct {
        { 176400, TMDS_297M, 18816, 247500 },
        { 192000, TMDS_296M, 23296, 281250 },
        { 192000, TMDS_297M, 20480, 247500 },
+       { 44100, TMDS_593M, 8918, 937500 },
+       { 44100, TMDS_594M, 9408, 990000 },
+       { 48000, TMDS_593M, 5824, 562500 },
+       { 48000, TMDS_594M, 6144, 594000 },
+       { 32000, TMDS_593M, 5824, 843750 },
+       { 32000, TMDS_594M, 3072, 445500 },
+       { 88200, TMDS_593M, 17836, 937500 },
+       { 88200, TMDS_594M, 18816, 990000 },
+       { 96000, TMDS_593M, 11648, 562500 },
+       { 96000, TMDS_594M, 12288, 594000 },
+       { 176400, TMDS_593M, 35672, 937500 },
+       { 176400, TMDS_594M, 37632, 990000 },
+       { 192000, TMDS_593M, 23296, 562500 },
+       { 192000, TMDS_594M, 24576, 594000 },
 };
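The new rows extend the table to the 594 MHz (and 594/1.001) TMDS clocks used by 4k@60 modes. Each row satisfies the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS; a quick arithmetic check of one entry:

/* sanity check for { 48000, TMDS_594M, 6144, 594000 }:
 *   128 * 48000               = 6144000
 *   594000000 * 6144 / 594000 = 6144000   -> relation holds
 * the TMDS_593M rows are the 594 MHz / 1.001 (59.94 Hz) variants.
 */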
 
 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
index 29075c763428055ddb3625a80b59643e694f3d76..8d74276029e621f9ae7e6db54c815be5d2d0c9d1 100644 (file)
@@ -2138,16 +2138,8 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
 static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
                                     int pixel_rate)
 {
-       if (INTEL_GEN(dev_priv) >= 10)
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return DIV_ROUND_UP(pixel_rate, 2);
-       else if (IS_GEMINILAKE(dev_priv))
-               /*
-                * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
-                * as a temporary workaround. Use a higher cdclk instead. (Note that
-                * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
-                * cdclk.)
-                */
-               return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
        else if (IS_GEN9(dev_priv) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return pixel_rate;
@@ -2543,14 +2535,8 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
 {
        int max_cdclk_freq = dev_priv->max_cdclk_freq;
 
-       if (INTEL_GEN(dev_priv) >= 10)
+       if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
                return 2 * max_cdclk_freq;
-       else if (IS_GEMINILAKE(dev_priv))
-               /*
-                * FIXME: Limiting to 99% as a temporary workaround. See
-                * intel_min_cdclk() for details.
-                */
-               return 2 * max_cdclk_freq * 99 / 100;
        else if (IS_GEN9(dev_priv) ||
                 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
                return max_cdclk_freq;
index 0ef0c6448d53a835fbdf5319a8010c64d613bd0f..01fa98299bae65a125862e57c307cdbce07c3d32 100644 (file)
@@ -474,7 +474,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
                        u8 eu_disabled_mask;
                        u32 n_disabled;
 
-                       if (!(sseu->subslice_mask[ss] & BIT(ss)))
+                       if (!(sseu->subslice_mask[s] & BIT(ss)))
                                /* skip disabled subslice */
                                continue;
 
index 9741cc419e1b2bc1f5eb4771ae75468f0099289d..c9878dd1f7cd04dd3a6a7b8877d8f5d95eafc620 100644 (file)
@@ -2890,6 +2890,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       intel_state->base.rotation = plane_config->rotation;
        intel_fill_fb_ggtt_view(&intel_state->view, fb,
                                intel_state->base.rotation);
        intel_state->color_plane[0].stride =
@@ -4850,8 +4851,31 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
  * chroma samples for both of the luma samples, and thus we don't
  * actually get the expected MPEG2 chroma siting convention :(
  * The same behaviour is observed on pre-SKL platforms as well.
+ *
+ * Theory behind the formula (note that we ignore sub-pixel
+ * source coordinates):
+ * s = source sample position
+ * d = destination sample position
+ *
+ * Downscaling 4:1:
+ * -0.5
+ * | 0.0
+ * | |     1.5 (initial phase)
+ * | |     |
+ * v v     v
+ * | s | s | s | s |
+ * |       d       |
+ *
+ * Upscaling 1:4:
+ * -0.5
+ * | -0.375 (initial phase)
+ * | |     0.0
+ * | |     |
+ * v v     v
+ * |       s       |
+ * | d | d | d | d |
  */
-u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
 {
        int phase = -0x8000;
        u16 trip = 0;
@@ -4859,6 +4883,15 @@ u16 skl_scaler_calc_phase(int sub, bool chroma_cosited)
        if (chroma_cosited)
                phase += (sub - 1) * 0x8000 / sub;
 
+       phase += scale / (2 * sub);
+
+       /*
+        * The hardware initial phase is limited to [-0.5:1.5].
+        * Since the max hardware scale factor is 3.0, we
+        * should never actually exceed 1.0 here.
+        */
+       WARN_ON(phase < -0x8000 || phase > 0x18000);
+
        if (phase < 0)
                phase = 0x10000 + phase;
        else
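With sub == 1 (no chroma cositing) the new formula collapses to phase = scale/2 - 0.5 in 16.16 fixed point (0x10000 == 1.0), reproducing the initial phases from the diagrams above:

/* worked examples for skl_scaler_calc_phase(1, scale, false):
 *   1:1,      scale = 0x10000: -0x8000 + 0x10000/2 =  0x0000 ->  0.0
 *   1:4 up,   scale = 0x4000:  -0x8000 + 0x4000/2  = -0x6000 -> -0.375
 *   4:1 down, scale = 0x40000: -0x8000 + 0x20000   =  0x18000 -> 1.5
 * all inside the hardware's [-0.5, 1.5] initial phase window.
 */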
@@ -5067,13 +5100,20 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
 
        if (crtc->config->pch_pfit.enabled) {
                u16 uv_rgb_hphase, uv_rgb_vphase;
+               int pfit_w, pfit_h, hscale, vscale;
                int id;
 
                if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
                        return;
 
-               uv_rgb_hphase = skl_scaler_calc_phase(1, false);
-               uv_rgb_vphase = skl_scaler_calc_phase(1, false);
+               pfit_w = (crtc->config->pch_pfit.size >> 16) & 0xFFFF;
+               pfit_h = crtc->config->pch_pfit.size & 0xFFFF;
+
+               hscale = (crtc->config->pipe_src_w << 16) / pfit_w;
+               vscale = (crtc->config->pipe_src_h << 16) / pfit_h;
+
+               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
 
                id = scaler_state->scaler_id;
                I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
@@ -7843,8 +7883,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
                        plane_config->tiling = I915_TILING_X;
                        fb->modifier = I915_FORMAT_MOD_X_TILED;
                }
+
+               if (val & DISPPLANE_ROTATE_180)
+                       plane_config->rotation = DRM_MODE_ROTATE_180;
        }
 
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
+           val & DISPPLANE_MIRROR)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
        pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
        fourcc = i9xx_format_to_fourcc(pixel_format);
        fb->format = drm_format_info(fourcc);
@@ -8913,6 +8960,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
                goto error;
        }
 
+       /*
+        * DRM_MODE_ROTATE_* is counter-clockwise to stay compatible with
+        * Xrandr, while i915 HW rotation is clockwise; that's why the
+        * cases are swapped here.
+        */
+       switch (val & PLANE_CTL_ROTATE_MASK) {
+       case PLANE_CTL_ROTATE_0:
+               plane_config->rotation = DRM_MODE_ROTATE_0;
+               break;
+       case PLANE_CTL_ROTATE_90:
+               plane_config->rotation = DRM_MODE_ROTATE_270;
+               break;
+       case PLANE_CTL_ROTATE_180:
+               plane_config->rotation = DRM_MODE_ROTATE_180;
+               break;
+       case PLANE_CTL_ROTATE_270:
+               plane_config->rotation = DRM_MODE_ROTATE_90;
+               break;
+       }
+
+       if (INTEL_GEN(dev_priv) >= 10 &&
+           val & PLANE_CTL_FLIP_HORIZONTAL)
+               plane_config->rotation |= DRM_MODE_REFLECT_X;
+
        base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
        plane_config->base = base;
 
@@ -12768,17 +12838,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
                        intel_check_cpu_fifo_underruns(dev_priv);
                        intel_check_pch_fifo_underruns(dev_priv);
 
-                       if (!new_crtc_state->active) {
-                               /*
-                                * Make sure we don't call initial_watermarks
-                                * for ILK-style watermark updates.
-                                *
-                                * No clue what this is supposed to achieve.
-                                */
-                               if (INTEL_GEN(dev_priv) >= 9)
-                                       dev_priv->display.initial_watermarks(intel_state,
-                                                                            to_intel_crtc_state(new_crtc_state));
-                       }
+                       /* FIXME unify this for all platforms */
+                       if (!new_crtc_state->active &&
+                           !HAS_GMCH_DISPLAY(dev_priv) &&
+                           dev_priv->display.initial_watermarks)
+                               dev_priv->display.initial_watermarks(intel_state,
+                                                                    to_intel_crtc_state(new_crtc_state));
                }
        }
 
@@ -14646,7 +14711,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
             fb->height < SKL_MIN_YUV_420_SRC_H ||
             (fb->width % 4) != 0 || (fb->height % 4) != 0)) {
                DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
-               return -EINVAL;
+               goto err;
        }
 
        for (i = 0; i < fb->format->num_planes; i++) {
@@ -15233,6 +15298,14 @@ retry:
                        ret = drm_atomic_add_affected_planes(state, crtc);
                        if (ret)
                                goto out;
+
+                       /*
+                        * FIXME hack to force a LUT update to avoid the
+                        * plane update forcing the pipe gamma on without
+                        * having a proper LUT loaded. Remove once we
+                        * have readout for pipe gamma enable.
+                        */
+                       crtc_state->color_mgmt_changed = true;
                }
        }
 
index 3fae4dab295f093892491ecd010970dc7fc28128..13f9b56a9ce7ca711467fc9309b720f8d9565661 100644 (file)
@@ -5102,19 +5102,13 @@ intel_dp_long_pulse(struct intel_connector *connector,
                 */
                status = connector_status_disconnected;
                goto out;
-       } else {
-               /*
-                * If display is now connected check links status,
-                * there has been known issues of link loss triggering
-                * long pulse.
-                *
-                * Some sinks (eg. ASUS PB287Q) seem to perform some
-                * weird HPD ping pong during modesets. So we can apparently
-                * end up with HPD going low during a modeset, and then
-                * going back up soon after. And once that happens we must
-                * retrain the link to get a picture. That's in case no
-                * userspace component reacted to intermittent HPD dip.
-                */
+       }
+
+       /*
+        * Some external monitors do not signal loss of link synchronization
+        * with an IRQ_HPD, so force a link status check.
+        */
+       if (!intel_dp_is_edp(intel_dp)) {
                struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
 
                intel_dp_retrain_link(encoder, ctx);
index 7f155b4f1a7d7ab9a3389181d411140951962d6e..a911691dbd0fdd1837c8cfadec09f8d39eb7ac7d 100644 (file)
@@ -77,7 +77,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
        pipe_config->pbn = mst_pbn;
 
        /* Zombie connectors can't have VCPI slots */
-       if (READ_ONCE(connector->registered)) {
+       if (!drm_connector_is_unregistered(connector)) {
                slots = drm_dp_atomic_find_vcpi_slots(state,
                                                      &intel_dp->mst_mgr,
                                                      port,
@@ -313,7 +313,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
        struct edid *edid;
        int ret;
 
-       if (!READ_ONCE(connector->registered))
+       if (drm_connector_is_unregistered(connector))
                return intel_connector_update_modes(connector, NULL);
 
        edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
@@ -329,7 +329,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct intel_dp *intel_dp = intel_connector->mst_port;
 
-       if (!READ_ONCE(connector->registered))
+       if (drm_connector_is_unregistered(connector))
                return connector_status_disconnected;
        return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
                                      intel_connector->port);
@@ -372,7 +372,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
        int bpp = 24; /* MST uses fixed bpp */
        int max_rate, mode_rate, max_lanes, max_link_clock;
 
-       if (!READ_ONCE(connector->registered))
+       if (drm_connector_is_unregistered(connector))
                return MODE_ERROR;
 
        if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -452,6 +452,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        if (!intel_connector)
                return NULL;
 
+       intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+       intel_connector->mst_port = intel_dp;
+       intel_connector->port = port;
+
        connector = &intel_connector->base;
        ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
                                 DRM_MODE_CONNECTOR_DisplayPort);
@@ -462,10 +466,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
 
        drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
 
-       intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
-       intel_connector->mst_port = intel_dp;
-       intel_connector->port = port;
-
        for_each_pipe(dev_priv, pipe) {
                struct drm_encoder *enc =
                        &intel_dp->mst_encoders[pipe]->base.base;
index f8dc84b2d2d3443dcd47232f013bddb4ae9731f8..db6fa1d0cbdae3efea1e3ddb1c3ce43bf5f8a10e 100644 (file)
@@ -547,6 +547,7 @@ struct intel_initial_plane_config {
        unsigned int tiling;
        int size;
        u32 base;
+       u8 rotation;
 };
 
 #define SKL_MIN_SRC_W 8
@@ -1646,7 +1647,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
 
-u16 skl_scaler_calc_phase(int sub, bool chroma_center);
+u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center);
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(const struct intel_crtc_state *crtc_state,
                  u32 pixel_format);
index 648a13c6043c0071ddd495424691d795b39b96a1..9a801813023728e2e0a05bb5feba1f8415eb3269 100644 (file)
@@ -228,7 +228,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
                drm_for_each_connector_iter(connector, &conn_iter) {
                        struct intel_connector *intel_connector = to_intel_connector(connector);
 
-                       if (intel_connector->encoder->hpd_pin == pin) {
+                       /* Don't check MST ports; they don't have pins */
+                       if (!intel_connector->mst_port &&
+                           intel_connector->encoder->hpd_pin == pin) {
                                if (connector->polled != intel_connector->polled)
                                        DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
                                                         connector->name);
@@ -395,37 +397,54 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
        struct intel_encoder *encoder;
        bool storm_detected = false;
        bool queue_dig = false, queue_hp = false;
+       u32 long_hpd_pulse_mask = 0;
+       u32 short_hpd_pulse_mask = 0;
+       enum hpd_pin pin;
 
        if (!pin_mask)
                return;
 
        spin_lock(&dev_priv->irq_lock);
+
+       /*
+        * Determine whether ->hpd_pulse() exists for each pin, and
+        * whether we have a short or a long pulse. This is needed
+        * as each pin may have up to two encoders (HDMI and DP) and
+        * only one of them (DP) will have ->hpd_pulse().
+        */
        for_each_intel_encoder(&dev_priv->drm, encoder) {
-               enum hpd_pin pin = encoder->hpd_pin;
                bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+               enum port port = encoder->port;
+               bool long_hpd;
 
+               pin = encoder->hpd_pin;
                if (!(BIT(pin) & pin_mask))
                        continue;
 
-               if (has_hpd_pulse) {
-                       bool long_hpd = long_mask & BIT(pin);
-                       enum port port = encoder->port;
+               if (!has_hpd_pulse)
+                       continue;
 
-                       DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
-                                        long_hpd ? "long" : "short");
-                       /*
-                        * For long HPD pulses we want to have the digital queue happen,
-                        * but we still want HPD storm detection to function.
-                        */
-                       queue_dig = true;
-                       if (long_hpd) {
-                               dev_priv->hotplug.long_port_mask |= (1 << port);
-                       } else {
-                               /* for short HPD just trigger the digital queue */
-                               dev_priv->hotplug.short_port_mask |= (1 << port);
-                               continue;
-                       }
+               long_hpd = long_mask & BIT(pin);
+
+               DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
+                                long_hpd ? "long" : "short");
+               queue_dig = true;
+
+               if (long_hpd) {
+                       long_hpd_pulse_mask |= BIT(pin);
+                       dev_priv->hotplug.long_port_mask |= BIT(port);
+               } else {
+                       short_hpd_pulse_mask |= BIT(pin);
+                       dev_priv->hotplug.short_port_mask |= BIT(port);
                }
+       }
+
+       /* Now process each pin just once */
+       for_each_hpd_pin(pin) {
+               bool long_hpd;
+
+               if (!(BIT(pin) & pin_mask))
+                       continue;
 
                if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
                        /*
@@ -442,11 +461,22 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
                if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
                        continue;
 
-               if (!has_hpd_pulse) {
+               /*
+                * Delegate to ->hpd_pulse() if one of the encoders for this
+                * pin has it, otherwise let the hotplug_work deal with this
+                * pin directly.
+                */
+               if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) {
+                       long_hpd = long_hpd_pulse_mask & BIT(pin);
+               } else {
                        dev_priv->hotplug.event_bits |= BIT(pin);
+                       long_hpd = true;
                        queue_hp = true;
                }
 
+               if (!long_hpd)
+                       continue;
+
                if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
                        dev_priv->hotplug.event_bits &= ~BIT(pin);
                        storm_detected = true;
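
The two-pass structure above is the key change: pass one only classifies each asserted pin as a long or short pulse (and routes ports with ->hpd_pulse() to the digital queue), while pass two runs storm detection exactly once per pin, even when a pin is shared by an HDMI and a DP encoder. A minimal sketch of the same pattern, with simplified types standing in for the i915 internals:

        /* Illustrative two-pass HPD sketch; not the driver code itself. */
        static void hpd_two_pass(u32 pin_mask, u32 long_mask)
        {
                u32 long_pulse = 0, short_pulse = 0;
                unsigned int pin;

                for (pin = 0; pin < 32; pin++) {        /* pass 1: classify */
                        if (!(pin_mask & BIT(pin)))
                                continue;
                        if (long_mask & BIT(pin))
                                long_pulse |= BIT(pin);
                        else
                                short_pulse |= BIT(pin);
                }

                for (pin = 0; pin < 32; pin++) {        /* pass 2: act once per pin */
                        if (!(pin_mask & BIT(pin)))
                                continue;
                        /* storm detection and queueing happen here, exactly once */
                }
        }
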
index cdf19553ffacd28f1097bb2096b8cc35d4654b84..5d5336fbe7b05836b7bedc28bffbfef9e6b08b4f 100644 (file)
@@ -297,8 +297,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
        lpe_audio_platdev_destroy(dev_priv);
 
        irq_free_desc(dev_priv->lpe_audio.irq);
-}
 
+       dev_priv->lpe_audio.irq = -1;
+       dev_priv->lpe_audio.platdev = NULL;
+}
 
 /**
  * intel_lpe_audio_notify() - notify lpe audio event
index 43957bb37a42249cfb75793fd688f191eaef2c98..37c94a54efcbb2501509b5a838e61d983985d8b2 100644 (file)
@@ -424,7 +424,8 @@ static u64 execlists_update_context(struct i915_request *rq)
 
        reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
 
-       /* True 32b PPGTT with dynamic page allocation: update PDP
+       /*
+        * True 32b PPGTT with dynamic page allocation: update PDP
         * registers and point the unallocated PDPs to scratch page.
         * PML4 is allocated during ppgtt init, so this is not needed
         * in 48-bit mode.
@@ -432,6 +433,17 @@ static u64 execlists_update_context(struct i915_request *rq)
        if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
                execlists_update_context_pdps(ppgtt, reg_state);
 
+       /*
+        * Make sure the context image is complete before we submit it to HW.
+        *
+        * Ostensibly, writes (including the WCB) should be flushed prior to
+        * an uncached write such as our mmio register access, but the
+        * empirical evidence (esp. on Braswell) suggests that the WC write
+        * into memory may not be visible to the HW prior to the completion
+        * of the UC register write, and that we may begin execution from the
+        * context before its image is complete, leading to invalid PD chasing.
+        */
+       wmb();
        return ce->lrc_desc;
 }
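
The wmb() added above follows the standard pattern for publishing write-combined memory to hardware before an uncached doorbell write. A rough sketch of that ordering, where ctx_image, tail_slot and doorbell are placeholders rather than i915 symbols:

        ctx_image[tail_slot] = new_tail;   /* WC write into the context image */
        wmb();                             /* drain the write-combining buffers */
        writel(submit_val, doorbell);      /* UC mmio write that consumes it */
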
 
index 1db9b8328275038f93661c0e743bc0598303d25b..3fe358db12768f23a2fcf7fe971821c4cac3f1b9 100644 (file)
@@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
        uint32_t method1, method2;
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
 {
        int cpp;
 
+       if (mem_value == 0)
+               return U32_MAX;
+
        if (!intel_wm_plane_visible(cstate, pstate))
                return 0;
 
@@ -2881,8 +2890,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
                 * any underrun. If we are not able to get the DIMM info,
                 * assume a 16GB DIMM to avoid any underrun.
                 */
-               if (!dev_priv->dram_info.valid_dimm ||
-                   dev_priv->dram_info.is_16gb_dimm)
+               if (dev_priv->dram_info.is_16gb_dimm)
                        wm[0] += 1;
 
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -3009,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 }
 
+static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv)
+{
+       /*
+        * On some SNB machines (Thinkpad X220 Tablet at least)
+        * LP3 usage can cause vblank interrupts to be lost.
+        * The DEIIR bit will go high but it looks like the CPU
+        * never gets interrupted.
+        *
+        * It's not clear whether other interrupt sources could
+        * be affected or if this is somehow limited to vblank
+        * interrupts only. To play it safe, we disable LP3
+        * watermarks entirely.
+        */
+       if (dev_priv->wm.pri_latency[3] == 0 &&
+           dev_priv->wm.spr_latency[3] == 0 &&
+           dev_priv->wm.cur_latency[3] == 0)
+               return;
+
+       dev_priv->wm.pri_latency[3] = 0;
+       dev_priv->wm.spr_latency[3] = 0;
+       dev_priv->wm.cur_latency[3] = 0;
+
+       DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n");
+       intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency);
+       intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
+       intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
+}
+
 static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
 {
        intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency);
@@ -3025,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
        intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency);
        intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency);
 
-       if (IS_GEN6(dev_priv))
+       if (IS_GEN6(dev_priv)) {
                snb_wm_latency_quirk(dev_priv);
+               snb_wm_lp3_irq_quirk(dev_priv);
+       }
 }
 
 static void skl_setup_wm_latency(struct drm_i915_private *dev_priv)
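
Two of the changes in this file cooperate: snb_wm_lp3_irq_quirk() zeroes the LP3 latencies on gen6, and the new mem_value == 0 checks in ilk_compute_{pri,spr,cur}_wm() turn a zeroed latency into a U32_MAX watermark. Since U32_MAX can never fit the hardware's watermark fields, validation rejects the level and LP3 is effectively disabled. A sketch of why the sentinel works, with a hypothetical register limit:

        /* Illustrative only: U32_MAX always exceeds the register maximum,
         * so a level computed from a zero latency is dropped. */
        static bool wm_level_usable(u32 wm_value, u32 reg_max)
        {
                return wm_value <= reg_max;   /* false for U32_MAX */
        }
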
index d0ef50bf930ad747abe7b4510521f8ad79923ba5..187bb0ceb4ac4324b3c12ab72635d4a776b2129c 100644 (file)
@@ -91,6 +91,7 @@ static int
 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 {
        u32 cmd, *cs;
+       int i;
 
        /*
         * read/write caches:
@@ -127,12 +128,45 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
                        cmd |= MI_INVALIDATE_ISP;
        }
 
-       cs = intel_ring_begin(rq, 2);
+       i = 2;
+       if (mode & EMIT_INVALIDATE)
+               i += 20;
+
+       cs = intel_ring_begin(rq, i);
        if (IS_ERR(cs))
                return PTR_ERR(cs);
 
        *cs++ = cmd;
-       *cs++ = MI_NOOP;
+
+       /*
+        * A random delay to let the CS invalidate take effect? Without this
+        * delay, the GPU relocation path fails as the CS does not see
+        * the updated contents. Just as important, if we apply the flushes
+        * to the EMIT_FLUSH branch (i.e. immediately after the relocation
+        * write and before the invalidate on the next batch), the relocations
+        * still fail. This implies that it is a delay following the
+        * invalidate that is required to reset the caches, as opposed to a
+        * delay to ensure the memory is written.
+        */
+       if (mode & EMIT_INVALIDATE) {
+               *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
+                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = 0;
+               *cs++ = 0;
+
+               for (i = 0; i < 12; i++)
+                       *cs++ = MI_FLUSH;
+
+               *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
+               *cs++ = i915_ggtt_offset(rq->engine->scratch) |
+                       PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = 0;
+               *cs++ = 0;
+       }
+
+       *cs++ = cmd;
+
        intel_ring_advance(rq, cs);
 
        return 0;
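
The ring-space request is sized to the stream actually emitted; spelling out the arithmetic:

        /* dword budget for the EMIT_INVALIDATE path:
         *   1 (cmd) + 4 (PIPE_CONTROL) + 12 (MI_FLUSH) + 4 (PIPE_CONTROL) + 1 (cmd)
         *   = 22 = 2 + 20, matching "i = 2; if (mode & EMIT_INVALIDATE) i += 20".
         */
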
index 0fdabce647ab64be1751da09ed705de3889ad969..44e4491a4918994b80ddde101042368263abf8d1 100644 (file)
@@ -2748,6 +2748,12 @@ static const struct i915_power_well_desc icl_power_wells[] = {
                        .hsw.has_fuses = true,
                },
        },
+       {
+               .name = "DC off",
+               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
        {
                .name = "power well 2",
                .domains = ICL_PW_2_POWER_DOMAINS,
@@ -2759,12 +2765,6 @@ static const struct i915_power_well_desc icl_power_wells[] = {
                        .hsw.has_fuses = true,
                },
        },
-       {
-               .name = "DC off",
-               .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
        {
                .name = "power well 3",
                .domains = ICL_PW_3_POWER_DOMAINS,
@@ -3176,8 +3176,7 @@ static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
 void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
                            u8 req_slices)
 {
-       u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
-       u32 val;
+       const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
        bool ret;
 
        if (req_slices > intel_dbuf_max_slices(dev_priv)) {
@@ -3188,7 +3187,6 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
        if (req_slices == hw_enabled_slices || req_slices == 0)
                return;
 
-       val = I915_READ(DBUF_CTL_S2);
        if (req_slices > hw_enabled_slices)
                ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
        else
index 5fd2f7bf3927191a22cdeba959c5fd7c4f6f512a..d3090a7537bb9576c89f69d17541eadbf9353d8c 100644 (file)
@@ -302,13 +302,65 @@ skl_plane_max_stride(struct intel_plane *plane,
                return min(8192 * cpp, 32768);
 }
 
+static void
+skl_program_scaler(struct intel_plane *plane,
+                  const struct intel_crtc_state *crtc_state,
+                  const struct intel_plane_state *plane_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       int scaler_id = plane_state->scaler_id;
+       const struct intel_scaler *scaler =
+               &crtc_state->scaler_state.scalers[scaler_id];
+       int crtc_x = plane_state->base.dst.x1;
+       int crtc_y = plane_state->base.dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+       u16 y_hphase, uv_rgb_hphase;
+       u16 y_vphase, uv_rgb_vphase;
+       int hscale, vscale;
+
+       hscale = drm_rect_calc_hscale(&plane_state->base.src,
+                                     &plane_state->base.dst,
+                                     0, INT_MAX);
+       vscale = drm_rect_calc_vscale(&plane_state->base.src,
+                                     &plane_state->base.dst,
+                                     0, INT_MAX);
+
+       /* TODO: handle sub-pixel coordinates */
+       if (plane_state->base.fb->format->format == DRM_FORMAT_NV12) {
+               y_hphase = skl_scaler_calc_phase(1, hscale, false);
+               y_vphase = skl_scaler_calc_phase(1, vscale, false);
+
+               /* MPEG2 chroma siting convention */
+               uv_rgb_hphase = skl_scaler_calc_phase(2, hscale, true);
+               uv_rgb_vphase = skl_scaler_calc_phase(2, vscale, false);
+       } else {
+               /* not used */
+               y_hphase = 0;
+               y_vphase = 0;
+
+               uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
+               uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
+       }
+
+       I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+                     PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+       I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+       I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
+                     PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+       I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
+                     PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+       I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+       I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+}
+
 void
 skl_update_plane(struct intel_plane *plane,
                 const struct intel_crtc_state *crtc_state,
                 const struct intel_plane_state *plane_state)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       const struct drm_framebuffer *fb = plane_state->base.fb;
        enum plane_id plane_id = plane->id;
        enum pipe pipe = plane->pipe;
        u32 plane_ctl = plane_state->ctl;
@@ -318,8 +370,6 @@ skl_update_plane(struct intel_plane *plane,
        u32 aux_stride = skl_plane_stride(plane_state, 1);
        int crtc_x = plane_state->base.dst.x1;
        int crtc_y = plane_state->base.dst.y1;
-       uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
-       uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
        uint32_t x = plane_state->color_plane[0].x;
        uint32_t y = plane_state->color_plane[0].y;
        uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
@@ -329,8 +379,6 @@ skl_update_plane(struct intel_plane *plane,
        /* Sizes are 0 based */
        src_w--;
        src_h--;
-       crtc_w--;
-       crtc_h--;
 
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
 
@@ -353,41 +401,8 @@ skl_update_plane(struct intel_plane *plane,
                      (plane_state->color_plane[1].y << 16) |
                      plane_state->color_plane[1].x);
 
-       /* program plane scaler */
        if (plane_state->scaler_id >= 0) {
-               int scaler_id = plane_state->scaler_id;
-               const struct intel_scaler *scaler =
-                       &crtc_state->scaler_state.scalers[scaler_id];
-               u16 y_hphase, uv_rgb_hphase;
-               u16 y_vphase, uv_rgb_vphase;
-
-               /* TODO: handle sub-pixel coordinates */
-               if (fb->format->format == DRM_FORMAT_NV12) {
-                       y_hphase = skl_scaler_calc_phase(1, false);
-                       y_vphase = skl_scaler_calc_phase(1, false);
-
-                       /* MPEG2 chroma siting convention */
-                       uv_rgb_hphase = skl_scaler_calc_phase(2, true);
-                       uv_rgb_vphase = skl_scaler_calc_phase(2, false);
-               } else {
-                       /* not used */
-                       y_hphase = 0;
-                       y_vphase = 0;
-
-                       uv_rgb_hphase = skl_scaler_calc_phase(1, false);
-                       uv_rgb_vphase = skl_scaler_calc_phase(1, false);
-               }
-
-               I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
-                             PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
-               I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
-               I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
-                             PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
-               I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
-                             PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
-               I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
-               I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
-                             ((crtc_w + 1) << 16)|(crtc_h + 1));
+               skl_program_scaler(plane, crtc_state, plane_state);
 
                I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
        } else {
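
The extra scale argument now passed to skl_scaler_calc_phase() comes from drm_rect_calc_hscale()/drm_rect_calc_vscale(), which return the src/dst ratio as a 16.16 fixed-point number (plane source rectangles are already in 16.16). A quick sketch of that representation, with hypothetical names:

        /* 16.16 fixed point: scale = src / dst.
         * e.g. src_w = 1920 << 16, dst_w = 960 px -> 131072 == 2 << 16 (2x downscale). */
        static int calc_hscale_fp16(int src_w_fp16, int dst_w_px)
        {
                return src_w_fp16 / dst_w_px;
        }
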
index 8d03f64eabd71d449ebedca890135d69dabb1a74..5c22f2c8d4cfee20337d484a9eb227c3c6adcb34 100644 (file)
@@ -551,7 +551,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
                        err = igt_check_page_sizes(vma);
 
                        if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
-                               pr_err("page_sizes.gtt=%u, expected %lu\n",
+                               pr_err("page_sizes.gtt=%u, expected %llu\n",
                                       vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
                                err = -EINVAL;
                        }
index 8e2e269db97e82917b299afbe680fc008b8c90a1..127d8151367177dea04bf5c121b2331e46d1accc 100644 (file)
@@ -1337,7 +1337,7 @@ static int igt_gtt_reserve(void *arg)
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
-                       pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+                       pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
@@ -1386,7 +1386,7 @@ static int igt_gtt_reserve(void *arg)
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != total ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
-                       pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+                       pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               total, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
@@ -1430,7 +1430,7 @@ static int igt_gtt_reserve(void *arg)
                GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
                if (vma->node.start != offset ||
                    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
-                       pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+                       pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
                               vma->node.start, vma->node.size,
                               offset, 2*I915_GTT_PAGE_SIZE);
                        err = -EINVAL;
index 05520202c96778c1401dac07a9b9ff768ba97b91..191b314f9e9e5ce92284beec9f4d27f54bdcc811 100644 (file)
@@ -45,6 +45,7 @@ struct meson_crtc {
        struct drm_crtc base;
        struct drm_pending_vblank_event *event;
        struct meson_drm *priv;
+       bool enabled;
 };
 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
 
@@ -80,8 +81,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
 
 };
 
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
-                                    struct drm_crtc_state *old_state)
+static void meson_crtc_enable(struct drm_crtc *crtc)
 {
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        struct drm_crtc_state *crtc_state = crtc->state;
@@ -101,6 +101,22 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
        writel_bits_relaxed(VPP_POSTBLEND_ENABLE, VPP_POSTBLEND_ENABLE,
                            priv->io_base + _REG(VPP_MISC));
 
+       drm_crtc_vblank_on(crtc);
+
+       meson_crtc->enabled = true;
+}
+
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_state)
+{
+       struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+       struct meson_drm *priv = meson_crtc->priv;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (!meson_crtc->enabled)
+               meson_crtc_enable(crtc);
+
        priv->viu.osd1_enabled = true;
 }
 
@@ -110,6 +126,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        struct meson_drm *priv = meson_crtc->priv;
 
+       drm_crtc_vblank_off(crtc);
+
        priv->viu.osd1_enabled = false;
        priv->viu.osd1_commit = false;
 
@@ -124,6 +142,8 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
 
                crtc->state->event = NULL;
        }
+
+       meson_crtc->enabled = false;
 }
 
 static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -132,6 +152,9 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
        struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
        unsigned long flags;
 
+       if (crtc->state->enable && !meson_crtc->enabled)
+               meson_crtc_enable(crtc);
+
        if (crtc->state->event) {
                WARN_ON(drm_crtc_vblank_get(crtc) != 0);
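
The new enabled flag lets atomic_begin bring the CRTC back up on the first commit after a reset, while keeping atomic_enable idempotent; condensed, the guard is simply:

        /* Sketch of the enable-once guard used above. */
        if (crtc->state->enable && !meson_crtc->enabled)
                meson_crtc_enable(crtc);   /* runs at most once per enable cycle */
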
 
index df7247cd93f98f9f237721bb1eddd104cbbdc345..2cb2ad26d71670c387b144dc34668567f1fa9c34 100644 (file)
@@ -706,6 +706,7 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
        .reg_read = meson_dw_hdmi_reg_read,
        .reg_write = meson_dw_hdmi_reg_write,
        .max_register = 0x10000,
+       .fast_io = true,
 };
 
 static bool meson_hdmi_connector_is_available(struct device *dev)
index 514245e69b3847d1dc1d5f96249d5e49f849a4ca..be76f3d64bf2e7e1c85142a7d159228bf5905e6a 100644 (file)
@@ -71,6 +71,7 @@
  */
 
 /* HHI Registers */
+#define HHI_GCLK_MPEG2         0x148 /* 0x52 offset in data sheet */
 #define HHI_VDAC_CNTL0         0x2F4 /* 0xbd offset in data sheet */
 #define HHI_VDAC_CNTL1         0x2F8 /* 0xbe offset in data sheet */
 #define HHI_HDMI_PHY_CNTL0     0x3a0 /* 0xe8 offset in data sheet */
@@ -714,6 +715,7 @@ struct meson_hdmi_venc_vic_mode {
        { 5, &meson_hdmi_encp_mode_1080i60 },
        { 20, &meson_hdmi_encp_mode_1080i50 },
        { 32, &meson_hdmi_encp_mode_1080p24 },
+       { 33, &meson_hdmi_encp_mode_1080p50 },
        { 34, &meson_hdmi_encp_mode_1080p30 },
        { 31, &meson_hdmi_encp_mode_1080p50 },
        { 16, &meson_hdmi_encp_mode_1080p60 },
@@ -854,6 +856,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
        unsigned int sof_lines;
        unsigned int vsync_lines;
 
+       /* Use VENCI for 480i and 576i and double HDMI pixels */
+       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
+               hdmi_repeat = true;
+               use_enci = true;
+               venc_hdmi_latency = 1;
+       }
+
        if (meson_venc_hdmi_supported_vic(vic)) {
                vmode = meson_venc_hdmi_get_vic_vmode(vic);
                if (!vmode) {
@@ -865,13 +874,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
        } else {
                meson_venc_hdmi_get_dmt_vmode(mode, &vmode_dmt);
                vmode = &vmode_dmt;
-       }
-
-       /* Use VENCI for 480i and 576i and double HDMI pixels */
-       if (mode->flags & DRM_MODE_FLAG_DBLCLK) {
-               hdmi_repeat = true;
-               use_enci = true;
-               venc_hdmi_latency = 1;
+               use_enci = false;
        }
 
        /* Repeat VENC pixels for 480/576i/p, 720p50/60 and 1080p50/60 */
@@ -1529,10 +1532,12 @@ unsigned int meson_venci_get_field(struct meson_drm *priv)
 void meson_venc_enable_vsync(struct meson_drm *priv)
 {
        writel_relaxed(2, priv->io_base + _REG(VENC_INTCTRL));
+       regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), BIT(25));
 }
 
 void meson_venc_disable_vsync(struct meson_drm *priv)
 {
+       regmap_update_bits(priv->hhi, HHI_GCLK_MPEG2, BIT(25), 0);
        writel_relaxed(0, priv->io_base + _REG(VENC_INTCTRL));
 }
 
index 6bcfa527c1801045569496712c904b3a26f8c518..26a0857878bfd520fe3ebe43cfa85c60534ff66a 100644 (file)
@@ -184,18 +184,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
        if (lut_sel == VIU_LUT_OSD_OETF) {
                writel(0, priv->io_base + _REG(addr_port));
 
-               for (i = 0; i < 20; i++)
+               for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
                        writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
                                priv->io_base + _REG(data_port));
 
                writel(r_map[OSD_OETF_LUT_SIZE - 1] | (g_map[0] << 16),
                        priv->io_base + _REG(data_port));
 
-               for (i = 0; i < 20; i++)
+               for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
                        writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
                                priv->io_base + _REG(data_port));
 
-               for (i = 0; i < 20; i++)
+               for (i = 0; i < (OSD_OETF_LUT_SIZE / 2); i++)
                        writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
                                priv->io_base + _REG(data_port));
 
@@ -211,18 +211,18 @@ void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
        } else if (lut_sel == VIU_LUT_OSD_EOTF) {
                writel(0, priv->io_base + _REG(addr_port));
 
-               for (i = 0; i < 20; i++)
+               for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
                        writel(r_map[i * 2] | (r_map[i * 2 + 1] << 16),
                                priv->io_base + _REG(data_port));
 
                writel(r_map[OSD_EOTF_LUT_SIZE - 1] | (g_map[0] << 16),
                        priv->io_base + _REG(data_port));
 
-               for (i = 0; i < 20; i++)
+               for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
                        writel(g_map[i * 2 + 1] | (g_map[i * 2 + 2] << 16),
                                priv->io_base + _REG(data_port));
 
-               for (i = 0; i < 20; i++)
+               for (i = 0; i < (OSD_EOTF_LUT_SIZE / 2); i++)
                        writel(b_map[i * 2] | (b_map[i * 2 + 1] << 16),
                                priv->io_base + _REG(data_port));
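
The rewritten loop bounds encode how the LUT is streamed: two entries per 32-bit data-port write, with the odd final entry of one color channel sharing a write with the first entry of the next. Assuming, say, a 41-entry OETF table, SIZE/2 = 20 pairs cover entries 0..39 and entry 40 straddles into g_map[0] — which is what the hard-coded 20 happened to match, and what a differently sized EOTF table broke. A hedged sketch of the packing:

        /* Illustrative packing for an odd-sized LUT (sizes are assumptions). */
        static void write_lut(void __iomem *data_port, const u32 *map,
                              const u32 *next_map, int lut_size)
        {
                int i;

                for (i = 0; i < lut_size / 2; i++)   /* entries 0 .. lut_size-2 */
                        writel(map[2 * i] | (map[2 * i + 1] << 16), data_port);

                /* the odd final entry pairs with the next channel's first */
                writel(map[lut_size - 1] | (next_map[0] << 16), data_port);
        }
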
 
index d4530d60767b816605e9edcc2be921c5425e8c63..ca169f013a14efb4bbe5e26609c8152312ec7493 100644 (file)
@@ -1594,7 +1594,6 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
                                NULL);
 
        drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
-       plane->crtc = crtc;
 
        /* save user friendly CRTC name for later */
        snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
index 96cdf06e7da21d8f5bf576014f149f2efbbc7298..d31d8281424efb371cf04cfe82220ef3959828fa 100644 (file)
@@ -488,8 +488,6 @@ static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
 
        drm_encoder_cleanup(drm_enc);
        mutex_destroy(&dpu_enc->enc_lock);
-
-       kfree(dpu_enc);
 }
 
 void dpu_encoder_helper_split_config(
index bfcd165e96dfe98d8f6ad16cdadc19017c3e7042..d743e7ca6a3c8b2e83e5ccc619e0f3d6070ff4da 100644 (file)
@@ -216,7 +216,7 @@ static const struct dpu_format dpu_format_map[] = {
        INTERLEAVED_RGB_FMT(XBGR8888,
                COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
                C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
-               true, 4, 0,
+               false, 4, 0,
                DPU_FETCH_LINEAR, 1),
 
        INTERLEAVED_RGB_FMT(RGBA8888,
index 4c03f0b7343ed655c60111be4d09249bde463b28..41bec570c51848f24f378d0982cb987860fd0047 100644 (file)
@@ -39,6 +39,8 @@
 #define DSI_PIXEL_PLL_CLK              1
 #define NUM_PROVIDED_CLKS              2
 
+#define VCO_REF_CLK_RATE               19200000
+
 struct dsi_pll_regs {
        u32 pll_prop_gain_rate;
        u32 pll_lockdet_rate;
@@ -316,7 +318,7 @@ static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
            parent_rate);
 
        pll_10nm->vco_current_rate = rate;
-       pll_10nm->vco_ref_clk_rate = parent_rate;
+       pll_10nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
 
        dsi_pll_setup_config(pll_10nm);
 
index c79659ca570655da77888052fc47a16bc53cf409..adbdce3aeda0039f7779059687774924c7d6561f 100644 (file)
@@ -332,6 +332,12 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
                goto fail;
        }
 
+       ret = msm_hdmi_hpd_enable(hdmi->connector);
+       if (ret < 0) {
+               DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
+               goto fail;
+       }
+
        encoder->bridge = hdmi->bridge;
 
        priv->bridges[priv->num_bridges++]       = hdmi->bridge;
@@ -571,7 +577,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
        struct drm_device *drm = dev_get_drvdata(master);
        struct msm_drm_private *priv = drm->dev_private;
-       static struct hdmi_platform_config *hdmi_cfg;
+       struct hdmi_platform_config *hdmi_cfg;
        struct hdmi *hdmi;
        struct device_node *of_node = dev->of_node;
        int i, err;
index accc9a61611d35bf9718d66fe09ce6c6e40a549f..5c5df6ab2a573421726a5ca69fbf619841c8303a 100644 (file)
@@ -245,6 +245,7 @@ void msm_hdmi_bridge_destroy(struct drm_bridge *bridge);
 
 void msm_hdmi_connector_irq(struct drm_connector *connector);
 struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi);
+int msm_hdmi_hpd_enable(struct drm_connector *connector);
 
 /*
  * i2c adapter for ddc:
index e9c9a0af508e8c41bc12e91fc13d2f23b5041f33..30e908dfded7ed888267d2c7a4a8211764fc6a22 100644 (file)
@@ -167,8 +167,9 @@ static void enable_hpd_clocks(struct hdmi *hdmi, bool enable)
        }
 }
 
-static int hpd_enable(struct hdmi_connector *hdmi_connector)
+int msm_hdmi_hpd_enable(struct drm_connector *connector)
 {
+       struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
        struct hdmi *hdmi = hdmi_connector->hdmi;
        const struct hdmi_platform_config *config = hdmi->config;
        struct device *dev = &hdmi->pdev->dev;
@@ -450,7 +451,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
 {
        struct drm_connector *connector = NULL;
        struct hdmi_connector *hdmi_connector;
-       int ret;
 
        hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
        if (!hdmi_connector)
@@ -471,12 +471,6 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
        connector->interlace_allowed = 0;
        connector->doublescan_allowed = 0;
 
-       ret = hpd_enable(hdmi_connector);
-       if (ret) {
-               dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
-               return ERR_PTR(ret);
-       }
-
        drm_connector_attach_encoder(connector, hdmi->encoder);
 
        return connector;
index 4bcdeca7479db6a2481c2b6e8ad38defe7f280de..2088a20eb27024f8e868d17b1eb04343a3ad37bb 100644 (file)
@@ -34,7 +34,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
                if (!new_crtc_state->active)
                        continue;
 
+               if (drm_crtc_vblank_get(crtc))
+                       continue;
+
                kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+               drm_crtc_vblank_put(crtc);
        }
 }
 
index f0da0d3c8a80f7cf9ab5082095aed6df3e3c9529..d756436c1fcd3293f40db2d9efda018603c496e4 100644 (file)
@@ -84,7 +84,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
-               return ret;
+               goto free_priv;
 
        pm_runtime_get_sync(&gpu->pdev->dev);
        show_priv->state = gpu->funcs->gpu_state_get(gpu);
@@ -94,13 +94,20 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
 
        if (IS_ERR(show_priv->state)) {
                ret = PTR_ERR(show_priv->state);
-               kfree(show_priv);
-               return ret;
+               goto free_priv;
        }
 
        show_priv->dev = dev;
 
-       return single_open(file, msm_gpu_show, show_priv);
+       ret = single_open(file, msm_gpu_show, show_priv);
+       if (ret)
+               goto free_priv;
+
+       return 0;
+
+free_priv:
+       kfree(show_priv);
+       return ret;
 }
 
 static const struct file_operations msm_gpu_fops = {
index 4904d0d414094f7f7c6bdb79226bf7430f5f99aa..dcff812c63d0739ee3dd867e63a503f1bee5e1f1 100644 (file)
@@ -553,17 +553,18 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                        kthread_run(kthread_worker_fn,
                                &priv->disp_thread[i].worker,
                                "crtc_commit:%d", priv->disp_thread[i].crtc_id);
-               ret = sched_setscheduler(priv->disp_thread[i].thread,
-                                                       SCHED_FIFO, &param);
-               if (ret)
-                       pr_warn("display thread priority update failed: %d\n",
-                                                                       ret);
-
                if (IS_ERR(priv->disp_thread[i].thread)) {
                        dev_err(dev, "failed to create crtc_commit kthread\n");
                        priv->disp_thread[i].thread = NULL;
+                       goto err_msm_uninit;
                }
 
+               ret = sched_setscheduler(priv->disp_thread[i].thread,
+                                        SCHED_FIFO, &param);
+               if (ret)
+                       dev_warn(dev, "disp_thread set priority failed: %d\n",
+                                ret);
+
                /* initialize event thread */
                priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
                kthread_init_worker(&priv->event_thread[i].worker);
@@ -572,6 +573,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
                        kthread_run(kthread_worker_fn,
                                &priv->event_thread[i].worker,
                                "crtc_event:%d", priv->event_thread[i].crtc_id);
+               if (IS_ERR(priv->event_thread[i].thread)) {
+                       dev_err(dev, "failed to create crtc_event kthread\n");
+                       priv->event_thread[i].thread = NULL;
+                       goto err_msm_uninit;
+               }
+
                /*
                 * The event thread should also run at the same priority as
                 * disp_thread because it is handling frame_done events. A
                 * lower priority event thread and a higher priority
                 * disp_thread may cause a failure at the crtc commit level.
                 */
                ret = sched_setscheduler(priv->event_thread[i].thread,
-                                                       SCHED_FIFO, &param);
+                                        SCHED_FIFO, &param);
                if (ret)
-                       pr_warn("display event thread priority update failed: %d\n",
-                                                                       ret);
-
-               if (IS_ERR(priv->event_thread[i].thread)) {
-                       dev_err(dev, "failed to create crtc_event kthread\n");
-                       priv->event_thread[i].thread = NULL;
-               }
-
-               if ((!priv->disp_thread[i].thread) ||
-                               !priv->event_thread[i].thread) {
-                       /* clean up previously created threads if any */
-                       for ( ; i >= 0; i--) {
-                               if (priv->disp_thread[i].thread) {
-                                       kthread_stop(
-                                               priv->disp_thread[i].thread);
-                                       priv->disp_thread[i].thread = NULL;
-                               }
-
-                               if (priv->event_thread[i].thread) {
-                                       kthread_stop(
-                                               priv->event_thread[i].thread);
-                                       priv->event_thread[i].thread = NULL;
-                               }
-                       }
-                       goto err_msm_uninit;
-               }
+                       dev_warn(dev, "event_thread set priority failed:%d\n",
+                                ret);
        }
 
        ret = drm_vblank_init(ddev, priv->num_crtcs);
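
The reordering above matters because kthread_run() returns an ERR_PTR() on failure, and the old sequence handed that value to sched_setscheduler() before checking it. The safe shape, with placeholder worker names:

        /* Sketch: test IS_ERR() before touching a kthread_run() result. */
        struct task_struct *t;

        t = kthread_run(worker_fn, data, "worker:%d", id);
        if (IS_ERR(t))
                return PTR_ERR(t);   /* never use an ERR_PTR as a task */

        sched_setscheduler(t, SCHED_FIFO, &param);   /* safe: t is a real task */
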
index 7a7923e6220da89b252997d9f9bb674e0b7802dd..6942604ad9a8b832b8425f90012ba688da3983aa 100644 (file)
@@ -317,6 +317,9 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
        uint32_t *ptr;
        int ret = 0;
 
+       if (!nr_relocs)
+               return 0;
+
        if (offset % 4) {
                DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
                return -EINVAL;
@@ -410,7 +413,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_gem_submit *submit;
        struct msm_gpu *gpu = priv->gpu;
-       struct dma_fence *in_fence = NULL;
        struct sync_file *sync_file = NULL;
        struct msm_gpu_submitqueue *queue;
        struct msm_ringbuffer *ring;
@@ -443,6 +445,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        ring = gpu->rb[queue->prio];
 
        if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+               struct dma_fence *in_fence;
+
                in_fence = sync_file_get_fence(args->fence_fd);
 
                if (!in_fence)
@@ -452,11 +456,13 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
-               if (!dma_fence_match_context(in_fence, ring->fctx->context)) {
+               ret = 0;
+               if (!dma_fence_match_context(in_fence, ring->fctx->context))
                        ret = dma_fence_wait(in_fence, true);
-                       if (ret)
-                               return ret;
-               }
+
+               dma_fence_put(in_fence);
+               if (ret)
+                       return ret;
        }
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -582,8 +588,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        }
 
 out:
-       if (in_fence)
-               dma_fence_put(in_fence);
        submit_cleanup(submit);
        if (ret)
                msm_gem_submit_free(submit);
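
Beyond tightening the fence's scope, this plugs a leak: the old code dropped the in_fence reference only at the single exit label, so the early return after a failed dma_fence_wait() skipped it. The get/wait/put pattern in isolation, where fence_fd and ring_ctx stand in for the ioctl arguments:

        /* Sketch: take the fence, wait only if it is foreign, always drop it. */
        struct dma_fence *fence = sync_file_get_fence(fence_fd);
        int ret = 0;

        if (!fence)
                return -EINVAL;
        if (!dma_fence_match_context(fence, ring_ctx))
                ret = dma_fence_wait(fence, true);   /* interruptible */
        dma_fence_put(fence);                        /* dropped on every path */
        if (ret)
                return ret;
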
index 11aac83370664f45ce5c8a39e6bb6b284581ae40..2b7c8946adba97983a79a6f78dae85ffc269102c 100644 (file)
@@ -345,6 +345,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
 {
        struct msm_gpu_state *state;
 
+       /* Check if the target supports capturing crash state */
+       if (!gpu->funcs->gpu_state_get)
+               return;
+
        /* Only save one crash state at a time */
        if (gpu->crashstate)
                return;
@@ -434,10 +438,9 @@ static void recover_worker(struct work_struct *work)
        if (submit) {
                struct task_struct *task;
 
-               rcu_read_lock();
-               task = pid_task(submit->pid, PIDTYPE_PID);
+               task = get_pid_task(submit->pid, PIDTYPE_PID);
                if (task) {
-                       comm = kstrdup(task->comm, GFP_ATOMIC);
+                       comm = kstrdup(task->comm, GFP_KERNEL);
 
                        /*
                         * So slightly annoying, in other paths like
@@ -450,10 +453,10 @@ static void recover_worker(struct work_struct *work)
                         * about the submit going away.
                         */
                        mutex_unlock(&dev->struct_mutex);
-                       cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
+                       cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+                       put_task_struct(task);
                        mutex_lock(&dev->struct_mutex);
                }
-               rcu_read_unlock();
 
                if (comm && cmd) {
                        dev_err(dev->dev, "%s: offending task: %s (%s)\n",
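
Switching from pid_task() under rcu_read_lock() to get_pid_task() is what permits GFP_KERNEL here: holding a real task reference means the code may sleep, where the old RCU read-side section forced GFP_ATOMIC. Condensed:

        /* Sketch: get_pid_task() takes a reference, so sleeping is fine. */
        struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);

        if (task) {
                char *comm = kstrdup(task->comm, GFP_KERNEL); /* may sleep */
                put_task_struct(task);                        /* balance the ref */
                kfree(comm);
        }
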
index b23d33622f374b0ce88791914b53cb126899676b..2a90aa4caec081b2349ce115d77f4225d22ab3a4 100644 (file)
@@ -66,7 +66,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 //     pm_runtime_get_sync(mmu->dev);
        ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
 //     pm_runtime_put_sync(mmu->dev);
-       WARN_ON(ret < 0);
+       WARN_ON(!ret);
 
        return (ret == len) ? 0 : -EINVAL;
 }
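
The WARN_ON change tracks iommu_map_sg()'s return convention: it returns the number of bytes mapped and 0 on failure, never a negative errno, so the old ret < 0 test could not trigger. Using the names from the surrounding function:

        /* iommu_map_sg() returns bytes mapped; 0 means failure. */
        size_t mapped = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
        WARN_ON(!mapped);                      /* the correct failure test */
        return (mapped == len) ? 0 : -EINVAL;  /* partial maps also fail */
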
index cca9334584391d97a4026f6ae48bfdb8d7f12ae9..0c2c8d2c631f309791a91b388d7d370c969e2f20 100644 (file)
@@ -316,10 +316,11 @@ static void snapshot_buf(struct msm_rd_state *rd,
                uint64_t iova, uint32_t size)
 {
        struct msm_gem_object *obj = submit->bos[idx].obj;
+       unsigned offset = 0;
        const char *buf;
 
        if (iova) {
-               buf += iova - submit->bos[idx].iova;
+               offset = iova - submit->bos[idx].iova;
        } else {
                iova = submit->bos[idx].iova;
                size = obj->base.size;
@@ -340,6 +341,8 @@ static void snapshot_buf(struct msm_rd_state *rd,
        if (IS_ERR(buf))
                return;
 
+       buf += offset;
+
        rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
 
        msm_gem_put_vaddr(&obj->base);
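
The snapshot fix resolves a use-before-assignment: the old code advanced buf by the iova delta before msm_gem_get_vaddr() had produced a pointer. The corrected shape, generically (get_vaddr() is a placeholder for the vmap call):

        /* Sketch: compute the offset first, apply it only once the map exists. */
        unsigned int offset = iova ? iova - base_iova : 0;
        const char *buf = get_vaddr(obj);

        if (IS_ERR(buf))
                return;
        buf += offset;   /* safe only after buf has been assigned */
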
index 6bb78076b5b5830c12c0db372bbaf684dd3d1fc3..6cbbae3f438bd0e44cbc01406687ed82170b7372 100644 (file)
@@ -881,22 +881,16 @@ nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
 {
        struct nv50_head *head = nv50_head(connector_state->crtc);
        struct nv50_mstc *mstc = nv50_mstc(connector);
-       if (mstc->port) {
-               struct nv50_mstm *mstm = mstc->mstm;
-               return &mstm->msto[head->base.index]->encoder;
-       }
-       return NULL;
+
+       return &mstc->mstm->msto[head->base.index]->encoder;
 }
 
 static struct drm_encoder *
 nv50_mstc_best_encoder(struct drm_connector *connector)
 {
        struct nv50_mstc *mstc = nv50_mstc(connector);
-       if (mstc->port) {
-               struct nv50_mstm *mstm = mstc->mstm;
-               return &mstm->msto[0]->encoder;
-       }
-       return NULL;
+
+       return &mstc->mstm->msto[0]->encoder;
 }
 
 static enum drm_mode_status
index 1f8161b041be6c592f826820818ea352ae3fdfe2..465120809eb3bb621343002b4f149a3c44787eca 100644 (file)
@@ -177,6 +177,7 @@ static int panel_dpi_probe(struct platform_device *pdev)
        dssdev->type = OMAP_DISPLAY_TYPE_DPI;
        dssdev->owner = THIS_MODULE;
        dssdev->of_ports = BIT(0);
+       drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
 
        omapdss_display_init(dssdev);
        omapdss_device_register(dssdev);
index 394c129cfb3bb8e03b8970fc839656e3d25becd5..00a9c2ab9e6c8932baecc9b5591b07626201c589 100644 (file)
@@ -5409,15 +5409,24 @@ static int dsi_probe(struct platform_device *pdev)
 
        /* DSI on OMAP3 doesn't have the DSI_GNQ register; set the number
         * of data lanes to 3 by default */
-       if (dsi->data->quirks & DSI_QUIRK_GNQ)
+       if (dsi->data->quirks & DSI_QUIRK_GNQ) {
+               dsi_runtime_get(dsi);
                /* NB_DATA_LANES */
                dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
-       else
+               dsi_runtime_put(dsi);
+       } else {
                dsi->num_lanes_supported = 3;
+       }
+
+       r = of_platform_populate(dev->of_node, NULL, NULL, dev);
+       if (r) {
+               DSSERR("Failed to populate DSI child devices: %d\n", r);
+               goto err_pm_disable;
+       }
 
        r = dsi_init_output(dsi);
        if (r)
-               goto err_pm_disable;
+               goto err_of_depopulate;
 
        r = dsi_probe_of(dsi);
        if (r) {
@@ -5425,10 +5434,6 @@ static int dsi_probe(struct platform_device *pdev)
                goto err_uninit_output;
        }
 
-       r = of_platform_populate(dev->of_node, NULL, NULL, dev);
-       if (r)
-               DSSERR("Failed to populate DSI child devices: %d\n", r);
-
        r = component_add(&pdev->dev, &dsi_component_ops);
        if (r)
                goto err_uninit_output;
@@ -5437,6 +5442,8 @@ static int dsi_probe(struct platform_device *pdev)
 
 err_uninit_output:
        dsi_uninit_output(dsi);
+err_of_depopulate:
+       of_platform_depopulate(dev);
 err_pm_disable:
        pm_runtime_disable(dev);
        return r;
@@ -5470,19 +5477,12 @@ static int dsi_runtime_suspend(struct device *dev)
        /* wait for current handler to finish before turning the DSI off */
        synchronize_irq(dsi->irq);
 
-       dispc_runtime_put(dsi->dss->dispc);
-
        return 0;
 }
 
 static int dsi_runtime_resume(struct device *dev)
 {
        struct dsi_data *dsi = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(dsi->dss->dispc);
-       if (r)
-               return r;
 
        dsi->is_enabled = true;
        /* ensure the irq handler sees the is_enabled value */
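
Reading DSI_GNQ requires the module to be clocked and powered, which is why the GNQ read in probe is now bracketed by dsi_runtime_get()/dsi_runtime_put() rather than relying on the slimmed-down runtime callbacks. The generic shape, with placeholder names (dev, base, val and REG_OFFSET are not omapdss symbols):

        /* Sketch: power up just long enough to sample a config register. */
        pm_runtime_get_sync(dev);         /* or a driver wrapper */
        val = readl(base + REG_OFFSET);   /* safe: the device is resumed */
        pm_runtime_put(dev);
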
index 1aaf260aa9b8638d2e7fac7aaa36ed3fe14a0880..7553c7fc1c457f23bb456046c17408ba89fc9d24 100644 (file)
@@ -1484,16 +1484,23 @@ static int dss_probe(struct platform_device *pdev)
                                                   dss);
 
        /* Add all the child devices as components. */
+       r = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+       if (r)
+               goto err_uninit_debugfs;
+
        omapdss_gather_components(&pdev->dev);
 
        device_for_each_child(&pdev->dev, &match, dss_add_child_component);
 
        r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
        if (r)
-               goto err_uninit_debugfs;
+               goto err_of_depopulate;
 
        return 0;
 
+err_of_depopulate:
+       of_platform_depopulate(&pdev->dev);
+
 err_uninit_debugfs:
        dss_debugfs_remove_file(dss->debugfs.clk);
        dss_debugfs_remove_file(dss->debugfs.dss);
@@ -1522,6 +1529,8 @@ static int dss_remove(struct platform_device *pdev)
 {
        struct dss_device *dss = platform_get_drvdata(pdev);
 
+       of_platform_depopulate(&pdev->dev);
+
        component_master_del(&pdev->dev, &dss_component_ops);
 
        dss_debugfs_remove_file(dss->debugfs.clk);
index cf6230eac31a3cffb37f62cb3e7b79ec9a6bb552..aabdda394c9c6f4cf7f93eb0f8e0f9a6126262d1 100644 (file)
@@ -635,10 +635,14 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
 
        hdmi->dss = dss;
 
-       r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+       r = hdmi_runtime_get(hdmi);
        if (r)
                return r;
 
+       r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+       if (r)
+               goto err_runtime_put;
+
        r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
        if (r)
                goto err_pll_uninit;
@@ -652,12 +656,16 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
        hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
                                               hdmi);
 
+       hdmi_runtime_put(hdmi);
+
        return 0;
 
 err_cec_uninit:
        hdmi4_cec_uninit(&hdmi->core);
 err_pll_uninit:
        hdmi_pll_uninit(&hdmi->pll);
+err_runtime_put:
+       hdmi_runtime_put(hdmi);
        return r;
 }
 
@@ -833,32 +841,6 @@ static int hdmi4_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int hdmi_runtime_suspend(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
-       dispc_runtime_put(hdmi->dss->dispc);
-
-       return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(hdmi->dss->dispc);
-       if (r < 0)
-               return r;
-
-       return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-       .runtime_suspend = hdmi_runtime_suspend,
-       .runtime_resume = hdmi_runtime_resume,
-};
-
 static const struct of_device_id hdmi_of_match[] = {
        { .compatible = "ti,omap4-hdmi", },
        {},
@@ -869,7 +851,6 @@ struct platform_driver omapdss_hdmi4hw_driver = {
        .remove         = hdmi4_remove,
        .driver         = {
                .name   = "omapdss_hdmi",
-               .pm     = &hdmi_pm_ops,
                .of_match_table = hdmi_of_match,
                .suppress_bind_attrs = true,
        },
index b0e4a7463f8c88517fcb398a049a8f5355df6dc9..9e8556f67a2914aed8ed1b71409956c2bcc07057 100644 (file)
@@ -825,32 +825,6 @@ static int hdmi5_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int hdmi_runtime_suspend(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
-       dispc_runtime_put(hdmi->dss->dispc);
-
-       return 0;
-}
-
-static int hdmi_runtime_resume(struct device *dev)
-{
-       struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(hdmi->dss->dispc);
-       if (r < 0)
-               return r;
-
-       return 0;
-}
-
-static const struct dev_pm_ops hdmi_pm_ops = {
-       .runtime_suspend = hdmi_runtime_suspend,
-       .runtime_resume = hdmi_runtime_resume,
-};
-
 static const struct of_device_id hdmi_of_match[] = {
        { .compatible = "ti,omap5-hdmi", },
        { .compatible = "ti,dra7-hdmi", },
@@ -862,7 +836,6 @@ struct platform_driver omapdss_hdmi5hw_driver = {
        .remove         = hdmi5_remove,
        .driver         = {
                .name   = "omapdss_hdmi5",
-               .pm     = &hdmi_pm_ops,
                .of_match_table = hdmi_of_match,
                .suppress_bind_attrs = true,
        },
index 1f698a95a94a57d4a03626666c7f755f4fce6fae..33e15cb77efa79afbcc3d46c17eb045b4a5d3c57 100644 (file)
@@ -432,7 +432,7 @@ struct omap_dss_device {
        const struct omap_dss_driver *driver;
        const struct omap_dss_device_ops *ops;
        unsigned long ops_flags;
-       unsigned long bus_flags;
+       u32 bus_flags;
 
        /* helper variable for driver suspend/resume */
        bool activate_after_resume;
index ff0b18c8e4acedc4d2e310d5377a789fecaaf9a2..b5f52727f8b17237f52bbad92e170a488e27b396 100644 (file)
@@ -946,19 +946,12 @@ static int venc_runtime_suspend(struct device *dev)
        if (venc->tv_dac_clk)
                clk_disable_unprepare(venc->tv_dac_clk);
 
-       dispc_runtime_put(venc->dss->dispc);
-
        return 0;
 }
 
 static int venc_runtime_resume(struct device *dev)
 {
        struct venc_device *venc = dev_get_drvdata(dev);
-       int r;
-
-       r = dispc_runtime_get(venc->dss->dispc);
-       if (r < 0)
-               return r;
 
        if (venc->tv_dac_clk)
                clk_prepare_enable(venc->tv_dac_clk);
index 62928ec0e7db7a6fd6c53d1801c62a19dae80758..caffc547ef97e385cb913f77fd3ebe55a082d486 100644 (file)
@@ -350,11 +350,14 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
 static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
 {
+       struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
        int ret;
 
        DBG("%s", omap_crtc->name);
 
+       priv->dispc_ops->runtime_get(priv->dispc);
+
        spin_lock_irq(&crtc->dev->event_lock);
        drm_crtc_vblank_on(crtc);
        ret = drm_crtc_vblank_get(crtc);
@@ -367,6 +370,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
 static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
 {
+       struct omap_drm_private *priv = crtc->dev->dev_private;
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
        DBG("%s", omap_crtc->name);
@@ -379,6 +383,8 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
        spin_unlock_irq(&crtc->dev->event_lock);
 
        drm_crtc_vblank_off(crtc);
+
+       priv->dispc_ops->runtime_put(priv->dispc);
 }
 
 static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
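
Taken together with the earlier hunks that delete dispc_runtime_get()/put() from the DSI, HDMI4/5 and VENC runtime PM callbacks, this moves ownership of the DISPC power reference into the CRTC: acquired in atomic_enable, released in atomic_disable, so the encoders no longer chain runtime PM across devices. In outline, assuming priv comes from crtc->dev->dev_private:

        /* Illustrative only: the CRTC owns the DISPC runtime reference. */
        static void crtc_atomic_enable(struct drm_crtc *crtc)
        {
                priv->dispc_ops->runtime_get(priv->dispc);  /* take the ref */
                /* ... vblank on, arm event ... */
        }

        static void crtc_atomic_disable(struct drm_crtc *crtc)
        {
                /* ... vblank off ... */
                priv->dispc_ops->runtime_put(priv->dispc);  /* balance it */
        }
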
index 452e625f6ce331a24a13afe8c40209cab5e0b0d1..933ebc9f9faaaf35049a53aef49551e3ff1e740a 100644 (file)
@@ -52,17 +52,44 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
        .destroy = omap_encoder_destroy,
 };
 
+static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
+                                      struct drm_display_mode *adjusted_mode)
+{
+       struct drm_device *dev = encoder->dev;
+       struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+       struct omap_dss_device *dssdev = omap_encoder->output;
+       struct drm_connector *connector;
+       bool hdmi_mode;
+
+       hdmi_mode = false;
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               if (connector->encoder == encoder) {
+                       hdmi_mode = omap_connector_get_hdmi_mode(connector);
+                       break;
+               }
+       }
+
+       if (dssdev->ops->hdmi.set_hdmi_mode)
+               dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
+
+       if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
+               struct hdmi_avi_infoframe avi;
+               int r;
+
+               r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
+                                                            false);
+               if (r == 0)
+                       dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
+       }
+}
+
 static void omap_encoder_mode_set(struct drm_encoder *encoder,
                                  struct drm_display_mode *mode,
                                  struct drm_display_mode *adjusted_mode)
 {
-       struct drm_device *dev = encoder->dev;
        struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-       struct drm_connector *connector;
        struct omap_dss_device *dssdev;
        struct videomode vm = { 0 };
-       bool hdmi_mode;
-       int r;
 
        drm_display_mode_to_videomode(adjusted_mode, &vm);
 
@@ -112,27 +139,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
        }
 
        /* Set the HDMI mode and HDMI infoframe if applicable. */
-       hdmi_mode = false;
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               if (connector->encoder == encoder) {
-                       hdmi_mode = omap_connector_get_hdmi_mode(connector);
-                       break;
-               }
-       }
-
-       dssdev = omap_encoder->output;
-
-       if (dssdev->ops->hdmi.set_hdmi_mode)
-               dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
-       if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
-               struct hdmi_avi_infoframe avi;
-
-               r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
-                                                            false);
-               if (r == 0)
-                       dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
-       }
+       if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
+               omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
 }
 
 static void omap_encoder_disable(struct drm_encoder *encoder)
index 97964f7f2acee08350101947a4ccd9a717f5f199..a04ffb3b21742a834c44c4770e2760f8139053dd 100644 (file)
@@ -56,6 +56,8 @@ struct panel_desc {
        /**
         * @prepare: the time (in milliseconds) that it takes for the panel to
         *           become ready and start receiving video data
+        * @hpd_absent_delay: Add this to the prepare delay if we know Hot
+        *                    Plug Detect isn't used.
         * @enable: the time (in milliseconds) that it takes for the panel to
         *          display the first valid frame after starting to receive
         *          video data
@@ -66,6 +68,7 @@ struct panel_desc {
         */
        struct {
                unsigned int prepare;
+               unsigned int hpd_absent_delay;
                unsigned int enable;
                unsigned int disable;
                unsigned int unprepare;
@@ -79,6 +82,7 @@ struct panel_simple {
        struct drm_panel base;
        bool prepared;
        bool enabled;
+       bool no_hpd;
 
        const struct panel_desc *desc;
 
@@ -202,6 +206,7 @@ static int panel_simple_unprepare(struct drm_panel *panel)
 static int panel_simple_prepare(struct drm_panel *panel)
 {
        struct panel_simple *p = to_panel_simple(panel);
+       unsigned int delay;
        int err;
 
        if (p->prepared)
@@ -215,8 +220,11 @@ static int panel_simple_prepare(struct drm_panel *panel)
 
        gpiod_set_value_cansleep(p->enable_gpio, 1);
 
-       if (p->desc->delay.prepare)
-               msleep(p->desc->delay.prepare);
+       delay = p->desc->delay.prepare;
+       if (p->no_hpd)
+               delay += p->desc->delay.hpd_absent_delay;
+       if (delay)
+               msleep(delay);
 
        p->prepared = true;
 
@@ -305,6 +313,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
        panel->prepared = false;
        panel->desc = desc;
 
+       panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+
        panel->supply = devm_regulator_get(dev, "power");
        if (IS_ERR(panel->supply))
                return PTR_ERR(panel->supply);
@@ -1363,7 +1373,7 @@ static const struct panel_desc innolux_n156bge_l21 = {
        },
 };
 
-static const struct drm_display_mode innolux_tv123wam_mode = {
+static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
        .clock = 206016,
        .hdisplay = 2160,
        .hsync_start = 2160 + 48,
@@ -1377,15 +1387,16 @@ static const struct drm_display_mode innolux_tv123wam_mode = {
        .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
 };
 
-static const struct panel_desc innolux_tv123wam = {
-       .modes = &innolux_tv123wam_mode,
+static const struct panel_desc innolux_p120zdg_bf1 = {
+       .modes = &innolux_p120zdg_bf1_mode,
        .num_modes = 1,
        .bpc = 8,
        .size = {
-               .width = 259,
-               .height = 173,
+               .width = 254,
+               .height = 169,
        },
        .delay = {
+               .hpd_absent_delay = 200,
                .unprepare = 500,
        },
 };
@@ -2445,8 +2456,8 @@ static const struct of_device_id platform_of_match[] = {
                .compatible = "innolux,n156bge-l21",
                .data = &innolux_n156bge_l21,
        }, {
-               .compatible = "innolux,tv123wam",
-               .data = &innolux_tv123wam,
+               .compatible = "innolux,p120zdg-bf1",
+               .data = &innolux_p120zdg_bf1,
        }, {
                .compatible = "innolux,zj070na-01p",
                .data = &innolux_zj070na_01p,
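
[Editor's note: a worked example of the new prepare-delay arithmetic may help. With the innolux_p120zdg_bf1 timings above (prepare 0 ms, hpd_absent_delay 200 ms), a panel whose DT node carries "no-hpd" sleeps 200 ms before the first video data, while one with HPD wired sleeps not at all. A minimal standalone sketch of the same computation; the struct and function names are hypothetical stand-ins for the driver's fields:]

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of the delay fields used by panel_simple_prepare(). */
struct delay_desc {
        unsigned int prepare;          /* ms */
        unsigned int hpd_absent_delay; /* ms */
};

static unsigned int effective_prepare_delay(const struct delay_desc *d,
                                            bool no_hpd)
{
        unsigned int delay = d->prepare;

        /* Without Hot Plug Detect we must wait the worst-case time. */
        if (no_hpd)
                delay += d->hpd_absent_delay;
        return delay;
}

int main(void)
{
        const struct delay_desc p120zdg_bf1 = { .prepare = 0,
                                                .hpd_absent_delay = 200 };

        printf("no-hpd: %u ms, hpd: %u ms\n",
               effective_prepare_delay(&p120zdg_bf1, true),
               effective_prepare_delay(&p120zdg_bf1, false));
        return 0;
}
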
index d85f0a1c158173b97bf94b251a889f31e8fff948..cebf313c6e1f9290dd8817099fb6388d25649032 100644 (file)
@@ -202,10 +202,25 @@ void rcar_du_group_put(struct rcar_du_group *rgrp)
 
 static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
 {
-       struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
+       struct rcar_du_device *rcdu = rgrp->dev;
+
+       /*
+        * Group start/stop is controlled by the DRES and DEN bits of DSYSR0
+        * for the first group and DSYSR2 for the second group. On most DU
+        * instances, this maps to the first CRTC of the group, and we can just
+        * use rcar_du_crtc_dsysr_clr_set() to access the correct DSYSR. On
+        * M3-N, however, DU2 doesn't exist, but DSYSR2 does. We thus need to
+        * access the register directly using group read/write.
+        */
+       if (rcdu->info->channels_mask & BIT(rgrp->index * 2)) {
+               struct rcar_du_crtc *rcrtc = &rgrp->dev->crtcs[rgrp->index * 2];
 
-       rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
-                                  start ? DSYSR_DEN : DSYSR_DRES);
+               rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_DRES | DSYSR_DEN,
+                                          start ? DSYSR_DEN : DSYSR_DRES);
+       } else {
+               rcar_du_group_write(rgrp, DSYSR,
+                                   start ? DSYSR_DEN : DSYSR_DRES);
+       }
 }
 
 void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
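
[Editor's note: the channels_mask test above is easiest to see with concrete values. On M3-N, DU2 is absent, so channels_mask lacks BIT(2) and group 1 must write DSYSR2 through the group accessor. A hedged sketch of just the predicate, with an assumed M3-N-like mask:]

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Does group 'index' have its first CRTC (CRTC 2*index) populated? */
static bool group_has_first_crtc(unsigned int channels_mask,
                                 unsigned int index)
{
        return channels_mask & BIT(index * 2);
}

int main(void)
{
        /* Assumed M3-N-like mask: DU0, DU1 and DU3 present, DU2 absent. */
        unsigned int m3n_mask = BIT(0) | BIT(1) | BIT(3);

        /* Group 0 -> CRTC 0 exists: use rcar_du_crtc_dsysr_clr_set(). */
        printf("group 0 via CRTC: %d\n", group_has_first_crtc(m3n_mask, 0));
        /* Group 1 -> CRTC 2 missing: fall back to rcar_du_group_write(). */
        printf("group 1 via CRTC: %d\n", group_has_first_crtc(m3n_mask, 1));
        return 0;
}
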
index af7dcb6da351408391892dff43c9fc574291ae50..e7eb0d1e17be5e6575550f9e6c12d9b96f92a0d5 100644 (file)
@@ -75,7 +75,7 @@ static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEBUG_DRIVER("Enabling LVDS output\n");
 
-       if (!IS_ERR(tcon->panel)) {
+       if (tcon->panel) {
                drm_panel_prepare(tcon->panel);
                drm_panel_enable(tcon->panel);
        }
@@ -88,7 +88,7 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
 
        DRM_DEBUG_DRIVER("Disabling LVDS output\n");
 
-       if (!IS_ERR(tcon->panel)) {
+       if (tcon->panel) {
                drm_panel_disable(tcon->panel);
                drm_panel_unprepare(tcon->panel);
        }
index bf068da6b12e11b7a9440fdca6ff03ca84ff2479..f4a22689eb54c238f96626c03d8271d70ce645b8 100644 (file)
@@ -135,7 +135,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
 
        DRM_DEBUG_DRIVER("Enabling RGB output\n");
 
-       if (!IS_ERR(tcon->panel)) {
+       if (tcon->panel) {
                drm_panel_prepare(tcon->panel);
                drm_panel_enable(tcon->panel);
        }
@@ -148,7 +148,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
 
        DRM_DEBUG_DRIVER("Disabling RGB output\n");
 
-       if (!IS_ERR(tcon->panel)) {
+       if (tcon->panel) {
                drm_panel_disable(tcon->panel);
                drm_panel_unprepare(tcon->panel);
        }
index c78cd35a1294b215f84b33031ac7d826760b885a..f949287d926cd07f6859331c769ef02621ef130f 100644 (file)
@@ -491,7 +491,8 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
        sun4i_tcon0_mode_set_common(tcon, mode);
 
        /* Set dithering if needed */
-       sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+       if (tcon->panel)
+               sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
 
        /* Adjust clock delay */
        clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
@@ -555,7 +556,7 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
         * Following code is a way to avoid quirks all around TCON
         * and DOTCLOCK drivers.
         */
-       if (!IS_ERR(tcon->panel)) {
+       if (tcon->panel) {
                struct drm_panel *panel = tcon->panel;
                struct drm_connector *connector = panel->connector;
                struct drm_display_info display_info = connector->display_info;
index ba80150d10524802271be2ce89f9144c0a98bee2..895d77d799e4fd9aad3f8715ae4c10f1fe5a7c75 100644 (file)
@@ -492,8 +492,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
        if (!fbo)
                return -ENOMEM;
 
-       ttm_bo_get(bo);
        fbo->base = *bo;
+       fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
+
+       ttm_bo_get(bo);
        fbo->bo = bo;
 
        /**
index 127468785f7484495df89a00989c17388548f9c5..1f94b9affe4bbafddede75306c3109b1368bc172 100644 (file)
@@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev,
                return 0;
        }
 
+       /* We know for sure we don't want an async update here. Set
+        * state->legacy_cursor_update to false to prevent
+        * drm_atomic_helper_setup_commit() from auto-completing
+        * commit->flip_done.
+        */
+       state->legacy_cursor_update = false;
        ret = drm_atomic_helper_setup_commit(state, nonblock);
        if (ret)
                return ret;
index 9dc3fcbd290bef915cea9d12472b79b3dd36db4a..c6635f23918a8c1ec07531fc6aeffede5e6e528f 100644 (file)
@@ -807,7 +807,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
 static void vc4_plane_atomic_async_update(struct drm_plane *plane,
                                          struct drm_plane_state *state)
 {
-       struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state);
+       struct vc4_plane_state *vc4_state, *new_vc4_state;
 
        if (plane->state->fb != state->fb) {
                vc4_plane_async_set_fb(plane, state->fb);
@@ -828,7 +828,18 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
        plane->state->src_y = state->src_y;
 
        /* Update the display list based on the new crtc_x/y. */
-       vc4_plane_atomic_check(plane, plane->state);
+       vc4_plane_atomic_check(plane, state);
+
+       new_vc4_state = to_vc4_plane_state(state);
+       vc4_state = to_vc4_plane_state(plane->state);
+
+       /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */
+       vc4_state->dlist[vc4_state->pos0_offset] =
+               new_vc4_state->dlist[vc4_state->pos0_offset];
+       vc4_state->dlist[vc4_state->pos2_offset] =
+               new_vc4_state->dlist[vc4_state->pos2_offset];
+       vc4_state->dlist[vc4_state->ptr0_offset] =
+               new_vc4_state->dlist[vc4_state->ptr0_offset];
 
        /* Note that we can't just call vc4_plane_write_dlist()
         * because that would smash the context data that the HVS is
index cf2a18571d484d078dc1eabc59a3d6ff0f11ab07..a132c37d733490fa70af2674237162d73459a31d 100644 (file)
@@ -380,6 +380,9 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
                        mutex_unlock(&vgasr_mutex);
                        return -EINVAL;
                }
+               /* notify if GPU has been already bound */
+               if (ops->gpu_bound)
+                       ops->gpu_bound(pdev, id);
        }
        mutex_unlock(&vgasr_mutex);
 
index a1fa2fc8c9b57fd8e3de462d35b6247bd0d3e6e3..951bb17ae8b2c823879002abe8311fbe1ab05f22 100644 (file)
@@ -70,6 +70,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_T100_KEYBOARD            BIT(6)
 #define QUIRK_T100CHI                  BIT(7)
 #define QUIRK_G752_KEYBOARD            BIT(8)
+#define QUIRK_T101HA_DOCK              BIT(9)
 
 #define I2C_KEYBOARD_QUIRKS                    (QUIRK_FIX_NOTEBOOK_REPORT | \
                                                 QUIRK_NO_INIT_REPORTS | \
@@ -241,6 +242,18 @@ static int asus_report_input(struct asus_drvdata *drvdat, u8 *data, int size)
        return 1;
 }
 
+static int asus_event(struct hid_device *hdev, struct hid_field *field,
+                     struct hid_usage *usage, __s32 value)
+{
+       if ((usage->hid & HID_USAGE_PAGE) == 0xff310000 &&
+           (usage->hid & HID_USAGE) != 0x00 && !usage->type) {
+               hid_warn(hdev, "Unmapped Asus vendor usagepage code 0x%02x\n",
+                        usage->hid & HID_USAGE);
+       }
+
+       return 0;
+}
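
[Editor's note: for reference, the masking in asus_event() follows the usual hid.h layout, where the upper 16 bits of usage->hid hold the usage page and the lower 16 bits the usage ID, so 0xff310000 selects the Asus vendor page. A standalone sketch of the same test:]

#include <stdint.h>
#include <stdio.h>

/* Same masks as HID_USAGE_PAGE / HID_USAGE in include/linux/hid.h. */
#define HID_USAGE_PAGE 0xffff0000u
#define HID_USAGE      0x0000ffffu

static int is_unmapped_asus_vendor_usage(uint32_t hid_usage, int mapped_type)
{
        return (hid_usage & HID_USAGE_PAGE) == 0xff310000 &&
               (hid_usage & HID_USAGE) != 0x00 && !mapped_type;
}

int main(void)
{
        /* Usage 0x7c on the vendor page, not yet mapped to an input type. */
        printf("%d\n", is_unmapped_asus_vendor_usage(0xff31007c, 0));
        /* Same usage after asus_input_mapping() assigned an event type. */
        printf("%d\n", is_unmapped_asus_vendor_usage(0xff31007c, 1));
        return 0;
}
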
+
 static int asus_raw_event(struct hid_device *hdev,
                struct hid_report *report, u8 *data, int size)
 {
@@ -510,6 +523,7 @@ static int asus_input_mapping(struct hid_device *hdev,
                case 0x20: asus_map_key_clear(KEY_BRIGHTNESSUP);                break;
                case 0x35: asus_map_key_clear(KEY_DISPLAY_OFF);         break;
                case 0x6c: asus_map_key_clear(KEY_SLEEP);               break;
+               case 0x7c: asus_map_key_clear(KEY_MICMUTE);             break;
                case 0x82: asus_map_key_clear(KEY_CAMERA);              break;
                case 0x88: asus_map_key_clear(KEY_RFKILL);                      break;
                case 0xb5: asus_map_key_clear(KEY_CALC);                        break;
@@ -528,6 +542,9 @@ static int asus_input_mapping(struct hid_device *hdev,
                /* Fn+Space Power4Gear Hybrid */
                case 0x5c: asus_map_key_clear(KEY_PROG3);               break;
 
+               /* Fn+F5 "fan" symbol on FX503VD */
+               case 0x99: asus_map_key_clear(KEY_PROG4);               break;
+
                default:
                        /* ASUS lazily declares 256 usages, ignore the rest,
                         * as some make the keyboard appear as a pointer device. */
@@ -683,6 +700,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
        }
 
+       /* use hid-multitouch for T101HA touchpad */
+       if (id->driver_data & QUIRK_T101HA_DOCK &&
+           hdev->collection->usage == HID_GD_MOUSE)
+               return -ENODEV;
+
        ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        if (ret) {
                hid_err(hdev, "Asus hw start failed: %d\n", ret);
@@ -805,12 +827,17 @@ static const struct hid_device_id asus_devices[] = {
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3), QUIRK_G752_KEYBOARD },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD),
+         QUIRK_USE_KBD_BACKLIGHT },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
        { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
                USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD),
          QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES },
+       { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+               USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD), QUIRK_T101HA_DOCK },
        { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_ASUS_AK1D) },
        { HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_ASUS_MD_5110) },
        { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_ASUS_MD_5112) },
@@ -832,6 +859,7 @@ static struct hid_driver asus_driver = {
 #ifdef CONFIG_PM
        .reset_resume           = asus_reset_resume,
 #endif
+       .event                  = asus_event,
        .raw_event              = asus_raw_event
 };
 module_hid_driver(asus_driver);
index 5bec9244c45b54aa943ae5363cecdc9c5d7f38f5..f41d5fe51abe3b812b600c7fa79d789f350ef3ce 100644 (file)
@@ -172,6 +172,8 @@ static int open_collection(struct hid_parser *parser, unsigned type)
        collection->type = type;
        collection->usage = usage;
        collection->level = parser->collection_stack_ptr - 1;
+       collection->parent = parser->active_collection;
+       parser->active_collection = collection;
 
        if (type == HID_COLLECTION_APPLICATION)
                parser->device->maxapplication++;
@@ -190,6 +192,8 @@ static int close_collection(struct hid_parser *parser)
                return -EINVAL;
        }
        parser->collection_stack_ptr--;
+       if (parser->active_collection)
+               parser->active_collection = parser->active_collection->parent;
        return 0;
 }
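
[Editor's note: with the new parent pointer maintained in open_collection()/close_collection(), finding an enclosing collection of a given type becomes a simple linked-list walk, which is what the Resolution Multiplier code later in this patch relies on. A standalone sketch of that walk, with simplified types:]

#include <stdio.h>

enum { COLL_PHYSICAL, COLL_APPLICATION, COLL_LOGICAL };

struct collection {
        int type;
        struct collection *parent; /* filled in by open_collection() */
};

/* Walk upward until a collection of the wanted type (or NULL). */
static struct collection *find_ancestor(struct collection *c, int type)
{
        while (c && c->type != type)
                c = c->parent;
        return c;
}

int main(void)
{
        struct collection app  = { COLL_APPLICATION, NULL };
        struct collection log  = { COLL_LOGICAL, &app };
        struct collection phys = { COLL_PHYSICAL, &log };

        printf("logical ancestor found: %s\n",
               find_ancestor(&phys, COLL_LOGICAL) == &log ? "yes" : "no");
        return 0;
}
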
 
@@ -290,6 +294,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
                field->usage[i].collection_index =
                        parser->local.collection_index[j];
                field->usage[i].usage_index = i;
+               field->usage[i].resolution_multiplier = 1;
        }
 
        field->maxusage = usages;
@@ -943,6 +948,167 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
 }
 EXPORT_SYMBOL_GPL(hid_validate_values);
 
+static int hid_calculate_multiplier(struct hid_device *hid,
+                                    struct hid_field *multiplier)
+{
+       int m;
+       __s32 v = *multiplier->value;
+       __s32 lmin = multiplier->logical_minimum;
+       __s32 lmax = multiplier->logical_maximum;
+       __s32 pmin = multiplier->physical_minimum;
+       __s32 pmax = multiplier->physical_maximum;
+
+       /*
+        * "Because OS implementations will generally divide the control's
+        * reported count by the Effective Resolution Multiplier, designers
+        * should take care not to establish a potential Effective
+        * Resolution Multiplier of zero."
+        * HID Usage Table, v1.12, Section 4.3.1, p31
+        */
+       if (lmax - lmin == 0)
+               return 1;
+       /*
+        * Handling the unit exponent is left as an exercise to whoever
+        * finds a device where that exponent is not 0.
+        */
+       m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
+       if (unlikely(multiplier->unit_exponent != 0)) {
+               hid_warn(hid,
+                        "unsupported Resolution Multiplier unit exponent %d\n",
+                        multiplier->unit_exponent);
+       }
+
+       /* There are no devices with an effective multiplier > 255 */
+       if (unlikely(m == 0 || m > 255 || m < -255)) {
+               hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
+               m = 1;
+       }
+
+       return m;
+}
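
[Editor's note: to make the arithmetic concrete, a typical Resolution Multiplier item has logical range 0..1 and physical range 1..12; a reported value of 1 then yields an effective multiplier of (1-0)/(1-0) * (12-1) + 1 = 12. A self-contained sketch of the same computation; the struct is an assumed stand-in for the hid_field members actually read:]

#include <stdio.h>

/* Stand-in for the hid_field members hid_calculate_multiplier() reads. */
struct mult_field {
        int value;
        int logical_minimum, logical_maximum;
        int physical_minimum, physical_maximum;
};

static int calculate_multiplier(const struct mult_field *f)
{
        int m;

        if (f->logical_maximum - f->logical_minimum == 0)
                return 1; /* avoid an effective multiplier of zero */

        m = (f->value - f->logical_minimum) /
            (f->logical_maximum - f->logical_minimum) *
            (f->physical_maximum - f->physical_minimum) +
            f->physical_minimum;

        if (m == 0 || m > 255 || m < -255)
                m = 1; /* no known device exceeds 255 */
        return m;
}

int main(void)
{
        /* Assumed descriptor: logical 0..1, physical 1..12, value 1. */
        struct mult_field f = { 1, 0, 1, 1, 12 };

        printf("effective multiplier = %d\n", calculate_multiplier(&f));
        return 0;
}
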
+
+static void hid_apply_multiplier_to_field(struct hid_device *hid,
+                                         struct hid_field *field,
+                                         struct hid_collection *multiplier_collection,
+                                         int effective_multiplier)
+{
+       struct hid_collection *collection;
+       struct hid_usage *usage;
+       int i;
+
+       /*
+        * If multiplier_collection is NULL, the multiplier applies
+        * to all fields in the report.
+        * Otherwise, it is the Logical Collection the multiplier applies to
+        * but our field may be in a subcollection of that collection.
+        */
+       for (i = 0; i < field->maxusage; i++) {
+               usage = &field->usage[i];
+
+               collection = &hid->collection[usage->collection_index];
+               while (collection && collection != multiplier_collection)
+                       collection = collection->parent;
+
+               if (collection || multiplier_collection == NULL)
+                       usage->resolution_multiplier = effective_multiplier;
+       }
+}
+
+static void hid_apply_multiplier(struct hid_device *hid,
+                                struct hid_field *multiplier)
+{
+       struct hid_report_enum *rep_enum;
+       struct hid_report *rep;
+       struct hid_field *field;
+       struct hid_collection *multiplier_collection;
+       int effective_multiplier;
+       int i;
+
+       /*
+        * "The Resolution Multiplier control must be contained in the same
+        * Logical Collection as the control(s) to which it is to be applied.
+        * If no Resolution Multiplier is defined, then the Resolution
+        * Multiplier defaults to 1.  If more than one control exists in a
+        * Logical Collection, the Resolution Multiplier is associated with
+        * all controls in the collection. If no Logical Collection is
+        * defined, the Resolution Multiplier is associated with all
+        * controls in the report."
+        * HID Usage Table, v1.12, Section 4.3.1, p30
+        *
+        * Thus, search from the current collection upwards until we find a
+        * logical collection. Then search all fields for that same parent
+        * collection. Those are the fields the multiplier applies to.
+        *
+        * If we have more than one multiplier, it will overwrite the
+        * applicable fields later.
+        */
+       multiplier_collection = &hid->collection[multiplier->usage->collection_index];
+       while (multiplier_collection &&
+              multiplier_collection->type != HID_COLLECTION_LOGICAL)
+               multiplier_collection = multiplier_collection->parent;
+
+       effective_multiplier = hid_calculate_multiplier(hid, multiplier);
+
+       rep_enum = &hid->report_enum[HID_INPUT_REPORT];
+       list_for_each_entry(rep, &rep_enum->report_list, list) {
+               for (i = 0; i < rep->maxfield; i++) {
+                       field = rep->field[i];
+                       hid_apply_multiplier_to_field(hid, field,
+                                                     multiplier_collection,
+                                                     effective_multiplier);
+               }
+       }
+}
+
+/*
+ * hid_setup_resolution_multiplier - set up all resolution multipliers
+ *
+ * @hid: hid device
+ *
+ * Search for all Resolution Multiplier Feature Reports and apply their
+ * value to all matching Input items. This only updates the internal struct
+ * fields.
+ *
+ * The Resolution Multiplier is applied by the hardware. If the multiplier
+ * is anything other than 1, the hardware will send pre-multiplied events
+ * so that the same physical interaction generates an accumulated
+ *     accumulated_value = value * multiplier
+ * This may be achieved by sending
+ * - "value * multiplier" for each event, or
+ * - "value" but "multiplier" times as frequently, or
+ * - a combination of the above
+ * The only guarantee is that the same physical interaction always generates
+ * an accumulated 'value * multiplier'.
+ *
+ * This function must be called before any event processing and after
+ * any SetRequest to the Resolution Multiplier.
+ */
+void hid_setup_resolution_multiplier(struct hid_device *hid)
+{
+       struct hid_report_enum *rep_enum;
+       struct hid_report *rep;
+       struct hid_usage *usage;
+       int i, j;
+
+       rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+       list_for_each_entry(rep, &rep_enum->report_list, list) {
+               for (i = 0; i < rep->maxfield; i++) {
+                       /* Ignore if report count is out of bounds. */
+                       if (rep->field[i]->report_count < 1)
+                               continue;
+
+                       for (j = 0; j < rep->field[i]->maxusage; j++) {
+                               usage = &rep->field[i]->usage[j];
+                               if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
+                                       hid_apply_multiplier(hid,
+                                                            rep->field[i]);
+                       }
+               }
+       }
+}
+EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
+
 /**
  * hid_open_report - open a driver-specific device report
  *
@@ -1039,9 +1205,17 @@ int hid_open_report(struct hid_device *device)
                                hid_err(device, "unbalanced delimiter at end of report description\n");
                                goto err;
                        }
+
+                       /*
+                        * fetch initial values in case the device's
+                        * default multiplier isn't the recommended 1
+                        */
+                       hid_setup_resolution_multiplier(device);
+
                        kfree(parser->collection_stack);
                        vfree(parser);
                        device->status |= HID_STAT_PARSED;
+
                        return 0;
                }
        }
index 3f0916b64c60e9bd8fcf29b22386f2a6c7cd7649..e0bb7b34f3a4de8a1f4b51a9f3bc8406b21cd621 100644 (file)
@@ -326,6 +326,8 @@ module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644);
 static struct hid_device_id cougar_id_table[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
                         USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
+                        USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD) },
        {}
 };
 MODULE_DEVICE_TABLE(hid, cougar_id_table);
index b48100236df890cdd1bbffa0daac97257357a38d..c530476edba62b7804b892e47a675396dfe1c3c8 100644 (file)
@@ -1072,11 +1072,6 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
        return 0;
 }
 
-static int hid_debug_rdesc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, hid_debug_rdesc_show, inode->i_private);
-}
-
 static int hid_debug_events_open(struct inode *inode, struct file *file)
 {
        int err = 0;
@@ -1211,12 +1206,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static const struct file_operations hid_debug_rdesc_fops = {
-       .open           = hid_debug_rdesc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(hid_debug_rdesc);
 
 static const struct file_operations hid_debug_events_fops = {
        .owner =        THIS_MODULE,
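
[Editor's note: the hunk above works because DEFINE_SHOW_ATTRIBUTE(hid_debug_rdesc) generates the open helper and file_operations that were previously written by hand. Assuming the include/linux/seq_file.h definition of this era, the macro expands roughly to the following; treat this as a sketch, not the literal header text:]

/* Approximate expansion of DEFINE_SHOW_ATTRIBUTE(hid_debug_rdesc). */
static int hid_debug_rdesc_open(struct inode *inode, struct file *file)
{
        return single_open(file, hid_debug_rdesc_show, inode->i_private);
}

static const struct file_operations hid_debug_rdesc_fops = {
        .owner          = THIS_MODULE,
        .open           = hid_debug_rdesc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
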
index b372854cf38d3221d8598affb395845fb42ce930..704049e62d58ac9a9cd1e7c8bb4a3e1c222eb42d 100644 (file)
@@ -309,7 +309,7 @@ static void mousevsc_on_receive(struct hv_device *device,
                hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
                                 input_dev->input_buf, len, 1);
 
-               pm_wakeup_event(&input_dev->device->device, 0);
+               pm_wakeup_hard_event(&input_dev->device->device);
 
                break;
        default:
index 27519eb8ee636f8823d71a3ccf6802c8620e3745..518fa76414f560f8e76d88a2079310cc8b8c4936 100644 (file)
 #define USB_DEVICE_ID_ASUSTEK_T100TA_KEYBOARD  0x17e0
 #define USB_DEVICE_ID_ASUSTEK_T100TAF_KEYBOARD 0x1807
 #define USB_DEVICE_ID_ASUSTEK_T100CHI_KEYBOARD 0x8502
+#define USB_DEVICE_ID_ASUSTEK_T101HA_KEYBOARD  0x183d
 #define USB_DEVICE_ID_ASUSTEK_T304_KEYBOARD    0x184a
 #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD     0x8585
 #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD     0x0101
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2 0x1837
 #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD3 0x1822
+#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
 
 #define USB_VENDOR_ID_ATEN             0x0557
 #define USB_DEVICE_ID_ATEN_UC100KM     0x2004
 
 #define USB_VENDOR_ID_SOLID_YEAR                       0x060b
 #define USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD      0x500a
+#define USB_DEVICE_ID_COUGAR_700K_GAMING_KEYBOARD      0x700a
 
 #define USB_VENDOR_ID_SOUNDGRAPH       0x15c2
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST    0x0034
index d6fab579848743555c53534ad933419ad69318a9..59a5608b8dc06fb3e21811605d6c81f28cf7e070 100644 (file)
@@ -712,7 +712,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                map_abs_clear(usage->hid & 0xf);
                        break;
 
-               case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL:
+               case HID_GD_WHEEL:
+                       if (field->flags & HID_MAIN_ITEM_RELATIVE) {
+                               set_bit(REL_WHEEL, input->relbit);
+                               map_rel(REL_WHEEL_HI_RES);
+                       } else {
+                               map_abs(usage->hid & 0xf);
+                       }
+                       break;
+               case HID_GD_SLIDER: case HID_GD_DIAL:
                        if (field->flags & HID_MAIN_ITEM_RELATIVE)
                                map_rel(usage->hid & 0xf);
                        else
@@ -1012,7 +1020,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case 0x22f: map_key_clear(KEY_ZOOMRESET);       break;
                case 0x233: map_key_clear(KEY_SCROLLUP);        break;
                case 0x234: map_key_clear(KEY_SCROLLDOWN);      break;
-               case 0x238: map_rel(REL_HWHEEL);                break;
+               case 0x238: /* AC Pan */
+                       set_bit(REL_HWHEEL, input->relbit);
+                       map_rel(REL_HWHEEL_HI_RES);
+                       break;
                case 0x23d: map_key_clear(KEY_EDIT);            break;
                case 0x25f: map_key_clear(KEY_CANCEL);          break;
                case 0x269: map_key_clear(KEY_INSERT);          break;
@@ -1200,6 +1211,38 @@ ignore:
 
 }
 
+static void hidinput_handle_scroll(struct hid_usage *usage,
+                                  struct input_dev *input,
+                                  __s32 value)
+{
+       int code;
+       int hi_res, lo_res;
+
+       if (value == 0)
+               return;
+
+       if (usage->code == REL_WHEEL_HI_RES)
+               code = REL_WHEEL;
+       else
+               code = REL_HWHEEL;
+
+       /*
+        * Windows reports one wheel click as value 120. Where a high-res
+        * scroll wheel is present, a fraction of 120 is reported instead.
+        * Our REL_WHEEL_HI_RES axis does the same because all HW must
+        * adhere to the 120 expectation.
+        */
+       hi_res = value * 120/usage->resolution_multiplier;
+
+       usage->wheel_accumulated += hi_res;
+       lo_res = usage->wheel_accumulated/120;
+       if (lo_res)
+               usage->wheel_accumulated -= lo_res * 120;
+
+       input_event(input, EV_REL, code, lo_res);
+       input_event(input, EV_REL, usage->code, hi_res);
+}
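
[Editor's note: as a worked example of the accumulation above, a wheel reporting a resolution multiplier of 8 sends value 1 per micro-step, so each event contributes 120/8 = 15 hi-res units; after eight such events the accumulator reaches 120 and exactly one legacy REL_WHEEL click is emitted. A standalone sketch of the same bookkeeping:]

#include <stdio.h>

static int wheel_accumulated; /* per-usage accumulator in the driver */

/* Returns the number of low-res clicks to emit for one event. */
static int handle_scroll(int value, int resolution_multiplier)
{
        int hi_res = value * 120 / resolution_multiplier;
        int lo_res;

        wheel_accumulated += hi_res;
        lo_res = wheel_accumulated / 120;
        if (lo_res)
                wheel_accumulated -= lo_res * 120;
        return lo_res;
}

int main(void)
{
        int i, clicks = 0;

        /* Eight 1-unit events from a multiplier-8 wheel = one detent. */
        for (i = 0; i < 8; i++)
                clicks += handle_scroll(1, 8);
        printf("REL_WHEEL clicks: %d\n", clicks); /* prints 1 */
        return 0;
}
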
+
 void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
 {
        struct input_dev *input;
@@ -1262,6 +1305,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
        if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */
                return;
 
+       if ((usage->type == EV_REL) && (usage->code == REL_WHEEL_HI_RES ||
+                                       usage->code == REL_HWHEEL_HI_RES)) {
+               hidinput_handle_scroll(usage, input, value);
+               return;
+       }
+
        if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) &&
                        (usage->code == ABS_VOLUME)) {
                int count = abs(value);
@@ -1489,6 +1538,58 @@ static void hidinput_close(struct input_dev *dev)
        hid_hw_close(hid);
 }
 
+static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+{
+       struct hid_report_enum *rep_enum;
+       struct hid_report *rep;
+       struct hid_usage *usage;
+       int i, j;
+
+       rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+       list_for_each_entry(rep, &rep_enum->report_list, list) {
+               bool update_needed = false;
+
+               if (rep->maxfield == 0)
+                       continue;
+
+               /*
+                * If we have more than one feature within this report we
+                * need to fill in the bits from the others before we can
+                * overwrite the ones for the Resolution Multiplier.
+                */
+               if (rep->maxfield > 1) {
+                       hid_hw_request(hid, rep, HID_REQ_GET_REPORT);
+                       hid_hw_wait(hid);
+               }
+
+               for (i = 0; i < rep->maxfield; i++) {
+                       __s32 logical_max = rep->field[i]->logical_maximum;
+
+                       /* There is no good reason for a Resolution
+                        * Multiplier to have a count other than 1.
+                        * Ignore that case.
+                        */
+                       if (rep->field[i]->report_count != 1)
+                               continue;
+
+                       for (j = 0; j < rep->field[i]->maxusage; j++) {
+                               usage = &rep->field[i]->usage[j];
+
+                               if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
+                                       continue;
+
+                               *rep->field[i]->value = logical_max;
+                               update_needed = true;
+                       }
+               }
+               if (update_needed)
+                       hid_hw_request(hid, rep, HID_REQ_SET_REPORT);
+       }
+
+       /* refresh our structs */
+       hid_setup_resolution_multiplier(hid);
+}
+
 static void report_features(struct hid_device *hid)
 {
        struct hid_driver *drv = hid->driver;
@@ -1782,6 +1883,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
                }
        }
 
+       hidinput_change_resolution_multipliers(hid);
+
        list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
                if (drv->input_configured &&
                    drv->input_configured(hid, hidinput))
@@ -1840,4 +1943,3 @@ void hidinput_disconnect(struct hid_device *hid)
        cancel_work_sync(&hid->led_work);
 }
 EXPORT_SYMBOL_GPL(hidinput_disconnect);
-
index 19cc980eebce6a3019c44d55dcbef0002e1cda10..15ed6177a7a364d6b2634babe0df1be83b4cec7b 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/sched/clock.h>
 #include <linux/kfifo.h>
 #include <linux/input/mt.h>
 #include <linux/workqueue.h>
@@ -64,6 +65,14 @@ MODULE_PARM_DESC(disable_tap_to_click,
 #define HIDPP_QUIRK_NO_HIDINPUT                        BIT(23)
 #define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS       BIT(24)
 #define HIDPP_QUIRK_UNIFYING                   BIT(25)
+#define HIDPP_QUIRK_HI_RES_SCROLL_1P0          BIT(26)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2120                BIT(27)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2121                BIT(28)
+
+/* Convenience constant to check for any high-res support. */
+#define HIDPP_QUIRK_HI_RES_SCROLL      (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
+                                        HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
+                                        HIDPP_QUIRK_HI_RES_SCROLL_X2121)
 
 #define HIDPP_QUIRK_DELAYED_INIT               HIDPP_QUIRK_NO_HIDINPUT
 
@@ -128,6 +137,25 @@ struct hidpp_battery {
        bool online;
 };
 
+/**
+ * struct hidpp_scroll_counter - Utility class for processing high-resolution
+ *                             scroll events.
+ * @dev: the input device for which events should be reported.
+ * @wheel_multiplier: the scalar multiplier to be applied to each wheel event
+ * @remainder: counts the number of high-resolution units moved since the last
+ *             low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
+ *             only be used by class methods.
+ * @direction: direction of last movement (1 or -1)
+ * @last_time: last event time, used to reset remainder after inactivity
+ */
+struct hidpp_scroll_counter {
+       struct input_dev *dev;
+       int wheel_multiplier;
+       int remainder;
+       int direction;
+       unsigned long long last_time;
+};
+
 struct hidpp_device {
        struct hid_device *hid_dev;
        struct mutex send_mutex;
@@ -149,6 +177,7 @@ struct hidpp_device {
        unsigned long capabilities;
 
        struct hidpp_battery battery;
+       struct hidpp_scroll_counter vertical_wheel_counter;
 };
 
 /* HID++ 1.0 error codes */
@@ -391,6 +420,67 @@ static void hidpp_prefix_name(char **name, int name_length)
        *name = new_name;
 }
 
+/**
+ * hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
+ *                                        events given a high-resolution wheel
+ *                                        movement.
+ * @counter: a hidpp_scroll_counter struct describing the wheel.
+ * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
+ *                units.
+ *
+ * Given a high-resolution movement, this function converts the movement into
+ * fractions of 120 and emits high-resolution scroll events for the input
+ * device. It also uses the multiplier from &struct hidpp_scroll_counter to
+ * emit low-resolution scroll events when appropriate for
+ * backwards-compatibility with userspace input libraries.
+ */
+static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *counter,
+                                              int hi_res_value)
+{
+       int low_res_value, remainder, direction;
+       unsigned long long now, previous;
+
+       hi_res_value = hi_res_value * 120/counter->wheel_multiplier;
+       input_report_rel(counter->dev, REL_WHEEL_HI_RES, hi_res_value);
+
+       remainder = counter->remainder;
+       direction = hi_res_value > 0 ? 1 : -1;
+
+       now = sched_clock();
+       previous = counter->last_time;
+       counter->last_time = now;
+       /*
+        * Reset the remainder after a period of inactivity or when the
+        * direction changes. This prevents the REL_WHEEL emulation point
+        * from sliding for devices that don't always provide the same
+        * number of movements per detent.
+        */
+       if (now - previous > 1000000000 || direction != counter->direction)
+               remainder = 0;
+
+       counter->direction = direction;
+       remainder += hi_res_value;
+
+       /* Some wheels will rest 7/8ths of a detent from the previous detent
+        * after slow movement, so we want the threshold for low-res events to
+        * be in the middle between two detents (e.g. after 4/8ths) as
+        * opposed to on the detents themselves (8/8ths).
+        */
+       if (abs(remainder) >= 60) {
+               /* Emit at least one low-resolution unit once we are past the
+                * midpoint, so the wheel scrolls one detent after only a 1/2
+                * detent movement instead of waiting for a whole detent.
+                */
+               low_res_value = remainder / 120;
+               if (low_res_value == 0)
+                       low_res_value = (hi_res_value > 0 ? 1 : -1);
+               input_report_rel(counter->dev, REL_WHEEL, low_res_value);
+               remainder -= low_res_value * 120;
+       }
+       counter->remainder = remainder;
+}
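
[Editor's note: the midpoint logic above differs from the plain accumulator in hid-input by triggering at 60 rather than 120 units. A worked example: a multiplier-2 wheel reporting 1 unit per micro-step contributes 60 hi-res units per event, so a REL_WHEEL click fires on the 1st, 3rd, 5th... event, i.e. at each half-detent boundary. A standalone sketch of just that path; the inactivity and direction-change resets are omitted for brevity:]

#include <stdio.h>
#include <stdlib.h>

static int remainder_units; /* mirrors counter->remainder */

/* Low-res clicks for one hi-res movement, midpoint-triggered. */
static int counter_handle_scroll(int value, int wheel_multiplier)
{
        int hi_res = value * 120 / wheel_multiplier;
        int low_res = 0;

        remainder_units += hi_res;
        if (abs(remainder_units) >= 60) {
                low_res = remainder_units / 120;
                if (low_res == 0)
                        low_res = hi_res > 0 ? 1 : -1;
                remainder_units -= low_res * 120;
        }
        return low_res;
}

int main(void)
{
        int i;

        /* Multiplier-2 wheel: each 1-unit event is half a detent, so a
         * click fires on events 1 and 3 (at the midpoints). */
        for (i = 1; i <= 4; i++)
                printf("event %d -> %d click(s)\n",
                       i, counter_handle_scroll(1, 2));
        return 0;
}
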
+
 /* -------------------------------------------------------------------------- */
 /* HID++ 1.0 commands                                                         */
 /* -------------------------------------------------------------------------- */
@@ -400,32 +490,53 @@ static void hidpp_prefix_name(char **name, int name_length)
 #define HIDPP_SET_LONG_REGISTER                                0x82
 #define HIDPP_GET_LONG_REGISTER                                0x83
 
-#define HIDPP_REG_GENERAL                              0x00
-
-static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+/**
+ * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * @hidpp_dev: the device to set the register on.
+ * @register_address: the address of the register to modify.
+ * @byte: the byte of the register to modify. Should be less than 3.
+ * @bit: the bit within @byte to set.
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
+       u8 register_address, u8 byte, u8 bit)
 {
        struct hidpp_report response;
        int ret;
        u8 params[3] = { 0 };
 
        ret = hidpp_send_rap_command_sync(hidpp_dev,
-                                       REPORT_ID_HIDPP_SHORT,
-                                       HIDPP_GET_REGISTER,
-                                       HIDPP_REG_GENERAL,
-                                       NULL, 0, &response);
+                                         REPORT_ID_HIDPP_SHORT,
+                                         HIDPP_GET_REGISTER,
+                                         register_address,
+                                         NULL, 0, &response);
        if (ret)
                return ret;
 
        memcpy(params, response.rap.params, 3);
 
-       /* Set the battery bit */
-       params[0] |= BIT(4);
+       params[byte] |= BIT(bit);
 
        return hidpp_send_rap_command_sync(hidpp_dev,
-                                       REPORT_ID_HIDPP_SHORT,
-                                       HIDPP_SET_REGISTER,
-                                       HIDPP_REG_GENERAL,
-                                       params, 3, &response);
+                                          REPORT_ID_HIDPP_SHORT,
+                                          HIDPP_SET_REGISTER,
+                                          register_address,
+                                          params, 3, &response);
+}
+
+
+#define HIDPP_REG_GENERAL                              0x00
+
+static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+{
+       return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+}
+
+#define HIDPP_REG_FEATURES                             0x01
+
+/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
+static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
+{
+       return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
 }
 
 #define HIDPP_REG_BATTERY_STATUS                       0x07
@@ -1136,6 +1247,99 @@ static int hidpp_battery_get_property(struct power_supply *psy,
        return ret;
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x2120: Hi-resolution scrolling                                            */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING                     0x2120
+
+#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
+
+static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
+       bool enabled, u8 *multiplier)
+{
+       u8 feature_index;
+       u8 feature_type;
+       int ret;
+       u8 params[1];
+       struct hidpp_report response;
+
+       ret = hidpp_root_get_feature(hidpp,
+                                    HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+                                    &feature_index,
+                                    &feature_type);
+       if (ret)
+               return ret;
+
+       params[0] = enabled ? BIT(0) : 0;
+       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+                                         CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
+                                         params, sizeof(params), &response);
+       if (ret)
+               return ret;
+       *multiplier = response.fap.params[1];
+       return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x2121: HiRes Wheel                                                        */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HIRES_WHEEL         0x2121
+
+#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY   0x00
+#define CMD_HIRES_WHEEL_SET_WHEEL_MODE         0x20
+
+static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
+       u8 *multiplier)
+{
+       u8 feature_index;
+       u8 feature_type;
+       int ret;
+       struct hidpp_report response;
+
+       ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+                                    &feature_index, &feature_type);
+       if (ret)
+               goto return_default;
+
+       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+                                         CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
+                                         NULL, 0, &response);
+       if (ret)
+               goto return_default;
+
+       *multiplier = response.fap.params[0];
+       return 0;
+return_default:
+       hid_warn(hidpp->hid_dev,
+                "Couldn't get wheel multiplier (error %d)\n", ret);
+       return ret;
+}
+
+static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
+       bool high_resolution, bool use_hidpp)
+{
+       u8 feature_index;
+       u8 feature_type;
+       int ret;
+       u8 params[1];
+       struct hidpp_report response;
+
+       ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+                                    &feature_index, &feature_type);
+       if (ret)
+               return ret;
+
+       params[0] = (invert          ? BIT(2) : 0) |
+                   (high_resolution ? BIT(1) : 0) |
+                   (use_hidpp       ? BIT(0) : 0);
+
+       return hidpp_send_fap_command_sync(hidpp, feature_index,
+                                          CMD_HIRES_WHEEL_SET_WHEEL_MODE,
+                                          params, sizeof(params), &response);
+}
+
 /* -------------------------------------------------------------------------- */
 /* 0x4301: Solar Keyboard                                                     */
 /* -------------------------------------------------------------------------- */
@@ -1465,7 +1669,7 @@ struct hidpp_ff_work_data {
        u8 size;
 };
 
-static const signed short hiddpp_ff_effects[] = {
+static const signed short hidpp_ff_effects[] = {
        FF_CONSTANT,
        FF_PERIODIC,
        FF_SINE,
@@ -1480,7 +1684,7 @@ static const signed short hiddpp_ff_effects[] = {
        -1
 };
 
-static const signed short hiddpp_ff_effects_v2[] = {
+static const signed short hidpp_ff_effects_v2[] = {
        FF_RAMP,
        FF_FRICTION,
        FF_INERTIA,
@@ -1873,11 +2077,11 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        version = bcdDevice & 255;
 
        /* Set supported force feedback capabilities */
-       for (j = 0; hiddpp_ff_effects[j] >= 0; j++)
-               set_bit(hiddpp_ff_effects[j], dev->ffbit);
+       for (j = 0; hidpp_ff_effects[j] >= 0; j++)
+               set_bit(hidpp_ff_effects[j], dev->ffbit);
        if (version > 1)
-               for (j = 0; hiddpp_ff_effects_v2[j] >= 0; j++)
-                       set_bit(hiddpp_ff_effects_v2[j], dev->ffbit);
+               for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++)
+                       set_bit(hidpp_ff_effects_v2[j], dev->ffbit);
 
        /* Read number of slots available in device */
        error = hidpp_send_fap_command_sync(hidpp, feature_index,
@@ -2387,10 +2591,15 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                input_report_key(mydata->input, BTN_RIGHT,
                        !!(data[1] & M560_MOUSE_BTN_RIGHT));
 
-               if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT)
+               if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) {
                        input_report_rel(mydata->input, REL_HWHEEL, -1);
-               else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT)
+                       input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+                                        -120);
+               } else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) {
                        input_report_rel(mydata->input, REL_HWHEEL, 1);
+                       input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+                                        120);
+               }
 
                v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
                input_report_rel(mydata->input, REL_X, v);
@@ -2399,7 +2608,8 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                input_report_rel(mydata->input, REL_Y, v);
 
                v = hid_snto32(data[6], 8);
-               input_report_rel(mydata->input, REL_WHEEL, v);
+               hidpp_scroll_counter_handle_scroll(
+                               &hidpp->vertical_wheel_counter, v);
 
                input_sync(mydata->input);
        }
@@ -2426,6 +2636,8 @@ static void m560_populate_input(struct hidpp_device *hidpp,
        __set_bit(REL_Y, mydata->input->relbit);
        __set_bit(REL_WHEEL, mydata->input->relbit);
        __set_bit(REL_HWHEEL, mydata->input->relbit);
+       __set_bit(REL_WHEEL_HI_RES, mydata->input->relbit);
+       __set_bit(REL_HWHEEL_HI_RES, mydata->input->relbit);
 }
 
 static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -2527,6 +2739,37 @@ static int g920_get_config(struct hidpp_device *hidpp)
        return 0;
 }
 
+/* -------------------------------------------------------------------------- */
+/* High-resolution scroll wheels                                              */
+/* -------------------------------------------------------------------------- */
+
+static int hi_res_scroll_enable(struct hidpp_device *hidpp)
+{
+       int ret;
+       u8 multiplier = 1;
+
+       if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+               ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
+               if (ret == 0)
+                       ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
+       } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+               ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
+                                                          &multiplier);
+       } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */ {
+               ret = hidpp10_enable_scrolling_acceleration(hidpp);
+               multiplier = 8;
+       }
+       if (ret)
+               return ret;
+
+       if (multiplier == 0)
+               multiplier = 1;
+
+       hidpp->vertical_wheel_counter.wheel_multiplier = multiplier;
+       hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier);
+       return 0;
+}
+
 /* -------------------------------------------------------------------------- */
 /* Generic HID++ devices                                                      */
 /* -------------------------------------------------------------------------- */
@@ -2572,6 +2815,9 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
                wtp_populate_input(hidpp, input, origin_is_hid_core);
        else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
                m560_populate_input(hidpp, input, origin_is_hid_core);
+
+       if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+               hidpp->vertical_wheel_counter.dev = input;
 }
 
 static int hidpp_input_configured(struct hid_device *hdev,
@@ -2690,6 +2936,27 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
        return 0;
 }
 
+static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
+       struct hid_usage *usage, __s32 value)
+{
+       /* This function will only be called for scroll events, due to the
+        * restriction imposed in hidpp_usages.
+        */
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+       struct hidpp_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+       /* A scroll event may occur before the multiplier has been retrieved or
+        * the input device set, or high-res scroll enabling may fail. In such
+        * cases we must return early (falling back to default behaviour) to
+        * avoid a crash in hidpp_scroll_counter_handle_scroll.
+        */
+       if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
+           || counter->dev == NULL || counter->wheel_multiplier == 0)
+               return 0;
+
+       hidpp_scroll_counter_handle_scroll(counter, value);
+       return 1;
+}
+
 static int hidpp_initialize_battery(struct hidpp_device *hidpp)
 {
        static atomic_t battery_no = ATOMIC_INIT(0);
@@ -2901,6 +3168,9 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
        if (hidpp->battery.ps)
                power_supply_changed(hidpp->battery.ps);
 
+       if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+               hi_res_scroll_enable(hidpp);
+
        if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
                /* if the input nodes are already created, we can stop now */
                return;
@@ -3086,35 +3356,63 @@ static void hidpp_remove(struct hid_device *hdev)
        mutex_destroy(&hidpp->send_mutex);
 }
 
+#define LDJ_DEVICE(product) \
+       HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
+                  USB_VENDOR_ID_LOGITECH, (product))
+
 static const struct hid_device_id hidpp_devices[] = {
        { /* wireless touchpad */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x4011),
+         LDJ_DEVICE(0x4011),
          .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
                         HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
        { /* wireless touchpad T650 */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x4101),
+         LDJ_DEVICE(0x4101),
          .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
        { /* wireless touchpad T651 */
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_T651),
          .driver_data = HIDPP_QUIRK_CLASS_WTP },
+       { /* Mouse Logitech Anywhere MX */
+         LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+       { /* Mouse Logitech Cube */
+         LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+       { /* Mouse Logitech M335 */
+         LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech M515 */
+         LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
        { /* Mouse logitech M560 */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x402d),
-         .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
+         LDJ_DEVICE(0x402d),
+         .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
+               | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+       { /* Mouse Logitech M705 (firmware RQM17) */
+         LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+       { /* Mouse Logitech M705 (firmware RQM67) */
+         LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech M720 */
+         LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech MX Anywhere 2 */
+         LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech MX Anywhere 2S */
+         LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech MX Master */
+         LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech MX Master 2S */
+         LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+       { /* Mouse Logitech Performance MX */
+         LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
        { /* Keyboard logitech K400 */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x4024),
+         LDJ_DEVICE(0x4024),
          .driver_data = HIDPP_QUIRK_CLASS_K400 },
        { /* Solar Keyboard Logitech K750 */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x4002),
+         LDJ_DEVICE(0x4002),
          .driver_data = HIDPP_QUIRK_CLASS_K750 },
 
-       { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+       { LDJ_DEVICE(HID_ANY_ID) },
 
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
                .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
@@ -3123,12 +3421,19 @@ static const struct hid_device_id hidpp_devices[] = {
 
 MODULE_DEVICE_TABLE(hid, hidpp_devices);
 
+static const struct hid_usage_id hidpp_usages[] = {
+       { HID_GD_WHEEL, EV_REL, REL_WHEEL_HI_RES },
+       { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
 static struct hid_driver hidpp_driver = {
        .name = "logitech-hidpp-device",
        .id_table = hidpp_devices,
        .probe = hidpp_probe,
        .remove = hidpp_remove,
        .raw_event = hidpp_raw_event,
+       .usage_table = hidpp_usages,
+       .event = hidpp_event,
        .input_configured = hidpp_input_configured,
        .input_mapping = hidpp_input_mapping,
        .input_mapped = hidpp_input_mapped,
index e8a114157f87b81593225469dbb7f838cb450480..bb012bc032e02635a5501dd6f9961b0da49fabc2 100644 (file)
@@ -358,7 +358,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
                                                sensor_inst->hsdev,
                                                sensor_inst->hsdev->usage,
                                                usage, report_id,
-                                               SENSOR_HUB_SYNC);
+                                               SENSOR_HUB_SYNC, false);
        } else if (!strncmp(name, "units", strlen("units")))
                value = sensor_inst->fields[field_index].attribute.units;
        else if (!strncmp(name, "unit-expo", strlen("unit-expo")))
index 2b63487057c25b7fb931b8823db31d15f69667be..4256fdc5cd6d50db32f40447a37c2d310d8f1579 100644 (file)
@@ -299,7 +299,8 @@ EXPORT_SYMBOL_GPL(sensor_hub_get_feature);
 int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                                        u32 usage_id,
                                        u32 attr_usage_id, u32 report_id,
-                                       enum sensor_hub_read_flags flag)
+                                       enum sensor_hub_read_flags flag,
+                                       bool is_signed)
 {
        struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
        unsigned long flags;
@@ -331,10 +332,16 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                                                &hsdev->pending.ready, HZ*5);
                switch (hsdev->pending.raw_size) {
                case 1:
-                       ret_val = *(u8 *)hsdev->pending.raw_data;
+                       if (is_signed)
+                               ret_val = *(s8 *)hsdev->pending.raw_data;
+                       else
+                               ret_val = *(u8 *)hsdev->pending.raw_data;
                        break;
                case 2:
-                       ret_val = *(u16 *)hsdev->pending.raw_data;
+                       if (is_signed)
+                               ret_val = *(s16 *)hsdev->pending.raw_data;
+                       else
+                               ret_val = *(u16 *)hsdev->pending.raw_data;
                        break;
                case 4:
                        ret_val = *(u32 *)hsdev->pending.raw_data;
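
The new is_signed parameter matters because the raw report bytes are widened into the int return value: reading through a u8/u16 pointer zero-extends, while s8/s16 sign-extends. A standalone demonstration of the difference (plain userspace C; assumes a little-endian host):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* The bytes 0xf0 0xff encode -16 as a signed 16-bit value. */
	union { uint8_t b[2]; uint16_t u; int16_t s; } raw = {
		.b = { 0xf0, 0xff }
	};

	printf("unsigned: %u\n", (unsigned)raw.u);	/* 65520: zero-extended */
	printf("signed:   %d\n", (int)raw.s);		/* -16: sign-extended */
	return 0;
}
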
index 4a44e48e08b225a6180ad014604dabc83ce65c2d..9fc51eff10790a9f86a0ab5eadd2bb96ccfc5e76 100644 (file)
@@ -107,8 +107,6 @@ out:
 
 /*
  * The first byte of the report buffer is expected to be a report number.
- *
- * This function is to be called with the minors_lock mutex held.
  */
 static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, size_t count, unsigned char report_type)
 {
@@ -117,6 +115,8 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
        __u8 *buf;
        int ret = 0;
 
+       lockdep_assert_held(&minors_lock);
+
        if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
                ret = -ENODEV;
                goto out;
@@ -181,8 +181,6 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
  * of buffer is the report number to request, or 0x0 if the device does not
  * use numbered reports. The report_type parameter can be HID_FEATURE_REPORT
  * or HID_INPUT_REPORT.
- *
- * This function is to be called with the minors_lock mutex held.
  */
 static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t count, unsigned char report_type)
 {
@@ -192,6 +190,8 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
        int ret = 0, len;
        unsigned char report_number;
 
+       lockdep_assert_held(&minors_lock);
+
        if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
                ret = -ENODEV;
                goto out;
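
The two hidraw hunks above replace a locking rule that lived only in comments with lockdep_assert_held(), turning the rule into a runtime check whenever lockdep is enabled. A minimal sketch of the idiom (my_lock, my_state and my_update are hypothetical):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);
static int my_state;

/* With CONFIG_LOCKDEP=y, calling this without my_lock held triggers a
 * warning; with lockdep disabled, the assertion compiles away. */
static void my_update(int v)
{
	lockdep_assert_held(&my_lock);
	my_state = v;
}
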
index 8793cc49f8554c2b331323c86071b5371e4ce635..a6e1ee744f4d418c32745420b936afec111363dc 100644 (file)
@@ -117,6 +117,7 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int ret;
        struct ish_hw *hw;
+       unsigned long irq_flag = 0;
        struct ishtp_device *ishtp;
        struct device *dev = &pdev->dev;
 
@@ -156,8 +157,12 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
 
        /* request and enable interrupt */
+       ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+       if (!pdev->msi_enabled && !pdev->msix_enabled)
+               irq_flag = IRQF_SHARED;
+
        ret = devm_request_irq(dev, pdev->irq, ish_irq_handler,
-                              IRQF_SHARED, KBUILD_MODNAME, ishtp);
+                              irq_flag, KBUILD_MODNAME, ishtp);
        if (ret) {
                dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq);
                return ret;
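
The ish_probe() change follows a common PCI pattern: pci_alloc_irq_vectors() with PCI_IRQ_ALL_TYPES tries MSI-X, then MSI, then the legacy INTx line, and IRQF_SHARED is only appropriate in the legacy case, since message-based vectors are never shared. A hedged sketch of the pattern (my_request_irq and "my_driver" are hypothetical):

#include <linux/interrupt.h>
#include <linux/pci.h>

static int my_request_irq(struct pci_dev *pdev, irq_handler_t handler,
			  void *ctx)
{
	unsigned long irq_flag = 0;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		return ret;

	/* Only a legacy INTx line can be shared with other devices. */
	if (!pdev->msi_enabled && !pdev->msix_enabled)
		irq_flag = IRQF_SHARED;

	return devm_request_irq(&pdev->dev, pci_irq_vector(pdev, 0),
				handler, irq_flag, "my_driver", ctx);
}
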
index de8193f3b8381a38b40f9fe59b9daf7314cbcf47..fe00b12e44178a4b9f3f4f5bfd7b447457c417d5 100644 (file)
@@ -516,6 +516,14 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
        }
        wait_for_completion(&msginfo->waitevent);
 
+       if (msginfo->response.gpadl_created.creation_status != 0) {
+               pr_err("Failed to establish GPADL: err = 0x%x\n",
+                      msginfo->response.gpadl_created.creation_status);
+
+               ret = -EDQUOT;
+               goto cleanup;
+       }
+
        if (channel->rescind) {
                ret = -ENODEV;
                goto cleanup;
index 6277597d3d5818145c93eb561ff3f5f1017a0202..edd34c167a9bd44e70c34e24e506798562dc28a4 100644 (file)
@@ -435,61 +435,16 @@ void vmbus_free_channels(void)
        }
 }
 
-/*
- * vmbus_process_offer - Process the offer by creating a channel/device
- * associated with this offer
- */
-static void vmbus_process_offer(struct vmbus_channel *newchannel)
+/* Note: the function can run concurrently for primary/sub channels. */
+static void vmbus_add_channel_work(struct work_struct *work)
 {
-       struct vmbus_channel *channel;
-       bool fnew = true;
+       struct vmbus_channel *newchannel =
+               container_of(work, struct vmbus_channel, add_channel_work);
+       struct vmbus_channel *primary_channel = newchannel->primary_channel;
        unsigned long flags;
        u16 dev_type;
        int ret;
 
-       /* Make sure this is a new offer */
-       mutex_lock(&vmbus_connection.channel_mutex);
-
-       /*
-        * Now that we have acquired the channel_mutex,
-        * we can release the potentially racing rescind thread.
-        */
-       atomic_dec(&vmbus_connection.offer_in_progress);
-
-       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
-               if (!uuid_le_cmp(channel->offermsg.offer.if_type,
-                       newchannel->offermsg.offer.if_type) &&
-                       !uuid_le_cmp(channel->offermsg.offer.if_instance,
-                               newchannel->offermsg.offer.if_instance)) {
-                       fnew = false;
-                       break;
-               }
-       }
-
-       if (fnew)
-               list_add_tail(&newchannel->listentry,
-                             &vmbus_connection.chn_list);
-
-       mutex_unlock(&vmbus_connection.channel_mutex);
-
-       if (!fnew) {
-               /*
-                * Check to see if this is a sub-channel.
-                */
-               if (newchannel->offermsg.offer.sub_channel_index != 0) {
-                       /*
-                        * Process the sub-channel.
-                        */
-                       newchannel->primary_channel = channel;
-                       spin_lock_irqsave(&channel->lock, flags);
-                       list_add_tail(&newchannel->sc_list, &channel->sc_list);
-                       channel->num_sc++;
-                       spin_unlock_irqrestore(&channel->lock, flags);
-               } else {
-                       goto err_free_chan;
-               }
-       }
-
        dev_type = hv_get_dev_type(newchannel);
 
        init_vp_index(newchannel, dev_type);
@@ -507,27 +462,26 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
        /*
         * This state is used to indicate a successful open
         * so that when we do close the channel normally, we
-        * can cleanup properly
+        * can clean up properly.
         */
        newchannel->state = CHANNEL_OPEN_STATE;
 
-       if (!fnew) {
-               struct hv_device *dev
-                       = newchannel->primary_channel->device_obj;
+       if (primary_channel != NULL) {
+               /* newchannel is a sub-channel. */
+               struct hv_device *dev = primary_channel->device_obj;
 
                if (vmbus_add_channel_kobj(dev, newchannel))
-                       goto err_free_chan;
+                       goto err_deq_chan;
+
+               if (primary_channel->sc_creation_callback != NULL)
+                       primary_channel->sc_creation_callback(newchannel);
 
-               if (channel->sc_creation_callback != NULL)
-                       channel->sc_creation_callback(newchannel);
                newchannel->probe_done = true;
                return;
        }
 
        /*
-        * Start the process of binding this offer to the driver
-        * We need to set the DeviceObject field before calling
-        * vmbus_child_dev_add()
+        * Start the process of binding the primary channel to the driver
         */
        newchannel->device_obj = vmbus_device_create(
                &newchannel->offermsg.offer.if_type,
@@ -556,13 +510,28 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 
 err_deq_chan:
        mutex_lock(&vmbus_connection.channel_mutex);
-       list_del(&newchannel->listentry);
+
+       /*
+        * We need to set the flag, otherwise
+        * vmbus_onoffer_rescind() can be blocked.
+        */
+       newchannel->probe_done = true;
+
+       if (primary_channel == NULL) {
+               list_del(&newchannel->listentry);
+       } else {
+               spin_lock_irqsave(&primary_channel->lock, flags);
+               list_del(&newchannel->sc_list);
+               spin_unlock_irqrestore(&primary_channel->lock, flags);
+       }
+
        mutex_unlock(&vmbus_connection.channel_mutex);
 
        if (newchannel->target_cpu != get_cpu()) {
                put_cpu();
                smp_call_function_single(newchannel->target_cpu,
-                                        percpu_channel_deq, newchannel, true);
+                                        percpu_channel_deq,
+                                        newchannel, true);
        } else {
                percpu_channel_deq(newchannel);
                put_cpu();
@@ -570,14 +539,104 @@ err_deq_chan:
 
        vmbus_release_relid(newchannel->offermsg.child_relid);
 
-err_free_chan:
        free_channel(newchannel);
 }
 
+/*
+ * vmbus_process_offer - Process the offer by creating a channel/device
+ * associated with this offer
+ */
+static void vmbus_process_offer(struct vmbus_channel *newchannel)
+{
+       struct vmbus_channel *channel;
+       struct workqueue_struct *wq;
+       unsigned long flags;
+       bool fnew = true;
+
+       mutex_lock(&vmbus_connection.channel_mutex);
+
+       /*
+        * Now that we have acquired the channel_mutex,
+        * we can release the potentially racing rescind thread.
+        */
+       atomic_dec(&vmbus_connection.offer_in_progress);
+
+       list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+               if (!uuid_le_cmp(channel->offermsg.offer.if_type,
+                                newchannel->offermsg.offer.if_type) &&
+                   !uuid_le_cmp(channel->offermsg.offer.if_instance,
+                                newchannel->offermsg.offer.if_instance)) {
+                       fnew = false;
+                       break;
+               }
+       }
+
+       if (fnew)
+               list_add_tail(&newchannel->listentry,
+                             &vmbus_connection.chn_list);
+       else {
+               /*
+                * Check to see if this is a valid sub-channel.
+                */
+               if (newchannel->offermsg.offer.sub_channel_index == 0) {
+                       mutex_unlock(&vmbus_connection.channel_mutex);
+                       /*
+                        * Don't call free_channel(), because newchannel->kobj
+                        * is not initialized yet.
+                        */
+                       kfree(newchannel);
+                       WARN_ON_ONCE(1);
+                       return;
+               }
+               /*
+                * Process the sub-channel.
+                */
+               newchannel->primary_channel = channel;
+               spin_lock_irqsave(&channel->lock, flags);
+               list_add_tail(&newchannel->sc_list, &channel->sc_list);
+               spin_unlock_irqrestore(&channel->lock, flags);
+       }
+
+       mutex_unlock(&vmbus_connection.channel_mutex);
+
+       /*
+        * vmbus_process_offer() mustn't call channel->sc_creation_callback()
+        * directly for sub-channels, because sc_creation_callback() ->
+        * vmbus_open() may never get the host's response to the
+        * OPEN_CHANNEL message (the host may rescind a channel at any time,
+        * e.g. when hot removing a NIC), and vmbus_onoffer_rescind()
+        * may not wake up vmbus_open(), as it's blocked by a non-zero
+        * vmbus_connection.offer_in_progress; the result is a deadlock.
+        *
+        * The same is true for primary channels, if the related device
+        * drivers use sync probing mode by default.
+        *
+        * Also, the handling of primary channels and sub-channels can
+        * usually depend on each other, so we should offload them to
+        * different workqueues to avoid a possible deadlock: e.g., in
+        * sync-probing mode, NIC1's netvsc_subchan_work() can race with
+        * NIC2's netvsc_probe() -> rtnl_lock(), causing a deadlock: the
+        * former takes the rtnl_lock and waits for all the sub-channels
+        * to appear, while the latter can't take the rtnl_lock, which
+        * blocks the handling of the sub-channels.
+        */
+       INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
+       wq = fnew ? vmbus_connection.handle_primary_chan_wq :
+                   vmbus_connection.handle_sub_chan_wq;
+       queue_work(wq, &newchannel->add_channel_work);
+}
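
vmbus_add_channel_work() recovers its channel with container_of() from the embedded work_struct, which is the standard deferred-work shape. A minimal sketch of the same pattern under hypothetical names (my_obj, my_work_fn, my_defer):

#include <linux/workqueue.h>

struct my_obj {
	struct work_struct work;
	int payload;
};

static void my_work_fn(struct work_struct *work)
{
	/* Recover the enclosing object from the embedded work item. */
	struct my_obj *obj = container_of(work, struct my_obj, work);

	/* Heavy processing runs here, outside the caller's locks. */
	(void)obj;
}

static void my_defer(struct my_obj *obj, struct workqueue_struct *wq)
{
	INIT_WORK(&obj->work, my_work_fn);
	queue_work(wq, &obj->work);
}
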
+
 /*
  * We use this state to statically distribute the channel interrupt load.
  */
 static int next_numa_node_id;
+/*
+ * init_vp_index() accesses global variables like next_numa_node_id, and
+ * it can run concurrently for primary channels and sub-channels (see
+ * vmbus_process_offer()), so we need a lock to protect these global
+ * variables.
+ */
+static DEFINE_SPINLOCK(bind_channel_to_cpu_lock);
 
 /*
  * Starting with Win8, we can statically distribute the incoming
@@ -613,6 +672,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
                return;
        }
 
+       spin_lock(&bind_channel_to_cpu_lock);
+
        /*
         * Based on the channel affinity policy, we will assign the NUMA
         * nodes.
@@ -695,6 +756,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
        channel->target_cpu = cur_cpu;
        channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
 
+       spin_unlock(&bind_channel_to_cpu_lock);
+
        free_cpumask_var(available_mask);
 }
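
Because the add-channel work items can now run on two workqueues at once, the round-robin state behind init_vp_index() needs the new spinlock. A reduced sketch of the hazard being closed (pick_next_node and next_node are hypothetical):

#include <linux/spinlock.h>

static int next_node;
static DEFINE_SPINLOCK(next_node_lock);

/* Without the lock, two workers could read the same next_node value
 * and pick the same NUMA node, skewing the interrupt distribution. */
static int pick_next_node(int nr_nodes)
{
	int node;

	spin_lock(&next_node_lock);
	node = next_node++ % nr_nodes;
	spin_unlock(&next_node_lock);

	return node;
}
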
 
index f4d08c8ac7f8ff8f101cbe477826a0924b63170d..4fe117b761ce03a3d6351b86270d08853131d355 100644 (file)
@@ -190,6 +190,20 @@ int vmbus_connect(void)
                goto cleanup;
        }
 
+       vmbus_connection.handle_primary_chan_wq =
+               create_workqueue("hv_pri_chan");
+       if (!vmbus_connection.handle_primary_chan_wq) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
+       vmbus_connection.handle_sub_chan_wq =
+               create_workqueue("hv_sub_chan");
+       if (!vmbus_connection.handle_sub_chan_wq) {
+               ret = -ENOMEM;
+               goto cleanup;
+       }
+
        INIT_LIST_HEAD(&vmbus_connection.chn_msg_list);
        spin_lock_init(&vmbus_connection.channelmsg_lock);
 
@@ -280,10 +294,14 @@ void vmbus_disconnect(void)
         */
        vmbus_initiate_unload(false);
 
-       if (vmbus_connection.work_queue) {
-               drain_workqueue(vmbus_connection.work_queue);
+       if (vmbus_connection.handle_sub_chan_wq)
+               destroy_workqueue(vmbus_connection.handle_sub_chan_wq);
+
+       if (vmbus_connection.handle_primary_chan_wq)
+               destroy_workqueue(vmbus_connection.handle_primary_chan_wq);
+
+       if (vmbus_connection.work_queue)
                destroy_workqueue(vmbus_connection.work_queue);
-       }
 
        if (vmbus_connection.int_page) {
                free_pages((unsigned long)vmbus_connection.int_page, 0);
index a7513a8a8e3728d0de762d62f7337f527df34544..d6106e1a0d4af597d04cb833215c3b27c0b2aa0d 100644 (file)
@@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
 
                out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
 
+               /* fallthrough */
+
+       case KVP_OP_GET_IP_INFO:
                utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
                                MAX_ADAPTER_ID_SIZE,
                                UTF16_LITTLE_ENDIAN,
@@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy)
                process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
                break;
        case KVP_OP_GET_IP_INFO:
-               /* We only need to pass on message->kvp_hdr.operation.  */
+               /*
+                * We only need to pass the operation, adapter_id and
+                * addr_family info on to the userland kvp daemon.
+                */
+               process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
                break;
        case KVP_OP_SET:
                switch (in_msg->body.kvp_set.data.value_type) {
@@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy)
 
                }
 
-               break;
-
-       case KVP_OP_GET:
+               /*
+                * The key is always a string, in UTF-16 encoding.
+                */
                message->body.kvp_set.data.key_size =
                        utf16s_to_utf8s(
                        (wchar_t *)in_msg->body.kvp_set.data.key,
@@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy)
                        UTF16_LITTLE_ENDIAN,
                        message->body.kvp_set.data.key,
                        HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
+
+               break;
+
+       case KVP_OP_GET:
+               message->body.kvp_get.data.key_size =
+                       utf16s_to_utf8s(
+                       (wchar_t *)in_msg->body.kvp_get.data.key,
+                       in_msg->body.kvp_get.data.key_size,
+                       UTF16_LITTLE_ENDIAN,
+                       message->body.kvp_get.data.key,
+                       HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
                break;
 
        case KVP_OP_DELETE:
index 72eaba3d50fc26da141993c5f1eadb9916d1f94d..87d3d7da78f876198e0a160f3c39b60044fbcad4 100644 (file)
@@ -335,7 +335,14 @@ struct vmbus_connection {
        struct list_head chn_list;
        struct mutex channel_mutex;
 
+       /*
+        * An offer message is handled first on the work_queue, and then
+        * is further handled on handle_primary_chan_wq or
+        * handle_sub_chan_wq.
+        */
        struct workqueue_struct *work_queue;
+       struct workqueue_struct *handle_primary_chan_wq;
+       struct workqueue_struct *handle_sub_chan_wq;
 };
 
 
index 975c951698846bffb8ae98ec4a70759e60a1f11b..84f61cec6319c8eb65ccc80553cbbdfde0542cc9 100644 (file)
@@ -649,8 +649,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
                                if (info[i]->config[j] & HWMON_T_INPUT) {
                                        err = hwmon_thermal_add_sensor(dev,
                                                                hwdev, j);
-                                       if (err)
-                                               goto free_device;
+                                       if (err) {
+                                               device_unregister(hdev);
+                                               goto ida_remove;
+                                       }
                                }
                        }
                }
@@ -658,8 +660,6 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
 
        return hdev;
 
-free_device:
-       device_unregister(hdev);
 free_hwmon:
        kfree(hwdev);
 ida_remove:
index 0ccca87f527191dc000649d1a0b1eaf44c87d35b..293dd1c6c7b36ef2b0770cf76e465aaea22b4673 100644 (file)
@@ -181,7 +181,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
        return sprintf(buf, "%s\n", sdata->label);
 }
 
-static int __init get_logical_cpu(int hwcpu)
+static int get_logical_cpu(int hwcpu)
 {
        int cpu;
 
@@ -192,9 +192,8 @@ static int __init get_logical_cpu(int hwcpu)
        return -ENOENT;
 }
 
-static void __init make_sensor_label(struct device_node *np,
-                                    struct sensor_data *sdata,
-                                    const char *label)
+static void make_sensor_label(struct device_node *np,
+                             struct sensor_data *sdata, const char *label)
 {
        u32 id;
        size_t n;
index 71d3445ba869c85654ae3dcaf3a5460e8fadb268..07ee19573b3f0f8d65d6a64710fb1bea557b42eb 100644 (file)
@@ -274,7 +274,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
                break;
        case INA2XX_CURRENT:
                /* signed register, result in mA */
-               val = regval * data->current_lsb_uA;
+               val = (s16)regval * data->current_lsb_uA;
                val = DIV_ROUND_CLOSEST(val, 1000);
                break;
        case INA2XX_CALIBRATION:
@@ -491,7 +491,7 @@ static int ina2xx_probe(struct i2c_client *client,
        }
 
        data->groups[group++] = &ina2xx_group;
-       if (id->driver_data == ina226)
+       if (chip == ina226)
                data->groups[group++] = &ina226_group;
 
        hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
@@ -500,7 +500,7 @@ static int ina2xx_probe(struct i2c_client *client,
                return PTR_ERR(hwmon_dev);
 
        dev_info(dev, "power monitor %s (Rshunt = %li uOhm)\n",
-                id->name, data->rshunt);
+                client->name, data->rshunt);
 
        return 0;
 }
index de46577c7d5a1711447f0f249604635260c8f958..d8fa4bea4bc8450660fc5b468c4de8d7fad1002c 100644 (file)
@@ -51,7 +51,7 @@
  */
 #define MLXREG_FAN_GET_RPM(rval, d, s) (DIV_ROUND_CLOSEST(15000000 * 100, \
                                         ((rval) + (s)) * (d)))
-#define MLXREG_FAN_GET_FAULT(val, mask) (!!((val) ^ (mask)))
+#define MLXREG_FAN_GET_FAULT(val, mask) (!((val) ^ (mask)))
 #define MLXREG_FAN_PWM_DUTY2STATE(duty)        (DIV_ROUND_CLOSEST((duty) *     \
                                         MLXREG_FAN_MAX_STATE,          \
                                         MLXREG_FAN_MAX_DUTY))
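
The one-character fix above inverts the macro's sense: the fan appears to be reported faulty when the masked tachometer value equals the fault mask itself, so the test is equality, !((val) ^ (mask)), rather than inequality. A tiny standalone check of the fixed logic (the sample values are illustrative only):

#include <stdio.h>

#define GET_FAULT(val, mask)	(!((val) ^ (mask)))

int main(void)
{
	unsigned int mask = 0xff;

	printf("%d\n", GET_FAULT(0xffu, mask));	/* 1: reads as the fault pattern */
	printf("%d\n", GET_FAULT(0x3au, mask));	/* 0: a normal tacho reading */
	return 0;
}
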
index be5ba469089531b26b0ba2d0b1866c20f3e5312b..0d0457245e7d0e19f1acad510cb5a3d295ed812f 100644 (file)
@@ -115,7 +115,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct rpi_hwmon_data *data;
-       int ret;
 
        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
@@ -124,11 +123,6 @@ static int rpi_hwmon_probe(struct platform_device *pdev)
        /* The parent driver assures that the firmware is correct */
        data->fw = dev_get_drvdata(dev->parent);
 
-       /* Init throttled */
-       ret = rpi_firmware_property(data->fw, RPI_FIRMWARE_GET_THROTTLED,
-                                   &data->last_throttled,
-                                   sizeof(data->last_throttled));
-
        data->hwmon_dev = devm_hwmon_device_register_with_info(dev, "rpi_volt",
                                                               data,
                                                               &rpi_chip_info,
index 49276bbdac3ddf0dc1153cb1941cf28f0ac10ea7..1bb80f992aa8609ed6df25973548efdfdbc7fe8c 100644 (file)
@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
  * somewhere else in the code
  */
 #define SENSOR_ATTR_TEMP(index) {                                      \
-       SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
+       SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
                show_temp_mode, store_temp_mode, NOT_USED, index - 1),  \
        SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp,          \
                NULL, TEMP_READ, index - 1),                            \
index 56ccb1ea7da5b405e904d90ac38b17303a3d2faa..f2c6819712013046246002346af928bd1ab16bc0 100644 (file)
@@ -224,6 +224,15 @@ config I2C_NFORCE2_S4985
          This driver can also be built as a module.  If so, the module
          will be called i2c-nforce2-s4985.
 
+config I2C_NVIDIA_GPU
+       tristate "NVIDIA GPU I2C controller"
+       depends on PCI
+       help
+         If you say yes to this option, support will be included for the
+         NVIDIA GPU I2C controller, which is used to communicate with the
+         GPU's Type-C controller. This driver can also be built as a module
+         called i2c-nvidia-gpu.
+
 config I2C_SIS5595
        tristate "SiS 5595"
        depends on PCI
@@ -752,7 +761,7 @@ config I2C_OCORES
 
 config I2C_OMAP
        tristate "OMAP I2C adapter"
-       depends on ARCH_OMAP
+       depends on ARCH_OMAP || ARCH_K3
        default y if MACH_OMAP_H3 || MACH_OMAP_OSK
        help
          If you say yes to this option, support will be included for the
index 18b26af82b1c5425a9dcec9c61cca3cdff694d60..5f0cb6915969aa98d5722b02e0fe9cb9a1ae25a7 100644 (file)
@@ -19,6 +19,7 @@ obj-$(CONFIG_I2C_ISCH)                += i2c-isch.o
 obj-$(CONFIG_I2C_ISMT)         += i2c-ismt.o
 obj-$(CONFIG_I2C_NFORCE2)      += i2c-nforce2.o
 obj-$(CONFIG_I2C_NFORCE2_S4985)        += i2c-nforce2-s4985.o
+obj-$(CONFIG_I2C_NVIDIA_GPU)   += i2c-nvidia-gpu.o
 obj-$(CONFIG_I2C_PIIX4)                += i2c-piix4.o
 obj-$(CONFIG_I2C_SIS5595)      += i2c-sis5595.o
 obj-$(CONFIG_I2C_SIS630)       += i2c-sis630.o
index 8e60048a33f8f88b5e10cf48d0cfc3a84f781424..51d34959709bade4c9baed0f14770ec4cb9719ea 100644 (file)
@@ -74,8 +74,7 @@
                                 MST_STATUS_ND)
 #define   MST_STATUS_ERR       (MST_STATUS_NAK | \
                                 MST_STATUS_AL  | \
-                                MST_STATUS_IP  | \
-                                MST_STATUS_TSS)
+                                MST_STATUS_IP)
 #define MST_TX_BYTES_XFRD      0x50
 #define MST_RX_BYTES_XFRD      0x54
 #define SCL_HIGH_PERIOD                0x80
@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
                         */
                        if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
                                idev->msg_err = -EPROTO;
-                               i2c_int_disable(idev, ~0);
+                               i2c_int_disable(idev, ~MST_STATUS_TSS);
                                complete(&idev->msg_complete);
                                break;
                        }
@@ -299,14 +298,19 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
 
        if (status & MST_STATUS_SCC) {
                /* Stop completed */
-               i2c_int_disable(idev, ~0);
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
                complete(&idev->msg_complete);
        } else if (status & MST_STATUS_SNS) {
                /* Transfer done */
-               i2c_int_disable(idev, ~0);
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
                if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
                        axxia_i2c_empty_rx_fifo(idev);
                complete(&idev->msg_complete);
+       } else if (status & MST_STATUS_TSS) {
+               /* Transfer timeout */
+               idev->msg_err = -ETIMEDOUT;
+               i2c_int_disable(idev, ~MST_STATUS_TSS);
+               complete(&idev->msg_complete);
        } else if (unlikely(status & MST_STATUS_ERR)) {
                /* Transfer error */
                i2c_int_disable(idev, ~0);
@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
        u32 rx_xfer, tx_xfer;
        u32 addr_1, addr_2;
        unsigned long time_left;
+       unsigned int wt_value;
 
        idev->msg = msg;
        idev->msg_xfrd = 0;
-       idev->msg_err = 0;
        reinit_completion(&idev->msg_complete);
 
        if (i2c_m_ten(msg)) {
@@ -383,9 +387,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
        else if (axxia_i2c_fill_tx_fifo(idev) != 0)
                int_mask |= MST_STATUS_TFL;
 
+       wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
+       /* Disable the wait timer temporarily */
+       writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
+       /* Check if timeout error happened */
+       if (idev->msg_err)
+               goto out;
+
        /* Start manual mode */
        writel(CMD_MANUAL, idev->base + MST_COMMAND);
 
+       writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
+
        i2c_int_enable(idev, int_mask);
 
        time_left = wait_for_completion_timeout(&idev->msg_complete,
@@ -396,13 +409,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
        if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
                dev_warn(idev->dev, "busy after xfer\n");
 
-       if (time_left == 0)
+       if (time_left == 0) {
                idev->msg_err = -ETIMEDOUT;
-
-       if (idev->msg_err == -ETIMEDOUT)
                i2c_recover_bus(&idev->adapter);
+               axxia_i2c_init(idev);
+       }
 
-       if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
+out:
+       if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
+                       idev->msg_err != -ETIMEDOUT)
                axxia_i2c_init(idev);
 
        return idev->msg_err;
@@ -410,7 +425,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
 
 static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
 {
-       u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
+       u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
        unsigned long time_left;
 
        reinit_completion(&idev->msg_complete);
@@ -437,6 +452,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        int i;
        int ret = 0;
 
+       idev->msg_err = 0;
+       i2c_int_enable(idev, MST_STATUS_TSS);
+
        for (i = 0; ret == 0 && i < num; ++i)
                ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
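
The reworked axxia flow hinges on the ISR recording an error code and signalling a completion that the transfer path waits on with a timeout; keeping MST_STATUS_TSS enabled across messages lets the hardware wait timer trip that path. A reduced sketch of the completion handshake (my_xfer and friends are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>

struct my_xfer {
	struct completion done;
	int err;
};

static void my_xfer_init(struct my_xfer *x)
{
	init_completion(&x->done);
	x->err = 0;
}

/* ISR side: record the outcome, then wake the submitter. */
static void my_isr_finish(struct my_xfer *x, int err)
{
	x->err = err;
	complete(&x->done);
}

/* Submitter side: wait, falling back to -ETIMEDOUT if no IRQ arrives. */
static int my_wait_xfer(struct my_xfer *x, unsigned long timeout)
{
	if (!wait_for_completion_timeout(&x->done, timeout))
		x->err = -ETIMEDOUT;
	return x->err;
}
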
 
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
new file mode 100644 (file)
index 0000000..e99c3bb
--- /dev/null
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nvidia GPU I2C controller Driver
+ *
+ * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+/* I2C definitions */
+#define I2C_MST_CNTL                           0x00
+#define I2C_MST_CNTL_GEN_START                 BIT(0)
+#define I2C_MST_CNTL_GEN_STOP                  BIT(1)
+#define I2C_MST_CNTL_CMD_READ                  (1 << 2)
+#define I2C_MST_CNTL_CMD_WRITE                 (2 << 2)
+#define I2C_MST_CNTL_BURST_SIZE_SHIFT          6
+#define I2C_MST_CNTL_GEN_NACK                  BIT(28)
+#define I2C_MST_CNTL_STATUS                    GENMASK(30, 29)
+#define I2C_MST_CNTL_STATUS_OKAY               (0 << 29)
+#define I2C_MST_CNTL_STATUS_NO_ACK             (1 << 29)
+#define I2C_MST_CNTL_STATUS_TIMEOUT            (2 << 29)
+#define I2C_MST_CNTL_STATUS_BUS_BUSY           (3 << 29)
+#define I2C_MST_CNTL_CYCLE_TRIGGER             BIT(31)
+
+#define I2C_MST_ADDR                           0x04
+
+#define I2C_MST_I2C0_TIMING                            0x08
+#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ          0x10e
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT            16
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX                255
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK              BIT(24)
+
+#define I2C_MST_DATA                                   0x0c
+
+#define I2C_MST_HYBRID_PADCTL                          0x20
+#define I2C_MST_HYBRID_PADCTL_MODE_I2C                 BIT(0)
+#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV                BIT(14)
+#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV                BIT(15)
+
+struct gpu_i2c_dev {
+       struct device *dev;
+       void __iomem *regs;
+       struct i2c_adapter adapter;
+       struct i2c_board_info *gpu_ccgx_ucsi;
+};
+
+static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+{
+       u32 val;
+
+       /* enable I2C */
+       val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
+       val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
+               I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+               I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
+       writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
+
+       /* enable 100KHZ mode */
+       val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
+       val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
+           << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
+       val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
+       writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
+}
+
+static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
+{
+       unsigned long target = jiffies + msecs_to_jiffies(1000);
+       u32 val;
+
+       do {
+               val = readl(i2cd->regs + I2C_MST_CNTL);
+               if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
+                       break;
+               if ((val & I2C_MST_CNTL_STATUS) !=
+                               I2C_MST_CNTL_STATUS_BUS_BUSY)
+                       break;
+               usleep_range(500, 600);
+       } while (time_is_after_jiffies(target));
+
+       if (time_is_before_jiffies(target)) {
+               dev_err(i2cd->dev, "i2c timeout error %x\n", val);
+               return -ETIMEDOUT;
+       }
+
+       val = readl(i2cd->regs + I2C_MST_CNTL);
+       switch (val & I2C_MST_CNTL_STATUS) {
+       case I2C_MST_CNTL_STATUS_OKAY:
+               return 0;
+       case I2C_MST_CNTL_STATUS_NO_ACK:
+               return -ENXIO;
+       case I2C_MST_CNTL_STATUS_TIMEOUT:
+               return -ETIMEDOUT;
+       default:
+               return 0;
+       }
+}
+
+static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
+{
+       int status;
+       u32 val;
+
+       val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
+               (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
+               I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
+       writel(val, i2cd->regs + I2C_MST_CNTL);
+
+       status = gpu_i2c_check_status(i2cd);
+       if (status < 0)
+               return status;
+
+       val = readl(i2cd->regs + I2C_MST_DATA);
+       switch (len) {
+       case 1:
+               data[0] = val;
+               break;
+       case 2:
+               put_unaligned_be16(val, data);
+               break;
+       case 3:
+               put_unaligned_be16(val >> 8, data);
+               data[2] = val;
+               break;
+       case 4:
+               put_unaligned_be32(val, data);
+               break;
+       default:
+               break;
+       }
+       return status;
+}
+
+static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
+{
+       writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
+       return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
+{
+       writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
+       return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
+{
+       u32 val;
+
+       writel(data, i2cd->regs + I2C_MST_DATA);
+
+       val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
+       writel(val, i2cd->regs + I2C_MST_CNTL);
+
+       return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
+                              struct i2c_msg *msgs, int num)
+{
+       struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
+       int status, status2;
+       int i, j;
+
+       /*
+        * The controller supports a maximum 4-byte read due to a known
+        * limitation: it sends a STOP after every read.
+        */
+       for (i = 0; i < num; i++) {
+               if (msgs[i].flags & I2C_M_RD) {
+                       /* program client address before starting read */
+                       writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
+                       /* gpu_i2c_read has implicit start */
+                       status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
+                       if (status < 0)
+                               goto stop;
+               } else {
+                       u8 addr = i2c_8bit_addr_from_msg(msgs + i);
+
+                       status = gpu_i2c_start(i2cd);
+                       if (status < 0) {
+                               if (i == 0)
+                                       return status;
+                               goto stop;
+                       }
+
+                       status = gpu_i2c_write(i2cd, addr);
+                       if (status < 0)
+                               goto stop;
+
+                       for (j = 0; j < msgs[i].len; j++) {
+                               status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
+                               if (status < 0)
+                                       goto stop;
+                       }
+               }
+       }
+       status = gpu_i2c_stop(i2cd);
+       if (status < 0)
+               return status;
+
+       return i;
+stop:
+       status2 = gpu_i2c_stop(i2cd);
+       if (status2 < 0)
+               dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
+       return status;
+}
+
+static const struct i2c_adapter_quirks gpu_i2c_quirks = {
+       .max_read_len = 4,
+       .max_comb_2nd_msg_len = 4,
+       .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm gpu_i2c_algorithm = {
+       .master_xfer    = gpu_i2c_master_xfer,
+       .functionality  = gpu_i2c_functionality,
+};
+
+/*
+ * This driver is for Nvidia GPU cards with a USB Type-C interface.
+ * We want to identify the cards using only the vendor ID and class code,
+ * to avoid having to add a product ID for every new card that requires
+ * this driver.
+ * Currently there is no class code defined for a UCSI device over PCI,
+ * so we use the UNKNOWN class for now; this will be updated once UCSI
+ * over PCI gets a class code.
+ * There are no other NVIDIA cards with the UNKNOWN class code. Even if
+ * the driver gets loaded for an undesired card, i2c_read() (initiated
+ * from the UCSI i2c_client) or the UCSI commands will eventually time
+ * out.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN       0x0c80
+static const struct pci_device_id gpu_i2c_ids[] = {
+       { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+               PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
+       { }
+};
+MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+
+static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+{
+       struct i2c_client *ccgx_client;
+
+       i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+                                          sizeof(*i2cd->gpu_ccgx_ucsi),
+                                          GFP_KERNEL);
+       if (!i2cd->gpu_ccgx_ucsi)
+               return -ENOMEM;
+
+       strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
+               sizeof(i2cd->gpu_ccgx_ucsi->type));
+       i2cd->gpu_ccgx_ucsi->addr = 0x8;
+       i2cd->gpu_ccgx_ucsi->irq = irq;
+       ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+       if (!ccgx_client)
+               return -ENODEV;
+
+       return 0;
+}
+
+static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       struct gpu_i2c_dev *i2cd;
+       int status;
+
+       i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
+       if (!i2cd)
+               return -ENOMEM;
+
+       i2cd->dev = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, i2cd);
+
+       status = pcim_enable_device(pdev);
+       if (status < 0) {
+               dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
+               return status;
+       }
+
+       pci_set_master(pdev);
+
+       i2cd->regs = pcim_iomap(pdev, 0, 0);
+       if (!i2cd->regs) {
+               dev_err(&pdev->dev, "pcim_iomap failed\n");
+               return -ENOMEM;
+       }
+
+       status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+       if (status < 0) {
+               dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
+               return status;
+       }
+
+       gpu_enable_i2c_bus(i2cd);
+
+       i2c_set_adapdata(&i2cd->adapter, i2cd);
+       i2cd->adapter.owner = THIS_MODULE;
+       strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+               sizeof(i2cd->adapter.name));
+       i2cd->adapter.algo = &gpu_i2c_algorithm;
+       i2cd->adapter.quirks = &gpu_i2c_quirks;
+       i2cd->adapter.dev.parent = &pdev->dev;
+       status = i2c_add_adapter(&i2cd->adapter);
+       if (status < 0)
+               goto free_irq_vectors;
+
+       status = gpu_populate_client(i2cd, pdev->irq);
+       if (status < 0) {
+               dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
+               goto del_adapter;
+       }
+
+       return 0;
+
+del_adapter:
+       i2c_del_adapter(&i2cd->adapter);
+free_irq_vectors:
+       pci_free_irq_vectors(pdev);
+       return status;
+}
+
+static void gpu_i2c_remove(struct pci_dev *pdev)
+{
+       struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
+
+       i2c_del_adapter(&i2cd->adapter);
+       pci_free_irq_vectors(pdev);
+}
+
+static int gpu_i2c_resume(struct device *dev)
+{
+       struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+       gpu_enable_i2c_bus(i2cd);
+       return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
+
+static struct pci_driver gpu_i2c_driver = {
+       .name           = "nvidia-gpu",
+       .id_table       = gpu_i2c_ids,
+       .probe          = gpu_i2c_probe,
+       .remove         = gpu_i2c_remove,
+       .driver         = {
+               .pm     = &gpu_i2c_driver_pm,
+       },
+};
+
+module_pci_driver(gpu_i2c_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia GPU I2C controller Driver");
+MODULE_LICENSE("GPL v2");
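
Since the ID table in this new driver matches on PCI class rather than device ID, the mask arithmetic is worth spelling out: a device binds when (dev_class & class_mask) == class, and the 0xffffff00 mask accepts any programming-interface byte. A standalone check (the sample device class value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int class = 0x0c80 << 8;	/* serial bus controller, "other" subclass */
	unsigned int mask  = 0xffffff00;	/* prog-if byte is wildcarded */
	unsigned int dev   = 0x0c8042;		/* hypothetical device, prog-if 0x42 */

	printf("binds: %s\n", (dev & mask) == class ? "yes" : "no");
	return 0;
}
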
index 527f55c8c4c70e560a9787a610c68017fbb10235..db075bc0d9525d62a7b366abd7c6eb1edeaaa76c 100644 (file)
@@ -571,18 +571,19 @@ static int geni_i2c_probe(struct platform_device *pdev)
 
        dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
 
-       ret = i2c_add_adapter(&gi2c->adap);
-       if (ret) {
-               dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
-               return ret;
-       }
-
        gi2c->suspended = 1;
        pm_runtime_set_suspended(gi2c->se.dev);
        pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
        pm_runtime_use_autosuspend(gi2c->se.dev);
        pm_runtime_enable(gi2c->se.dev);
 
+       ret = i2c_add_adapter(&gi2c->adap);
+       if (ret) {
+               dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+               pm_runtime_disable(gi2c->se.dev);
+               return ret;
+       }
+
        return 0;
 }
 
@@ -590,8 +591,8 @@ static int geni_i2c_remove(struct platform_device *pdev)
 {
        struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
 
-       pm_runtime_disable(gi2c->se.dev);
        i2c_del_adapter(&gi2c->adap);
+       pm_runtime_disable(gi2c->se.dev);
        return 0;
 }
 
index 4aa7dde876f3f23dd38e2799270b1340aca3af2c..254e6219e5389f17114185c57470914562c2bed6 100644 (file)
@@ -779,6 +779,11 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        pm_runtime_get_sync(dev);
 
+       /* Check bus state before init, otherwise bus busy info will be lost */
+       ret = rcar_i2c_bus_barrier(priv);
+       if (ret < 0)
+               goto out;
+
        /* Gen3 needs a reset before allowing RXDMA once */
        if (priv->devtype == I2C_RCAR_GEN3) {
                priv->flags |= ID_P_NO_RXDMA;
@@ -791,10 +796,6 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        rcar_i2c_init(priv);
 
-       ret = rcar_i2c_bus_barrier(priv);
-       if (ret < 0)
-               goto out;
-
        for (i = 0; i < num; i++)
                rcar_i2c_request_dma(priv, msgs + i);
 
index 7e9a2bbf5ddcb967459367778a834c8314ff6f2b..ff3f4553648f3c29a8c576172fc4c342cef6a94b 100644 (file)
@@ -367,6 +367,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
 {
        struct acpi_smbus_cmi *smbus_cmi;
        const struct acpi_device_id *id;
+       int ret;
 
        smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
        if (!smbus_cmi)
@@ -388,8 +389,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
        acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
                            acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
 
-       if (smbus_cmi->cap_info == 0)
+       if (smbus_cmi->cap_info == 0) {
+               ret = -ENODEV;
                goto err;
+       }
 
        snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
                "SMBus CMI adapter %s",
@@ -400,7 +403,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
        smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
        smbus_cmi->adapter.dev.parent = &device->dev;
 
-       if (i2c_add_adapter(&smbus_cmi->adapter)) {
+       ret = i2c_add_adapter(&smbus_cmi->adapter);
+       if (ret) {
                dev_err(&device->dev, "Couldn't register adapter!\n");
                goto err;
        }
@@ -410,7 +414,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
 err:
        kfree(smbus_cmi);
        device->driver_data = NULL;
-       return -EIO;
+       return ret;
 }
 
 static int acpi_smbus_cmi_remove(struct acpi_device *device)
index dd384743dbbd0581ffc2b3c7f179e158bebdf10b..03da4a539a2f2ab9289b7d1ce589d8fa75f3511b 100644 (file)
@@ -173,8 +173,6 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
                "interrupt: enabled_irqs=%04x, irq_status=%04x\n",
                priv->enabled_irqs, irq_status);
 
-       uniphier_fi2c_clear_irqs(priv, irq_status);
-
        if (irq_status & UNIPHIER_FI2C_INT_STOP)
                goto complete;
 
@@ -214,7 +212,13 @@ static irqreturn_t uniphier_fi2c_interrupt(int irq, void *dev_id)
 
        if (irq_status & (UNIPHIER_FI2C_INT_RF | UNIPHIER_FI2C_INT_RB)) {
                uniphier_fi2c_drain_rxfifo(priv);
-               if (!priv->len)
+               /*
+                * If the number of bytes to read is a multiple of the FIFO
+                * size (msg->len == 8, 16, 24, ...), the INT_RF bit is set a
+                * little earlier than INT_RB. We wait for INT_RB to confirm
+                * the completion of the current message.
+                */
+               if (!priv->len && (irq_status & UNIPHIER_FI2C_INT_RB))
                        goto data_done;
 
                if (unlikely(priv->flags & UNIPHIER_FI2C_MANUAL_NACK)) {
@@ -253,12 +257,20 @@ complete:
        }
 
 handled:
+       /*
+        * This controller pauses while any bit of the IRQ status is
+        * asserted. Clear the asserted bits to kick the controller just
+        * before exiting the handler.
+        */
+       uniphier_fi2c_clear_irqs(priv, irq_status);
+
        spin_unlock(&priv->lock);
 
        return IRQ_HANDLED;
 }
 
-static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
+static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr,
+                                 bool repeat)
 {
        priv->enabled_irqs |= UNIPHIER_FI2C_INT_TE;
        uniphier_fi2c_set_irqs(priv);
@@ -268,8 +280,12 @@ static void uniphier_fi2c_tx_init(struct uniphier_fi2c_priv *priv, u16 addr)
        /* set slave address */
        writel(UNIPHIER_FI2C_DTTX_CMD | addr << 1,
               priv->membase + UNIPHIER_FI2C_DTTX);
-       /* first chunk of data */
-       uniphier_fi2c_fill_txfifo(priv, true);
+       /*
+        * First chunk of data. For a repeated START condition, do not
+        * write data to the TX FIFO here, to avoid a timing issue.
+        */
+       if (!repeat)
+               uniphier_fi2c_fill_txfifo(priv, true);
 }
 
 static void uniphier_fi2c_rx_init(struct uniphier_fi2c_priv *priv, u16 addr)
@@ -350,7 +366,7 @@ static int uniphier_fi2c_master_xfer_one(struct i2c_adapter *adap,
        if (is_read)
                uniphier_fi2c_rx_init(priv, msg->addr);
        else
-               uniphier_fi2c_tx_init(priv, msg->addr);
+               uniphier_fi2c_tx_init(priv, msg->addr, repeat);
 
        dev_dbg(&adap->dev, "start condition\n");
        /*
@@ -502,9 +518,26 @@ static void uniphier_fi2c_hw_init(struct uniphier_fi2c_priv *priv)
 
        uniphier_fi2c_reset(priv);
 
+       /*
+        *  Standard-mode: tLOW + tHIGH = 10 us
+        *  Fast-mode:     tLOW + tHIGH = 2.5 us
+        */
        writel(cyc, priv->membase + UNIPHIER_FI2C_CYC);
-       writel(cyc / 2, priv->membase + UNIPHIER_FI2C_LCTL);
+       /*
+        *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us, tBUF = 4.7 us
+        *  Fast-mode:     tLOW = 1.3 us, tHIGH = 0.6 us, tBUF = 1.3 us
+        * "tLOW/tHIGH = 5/4" meets both.
+        */
+       writel(cyc * 5 / 9, priv->membase + UNIPHIER_FI2C_LCTL);
+       /*
+        *  Standard-mode: tHD;STA = 4.0 us, tSU;STA = 4.7 us, tSU;STO = 4.0 us
+        *  Fast-mode:     tHD;STA = 0.6 us, tSU;STA = 0.6 us, tSU;STO = 0.6 us
+        */
        writel(cyc / 2, priv->membase + UNIPHIER_FI2C_SSUT);
+       /*
+        *  Standard-mode: tSU;DAT = 250 ns
+        *  Fast-mode:     tSU;DAT = 100 ns
+        */
        writel(cyc / 16, priv->membase + UNIPHIER_FI2C_DSUT);
 
        uniphier_fi2c_prepare_operation(priv);
index 454f914ae66dbd49931575122bb7c7dea662b11b..c488e558aef709ee5097f05436624807df26ac4e 100644 (file)
@@ -320,7 +320,13 @@ static void uniphier_i2c_hw_init(struct uniphier_i2c_priv *priv)
 
        uniphier_i2c_reset(priv, true);
 
-       writel((cyc / 2 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
+       /*
+        * Bits 30-16: clock cycles of tLOW.
+        *  Standard-mode: tLOW = 4.7 us, tHIGH = 4.0 us
+        *  Fast-mode:     tLOW = 1.3 us, tHIGH = 0.6 us
+        * "tLOW/tHIGH = 5/4" meets both.
+        */
+       writel((cyc * 5 / 9 << 16) | cyc, priv->membase + UNIPHIER_I2C_CLK);
 
        uniphier_i2c_reset(priv, false);
 }
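
To see why the 5/9 factor works: cyc is the number of clock cycles in one SCL period, so tLOW = cyc * 5 / 9 splits the period as tLOW:tHIGH = 5:4. At 100 kHz (10 us period) that gives tLOW of about 5.56 us and tHIGH of about 4.44 us, clearing the 4.7 us / 4.0 us standard-mode minima; at 400 kHz (2.5 us period) it gives roughly 1.39 us and 1.11 us, clearing the 1.3 us / 0.6 us fast-mode minima. The previous cyc / 2 split left fast-mode tLOW at 1.25 us, just under the 1.3 us minimum, which is presumably what this change addresses.
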
index dc78aa7369def416898f0a4c514fd017c147f0c0..28460f6a60cc15220c9a8748b3bd688e81244c37 100644 (file)
@@ -306,10 +306,7 @@ static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client)
        if (client->flags & I2C_CLIENT_TEN)
                return -EINVAL;
 
-       irq = irq_find_mapping(adap->host_notify_domain, client->addr);
-       if (!irq)
-               irq = irq_create_mapping(adap->host_notify_domain,
-                                        client->addr);
+       irq = irq_create_mapping(adap->host_notify_domain, client->addr);
 
        return irq > 0 ? irq : -ENXIO;
 }
@@ -433,6 +430,8 @@ static int i2c_device_remove(struct device *dev)
        dev_pm_clear_wake_irq(&client->dev);
        device_init_wakeup(&client->dev, false);
 
+       client->irq = 0;
+
        return status;
 }
 
index 45c9974303328b29af5a32d2ed5b47155aaa8e32..4c8c7a620d08dae851de513eebe2710dee3ca89e 100644 (file)
@@ -614,18 +614,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
        return 0;
 }
 
-static int ide_drivers_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, &ide_drivers_show, NULL);
-}
-
-static const struct file_operations ide_drivers_operations = {
-       .owner          = THIS_MODULE,
-       .open           = ide_drivers_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(ide_drivers);
 
 void proc_ide_create(void)
 {
@@ -634,7 +623,7 @@ void proc_ide_create(void)
        if (!proc_ide_root)
                return;
 
-       proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
+       proc_create("drivers", 0, proc_ide_root, &ide_drivers_fops);
 }
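
DEFINE_SHOW_ATTRIBUTE(ide_drivers) generates the same boilerplate the hunk above deletes; going by include/linux/seq_file.h (details may vary by kernel version), the macro expands to roughly:

static int ide_drivers_open(struct inode *inode, struct file *file)
{
	return single_open(file, ide_drivers_show, inode->i_private);
}

static const struct file_operations ide_drivers_fops = {
	.owner		= THIS_MODULE,
	.open		= ide_drivers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

which is why the proc_create() call above now passes &ide_drivers_fops.
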
 
 void proc_ide_destroy(void)
index c5b902b86b444773519edc1a52fe4d8782992f37..203ed4adc04ae6680de39910985b05996c227528 100644 (file)
@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
        struct device_node *root = of_find_node_by_path("/");
        const char *model = of_get_property(root, "model", NULL);
 
+       of_node_put(root);
        /* Get cable type from device-tree. */
        if (cable && !strncmp(cable, "80-", 3)) {
                /* Some drives fail to detect 80c cable in PowerBook */
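
The added of_node_put() balances the reference taken by of_find_node_by_path(); every of_find_*() lookup returns a device node with its refcount raised. A sketch of the contract, mirroring the fixed code (my_get_model is hypothetical):

#include <linux/of.h>

static const char *my_get_model(void)
{
	struct device_node *root = of_find_node_by_path("/");
	const char *model = of_get_property(root, "model", NULL);

	/* Drop the reference taken by of_find_node_by_path(). */
	of_node_put(root);
	return model;
}
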
index 41d97faf50138905d24cb3dea7da5cbb185062c7..38ff374a3ca451071a5fba6a09483d43abce3139 100644 (file)
@@ -149,6 +149,7 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
        struct hid_sensor_hub_device *hsdev =
                                        accel_state->common_attributes.hsdev;
 
@@ -158,12 +159,14 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_RAW:
                hid_sensor_power_state(&accel_state->common_attributes, true);
                report_id = accel_state->accel[chan->scan_index].report_id;
+               min = accel_state->accel[chan->scan_index].logical_minimum;
                address = accel_3d_addresses[chan->scan_index];
                if (report_id >= 0)
                        *val = sensor_hub_input_attr_get_raw_value(
                                        accel_state->common_attributes.hsdev,
                                        hsdev->usage, address, report_id,
-                                       SENSOR_HUB_SYNC);
+                                       SENSOR_HUB_SYNC,
+                                       min < 0);
                else {
                        *val = 0;
                        hid_sensor_power_state(&accel_state->common_attributes,
index 36941e69f95956a71898cb714fd3da49b1a19dbf..88e857c4baf4504370032610b3ae0d12058a622a 100644 (file)
@@ -111,6 +111,7 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
 
        *val = 0;
        *val2 = 0;
@@ -118,13 +119,15 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
        case IIO_CHAN_INFO_RAW:
                hid_sensor_power_state(&gyro_state->common_attributes, true);
                report_id = gyro_state->gyro[chan->scan_index].report_id;
+               min = gyro_state->gyro[chan->scan_index].logical_minimum;
                address = gyro_3d_addresses[chan->scan_index];
                if (report_id >= 0)
                        *val = sensor_hub_input_attr_get_raw_value(
                                        gyro_state->common_attributes.hsdev,
                                        HID_USAGE_SENSOR_GYRO_3D, address,
                                        report_id,
-                                       SENSOR_HUB_SYNC);
+                                       SENSOR_HUB_SYNC,
+                                       min < 0);
                else {
                        *val = 0;
                        hid_sensor_power_state(&gyro_state->common_attributes,
index beab6d6fd6e18bb5f0a927b9f7d70ea4b1e08b0d..4bc95f31c730ee99255ef179fde5466980661517 100644 (file)
@@ -75,7 +75,8 @@ static int humidity_read_raw(struct iio_dev *indio_dev,
                                HID_USAGE_SENSOR_HUMIDITY,
                                HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY,
                                humid_st->humidity_attr.report_id,
-                               SENSOR_HUB_SYNC);
+                               SENSOR_HUB_SYNC,
+                               humid_st->humidity_attr.logical_minimum < 0);
                hid_sensor_power_state(&humid_st->common_attributes, false);
 
                return IIO_VAL_INT;
index 406caaee9a3c54b6da43a4b697f302a165be927a..94f33250ba5a671d41ee193d4bcbde069ab98d1d 100644 (file)
@@ -93,6 +93,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
 
        *val = 0;
        *val2 = 0;
@@ -102,8 +103,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
                case  CHANNEL_SCAN_INDEX_INTENSITY:
                case  CHANNEL_SCAN_INDEX_ILLUM:
                        report_id = als_state->als_illum.report_id;
-                       address =
-                       HID_USAGE_SENSOR_LIGHT_ILLUM;
+                       min = als_state->als_illum.logical_minimum;
+                       address = HID_USAGE_SENSOR_LIGHT_ILLUM;
                        break;
                default:
                        report_id = -1;
@@ -116,7 +117,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
                                        als_state->common_attributes.hsdev,
                                        HID_USAGE_SENSOR_ALS, address,
                                        report_id,
-                                       SENSOR_HUB_SYNC);
+                                       SENSOR_HUB_SYNC,
+                                       min < 0);
                        hid_sensor_power_state(&als_state->common_attributes,
                                                false);
                } else {
index 45107f7537b5d8e0911f381c0ef2289d19b7730e..cf5a0c242609d4f53573a10c269ac3fe6c64a36f 100644 (file)
@@ -73,6 +73,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
 
        *val = 0;
        *val2 = 0;
@@ -81,8 +82,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
                switch (chan->scan_index) {
                case  CHANNEL_SCAN_INDEX_PRESENCE:
                        report_id = prox_state->prox_attr.report_id;
-                       address =
-                       HID_USAGE_SENSOR_HUMAN_PRESENCE;
+                       min = prox_state->prox_attr.logical_minimum;
+                       address = HID_USAGE_SENSOR_HUMAN_PRESENCE;
                        break;
                default:
                        report_id = -1;
@@ -95,7 +96,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
                                prox_state->common_attributes.hsdev,
                                HID_USAGE_SENSOR_PROX, address,
                                report_id,
-                               SENSOR_HUB_SYNC);
+                               SENSOR_HUB_SYNC,
+                               min < 0);
                        hid_sensor_power_state(&prox_state->common_attributes,
                                                false);
                } else {
index d55c4885211ad6329b9096760a93d3e4d176921f..f3c0d41e5a8c270728789ddda6f6cec61085097a 100644 (file)
@@ -163,21 +163,23 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
 
        *val = 0;
        *val2 = 0;
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                hid_sensor_power_state(&magn_state->magn_flux_attributes, true);
-               report_id =
-                       magn_state->magn[chan->address].report_id;
+               report_id = magn_state->magn[chan->address].report_id;
+               min = magn_state->magn[chan->address].logical_minimum;
                address = magn_3d_addresses[chan->address];
                if (report_id >= 0)
                        *val = sensor_hub_input_attr_get_raw_value(
                                magn_state->magn_flux_attributes.hsdev,
                                HID_USAGE_SENSOR_COMPASS_3D, address,
                                report_id,
-                               SENSOR_HUB_SYNC);
+                               SENSOR_HUB_SYNC,
+                               min < 0);
                else {
                        *val = 0;
                        hid_sensor_power_state(
index 0a9e8fadfa9de8a66dd3d1fee8d82f81cc6b10a4..37ab3056646497fd67280df4a7235917e07772f7 100644 (file)
@@ -30,11 +30,6 @@ int st_magn_trig_set_state(struct iio_trigger *trig, bool state)
        return st_sensors_set_dataready_irq(indio_dev, state);
 }
 
-static int st_magn_buffer_preenable(struct iio_dev *indio_dev)
-{
-       return st_sensors_set_enable(indio_dev, true);
-}
-
 static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
 {
        int err;
@@ -50,7 +45,7 @@ static int st_magn_buffer_postenable(struct iio_dev *indio_dev)
        if (err < 0)
                goto st_magn_buffer_postenable_error;
 
-       return err;
+       return st_sensors_set_enable(indio_dev, true);
 
 st_magn_buffer_postenable_error:
        kfree(mdata->buffer_data);
@@ -63,11 +58,11 @@ static int st_magn_buffer_predisable(struct iio_dev *indio_dev)
        int err;
        struct st_sensor_data *mdata = iio_priv(indio_dev);
 
-       err = iio_triggered_buffer_predisable(indio_dev);
+       err = st_sensors_set_enable(indio_dev, false);
        if (err < 0)
                goto st_magn_buffer_predisable_error;
 
-       err = st_sensors_set_enable(indio_dev, false);
+       err = iio_triggered_buffer_predisable(indio_dev);
 
 st_magn_buffer_predisable_error:
        kfree(mdata->buffer_data);
@@ -75,7 +70,6 @@ st_magn_buffer_predisable_error:
 }
 
 static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
-       .preenable = &st_magn_buffer_preenable,
        .postenable = &st_magn_buffer_postenable,
        .predisable = &st_magn_buffer_predisable,
 };
index 1e5451d1ff884b15514c92e624c0beec4aaaa924..bdc5e4554ee484cfc149f5d17a392853d9b754cf 100644 (file)
@@ -111,21 +111,23 @@ static int incl_3d_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
 
        *val = 0;
        *val2 = 0;
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                hid_sensor_power_state(&incl_state->common_attributes, true);
-               report_id =
-                       incl_state->incl[chan->scan_index].report_id;
+               report_id = incl_state->incl[chan->scan_index].report_id;
+               min = incl_state->incl[chan->scan_index].logical_minimum;
                address = incl_3d_addresses[chan->scan_index];
                if (report_id >= 0)
                        *val = sensor_hub_input_attr_get_raw_value(
                                incl_state->common_attributes.hsdev,
                                HID_USAGE_SENSOR_INCLINOMETER_3D, address,
                                report_id,
-                               SENSOR_HUB_SYNC);
+                               SENSOR_HUB_SYNC,
+                               min < 0);
                else {
                        hid_sensor_power_state(&incl_state->common_attributes,
                                                false);
index 4c437918f1d282f55661213a098e9d0b6ca9049a..d7b1c00ceb4da30cc4f9dae2848206ecf7411a4d 100644 (file)
@@ -77,6 +77,7 @@ static int press_read_raw(struct iio_dev *indio_dev,
        int report_id = -1;
        u32 address;
        int ret_type;
+       s32 min;
 
        *val = 0;
        *val2 = 0;
@@ -85,8 +86,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
                switch (chan->scan_index) {
                case  CHANNEL_SCAN_INDEX_PRESSURE:
                        report_id = press_state->press_attr.report_id;
-                       address =
-                       HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
+                       min = press_state->press_attr.logical_minimum;
+                       address = HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE;
                        break;
                default:
                        report_id = -1;
@@ -99,7 +100,8 @@ static int press_read_raw(struct iio_dev *indio_dev,
                                press_state->common_attributes.hsdev,
                                HID_USAGE_SENSOR_PRESSURE, address,
                                report_id,
-                               SENSOR_HUB_SYNC);
+                               SENSOR_HUB_SYNC,
+                               min < 0);
                        hid_sensor_power_state(&press_state->common_attributes,
                                                false);
                } else {
index beaf6fd3e337c6f962bb3dbcd49a482150ca64bf..b592fc4f007e417c0b57437a8f55064cf5d55c7a 100644 (file)
@@ -76,7 +76,8 @@ static int temperature_read_raw(struct iio_dev *indio_dev,
                        HID_USAGE_SENSOR_TEMPERATURE,
                        HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE,
                        temp_st->temperature_attr.report_id,
-                       SENSOR_HUB_SYNC);
+                       SENSOR_HUB_SYNC,
+                       temp_st->temperature_attr.logical_minimum < 0);
                hid_sensor_power_state(
                                &temp_st->common_attributes,
                                false);
index ee366199b169caa5a521c31e6a98e8ae6ecccbcf..25d43c8f1c2a869ffc10548f1d91da7049669b66 100644 (file)
@@ -767,8 +767,10 @@ static int netdevice_event(struct notifier_block *this, unsigned long event,
 
        case NETDEV_CHANGEADDR:
                cmds[0] = netdev_del_cmd;
-               cmds[1] = add_default_gid_cmd;
-               cmds[2] = add_cmd;
+               if (ndev->reg_state == NETREG_REGISTERED) {
+                       cmds[1] = add_default_gid_cmd;
+                       cmds[2] = add_cmd;
+               }
                break;
 
        case NETDEV_CHANGEUPPER:
index 2b4c5e7dd5a173c270e131016e40fcb892e04d70..676c1fd1119d80a17d4542d035a319300332842f 100644 (file)
@@ -137,15 +137,6 @@ static void ib_umem_notifier_release(struct mmu_notifier *mn,
        up_read(&per_mm->umem_rwsem);
 }
 
-static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
-                                     u64 end, void *cookie)
-{
-       ib_umem_notifier_start_account(item);
-       item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
-       ib_umem_notifier_end_account(item);
-       return 0;
-}
-
 static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
                                             u64 start, u64 end, void *cookie)
 {
@@ -553,12 +544,13 @@ out:
                put_page(page);
 
        if (remove_existing_mapping && umem->context->invalidate_range) {
-               invalidate_page_trampoline(
+               ib_umem_notifier_start_account(umem_odp);
+               umem->context->invalidate_range(
                        umem_odp,
-                       ib_umem_start(umem) + (page_index >> umem->page_shift),
-                       ib_umem_start(umem) + ((page_index + 1) >>
-                                              umem->page_shift),
-                       NULL);
+                       ib_umem_start(umem) + (page_index << umem->page_shift),
+                       ib_umem_start(umem) +
+                               ((page_index + 1) << umem->page_shift));
+               ib_umem_notifier_end_account(umem_odp);
                ret = -EAGAIN;
        }
 
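
The substantive fix in the hunk above is the shift direction: a page index becomes a byte offset by shifting left by the page shift, not right, so the old code handed the invalidate callback a bogus, nearly empty range. A standalone check with illustrative values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long start = 0x100000;	/* ib_umem_start() */
		unsigned int page_shift = 12;	/* 4 KiB pages */
		unsigned long page_index = 3;

		/* page 3 covers [start + 3 * 4096, start + 4 * 4096) */
		unsigned long lo = start + (page_index << page_shift);
		unsigned long hi = start + ((page_index + 1) << page_shift);

		printf("invalidate [%#lx, %#lx)\n", lo, hi);
		/* prints: invalidate [0x103000, 0x104000) */
		return 0;
	}
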
index cf22826542100f5a369a2835882fcc97e5a03d55..77f095e5fbe3887ec4293945b701adb84791f765 100644 (file)
@@ -1268,6 +1268,7 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
        /* Registered a new RoCE device instance to netdev */
        rc = bnxt_re_register_netdev(rdev);
        if (rc) {
+               rtnl_unlock();
                pr_err("Failed to register with netdev: %#x\n", rc);
                return -EINVAL;
        }
@@ -1466,6 +1467,7 @@ static void bnxt_re_task(struct work_struct *work)
                                "Failed to register with IB: %#x", rc);
                        bnxt_re_remove_one(rdev);
                        bnxt_re_dev_unreg(rdev);
+                       goto exit;
                }
                break;
        case NETDEV_UP:
@@ -1489,6 +1491,7 @@ static void bnxt_re_task(struct work_struct *work)
        }
        smp_mb__before_atomic();
        atomic_dec(&rdev->sched_count);
+exit:
        kfree(re_work);
 }
 
index a4c62ae23a9aeee22213b8f799e4e796ae953e1b..3beb1523e17c29c054da7ccb874912a754283d5d 100644 (file)
@@ -1756,10 +1756,9 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
        return hns_roce_cmq_send(hr_dev, &desc, 1);
 }
 
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
-                                 unsigned long mtpt_idx)
+static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+                       struct hns_roce_mr *mr)
 {
-       struct hns_roce_v2_mpt_entry *mpt_entry;
        struct scatterlist *sg;
        u64 page_addr;
        u64 *pages;
@@ -1767,6 +1766,53 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
        int len;
        int entry;
 
+       mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+       mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+       roce_set_field(mpt_entry->byte_48_mode_ba,
+                      V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
+                      upper_32_bits(mr->pbl_ba >> 3));
+
+       pages = (u64 *)__get_free_page(GFP_KERNEL);
+       if (!pages)
+               return -ENOMEM;
+
+       i = 0;
+       for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
+               len = sg_dma_len(sg) >> PAGE_SHIFT;
+               for (j = 0; j < len; ++j) {
+                       page_addr = sg_dma_address(sg) +
+                               (j << mr->umem->page_shift);
+                       pages[i] = page_addr >> 6;
+                       /* Record the first 2 entries directly to MTPT table */
+                       if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+                               goto found;
+                       i++;
+               }
+       }
+found:
+       mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
+       roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
+                      V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
+
+       mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
+       roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
+                      V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+       roce_set_field(mpt_entry->byte_64_buf_pa1,
+                      V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+                      V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+                      mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+       free_page((unsigned long)pages);
+
+       return 0;
+}
+
+static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+                                 unsigned long mtpt_idx)
+{
+       struct hns_roce_v2_mpt_entry *mpt_entry;
+       int ret;
+
        mpt_entry = mb_buf;
        memset(mpt_entry, 0, sizeof(*mpt_entry));
 
@@ -1781,7 +1827,6 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
                       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
        roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
                       V2_MPT_BYTE_4_PD_S, mr->pd);
-       mpt_entry->byte_4_pd_hop_st = cpu_to_le32(mpt_entry->byte_4_pd_hop_st);
 
        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
@@ -1796,13 +1841,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
                     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
        roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
                     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
-       mpt_entry->byte_8_mw_cnt_en = cpu_to_le32(mpt_entry->byte_8_mw_cnt_en);
 
        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
                     mr->type == MR_TYPE_MR ? 0 : 1);
        roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
                     1);
-       mpt_entry->byte_12_mw_pa = cpu_to_le32(mpt_entry->byte_12_mw_pa);
 
        mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
        mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
@@ -1813,53 +1856,9 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
        if (mr->type == MR_TYPE_DMA)
                return 0;
 
-       mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
-
-       mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
-       roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
-                      V2_MPT_BYTE_48_PBL_BA_H_S,
-                      upper_32_bits(mr->pbl_ba >> 3));
-       mpt_entry->byte_48_mode_ba = cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
-       pages = (u64 *)__get_free_page(GFP_KERNEL);
-       if (!pages)
-               return -ENOMEM;
-
-       i = 0;
-       for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-               len = sg_dma_len(sg) >> PAGE_SHIFT;
-               for (j = 0; j < len; ++j) {
-                       page_addr = sg_dma_address(sg) +
-                                   (j << mr->umem->page_shift);
-                       pages[i] = page_addr >> 6;
-
-                       /* Record the first 2 entries directly to MTPT table */
-                       if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-                               goto found;
-                       i++;
-               }
-       }
-
-found:
-       mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
-       roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
-                      V2_MPT_BYTE_56_PA0_H_S,
-                      upper_32_bits(pages[0]));
-       mpt_entry->byte_56_pa0_h = cpu_to_le32(mpt_entry->byte_56_pa0_h);
-
-       mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
-       roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
-                      V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
+       ret = set_mtpt_pbl(mpt_entry, mr);
 
-       free_page((unsigned long)pages);
-
-       roce_set_field(mpt_entry->byte_64_buf_pa1,
-                      V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
-                      V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
-                      mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
-       mpt_entry->byte_64_buf_pa1 = cpu_to_le32(mpt_entry->byte_64_buf_pa1);
-
-       return 0;
+       return ret;
 }
 
 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
@@ -1868,6 +1867,7 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                                        u64 size, void *mb_buf)
 {
        struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+       int ret = 0;
 
        if (flags & IB_MR_REREG_PD) {
                roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
@@ -1880,14 +1880,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                             V2_MPT_BYTE_8_BIND_EN_S,
                             (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
                roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
-                          V2_MPT_BYTE_8_ATOMIC_EN_S,
-                          (mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0));
+                            V2_MPT_BYTE_8_ATOMIC_EN_S,
+                            mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
-                            (mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0));
+                            mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
-                           (mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
+                            mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
                roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
-                            (mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
+                            mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
        }
 
        if (flags & IB_MR_REREG_TRANS) {
@@ -1896,21 +1896,13 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
                mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
                mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
 
-               mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
-               mpt_entry->pbl_ba_l =
-                               cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
-               roce_set_field(mpt_entry->byte_48_mode_ba,
-                              V2_MPT_BYTE_48_PBL_BA_H_M,
-                              V2_MPT_BYTE_48_PBL_BA_H_S,
-                              upper_32_bits(mr->pbl_ba >> 3));
-               mpt_entry->byte_48_mode_ba =
-                               cpu_to_le32(mpt_entry->byte_48_mode_ba);
-
                mr->iova = iova;
                mr->size = size;
+
+               ret = set_mtpt_pbl(mpt_entry, mr);
        }
 
-       return 0;
+       return ret;
 }
 
 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
index e9c428071df3140685071f458abf72512381ae55..3569fda07e07f47b9286b7e1251c2716f9169203 100644 (file)
@@ -1094,31 +1094,26 @@ enum mlx5_ib_width {
        MLX5_IB_WIDTH_12X       = 1 << 4
 };
 
-static int translate_active_width(struct ib_device *ibdev, u8 active_width,
+static void translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
 {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
-       int err = 0;
 
-       if (active_width & MLX5_IB_WIDTH_1X) {
+       if (active_width & MLX5_IB_WIDTH_1X)
                *ib_width = IB_WIDTH_1X;
-       } else if (active_width & MLX5_IB_WIDTH_2X) {
-               mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
-                           (int)active_width);
-               err = -EINVAL;
-       } else if (active_width & MLX5_IB_WIDTH_4X) {
+       else if (active_width & MLX5_IB_WIDTH_4X)
                *ib_width = IB_WIDTH_4X;
-       } else if (active_width & MLX5_IB_WIDTH_8X) {
+       else if (active_width & MLX5_IB_WIDTH_8X)
                *ib_width = IB_WIDTH_8X;
-       } else if (active_width & MLX5_IB_WIDTH_12X) {
+       else if (active_width & MLX5_IB_WIDTH_12X)
                *ib_width = IB_WIDTH_12X;
-       else {
-               mlx5_ib_dbg(dev, "Invalid active_width %d\n",
+       else {
+               mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
                            (int)active_width);
-               err = -EINVAL;
+               *ib_width = IB_WIDTH_4X;
        }
 
-       return err;
+       return;
 }
 
 static int mlx5_mtu_to_ib_mtu(int mtu)
@@ -1225,10 +1220,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
        if (err)
                goto out;
 
-       err = translate_active_width(ibdev, ib_link_width_oper,
-                                    &props->active_width);
-       if (err)
-               goto out;
+       translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
+
        err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
        if (err)
                goto out;
index b04eb67753261c71c036bc4269578d350dd40de4..2cc3d69ab6f64dde00ee48c82ff93c5edca697f4 100644 (file)
@@ -674,6 +674,15 @@ next_mr:
                        goto srcu_unlock;
                }
 
+               if (!mr->umem->is_odp) {
+                       mlx5_ib_dbg(dev, "skipping non ODP MR (lkey=0x%06x) in page fault handler.\n",
+                                   key);
+                       if (bytes_mapped)
+                               *bytes_mapped += bcnt;
+                       ret = 0;
+                       goto srcu_unlock;
+               }
+
                ret = pagefault_mr(dev, mr, io_virt, bcnt, bytes_mapped);
                if (ret < 0)
                        goto srcu_unlock;
@@ -735,6 +744,7 @@ next_mr:
                        head = frame;
 
                        bcnt -= frame->bcnt;
+                       offset = 0;
                }
                break;
 
index 6841c0f9237fb3c665cffce42cb501f3200897fe..3747cc681b18a54fb2a841f2539064e266ee5587 100644 (file)
@@ -2633,8 +2633,7 @@ static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
 
        if (access_flags & IB_ACCESS_REMOTE_READ)
                *hw_access_flags |= MLX5_QP_BIT_RRE;
-       if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-           qp->ibqp.qp_type == IB_QPT_RC) {
+       if (access_flags & IB_ACCESS_REMOTE_ATOMIC) {
                int atomic_mode;
 
                atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
@@ -4678,17 +4677,18 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                        goto out;
                }
 
-               if (wr->opcode == IB_WR_LOCAL_INV ||
-                   wr->opcode == IB_WR_REG_MR) {
+               if (wr->opcode == IB_WR_REG_MR) {
                        fence = dev->umr_fence;
                        next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-               } else if (wr->send_flags & IB_SEND_FENCE) {
-                       if (qp->next_fence)
-                               fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
-                       else
-                               fence = MLX5_FENCE_MODE_FENCE;
-               } else {
-                       fence = qp->next_fence;
+               } else  {
+                       if (wr->send_flags & IB_SEND_FENCE) {
+                               if (qp->next_fence)
+                                       fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+                               else
+                                       fence = MLX5_FENCE_MODE_FENCE;
+                       } else {
+                               fence = qp->next_fence;
+                       }
                }
 
                switch (ibqp->qp_type) {
index 89ec0f64abfc35b64ed5058c85a054e13f3dab26..084bb4baebb50a1fe061bd3fa2be0e2deed2ae89 100644 (file)
@@ -91,13 +91,15 @@ EXPORT_SYMBOL(rvt_check_ah);
  * rvt_create_ah - create an address handle
  * @pd: the protection domain
  * @ah_attr: the attributes of the AH
+ * @udata: pointer to user's input output buffer information.
  *
  * This may be called from interrupt context.
  *
  * Return: newly allocated ah
  */
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-                           struct rdma_ah_attr *ah_attr)
+                           struct rdma_ah_attr *ah_attr,
+                           struct ib_udata *udata)
 {
        struct rvt_ah *ah;
        struct rvt_dev_info *dev = ib_to_rvt(pd->device);
index 16105af9918908b4d6d417513560c273080465fc..25271b48a6830c5b6b962095c8fee2b33503036c 100644 (file)
@@ -51,7 +51,8 @@
 #include <rdma/rdma_vt.h>
 
 struct ib_ah *rvt_create_ah(struct ib_pd *pd,
-                           struct rdma_ah_attr *ah_attr);
+                           struct rdma_ah_attr *ah_attr,
+                           struct ib_udata *udata);
 int rvt_destroy_ah(struct ib_ah *ibah);
 int rvt_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 int rvt_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
index 946b623ba5eb7f693469c90fdbd32c7f76bd3b2e..4ff3d98fa6a4e2b6b127d973d6b0edf9063cd474 100644 (file)
@@ -1124,7 +1124,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
                                         IB_MR_CHECK_SIG_STATUS, &mr_status);
                if (ret) {
                        pr_err("ib_check_mr_status failed, ret %d\n", ret);
-                       goto err;
+                       /* Not a lot we can do, return ambiguous guard error */
+                       *sector = 0;
+                       return 0x1;
                }
 
                if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@@ -1152,9 +1154,6 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
        }
 
        return 0;
-err:
-       /* Not alot we can do here, return ambiguous guard error */
-       return 0x1;
 }
 
 void iser_err_comp(struct ib_wc *wc, const char *type)
index d4b9db487b16fa3f9a87e4f5fd6732a8b4d9c9b4..cfc8b94527b97cda3f4b20782af0fbc2b6260f2b 100644 (file)
@@ -480,18 +480,18 @@ static const u8 xboxone_hori_init[] = {
 };
 
 /*
- * This packet is required for some of the PDP pads to start
+ * This packet is required for most (all?) of the PDP pads to start
  * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4).
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
  */
 static const u8 xboxone_pdp_init1[] = {
        0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
 };
 
 /*
- * This packet is required for some of the PDP pads to start
+ * This packet is required for most (all?) of the PDP pads to start
  * sending input reports. These pads include: (0x0e6f:0x02ab),
- * (0x0e6f:0x02a4).
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
  */
 static const u8 xboxone_pdp_init2[] = {
        0x06, 0x20, 0x00, 0x02, 0x01, 0x00
@@ -527,12 +527,8 @@ static const struct xboxone_init_packet xboxone_init_packets[] = {
        XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
        XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1),
-       XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2),
+       XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
+       XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
        XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
        XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
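
The table collapse works because the init-packet matcher appears to treat a zero id as a wildcard (the existing 0x0000/0x0000 entry for xboxone_fw2015_init already relies on this), so the two PDP packets now apply to every 0x0e6f pad rather than a hard-coded product list. A sketch of that matching rule; the struct and helper names here are illustrative, not the driver's:

	#include <stdbool.h>
	#include <stdint.h>

	struct init_packet {
		uint16_t idVendor;
		uint16_t idProduct;
		/* ... payload ... */
	};

	/* Zero acts as "match any" for either id. */
	static bool init_packet_matches(const struct init_packet *pkt,
					uint16_t vendor, uint16_t product)
	{
		if (pkt->idVendor && pkt->idVendor != vendor)
			return false;
		if (pkt->idProduct && pkt->idProduct != product)
			return false;
		return true;
	}
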
index 7e75835e220f29f2140f26d50ab0403e774f16dd..850bb259c20ebbba2c68f7dd88c6cfb83c9192a1 100644 (file)
@@ -841,7 +841,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
        if (param[0] != 3) {
                param[0] = 2;
                if (ps2_command(ps2dev, param, ATKBD_CMD_SSCANSET))
-               return 2;
+                       return 2;
        }
 
        ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MBR);
index 81be6f781f0b60207088114e0b72b6e0e12d1b14..d5600118159835321c3c55b1d10e1cf4468cc22e 100644 (file)
@@ -493,7 +493,8 @@ static int cros_ec_keyb_register_bs(struct cros_ec_keyb *ckdev)
        for (i = 0; i < ARRAY_SIZE(cros_ec_keyb_bs); i++) {
                const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i];
 
-               if (buttons & BIT(map->bit))
+               if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) ||
+                   (map->ev_type == EV_SW && (switches & BIT(map->bit))))
                        input_set_capability(idev, map->ev_type, map->code);
        }
 
index f51ae09596ef25942ff6bab030be9697c3a0eca7..403452ef00e6f257d67ca44bdf5626b0e5cc53a4 100644 (file)
@@ -407,7 +407,7 @@ matrix_keypad_parse_dt(struct device *dev)
        struct matrix_keypad_platform_data *pdata;
        struct device_node *np = dev->of_node;
        unsigned int *gpios;
-       int i, nrow, ncol;
+       int ret, i, nrow, ncol;
 
        if (!np) {
                dev_err(dev, "device lacks DT data\n");
@@ -452,12 +452,19 @@ matrix_keypad_parse_dt(struct device *dev)
                return ERR_PTR(-ENOMEM);
        }
 
-       for (i = 0; i < pdata->num_row_gpios; i++)
-               gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+       for (i = 0; i < nrow; i++) {
+               ret = of_get_named_gpio(np, "row-gpios", i);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+               gpios[i] = ret;
+       }
 
-       for (i = 0; i < pdata->num_col_gpios; i++)
-               gpios[pdata->num_row_gpios + i] =
-                       of_get_named_gpio(np, "col-gpios", i);
+       for (i = 0; i < ncol; i++) {
+               ret = of_get_named_gpio(np, "col-gpios", i);
+               if (ret < 0)
+                       return ERR_PTR(ret);
+               gpios[nrow + i] = ret;
+       }
 
        pdata->row_gpios = gpios;
        pdata->col_gpios = &gpios[pdata->num_row_gpios];
@@ -484,10 +491,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
        pdata = dev_get_platdata(&pdev->dev);
        if (!pdata) {
                pdata = matrix_keypad_parse_dt(&pdev->dev);
-               if (IS_ERR(pdata)) {
-                       dev_err(&pdev->dev, "no platform data defined\n");
+               if (IS_ERR(pdata))
                        return PTR_ERR(pdata);
-               }
        } else if (!pdata->keymap_data) {
                dev_err(&pdev->dev, "no keymap data defined\n");
                return -EINVAL;
index 46406345742b97c06595ab7e3b785ee2a6daca1b..a7dc286f406c992ebd55d764808691101dfff690 100644 (file)
 
 /* OMAP4 values */
 #define OMAP4_VAL_IRQDISABLE           0x0
-#define OMAP4_VAL_DEBOUNCINGTIME       0x7
-#define OMAP4_VAL_PVT                  0x7
+
+/*
+ * Errata i689: If a key is released for a time shorter than debounce time,
+ * the keyboard will idle and never detect the key release. The workaround
+ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
+ * "26.4.6.2 Keyboard Controller Timer" for more information.
+ */
+#define OMAP4_KEYPAD_PTV_DIV_128        0x6
+#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv)     \
+       ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
+#define OMAP4_VAL_DEBOUNCINGTIME_16MS                                  \
+       OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
 
 enum {
        KBD_REVISION_OMAP4 = 0,
@@ -181,9 +191,9 @@ static int omap4_keypad_open(struct input_dev *input)
 
        kbd_writel(keypad_data, OMAP4_KBD_CTRL,
                        OMAP4_DEF_CTRL_NOSOFTMODE |
-                       (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
+                       (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
        kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
-                       OMAP4_VAL_DEBOUNCINGTIME);
+                       OMAP4_VAL_DEBOUNCINGTIME_16MS);
        /* clear pending interrupts */
        kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
                         kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
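
The debounce macro encodes the keypad timing model: with the controller clocked at 32768 Hz and a prescaler of 2^(ptv+1), the register holds the debounce interval in prescaled ticks, minus one. Re-deriving the value used above in a standalone program (the constants mirror the new macros):

	#include <stdio.h>

	#define PTV_DIV_128	0x6
	#define DEBOUNCE_REG(dbms, ptv) \
		((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)

	int main(void)
	{
		int reg = DEBOUNCE_REG(16, PTV_DIV_128);
		/* effective time: (reg + 1) * 2^(ptv + 1) ticks at 32768 Hz */
		double ms = (reg + 1) * (1 << (PTV_DIV_128 + 1)) * 1000.0 / 32768;

		printf("reg = %d -> %.3f ms\n", reg, ms);
		/* prints: reg = 3 -> 15.625 ms */
		return 0;
	}

Integer division makes the nominal 16 ms come out as 15.625 ms of actual debounce, still comfortably above the 12 ms minimum the i689 erratum requires.
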
index b0f9d19b3410ae1867e1c134b30f8ccb8a1e5bd3..a94b6494e71a5b724b261ef31987c52a4c5523c6 100644 (file)
@@ -1348,6 +1348,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
        { "ELAN0618", 0 },
        { "ELAN061C", 0 },
        { "ELAN061D", 0 },
+       { "ELAN061E", 0 },
+       { "ELAN0620", 0 },
+       { "ELAN0621", 0 },
        { "ELAN0622", 0 },
        { "ELAN1000", 0 },
        { }
index 5e85f3cca867dc23feb5e5753c86addfb06ddaf9..2bd5bb11c8baec85bb9422dbfb0612e6bfed77f2 100644 (file)
@@ -170,6 +170,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0048", /* X1 Carbon 3 */
        "LEN0046", /* X250 */
        "LEN004a", /* W541 */
+       "LEN005b", /* P50 */
        "LEN0071", /* T480 */
        "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
        "LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -177,6 +178,7 @@ static const char * const smbus_pnp_ids[] = {
        "LEN0096", /* X280 */
        "LEN0097", /* X280 -> ALPS trackpoint */
        "LEN200f", /* T450s */
+       "SYN3221", /* HP 15-ay000 */
        NULL
 };
 
index 47a0e81a2989c93389e2affd20414d17057cde79..a8b9be3e28db709ef8769e29da2684c3d1e3bcf9 100644 (file)
@@ -177,7 +177,7 @@ static void hv_kbd_on_receive(struct hv_device *hv_dev,
                 * state because the Enter-UP can trigger a wakeup at once.
                 */
                if (!(info & IS_BREAK))
-                       pm_wakeup_event(&hv_dev->device, 0);
+                       pm_wakeup_hard_event(&hv_dev->device);
 
                break;
 
index 02fb119858197b34fc9fe4e9a50fc18bc3e98957..42d3fd7e04d7c1a7f29fb0689fe5b597ef070692 100644 (file)
@@ -1,23 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Touch Screen driver for Renesas MIGO-R Platform
  *
  * Copyright (c) 2008 Magnus Damm
  * Copyright (c) 2007 Ujjwal Pande <ujjwal@kenati.com>,
  *  Kenati Technologies Pvt Ltd.
- *
- * This file is free software; you can redistribute it and/or
- * modify it under the terms of the GNU  General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This file is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- *  General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
index b71673911aac303bbc61e34c1375582c6e7dbddf..11ff32c6802506a8deedfe460010c9974a046de6 100644 (file)
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * ST1232 Touchscreen Controller Driver
  *
@@ -7,15 +8,6 @@
  * Using code from:
  *  - android.git.kernel.org: projects/kernel/common.git: synaptics_i2c_rmi.c
  *     Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/delay.h>
@@ -295,4 +287,4 @@ module_i2c_driver(st1232_ts_driver);
 
 MODULE_AUTHOR("Tony SIM <chinyeow.sim.xt@renesas.com>");
 MODULE_DESCRIPTION("SITRONIX ST1232 Touchscreen Controller Driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
index bb2cd29e165885d1697e4d77614a42333c5457dc..d8f7000a466aa4d2d3d79874428738fec70a4324 100644 (file)
@@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
        entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
                    &entry, sizeof(entry));
-       entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL;
+       entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+                (BIT_ULL(52)-1)) & ~7ULL;
        memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
                    &entry, sizeof(entry));
        writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
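
Two things change in this hunk: the GA log tail register is now programmed from ga_log_tail instead of the log base, and the 52-bit physical-address mask is written as BIT_ULL(52) - 1 rather than a 13-digit hex literal. The two masks are identical, which is quick to verify:

	#include <stdio.h>

	#define BIT_ULL(n)	(1ULL << (n))

	int main(void)
	{
		/* thirteen hex F's = 13 * 4 = 52 set bits */
		printf("%d\n", (BIT_ULL(52) - 1) == 0xFFFFFFFFFFFFFULL);
		/* prints: 1 */
		return 0;
	}
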
index f3ccf025108b4c377f2f9bc6de80e8222b817a01..41a4b8808802b8bcc30106c37b748eb3507943c0 100644 (file)
@@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu,
                        }
 
                        if (old_ce)
-                               iounmap(old_ce);
+                               memunmap(old_ce);
 
                        ret = 0;
                        if (devfn < 0x80)
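
The one-liner above fixes an unmap pairing bug: old_ce is created with memremap(), and memremap() mappings must be released with memunmap(); iounmap() is only valid for ioremap() mappings. The general shape of the rule, as a sketch rather than this driver's exact code:

	static int copy_table(phys_addr_t phys, size_t size)
	{
		void *tbl = memremap(phys, size, MEMREMAP_WB);

		if (!tbl)
			return -ENOMEM;
		/* ... walk the copied table ... */
		memunmap(tbl);	/* pairs with memremap(), not iounmap() */
		return 0;
	}
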
index db301efe126d4ac9cf2aeb606489565135db7e7d..88715090752670a5701cde52f8a4ce11baec1da4 100644 (file)
@@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                        pr_err("%s: Page request without PASID: %08llx %08llx\n",
                               iommu->name, ((unsigned long long *)req)[0],
                               ((unsigned long long *)req)[1]);
-                       goto bad_req;
+                       goto no_pasid;
                }
 
                if (!svm || svm->pasid != req->pasid) {
index b98a031895803a16b8e2d9d0df0a4b1686789aef..ddf3a492e1d59c1fd867f12881e6c3391038cf0f 100644 (file)
@@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
 
 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
 {
+       if (!domain->mmu)
+               return;
+
        /*
         * Disable the context. Flush the TLB as required when modifying the
         * context registers.
index 566d69a2edbc723b8370483a39bf66ca60aaef62..add4c9c934c8abda564b25904dc7b9f479ca7afc 100644 (file)
@@ -384,9 +384,9 @@ static int mvebu_sei_probe(struct platform_device *pdev)
 
        sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        sei->base = devm_ioremap_resource(sei->dev, sei->res);
-       if (!sei->base) {
+       if (IS_ERR(sei->base)) {
                dev_err(sei->dev, "Failed to remap SEI resource\n");
-               return -ENODEV;
+               return PTR_ERR(sei->base);
        }
 
        /* Retrieve the SEI capabilities with the interrupt ranges */
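
Background for this fix: devm_ioremap_resource() never returns NULL; failures come back as an ERR_PTR-encoded errno, so the old NULL test could never trigger and the hard-coded -ENODEV masked the real error. The canonical call pattern is:

	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* propagate the encoded errno */
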
index b05022f94f18c22ff2c72fa71cf11984077152b7..072bb5e36c184e6945624ec07dc27595fe8e7a7d 100644 (file)
@@ -718,8 +718,7 @@ l1oip_socket_thread(void *data)
                printk(KERN_DEBUG "%s: socket created and open\n",
                       __func__);
        while (!signal_pending(current)) {
-               iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1,
-                               recvbuf_size);
+               iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size);
                recvlen = sock_recvmsg(socket, &msg, 0);
                if (recvlen > 0) {
                        l1oip_socket_parse(hc, &sin_rx, recvbuf, recvlen);
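
This tracks a recent iov_iter API change: the iterator type is implied by the constructor itself, so callers of iov_iter_kvec() now pass only the data direction rather than OR-ing ITER_KVEC into it. The updated call shape:

	struct kvec iov = {
		.iov_base = recvbuf,
		.iov_len  = recvbuf_size,
	};

	/* direction only; ITER_KVEC is implied by the helper */
	iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, recvbuf_size);
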
index ce7acd115dd8da7578b4fc8d3fc3733692d90743..1870cf87afe1ef7993b6e2cda9a28aeede31f268 100644 (file)
@@ -75,8 +75,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
 {
        struct pattern_trig_data *data = from_timer(data, t, timer);
 
-       mutex_lock(&data->lock);
-
        for (;;) {
                if (!data->is_indefinite && !data->repeat)
                        break;
@@ -87,9 +85,10 @@ static void pattern_trig_timer_function(struct timer_list *t)
                                           data->curr->brightness);
                        mod_timer(&data->timer,
                                  jiffies + msecs_to_jiffies(data->curr->delta_t));
-
-                       /* Skip the tuple with zero duration */
-                       pattern_trig_update_patterns(data);
+                       if (!data->next->delta_t) {
+                               /* Skip the tuple with zero duration */
+                               pattern_trig_update_patterns(data);
+                       }
                        /* Select next tuple */
                        pattern_trig_update_patterns(data);
                } else {
@@ -116,8 +115,6 @@ static void pattern_trig_timer_function(struct timer_list *t)
 
                break;
        }
-
-       mutex_unlock(&data->lock);
 }
 
 static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
@@ -176,14 +173,10 @@ static ssize_t repeat_store(struct device *dev, struct device_attribute *attr,
        if (res < -1 || res == 0)
                return -EINVAL;
 
-       /*
-        * Clear previous patterns' performence firstly, and remove the timer
-        * without mutex lock to avoid dead lock.
-        */
-       del_timer_sync(&data->timer);
-
        mutex_lock(&data->lock);
 
+       del_timer_sync(&data->timer);
+
        if (data->is_hw_pattern)
                led_cdev->pattern_clear(led_cdev);
 
@@ -234,14 +227,10 @@ static ssize_t pattern_trig_store_patterns(struct led_classdev *led_cdev,
        struct pattern_trig_data *data = led_cdev->trigger_data;
        int ccount, cr, offset = 0, err = 0;
 
-       /*
-        * Clear previous patterns' performence firstly, and remove the timer
-        * without mutex lock to avoid dead lock.
-        */
-       del_timer_sync(&data->timer);
-
        mutex_lock(&data->lock);
 
+       del_timer_sync(&data->timer);
+
        if (data->is_hw_pattern)
                led_cdev->pattern_clear(led_cdev);
 
index f3fb5bb8c82a1cfe861aef1af22b581709815817..ac1cffd2a09b05f5f5217e579c9e87ea80efce84 100644 (file)
@@ -542,7 +542,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
                    !discard_bio)
                        continue;
                bio_chain(discard_bio, bio);
-               bio_clone_blkg_association(discard_bio, bio);
+               bio_clone_blkcg_association(discard_bio, bio);
                if (mddev->gendisk)
                        trace_block_bio_remap(bdev_get_queue(rdev->bdev),
                                discard_bio, disk_devt(mddev->gendisk),
index 31d1f4ab915ea7e07f8843d1feb99217409a1af4..65a933a21e685b9c97fadc7662e34ad31c088798 100644 (file)
@@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
        }
 
        if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) {
-               dprintk(1, "%s: transmit queue full\n", __func__);
+               dprintk(2, "%s: transmit queue full\n", __func__);
                return -EBUSY;
        }
 
@@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap,
 {
        struct cec_log_addrs *las = &adap->log_addrs;
        struct cec_msg msg = { };
+       const unsigned int max_retries = 2;
+       unsigned int i;
        int err;
 
        if (cec_has_log_addr(adap, log_addr))
@@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap,
        /* Send poll message */
        msg.len = 1;
        msg.msg[0] = (log_addr << 4) | log_addr;
-       err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
-       /*
-        * While trying to poll the physical address was reset
-        * and the adapter was unconfigured, so bail out.
-        */
-       if (!adap->is_configuring)
-               return -EINTR;
+       for (i = 0; i < max_retries; i++) {
+               err = cec_transmit_msg_fh(adap, &msg, NULL, true);
 
-       if (err)
-               return err;
+               /*
+                * While trying to poll the physical address was reset
+                * and the adapter was unconfigured, so bail out.
+                */
+               if (!adap->is_configuring)
+                       return -EINTR;
+
+               if (err)
+                       return err;
 
-       if (msg.tx_status & CEC_TX_STATUS_OK)
+               /*
+                * The message was aborted due to a disconnect or
+                * unconfigure, just bail out.
+                */
+               if (msg.tx_status & CEC_TX_STATUS_ABORTED)
+                       return -EINTR;
+               if (msg.tx_status & CEC_TX_STATUS_OK)
+                       return 0;
+               if (msg.tx_status & CEC_TX_STATUS_NACK)
+                       break;
+               /*
+                * Retry up to max_retries times if the message was neither
+                * OKed nor NACKed. This can happen due to e.g. a Lost
+                * Arbitration condition.
+                */
+       }
+
+       /*
+        * If we are unable to get an OK or a NACK after max_retries attempts
+        * (and note that each attempt already consists of four polls), then
+        * then we assume that something is really weird and that it is not a
+        * we assume that something is really weird and that it is not a
+        */
+       if (i == max_retries)
                return 0;
 
        /*
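
For reference, the control flow the new loop implements, with hypothetical helpers standing in for the tx_status checks: an acknowledged poll means another device already owns the logical address, a NACK means it is free to claim, and any other outcome (for example lost arbitration) is retried a bounded number of times.

	for (i = 0; i < max_retries; i++) {
		err = transmit_poll();		/* hypothetical helper */
		if (err)
			return err;
		if (poll_acked())		/* address already in use */
			return 0;
		if (poll_nacked())		/* address free */
			break;
		/* neither ACK nor NACK: retry */
	}
	if (i == max_retries)
		return 0;	/* still ambiguous: do not claim it */
	/* fall through: claim the address */
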
index 6d4b2eec67b4fdde3fb78f7c8da0d2bb9531b22b..29836c1a40e987985937f89dc7574ca6205a489e 100644 (file)
@@ -80,8 +80,8 @@ struct dvb_pll_desc {
 
 static const struct dvb_pll_desc dvb_pll_thomson_dtt7579 = {
        .name  = "Thomson dtt7579",
-       .min   = 177000000,
-       .max   = 858000000,
+       .min   = 177 * MHz,
+       .max   = 858 * MHz,
        .iffreq= 36166667,
        .sleepdata = (u8[]){ 2, 0xb4, 0x03 },
        .count = 4,
@@ -102,8 +102,8 @@ static void thomson_dtt759x_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_thomson_dtt759x = {
        .name  = "Thomson dtt759x",
-       .min   = 177000000,
-       .max   = 896000000,
+       .min   = 177 * MHz,
+       .max   = 896 * MHz,
        .set   = thomson_dtt759x_bw,
        .iffreq= 36166667,
        .sleepdata = (u8[]){ 2, 0x84, 0x03 },
@@ -126,8 +126,8 @@ static void thomson_dtt7520x_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
        .name  = "Thomson dtt7520x",
-       .min   = 185000000,
-       .max   = 900000000,
+       .min   = 185 * MHz,
+       .max   = 900 * MHz,
        .set   = thomson_dtt7520x_bw,
        .iffreq = 36166667,
        .count = 7,
@@ -144,8 +144,8 @@ static const struct dvb_pll_desc dvb_pll_thomson_dtt7520x = {
 
 static const struct dvb_pll_desc dvb_pll_lg_z201 = {
        .name  = "LG z201",
-       .min   = 174000000,
-       .max   = 862000000,
+       .min   = 174 * MHz,
+       .max   = 862 * MHz,
        .iffreq= 36166667,
        .sleepdata = (u8[]){ 2, 0xbc, 0x03 },
        .count = 5,
@@ -160,8 +160,8 @@ static const struct dvb_pll_desc dvb_pll_lg_z201 = {
 
 static const struct dvb_pll_desc dvb_pll_unknown_1 = {
        .name  = "unknown 1", /* used by dntv live dvb-t */
-       .min   = 174000000,
-       .max   = 862000000,
+       .min   = 174 * MHz,
+       .max   = 862 * MHz,
        .iffreq= 36166667,
        .count = 9,
        .entries = {
@@ -182,8 +182,8 @@ static const struct dvb_pll_desc dvb_pll_unknown_1 = {
  */
 static const struct dvb_pll_desc dvb_pll_tua6010xs = {
        .name  = "Infineon TUA6010XS",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .iffreq= 36125000,
        .count = 3,
        .entries = {
@@ -196,8 +196,8 @@ static const struct dvb_pll_desc dvb_pll_tua6010xs = {
 /* Panasonic env57h1xd5 (some Philips PLL ?) */
 static const struct dvb_pll_desc dvb_pll_env57h1xd5 = {
        .name  = "Panasonic ENV57H1XD5",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .iffreq= 36125000,
        .count = 4,
        .entries = {
@@ -220,8 +220,8 @@ static void tda665x_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_tda665x = {
        .name  = "Philips TDA6650/TDA6651",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .set   = tda665x_bw,
        .iffreq= 36166667,
        .initdata = (u8[]){ 4, 0x0b, 0xf5, 0x85, 0xab },
@@ -254,8 +254,8 @@ static void tua6034_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_tua6034 = {
        .name  = "Infineon TUA6034",
-       .min   =  44250000,
-       .max   = 858000000,
+       .min   = 44250 * kHz,
+       .max   = 858 * MHz,
        .iffreq= 36166667,
        .count = 3,
        .set   = tua6034_bw,
@@ -278,8 +278,8 @@ static void tded4_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_tded4 = {
        .name = "ALPS TDED4",
-       .min = 47000000,
-       .max = 863000000,
+       .min =  47 * MHz,
+       .max = 863 * MHz,
        .iffreq= 36166667,
        .set   = tded4_bw,
        .count = 4,
@@ -296,8 +296,8 @@ static const struct dvb_pll_desc dvb_pll_tded4 = {
  */
 static const struct dvb_pll_desc dvb_pll_tdhu2 = {
        .name = "ALPS TDHU2",
-       .min = 54000000,
-       .max = 864000000,
+       .min =  54 * MHz,
+       .max = 864 * MHz,
        .iffreq= 44000000,
        .count = 4,
        .entries = {
@@ -313,8 +313,8 @@ static const struct dvb_pll_desc dvb_pll_tdhu2 = {
  */
 static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
        .name = "Samsung TBMV30111IN / TBMV30712IN1",
-       .min = 54000000,
-       .max = 860000000,
+       .min =  54 * MHz,
+       .max = 860 * MHz,
        .iffreq= 44000000,
        .count = 6,
        .entries = {
@@ -332,8 +332,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmv = {
  */
 static const struct dvb_pll_desc dvb_pll_philips_sd1878_tda8261 = {
        .name  = "Philips SD1878",
-       .min   =  950000,
-       .max   = 2150000,
+       .min   =  950 * MHz,
+       .max   = 2150 * MHz,
        .iffreq= 249, /* zero-IF, offset 249 is to round up */
        .count = 4,
        .entries = {
@@ -398,8 +398,8 @@ static void opera1_bw(struct dvb_frontend *fe, u8 *buf)
 
 static const struct dvb_pll_desc dvb_pll_opera1 = {
        .name  = "Opera Tuner",
-       .min   =  900000,
-       .max   = 2250000,
+       .min   =  900 * MHz,
+       .max   = 2250 * MHz,
        .initdata = (u8[]){ 4, 0x08, 0xe5, 0xe1, 0x00 },
        .initdata2 = (u8[]){ 4, 0x08, 0xe5, 0xe5, 0x00 },
        .iffreq= 0,
@@ -445,8 +445,8 @@ static void samsung_dtos403ih102a_set(struct dvb_frontend *fe, u8 *buf)
 /* unknown pll used in Samsung DTOS403IH102A DVB-C tuner */
 static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
        .name   = "Samsung DTOS403IH102A",
-       .min    =  44250000,
-       .max    = 858000000,
+       .min    = 44250 * kHz,
+       .max    = 858 * MHz,
        .iffreq =  36125000,
        .count  = 8,
        .set    = samsung_dtos403ih102a_set,
@@ -465,8 +465,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_dtos403ih102a = {
 /* Samsung TDTC9251DH0 DVB-T NIM, as used on AirStar 2 */
 static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
        .name   = "Samsung TDTC9251DH0",
-       .min    =  48000000,
-       .max    = 863000000,
+       .min    =  48 * MHz,
+       .max    = 863 * MHz,
        .iffreq =  36166667,
        .count  = 3,
        .entries = {
@@ -479,8 +479,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tdtc9251dh0 = {
 /* Samsung TBDU18132 DVB-S NIM with TSA5059 PLL, used in SkyStar2 DVB-S 2.3 */
 static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
        .name = "Samsung TBDU18132",
-       .min    =  950000,
-       .max    = 2150000, /* guesses */
+       .min    =  950 * MHz,
+       .max    = 2150 * MHz, /* guesses */
        .iffreq = 0,
        .count = 2,
        .entries = {
@@ -500,8 +500,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbdu18132 = {
 /* Samsung TBMU24112 DVB-S NIM with SL1935 zero-IF tuner */
 static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
        .name = "Samsung TBMU24112",
-       .min    =  950000,
-       .max    = 2150000, /* guesses */
+       .min    =  950 * MHz,
+       .max    = 2150 * MHz, /* guesses */
        .iffreq = 0,
        .count = 2,
        .entries = {
@@ -521,8 +521,8 @@ static const struct dvb_pll_desc dvb_pll_samsung_tbmu24112 = {
  * 822 - 862   1  *  0   0   1   0   0   0   0x88 */
 static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
        .name = "ALPS TDEE4",
-       .min    =  47000000,
-       .max    = 862000000,
+       .min    =  47 * MHz,
+       .max    = 862 * MHz,
        .iffreq =  36125000,
        .count = 4,
        .entries = {
@@ -537,8 +537,8 @@ static const struct dvb_pll_desc dvb_pll_alps_tdee4 = {
 /* CP cur. 50uA, AGC takeover: 103dBuV, PORT3 on */
 static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
        .name   = "Infineon TUA6034 ISDB-T (Friio)",
-       .min    =  90000000,
-       .max    = 770000000,
+       .min    =  90 * MHz,
+       .max    = 770 * MHz,
        .iffreq =  57000000,
        .initdata = (u8[]){ 4, 0x9a, 0x50, 0xb2, 0x08 },
        .sleepdata = (u8[]){ 4, 0x9a, 0x70, 0xb3, 0x0b },
@@ -553,8 +553,8 @@ static const struct dvb_pll_desc dvb_pll_tua6034_friio = {
 /* Philips TDA6651 ISDB-T, used in Earthsoft PT1 */
 static const struct dvb_pll_desc dvb_pll_tda665x_earth_pt1 = {
        .name   = "Philips TDA6651 ISDB-T (EarthSoft PT1)",
-       .min    =  90000000,
-       .max    = 770000000,
+       .min    =  90 * MHz,
+       .max    = 770 * MHz,
        .iffreq =  57000000,
        .initdata = (u8[]){ 5, 0x0e, 0x7f, 0xc1, 0x80, 0x80 },
        .count = 10,
@@ -610,9 +610,6 @@ static int dvb_pll_configure(struct dvb_frontend *fe, u8 *buf,
        u32 div;
        int i;
 
-       if (frequency && (frequency < desc->min || frequency > desc->max))
-               return -EINVAL;
-
        for (i = 0; i < desc->count; i++) {
                if (frequency > desc->entries[i].limit)
                        continue;
@@ -799,7 +796,6 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
        struct dvb_pll_priv *priv = NULL;
        int ret;
        const struct dvb_pll_desc *desc;
-       struct dtv_frontend_properties *c = &fe->dtv_property_cache;
 
        b1 = kmalloc(1, GFP_KERNEL);
        if (!b1)
@@ -845,18 +841,12 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
 
        strncpy(fe->ops.tuner_ops.info.name, desc->name,
                sizeof(fe->ops.tuner_ops.info.name));
-       switch (c->delivery_system) {
-       case SYS_DVBS:
-       case SYS_DVBS2:
-       case SYS_TURBO:
-       case SYS_ISDBS:
-               fe->ops.tuner_ops.info.frequency_min_hz = desc->min * kHz;
-               fe->ops.tuner_ops.info.frequency_max_hz = desc->max * kHz;
-               break;
-       default:
-               fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
-               fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
-       }
+
+       fe->ops.tuner_ops.info.frequency_min_hz = desc->min;
+       fe->ops.tuner_ops.info.frequency_max_hz = desc->max;
+
+       dprintk("%s tuner, frequency range: %u...%u\n",
+               desc->name, desc->min, desc->max);
 
        if (!desc->initdata)
                fe->ops.tuner_ops.init = NULL;
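The table entries above now encode their ranges directly in Hz via the driver's unit multipliers, which is what lets dvb_pll_attach() drop the old per-delivery-system kHz scaling. A minimal sketch of the assumed multiplier definitions and the resulting values (illustrative, not part of the patch):

        /* Assumed unit macros in dvb-pll.c: */
        #define kHz 1000
        #define MHz 1000000

        .min = 44250 * kHz, /*  44250000 Hz, same value as before       */
        .max = 2150 * MHz,  /* the old kHz-based 2150000 now in Hz      */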
index ca5d92942820a2fcb439ec1e7d277dd445214276..41d470d9ca943ea84653f9dd8bfb0ab83dafe1a4 100644 (file)
@@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state)
        ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint);
        if (ret) {
                dev_err(dev, "failed to parse endpoint\n");
-               ret = ret;
                goto put_node;
        }
 
index 4e9db1fed69711f731089472799ac47d85b4ab94..c71a34ae6383c65651bd08baa8de0e5e59e59e9c 100644 (file)
@@ -238,6 +238,9 @@ static const struct file_operations request_fops = {
        .owner = THIS_MODULE,
        .poll = media_request_poll,
        .unlocked_ioctl = media_request_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl = media_request_ioctl,
+#endif /* CONFIG_COMPAT */
        .release = media_request_close,
 };
 
index 452eb9b42140bb927e7af8287dcbe99b45ef51dd..447baaebca4486c4b5c3b8c5dc261ec3e4cd2ce8 100644 (file)
@@ -1844,14 +1844,12 @@ fail_mutex_destroy:
 static void cio2_pci_remove(struct pci_dev *pci_dev)
 {
        struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
-       unsigned int i;
 
+       media_device_unregister(&cio2->media_dev);
        cio2_notifier_exit(cio2);
+       cio2_queues_exit(cio2);
        cio2_fbpt_exit_dummy(cio2);
-       for (i = 0; i < CIO2_QUEUES; i++)
-               cio2_queue_exit(cio2, &cio2->queue[i]);
        v4l2_device_unregister(&cio2->v4l2_dev);
-       media_device_unregister(&cio2->media_dev);
        media_device_cleanup(&cio2->media_dev);
        mutex_destroy(&cio2->lock);
 }
index 77fb7987b42f33cda57dc8b6627d4befc2d7a83e..13f2828d880df373ff494d10416ecb16be873bf7 100644 (file)
@@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev)
 
 static void isp_unregister_entities(struct isp_device *isp)
 {
+       media_device_unregister(&isp->media_dev);
+
        omap3isp_csi2_unregister_entities(&isp->isp_csi2a);
        omap3isp_ccp2_unregister_entities(&isp->isp_ccp2);
        omap3isp_ccdc_unregister_entities(&isp->isp_ccdc);
@@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp)
        omap3isp_stat_unregister_entities(&isp->isp_hist);
 
        v4l2_device_unregister(&isp->v4l2_dev);
-       media_device_unregister(&isp->media_dev);
        media_device_cleanup(&isp->media_dev);
 }
 
index 1eb9132bfc85fdad1ab68d81442fb435be0ac0f0..013cdebecbc49b5a899a91fb8455383f2f6df506 100644 (file)
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info");
 #define MAX_WIDTH              4096U
 #define MIN_WIDTH              640U
 #define MAX_HEIGHT             2160U
-#define MIN_HEIGHT             480U
+#define MIN_HEIGHT             360U
 
 #define dprintk(dev, fmt, arg...) \
        v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
@@ -304,7 +304,8 @@ restart:
                for (; p < p_out + sz; p++) {
                        u32 copy;
 
-                       p = memchr(p, magic[ctx->comp_magic_cnt], sz);
+                       p = memchr(p, magic[ctx->comp_magic_cnt],
+                                  p_out + sz - p);
                        if (!p) {
                                ctx->comp_magic_cnt = 0;
                                break;
index af150a0395dfb55ef55bf606df4825ec5b807912..d82db738f174ef048d663d366b4950cd732c2b43 100644 (file)
@@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = {
 
 static const struct media_device_ops m2m_media_ops = {
        .req_validate = vb2_request_validate,
-       .req_queue = vb2_m2m_request_queue,
+       .req_queue = v4l2_m2m_request_queue,
 };
 
 static int vim2m_probe(struct platform_device *pdev)
index fce9d6f4b7c924c6b95dea6c59f792ac701b7031..3137f5d89d8030448e4cad4b912a6dca2caa11df 100644 (file)
@@ -426,10 +426,10 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
 
        /* append the packet to the frame buffer */
        if (len > 0) {
-               if (gspca_dev->image_len + len > gspca_dev->pixfmt.sizeimage) {
+               if (gspca_dev->image_len + len > PAGE_ALIGN(gspca_dev->pixfmt.sizeimage)) {
                        gspca_err(gspca_dev, "frame overflow %d > %d\n",
                                  gspca_dev->image_len + len,
-                                 gspca_dev->pixfmt.sizeimage);
+                                 PAGE_ALIGN(gspca_dev->pixfmt.sizeimage));
                        packet_type = DISCARD_PACKET;
                } else {
 /* !! image is NULL only when last pkt is LAST or DISCARD
@@ -1297,18 +1297,19 @@ static int gspca_queue_setup(struct vb2_queue *vq,
                             unsigned int sizes[], struct device *alloc_devs[])
 {
        struct gspca_dev *gspca_dev = vb2_get_drv_priv(vq);
+       unsigned int size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
 
        if (*nplanes)
-               return sizes[0] < gspca_dev->pixfmt.sizeimage ? -EINVAL : 0;
+               return sizes[0] < size ? -EINVAL : 0;
        *nplanes = 1;
-       sizes[0] = gspca_dev->pixfmt.sizeimage;
+       sizes[0] = size;
        return 0;
 }
 
 static int gspca_buffer_prepare(struct vb2_buffer *vb)
 {
        struct gspca_dev *gspca_dev = vb2_get_drv_priv(vb->vb2_queue);
-       unsigned long size = gspca_dev->pixfmt.sizeimage;
+       unsigned long size = PAGE_ALIGN(gspca_dev->pixfmt.sizeimage);
 
        if (vb2_plane_size(vb, 0) < size) {
                gspca_err(gspca_dev, "buffer too small (%lu < %lu)\n",
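Using PAGE_ALIGN() for both the advertised buffer size and the overflow check keeps the two consistent: vb2 rounds allocations up to a page anyway, so a USB transfer may validly run past sizeimage up to the aligned length. A worked sketch, assuming 4 KiB pages:

        /* PAGE_ALIGN() rounds up to the next page boundary: */
        PAGE_ALIGN(614400); /* == 614400 (exactly 150 pages)       */
        PAGE_ALIGN(614401); /* == 618496 (rounded up to 151 pages) */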
index 6e37950292cd9b832d589e1728fba7d28d469794..5f2b033a7a42f1cb35dd13e40a498232bd3a225d 100644 (file)
@@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx,
                    p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME)
                        return -EINVAL;
 
+               if (p_mpeg2_slice_params->pad ||
+                   p_mpeg2_slice_params->picture.pad ||
+                   p_mpeg2_slice_params->sequence.pad)
+                       return -EINVAL;
+
                return 0;
 
        case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION:
index a3ef1f50a4b3496dcfbe7cb4332a3bab9a3acc56..481e3c65cf97a63202e1223b106c59b5e670651b 100644 (file)
@@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
 }
 EXPORT_SYMBOL_GPL(v4l2_event_pending);
 
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+       struct v4l2_fh *fh = sev->fh;
+       unsigned int i;
+
+       lockdep_assert_held(&fh->subscribe_lock);
+       assert_spin_locked(&fh->vdev->fh_lock);
+
+       /* Remove any pending events for this subscription */
+       for (i = 0; i < sev->in_use; i++) {
+               list_del(&sev->events[sev_pos(sev, i)].list);
+               fh->navailable--;
+       }
+       list_del(&sev->list);
+}
+
 int v4l2_event_subscribe(struct v4l2_fh *fh,
                         const struct v4l2_event_subscription *sub, unsigned elems,
                         const struct v4l2_subscribed_event_ops *ops)
@@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
+       if (!found_ev)
+               list_add(&sev->list, &fh->subscribed);
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
        if (found_ev) {
                /* Already listening */
                kvfree(sev);
-               goto out_unlock;
-       }
-
-       if (sev->ops && sev->ops->add) {
+       } else if (sev->ops && sev->ops->add) {
                ret = sev->ops->add(sev, elems);
                if (ret) {
+                       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+                       __v4l2_event_unsubscribe(sev);
+                       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                        kvfree(sev);
-                       goto out_unlock;
                }
        }
 
-       spin_lock_irqsave(&fh->vdev->fh_lock, flags);
-       list_add(&sev->list, &fh->subscribed);
-       spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
-
-out_unlock:
        mutex_unlock(&fh->subscribe_lock);
 
        return ret;
@@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 {
        struct v4l2_subscribed_event *sev;
        unsigned long flags;
-       int i;
 
        if (sub->type == V4L2_EVENT_ALL) {
                v4l2_event_unsubscribe_all(fh);
@@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
        sev = v4l2_event_subscribed(fh, sub->type, sub->id);
-       if (sev != NULL) {
-               /* Remove any pending events for this subscription */
-               for (i = 0; i < sev->in_use; i++) {
-                       list_del(&sev->events[sev_pos(sev, i)].list);
-                       fh->navailable--;
-               }
-               list_del(&sev->list);
-       }
+       if (sev != NULL)
+               __v4l2_event_unsubscribe(sev);
 
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
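The reworked subscribe path boils down to an insert-early, roll-back-on-failure pattern: the subscription is published under the spinlock before ops->add() runs, and the new __v4l2_event_unsubscribe() undoes it if ops->add() fails. Condensed sketch of the pattern, not the literal driver code:

        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        if (!found_ev)
                list_add(&sev->list, &fh->subscribed); /* publish early */
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

        if (!found_ev && sev->ops && sev->ops->add &&
            sev->ops->add(sev, elems)) {
                spin_lock_irqsave(&fh->vdev->fh_lock, flags);
                __v4l2_event_unsubscribe(sev);         /* roll back */
                spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
                kvfree(sev);
        }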
index d7806db222d83b87f39175fb53c06178d98b363e..1ed2465972acab4d0fae6aa4a8d28c966167d224 100644 (file)
@@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
 }
 EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue);
 
-void vb2_m2m_request_queue(struct media_request *req)
+void v4l2_m2m_request_queue(struct media_request *req)
 {
        struct media_request_object *obj, *obj_safe;
        struct v4l2_m2m_ctx *m2m_ctx = NULL;
@@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req)
        if (m2m_ctx)
                v4l2_m2m_try_schedule(m2m_ctx);
 }
-EXPORT_SYMBOL_GPL(vb2_m2m_request_queue);
+EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue);
 
 /* Videobuf2 ioctl helpers */
 
index 8f9d6964173ec7852072022f597e45ac5ca5d88d..b99a194ce5a4a2926d8eda42e610c64ab34c5edb 100644 (file)
@@ -263,6 +263,11 @@ static const struct file_operations fops = {
 #endif
 };
 
+static void cros_ec_class_release(struct device *dev)
+{
+       kfree(to_cros_ec_dev(dev));
+}
+
 static void cros_ec_sensors_register(struct cros_ec_dev *ec)
 {
        /*
@@ -395,7 +400,7 @@ static int ec_device_probe(struct platform_device *pdev)
        int retval = -ENOMEM;
        struct device *dev = &pdev->dev;
        struct cros_ec_platform *ec_platform = dev_get_platdata(dev);
-       struct cros_ec_dev *ec = devm_kzalloc(dev, sizeof(*ec), GFP_KERNEL);
+       struct cros_ec_dev *ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 
        if (!ec)
                return retval;
@@ -417,6 +422,7 @@ static int ec_device_probe(struct platform_device *pdev)
        ec->class_dev.devt = MKDEV(ec_major, pdev->id);
        ec->class_dev.class = &cros_class;
        ec->class_dev.parent = dev;
+       ec->class_dev.release = cros_ec_class_release;
 
        retval = dev_set_name(&ec->class_dev, "%s", ec_platform->ec_name);
        if (retval) {
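The devm_kzalloc() to kzalloc() switch pairs the allocation with the class device's release() callback: the cros_ec_dev embeds a struct device that userspace can keep pinned through the chardev, so the memory must live until the last put_device(), not until platform-device remove. Reduced sketch of the pattern, with hypothetical names:

        static void foo_release(struct device *dev)
        {
                kfree(container_of(dev, struct foo, class_dev));
        }

        foo = kzalloc(sizeof(*foo), GFP_KERNEL); /* not devm_kzalloc() */
        foo->class_dev.release = foo_release;
        /* the final put_device(&foo->class_dev) frees foo */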
index b2a0340f277e268739c288e6bb73f62321475c30..d8e3cc2dc7470d8deaa3b89a5855b22b034725db 100644 (file)
@@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
 #endif
 
-static inline const struct atmel_ssc_platform_data * __init
+static inline const struct atmel_ssc_platform_data *
        atmel_ssc_get_driver_data(struct platform_device *pdev)
 {
        if (pdev->dev.of_node) {
index 3370a4138e942621a008573b1fa1ad6912b1c75c..951c984de61ae9e85af22eb05e76199c5752baa6 100644 (file)
@@ -8,7 +8,9 @@ lkdtm-$(CONFIG_LKDTM)           += perms.o
 lkdtm-$(CONFIG_LKDTM)          += refcount.o
 lkdtm-$(CONFIG_LKDTM)          += rodata_objcopy.o
 lkdtm-$(CONFIG_LKDTM)          += usercopy.o
+lkdtm-$(CONFIG_LKDTM)          += stackleak.o
 
+KASAN_SANITIZE_stackleak.o     := n
 KCOV_INSTRUMENT_rodata.o       := n
 
 OBJCOPYFLAGS :=
index 5a755590d3dcefe85b0354c88bacc9dffd2e392d..2837dc77478ed43e9a8561c850c92879f010b07d 100644 (file)
@@ -184,6 +184,7 @@ static const struct crashtype crashtypes[] = {
        CRASHTYPE(USERCOPY_STACK_BEYOND),
        CRASHTYPE(USERCOPY_KERNEL),
        CRASHTYPE(USERCOPY_KERNEL_DS),
+       CRASHTYPE(STACKLEAK_ERASING),
 };
 
 
index 07db641d71d023bd2eb710873114261170cce079..3c6fd327e166a4c83dfef10f8d83209cd4f6f892 100644 (file)
@@ -84,4 +84,7 @@ void lkdtm_USERCOPY_STACK_BEYOND(void);
 void lkdtm_USERCOPY_KERNEL(void);
 void lkdtm_USERCOPY_KERNEL_DS(void);
 
+/* lkdtm_stackleak.c */
+void lkdtm_STACKLEAK_ERASING(void);
+
 #endif
diff --git a/drivers/misc/lkdtm/stackleak.c b/drivers/misc/lkdtm/stackleak.c
new file mode 100644 (file)
index 0000000..d5a0844
--- /dev/null
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code tests that the current task stack is properly erased (filled
+ * with STACKLEAK_POISON).
+ *
+ * Authors:
+ *   Alexander Popov <alex.popov@linux.com>
+ *   Tycho Andersen <tycho@tycho.ws>
+ */
+
+#include "lkdtm.h"
+#include <linux/stackleak.h>
+
+void lkdtm_STACKLEAK_ERASING(void)
+{
+       unsigned long *sp, left, found, i;
+       const unsigned long check_depth =
+                       STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+
+       /*
+        * For the details about the alignment of the poison values, see
+        * the comment in stackleak_track_stack().
+        */
+       sp = PTR_ALIGN(&i, sizeof(unsigned long));
+
+       left = ((unsigned long)sp & (THREAD_SIZE - 1)) / sizeof(unsigned long);
+       sp--;
+
+       /*
+        * One 'long int' at the bottom of the thread stack is reserved
+        * and not poisoned.
+        */
+       if (left > 1) {
+               left--;
+       } else {
+               pr_err("FAIL: not enough stack space for the test\n");
+               return;
+       }
+
+       pr_info("checking unused part of the thread stack (%lu bytes)...\n",
+                                       left * sizeof(unsigned long));
+
+       /*
+        * Search for 'check_depth' poison values in a row (just like
+        * stackleak_erase() does).
+        */
+       for (i = 0, found = 0; i < left && found <= check_depth; i++) {
+               if (*(sp - i) == STACKLEAK_POISON)
+                       found++;
+               else
+                       found = 0;
+       }
+
+       if (found <= check_depth) {
+               pr_err("FAIL: thread stack is not erased (checked %lu bytes)\n",
+                                               i * sizeof(unsigned long));
+               return;
+       }
+
+       pr_info("first %lu bytes are unpoisoned\n",
+                               (i - found) * sizeof(unsigned long));
+
+       /* The rest of thread stack should be erased */
+       for (; i < left; i++) {
+               if (*(sp - i) != STACKLEAK_POISON) {
+                       pr_err("FAIL: thread stack is NOT properly erased\n");
+                       return;
+               }
+       }
+
+       pr_info("OK: the rest of the thread stack is properly erased\n");
+       return;
+}
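Like the other crash types registered above, the new test should be reachable through lkdtm's usual debugfs interface, e.g. 'echo STACKLEAK_ERASING > /sys/kernel/debug/provoke-crash/DIRECT' (assuming debugfs is mounted and CONFIG_LKDTM is enabled); the pass/fail verdict lands in the kernel log via the pr_info()/pr_err() calls.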
index c824329f7012adfc765e4b1872185f5b148fd445..0e4193cb08cf1ac9d3f095810c082e5c3298775b 100644 (file)
@@ -416,7 +416,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
                if (err)
                        goto error_window;
                err = scif_map_page(&window->num_pages_lookup.lookup[j],
-                                   vmalloc_dma_phys ?
+                                   vmalloc_num_pages ?
                                    vmalloc_to_page(&window->num_pages[i]) :
                                    virt_to_page(&window->num_pages[i]),
                                    remote_dev);
index 313da31502626897a61a65606aa117d0d67dc83b..1540a7785e14743ae1b035aeb21d391af8516050 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/delay.h>
 #include <linux/bitops.h>
 #include <asm/uv/uv_hub.h>
+
+#include <linux/nospec.h>
+
 #include "gru.h"
 #include "grutables.h"
 #include "gruhandles.h"
@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
        /* Currently, only dump by gid is implemented */
        if (req.gid >= gru_max_gids)
                return -EINVAL;
+       req.gid = array_index_nospec(req.gid, gru_max_gids);
 
        gru = GID_TO_GRU(req.gid);
        ubuf = req.buf;
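The added clamp is the standard Spectre-v1 hardening recipe: the bounds check alone can be bypassed under speculative execution, so the index is additionally sanitized before being used for pointer arithmetic. Generic shape of the pattern (sketch):

        #include <linux/nospec.h>

        if (idx >= nr_items)
                return -EINVAL;
        /* returns idx, clamped to [0, nr_items) even speculatively */
        idx = array_index_nospec(idx, nr_items);
        item = items[idx];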
index bd52f29b4a4e273eb6f69a56c34a0789279d2226..264f4ed8eef26e8f839d121d905497c3c75de67c 100644 (file)
@@ -3030,7 +3030,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
        if (!qpair || !buf)
                return VMCI_ERROR_INVALID_ARGS;
 
-       iov_iter_kvec(&from, WRITE | ITER_KVEC, &v, 1, buf_size);
+       iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
 
        qp_lock(qpair);
 
@@ -3074,7 +3074,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
        if (!qpair || !buf)
                return VMCI_ERROR_INVALID_ARGS;
 
-       iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+       iov_iter_kvec(&to, READ, &v, 1, buf_size);
 
        qp_lock(qpair);
 
@@ -3119,7 +3119,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
        if (!qpair || !buf)
                return VMCI_ERROR_INVALID_ARGS;
 
-       iov_iter_kvec(&to, READ | ITER_KVEC, &v, 1, buf_size);
+       iov_iter_kvec(&to, READ, &v, 1, buf_size);
 
        qp_lock(qpair);
 
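These hunks follow the iov_iter API change that separated the iterator type from the direction: callers now pass plain READ or WRITE, and iov_iter_kvec() implies ITER_KVEC itself. Under the new API a kvec iterator is set up as (sketch):

        struct kvec v = { .iov_base = buf, .iov_len = len };
        struct iov_iter to;

        iov_iter_kvec(&to, READ, &v, 1, len); /* ITER_KVEC is implicit */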
index 7bfd366d970dae374bae6acd36624eb23f719506..c4115bae5db187f1a331efd5c495b20bc31a5c86 100644 (file)
@@ -12,6 +12,7 @@
  *     - JMicron (hardware and technical support)
  */
 
+#include <linux/bitfield.h>
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
@@ -462,6 +463,9 @@ struct intel_host {
        u32     dsm_fns;
        int     drv_strength;
        bool    d3_retune;
+       bool    rpm_retune_ok;
+       u32     glk_rx_ctrl1;
+       u32     glk_tun_val;
 };
 
 static const guid_t intel_dsm_guid =
@@ -791,6 +795,77 @@ cleanup:
        return ret;
 }
 
+#ifdef CONFIG_PM
+#define GLK_RX_CTRL1   0x834
+#define GLK_TUN_VAL    0x840
+#define GLK_PATH_PLL   GENMASK(13, 8)
+#define GLK_DLY                GENMASK(6, 0)
+/* Workaround firmware failing to restore the tuning value */
+static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp)
+{
+       struct sdhci_pci_slot *slot = chip->slots[0];
+       struct intel_host *intel_host = sdhci_pci_priv(slot);
+       struct sdhci_host *host = slot->host;
+       u32 glk_rx_ctrl1;
+       u32 glk_tun_val;
+       u32 dly;
+
+       if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc))
+               return;
+
+       glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1);
+       glk_tun_val = sdhci_readl(host, GLK_TUN_VAL);
+
+       if (susp) {
+               intel_host->glk_rx_ctrl1 = glk_rx_ctrl1;
+               intel_host->glk_tun_val = glk_tun_val;
+               return;
+       }
+
+       if (!intel_host->glk_tun_val)
+               return;
+
+       if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) {
+               intel_host->rpm_retune_ok = true;
+               return;
+       }
+
+       dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) +
+                                 (intel_host->glk_tun_val << 1));
+       if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1))
+               return;
+
+       glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly;
+       sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1);
+
+       intel_host->rpm_retune_ok = true;
+       chip->rpm_retune = true;
+       mmc_retune_needed(host->mmc);
+       pr_info("%s: Requiring re-tune after rpm resume\n", mmc_hostname(host->mmc));
+}
+
+static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp)
+{
+       if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC &&
+           !chip->rpm_retune)
+               glk_rpm_retune_wa(chip, susp);
+}
+
+static int glk_runtime_suspend(struct sdhci_pci_chip *chip)
+{
+       glk_rpm_retune_chk(chip, true);
+
+       return sdhci_cqhci_runtime_suspend(chip);
+}
+
+static int glk_runtime_resume(struct sdhci_pci_chip *chip)
+{
+       glk_rpm_retune_chk(chip, false);
+
+       return sdhci_cqhci_runtime_resume(chip);
+}
+#endif
+
 #ifdef CONFIG_ACPI
 static int ni_set_max_freq(struct sdhci_pci_slot *slot)
 {
@@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
        .resume                 = sdhci_cqhci_resume,
 #endif
 #ifdef CONFIG_PM
-       .runtime_suspend        = sdhci_cqhci_runtime_suspend,
-       .runtime_resume         = sdhci_cqhci_runtime_resume,
+       .runtime_suspend        = glk_runtime_suspend,
+       .runtime_resume         = glk_runtime_resume,
 #endif
        .quirks                 = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
        .quirks2                = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
@@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
                device_init_wakeup(&pdev->dev, true);
 
        if (slot->cd_idx >= 0) {
-               ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx,
+               ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx,
                                           slot->cd_override_level, 0, NULL);
+               if (ret && ret != -EPROBE_DEFER)
+                       ret = mmc_gpiod_request_cd(host->mmc, NULL,
+                                                  slot->cd_idx,
+                                                  slot->cd_override_level,
+                                                  0, NULL);
                if (ret == -EPROBE_DEFER)
                        goto remove;
 
index e514d57a0419defecb8dcbbc8be4604aea1321da..aa983422aa970f1035201a1a4841b7a09d9acc3f 100644 (file)
@@ -207,7 +207,7 @@ comment "Disk-On-Chip Device Drivers"
 config MTD_DOCG3
        tristate "M-Systems Disk-On-Chip G3"
        select BCH
-       select BCH_CONST_PARAMS
+       select BCH_CONST_PARAMS if !MTD_NAND_BCH
        select BITREVERSE
        help
          This provides an MTD device driver for the M-Systems DiskOnChip
index 784c6e1a0391e92c90723e698d8bc148fe3e4916..fd5fe12d74613ecebddb88699dcae5e1862d3829 100644 (file)
@@ -221,7 +221,14 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
                info->mtd = info->subdev[0].mtd;
                ret = 0;
        } else if (info->num_subdev > 1) {
-               struct mtd_info *cdev[nr];
+               struct mtd_info **cdev;
+
+               cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+               if (!cdev) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+
                /*
                 * We detected multiple devices.  Concatenate them together.
                 */
@@ -230,6 +237,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev,
 
                info->mtd = mtd_concat_create(cdev, info->num_subdev,
                                              plat->name);
+               kfree(cdev);
                if (info->mtd == NULL) {
                        ret = -ENXIO;
                        goto err;
index 56cde38b92c034e28b1428d7f11ac57c25753fad..044adf91385465ca73e54f53238fb68b03c8a604 100644 (file)
@@ -27,7 +27,8 @@ int nanddev_bbt_init(struct nand_device *nand)
        unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
                                           BITS_PER_LONG);
 
-       nand->bbt.cache = kzalloc(nwords, GFP_KERNEL);
+       nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
+                                 GFP_KERNEL);
        if (!nand->bbt.cache)
                return -ENOMEM;
 
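nwords counts unsigned longs, not bytes, so the old kzalloc(nwords, ...) under-allocated the bitmap by a factor of sizeof(unsigned long). A worked example on a 64-bit build, assuming 2048 blocks at 2 bits per block:

        /* nwords = DIV_ROUND_UP(2048 * 2, 64) = 64 longs = 512 bytes;
         * kzalloc(nwords) would have handed back only 64 bytes. */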
index fb33f6be7c4ff7306f391931e1843f6712edbdb7..ad720494e8f78dfd74995ba067a8ea973f555980 100644 (file)
@@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
        int ret;
 
        nand_np = dev->of_node;
-       nfc_np = of_find_compatible_node(dev->of_node, NULL,
-                                        "atmel,sama5d3-nfc");
+       nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
        if (!nfc_np) {
                dev_err(dev, "Could not find device node for sama5d3-nfc\n");
                return -ENODEV;
@@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev)
        }
 
        if (caps->legacy_of_bindings) {
+               struct device_node *nfc_node;
                u32 ale_offs = 21;
 
                /*
                 * If we are parsing legacy DT props and the DT contains a
                 * valid NFC node, forward the request to the sama5 logic.
                 */
-               if (of_find_compatible_node(pdev->dev.of_node, NULL,
-                                           "atmel,sama5d3-nfc"))
+               nfc_node = of_get_compatible_child(pdev->dev.of_node,
+                                                  "atmel,sama5d3-nfc");
+               if (nfc_node) {
                        caps = &atmel_sama5_nand_caps;
+                       of_node_put(nfc_node);
+               }
 
                /*
                 * Even if the compatible says we are dealing with an
index 05bd0779fe9bf7eae08acca31b7ba30f7592b9b1..71050a0b31dfe3b6bf273ff4c240e7cfe307080f 100644 (file)
@@ -590,7 +590,6 @@ retry:
 
 /**
  * panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
  * @chip: NAND chip structure
  * @timeo: timeout
  *
index ef75dfa62a4f816f49d6008dcc3bbd7561293530..699d3cf49c6da04b49180cda81d3b5d05fc66563 100644 (file)
 #define        NAND_VERSION_MINOR_SHIFT        16
 
 /* NAND OP_CMDs */
-#define        PAGE_READ                       0x2
-#define        PAGE_READ_WITH_ECC              0x3
-#define        PAGE_READ_WITH_ECC_SPARE        0x4
-#define        PROGRAM_PAGE                    0x6
-#define        PAGE_PROGRAM_WITH_ECC           0x7
-#define        PROGRAM_PAGE_SPARE              0x9
-#define        BLOCK_ERASE                     0xa
-#define        FETCH_ID                        0xb
-#define        RESET_DEVICE                    0xd
+#define        OP_PAGE_READ                    0x2
+#define        OP_PAGE_READ_WITH_ECC           0x3
+#define        OP_PAGE_READ_WITH_ECC_SPARE     0x4
+#define        OP_PROGRAM_PAGE                 0x6
+#define        OP_PAGE_PROGRAM_WITH_ECC        0x7
+#define        OP_PROGRAM_PAGE_SPARE           0x9
+#define        OP_BLOCK_ERASE                  0xa
+#define        OP_FETCH_ID                     0xb
+#define        OP_RESET_DEVICE                 0xd
 
 /* Default Value for NAND_DEV_CMD_VLD */
 #define NAND_DEV_CMD_VLD_VAL           (READ_START_VLD | WRITE_START_VLD | \
@@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
 
        if (read) {
                if (host->use_ecc)
-                       cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+                       cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
                else
-                       cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+                       cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE;
        } else {
-                       cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+               cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
        }
 
        if (host->use_ecc) {
@@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host)
         * in use. we configure the controller to perform a raw read of 512
         * bytes to read onfi params
         */
-       nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
+       nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, 0);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
@@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr)
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 
        nandc_set_reg(nandc, NAND_FLASH_CMD,
-                     BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+                     OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
        nandc_set_reg(nandc, NAND_ADDR0, page_addr);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_DEV0_CFG0,
@@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column)
        if (column == -1)
                return 0;
 
-       nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+       nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID);
        nandc_set_reg(nandc, NAND_ADDR0, column);
        nandc_set_reg(nandc, NAND_ADDR1, 0);
        nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT,
@@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host)
        struct nand_chip *chip = &host->chip;
        struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 
-       nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+       nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE);
        nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
 
        write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
index e24db817154ee73ad1fc0fd9586f4e294fc2886a..04cedd3a2bf6634c5d1f05ef3d27ba302d12935f 100644 (file)
@@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
                ndelay(cqspi->wr_delay);
 
        while (remaining > 0) {
+               size_t write_words, mod_bytes;
+
                write_bytes = remaining > page_size ? page_size : remaining;
-               iowrite32_rep(cqspi->ahb_base, txbuf,
-                             DIV_ROUND_UP(write_bytes, 4));
+               write_words = write_bytes / 4;
+               mod_bytes = write_bytes % 4;
+               /* Write 4 bytes at a time then single bytes. */
+               if (write_words) {
+                       iowrite32_rep(cqspi->ahb_base, txbuf, write_words);
+                       txbuf += (write_words * 4);
+               }
+               if (mod_bytes) {
+                       unsigned int temp = 0xFFFFFFFF;
+
+                       memcpy(&temp, txbuf, mod_bytes);
+                       iowrite32(temp, cqspi->ahb_base);
+                       txbuf += mod_bytes;
+               }
 
                if (!wait_for_completion_timeout(&cqspi->transfer_complete,
                                        msecs_to_jiffies(CQSPI_TIMEOUT_MS))) {
@@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr,
                        goto failwr;
                }
 
-               txbuf += write_bytes;
                remaining -= write_bytes;
 
                if (remaining > 0)
@@ -996,7 +1009,7 @@ static int cqspi_direct_read_execute(struct spi_nor *nor, u_char *buf,
 err_unmap:
        dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
 
-       return 0;
+       return ret;
 }
 
 static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
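The write path above fixes a read overrun: DIV_ROUND_UP(write_bytes, 4) rounded the FIFO copy up to whole words, so iowrite32_rep() could fetch past the end of txbuf. Worked out:

        /* old code, write_bytes = 5:
         * DIV_ROUND_UP(5, 4) = 2 words -> 8 bytes read from a 5-byte
         * source; the fix copies the tail via memcpy() into a stack
         * word instead. */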
index 9407ca5f9443338d56a355fe0eadfa6641a429dc..1fdd2834fbcb164c48fb580e51cfa53fdfaddf96 100644 (file)
@@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
  * @nor:       pointer to a 'struct spi_nor'
  * @addr:      offset in the serial flash memory
  * @len:       number of bytes to read
- * @buf:       buffer where the data is copied into
+ * @buf:       buffer where the data is copied into (dma-safe memory)
  *
  * Return: 0 on success, -errno otherwise.
  */
@@ -2521,6 +2521,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
        return left->size - right->size;
 }
 
+/**
+ * spi_nor_sort_erase_mask() - sort erase mask
+ * @map:       the erase map of the SPI NOR
+ * @erase_mask:        the erase type mask to be sorted
+ *
+ * Replicate the sort done for the map's erase types in BFPT: sort the erase
+ * mask in ascending order with the smallest erase type size starting from
+ * BIT(0) in the sorted erase mask.
+ *
+ * Return: sorted erase mask.
+ */
+static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask)
+{
+       struct spi_nor_erase_type *erase_type = map->erase_type;
+       int i;
+       u8 sorted_erase_mask = 0;
+
+       if (!erase_mask)
+               return 0;
+
+       /* Replicate the sort done for the map's erase types. */
+       for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+               if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx))
+                       sorted_erase_mask |= BIT(i);
+
+       return sorted_erase_mask;
+}
+
 /**
  * spi_nor_regions_sort_erase_types() - sort erase types in each region
  * @map:       the erase map of the SPI NOR
@@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r)
 static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map)
 {
        struct spi_nor_erase_region *region = map->regions;
-       struct spi_nor_erase_type *erase_type = map->erase_type;
-       int i;
        u8 region_erase_mask, sorted_erase_mask;
 
        while (region) {
                region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
 
-               /* Replicate the sort done for the map's erase types. */
-               sorted_erase_mask = 0;
-               for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
-                       if (erase_type[i].size &&
-                           region_erase_mask & BIT(erase_type[i].idx))
-                               sorted_erase_mask |= BIT(i);
+               sorted_erase_mask = spi_nor_sort_erase_mask(map,
+                                                           region_erase_mask);
 
                /* Overwrite erase mask. */
                region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) |
@@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings)
  * spi_nor_get_map_in_use() - get the configuration map in use
  * @nor:       pointer to a 'struct spi_nor'
  * @smpt:      pointer to the sector map parameter table
+ * @smpt_len:  sector map parameter table length
+ *
+ * Return: pointer to the map in use, ERR_PTR(-errno) otherwise.
  */
-static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt)
+static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt,
+                                        u8 smpt_len)
 {
-       const u32 *ret = NULL;
-       u32 i, addr;
+       const u32 *ret;
+       u8 *buf;
+       u32 addr;
        int err;
+       u8 i;
        u8 addr_width, read_opcode, read_dummy;
-       u8 read_data_mask, data_byte, map_id;
+       u8 read_data_mask, map_id;
+
+       /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */
+       buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+       if (!buf)
+               return ERR_PTR(-ENOMEM);
 
        addr_width = nor->addr_width;
        read_dummy = nor->read_dummy;
        read_opcode = nor->read_opcode;
 
        map_id = 0;
-       i = 0;
        /* Determine if there are any optional Detection Command Descriptors */
-       while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) {
+       for (i = 0; i < smpt_len; i += 2) {
+               if (smpt[i] & SMPT_DESC_TYPE_MAP)
+                       break;
+
                read_data_mask = SMPT_CMD_READ_DATA(smpt[i]);
                nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]);
                nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]);
                nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]);
                addr = smpt[i + 1];
 
-               err = spi_nor_read_raw(nor, addr, 1, &data_byte);
-               if (err)
+               err = spi_nor_read_raw(nor, addr, 1, buf);
+               if (err) {
+                       ret = ERR_PTR(err);
                        goto out;
+               }
 
                /*
                 * Build an index value that is used to select the Sector Map
                 * Configuration that is currently in use.
                 */
-               map_id = map_id << 1 | !!(data_byte & read_data_mask);
-               i = i + 2;
+               map_id = map_id << 1 | !!(*buf & read_data_mask);
        }
 
-       /* Find the matching configuration map */
-       while (SMPT_MAP_ID(smpt[i]) != map_id) {
+       /*
+        * If command descriptors are provided, they always precede map
+        * descriptors in the table. There is no need to start the iteration
+        * over smpt array all over again.
+        *
+        * Find the matching configuration map.
+        */
+       ret = ERR_PTR(-EINVAL);
+       while (i < smpt_len) {
+               if (SMPT_MAP_ID(smpt[i]) == map_id) {
+                       ret = smpt + i;
+                       break;
+               }
+
+               /*
+                * If there are no more configuration map descriptors and no
+                * configuration ID matched the configuration identifier, the
+                * sector address map is unknown.
+                */
                if (smpt[i] & SMPT_DESC_END)
-                       goto out;
+                       break;
+
                /* increment the table index to the next map */
                i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1;
        }
 
-       ret = smpt + i;
        /* fall through */
 out:
+       kfree(buf);
        nor->addr_width = addr_width;
        nor->read_dummy = read_dummy;
        nor->read_opcode = read_opcode;
@@ -2941,12 +2995,13 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
                                              const u32 *smpt)
 {
        struct spi_nor_erase_map *map = &nor->erase_map;
-       const struct spi_nor_erase_type *erase = map->erase_type;
+       struct spi_nor_erase_type *erase = map->erase_type;
        struct spi_nor_erase_region *region;
        u64 offset;
        u32 region_count;
        int i, j;
-       u8 erase_type;
+       u8 uniform_erase_type, save_uniform_erase_type;
+       u8 erase_type, regions_erase_type;
 
        region_count = SMPT_MAP_REGION_COUNT(*smpt);
        /*
@@ -2959,7 +3014,8 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
                return -ENOMEM;
        map->regions = region;
 
-       map->uniform_erase_type = 0xff;
+       uniform_erase_type = 0xff;
+       regions_erase_type = 0;
        offset = 0;
        /* Populate regions. */
        for (i = 0; i < region_count; i++) {
@@ -2974,12 +3030,40 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
                 * Save the erase types that are supported in all regions and
                 * can erase the entire flash memory.
                 */
-               map->uniform_erase_type &= erase_type;
+               uniform_erase_type &= erase_type;
+
+               /*
+                * regions_erase_type mask will indicate all the erase types
+                * supported in this configuration map.
+                */
+               regions_erase_type |= erase_type;
 
                offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
                         region[i].size;
        }
 
+       save_uniform_erase_type = map->uniform_erase_type;
+       map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+                                                         uniform_erase_type);
+
+       if (!regions_erase_type) {
+               /*
+                * Roll back to the previous uniform_erase_type mask, SMPT is
+                * broken.
+                */
+               map->uniform_erase_type = save_uniform_erase_type;
+               return -EINVAL;
+       }
+
+       /*
+        * BFPT advertises all the erase types supported by all the possible
+        * map configurations. Mask out the erase types that are not supported
+        * by the current map configuration.
+        */
+       for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++)
+               if (!(regions_erase_type & BIT(erase[i].idx)))
+                       spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
        spi_nor_region_mark_end(&region[i - 1]);
 
        return 0;
@@ -3020,9 +3104,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor,
        for (i = 0; i < smpt_header->length; i++)
                smpt[i] = le32_to_cpu(smpt[i]);
 
-       sector_map = spi_nor_get_map_in_use(nor, smpt);
-       if (!sector_map) {
-               ret = -EINVAL;
+       sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length);
+       if (IS_ERR(sector_map)) {
+               ret = PTR_ERR(sector_map);
                goto out;
        }
 
@@ -3125,7 +3209,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
        if (err)
                goto exit;
 
-       /* Parse other parameter headers. */
+       /* Parse optional parameter tables. */
        for (i = 0; i < header.nph; i++) {
                param_header = &param_headers[i];
 
@@ -3138,8 +3222,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
                        break;
                }
 
-               if (err)
-                       goto exit;
+               if (err) {
+                       dev_warn(dev, "Failed to parse optional parameter table: %04x\n",
+                                SFDP_PARAM_HEADER_ID(param_header));
+                       /*
+                        * Let's not drop all information we extracted so far
+                        * if optional table parsers fail. In case of failing,
+                        * each optional parser is responsible to roll back to
+                        * the previously known spi_nor data.
+                        */
+                       err = 0;
+               }
        }
 
 exit:
@@ -3250,12 +3343,14 @@ static int spi_nor_init_params(struct spi_nor *nor,
                memcpy(&sfdp_params, params, sizeof(sfdp_params));
                memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
 
-               if (spi_nor_parse_sfdp(nor, &sfdp_params))
+               if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+                       nor->addr_width = 0;
                        /* restore previous erase map */
                        memcpy(&nor->erase_map, &prev_map,
                               sizeof(nor->erase_map));
-               else
+               } else {
                        memcpy(params, &sfdp_params, sizeof(*params));
+               }
        }
 
        return 0;
index 93ceea4f27d5731f865c57be71f69c35b46d274f..e294d3986ba964d07bc9bcb67f800b4d479d5231 100644 (file)
@@ -1072,6 +1072,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
                         * be a result of power cut during erasure.
                         */
                        ai->maybe_bad_peb_count += 1;
+               /* fall through */
        case UBI_IO_BAD_HDR:
                        /*
                         * If we're facing a bad VID header we have to drop *all*
index d2a726654ff1182e961f22ff365dc461996b7204..a4e3454133a47eacdcc61034c11b6c98d831e9c8 100644 (file)
@@ -1334,8 +1334,10 @@ static int bytes_str_to_int(const char *str)
        switch (*endp) {
        case 'G':
                result *= 1024;
+               /* fall through */
        case 'M':
                result *= 1024;
+               /* fall through */
        case 'K':
                result *= 1024;
                if (endp[1] == 'i' && endp[2] == 'B')
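The annotated fall-throughs are intentional: each recognized suffix falls into the cases below it, multiplying the result by 1024 once per level. Worked out:

        /* "1G" -> 1 * 1024 * 1024 * 1024 = 1073741824
         * "1M" -> 1 * 1024 * 1024        = 1048576
         * "1K" -> 1 * 1024               = 1024 */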
index f43fb2f958a54e12c4d29ad91340e237e9d98e5a..93dfcef8afc4bc63a2e852b464e6482d3359e53c 100644 (file)
@@ -2086,6 +2086,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
                   aggregator->aggregator_identifier);
 
        /* Tell the partner that this port is not suitable for aggregation */
+       port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+       port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+       port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
        port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
        __update_lacpdu_from_port(port);
        ad_lacpdu_send(port);
index ffa37adb76817f454505b32d010056dfc4d20dc8..333387f1f1fe66490cda8904a7d6c7aeb2d15287 100644 (file)
@@ -3112,13 +3112,13 @@ static int bond_slave_netdev_event(unsigned long event,
        case NETDEV_CHANGE:
                /* For 802.3ad mode only:
                 * Getting invalid Speed/Duplex values here will put slave
-                * in weird state. So mark it as link-down for the time
+                * in weird state. So mark it as link-fail for the time
                 * being and let link-monitoring (miimon) set it right when
                 * correct speeds/duplex are available.
                 */
                if (bond_update_speed_duplex(slave) &&
                    BOND_MODE(bond) == BOND_MODE_8023AD)
-                       slave->link = BOND_LINK_DOWN;
+                       slave->link = BOND_LINK_FAIL;
 
                if (BOND_MODE(bond) == BOND_MODE_8023AD)
                        bond_3ad_adapter_speed_duplex_changed(slave);
index 9697977b80f040c3fde59037e2caabe0cdfaa79f..6b9ad86732188c19c87f26a6639d63f95893f139 100644 (file)
@@ -638,8 +638,7 @@ static int bond_fill_info(struct sk_buff *skb,
                                goto nla_put_failure;
 
                        if (nla_put(skb, IFLA_BOND_AD_ACTOR_SYSTEM,
-                                   sizeof(bond->params.ad_actor_system),
-                                   &bond->params.ad_actor_system))
+                                   ETH_ALEN, &bond->params.ad_actor_system))
                                goto nla_put_failure;
                }
                if (!bond_3ad_get_active_agg_info(bond, &info)) {
index 49163570a63afad2e36777993a57319370d6c8b0..3b3f88ffab53cded04a2c1586727bf82472f43ad 100644 (file)
@@ -477,6 +477,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(can_put_echo_skb);
 
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+{
+       struct can_priv *priv = netdev_priv(dev);
+       struct sk_buff *skb = priv->echo_skb[idx];
+       struct canfd_frame *cf;
+
+       if (idx >= priv->echo_skb_max) {
+               netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+                          __func__, idx, priv->echo_skb_max);
+               return NULL;
+       }
+
+       if (!skb) {
+               netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
+                          __func__, idx);
+               return NULL;
+       }
+
+       /* Using "struct canfd_frame::len" for the frame
+        * length is supported on both CAN and CANFD frames.
+        */
+       cf = (struct canfd_frame *)skb->data;
+       *len_ptr = cf->len;
+       priv->echo_skb[idx] = NULL;
+
+       return skb;
+}
+
 /*
  * Get the skb from the stack and loop it back locally
  *
@@ -486,22 +514,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
  */
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
 {
-       struct can_priv *priv = netdev_priv(dev);
-
-       BUG_ON(idx >= priv->echo_skb_max);
-
-       if (priv->echo_skb[idx]) {
-               struct sk_buff *skb = priv->echo_skb[idx];
-               struct can_frame *cf = (struct can_frame *)skb->data;
-               u8 dlc = cf->can_dlc;
+       struct sk_buff *skb;
+       u8 len;
 
-               netif_rx(priv->echo_skb[idx]);
-               priv->echo_skb[idx] = NULL;
+       skb = __can_get_echo_skb(dev, idx, &len);
+       if (!skb)
+               return 0;
 
-               return dlc;
-       }
+       netif_rx(skb);
 
-       return 0;
+       return len;
 }
 EXPORT_SYMBOL_GPL(can_get_echo_skb);
 
index 8e972ef0863769e88a2c9d6cec37408d66566292..75ce11395ee8196f0dc3ed8b1368e591047d5edd 100644 (file)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
 /* Errata ERR005829 step7: Reserve first valid MB */
-#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO        8
-#define FLEXCAN_TX_MB_OFF_FIFO         9
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO                8
 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP   0
-#define FLEXCAN_TX_MB_OFF_TIMESTAMP            1
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST      (FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST       63
-#define FLEXCAN_IFLAG_MB(x)            BIT(x)
+#define FLEXCAN_TX_MB                          63
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST      (FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST       (FLEXCAN_TX_MB - 1)
+#define        FLEXCAN_IFLAG_MB(x)            BIT((x) & 0x1f)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN     BIT(6)
 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE        BIT(5)
@@ -259,9 +258,7 @@ struct flexcan_priv {
        struct can_rx_offload offload;
 
        struct flexcan_regs __iomem *regs;
-       struct flexcan_mb __iomem *tx_mb;
        struct flexcan_mb __iomem *tx_mb_reserved;
-       u8 tx_mb_idx;
        u32 reg_ctrl_default;
        u32 reg_imask1_default;
        u32 reg_imask2_default;
@@ -515,6 +512,7 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
 static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        const struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
        struct can_frame *cf = (struct can_frame *)skb->data;
        u32 can_id;
        u32 data;
@@ -537,17 +535,17 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
 
        if (cf->can_dlc > 0) {
                data = be32_to_cpup((__be32 *)&cf->data[0]);
-               priv->write(data, &priv->tx_mb->data[0]);
+               priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
        }
        if (cf->can_dlc > 4) {
                data = be32_to_cpup((__be32 *)&cf->data[4]);
-               priv->write(data, &priv->tx_mb->data[1]);
+               priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
        }
 
        can_put_echo_skb(skb, dev, 0);
 
-       priv->write(can_id, &priv->tx_mb->can_id);
-       priv->write(ctrl, &priv->tx_mb->can_ctrl);
+       priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
+       priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
 
        /* Errata ERR005829 step8:
         * Write twice INACTIVE(0x8) code to first MB.
@@ -563,9 +561,13 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
 static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
        struct sk_buff *skb;
        struct can_frame *cf;
        bool rx_errors = false, tx_errors = false;
+       u32 timestamp;
+
+       timestamp = priv->read(&regs->timer) << 16;
 
        skb = alloc_can_err_skb(dev, &cf);
        if (unlikely(!skb))
@@ -612,17 +614,21 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
        if (tx_errors)
                dev->stats.tx_errors++;
 
-       can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 }
 
 static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
 {
        struct flexcan_priv *priv = netdev_priv(dev);
+       struct flexcan_regs __iomem *regs = priv->regs;
        struct sk_buff *skb;
        struct can_frame *cf;
        enum can_state new_state, rx_state, tx_state;
        int flt;
        struct can_berr_counter bec;
+       u32 timestamp;
+
+       timestamp = priv->read(&regs->timer) << 16;
 
        flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
        if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
@@ -652,7 +658,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
        if (unlikely(new_state == CAN_STATE_BUS_OFF))
                can_bus_off(dev);
 
-       can_rx_offload_irq_queue_err_skb(&priv->offload, skb);
+       can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
 }
 
 static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -720,9 +726,14 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
                        priv->write(BIT(n - 32), &regs->iflag2);
        } else {
                priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
-               priv->read(&regs->timer);
        }
 
+       /* Read the Free Running Timer. Doing so is optional but
+        * recommended: it unlocks the mailbox as soon as possible so
+        * it becomes available for reception again.
+        */
+       priv->read(&regs->timer);
+
        return 1;
 }
 
@@ -732,9 +743,9 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
        struct flexcan_regs __iomem *regs = priv->regs;
        u32 iflag1, iflag2;
 
-       iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default;
-       iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default &
-               ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+       iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
+               ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
+       iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
 
        return (u64)iflag2 << 32 | iflag1;
 }
@@ -746,11 +757,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
        struct flexcan_priv *priv = netdev_priv(dev);
        struct flexcan_regs __iomem *regs = priv->regs;
        irqreturn_t handled = IRQ_NONE;
-       u32 reg_iflag1, reg_esr;
+       u32 reg_iflag2, reg_esr;
        enum can_state last_state = priv->can.state;
 
-       reg_iflag1 = priv->read(&regs->iflag1);
-
        /* reception interrupt */
        if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
                u64 reg_iflag;
@@ -764,6 +773,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                                break;
                }
        } else {
+               u32 reg_iflag1;
+
+               reg_iflag1 = priv->read(&regs->iflag1);
                if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
                        handled = IRQ_HANDLED;
                        can_rx_offload_irq_offload_fifo(&priv->offload);
@@ -779,17 +791,22 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                }
        }
 
+       reg_iflag2 = priv->read(&regs->iflag2);
+
        /* transmission complete interrupt */
-       if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+       if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
+               u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
+
                handled = IRQ_HANDLED;
-               stats->tx_bytes += can_get_echo_skb(dev, 0);
+               stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
+                                                              0, reg_ctrl << 16);
                stats->tx_packets++;
                can_led_event(dev, CAN_LED_EVENT_TX);
 
                /* after sending a RTR frame MB is in RX mode */
                priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                           &priv->tx_mb->can_ctrl);
-               priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+                           &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+               priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
                netif_wake_queue(dev);
        }
 
@@ -931,15 +948,13 @@ static int flexcan_chip_start(struct net_device *dev)
        reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
        reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
                FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
-               FLEXCAN_MCR_IDAM_C;
+               FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
 
-       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
                reg_mcr &= ~FLEXCAN_MCR_FEN;
-               reg_mcr |= FLEXCAN_MCR_MAXMB(priv->offload.mb_last);
-       } else {
-               reg_mcr |= FLEXCAN_MCR_FEN |
-                       FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
-       }
+       else
+               reg_mcr |= FLEXCAN_MCR_FEN;
+
        netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
        priv->write(reg_mcr, &regs->mcr);
 
@@ -982,16 +997,17 @@ static int flexcan_chip_start(struct net_device *dev)
                priv->write(reg_ctrl2, &regs->ctrl2);
        }
 
-       /* clear and invalidate all mailboxes first */
-       for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
-               priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
-                           &regs->mb[i].can_ctrl);
-       }
-
        if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-               for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++)
+               for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
                        priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
                                    &regs->mb[i].can_ctrl);
+               }
+       } else {
+               /* clear and invalidate unused mailboxes */
+               for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i < ARRAY_SIZE(regs->mb); i++) {
+                       priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
+                                   &regs->mb[i].can_ctrl);
+               }
        }
 
        /* Errata ERR005829: mark first TX mailbox as INACTIVE */
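
One detail worth calling out in the mailbox-init loop above: valid indices run from 0 to ARRAY_SIZE() - 1, so the bound must be `<`, not `<=`. A compilable illustration of the idiom (array size and fill value are invented for the demo):

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned int mb[64] = { 0 };
	size_t i;

	/* '<' keeps the last write at mb[63]; '<=' would write mb[64],
	 * one element past the end of the array
	 */
	for (i = 8; i < ARRAY_SIZE(mb); i++)
		mb[i] = 0x4;

	printf("initialized %zu of %zu mailboxes\n",
	       ARRAY_SIZE(mb) - 8, ARRAY_SIZE(mb));
	return 0;
}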
@@ -1000,7 +1016,7 @@ static int flexcan_chip_start(struct net_device *dev)
 
        /* mark TX mailbox as INACTIVE */
        priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-                   &priv->tx_mb->can_ctrl);
+                   &regs->mb[FLEXCAN_TX_MB].can_ctrl);
 
        /* acceptance mask/acceptance code (accept everything) */
        priv->write(0x0, &regs->rxgmask);
@@ -1355,17 +1371,13 @@ static int flexcan_probe(struct platform_device *pdev)
        priv->devtype_data = devtype_data;
        priv->reg_xceiver = reg_xceiver;
 
-       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
-               priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+       if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
                priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
-       } else {
-               priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+       else
                priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
-       }
-       priv->tx_mb = &regs->mb[priv->tx_mb_idx];
 
-       priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
-       priv->reg_imask2_default = 0;
+       priv->reg_imask1_default = 0;
+       priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
 
        priv->offload.mailbox_read = flexcan_mailbox_read;
 
index 11662f479e760ba77f613c90bfc8026b005da3ea..771a4608373978c31b7011a45cf5659f543820c1 100644 (file)
@@ -24,6 +24,9 @@
 
 #define RCAR_CAN_DRV_NAME      "rcar_can"
 
+#define RCAR_SUPPORTED_CLOCKS  (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
+                                BIT(CLKR_CLKEXT))
+
 /* Mailbox configuration:
  * mailbox 60 - 63 - Rx FIFO mailboxes
  * mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
                goto fail_clk;
        }
 
-       if (clock_select >= ARRAY_SIZE(clock_names)) {
+       if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
                err = -EINVAL;
                dev_err(&pdev->dev, "invalid CAN clock selected\n");
                goto fail_clk;
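
The new check is stricter than a range test: it accepts exactly the indices whose bits are set in RCAR_SUPPORTED_CLOCKS, holes included, instead of any index below the array size. A standalone sketch of the pattern; the CLKR_* values used here (0, 1 and a gap before 3) are assumptions for illustration only:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

/* assumed encoding; the gap in the enum is the point of the demo */
enum { CLKR_CLKP1 = 0, CLKR_CLKP2 = 1, CLKR_CLKEXT = 3 };

#define RCAR_SUPPORTED_CLOCKS	(BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
				 BIT(CLKR_CLKEXT))

static bool clock_valid(unsigned int clock_select)
{
	/* a bitmask accepts exactly the supported indices */
	return clock_select < 32 &&
	       (BIT(clock_select) & RCAR_SUPPORTED_CLOCKS);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("clock_select=%u -> %s\n", i,
		       clock_valid(i) ? "ok" : "invalid");
	return 0;
}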
index c7d05027a7a07ea34f862aa3595ebf6026a93897..2ce4fa8698c73b437051de20e29eaad70b263f64 100644 (file)
@@ -211,7 +211,54 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
 
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+                               struct sk_buff *skb, u32 timestamp)
+{
+       struct can_rx_offload_cb *cb;
+       unsigned long flags;
+
+       if (skb_queue_len(&offload->skb_queue) >
+           offload->skb_queue_len_max)
+               return -ENOMEM;
+
+       cb = can_rx_offload_get_cb(skb);
+       cb->timestamp = timestamp;
+
+       spin_lock_irqsave(&offload->skb_queue.lock, flags);
+       __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
+       spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+       can_rx_offload_schedule(offload);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
+
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+                                        unsigned int idx, u32 timestamp)
+{
+       struct net_device *dev = offload->dev;
+       struct net_device_stats *stats = &dev->stats;
+       struct sk_buff *skb;
+       u8 len;
+       int err;
+
+       skb = __can_get_echo_skb(dev, idx, &len);
+       if (!skb)
+               return 0;
+
+       err = can_rx_offload_queue_sorted(offload, skb, timestamp);
+       if (err) {
+               stats->rx_errors++;
+               stats->tx_fifo_errors++;
+       }
+
+       return len;
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
+
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+                             struct sk_buff *skb)
 {
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max)
@@ -222,7 +269,7 @@ int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_b
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);
+EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
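
Together, can_rx_offload_queue_sorted() and can_rx_offload_get_echo_skb() let a timestamping driver deliver TX echo frames in timestamp order with received frames, while the renamed can_rx_offload_queue_tail() keeps the old tail-queue behaviour for error frames. A hedged sketch of a TX-complete path using the new helper; everything except the helper signature added above is hypothetical driver glue:

/* Hypothetical driver glue; only can_rx_offload_get_echo_skb() is real. */
static void demo_tx_complete(struct can_rx_offload *offload, u32 timestamp)
{
	struct net_device_stats *stats = &offload->dev->stats;

	/* echo skb is queued sorted by timestamp, not appended at the tail */
	stats->tx_bytes += can_rx_offload_get_echo_skb(offload, 0, timestamp);
	stats->tx_packets++;
}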
 
 static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
 {
index 53e320c92a8be21e286ab4f7ada738fd223a08fa..ddaf46239e39e92337a4ed54ecb3feb1ab94cc59 100644 (file)
@@ -760,7 +760,7 @@ static int hi3110_open(struct net_device *net)
 {
        struct hi3110_priv *priv = netdev_priv(net);
        struct spi_device *spi = priv->spi;
-       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_RISING;
+       unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH;
        int ret;
 
        ret = open_candev(net);
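
The trigger change is the point of this hunk: the HI-3110 holds its interrupt line asserted until the cause is cleared over SPI, so a level trigger cannot lose an event that arrives while the threaded handler is still running, whereas a single rising edge can. For reference, a hedged sketch of how the flags are passed on; the handler and name are placeholders, only the flag combination mirrors the fix:

/* placeholder handler/name; the flags match the hunk above */
ret = request_threaded_irq(spi->irq, NULL, demo_hi3110_irq_thread,
			   IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
			   "hi3110", priv);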
index b939a4c10b8409f5fe58e700204fe4e5183b23c0..c89c7d4900d75068badc7a7234c36b5b7345f675 100644 (file)
@@ -528,7 +528,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                        context = &priv->tx_contexts[i];
 
                        context->echo_index = i;
-                       can_put_echo_skb(skb, netdev, context->echo_index);
                        ++priv->active_tx_contexts;
                        if (priv->active_tx_contexts >= (int)dev->max_tx_urbs)
                                netif_stop_queue(netdev);
@@ -553,7 +552,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                dev_kfree_skb(skb);
                spin_lock_irqsave(&priv->tx_contexts_lock, flags);
 
-               can_free_echo_skb(netdev, context->echo_index);
                context->echo_index = dev->max_tx_urbs;
                --priv->active_tx_contexts;
                netif_wake_queue(netdev);
@@ -564,6 +562,8 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
 
        context->priv = priv;
 
+       can_put_echo_skb(skb, netdev, context->echo_index);
+
        usb_fill_bulk_urb(urb, dev->udev,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
index c084bae5ec0a4d936f6a7f121272d972903a8023..5fc0be564274375f3d5c579521a2d3b89ecd4a88 100644 (file)
@@ -1019,6 +1019,11 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
                                        new_state : CAN_STATE_ERROR_ACTIVE;
 
                        can_change_state(netdev, cf, tx_state, rx_state);
+
+                       if (priv->can.restart_ms &&
+                           old_state >= CAN_STATE_BUS_OFF &&
+                           new_state < CAN_STATE_BUS_OFF)
+                               cf->can_id |= CAN_ERR_RESTARTED;
                }
 
                if (new_state == CAN_STATE_BUS_OFF) {
@@ -1028,11 +1033,6 @@ kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv,
 
                        can_bus_off(netdev);
                }
-
-               if (priv->can.restart_ms &&
-                   old_state >= CAN_STATE_BUS_OFF &&
-                   new_state < CAN_STATE_BUS_OFF)
-                       cf->can_id |= CAN_ERR_RESTARTED;
        }
 
        if (!skb) {
index 0678a38b1af4588135f04074c56b37cfe6d09b70..f3d5bda012a107e430bca91bd09dd1d15be1bbf2 100644 (file)
 #include <linux/slab.h>
 #include <linux/usb.h>
 
-#include <linux/can.h>
-#include <linux/can/dev.h>
-#include <linux/can/error.h>
-
 #define UCAN_DRIVER_NAME "ucan"
 #define UCAN_MAX_RX_URBS 8
 /* the CAN controller needs a while to enable/disable the bus */
@@ -1575,11 +1571,8 @@ err_firmware_needs_update:
 /* disconnect the device */
 static void ucan_disconnect(struct usb_interface *intf)
 {
-       struct usb_device *udev;
        struct ucan_priv *up = usb_get_intfdata(intf);
 
-       udev = interface_to_usbdev(intf);
-
        usb_set_intfdata(intf, NULL);
 
        if (up) {
index 54e0ca6ed7308c511ce42bc6ea3dc6e65fb0662b..86b6464b4525c426e09d4d6a9f98bf9a0ee49111 100644 (file)
@@ -1117,11 +1117,6 @@ static int ksz_switch_init(struct ksz_device *dev)
 {
        int i;
 
-       mutex_init(&dev->reg_mutex);
-       mutex_init(&dev->stats_mutex);
-       mutex_init(&dev->alu_mutex);
-       mutex_init(&dev->vlan_mutex);
-
        dev->ds->ops = &ksz_switch_ops;
 
        for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
@@ -1206,6 +1201,11 @@ int ksz_switch_register(struct ksz_device *dev)
        if (dev->pdata)
                dev->chip_id = dev->pdata->chip_id;
 
+       mutex_init(&dev->reg_mutex);
+       mutex_init(&dev->stats_mutex);
+       mutex_init(&dev->alu_mutex);
+       mutex_init(&dev->vlan_mutex);
+
        if (ksz_switch_detect(dev))
                return -EINVAL;
 
index 65f10fec25b397345b03e503526a3b40de514a3a..0b3e51f248c21a2477c9b1736b0f06b80e350e29 100644 (file)
@@ -116,8 +116,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
        /* Reset the switch. */
        REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
                  GLOBAL_ATU_CONTROL_SWRESET |
-                 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-                 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+                 GLOBAL_ATU_CONTROL_LEARNDIS);
 
        /* Wait up to one second for reset to complete. */
        timeout = jiffies + 1 * HZ;
@@ -142,13 +141,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
         */
        REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
-       /* Enable automatic address learning, set the address
-        * database size to 1024 entries, and set the default aging
-        * time to 5 minutes.
+       /* Disable automatic address learning.
         */
        REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
-                 GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
-                 GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+                 GLOBAL_ATU_CONTROL_LEARNDIS);
 
        return 0;
 }
index d721ccf7d8bed8230fa5fbbac2ecfd5068680cac..38e399e0f30e16cd189cf48874f08e6289565101 100644 (file)
@@ -567,6 +567,8 @@ int mv88e6xxx_g1_stats_clear(struct mv88e6xxx_chip *chip)
        if (err)
                return err;
 
+       /* Keep the histogram mode bits */
+       val &= MV88E6XXX_G1_STATS_OP_HIST_RX_TX;
        val |= MV88E6XXX_G1_STATS_OP_BUSY | MV88E6XXX_G1_STATS_OP_FLUSH_ALL;
 
        err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_STATS_OP, val);
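
The added mask turns the sequence into a proper read-modify-write: only the histogram-mode bits survive from the value read back, and the operation bits are rebuilt from scratch. A standalone model of the masking; the register layout and bit positions are invented for the demo:

#include <assert.h>
#include <stdint.h>

/* invented bit positions, for illustration only */
#define STATS_OP_BUSY		(1U << 15)
#define STATS_OP_FLUSH_ALL	(1U << 12)
#define STATS_OP_HIST_RX_TX	(3U << 10)

static uint16_t next_stats_op(uint16_t readback)
{
	uint16_t val = readback & STATS_OP_HIST_RX_TX;	/* keep hist mode only */

	return val | STATS_OP_BUSY | STATS_OP_FLUSH_ALL;
}

int main(void)
{
	/* stale BUSY or op bits in the readback must not be written back */
	assert(next_stats_op(0xffff) ==
	       (STATS_OP_HIST_RX_TX | STATS_OP_BUSY | STATS_OP_FLUSH_ALL));
	return 0;
}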
index 18956e7604a318d72de0ed52cf2a0da2e7602348..a70bb1bb90e7d89dc75f6f3dcc4f1455484057bc 100644 (file)
@@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter)
                rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
                if (rc)
                        dev_err(&adapter->pdev->dev, "Device reset failed\n");
+               /* stop submitting admin commands on a device that was reset */
+               ena_com_set_admin_running_state(adapter->ena_dev, false);
        }
 
        ena_destroy_all_io_queues(adapter);
@@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev)
 
        netif_dbg(adapter, ifdown, netdev, "%s\n", __func__);
 
+       if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+               return 0;
+
        if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
                ena_down(adapter);
 
@@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
                ena_down(adapter);
 
        /* Stop the device from sending AENQ events (in case reset flag is set
-        *  and device is up, ena_close already reset the device
-        * In case the reset flag is set and the device is up, ena_down()
-        * already perform the reset, so it can be skipped.
+        * and the device is up, ena_down() has already reset the device).
         */
        if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
                ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
@@ -2694,8 +2697,8 @@ err_device_destroy:
        ena_com_abort_admin_commands(ena_dev);
        ena_com_wait_for_abort_completion(ena_dev);
        ena_com_admin_destroy(ena_dev);
-       ena_com_mmio_reg_read_request_destroy(ena_dev);
        ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+       ena_com_mmio_reg_read_request_destroy(ena_dev);
 err:
        clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
        clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
@@ -3452,6 +3455,8 @@ err_rss:
        ena_com_rss_destroy(ena_dev);
 err_free_msix:
        ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
+       /* stop submitting admin commands on a device that was reset */
+       ena_com_set_admin_running_state(ena_dev, false);
        ena_free_mgmnt_irq(adapter);
        ena_disable_msix(adapter);
 err_worker_destroy:
@@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev)
 
        cancel_work_sync(&adapter->reset_task);
 
-       unregister_netdev(netdev);
-
-       /* If the device is running then we want to make sure the device will be
-        * reset to make sure no more events will be issued by the device.
-        */
-       if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-               set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-
        rtnl_lock();
        ena_destroy_device(adapter, true);
        rtnl_unlock();
 
+       unregister_netdev(netdev);
+
        free_netdev(netdev);
 
        ena_com_rss_destroy(ena_dev);

index 521873642339356fd2053d160ea6ed2230fe4c33..dc8b6173d8d8224a5485480dec81c4f6526d94f0 100644 (file)
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR   2
 #define DRV_MODULE_VER_MINOR   0
-#define DRV_MODULE_VER_SUBMINOR 1
+#define DRV_MODULE_VER_SUBMINOR 2
 
 #define DRV_MODULE_NAME                "ena"
 #ifndef DRV_MODULE_VERSION
index b4fc0ed5bce83796ebe8a40eef3c053ecdb94254..9d489982682336a7cef13186f20557a4a7d7daeb 100644 (file)
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
 
                        prop = of_get_property(nd, "tpe-link-test?", NULL);
                        if (!prop)
-                               goto no_link_test;
+                               goto node_put;
 
                        if (strcmp(prop, "true")) {
                                printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
                                       "to ecd@skynet.be\n");
                                auxio_set_lte(AUXIO_LTE_ON);
                        }
+node_put:
+                       of_node_put(nd);
 no_link_test:
                        lp->auto_select = 1;
                        lp->tpe = 0;
index 6a633c70f603d7b70c3819b20f73d4b4f1405e64..99ef1daaa4d8027636cc5b0f7f542b7961f6764e 100644 (file)
@@ -407,13 +407,13 @@ static void aq_ethtool_get_pauseparam(struct net_device *ndev,
                                      struct ethtool_pauseparam *pause)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
+       u32 fc = aq_nic->aq_nic_cfg.flow_control;
 
        pause->autoneg = 0;
 
-       if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
-               pause->rx_pause = 1;
-       if (aq_nic->aq_hw->aq_nic_cfg->flow_control & AQ_NIC_FC_TX)
-               pause->tx_pause = 1;
+       pause->rx_pause = !!(fc & AQ_NIC_FC_RX);
+       pause->tx_pause = !!(fc & AQ_NIC_FC_TX);
+
 }
 
 static int aq_ethtool_set_pauseparam(struct net_device *ndev,
index e8689241204e9086fdb2c8750402ff7467e129ac..a1e70da358ca6910f02a82a6ef2e3949f60ee4fe 100644 (file)
@@ -204,6 +204,10 @@ struct aq_hw_ops {
 
        int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
 
+       int (*hw_set_offload)(struct aq_hw_s *self,
+                             struct aq_nic_cfg_s *aq_nic_cfg);
+
+       int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
 };
 
 struct aq_fw_ops {
@@ -226,6 +230,8 @@ struct aq_fw_ops {
 
        int (*update_stats)(struct aq_hw_s *self);
 
+       u32 (*get_flow_control)(struct aq_hw_s *self, u32 *fcmode);
+
        int (*set_flow_control)(struct aq_hw_s *self);
 
        int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
index e3ae29e523f0e26738b0ab80a2f3ac431083d13b..7c07eef275eb8498ade72b676dc2eeda8532185e 100644 (file)
@@ -99,8 +99,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
        struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
        bool is_lro = false;
+       int err = 0;
+
+       aq_cfg->features = features;
 
-       if (aq_cfg->hw_features & NETIF_F_LRO) {
+       if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
                is_lro = features & NETIF_F_LRO;
 
                if (aq_cfg->is_lro != is_lro) {
@@ -112,8 +115,11 @@ static int aq_ndev_set_features(struct net_device *ndev,
                        }
                }
        }
+       if ((aq_nic->ndev->features ^ features) & NETIF_F_RXCSUM)
+               err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
+                                                       aq_cfg);
 
-       return 0;
+       return err;
 }
 
 static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
index 5fed244466871cd69b764ffdfcf55d92a9043763..7abdc0952425921330d3639c99824fe5ae7c0e00 100644 (file)
@@ -118,12 +118,13 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
        }
 
        cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
-       cfg->hw_features = cfg->aq_hw_caps->hw_features;
+       cfg->features = cfg->aq_hw_caps->hw_features;
 }
 
 static int aq_nic_update_link_status(struct aq_nic_s *self)
 {
        int err = self->aq_fw_ops->update_link_status(self->aq_hw);
+       u32 fc = 0;
 
        if (err)
                return err;
@@ -133,6 +134,15 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
                        AQ_CFG_DRV_NAME, self->link_status.mbps,
                        self->aq_hw->aq_link_status.mbps);
                aq_nic_update_interrupt_moderation_settings(self);
+
+               /* The driver has to update the RX block's flow control
+                * settings on any link event, so query the FW for the
+                * flow control mode it negotiated.
+                */
+               if (self->aq_fw_ops->get_flow_control)
+                       self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
+               if (self->aq_hw_ops->hw_set_fc)
+                       self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
        }
 
        self->link_status = self->aq_hw->aq_link_status;
@@ -590,7 +600,7 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
                }
        }
 
-       if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+       if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
                packet_filter |= IFF_MULTICAST;
                self->mc_list.count = i;
                self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
@@ -772,7 +782,9 @@ void aq_nic_get_link_ksettings(struct aq_nic_s *self,
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Pause);
 
-       if (self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX)
+       /* Asymmetric pause: either RX or TX is enabled, but not both */
+       if (!!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_TX) ^
+           !!(self->aq_nic_cfg.flow_control & AQ_NIC_FC_RX))
                ethtool_link_ksettings_add_link_mode(cmd, advertising,
                                                     Asym_Pause);
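
The XOR of the two !!-normalized flags is what encodes "exactly one of RX/TX pause enabled". A compilable check of the truth table; the two-bit flag encoding is assumed for the demo:

#include <assert.h>
#include <stdbool.h>

#define AQ_NIC_FC_RX	0x1U	/* assumed encoding */
#define AQ_NIC_FC_TX	0x2U

static bool advertise_asym_pause(unsigned int fc)
{
	/* true when exactly one direction is enabled */
	return !!(fc & AQ_NIC_FC_TX) ^ !!(fc & AQ_NIC_FC_RX);
}

int main(void)
{
	assert(!advertise_asym_pause(0));
	assert(advertise_asym_pause(AQ_NIC_FC_RX));
	assert(advertise_asym_pause(AQ_NIC_FC_TX));
	assert(!advertise_asym_pause(AQ_NIC_FC_RX | AQ_NIC_FC_TX));
	return 0;
}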
 
index c1582f4e8e1b503f7f5baf60dcd1408e99d19620..44ec47a3d60a57bee0c9b0a62907eb1988ee5a5b 100644 (file)
@@ -23,7 +23,7 @@ struct aq_vec_s;
 
 struct aq_nic_cfg_s {
        const struct aq_hw_caps_s *aq_hw_caps;
-       u64 hw_features;
+       u64 features;
        u32 rxds;               /* rx ring size, descriptors # */
        u32 txds;               /* tx ring size, descriptors # */
        u32 vecs;               /* vecs==allocated irqs */
index 3db91446cc67717b548333e4bd3b95db8281c152..74550ccc7a20ff8437463384e906b718027dc6ef 100644 (file)
@@ -172,6 +172,27 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
        return !!budget;
 }
 
+static void aq_rx_checksum(struct aq_ring_s *self,
+                          struct aq_ring_buff_s *buff,
+                          struct sk_buff *skb)
+{
+       if (!(self->aq_nic->ndev->features & NETIF_F_RXCSUM))
+               return;
+
+       if (unlikely(buff->is_cso_err)) {
+               ++self->stats.rx.errors;
+               skb->ip_summed = CHECKSUM_NONE;
+               return;
+       }
+       if (buff->is_ip_cso) {
+               __skb_incr_checksum_unnecessary(skb);
+               if (buff->is_udp_cso || buff->is_tcp_cso)
+                       __skb_incr_checksum_unnecessary(skb);
+       } else {
+               skb->ip_summed = CHECKSUM_NONE;
+       }
+}
+
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 int aq_ring_rx_clean(struct aq_ring_s *self,
                     struct napi_struct *napi,
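
The factored-out aq_rx_checksum() relies on the CHECKSUM_UNNECESSARY level counter: one increment validates the outer checksum, a second validates the inner L4 checksum. A standalone model of that counting; the skb fields are mimicked with a plain struct, the real helper lives in the kernel's skbuff.h:

#include <assert.h>

enum { CHECKSUM_NONE = 0, CHECKSUM_UNNECESSARY = 1 };

struct demo_skb {
	unsigned int ip_summed;
	unsigned int csum_level;
};

/* models __skb_incr_checksum_unnecessary(): the first call sets the
 * state, further calls bump the number of validated checksums
 */
static void incr_checksum_unnecessary(struct demo_skb *skb)
{
	if (skb->ip_summed == CHECKSUM_NONE)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->csum_level++;
}

int main(void)
{
	struct demo_skb skb = { CHECKSUM_NONE, 0 };

	incr_checksum_unnecessary(&skb);	/* IP header checksum ok */
	incr_checksum_unnecessary(&skb);	/* TCP/UDP checksum ok too */
	assert(skb.ip_summed == CHECKSUM_UNNECESSARY && skb.csum_level == 1);
	return 0;
}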
@@ -267,18 +288,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                }
 
                skb->protocol = eth_type_trans(skb, ndev);
-               if (unlikely(buff->is_cso_err)) {
-                       ++self->stats.rx.errors;
-                       skb->ip_summed = CHECKSUM_NONE;
-               } else {
-                       if (buff->is_ip_cso) {
-                               __skb_incr_checksum_unnecessary(skb);
-                               if (buff->is_udp_cso || buff->is_tcp_cso)
-                                       __skb_incr_checksum_unnecessary(skb);
-                       } else {
-                               skb->ip_summed = CHECKSUM_NONE;
-                       }
-               }
+
+               aq_rx_checksum(self, buff, skb);
 
                skb_set_hash(skb, buff->rss_hash,
                             buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
index 76d25d594a0f62fedf507e50b58701933b1872c2..a7e853fa43c24a07e719ef3a31b216df07fefafb 100644 (file)
@@ -100,12 +100,17 @@ static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
        return err;
 }
 
+static int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc)
+{
+       hw_atl_rpb_rx_xoff_en_per_tc_set(self, !!(fc & AQ_NIC_FC_RX), tc);
+       return 0;
+}
+
 static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 {
        u32 tc = 0U;
        u32 buff_size = 0U;
        unsigned int i_priority = 0U;
-       bool is_rx_flow_control = false;
 
        /* TPS Descriptor rate init */
        hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
@@ -138,7 +143,6 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
 
        /* QoS Rx buf size per TC */
        tc = 0;
-       is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
        buff_size = HW_ATL_B0_RXBUF_MAX;
 
        hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
@@ -150,7 +154,8 @@ static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
                                                   (buff_size *
                                                   (1024U / 32U) * 50U) /
                                                   100U, tc);
-       hw_atl_rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+       hw_atl_b0_set_fc(self, self->aq_nic_cfg->flow_control, tc);
 
        /* QoS 802.1p priority -> TC mapping */
        for (i_priority = 8U; i_priority--;)
@@ -229,8 +234,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
        hw_atl_tpo_tcp_udp_crc_offload_en_set(self, 1);
 
        /* RX checksums offloads*/
-       hw_atl_rpo_ipv4header_crc_offload_en_set(self, 1);
-       hw_atl_rpo_tcp_udp_crc_offload_en_set(self, 1);
+       hw_atl_rpo_ipv4header_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+                                                NETIF_F_RXCSUM));
+       hw_atl_rpo_tcp_udp_crc_offload_en_set(self, !!(aq_nic_cfg->features &
+                                             NETIF_F_RXCSUM));
 
        /* LSO offloads*/
        hw_atl_tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
@@ -655,9 +662,9 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
                struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
                        &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
 
-               unsigned int is_err = 1U;
                unsigned int is_rx_check_sum_enabled = 0U;
                unsigned int pkt_type = 0U;
+               u8 rx_stat = 0U;
 
                if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
                        break;
@@ -665,35 +672,35 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
 
                buff = &ring->buff_ring[ring->hw_head];
 
-               is_err = (0x0000003CU & rxd_wb->status);
+               rx_stat = (0x0000003CU & rxd_wb->status) >> 2;
 
-               is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
-               is_err &= ~0x20U; /* exclude validity bit */
+               is_rx_check_sum_enabled = (rxd_wb->type >> 19) & 0x3U;
 
                pkt_type = 0xFFU & (rxd_wb->type >> 4);
 
-               if (is_rx_check_sum_enabled) {
-                       if (0x0U == (pkt_type & 0x3U))
-                               buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+               if (is_rx_check_sum_enabled & BIT(0) &&
+                   (0x0U == (pkt_type & 0x3U)))
+                       buff->is_ip_cso = (rx_stat & BIT(1)) ? 0U : 1U;
 
+               if (is_rx_check_sum_enabled & BIT(1)) {
                        if (0x4U == (pkt_type & 0x1CU))
-                               buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+                               buff->is_udp_cso = (rx_stat & BIT(2)) ? 0U :
+                                                  !!(rx_stat & BIT(3));
                        else if (0x0U == (pkt_type & 0x1CU))
-                               buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
-
-                       /* Checksum offload workaround for small packets */
-                       if (rxd_wb->pkt_len <= 60) {
-                               buff->is_ip_cso = 0U;
-                               buff->is_cso_err = 0U;
-                       }
+                               buff->is_tcp_cso = (rx_stat & BIT(2)) ? 0U :
+                                                  !!(rx_stat & BIT(3));
+               }
+               buff->is_cso_err = !!(rx_stat & 0x6);
+               /* Checksum offload workaround for small packets */
+               if (unlikely(rxd_wb->pkt_len <= 60)) {
+                       buff->is_ip_cso = 0U;
+                       buff->is_cso_err = 0U;
                }
-
-               is_err &= ~0x18U;
 
                dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
 
-               if (is_err || rxd_wb->type & 0x1000U) {
-                       /* status error or DMA error */
+               if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
+                       /* MAC error or DMA error */
                        buff->is_error = 1U;
                } else {
                        if (self->aq_nic_cfg->is_rss) {
@@ -915,6 +922,12 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+
+       /* Invalidate Descriptor Cache to prevent writing to the cached
+        * descriptors and to the data pointer of those descriptors
+        */
+       hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+
        return aq_hw_err_from_flags(self);
 }
 
@@ -963,4 +976,6 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
        .hw_get_regs                 = hw_atl_utils_hw_get_regs,
        .hw_get_hw_stats             = hw_atl_utils_get_hw_stats,
        .hw_get_fw_version           = hw_atl_utils_get_fw_version,
+       .hw_set_offload              = hw_atl_b0_hw_offload_set,
+       .hw_set_fc                   = hw_atl_b0_set_fc,
 };
index be0a3a90dfad6ac157ba81ab238ca43631e6df82..5502ec5f0f6993502cd8d4e880032a9606da5c34 100644 (file)
@@ -619,6 +619,14 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
                            HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+{
+       aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+                           HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+                           HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
+                           init);
+}
+
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
                                            u32 rx_pkt_buff_size_per_tc, u32 buffer)
 {
index 7056c7342afcf2bf426a2e261cacd79b405c75a2..41f239928c157f121b74f086c35c86457f2a3aba 100644 (file)
@@ -325,6 +325,9 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
                                            u32 rx_pkt_buff_size_per_tc,
                                            u32 buffer);
 
+/* set rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
                                      u32 buffer);
index 716674a9b729efc741ef4b434a20dd9dbd415b88..a715fa317b1c822781b4a0422033816b5684e7e3 100644 (file)
 /* default value of bitfield desc{d}_reset */
 #define HW_ATL_RDM_DESCDRESET_DEFAULT 0x0
 
+/* rdm_desc_init_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_i.
+ * port="pif_rdm_desc_init_i"
+ */
+
+/* register address for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR 0x00005a00
+/* bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK 0xffffffff
+/* inverted bitmask for bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSKN 0x00000000
+/* lower bit position of bitfield  rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT 0
+/* width of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_WIDTH 32
+/* default value of bitfield rdm_desc_init_i */
+#define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"
index 096ca5730887c1d4a47861c3a6a456cd240b2bd1..7de3220d9cab7bf99109145c1ccef4a3ea898d87 100644 (file)
@@ -30,6 +30,8 @@
 #define HW_ATL_FW2X_MPI_STATE_ADDR     0x370
 #define HW_ATL_FW2X_MPI_STATE2_ADDR    0x374
 
+#define HW_ATL_FW2X_CAP_PAUSE            BIT(CAPS_HI_PAUSE)
+#define HW_ATL_FW2X_CAP_ASYM_PAUSE       BIT(CAPS_HI_ASYMMETRIC_PAUSE)
 #define HW_ATL_FW2X_CAP_SLEEP_PROXY      BIT(CAPS_HI_SLEEP_PROXY)
 #define HW_ATL_FW2X_CAP_WOL              BIT(CAPS_HI_WOL)
 
@@ -451,6 +453,24 @@ static int aq_fw2x_set_flow_control(struct aq_hw_s *self)
        return 0;
 }
 
+static u32 aq_fw2x_get_flow_control(struct aq_hw_s *self, u32 *fcmode)
+{
+       u32 mpi_state = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
+
+       if (mpi_state & HW_ATL_FW2X_CAP_PAUSE)
+               if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+                       *fcmode = AQ_NIC_FC_RX;
+               else
+                       *fcmode = AQ_NIC_FC_RX | AQ_NIC_FC_TX;
+       else
+               if (mpi_state & HW_ATL_FW2X_CAP_ASYM_PAUSE)
+                       *fcmode = AQ_NIC_FC_TX;
+               else
+                       *fcmode = 0;
+
+       return 0;
+}
+
 const struct aq_fw_ops aq_fw_2x_ops = {
        .init = aq_fw2x_init,
        .deinit = aq_fw2x_deinit,
@@ -465,4 +485,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
        .set_eee_rate = aq_fw2x_set_eee_rate,
        .get_eee_rate = aq_fw2x_get_eee_rate,
        .set_flow_control = aq_fw2x_set_flow_control,
+       .get_flow_control = aq_fw2x_get_flow_control,
 };
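
The two capability bits resolve to the driver's FC mode the same way IEEE 802.3 pause/asym-pause negotiation does. A standalone version of the decode in aq_fw2x_get_flow_control(); the bit positions are invented, only the mapping mirrors the hunk:

#include <assert.h>
#include <stdint.h>

#define CAP_PAUSE	(1U << 3)	/* invented positions */
#define CAP_ASYM_PAUSE	(1U << 4)
#define FC_RX		0x1U
#define FC_TX		0x2U

static uint32_t decode_fc(uint32_t mpi_state)
{
	if (mpi_state & CAP_PAUSE)
		return (mpi_state & CAP_ASYM_PAUSE) ? FC_RX : FC_RX | FC_TX;

	return (mpi_state & CAP_ASYM_PAUSE) ? FC_TX : 0;
}

int main(void)
{
	assert(decode_fc(0) == 0);
	assert(decode_fc(CAP_PAUSE) == (FC_RX | FC_TX));
	assert(decode_fc(CAP_PAUSE | CAP_ASYM_PAUSE) == FC_RX);
	assert(decode_fc(CAP_ASYM_PAUSE) == FC_TX);
	return 0;
}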
index 78c5de467426f1e4276cebe8ee81cc0091d4c6fa..9d0e74f6b089df4c304ab49c2749d8bbba47c2e7 100644 (file)
@@ -140,6 +140,5 @@ struct alx_priv {
 };
 
 extern const struct ethtool_ops alx_ethtool_ops;
-extern const char alx_drv_name[];
 
 #endif
index 7968c644ad8617fef2fec1360e869a622c525a02..c131cfc1b79df5a62e048bbf1d15d070e7c0fced 100644 (file)
@@ -49,7 +49,7 @@
 #include "hw.h"
 #include "reg.h"
 
-const char alx_drv_name[] = "alx";
+static const char alx_drv_name[] = "alx";
 
 static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
 {
index 4122553e224b294d4eff1828201e467fcc5a60b9..0e2d99c737e35192b90d0bf3ce541ef2d6ecd4d1 100644 (file)
@@ -1902,9 +1902,6 @@ static void bcm_sysport_netif_start(struct net_device *dev)
                intrl2_1_mask_clear(priv, 0xffffffff);
        else
                intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
-
-       /* Last call before we start the real business */
-       netif_tx_start_all_queues(dev);
 }
 
 static void rbuf_init(struct bcm_sysport_priv *priv)
@@ -2048,6 +2045,8 @@ static int bcm_sysport_open(struct net_device *dev)
 
        bcm_sysport_netif_start(dev);
 
+       netif_tx_start_all_queues(dev);
+
        return 0;
 
 out_clear_rx_int:
@@ -2071,7 +2070,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
        struct bcm_sysport_priv *priv = netdev_priv(dev);
 
        /* stop all software from updating hardware */
-       netif_tx_stop_all_queues(dev);
+       netif_tx_disable(dev);
        napi_disable(&priv->napi);
        cancel_work_sync(&priv->dim.dim.work);
        phy_stop(dev->phydev);
@@ -2658,12 +2657,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
        if (!netif_running(dev))
                return 0;
 
+       netif_device_detach(dev);
+
        bcm_sysport_netif_stop(dev);
 
        phy_suspend(dev->phydev);
 
-       netif_device_detach(dev);
-
        /* Disable UniMAC RX */
        umac_enable_set(priv, CMD_RX_EN, 0);
 
@@ -2746,8 +2745,6 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
                goto out_free_rx_ring;
        }
 
-       netif_device_attach(dev);
-
        /* RX pipe enable */
        topctrl_writel(priv, 0, RX_FLUSH_CNTL);
 
@@ -2788,6 +2785,8 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
 
        bcm_sysport_netif_start(dev);
 
+       netif_device_attach(dev);
+
        return 0;
 
 out_free_rx_ring:
index be1506169076f0a89f6a621d01dce81afe720ba7..0de487a8f0eb22d7b033c4f9751b68cf2454286e 100644 (file)
@@ -2191,6 +2191,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 #define PMF_DMAE_C(bp)                 (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
                                         E1HVN_MAX)
 
+/* Following is the DMAE channel number allocation for the clients.
+ *   MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ *   Driver: 0-3 and 8-11 (for PF dmae operations)
+ *           4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C                 13 /* Channel for FW DMAE operations */
+
 /* PCIE link and speed */
 #define PCICFG_LINK_WIDTH              0x1f00000
 #define PCICFG_LINK_WIDTH_SHIFT                20
index 3f4d2c8da21a3a848b4149758883333522b6f77a..a9eaaf3e73a4c41f6dc6808f723a400750fd1ba1 100644 (file)
@@ -6149,6 +6149,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
        rdata->sd_vlan_tag      = cpu_to_le16(start_params->sd_vlan_tag);
        rdata->path_id          = BP_PATH(bp);
        rdata->network_cos_mode = start_params->network_cos_mode;
+       rdata->dmae_cmd_id      = BNX2X_FW_DMAE_C;
 
        rdata->vxlan_dst_port   = cpu_to_le16(start_params->vxlan_dst_port);
        rdata->geneve_dst_port  = cpu_to_le16(start_params->geneve_dst_port);
index dd85d790f638939e552042c642abceb108d61fb3..5d21c14853acc90cb07434fec4460ce004614db5 100644 (file)
@@ -1675,7 +1675,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        } else {
                if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
                        if (dev->features & NETIF_F_RXCSUM)
-                               cpr->rx_l4_csum_errors++;
+                               bnapi->cp_ring.rx_l4_csum_errors++;
                }
        }
 
@@ -5162,6 +5162,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                cp = le16_to_cpu(resp->alloc_cmpl_rings);
                stats = le16_to_cpu(resp->alloc_stat_ctx);
                cp = min_t(u16, cp, stats);
+               hw_resc->resv_irqs = cp;
                if (bp->flags & BNXT_FLAG_CHIP_P5) {
                        int rx = hw_resc->resv_rx_rings;
                        int tx = hw_resc->resv_tx_rings;
@@ -5175,7 +5176,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                                hw_resc->resv_rx_rings = rx;
                                hw_resc->resv_tx_rings = tx;
                        }
-                       cp = le16_to_cpu(resp->alloc_msix);
+                       hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
                        hw_resc->resv_hw_ring_grps = rx;
                }
                hw_resc->resv_cp_rings = cp;
@@ -5353,7 +5354,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
                return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
-static int bnxt_cp_rings_in_use(struct bnxt *bp)
+static int bnxt_nq_rings_in_use(struct bnxt *bp)
 {
        int cp = bp->cp_nr_rings;
        int ulp_msix, ulp_base;
@@ -5368,10 +5369,22 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
        return cp;
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+       int cp;
+
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               return bnxt_nq_rings_in_use(bp);
+
+       cp = bp->tx_nr_rings + bp->rx_nr_rings;
+       return cp;
+}
+
 static bool bnxt_need_reserve_rings(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
        int cp = bnxt_cp_rings_in_use(bp);
+       int nq = bnxt_nq_rings_in_use(bp);
        int rx = bp->rx_nr_rings;
        int vnic = 1, grp = rx;
 
@@ -5387,7 +5400,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
                rx <<= 1;
        if (BNXT_NEW_RM(bp) &&
            (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
-            hw_resc->resv_vnics != vnic ||
+            hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
             (hw_resc->resv_hw_ring_grps != grp &&
              !(bp->flags & BNXT_FLAG_CHIP_P5))))
                return true;
@@ -5397,7 +5410,7 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
 static int __bnxt_reserve_rings(struct bnxt *bp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-       int cp = bnxt_cp_rings_in_use(bp);
+       int cp = bnxt_nq_rings_in_use(bp);
        int tx = bp->tx_nr_rings;
        int rx = bp->rx_nr_rings;
        int grp, rx_rings, rc;
@@ -5422,7 +5435,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
        tx = hw_resc->resv_tx_rings;
        if (BNXT_NEW_RM(bp)) {
                rx = hw_resc->resv_rx_rings;
-               cp = hw_resc->resv_cp_rings;
+               cp = hw_resc->resv_irqs;
                grp = hw_resc->resv_hw_ring_grps;
                vnic = hw_resc->resv_vnics;
        }
@@ -6292,6 +6305,8 @@ hwrm_func_qcaps_exit:
        return rc;
 }
 
+static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
+
 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
        int rc;
@@ -6299,6 +6314,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
        rc = __bnxt_hwrm_func_qcaps(bp);
        if (rc)
                return rc;
+       rc = bnxt_hwrm_queue_qportcfg(bp);
+       if (rc) {
+               netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
+               return rc;
+       }
        if (bp->hwrm_spec_code >= 0x10803) {
                rc = bnxt_alloc_ctx_mem(bp);
                if (rc)
@@ -7026,7 +7046,12 @@ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
 
 unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
 {
-       return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
+       unsigned int cp = bp->hw_resc.max_cp_rings;
+
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               cp -= bnxt_get_ulp_msix_num(bp);
+
+       return cp;
 }
 
 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
@@ -7048,7 +7073,9 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num)
        int total_req = bp->cp_nr_rings + num;
        int max_idx, avail_msix;
 
-       max_idx = min_t(int, bp->total_irqs, max_cp);
+       max_idx = bp->total_irqs;
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               max_idx = min_t(int, bp->total_irqs, max_cp);
        avail_msix = max_idx - bp->cp_nr_rings;
        if (!BNXT_NEW_RM(bp) || avail_msix >= num)
                return avail_msix;
@@ -7066,7 +7093,7 @@ static int bnxt_get_num_msix(struct bnxt *bp)
        if (!BNXT_NEW_RM(bp))
                return bnxt_get_max_func_irqs(bp);
 
-       return bnxt_cp_rings_in_use(bp);
+       return bnxt_nq_rings_in_use(bp);
 }
 
 static int bnxt_init_msix(struct bnxt *bp)
@@ -7794,6 +7821,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
 
                rc = bnxt_hwrm_func_resc_qcaps(bp, true);
                hw_resc->resv_cp_rings = 0;
+               hw_resc->resv_irqs = 0;
                hw_resc->resv_tx_rings = 0;
                hw_resc->resv_rx_rings = 0;
                hw_resc->resv_hw_ring_grps = 0;
@@ -8714,6 +8742,26 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
        return rc;
 }
 
+static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
+                                      u32 ring_id, u32 *prod, u32 *cons)
+{
+       struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
+       struct hwrm_dbg_ring_info_get_input req = {0};
+       int rc;
+
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
+       req.ring_type = ring_type;
+       req.fw_ring_id = cpu_to_le32(ring_id);
+       mutex_lock(&bp->hwrm_cmd_lock);
+       rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+       if (!rc) {
+               *prod = le32_to_cpu(resp->producer_index);
+               *cons = le32_to_cpu(resp->consumer_index);
+       }
+       mutex_unlock(&bp->hwrm_cmd_lock);
+       return rc;
+}
+
 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
 {
        struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -8821,6 +8869,11 @@ static void bnxt_timer(struct timer_list *t)
                        bnxt_queue_sp_work(bp);
                }
        }
+
+       if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
+               set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
+               bnxt_queue_sp_work(bp);
+       }
 bnxt_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
 }
@@ -8851,6 +8904,44 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
        bnxt_rtnl_unlock_sp(bp);
 }
 
+static void bnxt_chk_missed_irq(struct bnxt *bp)
+{
+       int i;
+
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               return;
+
+       for (i = 0; i < bp->cp_nr_rings; i++) {
+               struct bnxt_napi *bnapi = bp->bnapi[i];
+               struct bnxt_cp_ring_info *cpr;
+               u32 fw_ring_id;
+               int j;
+
+               if (!bnapi)
+                       continue;
+
+               cpr = &bnapi->cp_ring;
+               for (j = 0; j < 2; j++) {
+                       struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
+                       u32 val[2];
+
+                       if (!cpr2 || cpr2->has_more_work ||
+                           !bnxt_has_work(bp, cpr2))
+                               continue;
+
+                       if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
+                               cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
+                               continue;
+                       }
+                       fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
+                       bnxt_dbg_hwrm_ring_info_get(bp,
+                               DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
+                               fw_ring_id, &val[0], &val[1]);
+                       cpr->missed_irqs++;
+               }
+       }
+}
+
 static void bnxt_cfg_ntp_filters(struct bnxt *);
 
 static void bnxt_sp_task(struct work_struct *work)
@@ -8930,6 +9021,9 @@ static void bnxt_sp_task(struct work_struct *work)
        if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
                bnxt_tc_flow_stats_work(bp);
 
+       if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
+               bnxt_chk_missed_irq(bp);
+
        /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
         * must be the last functions to be called before exiting.
         */
@@ -9733,13 +9827,16 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
                                int *max_cp)
 {
        struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-       int max_ring_grps = 0;
+       int max_ring_grps = 0, max_irq;
 
        *max_tx = hw_resc->max_tx_rings;
        *max_rx = hw_resc->max_rx_rings;
-       *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-                       hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
-       *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
+       *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
+       max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
+                       bnxt_get_ulp_msix_num(bp),
+                       bnxt_get_max_func_stat_ctxs(bp));
+       if (!(bp->flags & BNXT_FLAG_CHIP_P5))
+               *max_cp = min_t(int, *max_cp, max_irq);
        max_ring_grps = hw_resc->max_hw_ring_grps;
        if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
                *max_cp -= 1;
@@ -9747,6 +9844,11 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
        }
        if (bp->flags & BNXT_FLAG_AGG_RINGS)
                *max_rx >>= 1;
+       if (bp->flags & BNXT_FLAG_CHIP_P5) {
+               bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
+               /* On P5 chips, the max_cp output param is the number of available NQs */
+               *max_cp = max_irq;
+       }
        *max_rx = min_t(int, *max_rx, max_ring_grps);
 }
 
@@ -10087,6 +10189,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        bnxt_hwrm_func_qcfg(bp);
+       bnxt_hwrm_vnic_qcaps(bp);
        bnxt_hwrm_port_led_qcaps(bp);
        bnxt_ethtool_init(bp);
        bnxt_dcb_init(bp);
@@ -10120,7 +10223,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
        }
 
-       bnxt_hwrm_vnic_qcaps(bp);
        if (bnxt_rfs_supported(bp)) {
                dev->hw_features |= NETIF_F_NTUPLE;
                if (bnxt_rfs_capable(bp)) {
index 498b373c992d372504278eb666d79f0ad83a9cd6..3030931ccaf8afc25b4d42d4a8390a7692bcbbcd 100644 (file)
@@ -798,6 +798,8 @@ struct bnxt_cp_ring_info {
        u8                      had_work_done:1;
        u8                      has_more_work:1;
 
+       u32                     last_cp_raw_cons;
+
        struct bnxt_coal        rx_ring_coal;
        u64                     rx_packets;
        u64                     rx_bytes;
@@ -816,6 +818,7 @@ struct bnxt_cp_ring_info {
        dma_addr_t              hw_stats_map;
        u32                     hw_stats_ctx_id;
        u64                     rx_l4_csum_errors;
+       u64                     missed_irqs;
 
        struct bnxt_ring_struct cp_ring_struct;
 
@@ -925,6 +928,7 @@ struct bnxt_hw_resc {
        u16     min_stat_ctxs;
        u16     max_stat_ctxs;
        u16     max_irqs;
+       u16     resv_irqs;
 };
 
 #if defined(CONFIG_BNXT_SRIOV)
@@ -1527,6 +1531,7 @@ struct bnxt {
 #define BNXT_LINK_SPEED_CHNG_SP_EVENT  14
 #define BNXT_FLOW_STATS_SP_EVENT       15
 #define BNXT_UPDATE_PHY_SP_EVENT       16
+#define BNXT_RING_COAL_NOW_SP_EVENT    17
 
        struct bnxt_hw_resc     hw_resc;
        struct bnxt_pf_info     pf;
index 48078564f0258cf2d862bd6d57c9d0da79d072a1..6cc69a58478a5ffaad8b7a0511d68e36a93657f3 100644 (file)
@@ -137,7 +137,7 @@ reset_coalesce:
        return rc;
 }
 
-#define BNXT_NUM_STATS 21
+#define BNXT_NUM_STATS 22
 
 #define BNXT_RX_STATS_ENTRY(counter)   \
        { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
@@ -384,6 +384,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
                for (k = 0; k < stat_fields; j++, k++)
                        buf[j] = le64_to_cpu(hw_stats[k]);
                buf[j++] = cpr->rx_l4_csum_errors;
+               buf[j++] = cpr->missed_irqs;
 
                bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
                        le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
@@ -468,6 +469,8 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                        buf += ETH_GSTRING_LEN;
                        sprintf(buf, "[%d]: rx_l4_csum_errors", i);
                        buf += ETH_GSTRING_LEN;
+                       sprintf(buf, "[%d]: missed_irqs", i);
+                       buf += ETH_GSTRING_LEN;
                }
                for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
                        strcpy(buf, bnxt_sw_func_stats[i].string);
@@ -2942,8 +2945,8 @@ bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
        record->asic_state = 0;
        strlcpy(record->system_name, utsname()->nodename,
                sizeof(record->system_name));
-       record->year = cpu_to_le16(tm.tm_year);
-       record->month = cpu_to_le16(tm.tm_mon);
+       record->year = cpu_to_le16(tm.tm_year + 1900);
+       record->month = cpu_to_le16(tm.tm_mon + 1);
        record->day = cpu_to_le16(tm.tm_mday);
        record->hour = cpu_to_le16(tm.tm_hour);
        record->minute = cpu_to_le16(tm.tm_min);
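
The fix accounts for struct tm's conventions: tm_year counts years since 1900 and tm_mon runs 0..11, so the raw values would have stamped the coredump with, say, year 118 and a zero-based month. A compilable reminder:

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm tm;

	gmtime_r(&now, &tm);
	/* tm_year is years since 1900, tm_mon is 0..11 */
	printf("record: %04d-%02d-%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	return 0;
}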
index beee61292d5e522bae0842e5e76253eeb4e1d1f4..0a3097baafde6f31b47ec6b1a3fb9982ed7d6329 100644 (file)
@@ -43,6 +43,9 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
        if (ulp_id == BNXT_ROCE_ULP) {
                unsigned int max_stat_ctxs;
 
+               if (bp->flags & BNXT_FLAG_CHIP_P5)
+                       return -EOPNOTSUPP;
+
                max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
                if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
                    bp->num_stat_ctxs == max_stat_ctxs)
@@ -165,7 +168,7 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
        if (BNXT_NEW_RM(bp)) {
                struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
-               avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+               avail_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
                edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
        }
        bnxt_fill_msix_vecs(bp, ent);
index 20c1681bb1afeea35e23f20242abc0fe34fd1304..2d6f090bf6440cc7253fe4f0764b10bde618ff73 100644 (file)
@@ -2855,7 +2855,6 @@ static void bcmgenet_netif_start(struct net_device *dev)
 
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
 
-       netif_tx_start_all_queues(dev);
        bcmgenet_enable_tx_napi(priv);
 
        /* Monitor link interrupts now */
@@ -2937,6 +2936,8 @@ static int bcmgenet_open(struct net_device *dev)
 
        bcmgenet_netif_start(dev);
 
+       netif_tx_start_all_queues(dev);
+
        return 0;
 
 err_irq1:
@@ -2958,7 +2959,7 @@ static void bcmgenet_netif_stop(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
        bcmgenet_disable_tx_napi(priv);
-       netif_tx_stop_all_queues(dev);
+       netif_tx_disable(dev);
 
        /* Disable MAC receive */
        umac_enable_set(priv, CMD_RX_EN, false);
@@ -3620,13 +3621,13 @@ static int bcmgenet_suspend(struct device *d)
        if (!netif_running(dev))
                return 0;
 
+       netif_device_detach(dev);
+
        bcmgenet_netif_stop(dev);
 
        if (!device_may_wakeup(d))
                phy_suspend(dev->phydev);
 
-       netif_device_detach(dev);
-
        /* Prepare the device for Wake-on-LAN and switch to the slow clock */
        if (device_may_wakeup(d) && priv->wolopts) {
                ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
@@ -3700,8 +3701,6 @@ static int bcmgenet_resume(struct device *d)
        /* Always enable ring 16 - descriptor ring */
        bcmgenet_enable_dma(priv, dma_ctrl);
 
-       netif_device_attach(dev);
-
        if (!device_may_wakeup(d))
                phy_resume(dev->phydev);
 
@@ -3710,6 +3709,8 @@ static int bcmgenet_resume(struct device *d)
 
        bcmgenet_netif_start(dev);
 
+       netif_device_attach(dev);
+
        return 0;
 
 out_clk_disable:
index 89295306f1615713f6b59202e83e0ae0d1fb2338..432c3b8670848e868e53d4b02096d5c8cf19b87d 100644 (file)
@@ -12422,6 +12422,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 {
        struct tg3 *tp = netdev_priv(dev);
        int i, irq_sync = 0, err = 0;
+       bool reset_phy = false;
 
        if ((ering->rx_pending > tp->rx_std_ring_mask) ||
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12453,7 +12454,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
 
        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-               err = tg3_restart_hw(tp, false);
+               /* Reset PHY to avoid PHY lock up */
+               if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5719 ||
+                   tg3_asic_rev(tp) == ASIC_REV_5720)
+                       reset_phy = true;
+
+               err = tg3_restart_hw(tp, reset_phy);
                if (!err)
                        tg3_netif_start(tp);
        }
@@ -12487,6 +12494,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 {
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;
+       bool reset_phy = false;
 
        if (tp->link_config.autoneg == AUTONEG_ENABLE)
                tg3_warn_mgmt_link_flap(tp);
@@ -12556,7 +12564,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
 
                if (netif_running(dev)) {
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-                       err = tg3_restart_hw(tp, false);
+                       /* Reset PHY to avoid PHY lock up */
+                       if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+                           tg3_asic_rev(tp) == ASIC_REV_5719 ||
+                           tg3_asic_rev(tp) == ASIC_REV_5720)
+                               reset_phy = true;
+
+                       err = tg3_restart_hw(tp, reset_phy);
                        if (!err)
                                tg3_netif_start(tp);
                }
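
Both tg3 ethtool paths above now request a PHY reset when restarting the MAC on 5717/5719/5720-class parts, per the in-line comment about PHY lock-up. The decision is computed once into a bool and handed to tg3_restart_hw(); the same quirk-flag pattern in a standalone sketch, with made-up revision names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Quirk-flag pattern: derive a one-shot workaround decision from the
     * hardware revision, then pass it down. Names are illustrative. */
    enum asic_rev { REV_5717, REV_5719, REV_5720, REV_OTHER };

    static bool needs_phy_reset(enum asic_rev rev)
    {
        switch (rev) {
        case REV_5717:
        case REV_5719:
        case REV_5720:
            return true;      /* these parts can lock up the PHY on restart */
        default:
            return false;
        }
    }

    int main(void)
    {
        printf("%d %d\n", needs_phy_reset(REV_5719), needs_phy_reset(REV_OTHER));
        return 0;
    }
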
index 4c3925af53bccf0215f9fabd037adf8ab993ea46..abe5d0dac8510b984692b70c6e4f5307c105335e 100644 (file)
@@ -111,7 +111,7 @@ static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
        "mac_tx_one_collision",
        "mac_tx_multi_collision",
        "mac_tx_max_collision_fail",
-       "mac_tx_max_deferal_fail",
+       "mac_tx_max_deferral_fail",
        "mac_tx_fifo_err",
        "mac_tx_runts",
 
index ea9859e028d48f55328eb211bdbef27e7f2b8f76..de61060721c4a67a1f55e47bb29ddf4f38b0dce3 100644 (file)
@@ -349,13 +349,15 @@ lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct sk_buff *skb = sc->ctxptr;
        struct net_device *ndev = skb->dev;
+       u32 iq_no;
 
        dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
                         sc->datasize, DMA_TO_DEVICE);
        dev_kfree_skb_any(skb);
+       iq_no = sc->iq_no;
        octeon_free_soft_command(oct, sc);
 
-       if (octnet_iq_is_full(oct, sc->iq_no))
+       if (octnet_iq_is_full(oct, iq_no))
                return;
 
        if (netif_queue_stopped(ndev))
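
The liquidio fix above is a classic use-after-free repair: sc->iq_no was read after octeon_free_soft_command() had released sc, so the value is now copied out before the free. The general pattern, as a runnable sketch with hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct cmd { int queue; };

    /* Copy everything still needed out of the object, then free it.
     * Reading cmd->queue after free() would be a use-after-free. */
    static int finish(struct cmd *cmd)
    {
        int queue = cmd->queue;   /* save before the object is destroyed */
        free(cmd);
        return queue;             /* safe: uses the saved copy */
    }

    int main(void)
    {
        struct cmd *c = malloc(sizeof(*c));
        if (!c)
            return 1;
        c->queue = 3;
        printf("queue %d\n", finish(c));
        return 0;
    }
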
index 55af04fa03a77e850196e82e930e3f85af7c6aa7..6c8dcb65ff031d230303604c2071797027bf11a4 100644 (file)
@@ -1441,6 +1441,9 @@ static void nic_remove(struct pci_dev *pdev)
 {
        struct nicpf *nic = pci_get_drvdata(pdev);
 
+       if (!nic)
+               return;
+
        if (nic->flags & NIC_SRIOV_ENABLED)
                pci_disable_sriov(pdev);
 
index 768f584f8392732b19d6e889ed456a3c6de8e809..88f8a8fa93cdcef2162f1867b46ed9525ef4fbf1 100644 (file)
@@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
        bool if_up = netif_running(nic->netdev);
        struct bpf_prog *old_prog;
        bool bpf_attached = false;
+       int ret = 0;
 
        /* For now just support only the usual MTU sized frames */
        if (prog && (dev->mtu > 1500)) {
@@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
        if (nic->xdp_prog) {
                /* Attach BPF program */
                nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
-               if (!IS_ERR(nic->xdp_prog))
+               if (!IS_ERR(nic->xdp_prog)) {
                        bpf_attached = true;
+               } else {
+                       ret = PTR_ERR(nic->xdp_prog);
+                       nic->xdp_prog = NULL;
+               }
        }
 
        /* Calculate Tx queues needed for XDP and network stack */
@@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
                netif_trans_update(nic->netdev);
        }
 
-       return 0;
+       return ret;
 }
 
 static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
index 187a249ff2d1d2fd4201a4e74b9459cbeb3a4b52..fcaf18fa39048b5b83646a7db8e95fd484e27a1f 100644 (file)
@@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
        if (!sq->dmem.base)
                return;
 
-       if (sq->tso_hdrs)
+       if (sq->tso_hdrs) {
                dma_free_coherent(&nic->pdev->dev,
                                  sq->dmem.q_len * TSO_HEADER_SIZE,
                                  sq->tso_hdrs, sq->tso_hdrs_phys);
+               sq->tso_hdrs = NULL;
+       }
 
        /* Free pending skbs in the queue */
        smp_rmb();
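
Clearing sq->tso_hdrs after dma_free_coherent() makes nicvf_free_snd_queue() safe to call more than once: a second pass sees NULL and skips the free instead of double-freeing. A minimal userspace sketch of the idiom:

    #include <stdlib.h>

    struct ring { void *hdrs; };

    /* Teardown that may legitimately run more than once: the free is
     * guarded by the pointer and the pointer is cleared, so a repeat
     * call is a no-op rather than a double free. */
    static void ring_free(struct ring *r)
    {
        if (r->hdrs) {
            free(r->hdrs);
            r->hdrs = NULL;
        }
    }

    int main(void)
    {
        struct ring r = { .hdrs = malloc(64) };
        ring_free(&r);
        ring_free(&r);   /* safe second call */
        return 0;
    }
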
index 75c1c5ed23878441664d2c7db867b67ee8ba21aa..e2cdfa75673fd58cf6dbacd1e67d7a05dd4b33c8 100644 (file)
@@ -67,7 +67,6 @@ config CHELSIO_T3
 config CHELSIO_T4
        tristate "Chelsio Communications T4/T5/T6 Ethernet support"
        depends on PCI && (IPV6 || IPV6=n)
-       depends on THERMAL || !THERMAL
        select FW_LOADER
        select MDIO
        select ZLIB_DEFLATE
index 78e5d17a1d5fb2306f104ce1bf1c4d02c42003ed..91d8a885deba9b8ea82125c3f0f9009fae9a3f91 100644 (file)
@@ -12,6 +12,4 @@ cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
 cxgb4-$(CONFIG_CHELSIO_T4_DCB) +=  cxgb4_dcb.o
 cxgb4-$(CONFIG_CHELSIO_T4_FCOE) +=  cxgb4_fcoe.o
 cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
-ifdef CONFIG_THERMAL
-cxgb4-objs += cxgb4_thermal.o
-endif
+cxgb4-$(CONFIG_THERMAL) += cxgb4_thermal.o
index 05a46926016a5e2d53bc57b0c9c5ca19f5484f29..d49db46254cd7d528f6f181e8237277069c8a916 100644 (file)
@@ -5863,7 +5863,7 @@ fw_attach_fail:
        if (!is_t4(adapter->params.chip))
                cxgb4_ptp_init(adapter);
 
-       if (IS_ENABLED(CONFIG_THERMAL) &&
+       if (IS_REACHABLE(CONFIG_THERMAL) &&
            !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
                cxgb4_thermal_init(adapter);
 
@@ -5932,7 +5932,7 @@ static void remove_one(struct pci_dev *pdev)
 
                if (!is_t4(adapter->params.chip))
                        cxgb4_ptp_stop(adapter);
-               if (IS_ENABLED(CONFIG_THERMAL))
+               if (IS_REACHABLE(CONFIG_THERMAL))
                        cxgb4_thermal_remove(adapter);
 
                /* If we allocated filters, free up state associated with any
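
Taken together, the three cxgb4 changes above drop the `THERMAL || !THERMAL` Kconfig dependency, replace the open-coded ifdef in the Makefile with the standard `cxgb4-$(CONFIG_THERMAL)` objects idiom, and switch the C guards from IS_ENABLED() to IS_REACHABLE(). IS_ENABLED(CONFIG_X) is true for both y and m; IS_REACHABLE(CONFIG_X) is additionally false when X is a module but the referencing code is built in — exactly the configuration where a built-in cxgb4 could not link against a modular thermal core. A userspace simulation of the distinction (not the kernel's actual macro implementation):

    #include <stdio.h>

    /* Model: the feature is Kconfig 'm' while the caller is built-in. */
    #define FEATURE_IS_Y     0
    #define FEATURE_IS_M     1
    #define CALLER_IS_MODULE 0

    #define SIM_IS_ENABLED   (FEATURE_IS_Y || FEATURE_IS_M)
    #define SIM_IS_REACHABLE (FEATURE_IS_Y || (FEATURE_IS_M && CALLER_IS_MODULE))

    int main(void)
    {
        /* enabled=1 but reachable=0: a built-in caller must not call into
         * code that will only exist in a separate module. */
        printf("enabled=%d reachable=%d\n", SIM_IS_ENABLED, SIM_IS_REACHABLE);
        return 0;
    }
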
index ceec467f590d2a382447d087a9e16f7b0fb576de..949103db8a8ad75afab234f879acddc7e8c4a515 100644 (file)
@@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq,
 
                        u64_stats_update_begin(&port->tx_stats_syncp);
                        port->tx_frag_stats[nfrags]++;
-                       u64_stats_update_end(&port->ir_stats_syncp);
+                       u64_stats_update_end(&port->tx_stats_syncp);
                }
        }
 
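The gemini fix above pairs u64_stats_update_end() with the same syncp that the matching _begin() took; the copy-pasted ir_stats_syncp would have left tx_stats_syncp's sequence count permanently odd, making readers retry forever. A single-threaded model of why the begin/end object must match:

    #include <stdio.h>

    /* Minimal model of a seqcount: writers bump the counter around an
     * update; readers retry while the count is odd or has changed.
     * Ending the write section on a *different* counter (the bug fixed
     * above) leaves this one odd, so readers would spin indefinitely. */
    struct seqcount { unsigned seq; };

    static void write_begin(struct seqcount *s) { s->seq++; } /* odd  */
    static void write_end(struct seqcount *s)   { s->seq++; } /* even */

    int main(void)
    {
        struct seqcount tx = { 0 };
        write_begin(&tx);
        /* ... update the tx statistics ... */
        write_end(&tx);   /* must be the same object as write_begin */
        printf("tx.seq=%u (even => consistent)\n", tx.seq);
        return 0;
    }
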
index 570caeb8ee9edafb723c1f4ffc191274ad903d93..084f24daf2b5a8854dcbb9002314f658f0ceaf26 100644 (file)
@@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
        struct net_device *netdev = dev_id;
        struct ftmac100 *priv = netdev_priv(netdev);
 
-       if (likely(netif_running(netdev))) {
-               /* Disable interrupts for polling */
-               ftmac100_disable_all_int(priv);
+       /* Disable interrupts for polling */
+       ftmac100_disable_all_int(priv);
+       if (likely(netif_running(netdev)))
                napi_schedule(&priv->napi);
-       }
 
        return IRQ_HANDLED;
 }
index c415ac67cb7bef218d476fc59f7302b83660513b..e80fedb27cee81411019914f483590c7ec6c1871 100644 (file)
@@ -2786,7 +2786,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
        if (!muram_node) {
                dev_err(&of_dev->dev, "%s: could not find MURAM node\n",
                        __func__);
-               goto fman_node_put;
+               goto fman_free;
        }
 
        err = of_address_to_resource(muram_node, 0,
@@ -2795,11 +2795,10 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
                of_node_put(muram_node);
                dev_err(&of_dev->dev, "%s: of_address_to_resource() = %d\n",
                        __func__, err);
-               goto fman_node_put;
+               goto fman_free;
        }
 
        of_node_put(muram_node);
-       of_node_put(fm_node);
 
        err = devm_request_irq(&of_dev->dev, irq, fman_irq, IRQF_SHARED,
                               "fman", fman);
index be268dcde8fa2a5db3fd392237d583a4c5cc0d5e..f9a4e76c5a8b73799c61cb7d3a260cc50f9287ee 100644 (file)
@@ -915,10 +915,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
        }
 
        ret = register_netdev(ndev);
-       if (ret) {
-               free_netdev(ndev);
+       if (ret)
                goto alloc_fail;
-       }
 
        return 0;
 
index e82e4ca206205da6562fa3449802869a975a86d3..055b40606dbc20f358f6445067b33f226d79554e 100644 (file)
@@ -316,8 +316,8 @@ struct hnae3_ae_ops {
        int (*set_loopback)(struct hnae3_handle *handle,
                            enum hnae3_loop loop_mode, bool en);
 
-       void (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
-                                bool en_mc_pmc);
+       int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+                               bool en_mc_pmc);
        int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
 
        void (*get_pauseparam)(struct hnae3_handle *handle,
@@ -391,7 +391,7 @@ struct hnae3_ae_ops {
                                      int vector_num,
                                      struct hnae3_ring_chain_node *vr_chain);
 
-       void (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
+       int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
        u32 (*get_fw_version)(struct hnae3_handle *handle);
        void (*get_mdix_mode)(struct hnae3_handle *handle,
                              u8 *tp_mdix_ctrl, u8 *tp_mdix);
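
These hnae3 hunks widen set_promisc_mode and reset_queue from void to int so hardware failures propagate to callers instead of vanishing; the hns3/hclge/hclgevf hunks that follow update every implementation and call site to match. The shape of the conversion, sketched with illustrative names rather than the hnae3 types:

    #include <stdio.h>

    struct ops {
        int (*set_mode)(int enable);   /* was: void (*set_mode)(int) */
    };

    static int hw_set_mode(int enable)
    {
        return enable ? 0 : -1;        /* pretend the hardware can fail */
    }

    static int configure(const struct ops *ops, int enable)
    {
        int ret = ops->set_mode(enable);

        if (ret)                       /* callers can now react to errors */
            fprintf(stderr, "set_mode failed: %d\n", ret);
        return ret;
    }

    int main(void)
    {
        struct ops ops = { .set_mode = hw_set_mode };
        return configure(&ops, 1);
    }
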
index 32f3aca814e78b530495956d7cafcb9017139176..20fcf0d1c2ce5f8ec986928019aefbd209088731 100644 (file)
@@ -509,16 +509,18 @@ static void hns3_nic_set_rx_mode(struct net_device *netdev)
        h->netdev_flags = new_flags;
 }
 
-void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
+int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;
 
        if (h->ae_algo->ops->set_promisc_mode) {
-               h->ae_algo->ops->set_promisc_mode(h,
-                                                 promisc_flags & HNAE3_UPE,
-                                                 promisc_flags & HNAE3_MPE);
+               return h->ae_algo->ops->set_promisc_mode(h,
+                                               promisc_flags & HNAE3_UPE,
+                                               promisc_flags & HNAE3_MPE);
        }
+
+       return 0;
 }
 
 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
@@ -1494,18 +1496,22 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
        return ret;
 }
 
-static void hns3_restore_vlan(struct net_device *netdev)
+static int hns3_restore_vlan(struct net_device *netdev)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
+       int ret = 0;
        u16 vid;
-       int ret;
 
        for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
                ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
-               if (ret)
-                       netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
-                                   vid, ret);
+               if (ret) {
+                       netdev_err(netdev, "Restore vlan: %d filter, ret:%d\n",
+                                  vid, ret);
+                       return ret;
+               }
        }
+
+       return ret;
 }
 
 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
@@ -2727,7 +2733,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
                        chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
                                             GFP_KERNEL);
                        if (!chain)
-                               return -ENOMEM;
+                               goto err_free_chain;
 
                        cur_chain->next = chain;
                        chain->tqp_index = tx_ring->tqp->tqp_index;
@@ -2757,7 +2763,7 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
        while (rx_ring) {
                chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
                if (!chain)
-                       return -ENOMEM;
+                       goto err_free_chain;
 
                cur_chain->next = chain;
                chain->tqp_index = rx_ring->tqp->tqp_index;
@@ -2772,6 +2778,16 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
        }
 
        return 0;
+
+err_free_chain:
+       cur_chain = head->next;
+       while (cur_chain) {
+               chain = cur_chain->next;
+               devm_kfree(&pdev->dev, chain);
+               cur_chain = chain;
+       }
+
+       return -ENOMEM;
 }
 
 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
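
The err_free_chain label added above releases a partially built ring chain when a mid-loop allocation fails, rather than returning -ENOMEM and leaking the nodes already linked. The general build-or-unwind pattern for a singly linked chain, as a standalone sketch (note the next pointer is saved before each free):

    #include <stdlib.h>

    struct node { struct node *next; };

    /* Build an n-node chain off 'head'; if any allocation fails, walk
     * what was linked so far and free it before reporting failure. */
    static int build_chain(struct node *head, int n)
    {
        struct node *cur = head;

        for (int i = 0; i < n; i++) {
            struct node *nn = malloc(sizeof(*nn));
            if (!nn)
                goto err_free_chain;
            nn->next = NULL;
            cur->next = nn;
            cur = nn;
        }
        return 0;

    err_free_chain:
        cur = head->next;
        while (cur) {
            struct node *next = cur->next;   /* save before freeing */
            free(cur);
            cur = next;
        }
        head->next = NULL;
        return -1;
    }

    int main(void)
    {
        struct node head = { 0 };
        return build_chain(&head, 4);
    }
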
@@ -2821,7 +2837,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
        struct hnae3_handle *h = priv->ae_handle;
        struct hns3_enet_tqp_vector *tqp_vector;
        int ret = 0;
-       u16 i;
+       int i;
 
        hns3_nic_set_cpumask(priv);
 
@@ -2868,13 +2884,19 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
                hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
                if (ret)
-                       return ret;
+                       goto map_ring_fail;
 
                netif_napi_add(priv->netdev, &tqp_vector->napi,
                               hns3_nic_common_poll, NAPI_POLL_WEIGHT);
        }
 
        return 0;
+
+map_ring_fail:
+       while (i--)
+               netif_napi_del(&priv->tqp_vector[i].napi);
+
+       return ret;
 }
 
 static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
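
map_ring_fail unwinds with `while (i--)`, deleting exactly the NAPI instances registered before the failing iteration; the loop index also switches from u16 to int, the conventional signed type for this countdown idiom. The same unwind shape on a plain array:

    #include <stdlib.h>

    /* Initialize items[0..n); on failure at index i, unwind exactly the
     * entries that were set up, in reverse order, with while (i--). */
    static int init_all(char **items, int n)
    {
        int i;

        for (i = 0; i < n; i++) {
            items[i] = malloc(16);
            if (!items[i])
                goto unwind;
        }
        return 0;

    unwind:
        while (i--)
            free(items[i]);
        return -1;
    }

    int main(void)
    {
        char *items[4];
        return init_all(items, 4);
    }
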
@@ -3031,8 +3053,10 @@ static int hns3_queue_to_ring(struct hnae3_queue *tqp,
                return ret;
 
        ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
-       if (ret)
+       if (ret) {
+               devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
                return ret;
+       }
 
        return 0;
 }
@@ -3059,6 +3083,12 @@ static int hns3_get_ring_config(struct hns3_nic_priv *priv)
 
        return 0;
 err:
+       while (i--) {
+               devm_kfree(priv->dev, priv->ring_data[i].ring);
+               devm_kfree(priv->dev,
+                          priv->ring_data[i + h->kinfo.num_tqps].ring);
+       }
+
        devm_kfree(&pdev->dev, priv->ring_data);
        return ret;
 }
@@ -3226,9 +3256,6 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
        int i;
 
        for (i = 0; i < h->kinfo.num_tqps; i++) {
-               if (h->ae_algo->ops->reset_queue)
-                       h->ae_algo->ops->reset_queue(h, i);
-
                hns3_fini_ring(priv->ring_data[i].ring);
                hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
        }
@@ -3236,11 +3263,12 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 }
 
 /* Set mac addr if it is configured. or leave it to the AE driver */
-static void hns3_init_mac_addr(struct net_device *netdev, bool init)
+static int hns3_init_mac_addr(struct net_device *netdev, bool init)
 {
        struct hns3_nic_priv *priv = netdev_priv(netdev);
        struct hnae3_handle *h = priv->ae_handle;
        u8 mac_addr_temp[ETH_ALEN];
+       int ret = 0;
 
        if (h->ae_algo->ops->get_mac_addr && init) {
                h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
@@ -3255,8 +3283,9 @@ static void hns3_init_mac_addr(struct net_device *netdev, bool init)
        }
 
        if (h->ae_algo->ops->set_mac_addr)
-               h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
+               ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
 
+       return ret;
 }
 
 static int hns3_restore_fd_rules(struct net_device *netdev)
@@ -3469,20 +3498,29 @@ err_out:
        return ret;
 }
 
-static void hns3_recover_hw_addr(struct net_device *ndev)
+static int hns3_recover_hw_addr(struct net_device *ndev)
 {
        struct netdev_hw_addr_list *list;
        struct netdev_hw_addr *ha, *tmp;
+       int ret = 0;
 
        /* go through and sync uc_addr entries to the device */
        list = &ndev->uc;
-       list_for_each_entry_safe(ha, tmp, &list->list, list)
-               hns3_nic_uc_sync(ndev, ha->addr);
+       list_for_each_entry_safe(ha, tmp, &list->list, list) {
+               ret = hns3_nic_uc_sync(ndev, ha->addr);
+               if (ret)
+                       return ret;
+       }
 
        /* go through and sync mc_addr entries to the device */
        list = &ndev->mc;
-       list_for_each_entry_safe(ha, tmp, &list->list, list)
-               hns3_nic_mc_sync(ndev, ha->addr);
+       list_for_each_entry_safe(ha, tmp, &list->list, list) {
+               ret = hns3_nic_mc_sync(ndev, ha->addr);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
 }
 
 static void hns3_remove_hw_addr(struct net_device *netdev)
@@ -3609,7 +3647,10 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
        int ret;
 
        for (i = 0; i < h->kinfo.num_tqps; i++) {
-               h->ae_algo->ops->reset_queue(h, i);
+               ret = h->ae_algo->ops->reset_queue(h, i);
+               if (ret)
+                       return ret;
+
                hns3_init_ring_hw(priv->ring_data[i].ring);
 
                /* We need to clear tx ring here because self test will
@@ -3701,18 +3742,31 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
        bool vlan_filter_enable;
        int ret;
 
-       hns3_init_mac_addr(netdev, false);
-       hns3_recover_hw_addr(netdev);
-       hns3_update_promisc_mode(netdev, handle->netdev_flags);
+       ret = hns3_init_mac_addr(netdev, false);
+       if (ret)
+               return ret;
+
+       ret = hns3_recover_hw_addr(netdev);
+       if (ret)
+               return ret;
+
+       ret = hns3_update_promisc_mode(netdev, handle->netdev_flags);
+       if (ret)
+               return ret;
+
        vlan_filter_enable = netdev->flags & IFF_PROMISC ? false : true;
        hns3_enable_vlan_filter(netdev, vlan_filter_enable);
 
-
        /* Hardware table is only clear when pf resets */
-       if (!(handle->flags & HNAE3_SUPPORT_VF))
-               hns3_restore_vlan(netdev);
+       if (!(handle->flags & HNAE3_SUPPORT_VF)) {
+               ret = hns3_restore_vlan(netdev);
+               if (ret)
+                       return ret;
+       }
 
-       hns3_restore_fd_rules(netdev);
+       ret = hns3_restore_fd_rules(netdev);
+       if (ret)
+               return ret;
 
        /* Carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(netdev);
index 71cfca132d0bd044006f3a8e1a599973dbceb500..d3636d088aa3d960ae3bc2721d1257018286ae89 100644 (file)
@@ -640,7 +640,7 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
                                 u32 rl_value);
 
 void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
-void hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
+int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
 
 #ifdef CONFIG_HNS3_DCB
 void hns3_dcbnl_setup(struct hnae3_handle *handle);
index ac13cb2b168e5a6e67517837dd470e092a0db8f8..690f62ed87dcaa3b1997df510f0c06784e7c1eb6 100644 (file)
@@ -24,15 +24,15 @@ static int hclge_ring_space(struct hclge_cmq_ring *ring)
        return ring->desc_num - used - 1;
 }
 
-static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int h)
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
 {
-       int u = ring->next_to_use;
-       int c = ring->next_to_clean;
+       int ntu = ring->next_to_use;
+       int ntc = ring->next_to_clean;
 
-       if (unlikely(h >= ring->desc_num))
-               return 0;
+       if (ntu > ntc)
+               return head >= ntc && head <= ntu;
 
-       return u > c ? (h > c && h <= u) : (h > c || h <= u);
+       return head >= ntc || head <= ntu;
 }
 
 static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
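
The rewritten is_valid_csq_clean_head() accepts a hardware-reported head only if it falls inside the window between next_to_clean and next_to_use, including the case where that window wraps past the end of the ring. The check transplants directly into a standalone sketch:

    #include <stdio.h>

    /* A reported clean head is plausible only if it lies between
     * next_to_clean (ntc) and next_to_use (ntu) in ring order. */
    static int head_is_valid(int ntc, int ntu, int head)
    {
        if (ntu > ntc)                        /* window does not wrap */
            return head >= ntc && head <= ntu;
        return head >= ntc || head <= ntu;    /* window wraps around */
    }

    int main(void)
    {
        /* 8-entry ring, ntc=6, ntu=2: valid heads are 6,7,0,1,2 */
        printf("%d %d %d\n",
               head_is_valid(6, 2, 7),    /* 1 */
               head_is_valid(6, 2, 1),    /* 1 */
               head_is_valid(6, 2, 4));   /* 0 */
        return 0;
    }
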
@@ -304,6 +304,10 @@ int hclge_cmd_queue_init(struct hclge_dev *hdev)
 {
        int ret;
 
+       /* Setup the lock for command queue */
+       spin_lock_init(&hdev->hw.cmq.csq.lock);
+       spin_lock_init(&hdev->hw.cmq.crq.lock);
+
        /* Setup the queue entries for use cmd queue */
        hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
        hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
@@ -337,18 +341,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
        u32 version;
        int ret;
 
+       spin_lock_bh(&hdev->hw.cmq.csq.lock);
+       spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
        hdev->hw.cmq.csq.next_to_clean = 0;
        hdev->hw.cmq.csq.next_to_use = 0;
        hdev->hw.cmq.crq.next_to_clean = 0;
        hdev->hw.cmq.crq.next_to_use = 0;
 
-       /* Setup the lock for command queue */
-       spin_lock_init(&hdev->hw.cmq.csq.lock);
-       spin_lock_init(&hdev->hw.cmq.crq.lock);
-
        hclge_cmd_init_regs(&hdev->hw);
        clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 
+       spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+       spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
        ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
        if (ret) {
                dev_err(&hdev->pdev->dev,
index dca6f2326c2672bf75f46613c2ed3389003090de..123c37e653f3eda4ad120970aa4cd19b094557f8 100644 (file)
@@ -751,7 +751,7 @@ static void hclge_process_ncsi_error(struct hclge_dev *hdev,
        ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd,
                                    HCLGE_NCSI_INT_CLR, 0);
        if (ret)
-               dev_err(dev, "failed(=%d) to clear NCSI intrerrupt status\n",
+               dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n",
                        ret);
 }
 
index 5234b5373ed3b9257aad7ad3f480040694623721..ffdd96020860db0153d467814984d63efb01995e 100644 (file)
@@ -2236,7 +2236,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
        }
 
        /* clear the source of interrupt if it is not cause by reset */
-       if (event_cause != HCLGE_VECTOR0_EVENT_RST) {
+       if (event_cause == HCLGE_VECTOR0_EVENT_MBX) {
                hclge_clear_event_cause(hdev, event_cause, clearval);
                hclge_enable_vector(&hdev->misc_vector, true);
        }
@@ -2470,14 +2470,17 @@ static void hclge_reset(struct hclge_dev *hdev)
        handle = &hdev->vport[0].nic;
        rtnl_lock();
        hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+       rtnl_unlock();
 
        if (!hclge_reset_wait(hdev)) {
+               rtnl_lock();
                hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
                hclge_reset_ae_dev(hdev->ae_dev);
                hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
 
                hclge_clear_reset_cause(hdev);
        } else {
+               rtnl_lock();
                /* schedule again to check pending resets later */
                set_bit(hdev->reset_type, &hdev->reset_pending);
                hclge_reset_task_schedule(hdev);
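
hclge_reset() now drops rtnl_lock around hclge_reset_wait() and re-takes it on both branches, so a potentially long hardware poll no longer stalls everything else serialized on RTNL. A pthread sketch of the drop-across-the-wait pattern (illustrative, not the driver's actual locking):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Don't hold a system-wide lock across a slow hardware wait: drop it
     * for the wait and re-take it for the state updates afterwards. */
    static void reset_device(void)
    {
        pthread_mutex_lock(&big_lock);
        /* quick: tell clients the device is going down */
        pthread_mutex_unlock(&big_lock);

        usleep(1000);           /* stand-in for polling reset completion */

        pthread_mutex_lock(&big_lock);
        /* quick: reinit clients, or reschedule the reset on timeout */
        pthread_mutex_unlock(&big_lock);
    }

    int main(void)
    {
        reset_device();
        return 0;
    }
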
@@ -3314,8 +3317,8 @@ void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
        param->vf_id = vport_id;
 }
 
-static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
-                                  bool en_mc_pmc)
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+                                 bool en_mc_pmc)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
@@ -3323,7 +3326,7 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
 
        hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, true,
                                 vport->vport_id);
-       hclge_cmd_set_promisc_mode(hdev, &param);
+       return hclge_cmd_set_promisc_mode(hdev, &param);
 }
 
 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
@@ -6107,31 +6110,28 @@ static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle,
        return tqp->index;
 }
 
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
        struct hclge_dev *hdev = vport->back;
        int reset_try_times = 0;
        int reset_status;
        u16 queue_gid;
-       int ret;
-
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
-               return;
+       int ret = 0;
 
        queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
 
        ret = hclge_tqp_enable(hdev, queue_id, 0, false);
        if (ret) {
-               dev_warn(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
-               return;
+               dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
+               return ret;
        }
 
        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
        if (ret) {
-               dev_warn(&hdev->pdev->dev,
-                        "Send reset tqp cmd fail, ret = %d\n", ret);
-               return;
+               dev_err(&hdev->pdev->dev,
+                       "Send reset tqp cmd fail, ret = %d\n", ret);
+               return ret;
        }
 
        reset_try_times = 0;
@@ -6144,16 +6144,16 @@ void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
        }
 
        if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
-               dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
-               return;
+               dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
+               return ret;
        }
 
        ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
-       if (ret) {
-               dev_warn(&hdev->pdev->dev,
-                        "Deassert the soft reset fail, ret = %d\n", ret);
-               return;
-       }
+       if (ret)
+               dev_err(&hdev->pdev->dev,
+                       "Deassert the soft reset fail, ret = %d\n", ret);
+
+       return ret;
 }
 
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
index e3dfd654eca9a1c4b1f0af7a90b29d91c725ca1b..0d92154042699c94a41410ab6dc3aa38437c9b3a 100644 (file)
@@ -778,7 +778,7 @@ int hclge_rss_init_hw(struct hclge_dev *hdev);
 void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
 
 void hclge_mbx_handler(struct hclge_dev *hdev);
-void hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
 void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
 int hclge_cfg_flowctrl(struct hclge_dev *hdev);
 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
index 04462a347a94075bd28453b6db9088d77ef586c9..f890022938d9a15a96e98ab2c48cea04fc0dc784 100644 (file)
@@ -400,6 +400,12 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 
        /* handle all the mailbox requests in the queue */
        while (!hclge_cmd_crq_empty(&hdev->hw)) {
+               if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+                       dev_warn(&hdev->pdev->dev,
+                                "command queue needs re-initializing\n");
+                       return;
+               }
+
                desc = &crq->desc[crq->next_to_use];
                req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
 
index 24b1f2a0c32afc328cbdc82b67d5adb5fae916cc..03018638f701b3f2824bb153864e13cf929bd06c 100644 (file)
@@ -52,7 +52,7 @@ static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
        struct hclge_desc desc;
        int ret;
 
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+       if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
                return 0;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
@@ -90,7 +90,7 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
        struct hclge_desc desc;
        int ret;
 
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+       if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
                return 0;
 
        hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
index aa5cb9834d73a807dd18661c10069b7c929cc6d6..494e562fe8c7e9f2322b9659b8e60ec3abce26f0 100644 (file)
@@ -1168,14 +1168,14 @@ static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
  */
 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
 {
-       struct hclge_vport *vport = hdev->vport;
-       u32 i, k, qs_bitmap;
-       int ret;
+       int i;
 
        for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
-               qs_bitmap = 0;
+               u32 qs_bitmap = 0;
+               int k, ret;
 
                for (k = 0; k < hdev->num_alloc_vport; k++) {
+                       struct hclge_vport *vport = &hdev->vport[k];
                        u16 qs_id = vport->qs_offset + tc;
                        u8 grp, sub_grp;
 
@@ -1185,8 +1185,6 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
                                                  HCLGE_BP_SUB_GRP_ID_S);
                        if (i == grp)
                                qs_bitmap |= (1 << sub_grp);
-
-                       vport++;
                }
 
                ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
index e0a86a58342c28dab0a6e044895f9c4635f46207..085edb945389c5fc8aba0d94f8ffe2c74f070f24 100644 (file)
@@ -925,12 +925,12 @@ static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
        return status;
 }
 
-static void hclgevf_set_promisc_mode(struct hnae3_handle *handle,
-                                    bool en_uc_pmc, bool en_mc_pmc)
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle,
+                                   bool en_uc_pmc, bool en_mc_pmc)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 
-       hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
+       return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc);
 }
 
 static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
@@ -1080,7 +1080,7 @@ static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
                                    1, false, NULL, 0);
 }
 
-static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
 {
        struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
        u8 msg_data[2];
@@ -1091,10 +1091,10 @@ static void hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
        /* disable vf queue before send queue reset msg to PF */
        ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
        if (ret)
-               return;
+               return ret;
 
-       hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
-                            2, true, NULL, 0);
+       return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_QUEUE_RESET, 0, msg_data,
+                                   2, true, NULL, 0);
 }
 
 static int hclgevf_notify_client(struct hclgevf_dev *hdev,
@@ -1170,6 +1170,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
        /* bring down the nic to stop any ongoing TX/RX */
        hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
 
+       rtnl_unlock();
+
        /* check if VF could successfully fetch the hardware reset completion
         * status from the hardware
         */
@@ -1181,12 +1183,15 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
                        ret);
 
                dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
+               rtnl_lock();
                hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
 
                rtnl_unlock();
                return ret;
        }
 
+       rtnl_lock();
+
        /* now, re-initialize the nic client and ae device*/
        ret = hclgevf_reset_stack(hdev);
        if (ret)
index 967c993d5303ab87420c691abd1af11f4411560a..bbf9bdd0ee3e74246d558b4caa3cb3cfeb453e58 100644 (file)
@@ -532,7 +532,7 @@ void hinic_task_set_inner_l3(struct hinic_sq_task *task,
 }
 
 void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
-                             enum hinic_l4_offload_type l4_type,
+                             enum hinic_l4_tunnel_type l4_type,
                              u32 tunnel_len)
 {
        task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
index a0dc63a4bfc7ab9bd724fda8899650b37b4785e5..038522e202b6f616db097fbc4f0ef6d05755e682 100644 (file)
@@ -160,7 +160,7 @@ void hinic_task_set_inner_l3(struct hinic_sq_task *task,
                             u32 network_len);
 
 void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
-                             enum hinic_l4_offload_type l4_type,
+                             enum hinic_l4_tunnel_type l4_type,
                              u32 tunnel_len);
 
 void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
index e2f80cca9bed432e88221d89d54fe337e09e16d0..0d2de6f676764d73729e859ecfcceaf0a1f47ac9 100644 (file)
@@ -231,7 +231,7 @@ struct emac_regs {
 #define EMAC_STACR_PHYE                        0x00004000
 #define EMAC_STACR_STAC_MASK           0x00003000
 #define EMAC_STACR_STAC_READ           0x00001000
-#define EMAC_STACR_STAC_WRITE          0x00000800
+#define EMAC_STACR_STAC_WRITE          0x00002000
 #define EMAC_STACR_OPBC_MASK           0x00000C00
 #define EMAC_STACR_OPBC_50             0x00000000
 #define EMAC_STACR_OPBC_66             0x00000400
index 7893beffcc714215a5ed47fc8658b9db66ee9d3a..ed50b8dee44f3a8699ca0a226d81f482cdd0f3c0 100644 (file)
@@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 
                for (j = 0; j < rx_pool->size; j++) {
                        if (rx_pool->rx_buff[j].skb) {
-                               dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
-                               rx_pool->rx_buff[i].skb = NULL;
+                               dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+                               rx_pool->rx_buff[j].skb = NULL;
                        }
                }
 
@@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev)
                return 0;
        }
 
-       mutex_lock(&adapter->reset_lock);
-
        if (adapter->state != VNIC_CLOSED) {
                rc = ibmvnic_login(netdev);
-               if (rc) {
-                       mutex_unlock(&adapter->reset_lock);
+               if (rc)
                        return rc;
-               }
 
                rc = init_resources(adapter);
                if (rc) {
                        netdev_err(netdev, "failed to initialize resources\n");
                        release_resources(adapter);
-                       mutex_unlock(&adapter->reset_lock);
                        return rc;
                }
        }
@@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev)
        rc = __ibmvnic_open(netdev);
        netif_carrier_on(netdev);
 
-       mutex_unlock(&adapter->reset_lock);
-
        return rc;
 }
 
@@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev)
                return 0;
        }
 
-       mutex_lock(&adapter->reset_lock);
        rc = __ibmvnic_close(netdev);
        ibmvnic_cleanup(netdev);
-       mutex_unlock(&adapter->reset_lock);
 
        return rc;
 }
@@ -1545,7 +1536,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
        tx_crq.v1.sge_len = cpu_to_be32(skb->len);
        tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
 
-       if (adapter->vlan_header_insertion) {
+       if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
                tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
                tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
        }
@@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
        u64 old_num_rx_queues, old_num_tx_queues;
+       u64 old_num_rx_slots, old_num_tx_slots;
        struct net_device *netdev = adapter->netdev;
        int i, rc;
 
@@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
        old_num_rx_queues = adapter->req_rx_queues;
        old_num_tx_queues = adapter->req_tx_queues;
+       old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
+       old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
 
        ibmvnic_cleanup(netdev);
 
@@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter,
                        if (rc)
                                return rc;
                } else if (adapter->req_rx_queues != old_num_rx_queues ||
-                          adapter->req_tx_queues != old_num_tx_queues) {
-                       adapter->map_id = 1;
+                          adapter->req_tx_queues != old_num_tx_queues ||
+                          adapter->req_rx_add_entries_per_subcrq !=
+                                                       old_num_rx_slots ||
+                          adapter->req_tx_entries_per_subcrq !=
+                                                       old_num_tx_slots) {
                        release_rx_pools(adapter);
                        release_tx_pools(adapter);
-                       rc = init_rx_pools(netdev);
-                       if (rc)
-                               return rc;
-                       rc = init_tx_pools(netdev);
-                       if (rc)
-                               return rc;
-
                        release_napi(adapter);
-                       rc = init_napi(adapter);
+                       release_vpd_data(adapter);
+
+                       rc = init_resources(adapter);
                        if (rc)
                                return rc;
+
                } else {
                        rc = reset_tx_pools(adapter);
                        if (rc)
@@ -1866,7 +1859,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 
        if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
            adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
-               netdev_notify_peers(netdev);
+               call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
 
        netif_carrier_on(netdev);
 
@@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
                adapter->state = VNIC_PROBED;
                return 0;
        }
-       /* netif_set_real_num_xx_queues needs to take rtnl lock here
-        * unless wait_for_reset is set, in which case the rtnl lock
-        * has already been taken before initializing the reset
-        */
-       if (!adapter->wait_for_reset) {
-               rtnl_lock();
-               rc = init_resources(adapter);
-               rtnl_unlock();
-       } else {
-               rc = init_resources(adapter);
-       }
+
+       rc = init_resources(adapter);
        if (rc)
                return rc;
 
@@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work)
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
        struct net_device *netdev;
+       bool we_lock_rtnl = false;
        u32 reset_state;
        int rc = 0;
 
        adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
        netdev = adapter->netdev;
 
-       mutex_lock(&adapter->reset_lock);
+       /* netif_set_real_num_xx_queues needs to take rtnl lock here
+        * unless wait_for_reset is set, in which case the rtnl lock
+        * has already been taken before initializing the reset
+        */
+       if (!adapter->wait_for_reset) {
+               rtnl_lock();
+               we_lock_rtnl = true;
+       }
        reset_state = adapter->state;
 
        rwi = get_next_rwi(adapter);
@@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work)
        if (rc) {
                netdev_dbg(adapter->netdev, "Reset failed\n");
                free_all_rwi(adapter);
-               mutex_unlock(&adapter->reset_lock);
-               return;
        }
 
        adapter->resetting = false;
-       mutex_unlock(&adapter->reset_lock);
+       if (we_lock_rtnl)
+               rtnl_unlock();
 }
 
 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
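
The ibmvnic hunks replace the adapter-private reset_lock with RTNL; because wait_for_reset callers already hold RTNL, __ibmvnic_reset() records in we_lock_rtnl whether it took the lock itself and releases only what it acquired. The conditional-ownership idiom, modeled with a pthread mutex:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

    /* If the caller may already hold the lock, take it conditionally and
     * remember whether we did, so we release only what we acquired. */
    static void do_reset(bool caller_holds_lock)
    {
        bool we_locked = false;

        if (!caller_holds_lock) {
            pthread_mutex_lock(&rtnl);
            we_locked = true;
        }

        /* ... perform the reset under the lock ... */

        if (we_locked)
            pthread_mutex_unlock(&rtnl);
    }

    int main(void)
    {
        do_reset(false);                /* worker path: takes the lock   */
        pthread_mutex_lock(&rtnl);
        do_reset(true);                 /* caller already holds the lock */
        pthread_mutex_unlock(&rtnl);
        return 0;
    }
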
@@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 
        INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
-       mutex_init(&adapter->reset_lock);
        mutex_init(&adapter->rwi_lock);
        adapter->resetting = false;
 
@@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev)
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 
        adapter->state = VNIC_REMOVING;
-       unregister_netdev(netdev);
-       mutex_lock(&adapter->reset_lock);
+       rtnl_lock();
+       unregister_netdevice(netdev);
 
        release_resources(adapter);
        release_sub_crqs(adapter, 1);
@@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev)
 
        adapter->state = VNIC_REMOVED;
 
-       mutex_unlock(&adapter->reset_lock);
+       rtnl_unlock();
        device_remove_file(&dev->dev, &dev_attr_failover);
        free_netdev(netdev);
        dev_set_drvdata(&dev->dev, NULL);
index 18103b811d4db398df7ce6a6e27c6bda2077c4c2..99c4f8d331ce7c489c4b3badb1d5fbbe57ff8dea 100644 (file)
@@ -1075,7 +1075,7 @@ struct ibmvnic_adapter {
        struct tasklet_struct tasklet;
        enum vnic_state state;
        enum ibmvnic_reset_reason reset_reason;
-       struct mutex reset_lock, rwi_lock;
+       struct mutex rwi_lock;
        struct list_head rwi_list;
        struct work_struct ibmvnic_reset;
        bool resetting;
index fd3373d82a9e94c7850631b88c6b4c7c8940f1a1..59e1bc0f609ee3399130eec23e330111b2dc64e6 100644 (file)
@@ -200,6 +200,15 @@ config IXGBE_DCB
 
          If unsure, say N.
 
+config IXGBE_IPSEC
+       bool "IPSec XFRM cryptography-offload acceleration"
+       depends on IXGBE
+       depends on XFRM_OFFLOAD
+       default y
+       select XFRM_ALGO
+       ---help---
+         Enable support for IPSec offload in ixgbe.ko
+
 config IXGBEVF
        tristate "Intel(R) 10GbE PCI Express Virtual Function Ethernet support"
        depends on PCI_MSI
@@ -217,6 +226,15 @@ config IXGBEVF
          will be called ixgbevf.  MSI-X interrupt support is required
          for this driver to work correctly.
 
+config IXGBEVF_IPSEC
+       bool "IPSec XFRM cryptography-offload acceleration"
+       depends on IXGBEVF
+       depends on XFRM_OFFLOAD
+       default y
+       select XFRM_ALGO
+       ---help---
+         Enable support for IPSec offload in ixgbevf.ko
+
 config I40E
        tristate "Intel(R) Ethernet Controller XL710 Family support"
        imply PTP_1588_CLOCK
index e707d717012faa997a127687ce45d54b27b9e3eb..5d4f1761dc0c2ef0613757ff5bfedb214f9ca79f 100644 (file)
@@ -244,7 +244,8 @@ process_mbx:
                }
 
                /* guarantee we have free space in the SM mailbox */
-               if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
+               if (hw->mbx.state == FM10K_STATE_OPEN &&
+                   !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
                        /* keep track of how many times this occurs */
                        interface->hw_sm_mbx_full++;
 
@@ -302,6 +303,28 @@ void fm10k_iov_suspend(struct pci_dev *pdev)
        }
 }
 
+static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
+{
+       u32 err_mask;
+       int pos;
+
+       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+       if (!pos)
+               return;
+
+       /* Mask the completion abort bit in the ERR_UNCOR_MASK register,
+        * preventing the device from reporting these errors to the upstream
+        * PCIe root device. This avoids bringing down platforms which upgrade
+        * non-fatal completer aborts into machine check exceptions. Completer
+        * aborts can occur whenever a VF reads a queue it doesn't own.
+        */
+       pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
+       err_mask |= PCI_ERR_UNC_COMP_ABORT;
+       pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
+
+       mmiowb();
+}
+
 int fm10k_iov_resume(struct pci_dev *pdev)
 {
        struct fm10k_intfc *interface = pci_get_drvdata(pdev);
@@ -317,6 +340,12 @@ int fm10k_iov_resume(struct pci_dev *pdev)
        if (!iov_data)
                return -ENOMEM;
 
+       /* Lower severity of completer abort error reporting as
+        * the VFs can trigger this any time they read a queue
+        * that they don't own.
+        */
+       fm10k_mask_aer_comp_abort(pdev);
+
        /* allocate hardware resources for the VFs */
        hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);
 
@@ -460,20 +489,6 @@ void fm10k_iov_disable(struct pci_dev *pdev)
        fm10k_iov_free_data(pdev);
 }
 
-static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
-{
-       u32 err_sev;
-       int pos;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
-       if (!pos)
-               return;
-
-       pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
-       err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
-       pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
-}
-
 int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
 {
        int current_vfs = pci_num_vf(pdev);
@@ -495,12 +510,6 @@ int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
 
        /* allocate VFs if not already allocated */
        if (num_vfs && num_vfs != current_vfs) {
-               /* Disable completer abort error reporting as
-                * the VFs can trigger this any time they read a queue
-                * that they don't own.
-                */
-               fm10k_disable_aer_comp_abort(pdev);
-
                err = pci_enable_sriov(pdev, num_vfs);
                if (err) {
                        dev_err(&pdev->dev,
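
fm10k stops permanently lowering the severity of completer aborts (the deleted fm10k_disable_aer_comp_abort()) and instead masks them in PCI_ERR_UNCOR_MASK while IOV is active, per the comment about VFs reading queues they do not own. At its core this is a config-space read-modify-write that sets a single bit; a generic sketch with stand-in register accessors (bit position illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Generic register read-modify-write: set one mask bit without
     * disturbing the others. Stand-ins for pci_read/write_config_dword. */
    static uint32_t reg;                        /* pretend device register */
    static uint32_t read_reg(void)    { return reg; }
    static void write_reg(uint32_t v) { reg = v; }

    #define COMP_ABORT_BIT (1u << 15)           /* illustrative position */

    static void mask_comp_abort(void)
    {
        uint32_t v = read_reg();
        v |= COMP_ABORT_BIT;                    /* set only the wanted bit */
        write_reg(v);
    }

    int main(void)
    {
        reg = 0x5;
        mask_comp_abort();
        printf("reg=0x%x\n", reg);              /* prints reg=0x8005 */
        return 0;
    }
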
index 503bbc0177922c45804bce8280452c7942663bf9..5b2a50e5798f755c4e360d5fec2d13c65f1ea2a1 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "fm10k.h"
 
-#define DRV_VERSION    "0.23.4-k"
+#define DRV_VERSION    "0.26.1-k"
 #define DRV_SUMMARY    "Intel(R) Ethernet Switch Host Interface Driver"
 const char fm10k_driver_version[] = DRV_VERSION;
 char fm10k_driver_name[] = "fm10k";
index 02345d3813036cac695d675ae24b9a02aea8510c..e49fb51d36133ff49c6b11646f895b1b7dc8ee61 100644 (file)
@@ -23,6 +23,8 @@ static const struct fm10k_info *fm10k_info_tbl[] = {
  */
 static const struct pci_device_id fm10k_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, FM10K_DEV_ID_PF), fm10k_device_pf },
+       { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_QDA2), fm10k_device_pf },
+       { PCI_VDEVICE(INTEL, FM10K_DEV_ID_SDI_FM10420_DA2), fm10k_device_pf },
        { PCI_VDEVICE(INTEL, FM10K_DEV_ID_VF), fm10k_device_vf },
        /* required last entry */
        { 0, }
index 3e608e493f9df6bf921383f65be2139ad38cb288..9fb9fca375e3f69282c757fb3ae665a18982e8f3 100644 (file)
@@ -15,6 +15,8 @@ struct fm10k_hw;
 
 #define FM10K_DEV_ID_PF                        0x15A4
 #define FM10K_DEV_ID_VF                        0x15A5
+#define FM10K_DEV_ID_SDI_FM10420_QDA2  0x15D0
+#define FM10K_DEV_ID_SDI_FM10420_DA2   0x15D5
 
 #define FM10K_MAX_QUEUES               256
 #define FM10K_MAX_QUEUES_PF            128
index bc71a21c1dc2cfe7215a3ea124efd2dc11ccbf99..a3f45335437c3cecde089e44c3a13a3932001960 100644 (file)
@@ -1413,7 +1413,7 @@ void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
        }
 
        vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
-       set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->state);
+       set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
 }
 
 /**
@@ -12249,6 +12249,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
                          NETIF_F_GSO_GRE               |
                          NETIF_F_GSO_GRE_CSUM          |
                          NETIF_F_GSO_PARTIAL           |
+                         NETIF_F_GSO_IPXIP4            |
+                         NETIF_F_GSO_IPXIP6            |
                          NETIF_F_GSO_UDP_TUNNEL        |
                          NETIF_F_GSO_UDP_TUNNEL_CSUM   |
                          NETIF_F_SCTP_CRC              |
@@ -12266,13 +12268,13 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        /* record features VLANs can make use of */
        netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
 
-       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
-               netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
-
        hw_features = hw_enc_features           |
                      NETIF_F_HW_VLAN_CTAG_TX   |
                      NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+               hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;
+
        netdev->hw_features |= hw_features;
 
        netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
index 81b0e1f8d14b6d041e4a8668b0f20955b9633e55..ac5698ed0b11194a30496b4bb4f11cafb26938c5 100644 (file)
@@ -3674,7 +3674,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
                dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n",
                        local_vf_id, v_opcode, msglen);
                switch (ret) {
-               case VIRTCHNL_ERR_PARAM:
+               case VIRTCHNL_STATUS_ERR_PARAM:
                        return -EPERM;
                default:
                        return -EINVAL;
index add1e457886df5e40a11388738b7580bc3a2508c..433c8e688c78d5623e65fe5edefeae4cc22d67a8 100644 (file)
@@ -33,7 +33,7 @@ static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
 }
 
 /**
- * i40e_add_xsk_umem - Store an UMEM for a certain ring/qid
+ * i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
  * @vsi: Current VSI
  * @umem: UMEM to store
  * @qid: Ring/qid to associate with the UMEM
@@ -56,7 +56,7 @@ static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
 }
 
 /**
- * i40e_remove_xsk_umem - Remove an UMEM for a certain ring/qid
+ * i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
  * @vsi: Current VSI
  * @qid: Ring/qid associated with the UMEM
  **/
@@ -130,7 +130,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
 }
 
 /**
- * i40e_xsk_umem_enable - Enable/associate an UMEM to a certain ring/qid
+ * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
  * @vsi: Current VSI
  * @umem: UMEM
  * @qid: Rx ring to associate UMEM to
@@ -189,7 +189,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
 }
 
 /**
- * i40e_xsk_umem_disable - Diassociate an UMEM from a certain ring/qid
+ * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
  * @vsi: Current VSI
  * @qid: Rx ring to associate UMEM to
  *
@@ -255,12 +255,12 @@ int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
 }
 
 /**
- * i40e_xsk_umem_query - Queries a certain ring/qid for its UMEM
+ * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
  * @vsi: Current VSI
  * @umem: UMEM to enable/associate to a ring, or NULL to disable
  * @qid: Rx ring to (dis)associate UMEM (from)to
  *
- * This function enables or disables an UMEM to a certain ring.
+ * This function enables or disables a UMEM to a certain ring.
  *
  * Returns 0 on success, <0 on failure
  **/
@@ -276,7 +276,7 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
  * @rx_ring: Rx ring
  * @xdp: xdp_buff used as input to the XDP program
  *
- * This function enables or disables an UMEM to a certain ring.
+ * This function enables or disables a UMEM to a certain ring.
  *
  * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
  **/
index 4c4b5717a627de6353f8f4248163f1f7e333714f..b8548370f1c722e61817c4857adae71f1cce18f0 100644 (file)
@@ -76,6 +76,8 @@ extern const char ice_drv_ver[];
 #define ICE_MIN_INTR_PER_VF            (ICE_MIN_QS_PER_VF + 1)
 #define ICE_DFLT_INTR_PER_VF           (ICE_DFLT_QS_PER_VF + 1)
 
+#define ICE_MAX_RESET_WAIT             20
+
 #define ICE_VSIQF_HKEY_ARRAY_SIZE      ((VSIQF_HKEY_MAX_INDEX + 1) *   4)
 
 #define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
@@ -189,7 +191,6 @@ struct ice_vsi {
        u64 tx_linearize;
        DECLARE_BITMAP(state, __ICE_STATE_NBITS);
        DECLARE_BITMAP(flags, ICE_VSI_FLAG_NBITS);
-       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
        unsigned int current_netdev_flags;
        u32 tx_restart;
        u32 tx_busy;
@@ -369,5 +370,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
 void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
 void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+void ice_napi_del(struct ice_vsi *vsi);
 
 #endif /* _ICE_H_ */
index 8cd6a2401fd9f2c1a804ba4ee3920e0309c87a4a..554fd707a6d69f45f165a6b77ef23bb027ac6baa 100644 (file)
@@ -811,6 +811,9 @@ void ice_deinit_hw(struct ice_hw *hw)
        /* Attempt to disable FW logging before shutting down control queues */
        ice_cfg_fw_log(hw, false);
        ice_shutdown_all_ctrlq(hw);
+
+       /* Clear VSI contexts if not already cleared */
+       ice_clear_all_vsi_ctx(hw);
 }
 
 /**
index 96923580f2a6c2fdb88c88c2f1e88f0b4154f67c..648acdb4c644b6c62d08d8f99c4307e1537d041f 100644 (file)
@@ -1517,10 +1517,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
        }
 
        if (!test_bit(__ICE_DOWN, pf->state)) {
-               /* Give it a little more time to try to come back */
+               /* Give it a little more time to try to come back. If still
+                * down, restart autoneg link or reinitialize the interface.
+                */
                msleep(75);
                if (!test_bit(__ICE_DOWN, pf->state))
                        return ice_nway_reset(netdev);
+
+               ice_down(vsi);
+               ice_up(vsi);
        }
 
        return err;
index 5fdea6ec7675b6d82b8c71c5bbe133b84280c79d..596b9fb1c510dec854004dd0bce6ef68a74ba8c6 100644 (file)
 #define GLNVM_ULD                              0x000B6008
 #define GLNVM_ULD_CORER_DONE_M                 BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M                 BIT(4)
+#define GLPCI_CNF2                             0x000BE004
+#define GLPCI_CNF2_CACHELINE_SIZE_M            BIT(1)
 #define PF_FUNC_RID                            0x0009E880
 #define PF_FUNC_RID_FUNC_NUM_S                 0
 #define PF_FUNC_RID_FUNC_NUM_M                 ICE_M(0x7, 0)
index 5bacad01f0c9c1c8cce2b4f56caca05b034688c2..1041fa2a7767878590930f1851c394eb67da80c5 100644 (file)
@@ -1997,7 +1997,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
        status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
        if (status) {
                netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
-                          ena ? "Ena" : "Dis", vsi->idx, vsi->vsi_num, status,
+                          ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
                           vsi->back->hw.adminq.sq_last_status);
                goto err_out;
        }
@@ -2458,6 +2458,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
         * on this wq
         */
        if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
+               ice_napi_del(vsi);
                unregister_netdev(vsi->netdev);
                free_netdev(vsi->netdev);
                vsi->netdev = NULL;
index 05993451147a09333602f79846f9d39e208d3037..333312a1d59572dfe8cef5bc02d745ab7c2920cf 100644 (file)
@@ -1465,7 +1465,7 @@ skip_req_irq:
  * ice_napi_del - Remove NAPI handler for the VSI
  * @vsi: VSI for which NAPI handler is to be removed
  */
-static void ice_napi_del(struct ice_vsi *vsi)
+void ice_napi_del(struct ice_vsi *vsi)
 {
        int v_idx;
 
@@ -1622,7 +1622,6 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
 {
        struct ice_netdev_priv *np = netdev_priv(netdev);
        struct ice_vsi *vsi = np->vsi;
-       int ret;
 
        if (vid >= VLAN_N_VID) {
                netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1635,7 +1634,8 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
 
        /* Enable VLAN pruning when VLAN 0 is added */
        if (unlikely(!vid)) {
-               ret = ice_cfg_vlan_pruning(vsi, true);
+               int ret = ice_cfg_vlan_pruning(vsi, true);
+
                if (ret)
                        return ret;
        }
@@ -1644,12 +1644,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
         * needed to continue allowing all untagged packets since VLAN prune
         * list is applied to all packets by the switch
         */
-       ret = ice_vsi_add_vlan(vsi, vid);
-
-       if (!ret)
-               set_bit(vid, vsi->active_vlans);
-
-       return ret;
+       return ice_vsi_add_vlan(vsi, vid);
 }
 
 /**
@@ -1677,8 +1672,6 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
        if (status)
                return status;
 
-       clear_bit(vid, vsi->active_vlans);
-
        /* Disable VLAN pruning when VLAN 0 is removed */
        if (unlikely(!vid))
                status = ice_cfg_vlan_pruning(vsi, false);
@@ -2001,6 +1994,22 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
        return 0;
 }
 
+/**
+ * ice_verify_cacheline_size - verify driver's assumption of 64-byte cache lines
+ * @pf: pointer to the PF structure
+ *
+ * No error is returned here because the driver should be able to handle
+ * 128-byte cache lines, so we only print a warning in case issues are seen,
+ * specifically with Tx.
+ */
+static void ice_verify_cacheline_size(struct ice_pf *pf)
+{
+       if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
+               dev_warn(&pf->pdev->dev,
+                        "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+                        ICE_CACHE_LINE_BYTES);
+}
+
 /**
  * ice_probe - Device initialization routine
  * @pdev: PCI device information struct
@@ -2151,6 +2160,8 @@ static int ice_probe(struct pci_dev *pdev,
        /* since everything is good, start the service timer */
        mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
 
+       ice_verify_cacheline_size(pf);
+
        return 0;
 
 err_alloc_sw_unroll:
@@ -2182,6 +2193,12 @@ static void ice_remove(struct pci_dev *pdev)
        if (!pf)
                return;
 
+       for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
+               if (!ice_is_reset_in_progress(pf->state))
+                       break;
+               msleep(100);
+       }
+
        set_bit(__ICE_DOWN, pf->state);
        ice_service_task_stop(pf);
 
@@ -2509,31 +2526,6 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
        return ret;
 }
 
-/**
- * ice_restore_vlan - Reinstate VLANs when vsi/netdev comes back up
- * @vsi: the VSI being brought back up
- */
-static int ice_restore_vlan(struct ice_vsi *vsi)
-{
-       int err;
-       u16 vid;
-
-       if (!vsi->netdev)
-               return -EINVAL;
-
-       err = ice_vsi_vlan_setup(vsi);
-       if (err)
-               return err;
-
-       for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) {
-               err = ice_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q), vid);
-               if (err)
-                       break;
-       }
-
-       return err;
-}
-
 /**
  * ice_vsi_cfg - Setup the VSI
  * @vsi: the VSI being configured
@@ -2546,7 +2538,9 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 
        if (vsi->netdev) {
                ice_set_rx_mode(vsi->netdev);
-               err = ice_restore_vlan(vsi);
+
+               err = ice_vsi_vlan_setup(vsi);
+
                if (err)
                        return err;
        }
@@ -3296,7 +3290,7 @@ static void ice_rebuild(struct ice_pf *pf)
        struct device *dev = &pf->pdev->dev;
        struct ice_hw *hw = &pf->hw;
        enum ice_status ret;
-       int err;
+       int err, i;
 
        if (test_bit(__ICE_DOWN, pf->state))
                goto clear_recovery;
@@ -3370,6 +3364,22 @@ static void ice_rebuild(struct ice_pf *pf)
        }
 
        ice_reset_all_vfs(pf, true);
+
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               bool link_up;
+
+               if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
+                       continue;
+               ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+               if (link_up) {
+                       netif_carrier_on(pf->vsi[i]->netdev);
+                       netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+               } else {
+                       netif_carrier_off(pf->vsi[i]->netdev);
+                       netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+               }
+       }
+
        /* if we get here, reset flow is successful */
        clear_bit(__ICE_RESET_FAILED, pf->state);
        return;
index 33403f39f1b3f8680dcf5b63c37956a5df2d0fad..40c9c65589568b34a1eb5ec50d842e41ddd030e3 100644 (file)
@@ -347,6 +347,18 @@ static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
        }
 }
 
+/**
+ * ice_clear_all_vsi_ctx - clear all the VSI context entries
+ * @hw: pointer to the hw struct
+ */
+void ice_clear_all_vsi_ctx(struct ice_hw *hw)
+{
+       u16 i;
+
+       for (i = 0; i < ICE_MAX_VSI; i++)
+               ice_clear_vsi_ctx(hw, i);
+}
+
 /**
  * ice_add_vsi - add VSI context to the hardware and VSI handle list
  * @hw: pointer to the hw struct
index b88d96a1ef6935c2564e07e3443378b65f32b7ea..d5ef0bd58bf9789260bbf7575868f16a6123ec81 100644 (file)
@@ -190,6 +190,8 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
               struct ice_sq_cd *cd);
 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_all_vsi_ctx(struct ice_hw *hw);
+/* Switch config */
 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
 
 /* Switch/bridge related commands */
index 5dae968d853e17b88344d3852c0b40a7ac133f66..fe5bbabbb41eacdac1bcd0466196ee835b35b97f 100644 (file)
@@ -1520,7 +1520,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
 
        /* update gso_segs and bytecount */
        first->gso_segs = skb_shinfo(skb)->gso_segs;
-       first->bytecount = (first->gso_segs - 1) * off->header_len;
+       first->bytecount += (first->gso_segs - 1) * off->header_len;
 
        cd_tso_len = skb->len - off->header_len;
        cd_mss = skb_shinfo(skb)->gso_size;
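
The switch from "=" to "+=" matters because first->bytecount is seeded
with the skb length earlier in the xmit path; for TSO the wire total is
the original skb plus one replicated header per extra segment. A quick
sanity check with hypothetical numbers (not taken from the driver):

	/* 7300-byte skb, 100-byte headers, gso_size 1200 -- illustration only */
	unsigned int skb_len = 7300, header_len = 100, gso_size = 1200;
	unsigned int gso_segs = (skb_len - header_len) / gso_size;      /* 6 */
	unsigned int bytecount = skb_len + (gso_segs - 1) * header_len; /* 7800 */
	/* 7800 == gso_segs * (gso_size + header_len), the true on-wire total */
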
@@ -1556,15 +1556,15 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
  * magnitude greater than our largest possible GSO size.
  *
  * This would then be implemented as:
- *     return (((size >> 12) * 85) >> 8) + 1;
+ *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
  *
  * Since multiplication and division are commutative, we can reorder
  * operations into:
- *     return ((size * 85) >> 20) + 1;
+ *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
  */
 static unsigned int ice_txd_use_count(unsigned int size)
 {
-       return ((size * 85) >> 20) + 1;
+       return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 }
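
Spot-checking the reordered expression (a shift by 20 divides by 2^20, and
85 / 2^20 sits just below 1/12K):

	/*   size = 12288: (12288 * 85) >> 20 = 0  ->  0 + 1 = 1 descriptor
	 *   size = 24576: (24576 * 85) >> 20 = 1  ->  1 + 1 = 2 descriptors
	 *   size = 65536: (65536 * 85) >> 20 = 5  ->  5 + 1 = 6 descriptors
	 * in line with DIV_ROUND_UP(size, 12288) for these sizes, with
	 * ICE_DESCS_FOR_SKB_DATA_PTR (== 1) supplying the "+ 1".
	 */
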
 
 /**
@@ -1706,7 +1706,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
         *       + 1 desc for context descriptor,
         * otherwise try next time
         */
-       if (ice_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+       if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
+                             ICE_DESCS_FOR_CTX_DESC)) {
                tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
index 1d0f58bd389bd35d9c5aad257e0d41c12c9ff1cd..75d0eaf6c9ddbe18a26c04d1b9edfae4322e16d7 100644 (file)
 #define ICE_RX_BUF_WRITE       16      /* Must be power of 2 */
 #define ICE_MAX_TXQ_PER_TXQG   128
 
-/* Tx Descriptors needed, worst case */
-#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+/* We assume the cache line is always 64 bytes here for ice.  To make sure
+ * that assumption holds, probe checks GLPCI_CNF2 and prints a warning if the
+ * register reports a 128-byte cache line.  We do it this way because we do
+ * not want to read GLPCI_CNF2, or a variable caching its value, on every
+ * pass through the Tx path.
+ */
+#define ICE_CACHE_LINE_BYTES           64
+#define ICE_DESCS_PER_CACHE_LINE       (ICE_CACHE_LINE_BYTES / \
+                                        sizeof(struct ice_tx_desc))
+#define ICE_DESCS_FOR_CTX_DESC         1
+#define ICE_DESCS_FOR_SKB_DATA_PTR     1
+/* Tx descriptors needed, worst case */
+#define DESC_NEEDED (MAX_SKB_FRAGS + ICE_DESCS_FOR_CTX_DESC + \
+                    ICE_DESCS_PER_CACHE_LINE + ICE_DESCS_FOR_SKB_DATA_PTR)
 #define ICE_DESC_UNUSED(R)     \
        ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
        (R)->next_to_clean - (R)->next_to_use - 1)
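
With typical values the new worst case works out as follows (a sketch
assuming 4 KiB pages, hence MAX_SKB_FRAGS == 17, and a 16-byte
struct ice_tx_desc):

	/* ICE_DESCS_PER_CACHE_LINE = 64 / 16 = 4
	 * DESC_NEEDED = 17 (frags) + 1 (context desc)
	 *             + 4 (one full cache line) + 1 (skb data pointer)
	 *             = 23 descriptors
	 */
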
index 12f9432abf11099f2c0eecb3bfe289ee801134f7..f4dbc81c198863b5037f6acc07ed053522923065 100644 (file)
@@ -92,12 +92,12 @@ struct ice_link_status {
        u64 phy_type_low;
        u16 max_frame_size;
        u16 link_speed;
+       u16 req_speeds;
        u8 lse_ena;     /* Link Status Event notification */
        u8 link_info;
        u8 an_info;
        u8 ext_info;
        u8 pacing;
-       u8 req_speeds;
        /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
         * ice_aqc_get_phy_caps structure
         */
index 45f10f8f01dc1ba0e39cd1e87ba16a11c5fbbe7f..e71065f9d3918a7623ac1bb517c14c7c9b632a39 100644 (file)
@@ -348,7 +348,7 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
        struct ice_vsi_ctx ctxt = { 0 };
        enum ice_status status;
 
-       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_TAGGED |
+       ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
                               ICE_AQ_VSI_PVLAN_INSERT_PVID |
                               ICE_AQ_VSI_VLAN_EMOD_STR;
        ctxt.info.pvid = cpu_to_le16(vid);
@@ -2171,7 +2171,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 
                        if (!ice_vsi_add_vlan(vsi, vid)) {
                                vf->num_vlan++;
-                               set_bit(vid, vsi->active_vlans);
 
                                /* Enable VLAN pruning when VLAN 0 is added */
                                if (unlikely(!vid))
@@ -2190,7 +2189,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
                         */
                        if (!ice_vsi_kill_vlan(vsi, vid)) {
                                vf->num_vlan--;
-                               clear_bit(vid, vsi->active_vlans);
 
                                /* Disable VLAN pruning when removing VLAN 0 */
                                if (unlikely(!vid))
index c54ebedca6da9a3ddaeff1b43cde2993438c4714..c393cb2c0f1681f702a8b648f21ef49c3c8a565d 100644 (file)
@@ -842,6 +842,7 @@ s32 igb_pll_workaround_i210(struct e1000_hw *hw)
                nvm_word = E1000_INVM_DEFAULT_AL;
        tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
        igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
+       phy_word = E1000_PHY_PLL_UNCONF;
        for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
                /* check current state directly from internal PHY */
                igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
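
Pre-setting phy_word avoids acting on garbage when the register read does
not complete (an inference from the surrounding loop, not from the commit
message):

	/* If igb_read_phy_reg_82580() fails, phy_word still holds
	 * E1000_PHY_PLL_UNCONF, so the loop keeps treating the PLL as
	 * unconfigured and retries instead of testing an uninitialized
	 * stack value.
	 */
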
index 9f4d700e09df33cb5d3e17576859a563f9c6c52c..2b95dc9c7a6a8bd3fd0a2d03365e282b26382e70 100644 (file)
  *
  * The 40 bit 82580 SYSTIM overflows every
  *   2^40 * 10^-9 /  60  = 18.3 minutes.
+ *
+ * SYSTIM is converted to real time using a timecounter. As
+ * timecounter_cyc2time() allows old timestamps, the timecounter needs
+ * to be updated at least once per half of the SYSTIM interval.
+ * Scheduling of delayed work is not very accurate, and also the NIC
+ * clock can be adjusted to run up to 6% faster and the system clock
+ * up to 10% slower, so we aim for 6 minutes to be sure the actual
+ * interval in the NIC time is shorter than 9.16 minutes.
  */
 
-#define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 9)
+#define IGB_SYSTIM_OVERFLOW_PERIOD     (HZ * 60 * 6)
 #define IGB_PTP_TX_TIMEOUT             (HZ * 15)
 #define INCPERIOD_82576                        BIT(E1000_TIMINCA_16NS_SHIFT)
 #define INCVALUE_82576_MASK            GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
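
The 6-minute figure can be checked against the bound stated above:

	/*   system clock up to 10% slow:  6 / 0.90    = 6.67 real minutes
	 *   NIC clock up to 6% fast:      6.67 * 1.06 = 7.07 NIC minutes
	 *   half the overflow period:     18.3 / 2    = 9.16 minutes
	 * 7.07 < 9.16, so the timecounter is always refreshed in time.
	 */
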
index ca6b0c458e4a50eca96aea8ace64806cbf66aa45..4fb0d9e3f2da21db5acf17dbf5b9c091502db442 100644 (file)
@@ -17,4 +17,4 @@ ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
 ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
 ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
 ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
-ixgbe-$(CONFIG_XFRM_OFFLOAD) += ixgbe_ipsec.o
+ixgbe-$(CONFIG_IXGBE_IPSEC) += ixgbe_ipsec.o
index ec1b87cc44100904bf7b486692bc1d06b256fc80..143bdd5ee2a088a738a5fb381e33c141d774e633 100644 (file)
@@ -769,9 +769,9 @@ struct ixgbe_adapter {
 #define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
        u32 *rss_key;
 
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
        struct ixgbe_ipsec *ipsec;
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBE_IPSEC */
 
        /* AF_XDP zero-copy */
        struct xdp_umem **xsk_umems;
@@ -1008,7 +1008,7 @@ void ixgbe_store_key(struct ixgbe_adapter *adapter);
 void ixgbe_store_reta(struct ixgbe_adapter *adapter);
 s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
                       u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
 void ixgbe_init_ipsec_offload(struct ixgbe_adapter *adapter);
 void ixgbe_stop_ipsec_offload(struct ixgbe_adapter *adapter);
 void ixgbe_ipsec_restore(struct ixgbe_adapter *adapter);
@@ -1036,5 +1036,5 @@ static inline int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter,
                                        u32 *mbuf, u32 vf) { return -EACCES; }
 static inline int ixgbe_ipsec_vf_del_sa(struct ixgbe_adapter *adapter,
                                        u32 *mbuf, u32 vf) { return -EACCES; }
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBE_IPSEC */
 #endif /* _IXGBE_H_ */
index 0049a2becd7e7349db1cc6d7acf653489882277c..113b38e0defbf547920a5f2fadb63428e50e6dc1 100644 (file)
@@ -8694,7 +8694,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 
 #endif /* IXGBE_FCOE */
 
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
        if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
                goto out_drop;
 #endif
@@ -10190,7 +10190,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
         * the TSO, so it's the exception.
         */
        if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
                if (!skb->sp)
 #endif
                        features &= ~NETIF_F_TSO;
@@ -10883,7 +10883,7 @@ skip_sriov:
        if (hw->mac.type >= ixgbe_mac_82599EB)
                netdev->features |= NETIF_F_SCTP_CRC;
 
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBE_IPSEC
 #define IXGBE_ESP_FEATURES     (NETIF_F_HW_ESP | \
                                 NETIF_F_HW_ESP_TX_CSUM | \
                                 NETIF_F_GSO_ESP)
index af25a8fffeb8ba4f19a79f11f6d96f3d7f252047..5dacfc870259881f8746a72546f5c410f4bf06f6 100644 (file)
@@ -722,8 +722,10 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
                        ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
                                        adapter->default_up, vf);
 
-               if (vfinfo->spoofchk_enabled)
+               if (vfinfo->spoofchk_enabled) {
                        hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+                       hw->mac.ops.set_mac_anti_spoofing(hw, true, vf);
+               }
        }
 
        /* reset multicast table array for vf */
index 10dbaf4f6e808d7e2e19e6a560f11213eb1b2b87..9c42f741ed5efde3ad667bbc75d4a6b5ea5dec15 100644 (file)
@@ -2262,7 +2262,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
                *autoneg = false;
 
                if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-                   hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+                   hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+                   hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+                   hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
                        *speed = IXGBE_LINK_SPEED_1GB_FULL;
                        return 0;
                }
index 297d0f0858b59eba441397d0f6924558e7836768..186a4bb24fdea58a483f161858333313c34373ae 100644 (file)
@@ -10,5 +10,5 @@ ixgbevf-objs := vf.o \
                 mbx.o \
                 ethtool.o \
                 ixgbevf_main.o
-ixgbevf-$(CONFIG_XFRM_OFFLOAD) += ipsec.o
+ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
 
index e399e1c0c54ab178648632cbe187e50d7bd5f0f3..ecab686574b65d23dd7840d840b26c2c002338f9 100644 (file)
@@ -459,7 +459,7 @@ int ethtool_ioctl(struct ifreq *ifr);
 
 extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);
 
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBEVF_IPSEC
 void ixgbevf_init_ipsec_offload(struct ixgbevf_adapter *adapter);
 void ixgbevf_stop_ipsec_offload(struct ixgbevf_adapter *adapter);
 void ixgbevf_ipsec_restore(struct ixgbevf_adapter *adapter);
@@ -482,7 +482,7 @@ static inline int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring,
                                   struct ixgbevf_tx_buffer *first,
                                   struct ixgbevf_ipsec_tx_data *itd)
 { return 0; }
-#endif /* CONFIG_XFRM_OFFLOAD */
+#endif /* CONFIG_IXGBEVF_IPSEC */
 
 void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
 void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
index 98707ee11d72667a3ba83212ea301eeb27ba09d7..5e47ede7e832001a17575616fc0ab337d4d2f7fe 100644 (file)
@@ -4150,7 +4150,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
        first->tx_flags = tx_flags;
        first->protocol = vlan_get_protocol(skb);
 
-#ifdef CONFIG_XFRM_OFFLOAD
+#ifdef CONFIG_IXGBEVF_IPSEC
        if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
                goto out_drop;
 #endif
index 8c5ba4b81fb7332aba15a0f4199155f39a4da8a3..2d4d10a017e59bfc96870e9fd94338986f6ef7f2 100644 (file)
@@ -512,7 +512,8 @@ static int xrx200_probe(struct platform_device *pdev)
        err = register_netdev(net_dev);
        if (err)
                goto err_unprepare_clk;
-       return err;
+
+       return 0;
 
 err_unprepare_clk:
        clk_disable_unprepare(priv->clk);
@@ -520,7 +521,7 @@ err_unprepare_clk:
 err_uninit_dma:
        xrx200_hw_cleanup(priv);
 
-       return 0;
+       return err;
 }
 
 static int xrx200_remove(struct platform_device *pdev)
index 5bfd349bf41ac58ffaa003ecdca1d6493f4f42eb..e5397c8197b9c3713c48e925dad0dfcee732c0b9 100644 (file)
@@ -494,7 +494,7 @@ struct mvneta_port {
 #if defined(__LITTLE_ENDIAN)
 struct mvneta_tx_desc {
        u32  command;           /* Options used by HW for packet transmitting.*/
-       u16  reserverd1;        /* csum_l4 (for future use)             */
+       u16  reserved1;         /* csum_l4 (for future use)             */
        u16  data_size;         /* Data size of transmitted packet in bytes */
        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
@@ -519,7 +519,7 @@ struct mvneta_rx_desc {
 #else
 struct mvneta_tx_desc {
        u16  data_size;         /* Data size of transmitted packet in bytes */
-       u16  reserverd1;        /* csum_l4 (for future use)             */
+       u16  reserved1;         /* csum_l4 (for future use)             */
        u32  command;           /* Options used by HW for packet transmitting.*/
        u32  reserved2;         /* hw_cmd - (for future use, PMT)       */
        u32  buf_phys_addr;     /* Physical addr of transmitted buffer  */
@@ -3343,7 +3343,6 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
        if (state->interface != PHY_INTERFACE_MODE_NA &&
            state->interface != PHY_INTERFACE_MODE_QSGMII &&
            state->interface != PHY_INTERFACE_MODE_SGMII &&
-           state->interface != PHY_INTERFACE_MODE_2500BASEX &&
            !phy_interface_mode_is_8023z(state->interface) &&
            !phy_interface_mode_is_rgmii(state->interface)) {
                bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -3357,14 +3356,9 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
        /* Asymmetric pause is unsupported */
        phylink_set(mask, Pause);
 
-       /* We cannot use 1Gbps when using the 2.5G interface. */
-       if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
-               phylink_set(mask, 2500baseT_Full);
-               phylink_set(mask, 2500baseX_Full);
-       } else {
-               phylink_set(mask, 1000baseT_Full);
-               phylink_set(mask, 1000baseX_Full);
-       }
+       /* Half-duplex at speeds higher than 100Mbit is unsupported */
+       phylink_set(mask, 1000baseT_Full);
+       phylink_set(mask, 1000baseX_Full);
 
        if (!phy_interface_mode_is_8023z(state->interface)) {
                /* 10M and 100M are only supported in non-802.3z mode */
index 176c6b56fdccda6bc65f9c78f2b29fa88c6ed867..398328f107437e2f7c17e5fe54d0d8f36d83f058 100644 (file)
@@ -796,6 +796,7 @@ struct mvpp2_queue_vector {
        int nrxqs;
        u32 pending_cause_rx;
        struct mvpp2_port *port;
+       struct cpumask *mask;
 };
 
 struct mvpp2_port {
index 14f9679c957c6afd04929ea23ccd80afd19650ab..125ea99418df6915da4c5ad9427802e0aeb738f9 100644 (file)
@@ -3298,24 +3298,30 @@ static int mvpp2_irqs_init(struct mvpp2_port *port)
        for (i = 0; i < port->nqvecs; i++) {
                struct mvpp2_queue_vector *qv = port->qvecs + i;
 
-               if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
+               if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+                       qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
+                       if (!qv->mask) {
+                               err = -ENOMEM;
+                               goto err;
+                       }
+
                        irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
+               }
 
                err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
                if (err)
                        goto err;
 
                if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
-                       unsigned long mask = 0;
                        unsigned int cpu;
 
                        for_each_present_cpu(cpu) {
                                if (mvpp2_cpu_to_thread(port->priv, cpu) ==
                                    qv->sw_thread_id)
-                                       mask |= BIT(cpu);
+                                       cpumask_set_cpu(cpu, qv->mask);
                        }
 
-                       irq_set_affinity_hint(qv->irq, to_cpumask(&mask));
+                       irq_set_affinity_hint(qv->irq, qv->mask);
                }
        }
 
@@ -3325,6 +3331,8 @@ err:
                struct mvpp2_queue_vector *qv = port->qvecs + i;
 
                irq_set_affinity_hint(qv->irq, NULL);
+               kfree(qv->mask);
+               qv->mask = NULL;
                free_irq(qv->irq, qv);
        }
 
@@ -3339,6 +3347,8 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
                struct mvpp2_queue_vector *qv = port->qvecs + i;
 
                irq_set_affinity_hint(qv->irq, NULL);
+               kfree(qv->mask);
+               qv->mask = NULL;
                irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
                free_irq(qv->irq, qv);
        }
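
The move from an on-stack bitmap to a kzalloc'ed cpumask in these hunks
follows the lifetime rule of the affinity-hint API:

	/* irq_set_affinity_hint() records the mask pointer rather than
	 * copying the mask, so the storage must stay valid until the hint
	 * is cleared.  The old on-stack "unsigned long mask" dangled as
	 * soon as the function returned; the allocated qv->mask lives
	 * until teardown clears the hint and then frees it, in that order.
	 */
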
@@ -4365,8 +4375,27 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                                   unsigned long *supported,
                                   struct phylink_link_state *state)
 {
+       struct mvpp2_port *port = netdev_priv(dev);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
 
+       /* Invalid combinations */
+       switch (state->interface) {
+       case PHY_INTERFACE_MODE_10GKR:
+       case PHY_INTERFACE_MODE_XAUI:
+               if (port->gop_id != 0)
+                       goto empty_set;
+               break;
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+               if (port->gop_id == 0)
+                       goto empty_set;
+               break;
+       default:
+               break;
+       }
+
        phylink_set(mask, Autoneg);
        phylink_set_port_modes(mask);
        phylink_set(mask, Pause);
@@ -4374,6 +4403,8 @@ static void mvpp2_phylink_validate(struct net_device *dev,
 
        switch (state->interface) {
        case PHY_INTERFACE_MODE_10GKR:
+       case PHY_INTERFACE_MODE_XAUI:
+       case PHY_INTERFACE_MODE_NA:
                phylink_set(mask, 10000baseCR_Full);
                phylink_set(mask, 10000baseSR_Full);
                phylink_set(mask, 10000baseLR_Full);
@@ -4381,7 +4412,11 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                phylink_set(mask, 10000baseER_Full);
                phylink_set(mask, 10000baseKR_Full);
                /* Fall-through */
-       default:
+       case PHY_INTERFACE_MODE_RGMII:
+       case PHY_INTERFACE_MODE_RGMII_ID:
+       case PHY_INTERFACE_MODE_RGMII_RXID:
+       case PHY_INTERFACE_MODE_RGMII_TXID:
+       case PHY_INTERFACE_MODE_SGMII:
                phylink_set(mask, 10baseT_Half);
                phylink_set(mask, 10baseT_Full);
                phylink_set(mask, 100baseT_Half);
@@ -4393,11 +4428,18 @@ static void mvpp2_phylink_validate(struct net_device *dev,
                phylink_set(mask, 1000baseT_Full);
                phylink_set(mask, 1000baseX_Full);
                phylink_set(mask, 2500baseX_Full);
+               break;
+       default:
+               goto empty_set;
        }
 
        bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
        bitmap_and(state->advertising, state->advertising, mask,
                   __ETHTOOL_LINK_MODE_MASK_NBITS);
+       return;
+
+empty_set:
+       bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
 }
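
The valid-combination rules encoded above, in table form:

	/*   10GBASE-KR / XAUI      -> gop_id 0 only
	 *   RGMII (all variants)   -> any gop_id except 0
	 *   anything else          -> passes this check, but must still hit
	 *                             a case in the second switch below or
	 *                             it lands in empty_set with no modes
	 */
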
 
 static void mvpp22_xlg_link_state(struct mvpp2_port *port,
index 36054e6fb9d34840cd15f9c45296c9258df9a276..f200b8c420d5738e5bc5c67b124c71cc27f21fd6 100644 (file)
@@ -5,7 +5,7 @@
 config MLX4_EN
        tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
        depends on MAY_USE_DEVLINK
-       depends on PCI
+       depends on PCI && NETDEVICES && ETHERNET && INET
        select MLX4_CORE
        imply PTP_1588_CLOCK
        ---help---
index deef5a998985a9f8398d693b8bd219a5fc083313..9af34e03892c19e780149a3a8405d936fe417d45 100644 (file)
@@ -337,7 +337,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
 static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
                                  int align, u32 skip_mask, u32 *puid)
 {
-       u32 uid;
+       u32 uid = 0;
        u32 res;
        struct mlx4_zone_allocator *zone_alloc = zone->allocator;
        struct mlx4_zone_entry *curr_node;
index f11b45001cad8c5635684e820a03f183e12d6ef5..d290f0787dfbb22e444bc5892639e4717f11647a 100644 (file)
@@ -1084,8 +1084,8 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
 
        tx_pause = !!(pause->tx_pause);
        rx_pause = !!(pause->rx_pause);
-       rx_ppp = priv->prof->rx_ppp && !(tx_pause || rx_pause);
-       tx_ppp = priv->prof->tx_ppp && !(tx_pause || rx_pause);
+       rx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->rx_ppp;
+       tx_ppp = (tx_pause || rx_pause) ? 0 : priv->prof->tx_ppp;
 
        err = mlx4_SET_PORT_general(mdev->dev, priv->port,
                                    priv->rx_skb_size + ETH_FCS_LEN,
index b744cd49a7856e97917bcdce93e7d5ee205f09cd..6b88881b8e3585422f2548df3267175bc9d6b16f 100644 (file)
@@ -3493,8 +3493,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
 
-       /* MTU range: 46 - hw-specific max */
-       dev->min_mtu = MLX4_EN_MIN_MTU;
+       /* MTU range: 68 - hw-specific max */
+       dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = priv->max_mtu;
 
        mdev->pndev[port] = dev;
index 5a6d0919533d6e0e619927abd753c5d07ed95dac..db00bf1c23f5ad31d64652ddc8bee32e2e7534c8 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/vmalloc.h>
 #include <linux/irq.h>
 
+#include <net/ip.h>
 #if IS_ENABLED(CONFIG_IPV6)
 #include <net/ip6_checksum.h>
 #endif
index 1857ee0f0871d48285a6d3711f7c3e9a1e08a05f..6f5153afcab4dfc331c099da854c54f1b9500887 100644 (file)
@@ -1006,7 +1006,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                ring->packets++;
        }
        ring->bytes += tx_info->nr_bytes;
-       netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
        AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
        if (tx_info->inl)
@@ -1044,7 +1043,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                netif_tx_stop_queue(ring->tx_queue);
                ring->queue_stopped++;
        }
-       send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+       send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+                                              tx_info->nr_bytes,
+                                              skb->xmit_more);
 
        real_size = (real_size / 16) & 0x3f;
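
Folding the BQL update and the doorbell decision into a single helper
closes the window between the old netdev_tx_sent_queue() call and the
xmit_more test. A paraphrase of __netdev_tx_sent_queue()'s semantics
(a sketch, not the authoritative implementation):

	/* returns true when the doorbell must ring */
	static inline bool sent_queue_sketch(struct netdev_queue *q,
					     unsigned int bytes, bool xmit_more)
	{
		if (xmit_more) {
			dql_queued(&q->dql, bytes);	  /* BQL accounting only */
			return netif_tx_queue_stopped(q); /* flush if queue stopped */
		}
		netdev_tx_sent_queue(q, bytes);		  /* may stop the queue */
		return true;				  /* no deferral requested */
	}
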
 
index ebcd2778eeb3e1f22524064ff2db7762e1f852ba..23f1b5b512c2198cb664167e42fb91ff9c549f13 100644 (file)
@@ -540,8 +540,8 @@ struct slave_list {
 struct resource_allocator {
        spinlock_t alloc_lock; /* protect quotas */
        union {
-               int res_reserved;
-               int res_port_rsvd[MLX4_MAX_PORTS];
+               unsigned int res_reserved;
+               unsigned int res_port_rsvd[MLX4_MAX_PORTS];
        };
        union {
                int res_free;
index 485d856546c6c3b83cacdcbe6376d73862dd0f8a..8137454e253497e37b638a88432daf01184c95cb 100644 (file)
 #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
                                  ETH_HLEN + PREAMBLE_LEN)
 
-#define MLX4_EN_MIN_MTU                46
 /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
  * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
  */
index 2e84f10f59ba9ca0a69d980b87368f7310e5bb13..1a11bc0e16123e918e68e7a8f8bed703825665fa 100644 (file)
@@ -363,6 +363,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
                        container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
                                     buf);
 
+               (*mpt_entry)->lkey = 0;
                err = mlx4_SW2HW_MPT(dev, mailbox, key);
        }
 
index d7fbd5b6ac957d0b3b9ed9fa0000eed1ae0e73bc..11832480292646c9fcbd881402ed95497218c67e 100644 (file)
@@ -569,6 +569,7 @@ struct mlx5e_rq {
 
        unsigned long          state;
        int                    ix;
+       unsigned int           hw_mtu;
 
        struct net_dim         dim; /* Dynamic Interrupt Moderation */
 
index 023dc4bccd289e5fea787a5fe81f8c0e3617e03b..4a37713023be58311a52f62672e22b321aa80cdf 100644 (file)
@@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
 
        eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
        *speed = mlx5e_port_ptys2speed(eth_proto_oper);
-       if (!(*speed)) {
-               mlx5_core_warn(mdev, "cannot get port speed\n");
+       if (!(*speed))
                err = -EINVAL;
-       }
 
        return err;
 }
@@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm,
        case 40000:
                if (!write)
                        *fec_policy = MLX5_GET(pplm_reg, pplm,
-                                              fec_override_cap_10g_40g);
+                                              fec_override_admin_10g_40g);
                else
                        MLX5_SET(pplm_reg, pplm,
                                 fec_override_admin_10g_40g, *fec_policy);
@@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm,
        case 10000:
        case 40000:
                *fec_cap = MLX5_GET(pplm_reg, pplm,
-                                   fec_override_admin_10g_40g);
+                                   fec_override_cap_10g_40g);
                break;
        case 25000:
                *fec_cap = MLX5_GET(pplm_reg, pplm,
@@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
 
 int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
 {
+       u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC);
        bool fec_mode_not_supp_in_speed = false;
-       u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC);
        u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
        u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
        int sz = MLX5_ST_SZ_BYTES(pplm_reg);
-       u32 current_fec_speed;
+       u8 fec_policy_auto = 0;
        u8 fec_caps = 0;
        int err;
        int i;
@@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
        if (err)
                return err;
 
-       err = mlx5e_port_linkspeed(dev, &current_fec_speed);
-       if (err)
-               return err;
+       MLX5_SET(pplm_reg, out, local_port, 1);
 
-       memset(in, 0, sz);
-       MLX5_SET(pplm_reg, in, local_port, 1);
-       for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) {
+       for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) {
                mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]);
-               /* policy supported for link speed */
-               if (!!(fec_caps & fec_policy)) {
-                       mlx5e_fec_admin_field(in, &fec_policy, 1,
+               /* policy supported for link speed, or policy is auto */
+               if (fec_caps & fec_policy || fec_policy == fec_policy_auto) {
+                       mlx5e_fec_admin_field(out, &fec_policy, 1,
                                              fec_supported_speeds[i]);
                } else {
-                       if (fec_supported_speeds[i] == current_fec_speed)
-                               return -EOPNOTSUPP;
-                       mlx5e_fec_admin_field(in, &no_fec_policy, 1,
-                                             fec_supported_speeds[i]);
+                       /* turn off FEC if supported, else leave it unchanged */
+                       if (fec_caps & fec_policy_nofec)
+                               mlx5e_fec_admin_field(out, &fec_policy_nofec, 1,
+                                                     fec_supported_speeds[i]);
                        fec_mode_not_supp_in_speed = true;
                }
        }
@@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy)
                              "FEC policy 0x%x is not supported for some speeds",
                              fec_policy);
 
-       return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1);
+       return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1);
 }
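
Note the register access at the end reuses the queried image as the write
buffer:

	/* Read-modify-write: "out" still holds the PPLM image queried
	 * earlier in the function, so any speed whose admin field the loop
	 * does not rewrite keeps its currently configured FEC policy when
	 * the register is written back.
	 */
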
index c047da8752daa80bf856373037ead5ba26adbefc..eac245a93f918c588dc8237e1af5996f3d0f73f0 100644 (file)
@@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
        int err;
 
        err = mlx5e_port_linkspeed(priv->mdev, &speed);
-       if (err)
+       if (err) {
+               mlx5_core_warn(priv->mdev, "cannot get port speed\n");
                return 0;
+       }
 
        xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
 
index 3e770abfd80212d70e3b7952fa05ba57ae1490e1..25c1c4f96841244336c3257abbd213707655ad51 100644 (file)
@@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
        ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
                                             Autoneg);
 
-       err = get_fec_supported_advertised(mdev, link_ksettings);
-       if (err)
+       if (get_fec_supported_advertised(mdev, link_ksettings))
                netdev_dbg(netdev, "%s: FEC caps query failed: %d\n",
                           __func__, err);
 
index 1243edbedc9e96c90196f7b7f311129f2eb0cb04..871313d6b34d1b315e6ef1a9c07cba396de14186 100644 (file)
@@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        rq->channel = c;
        rq->ix      = c->ix;
        rq->mdev    = mdev;
+       rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
        rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
        int err;
        u32 i;
 
+       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+       if (err)
+               return err;
+
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;
 
-       mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
@@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        int eqn;
        int err;
 
+       err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+       if (err)
+               return err;
+
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
@@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-       mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
@@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        int err;
        int eqn;
 
+       err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+       if (err)
+               return err;
+
        c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
@@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->xdp      = !!params->xdp_prog;
        c->stats    = &priv->channel_stats[ix].ch;
 
-       mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
        c->irq_desc = irq_to_desc(irq);
 
        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
        return 0;
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
 static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
 
        return 0;
 }
+#endif
 
 static int set_feature_rx_all(struct net_device *netdev, bool enable)
 {
@@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev,
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                                    set_feature_cvlan_filter);
+#ifdef CONFIG_MLX5_ESWITCH
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
+#endif
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
        }
 
        if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+               bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
                u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
                u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
 
-               reset = reset && (ppw_old != ppw_new);
+               reset = reset && (is_linear || (ppw_old != ppw_new));
        }
 
        if (!reset) {
@@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
            FT_CAP(flow_table_modify)) {
+#ifdef CONFIG_MLX5_ESWITCH
                netdev->hw_features      |= NETIF_F_HW_TC;
+#endif
 #ifdef CONFIG_MLX5_EN_ARFS
                netdev->hw_features      |= NETIF_F_NTUPLE;
 #endif
@@ -5004,11 +5019,21 @@ err_free_netdev:
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
        const struct mlx5e_profile *profile;
+       int max_nch;
        int err;
 
        profile = priv->profile;
        clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
+       /* max number of channels may have changed */
+       max_nch = mlx5e_get_max_num_channels(priv->mdev);
+       if (priv->channels.params.num_channels > max_nch) {
+               mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
+               priv->channels.params.num_channels = max_nch;
+               mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
+                                             MLX5E_INDIR_RQT_SIZE, max_nch);
+       }
+
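
Why the clamp above rebuilds the indirection table too:

	/* max_nch is re-read from the device on every attach, so if fewer
	 * channels are available than before, the channel count and the RSS
	 * indirection table must shrink together -- otherwise the RQT would
	 * reference channels that no longer exist.
	 */
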
        err = profile->init_tx(priv);
        if (err)
                goto out;
index 94224c22ecc310a87b6715051e335446f29bec03..624eed345b5d2b19fa5ed54935667b41090383f8 100644 (file)
@@ -713,48 +713,20 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
        rq->stats->ecn_mark += !!rc;
 }
 
-static __be32 mlx5e_get_fcs(struct sk_buff *skb)
+static u32 mlx5e_get_fcs(const struct sk_buff *skb)
 {
-       int last_frag_sz, bytes_in_prev, nr_frags;
-       u8 *fcs_p1, *fcs_p2;
-       skb_frag_t *last_frag;
-       __be32 fcs_bytes;
+       const void *fcs_bytes;
+       u32 _fcs_bytes;
 
-       if (!skb_is_nonlinear(skb))
-               return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
+       fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
+                                      ETH_FCS_LEN, &_fcs_bytes);
 
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
-       last_frag_sz = skb_frag_size(last_frag);
-
-       /* If all FCS data is in last frag */
-       if (last_frag_sz >= ETH_FCS_LEN)
-               return *(__be32 *)(skb_frag_address(last_frag) +
-                                  last_frag_sz - ETH_FCS_LEN);
-
-       fcs_p2 = (u8 *)skb_frag_address(last_frag);
-       bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
-
-       /* Find where the other part of the FCS is - Linear or another frag */
-       if (nr_frags == 1) {
-               fcs_p1 = skb_tail_pointer(skb);
-       } else {
-               skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
-
-               fcs_p1 = skb_frag_address(prev_frag) +
-                           skb_frag_size(prev_frag);
-       }
-       fcs_p1 -= bytes_in_prev;
-
-       memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
-       memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
-
-       return fcs_bytes;
+       return __get_unaligned_cpu32(fcs_bytes);
 }
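
The rewrite leans on a stock helper instead of walking fragments by hand:

	/* skb_header_pointer() returns a pointer straight into the skb when
	 * the last ETH_FCS_LEN bytes are contiguous, and otherwise copies
	 * them into _fcs_bytes and returns that buffer -- the straddling
	 * case the deleted code handled manually.  It returns NULL only if
	 * the skb is shorter than the requested range.
	 */
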
 
-static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
+static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
 {
-       void *ip_p = skb->data + sizeof(struct ethhdr);
+       void *ip_p = skb->data + network_depth;
 
        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
@@ -783,7 +755,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                goto csum_unnecessary;
 
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
-               if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
+               if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
 
                skb->ip_summed = CHECKSUM_COMPLETE;
@@ -797,8 +769,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                                                 network_depth - ETH_HLEN,
                                                 skb->csum);
                if (unlikely(netdev->features & NETIF_F_RXFCS))
-                       skb->csum = csum_add(skb->csum,
-                                            (__force __wsum)mlx5e_get_fcs(skb));
+                       skb->csum = csum_block_add(skb->csum,
+                                                  (__force __wsum)mlx5e_get_fcs(skb),
+                                                  skb->len - ETH_FCS_LEN);
                stats->csum_complete++;
                return;
        }
@@ -1131,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
        u32 frag_size;
        bool consumed;
 
+       /* Check packet size. Note LRO doesn't use linear SKB */
+       if (unlikely(cqe_bcnt > rq->hw_mtu)) {
+               rq->stats->oversize_pkts_sw_drop++;
+               return NULL;
+       }
+
        va             = page_address(di->page) + head_offset;
        data           = va + rx_headroom;
        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
index 35ded91203f52984dfa451c3fcebaad1d4c07664..4382ef85488c5b6e936424dc902fd590d8030294 100644 (file)
@@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
        return 1;
 }
 
-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
 struct mlx5ehdr {
        __be32 version;
        __be64 magic;
-       char   text[ETH_GSTRING_LEN];
 };
 
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\
+                            sizeof(struct udphdr) + sizeof(struct mlx5ehdr))
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
 static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
 {
        struct sk_buff *skb = NULL;
@@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
        struct ethhdr *ethh;
        struct udphdr *udph;
        struct iphdr *iph;
-       int datalen, iplen;
-
-       datalen = MLX5E_TEST_PKT_SIZE -
-                 (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+       int    iplen;
 
        skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
        if (!skb) {
@@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
        /* Fill UDP header */
        udph->source = htons(9);
        udph->dest = htons(9); /* Discard Protocol */
-       udph->len = htons(datalen + sizeof(struct udphdr));
+       udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr));
        udph->check = 0;
 
        /* Fill IP header */
@@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
        iph->ttl = 32;
        iph->version = 4;
        iph->protocol = IPPROTO_UDP;
-       iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+       iplen = sizeof(struct iphdr) + sizeof(struct udphdr) +
+               sizeof(struct mlx5ehdr);
        iph->tot_len = htons(iplen);
        iph->frag_off = 0;
        iph->saddr = 0;
@@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
        mlxh = skb_put(skb, sizeof(*mlxh));
        mlxh->version = 0;
        mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
-       strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
-       datalen -= sizeof(*mlxh);
-       skb_put_zero(skb, datalen);
 
        skb->csum = 0;
        skb->ip_summed = CHECKSUM_PARTIAL;
index 1e55b9c27ffc0f1c3c20156458d85765cd5d6703..3e99d0728b2f2c5366a13f01400d4354d3c80b2c 100644 (file)
@@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
                s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
+               s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = {
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
+       { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
index 77f74ce11280e26a461343adc92e56d63b387812..3f8e870ef4c903bbca01e894bc2608bbb8ead182 100644 (file)
@@ -96,6 +96,7 @@ struct mlx5e_sw_stats {
        u64 rx_wqe_err;
        u64 rx_mpwqe_filler_cqes;
        u64 rx_mpwqe_filler_strides;
+       u64 rx_oversize_pkts_sw_drop;
        u64 rx_buff_alloc_err;
        u64 rx_cqe_compress_blks;
        u64 rx_cqe_compress_pkts;
@@ -193,6 +194,7 @@ struct mlx5e_rq_stats {
        u64 wqe_err;
        u64 mpwqe_filler_cqes;
        u64 mpwqe_filler_strides;
+       u64 oversize_pkts_sw_drop;
        u64 buff_alloc_err;
        u64 cqe_compress_blks;
        u64 cqe_compress_pkts;
index 608025ca5c04d3d5249595db31edf802bb47615f..fca6f4132c91a51ac2a03eaaf64b0bbc5b6ff2c3 100644 (file)
@@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                         inner_headers);
        }
 
-       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-               struct flow_dissector_key_eth_addrs *key =
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
-               struct flow_dissector_key_eth_addrs *mask =
+               struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+                        ntohs(mask->n_proto));
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+                        ntohs(key->n_proto));
 
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                            dmac_47_16),
-                               mask->dst);
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                            dmac_47_16),
-                               key->dst);
-
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                            smac_47_16),
-                               mask->src);
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                            smac_47_16),
-                               key->src);
-
-               if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+               if (mask->n_proto)
                        *match_level = MLX5_MATCH_L2;
        }
 
@@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        *match_level = MLX5_MATCH_L2;
                }
-       } else {
+       } else if (*match_level != MLX5_MATCH_NONE) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+               *match_level = MLX5_MATCH_L2;
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                }
        }
 
-       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-               struct flow_dissector_key_basic *key =
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+               struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
-               struct flow_dissector_key_basic *mask =
+               struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
-                        ntohs(mask->n_proto));
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-                        ntohs(key->n_proto));
 
-               if (mask->n_proto)
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                            dmac_47_16),
+                               mask->dst);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                            dmac_47_16),
+                               key->dst);
+
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                            smac_47_16),
+                               mask->src);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                            smac_47_16),
+                               key->src);
+
+               if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
                        *match_level = MLX5_MATCH_L2;
        }
 
@@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        /* the HW doesn't need L3 inline to match on frag=no */
                        if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
-                               *match_level = MLX5_INLINE_MODE_L2;
+                               *match_level = MLX5_MATCH_L2;
        /* ***  L2 attributes parsing up to here *** */
                        else
-                               *match_level = MLX5_INLINE_MODE_IP;
+                               *match_level = MLX5_MATCH_L3;
                }
        }
 
@@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
-       if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+       if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "current firmware doesn't support split rule for port mirroring");
                netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
index 515e3d6de05165fa4564e841709a0207f8e19bd5..5a22c5874f3bc30789ffe5fade6ebda0d50ee2b3 100644 (file)
@@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule {
 };
 
 static const struct rhashtable_params rhash_sa = {
-       .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
-       .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
+       /* Keep out "cmd" field from the key as it's
+        * value is not constant during the lifetime
+        * of the key object.
+        */
+       .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
+                  FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
+       .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
+                     FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
        .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
        .automatic_shrinking = true,
        .min_size = 1,
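
A minimal sketch of the key arithmetic above, assuming (for illustration only)
that the mutable "cmd" word is the first member of the hardware SA structure;
the struct names here are hypothetical stand-ins, not the real mlx5
definitions:

    /* Hypothetical layout; only the offset/length arithmetic mirrors
     * the patch above.
     */
    struct example_hw_sa {
            u32 cmd;                /* mutable, must stay out of the key */
            u8  sa_fields[44];      /* stable part, safe to hash */
    };

    struct example_ctx {
            struct rhash_head hash;
            struct example_hw_sa hw_sa;
    };

    /* Shrink the key by sizeof(cmd) and start it just past cmd, so the
     * mutable word never influences hashing or key comparison.
     */
    static const struct rhashtable_params example_rhash = {
            .key_len     = FIELD_SIZEOF(struct example_ctx, hw_sa) -
                           FIELD_SIZEOF(struct example_hw_sa, cmd),
            .key_offset  = offsetof(struct example_ctx, hw_sa) +
                           FIELD_SIZEOF(struct example_hw_sa, cmd),
            .head_offset = offsetof(struct example_ctx, hash),
            .automatic_shrinking = true,
            .min_size    = 1,
    };
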
index b59953daf8b440e50268c7c1c9a812ba286746d8..11dabd62e2c757e24e0947688d7f25fdd172e88c 100644 (file)
@@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev)
 
        netif_carrier_off(epriv->netdev);
        mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
-       mlx5i_uninit_underlay_qp(epriv);
        mlx5e_deactivate_priv_channels(epriv);
        mlx5e_close_channels(&epriv->channels);
+       mlx5i_uninit_underlay_qp(epriv);
 unlock:
        mutex_unlock(&epriv->state_lock);
        return 0;
index 937d0ace699a7eeb4e04af3bf54eebde5dd5d459..30f751e696980d727a86200e1748adda13bb8a22 100644 (file)
@@ -943,8 +943,8 @@ static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
                                             mlxsw_core->bus,
                                             mlxsw_core->bus_priv, true,
                                             devlink);
-       if (err)
-               mlxsw_core->reload_fail = true;
+       mlxsw_core->reload_fail = !!err;
+
        return err;
 }
 
@@ -1083,8 +1083,15 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
 {
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
 
-       if (mlxsw_core->reload_fail)
-               goto reload_fail;
+       if (mlxsw_core->reload_fail) {
+               if (!reload)
+                       /* Only the parts that were not de-initialized in the
+                        * failed reload attempt need to be de-initialized.
+                        */
+                       goto reload_fail_deinit;
+               else
+                       return;
+       }
 
        if (mlxsw_core->driver->fini)
                mlxsw_core->driver->fini(mlxsw_core);
@@ -1098,9 +1105,12 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
        if (!reload)
                devlink_resources_unregister(devlink, NULL);
        mlxsw_core->bus->fini(mlxsw_core->bus_priv);
-       if (reload)
-               return;
-reload_fail:
+
+       return;
+
+reload_fail_deinit:
+       devlink_unregister(devlink);
+       devlink_resources_unregister(devlink, NULL);
        devlink_free(devlink);
 }
 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
index 32cb6718bb173ff966639a2ffc63374e457b586a..db3d2790aeecf9c3b93fe4c66df7856d637dfc70 100644 (file)
@@ -3284,7 +3284,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
  * Configures the ETS elements.
  */
 #define MLXSW_REG_QEEC_ID 0x400D
-#define MLXSW_REG_QEEC_LEN 0x1C
+#define MLXSW_REG_QEEC_LEN 0x20
 
 MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
 
@@ -3326,6 +3326,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
  */
 MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
 
+/* reg_qeec_mise
+ * Min shaper configuration enable. Enables configuration of the min
+ * shaper on this ETS element.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
+
 enum {
        MLXSW_REG_QEEC_BYTES_MODE,
        MLXSW_REG_QEEC_PACKETS_MODE,
@@ -3342,6 +3351,17 @@ enum {
  */
 MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
 
+/* The smallest permitted min shaper rate. */
+#define MLXSW_REG_QEEC_MIS_MIN 200000          /* Kbps */
+
+/* reg_qeec_min_shaper_rate
+ * Min shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
+
 /* reg_qeec_mase
  * Max shaper configuration enable. Enables configuration of the max
  * shaper on this ETS element.
index 8a4983adae940a08b4d4d5ec39637522fb1bea46..9bec940330a450856d2dba23ed7274321cf82059 100644 (file)
@@ -2740,6 +2740,21 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
 }
 
+static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
+                                   enum mlxsw_reg_qeec_hr hr, u8 index,
+                                   u8 next_index, u32 minrate)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+       mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+                           next_index);
+       mlxsw_reg_qeec_mise_set(qeec_pl, true);
+       mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
                              u8 switch_prio, u8 tclass)
 {
@@ -2817,6 +2832,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
                        return err;
        }
 
+       /* Configure the min shaper for multicast TCs. */
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
+                                              MLXSW_REG_QEEC_HIERARCY_TC,
+                                              i + 8, i,
+                                              MLXSW_REG_QEEC_MIS_MIN);
+               if (err)
+                       return err;
+       }
+
        /* Map all priorities to traffic class 0. */
        for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
                err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
@@ -3543,7 +3568,6 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
                        burst_size = 7;
                        break;
                case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
-                       is_bytes = true;
                        rate = 4 * 1024;
                        burst_size = 4;
                        break;
index ad06d9969bc13eb465a82b9138bc0ea342ab15e9..5c13674439f1f0751a369a3112d19bea46a2464c 100644 (file)
@@ -560,7 +560,7 @@ static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
 
        mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
                                                &mc_entry);
-       if (WARN_ON(!mc_record))
+       if (!mc_record)
                return;
 
        mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
@@ -647,7 +647,7 @@ void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
 
        key.fid_index = mlxsw_sp_fid_index(fid);
        mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
-       if (WARN_ON(!mc_list))
+       if (!mc_list)
                return;
 
        mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
index 9e9bb57134f2c868c63c69adc239b396244d444b..6ebf99cc315443e48b9b850957c3a77d0ca559db 100644 (file)
@@ -1275,15 +1275,12 @@ mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
 {
        u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
        enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
-       struct net_device *ipip_ul_dev;
 
        if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
                return false;
 
-       ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ipip_entry->ol_dev);
        return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
-                                                ul_tb_id, ipip_entry) &&
-              (!ipip_ul_dev || ipip_ul_dev == ul_dev);
+                                                ul_tb_id, ipip_entry);
 }
 
 /* Given decap parameters, find the corresponding IPIP entry. */
index bc60d7a8b49d764b4066c50bc808963c52c27950..50080c60a279436ad52eb95658ff01fc4587242b 100644 (file)
@@ -296,7 +296,13 @@ static bool
 mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
                                    bridge_port)
 {
-       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_port->dev);
+       struct net_device *dev = bridge_port->dev;
+       struct mlxsw_sp *mlxsw_sp;
+
+       if (is_vlan_dev(dev))
+               mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
+       else
+               mlxsw_sp = mlxsw_sp_lower_get(dev);
 
        /* In case ports were pulled from out of a bridged LAG, then
         * it's possible the reference count isn't zero, yet the bridge
@@ -2109,7 +2115,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
 
        vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
        mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
-       if (WARN_ON(!mlxsw_sp_port_vlan))
+       if (!mlxsw_sp_port_vlan)
                return;
 
        mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
@@ -2134,8 +2140,10 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
        if (!fid)
                return -EINVAL;
 
-       if (mlxsw_sp_fid_vni_is_set(fid))
-               return -EINVAL;
+       if (mlxsw_sp_fid_vni_is_set(fid)) {
+               err = -EINVAL;
+               goto err_vni_exists;
+       }
 
        err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
        if (err)
@@ -2149,6 +2157,7 @@ mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
        return 0;
 
 err_nve_fid_enable:
+err_vni_exists:
        mlxsw_sp_fid_put(fid);
        return err;
 }
@@ -2661,8 +2670,6 @@ static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
                break;
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
                fdb_info = &switchdev_work->fdb_info;
-               if (!fdb_info->added_by_user)
-                       break;
                mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
                break;
        case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
index 867cddba840feb07aa93c1b58908023572951e7a..e8ca98c070f68443c6460ecb10d3eb4b3ee9a2f2 100644 (file)
@@ -1672,7 +1672,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
                netif_wake_queue(adapter->netdev);
        }
 
-       if (!napi_complete_done(napi, weight))
+       if (!napi_complete(napi))
                goto done;
 
        /* enable isr */
@@ -1681,7 +1681,7 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
        lan743x_csr_read(adapter, INT_STS);
 
 done:
-       return weight;
+       return 0;
 }
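
For context, a minimal sketch of the tx-completion NAPI convention this fix
adopts (handler name is illustrative): a tx-only poll consumes no rx budget,
so it completes with plain napi_complete() and always reports zero work.

    /* Illustrative tx-only poll; a real handler reclaims completed
     * descriptors before completing.
     */
    static int example_tx_napi_poll(struct napi_struct *napi, int weight)
    {
            /* ... reclaim completed tx descriptors here ... */

            /* If we were re-scheduled meanwhile, skip unmasking the IRQ. */
            if (!napi_complete(napi))
                    return 0;

            /* ... re-enable the tx interrupt here ... */

            return 0;       /* tx work never counts against the budget */
    }
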
 
 static void lan743x_tx_ring_cleanup(struct lan743x_tx *tx)
@@ -1870,9 +1870,9 @@ static int lan743x_tx_open(struct lan743x_tx *tx)
        tx->vector_flags = lan743x_intr_get_vector_flags(adapter,
                                                         INT_BIT_DMA_TX_
                                                         (tx->channel_number));
-       netif_napi_add(adapter->netdev,
-                      &tx->napi, lan743x_tx_napi_poll,
-                      tx->ring_size - 1);
+       netif_tx_napi_add(adapter->netdev,
+                         &tx->napi, lan743x_tx_napi_poll,
+                         tx->ring_size - 1);
        napi_enable(&tx->napi);
 
        data = 0;
@@ -3017,6 +3017,7 @@ static const struct dev_pm_ops lan743x_pm_ops = {
 
 static const struct pci_device_id lan743x_pcidev_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
+       { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7431) },
        { 0, }
 };
 
index 0e82b6368798a2cf02cfef922b4feffa2ff779d1..2d6eea18973e8f4c8b5c733d825a5a0cc8492c39 100644 (file)
@@ -548,6 +548,7 @@ struct lan743x_adapter;
 /* SMSC acquired EFAR late 1990's, MCHP acquired SMSC 2012 */
 #define PCI_VENDOR_ID_SMSC             PCI_VENDOR_ID_EFAR
 #define PCI_DEVICE_ID_SMSC_LAN7430     (0x7430)
+#define PCI_DEVICE_ID_SMSC_LAN7431     (0x7431)
 
 #define PCI_CONFIG_LENGTH              (0x1000)
 
index 29c95423ab64604a9ef9629b3b5b42469994280d..2f49eb75f3cce3245b7162a2ee3cf664bc0ab7f1 100644 (file)
@@ -476,16 +476,16 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        if (err)
                goto err_destroy_flow;
 
-       err = nfp_flower_xmit_flow(netdev, flow_pay,
-                                  NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
-       if (err)
-               goto err_destroy_flow;
-
        flow_pay->tc_flower_cookie = flow->cookie;
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err)
-               goto err_destroy_flow;
+               goto err_release_metadata;
+
+       err = nfp_flower_xmit_flow(netdev, flow_pay,
+                                  NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+       if (err)
+               goto err_remove_rhash;
 
        port->tc_offload_cnt++;
 
@@ -494,6 +494,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
 
        return 0;
 
+err_remove_rhash:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+                                           &flow_pay->fl_node,
+                                           nfp_flower_table_params));
+err_release_metadata:
+       nfp_modify_flow_metadata(app, flow_pay);
 err_destroy_flow:
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
index 8e8fa823d611878c1ce166ae271a082b5ec84acd..69966dfc6e3d12f6b754dd94f0ca7fcc4018713f 100644 (file)
@@ -191,7 +191,7 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
 static void
 qed_dcbx_set_params(struct qed_dcbx_results *p_data,
                    struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                   bool enable, u8 prio, u8 tc,
+                   bool app_tlv, bool enable, u8 prio, u8 tc,
                    enum dcbx_protocol_type type,
                    enum qed_pci_personality personality)
 {
@@ -210,7 +210,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
                p_data->arr[type].dont_add_vlan0 = true;
 
        /* QM reconf data */
-       if (p_hwfn->hw_info.personality == personality)
+       if (app_tlv && p_hwfn->hw_info.personality == personality)
                qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
 
        /* Configure dcbx vlan priority in doorbell block for roce EDPM */
@@ -225,7 +225,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
 static void
 qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
                         struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                        bool enable, u8 prio, u8 tc,
+                        bool app_tlv, bool enable, u8 prio, u8 tc,
                         enum dcbx_protocol_type type)
 {
        enum qed_pci_personality personality;
@@ -240,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
 
                personality = qed_dcbx_app_update[i].personality;
 
-               qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
+               qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
                                    prio, tc, type, personality);
        }
 }
@@ -319,8 +319,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                                enable = true;
                        }
 
-                       qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
-                                                priority, tc, type);
+                       qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
+                                                enable, priority, tc, type);
                }
        }
 
@@ -341,7 +341,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        continue;
 
                enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
-               qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
+               qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
                                         priority, tc, type);
        }
 
index 78a638ec7c0aee931c9d56aa278430891e76562f..979f1e4bc18bfbc38946198ea6667ff516edc580 100644 (file)
@@ -6071,7 +6071,7 @@ static const char * const s_igu_fifo_error_strs[] = {
        "no error",
        "length error",
        "function disabled",
-       "VF sent command to attnetion address",
+       "VF sent command to attention address",
        "host sent prod update command",
        "read of during interrupt register while in MIMD mode",
        "access to PXP BAR reserved address",
index 7ceb2b97538d25d767c3d8cc7e7ab79d8b03e760..88a8576ca9ceae1b4adc69b8702f1b626e82e54f 100644 (file)
@@ -185,6 +185,10 @@ void qed_resc_free(struct qed_dev *cdev)
                        qed_iscsi_free(p_hwfn);
                        qed_ooo_free(p_hwfn);
                }
+
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+                       qed_rdma_info_free(p_hwfn);
+
                qed_iov_free(p_hwfn);
                qed_l2_free(p_hwfn);
                qed_dmae_info_free(p_hwfn);
@@ -481,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
        struct qed_qm_info *qm_info = &p_hwfn->qm_info;
 
        /* Can't have multiple flags set here */
-       if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1)
+       if (bitmap_weight((unsigned long *)&pq_flags,
+                         sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+               DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+               goto err;
+       }
+
+       if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
+               DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
                goto err;
+       }
 
        switch (pq_flags) {
        case PQ_FLAGS_RLS:
@@ -506,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
        }
 
 err:
-       DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags);
-       return NULL;
+       return &qm_info->start_pq;
 }
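
The root cause behind the first hunk: bitmap_weight() takes a count of bits,
so passing bare sizeof(pq_flags) inspected only the lowest four bits and the
multiple-flags check silently passed. A small sketch of the difference:

    /* Sketch: why the old nbits argument hid flags above bit 3. */
    static void example_pq_flags_check(void)
    {
            u32 pq_flags = 0x30;    /* two flags set, both above bit 3 */

            /* Buggy: nbits == sizeof(u32) == 4, bits 4..31 ignored. */
            WARN_ON(bitmap_weight((unsigned long *)&pq_flags,
                                  sizeof(pq_flags)) != 0);

            /* Fixed: nbits == 32, both set bits are counted. */
            WARN_ON(bitmap_weight((unsigned long *)&pq_flags,
                                  sizeof(pq_flags) * BITS_PER_BYTE) != 2);
    }
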
 
 /* save pq index in qm info */
@@ -531,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
 {
        u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
 
+       if (max_tc == 0) {
+               DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
+                      PQ_FLAGS_MCOS);
+               return p_hwfn->qm_info.start_pq;
+       }
+
        if (tc > max_tc)
                DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
 
-       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc;
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
 }
 
 u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
 {
        u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
 
+       if (max_vf == 0) {
+               DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n",
+                      PQ_FLAGS_VFS);
+               return p_hwfn->qm_info.start_pq;
+       }
+
        if (vf > max_vf)
                DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
 
-       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf;
+       return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
 }
 
 u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
@@ -1081,6 +1104,12 @@ int qed_resc_alloc(struct qed_dev *cdev)
                                goto alloc_err;
                }
 
+               if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+                       rc = qed_rdma_info_alloc(p_hwfn);
+                       if (rc)
+                               goto alloc_err;
+               }
+
                /* DMA info initialization */
                rc = qed_dmae_info_alloc(p_hwfn);
                if (rc)
@@ -2102,11 +2131,8 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
        if (!p_ptt)
                return -EAGAIN;
 
-       /* If roce info is allocated it means roce is initialized and should
-        * be enabled in searcher.
-        */
        if (p_hwfn->p_rdma_info &&
-           p_hwfn->b_rdma_enabled_in_prs)
+           p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
                qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
 
        /* Re-open incoming traffic */
index cc1b373c0ace56e08564d3527de9f5da3f87b4e4..46dc93d3b9b53db6586b791bc6ffcf65b756daba 100644 (file)
@@ -147,7 +147,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
                       "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
                       fcoe_pf_params->num_cqs,
                       p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
-               return -EINVAL;
+               rc = -EINVAL;
+               goto err;
        }
 
        p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
@@ -156,14 +157,14 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
 
        rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
        if (rc)
-               return rc;
+               goto err;
 
        cxt_info.iid = dummy_cid;
        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
        if (rc) {
                DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
                          dummy_cid);
-               return rc;
+               goto err;
        }
        p_cxt = cxt_info.p_cxt;
        SET_FIELD(p_cxt->tstorm_ag_context.flags3,
@@ -240,6 +241,10 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
        rc = qed_spq_post(p_hwfn, p_ent, NULL);
 
        return rc;
+
+err:
+       qed_sp_destroy_request(p_hwfn, p_ent);
+       return rc;
 }
 
 static int
index 0f0aba793352c406404b53306f4bfb454b70a8b6..b22f464ea3fa770e94327640e32a35972ee35745 100644 (file)
@@ -992,6 +992,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
         */
        do {
                index = p_sb_attn->sb_index;
+               /* finish reading index before the loop condition */
+               dma_rmb();
                attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
                attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
        } while (index != p_sb_attn->sb_index);
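
The barrier enforces the usual retry-read pattern for a DMA-updated status
block: snapshot the index, make that read complete before the payload reads,
then retry if the device moved the index meanwhile. The same loop, annotated:

    do {
            index = p_sb_attn->sb_index;    /* snapshot producer index */
            dma_rmb();                      /* index read finishes first */
            attn_bits = le32_to_cpu(p_sb_attn->atten_bits); /* payload */
            attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
    } while (index != p_sb_attn->sb_index); /* index moved: re-read */
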
index 1135387bd99d704f517679c4716760e39acce52c..4f8a685d1a55febcf78e3213a1c56130c8535213 100644 (file)
@@ -200,6 +200,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
                       "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
                       p_params->num_queues,
                       p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }
 
index 82a1bd1f8a8ce3fd66acc6b0cc0c9e7bf6a57305..67c02ea939062dea70ae6e806546fd8266dd08cf 100644 (file)
@@ -740,8 +740,7 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
 
        rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc) {
-               /* Return spq entry which is taken in qed_sp_init_request()*/
-               qed_spq_return_entry(p_hwfn, p_ent);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
 
@@ -1355,6 +1354,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
                        DP_NOTICE(p_hwfn,
                                  "%d is not supported yet\n",
                                  p_filter_cmd->opcode);
+                       qed_sp_destroy_request(p_hwfn, *pp_ent);
                        return -EINVAL;
                }
 
@@ -2056,13 +2056,13 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
        } else {
                rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
                if (rc)
-                       return rc;
+                       goto err;
 
                if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
                        rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
                                             &abs_rx_q_id);
                        if (rc)
-                               return rc;
+                               goto err;
 
                        p_ramrod->rx_qid_valid = 1;
                        p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
@@ -2083,6 +2083,10 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
                   (u64)p_params->addr, p_params->length);
 
        return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+       qed_sp_destroy_request(p_hwfn, p_ent);
+       return rc;
 }
 
 int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
index 35fd0db6a67777629fbaa7e1ab89c21b8030d31e..fff7f04d4525c51f15f7e349670a698354edc0f3 100644 (file)
@@ -1782,9 +1782,9 @@ static int qed_drain(struct qed_dev *cdev)
                        return -EBUSY;
                }
                rc = qed_mcp_drain(hwfn, ptt);
+               qed_ptt_release(hwfn, ptt);
                if (rc)
                        return rc;
-               qed_ptt_release(hwfn, ptt);
        }
 
        return 0;
index f40f654398a0782457240fa74bf81e0c65d7bf32..a96364df43203dbfe9b326a385a50b53dd1900c9 100644 (file)
@@ -1944,9 +1944,12 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
                             struct qed_ptt *p_ptt, u32 *p_speed_mask)
 {
        u32 transceiver_type, transceiver_state;
+       int ret;
 
-       qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
-                                    &transceiver_type);
+       ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+                                          &transceiver_type);
+       if (ret)
+               return ret;
 
        if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
                                     false)
index c71391b9c757a1b03f55f21cc641c4718bbce719..7873d6dfd91f55607a6d60b23b568488edf7d360 100644 (file)
@@ -140,22 +140,34 @@ static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
        return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
 }
 
-static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
-                         struct qed_ptt *p_ptt,
-                         struct qed_rdma_start_in_params *params)
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
 {
        struct qed_rdma_info *p_rdma_info;
-       u32 num_cons, num_tasks;
-       int rc = -ENOMEM;
 
-       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
-
-       /* Allocate a struct with current pf rdma info */
        p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
        if (!p_rdma_info)
-               return rc;
+               return -ENOMEM;
+
+       spin_lock_init(&p_rdma_info->lock);
 
        p_hwfn->p_rdma_info = p_rdma_info;
+       return 0;
+}
+
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
+{
+       kfree(p_hwfn->p_rdma_info);
+       p_hwfn->p_rdma_info = NULL;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
+{
+       struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+       u32 num_cons, num_tasks;
+       int rc = -ENOMEM;
+
+       DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
        if (QED_IS_IWARP_PERSONALITY(p_hwfn))
                p_rdma_info->proto = PROTOCOLID_IWARP;
        else
@@ -183,7 +195,7 @@ static int qed_rdma_alloc(struct qed_hwfn *p_hwfn,
        /* Allocate a struct with device params and fill it */
        p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
        if (!p_rdma_info->dev)
-               goto free_rdma_info;
+               return rc;
 
        /* Allocate a struct with port params and fill it */
        p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
@@ -298,8 +310,6 @@ free_rdma_port:
        kfree(p_rdma_info->port);
 free_rdma_dev:
        kfree(p_rdma_info->dev);
-free_rdma_info:
-       kfree(p_rdma_info);
 
        return rc;
 }
@@ -370,8 +380,6 @@ static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 
        kfree(p_rdma_info->port);
        kfree(p_rdma_info->dev);
-
-       kfree(p_rdma_info);
 }
 
 static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
@@ -679,8 +687,6 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
 
-       spin_lock_init(&p_hwfn->p_rdma_info->lock);
-
        qed_rdma_init_devinfo(p_hwfn, params);
        qed_rdma_init_port(p_hwfn);
        qed_rdma_init_events(p_hwfn, params);
@@ -727,7 +733,7 @@ static int qed_rdma_stop(void *rdma_cxt)
        /* Disable RoCE search */
        qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
        p_hwfn->b_rdma_enabled_in_prs = false;
-
+       p_hwfn->p_rdma_info->active = 0;
        qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
 
        ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
@@ -1236,7 +1242,8 @@ qed_rdma_create_qp(void *rdma_cxt,
        u8 max_stats_queues;
        int rc;
 
-       if (!rdma_cxt || !in_params || !out_params || !p_hwfn->p_rdma_info) {
+       if (!rdma_cxt || !in_params || !out_params ||
+           !p_hwfn->p_rdma_info->active) {
                DP_ERR(p_hwfn->cdev,
                       "qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
                       rdma_cxt, in_params, out_params);
@@ -1514,6 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt,
        default:
                rc = -EINVAL;
                DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
        SET_FIELD(p_ramrod->flags1,
@@ -1801,8 +1809,8 @@ bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
 {
        bool result;
 
-       /* if rdma info has not been allocated, naturally there are no qps */
-       if (!p_hwfn->p_rdma_info)
+       /* if rdma wasn't activated yet, naturally there are no qps */
+       if (!p_hwfn->p_rdma_info->active)
                return false;
 
        spin_lock_bh(&p_hwfn->p_rdma_info->lock);
@@ -1848,7 +1856,7 @@ static int qed_rdma_start(void *rdma_cxt,
        if (!p_ptt)
                goto err;
 
-       rc = qed_rdma_alloc(p_hwfn, p_ptt, params);
+       rc = qed_rdma_alloc(p_hwfn);
        if (rc)
                goto err1;
 
@@ -1857,6 +1865,7 @@ static int qed_rdma_start(void *rdma_cxt,
                goto err2;
 
        qed_ptt_release(p_hwfn, p_ptt);
+       p_hwfn->p_rdma_info->active = 1;
 
        return rc;
 
index 6f722ee8ee945b13ee6f33df82d1692c0ae18304..3689fe3e593542fc487167aae73da99156add030 100644 (file)
@@ -102,6 +102,7 @@ struct qed_rdma_info {
        u16 max_queue_zones;
        enum protocol_type proto;
        struct qed_iwarp_info iwarp;
+       u8 active:1;
 };
 
 struct qed_rdma_qp {
@@ -176,10 +177,14 @@ struct qed_rdma_qp {
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
 #else
 static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt) {}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
 #endif
 
 int
index f9167d1354bbef3ccf2e972e8c002e64bbc24cce..e49fada854108718bf1dc5ea45fda2d4d264ded2 100644 (file)
@@ -745,6 +745,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                DP_NOTICE(p_hwfn,
                          "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
                          rc);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return rc;
        }
 
index e95431f6acd46fb6ace4c20cfe227388c890cdea..3157c0d9944177e784a62ef983dd2adc3b5c0f11 100644 (file)
@@ -167,6 +167,9 @@ struct qed_spq_entry {
        enum spq_mode                   comp_mode;
        struct qed_spq_comp_cb          comp_cb;
        struct qed_spq_comp_done        comp_done; /* SPQ_MODE_EBLOCK */
+
+       /* Entry actually posted for an unlimited-pending entry (EBLOCK mode) */
+       struct qed_spq_entry            *post_ent;
 };
 
 struct qed_eq {
@@ -396,6 +399,17 @@ struct qed_sp_init_data {
        struct qed_spq_comp_cb *p_comp_data;
 };
 
+/**
+ * @brief Returns an SPQ entry to the pool / frees the entry if allocated.
+ *        Should be called in error flows after initializing the SPQ entry
+ *        and before posting it.
+ *
+ * @param p_hwfn
+ * @param p_ent
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+                           struct qed_spq_entry *p_ent);
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd,
index 77b6248ad3b97d3a45caf27825faddabf9695a5b..888274fa208bc768b2ab9db2514407573bfab2e1 100644 (file)
 #include "qed_sp.h"
 #include "qed_sriov.h"
 
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+                           struct qed_spq_entry *p_ent)
+{
+       /* qed_spq_get_entry() can either get an entry from the free_pool,
+        * or, if no entries are left, allocate a new entry and add it to
+        * the unlimited_pending list.
+        */
+       if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+               kfree(p_ent);
+       else
+               qed_spq_return_entry(p_hwfn, p_ent);
+}
+
 int qed_sp_init_request(struct qed_hwfn *p_hwfn,
                        struct qed_spq_entry **pp_ent,
                        u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
@@ -80,7 +93,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
 
        case QED_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
-                       return -EINVAL;
+                       goto err;
 
                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;
@@ -95,7 +108,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
-               return -EINVAL;
+               goto err;
        }
 
        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
@@ -109,6 +122,11 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
        memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
 
        return 0;
+
+err:
+       qed_sp_destroy_request(p_hwfn, p_ent);
+
+       return -EINVAL;
 }
 
 static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
index c4a6274dd625c2bf419cc78dfab20f899ada494d..0a9c5bb0fa486658a23132680a1aeddb9a72b518 100644 (file)
@@ -142,6 +142,7 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
 
        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_ptt);
+       qed_ptt_release(p_hwfn, p_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
@@ -150,18 +151,15 @@ static int qed_spq_block(struct qed_hwfn *p_hwfn,
        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
-               goto out;
+               return 0;
 
        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
-       if (comp_done->done == 1)
+       if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
-out:
-       qed_ptt_release(p_hwfn, p_ptt);
-       return 0;
-
+               return 0;
+       }
 err:
-       qed_ptt_release(p_hwfn, p_ptt);
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
@@ -685,6 +683,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                        /* EBLOCK responsible to free the allocated p_ent */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);
+                       else
+                               p_ent->post_ent = p_en2;
 
                        p_ent = p_en2;
                }
@@ -767,6 +767,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
 }
 
+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+       u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+       struct qed_spq *p_spq = p_hwfn->p_spq;
+
+       __set_bit(pos, p_spq->p_comp_bitmap);
+       while (test_bit(p_spq->comp_bitmap_idx,
+                       p_spq->p_comp_bitmap)) {
+               __clear_bit(p_spq->comp_bitmap_idx,
+                           p_spq->p_comp_bitmap);
+               p_spq->comp_bitmap_idx++;
+               qed_chain_return_produced(&p_spq->chain);
+       }
+}
+
 int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
 {
@@ -824,11 +843,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
                                   p_ent->queue == &p_spq->unlimited_pending);
 
                if (p_ent->queue == &p_spq->unlimited_pending) {
-                       /* This is an allocated p_ent which does not need to
-                        * return to pool.
-                        */
+                       struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
                        kfree(p_ent);
-                       return rc;
+
+                       /* Return the entry which was actually posted */
+                       p_ent = p_post_ent;
                }
 
                if (rc)
@@ -842,7 +862,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
-       qed_chain_return_produced(&p_spq->chain);
+       qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
 
 spq_post_fail:
        /* return to the free pool */
@@ -874,25 +894,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
-                       u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
                        list_del(&p_ent->list);
-
-                       /* Avoid overriding of SPQ entries when getting
-                        * out-of-order completions, by marking the completions
-                        * in a bitmap and increasing the chain consumer only
-                        * for the first successive completed entries.
-                        */
-                       __set_bit(pos, p_spq->p_comp_bitmap);
-
-                       while (test_bit(p_spq->comp_bitmap_idx,
-                                       p_spq->p_comp_bitmap)) {
-                               __clear_bit(p_spq->comp_bitmap_idx,
-                                           p_spq->p_comp_bitmap);
-                               p_spq->comp_bitmap_idx++;
-                               qed_chain_return_produced(&p_spq->chain);
-                       }
-
+                       qed_spq_comp_bmap_update(p_hwfn, echo);
                        p_spq->comp_count++;
                        found = p_ent;
                        break;
@@ -931,11 +934,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");
 
-       if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
-           (found->queue == &p_spq->unlimited_pending))
+       if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
                /* EBLOCK  is responsible for returning its own entry into the
-                * free list, unless it originally added the entry into the
-                * unlimited pending list.
+                * free list.
                 */
                qed_spq_return_entry(p_hwfn, found);
 
index 9b08a9d9e15130f0518b1f7608bbaa36e6eb15b0..ca6290fa0f30940265ca1590de148c94eb2cf18e 100644 (file)
@@ -101,6 +101,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
        default:
                DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
                          p_hwfn->hw_info.personality);
+               qed_sp_destroy_request(p_hwfn, p_ent);
                return -EINVAL;
        }
 
index 9647578cbe6a8fec82409c4eadf9aee02f6c7971..14f26bf3b388bdce2913241d5790ec52617c249d 100644 (file)
@@ -459,7 +459,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                         struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
                         struct qlcnic_host_tx_ring *tx_ring)
 {
-       u8 l4proto, opcode = 0, hdr_len = 0;
+       u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
        u16 flags = 0, vlan_tci = 0;
        int copied, offset, copy_len, size;
        struct cmd_desc_type0 *hwdesc;
@@ -472,14 +472,16 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
                flags = QLCNIC_FLAGS_VLAN_TAGGED;
                vlan_tci = ntohs(vh->h_vlan_TCI);
                protocol = ntohs(vh->h_vlan_encapsulated_proto);
+               tag_vlan = 1;
        } else if (skb_vlan_tag_present(skb)) {
                flags = QLCNIC_FLAGS_VLAN_OOB;
                vlan_tci = skb_vlan_tag_get(skb);
+               tag_vlan = 1;
        }
        if (unlikely(adapter->tx_pvid)) {
-               if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+               if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
                        return -EIO;
-               if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+               if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
                        goto set_flags;
 
                flags = QLCNIC_FLAGS_VLAN_OOB;
index 0afc3d335d562d24466b9192aea291b910ebcdfe..d11c16aeb19ad45759c44e1dac2bb259cf976054 100644 (file)
@@ -234,7 +234,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
                      struct net_device *real_dev,
                      struct rmnet_endpoint *ep)
 {
-       struct rmnet_priv *priv;
+       struct rmnet_priv *priv = netdev_priv(rmnet_dev);
        int rc;
 
        if (ep->egress_dev)
@@ -247,6 +247,8 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
        rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
        rmnet_dev->hw_features |= NETIF_F_SG;
 
+       priv->real_dev = real_dev;
+
        rc = register_netdevice(rmnet_dev);
        if (!rc) {
                ep->egress_dev = rmnet_dev;
@@ -255,9 +257,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
 
                rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
 
-               priv = netdev_priv(rmnet_dev);
                priv->mux_id = id;
-               priv->real_dev = real_dev;
 
                netdev_dbg(rmnet_dev, "rmnet dev created\n");
        }
index 81045dfa1cd898726da2e2e37520be720e0e8842..44f6e4873aadd16b35ae34c3561393fc010a698b 100644 (file)
@@ -571,6 +571,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
        struct cp_private *cp;
        int handled = 0;
        u16 status;
+       u16 mask;
 
        if (unlikely(dev == NULL))
                return IRQ_NONE;
@@ -578,6 +579,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
 
        spin_lock(&cp->lock);
 
+       mask = cpr16(IntrMask);
+       if (!mask)
+               goto out_unlock;
+
        status = cpr16(IntrStatus);
        if (!status || (status == 0xFFFF))
                goto out_unlock;
index 6732f5cbde081052ce9e1c2417451118cadbff8f..7c7cd9d94bcc18ddb1d5685fc59f608cec765be8 100644 (file)
                                 NETIF_MSG_TX_ERR)
 
 /* Parameter for descriptor */
-#define AVE_NR_TXDESC          32      /* Tx descriptor */
-#define AVE_NR_RXDESC          64      /* Rx descriptor */
+#define AVE_NR_TXDESC          64      /* Tx descriptor */
+#define AVE_NR_RXDESC          256     /* Rx descriptor */
 
 #define AVE_DESC_OFS_CMDSTS    0
 #define AVE_DESC_OFS_ADDRL     4
 
 /* Parameter for ethernet frame */
 #define AVE_MAX_ETHFRAME       1518
+#define AVE_FRAME_HEADROOM     2
 
 /* Parameter for interrupt */
 #define AVE_INTM_COUNT         20
@@ -576,12 +577,13 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
 
        skb = priv->rx.desc[entry].skbs;
        if (!skb) {
-               skb = netdev_alloc_skb_ip_align(ndev,
-                                               AVE_MAX_ETHFRAME);
+               skb = netdev_alloc_skb(ndev, AVE_MAX_ETHFRAME);
                if (!skb) {
                        netdev_err(ndev, "can't allocate skb for Rx\n");
                        return -ENOMEM;
                }
+               skb->data += AVE_FRAME_HEADROOM;
+               skb->tail += AVE_FRAME_HEADROOM;
        }
 
        /* set disable to cmdsts */
@@ -594,12 +596,12 @@ static int ave_rxdesc_prepare(struct net_device *ndev, int entry)
         * - Rx buffer begins with 2 byte headroom, and data will be put from
         *   (buffer + 2).
         * To satisfy this, specify the address to put back the buffer
-        * pointer advanced by NET_IP_ALIGN by netdev_alloc_skb_ip_align(),
-        * and expand the map size by NET_IP_ALIGN.
+        * pointer advanced by AVE_FRAME_HEADROOM, and expand the map size
+        * by AVE_FRAME_HEADROOM.
         */
        ret = ave_dma_map(ndev, &priv->rx.desc[entry],
-                         skb->data - NET_IP_ALIGN,
-                         AVE_MAX_ETHFRAME + NET_IP_ALIGN,
+                         skb->data - AVE_FRAME_HEADROOM,
+                         AVE_MAX_ETHFRAME + AVE_FRAME_HEADROOM,
                          DMA_FROM_DEVICE, &paddr);
        if (ret) {
                netdev_err(ndev, "can't map skb for Rx\n");
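
For reference, the two open-coded pointer bumps above match the standard
helper for an empty skb; a minimal equivalent sketch:

    /* Reserve headroom by advancing data and tail together; valid only
     * while the skb holds no data yet.
     */
    skb_reserve(skb, AVE_FRAME_HEADROOM);
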
@@ -1689,9 +1691,10 @@ static int ave_probe(struct platform_device *pdev)
                 pdev->name, pdev->id);
 
        /* Register as a NAPI supported driver */
-       netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx, priv->rx.ndesc);
+       netif_napi_add(ndev, &priv->napi_rx, ave_napi_poll_rx,
+                      NAPI_POLL_WEIGHT);
        netif_tx_napi_add(ndev, &priv->napi_tx, ave_napi_poll_tx,
-                         priv->tx.ndesc);
+                         NAPI_POLL_WEIGHT);
 
        platform_set_drvdata(pdev, ndev);
 
@@ -1913,5 +1916,6 @@ static struct platform_driver ave_driver = {
 };
 module_platform_driver(ave_driver);
 
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
 MODULE_DESCRIPTION("Socionext UniPhier AVE ethernet driver");
 MODULE_LICENSE("GPL v2");
index b1b305f8f4143626fc664445182c5e2afc38b87c..272b9ca663148f36ccb7ae45363df773f2dd4c4c 100644 (file)
@@ -365,7 +365,8 @@ struct dma_features {
 
 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
 #define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
+/* RX Buffer size must be < 8191 and multiple of 4/8/16 bytes */
+#define BUF_SIZE_8KiB 8188
 #define BUF_SIZE_4KiB 4096
 #define BUF_SIZE_2KiB 2048
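
The constraint in the new comment can be checked at build time; a hedged
sketch (placing the BUILD_BUG_ONs in any init function is illustrative):

    /* 8188 < 8191 fits the descriptor size field, and 8188 % 4 == 0
     * keeps the buffer a multiple of the narrowest (4-byte) bus width.
     */
    BUILD_BUG_ON(BUF_SIZE_8KiB > 8190);
    BUILD_BUG_ON(BUF_SIZE_8KiB % 4 != 0);
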
 
index ca9d7e48034ceb33f5f4eb4db5b99691ed1a278f..40d6356a7e73c213f0d1d073387b8605bb4f3726 100644 (file)
@@ -31,7 +31,7 @@
 /* Enhanced descriptors */
 static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
 {
-       p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
+       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
                        << ERDES1_BUFFER2_SIZE_SHIFT)
                   & ERDES1_BUFFER2_SIZE_MASK);
 
index 77914c89d7497de6f9a251196fe079f49364ee13..5ef91a790f9d16fbd122f71e130cf7ecf5249a68 100644 (file)
@@ -262,7 +262,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
                                  int mode, int end)
 {
        p->des0 |= cpu_to_le32(RDES0_OWN);
-       p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
+       p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
 
        if (mode == STMMAC_CHAIN_MODE)
                ehn_desc_rx_set_on_chain(p);
index abc3f85270cd0709e667a17112ff2d9d7f4ff8f4..d8c5bc4122195d73f7150f2775797cc6ba9a3393 100644 (file)
@@ -140,7 +140,7 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
 static int set_16kib_bfsize(int mtu)
 {
        int ret = 0;
-       if (unlikely(mtu >= BUF_SIZE_8KiB))
+       if (unlikely(mtu > BUF_SIZE_8KiB))
                ret = BUF_SIZE_16KiB;
        return ret;
 }
index 076a8be18d6754c489ecaf031fb65db79f58c29b..5551fead8f6646f327dcfc7d5b98d8f3483f3eeb 100644 (file)
@@ -2550,12 +2550,6 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
                        netdev_warn(priv->dev, "PTP init failed\n");
        }
 
-#ifdef CONFIG_DEBUG_FS
-       ret = stmmac_init_fs(dev);
-       if (ret < 0)
-               netdev_warn(priv->dev, "%s: failed debugFS registration\n",
-                           __func__);
-#endif
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
        if (priv->use_riwt) {
@@ -2756,10 +2750,6 @@ static int stmmac_release(struct net_device *dev)
 
        netif_carrier_off(dev);
 
-#ifdef CONFIG_DEBUG_FS
-       stmmac_exit_fs(dev);
-#endif
-
        stmmac_release_ptp(priv);
 
        return 0;
@@ -3899,6 +3889,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
        u32 tx_count = priv->plat->tx_queues_to_use;
        u32 queue;
 
+       if ((dev->flags & IFF_UP) == 0)
+               return 0;
+
        for (queue = 0; queue < rx_count; queue++) {
                struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
@@ -4397,6 +4390,13 @@ int stmmac_dvr_probe(struct device *device,
                goto error_netdev_register;
        }
 
+#ifdef CONFIG_DEBUG_FS
+       ret = stmmac_init_fs(ndev);
+       if (ret < 0)
+               netdev_warn(priv->dev, "%s: failed debugFS registration\n",
+                           __func__);
+#endif
+
        return ret;
 
 error_netdev_register:
@@ -4432,6 +4432,9 @@ int stmmac_dvr_remove(struct device *dev)
 
        netdev_info(priv->dev, "%s: removing driver", __func__);
 
+#ifdef CONFIG_DEBUG_FS
+       stmmac_exit_fs(ndev);
+#endif
        stmmac_stop_all_dma(priv);
 
        stmmac_mac_set(priv, priv->ioaddr, false);
index b72ef171477e0ec6dfba85c09815226a584ec30b..bdd351597b55251b35e17daf43aeff643912e33b 100644 (file)
@@ -243,7 +243,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
  */
 int stmmac_mdio_reset(struct mii_bus *bus)
 {
-#if defined(CONFIG_STMMAC_PLATFORM)
+#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
        struct net_device *ndev = bus->priv;
        struct stmmac_priv *priv = netdev_priv(ndev);
        unsigned int mii_address = priv->hw->mii.addr;
index ef9538ee53d0db7f43eae4298dd39258b4c39122..82412691ee66bf13b488db6d61072f239d27d9c3 100644 (file)
@@ -3605,7 +3605,7 @@ static const char velocity_gstrings[][ETH_GSTRING_LEN] = {
        "tx_jumbo",
        "rx_mac_control_frames",
        "tx_mac_control_frames",
-       "rx_frame_alignement_errors",
+       "rx_frame_alignment_errors",
        "rx_long_ok",
        "rx_long_err",
        "tx_sqe_errors",
index 3b7f10a5f06a660fbf6408f7adbcd0851ad79921..c5cae8e74dc40720eb2db3a3d91118a8ceece281 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
 /*     FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
  *
  *     Copyright (c) 2018  Maciej W. Rozycki
@@ -56,7 +56,7 @@
 #define DRV_VERSION "v.1.1.4"
 #define DRV_RELDATE "Oct  6 2018"
 
-static char version[] =
+static const char version[] =
        DRV_NAME ": " DRV_VERSION "  " DRV_RELDATE "  Maciej W. Rozycki\n";
 
 MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
@@ -784,7 +784,7 @@ err_rx:
 static void fza_tx_smt(struct net_device *dev)
 {
        struct fza_private *fp = netdev_priv(dev);
-       struct fza_buffer_tx __iomem *smt_tx_ptr, *skb_data_ptr;
+       struct fza_buffer_tx __iomem *smt_tx_ptr;
        int i, len;
        u32 own;
 
@@ -799,6 +799,7 @@ static void fza_tx_smt(struct net_device *dev)
 
                if (!netif_queue_stopped(dev)) {
                        if (dev_nit_active(dev)) {
+                               struct fza_buffer_tx *skb_data_ptr;
                                struct sk_buff *skb;
 
                                /* Length must be a multiple of 4 as only word
index b06acf32738ea5f32083d4403a3b3c95315396d0..93bda61be8e382646f3c9213e519690ef8cf3ab4 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0+ */
 /*     FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
  *
  *     Copyright (c) 2018  Maciej W. Rozycki
@@ -235,6 +235,7 @@ struct fza_ring_cmd {
 #define FZA_RING_CMD           0x200400        /* command ring address */
 #define FZA_RING_CMD_SIZE      0x40            /* command descriptor ring
                                                 * size
+                                                */
 /* Command constants. */
 #define FZA_RING_CMD_MASK      0x7fffffff
 #define FZA_RING_CMD_NOP       0x00000000      /* nop */
index fc8d5f1ee1addeebd4b3748032a1632b1f41df06..0da3d36b283becf838bed9357c0520e0714529e5 100644 (file)
@@ -608,7 +608,7 @@ static int macvlan_open(struct net_device *dev)
                goto hash_add;
        }
 
-       err = -EBUSY;
+       err = -EADDRINUSE;
        if (macvlan_addr_busy(vlan->port, dev->dev_addr))
                goto out;
 
@@ -706,7 +706,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
        } else {
                /* Rehash and update the device filters */
                if (macvlan_addr_busy(vlan->port, addr))
-                       return -EBUSY;
+                       return -EADDRINUSE;
 
                if (!macvlan_passthru(port)) {
                        err = dev_uc_add(lowerdev, addr);
@@ -747,6 +747,9 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
                return dev_set_mac_address(vlan->lowerdev, addr);
        }
 
+       if (macvlan_addr_busy(vlan->port, addr->sa_data))
+               return -EADDRINUSE;
+
        return macvlan_sync_address(dev, addr->sa_data);
 }
 
index b12023bc2cab5feb15ceedbe2fc357dfcf37627e..a5bab614ff8459788493297bdcaa897106f7f1ba 100644 (file)
@@ -71,7 +71,6 @@ static unsigned int tx_start = 10;
 static unsigned int tx_stop = 5;
 
 struct ntb_netdev {
-       struct list_head list;
        struct pci_dev *pdev;
        struct net_device *ndev;
        struct ntb_transport_qp *qp;
@@ -81,8 +80,6 @@ struct ntb_netdev {
 #define        NTB_TX_TIMEOUT_MS       1000
 #define        NTB_RXQ_SIZE            100
 
-static LIST_HEAD(dev_list);
-
 static void ntb_netdev_event_handler(void *data, int link_is_up)
 {
        struct net_device *ndev = data;
@@ -236,7 +233,7 @@ static void ntb_netdev_tx_timer(struct timer_list *t)
        struct net_device *ndev = dev->ndev;
 
        if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
-               mod_timer(&dev->tx_timer, jiffies + msecs_to_jiffies(tx_time));
+               mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
        } else {
                /* Make sure anybody stopping the queue after this sees the new
                 * value of ntb_transport_tx_free_entry()
@@ -452,7 +449,7 @@ static int ntb_netdev_probe(struct device *client_dev)
        if (rc)
                goto err1;
 
-       list_add(&dev->list, &dev_list);
+       dev_set_drvdata(client_dev, ndev);
        dev_info(&pdev->dev, "%s created\n", ndev->name);
        return 0;
 
@@ -465,27 +462,8 @@ err:
 
 static void ntb_netdev_remove(struct device *client_dev)
 {
-       struct ntb_dev *ntb;
-       struct net_device *ndev;
-       struct pci_dev *pdev;
-       struct ntb_netdev *dev;
-       bool found = false;
-
-       ntb = dev_ntb(client_dev->parent);
-       pdev = ntb->pdev;
-
-       list_for_each_entry(dev, &dev_list, list) {
-               if (dev->pdev == pdev) {
-                       found = true;
-                       break;
-               }
-       }
-       if (!found)
-               return;
-
-       list_del(&dev->list);
-
-       ndev = dev->ndev;
+       struct net_device *ndev = dev_get_drvdata(client_dev);
+       struct ntb_netdev *dev = netdev_priv(ndev);
 
        unregister_netdev(ndev);
        ntb_transport_free_queue(dev->qp);
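
The ntb_netdev hunks replace a module-global device list (and an O(n) search in remove) with per-device driver data. A trimmed sketch of the dev_set_drvdata()/dev_get_drvdata() pairing, assuming the usual allocation and registration steps succeed:

#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_probe(struct device *client_dev)
{
	struct net_device *ndev = alloc_etherdev(0);

	if (!ndev)
		return -ENOMEM;
	/* ... queue setup and register_netdev() elided ... */

	/* Stash the netdev on the client device itself instead of
	 * appending it to a global list.
	 */
	dev_set_drvdata(client_dev, ndev);
	return 0;
}

static void example_remove(struct device *client_dev)
{
	/* O(1) lookup: no list walk, no "found" flag. */
	struct net_device *ndev = dev_get_drvdata(client_dev);

	unregister_netdev(ndev);
	free_netdev(ndev);
}
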
index e86ea105c8022290bf1b02d781fe536d90d74d5a..70453701045371e9d2b64cd3f65e070c4b20faeb 100644 (file)
@@ -92,7 +92,7 @@ static int bcm54612e_config_init(struct phy_device *phydev)
        return 0;
 }
 
-static int bcm5481x_config(struct phy_device *phydev)
+static int bcm54xx_config_clock_delay(struct phy_device *phydev)
 {
        int rc, val;
 
@@ -429,7 +429,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
        ret = genphy_config_aneg(phydev);
 
        /* Then we can set up the delay. */
-       bcm5481x_config(phydev);
+       bcm54xx_config_clock_delay(phydev);
 
        if (of_property_read_bool(np, "enet-phy-lane-swap")) {
                /* Lane Swap - Undocumented register...magic! */
@@ -442,6 +442,19 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
        return ret;
 }
 
+static int bcm54616s_config_aneg(struct phy_device *phydev)
+{
+       int ret;
+
+       /* Aneg first. */
+       ret = genphy_config_aneg(phydev);
+
+       /* Then we can set up the delay. */
+       bcm54xx_config_clock_delay(phydev);
+
+       return ret;
+}
+
 static int brcm_phy_setbits(struct phy_device *phydev, int reg, int set)
 {
        int val;
@@ -636,6 +649,7 @@ static struct phy_driver broadcom_drivers[] = {
        .features       = PHY_GBIT_FEATURES,
        .flags          = PHY_HAS_INTERRUPT,
        .config_init    = bcm54xx_config_init,
+       .config_aneg    = bcm54616s_config_aneg,
        .ack_interrupt  = bcm_phy_ack_intr,
        .config_intr    = bcm_phy_config_intr,
 }, {
index 33265747bf3994c668cfcb2a8f7f3d9f768d370a..0fbcedcdf6e2ae5b6d9d8ecca66937532db263cb 100644 (file)
@@ -63,7 +63,7 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
                 * assume the pin serves as pull-up. If direction is
                 * output, the default value is high.
                 */
-               gpiod_set_value(bitbang->mdo, 1);
+               gpiod_set_value_cansleep(bitbang->mdo, 1);
                return;
        }
 
@@ -78,7 +78,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       return gpiod_get_value(bitbang->mdio);
+       return gpiod_get_value_cansleep(bitbang->mdio);
 }
 
 static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -87,9 +87,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
        if (bitbang->mdo)
-               gpiod_set_value(bitbang->mdo, what);
+               gpiod_set_value_cansleep(bitbang->mdo, what);
        else
-               gpiod_set_value(bitbang->mdio, what);
+               gpiod_set_value_cansleep(bitbang->mdio, what);
 }
 
 static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -97,7 +97,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
        struct mdio_gpio_info *bitbang =
                container_of(ctrl, struct mdio_gpio_info, ctrl);
 
-       gpiod_set_value(bitbang->mdc, what);
+       gpiod_set_value_cansleep(bitbang->mdc, what);
 }
 
 static const struct mdiobb_ops mdio_gpio_ops = {
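
The mdio-gpio hunk swaps every gpiod accessor for its _cansleep variant. Bit-banged MDIO always runs in process context, so it may legally drive GPIOs that sit behind a sleeping bus (an I2C expander, say); the non-_cansleep accessors would warn on such lines. A small sketch of the distinction:

#include <linux/delay.h>
#include <linux/gpio/consumer.h>

/* Process context only: the GPIO itself may sleep (e.g. behind I2C).
 * From hard-IRQ context, gpiod_set_value() and a non-sleeping GPIO
 * would be required instead.
 */
static void example_pulse(struct gpio_desc *gpio)
{
	gpiod_set_value_cansleep(gpio, 1);
	udelay(1);
	gpiod_set_value_cansleep(gpio, 0);
}
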
index a2e59f4f6f01fcfa5b4c754529a68ac1fb57e095..7cae175177449fc4eacae8a3a972bba59043ce6e 100644 (file)
@@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev)
 
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
        mutex_lock(&phydev->lock);
-       rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
-       if (rc < 0)
-               goto out_unlock;
 
-       reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL);
-       reg_val &= ~(RGMII_RX_CLK_DELAY_MASK);
-       reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS);
-       phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val);
+       reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS;
+
+       rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+                             MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK,
+                             reg_val);
 
-out_unlock:
-       rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc);
        mutex_unlock(&phydev->lock);
 
        return rc;
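
phy_modify_paged() folds the removed select/read/modify/write/restore sequence, including its error handling, into one call. A sketch of roughly what the helper does internally, built from the phy-core page accessors rather than copied from this patch:

#include <linux/phy.h>

static int example_modify_paged(struct phy_device *phydev, int page,
				u32 regnum, u16 mask, u16 set)
{
	int ret = 0, oldpage;

	oldpage = phy_select_page(phydev, page);   /* takes the MDIO lock */
	if (oldpage >= 0)
		ret = __phy_modify(phydev, regnum, mask, set);

	/* Restores the old page, drops the lock, folds the error codes. */
	return phy_restore_page(phydev, oldpage, ret);
}
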
index ab33d1777132e62a141e7ca76c11ef694a6f4e63..18e92c19c5ab8716f6a87e905689a2b0c16b56d4 100644 (file)
@@ -1880,20 +1880,17 @@ EXPORT_SYMBOL(genphy_loopback);
 
 static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
 {
-       phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
-                              PHY_10BT_FEATURES);
-
        switch (max_speed) {
-       default:
-               return -ENOTSUPP;
-       case SPEED_1000:
-               phydev->supported |= PHY_1000BT_FEATURES;
+       case SPEED_10:
+               phydev->supported &= ~PHY_100BT_FEATURES;
                /* fall through */
        case SPEED_100:
-               phydev->supported |= PHY_100BT_FEATURES;
-               /* fall through */
-       case SPEED_10:
-               phydev->supported |= PHY_10BT_FEATURES;
+               phydev->supported &= ~PHY_1000BT_FEATURES;
+               break;
+       case SPEED_1000:
+               break;
+       default:
+               return -ENOTSUPP;
        }
 
        return 0;
@@ -2197,6 +2194,14 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
        new_driver->mdiodrv.driver.remove = phy_remove;
        new_driver->mdiodrv.driver.owner = owner;
 
+       /* The following works around an issue where the PHY driver doesn't bind
+        * to the device, resulting in the genphy driver being used instead of
+        * the dedicated driver. The root cause of the issue isn't known yet
+        * and seems to be in the base driver core. Once this is fixed we may
+        * remove this workaround.
+        */
+       new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
        retval = driver_register(&new_driver->mdiodrv.driver);
        if (retval) {
                pr_err("%s: Error %d in registering driver\n",
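
__set_phy_supported() now masks features off the existing set rather than rebuilding it from scratch, so capping the speed can no longer re-enable link modes the PHY never advertised. Callers reach it through phy_set_max_speed(); a hedged usage sketch from a hypothetical MAC driver probe path:

#include <linux/phy.h>

static int example_cap_phy(struct phy_device *phydev)
{
	/* With the change above this only strips modes beyond 100 Mb/s
	 * from phydev->supported; it no longer ORs 10/100 modes back in.
	 */
	return phy_set_max_speed(phydev, SPEED_100);
}
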
index 7fc8508b5231d94beab4c45bf7666d15d4ef786f..271e8adc39f1005dcc48b678ef528d442f12b9f8 100644 (file)
@@ -220,7 +220,7 @@ static struct phy_driver realtek_drvs[] = {
                .flags          = PHY_HAS_INTERRUPT,
        }, {
                .phy_id         = 0x001cc816,
-               .name           = "RTL8201F 10/100Mbps Ethernet",
+               .name           = "RTL8201F Fast Ethernet",
                .phy_id_mask    = 0x001fffff,
                .features       = PHY_BASIC_FEATURES,
                .flags          = PHY_HAS_INTERRUPT,
index 83060fb349f4d5d458e762eb540afe0de2b935d6..ad9db652874dc737d0a76f16b9af4c56749d60fc 100644 (file)
@@ -162,7 +162,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
        /* 1000Base-PX or 1000Base-BX10 */
        if ((id->base.e_base_px || id->base.e_base_bx10) &&
            br_min <= 1300 && br_max >= 1200)
-               phylink_set(support, 1000baseX_Full);
+               phylink_set(modes, 1000baseX_Full);
 
        /* For active or passive cables, select the link modes
         * based on the bit rates and the cable compliance bytes.
index e9f101c9bae2ce1d9bde5dbe0d473119ead760e6..bfbb39f935545794c151c18de29655a574fd8bcd 100644 (file)
@@ -216,9 +216,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                         * it just reports sending a packet to the target
                         * (without actual packet transfer).
                         */
-                       dev_kfree_skb_any(skb);
                        ndev->stats.tx_packets++;
                        ndev->stats.tx_bytes += skb->len;
+                       dev_kfree_skb_any(skb);
                }
        }
 
index db633ae9f784a7cf0128e922a046160d25b898e1..364f514d56d87368e173c56a8e28b45debe08af3 100644 (file)
@@ -985,8 +985,6 @@ static void team_port_disable(struct team *team,
        team->en_port_count--;
        team_queue_override_port_del(team, port);
        team_adjust_ops(team);
-       team_notify_peers(team);
-       team_mcast_rejoin(team);
        team_lower_state_changed(port);
 }
 
index 060135ceaf0e1a76615b2e6cd6830d42ac8becdf..005020042be946a23609dca3dfdcb589d60eff01 100644 (file)
@@ -1536,6 +1536,7 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
 
        if (!rx_batched || (!more && skb_queue_empty(queue))) {
                local_bh_disable();
+               skb_record_rx_queue(skb, tfile->queue_index);
                netif_receive_skb(skb);
                local_bh_enable();
                return;
@@ -1555,8 +1556,11 @@ static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
                struct sk_buff *nskb;
 
                local_bh_disable();
-               while ((nskb = __skb_dequeue(&process_queue)))
+               while ((nskb = __skb_dequeue(&process_queue))) {
+                       skb_record_rx_queue(nskb, tfile->queue_index);
                        netif_receive_skb(nskb);
+               }
+               skb_record_rx_queue(skb, tfile->queue_index);
                netif_receive_skb(skb);
                local_bh_enable();
        }
@@ -2289,9 +2293,9 @@ static void tun_setup(struct net_device *dev)
 static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
 {
-       if (!data)
-               return 0;
-       return -EINVAL;
+       NL_SET_ERR_MSG(extack,
+                      "tun/tap creation via rtnetlink is not supported.");
+       return -EOPNOTSUPP;
 }
 
 static size_t tun_get_size(const struct net_device *dev)
@@ -2381,6 +2385,7 @@ static int tun_xdp_one(struct tun_struct *tun,
                       struct tun_file *tfile,
                       struct xdp_buff *xdp, int *flush)
 {
+       unsigned int datasize = xdp->data_end - xdp->data;
        struct tun_xdp_hdr *hdr = xdp->data_hard_start;
        struct virtio_net_hdr *gso = &hdr->gso;
        struct tun_pcpu_stats *stats;
@@ -2451,12 +2456,13 @@ build:
        if (!rcu_dereference(tun->steering_prog))
                rxhash = __skb_get_hash_symmetric(skb);
 
+       skb_record_rx_queue(skb, tfile->queue_index);
        netif_receive_skb(skb);
 
        stats = get_cpu_ptr(tun->pcpu_stats);
        u64_stats_update_begin(&stats->syncp);
        stats->rx_packets++;
-       stats->rx_bytes += skb->len;
+       stats->rx_bytes += datasize;
        u64_stats_update_end(&stats->syncp);
        put_cpu_ptr(stats);
 
index 7275761a1177ca9cda569bfc734bb6de3e1558e1..3d8a70d3ea9bd67c91f85c6cab38d1afbf5f25f3 100644 (file)
@@ -140,7 +140,6 @@ struct ipheth_device {
        struct usb_device *udev;
        struct usb_interface *intf;
        struct net_device *net;
-       struct sk_buff *tx_skb;
        struct urb *tx_urb;
        struct urb *rx_urb;
        unsigned char *tx_buf;
@@ -230,6 +229,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
        case -ENOENT:
        case -ECONNRESET:
        case -ESHUTDOWN:
+       case -EPROTO:
                return;
        case 0:
                break;
@@ -281,7 +281,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
                dev_err(&dev->intf->dev, "%s: urb status: %d\n",
                __func__, status);
 
-       dev_kfree_skb_irq(dev->tx_skb);
        if (status == 0)
                netif_wake_queue(dev->net);
        else
@@ -423,7 +422,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
        if (skb->len > IPHETH_BUF_SIZE) {
                WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
                dev->net->stats.tx_dropped++;
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
 
@@ -443,12 +442,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
                dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
                        __func__, retval);
                dev->net->stats.tx_errors++;
-               dev_kfree_skb_irq(skb);
+               dev_kfree_skb_any(skb);
        } else {
-               dev->tx_skb = skb;
-
                dev->net->stats.tx_packets++;
                dev->net->stats.tx_bytes += skb->len;
+               dev_consume_skb_any(skb);
                netif_stop_queue(net);
        }
 
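The ipheth change drops the stored tx_skb entirely: since the payload is copied into the driver's own URB buffer before submission, the skb can be released inside ndo_start_xmit. Which release helper fits depends on context and meaning: dev_kfree_skb_irq() is for hard-IRQ context only, dev_kfree_skb_any() works from any context and counts as a drop, and dev_consume_skb_any() marks a successfully transmitted packet. A sketch, with example_submit_copy() as a hypothetical stand-in for the memcpy-plus-usb_submit_urb step:

#include <linux/netdevice.h>

static int example_submit_copy(struct sk_buff *skb);	/* hypothetical */

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *net)
{
	if (example_submit_copy(skb) < 0) {
		net->stats.tx_errors++;
		dev_kfree_skb_any(skb);		/* error path: a drop */
	} else {
		net->stats.tx_packets++;
		net->stats.tx_bytes += skb->len;
		dev_consume_skb_any(skb);	/* success: not a drop */
	}
	return NETDEV_TX_OK;
}
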
index 262e7a3c23cb67fbfd66b81ed0d26af0f0480d84..f2d01cb6f958cd3235dba2383950dd5fa57d8c08 100644 (file)
@@ -1321,6 +1321,8 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
        dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
        dev->net->flags |= IFF_MULTICAST;
        dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
+       dev->net->min_mtu = ETH_MIN_MTU;
+       dev->net->max_mtu = ETH_DATA_LEN;
        dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
 
        pdata->dev = dev;
@@ -1598,6 +1600,8 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
                return ret;
        }
 
+       cancel_delayed_work_sync(&pdata->carrier_check);
+
        if (pdata->suspend_flags) {
                netdev_warn(dev->net, "error during last resume\n");
                pdata->suspend_flags = 0;
@@ -1840,6 +1844,11 @@ done:
         */
        if (ret && PMSG_IS_AUTO(message))
                usbnet_resume(intf);
+
+       if (ret)
+               schedule_delayed_work(&pdata->carrier_check,
+                                     CARRIER_CHECK_DELAY);
+
        return ret;
 }
 
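Setting min_mtu/max_mtu lets the net core bounds-check MTU changes in dev_set_mtu() before the driver ever sees them; the suspend hunks separately ensure the carrier-check worker is stopped across suspend and rescheduled if suspend aborts. A sketch of the MTU-bounds part:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_set_mtu_bounds(struct net_device *net)
{
	/* dev_set_mtu() rejects values outside this range; no custom
	 * ndo_change_mtu validation is needed for the simple case.
	 */
	net->min_mtu = ETH_MIN_MTU;	/* 68 */
	net->max_mtu = ETH_DATA_LEN;	/* 1500 */
}
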
index 3e2c041d76ac1ac61fe5f6033c378feb4e3d6fd0..ea672145f6a66b97ec3572f213939a64323fcdf3 100644 (file)
@@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = {
        VIRTIO_NET_F_GUEST_TSO4,
        VIRTIO_NET_F_GUEST_TSO6,
        VIRTIO_NET_F_GUEST_ECN,
-       VIRTIO_NET_F_GUEST_UFO
+       VIRTIO_NET_F_GUEST_UFO,
+       VIRTIO_NET_F_GUEST_CSUM
 };
 
 struct virtnet_stat_desc {
@@ -364,7 +365,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
                                   struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
-                                  unsigned int len, unsigned int truesize)
+                                  unsigned int len, unsigned int truesize,
+                                  bool hdr_valid)
 {
        struct sk_buff *skb;
        struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -386,7 +388,8 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
        else
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
 
-       memcpy(hdr, p, hdr_len);
+       if (hdr_valid)
+               memcpy(hdr, p, hdr_len);
 
        len -= hdr_len;
        offset += hdr_padded_len;
@@ -738,7 +741,8 @@ static struct sk_buff *receive_big(struct net_device *dev,
                                   struct virtnet_rq_stats *stats)
 {
        struct page *page = buf;
-       struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
+       struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len,
+                                         PAGE_SIZE, true);
 
        stats->bytes += len - vi->hdr_len;
        if (unlikely(!skb))
@@ -841,7 +845,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                rcu_read_unlock();
                                put_page(page);
                                head_skb = page_to_skb(vi, rq, xdp_page,
-                                                      offset, len, PAGE_SIZE);
+                                                      offset, len,
+                                                      PAGE_SIZE, false);
                                return head_skb;
                        }
                        break;
@@ -897,7 +902,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                goto err_skb;
        }
 
-       head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
+       head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog);
        curr_skb = head_skb;
 
        if (unlikely(!curr_skb))
@@ -2334,9 +2339,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
        if (!vi->guest_offloads)
                return 0;
 
-       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
-               offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
-
        return virtnet_set_guest_offloads(vi, offloads);
 }
 
@@ -2346,8 +2348,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
 
        if (!vi->guest_offloads)
                return 0;
-       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))
-               offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM;
 
        return virtnet_set_guest_offloads(vi, offloads);
 }
@@ -2365,8 +2365,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
            && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
                virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
-               virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) {
-               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first");
+               virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
+               virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
+               NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
                return -EOPNOTSUPP;
        }
 
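Adding VIRTIO_NET_F_GUEST_CSUM to guest_offloads[] means the checksum offload is saved, cleared and restored together with the other receive offloads around XDP attach, instead of being special-cased (the removed virtio_has_feature() branches). The table collapses into a bitmask the way the driver's init path does; a sketch under that assumption:

#include <linux/virtio_config.h>

static u64 example_collect_offloads(struct virtio_device *vdev,
				    const unsigned long *table, size_t n)
{
	u64 offloads = 0;
	size_t i;

	for (i = 0; i < n; i++)
		if (virtio_has_feature(vdev, table[i]))
			offloads |= 1ULL << table[i];

	return offloads;
}
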
index a1c2801ded10123ae46ff1d43e4fd60a04231d90..7e49342bae384d25729e48ae31396ee958e6e967 100644 (file)
@@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        u32 bitmap;
 
        if (drop) {
-               if (vif->type == NL80211_IFTYPE_STATION) {
+               if (vif && vif->type == NL80211_IFTYPE_STATION) {
                        bitmap = ~(1 << WMI_MGMT_TID);
                        list_for_each_entry(arvif, &ar->arvifs, list) {
                                if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
index 1e3b5f4a4cf929682f1aea668b851e27ef1a2f9b..f23cb2f3d296a0205c07296b212d3805ce35a330 100644 (file)
@@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
        struct ath_vif *avp = (void *)vif->drv_priv;
        struct ath_node *an = &avp->mcast_node;
 
+       mutex_lock(&sc->mutex);
        if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
                if (sc->cur_chan->nvifs >= 1) {
                        mutex_unlock(&sc->mutex);
@@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
                sc->tx99_vif = vif;
        }
 
-       mutex_lock(&sc->mutex);
-
        ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
        sc->cur_chan->nvifs++;
 
index 230a378c26fcfdbd7e4ed14795a20420bf0ee596..7f0a5bade70a66acf26453bb59c386245c859d06 100644 (file)
@@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
                         * for subsequent chanspecs.
                         */
                        channel->flags = IEEE80211_CHAN_NO_HT40 |
-                                        IEEE80211_CHAN_NO_80MHZ;
+                                        IEEE80211_CHAN_NO_80MHZ |
+                                        IEEE80211_CHAN_NO_160MHZ;
                        ch.bw = BRCMU_CHAN_BW_20;
                        cfg->d11inf.encchspec(&ch);
                        chaninfo = ch.chspec;
index e7584b842dce4b8cc8fcd08504714e64eefc7f3a..eb5db94f57453f2aed93e89a510fdd210bef7a34 100644 (file)
@@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch)
                }
                break;
        case BRCMU_CHSPEC_D11AC_BW_160:
+               ch->bw = BRCMU_CHAN_BW_160;
+               ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK,
+                                        BRCMU_CHSPEC_D11AC_SB_SHIFT);
                switch (ch->sb) {
                case BRCMU_CHAN_SB_LLL:
                        ch->control_ch_num -= CH_70MHZ_APART;
index 2439e98431eefe4f023ec9c0b38a72010b17e4d1..7492dfb6729b89d181592459e70a49d3a5d0b137 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -81,7 +83,7 @@
 #define ACPI_WRDS_WIFI_DATA_SIZE       (ACPI_SAR_TABLE_SIZE + 2)
 #define ACPI_EWRD_WIFI_DATA_SIZE       ((ACPI_SAR_PROFILE_NUM - 1) * \
                                         ACPI_SAR_TABLE_SIZE + 3)
-#define ACPI_WGDS_WIFI_DATA_SIZE       18
+#define ACPI_WGDS_WIFI_DATA_SIZE       19
 #define ACPI_WRDD_WIFI_DATA_SIZE       2
 #define ACPI_SPLC_WIFI_DATA_SIZE       2
 
index 6b95d0e758897cbecf8cac036411cce590d408e8..2b8b50a77990cd453610feb194961929d7adce21 100644 (file)
@@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
                        const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
                        struct dentry *dbgfs_dir);
 
-void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
+static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
+{
+       kfree(fwrt->dump.d3_debug_data);
+       fwrt->dump.d3_debug_data = NULL;
+}
 
 void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
 
index dade206d551151959ef400f157cf5d3d417529d8..2ba890445c356502502a5d948bb2fdb0a21042bf 100644 (file)
@@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
        IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
 
        BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
-                    ACPI_WGDS_TABLE_SIZE !=  ACPI_WGDS_WIFI_DATA_SIZE);
+                    ACPI_WGDS_TABLE_SIZE + 1 !=  ACPI_WGDS_WIFI_DATA_SIZE);
 
        BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
 
@@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
        return -ENOENT;
 }
 
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+{
+       return -ENOENT;
+}
+
 static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 {
        return 0;
@@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
                IWL_DEBUG_RADIO(mvm,
                                "WRDS SAR BIOS table invalid or unavailable. (%d)\n",
                                ret);
-               /* if not available, don't fail and don't bother with EWRD */
-               return 0;
+               /*
+                * If not available, don't fail and don't bother with EWRD.
+                * Return 1 to indicate that WGDS can't be used either.
+                */
+               return 1;
        }
 
        ret = iwl_mvm_sar_get_ewrd_table(mvm);
@@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
        /* choose profile 1 (WRDS) as default for both chains */
        ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
 
-       /* if we don't have profile 0 from BIOS, just skip it */
+       /*
+        * If we don't have profile 0 from BIOS, just skip it.  This
+        * means that SAR Geo will not be enabled either, even if we
+        * have other valid profiles.
+        */
        if (ret == -ENOENT)
-               return 0;
+               return 1;
 
        return ret;
 }
@@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
                iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
 
        ret = iwl_mvm_sar_init(mvm);
-       if (ret)
-               goto error;
+       if (ret == 0) {
+               ret = iwl_mvm_sar_geo_init(mvm);
+       } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+               /*
+                * If basic SAR is not available, we check for WGDS,
+                * which should *not* be available either.  If it is
+                * available, issue an error, because we can't use SAR
+                * Geo without basic SAR.
+                */
+               IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
+       }
 
-       ret = iwl_mvm_sar_geo_init(mvm);
-       if (ret)
+       if (ret < 0)
                goto error;
 
        iwl_mvm_leds_sync(mvm);
index 505b0385d80003e546578fa2a1d8a4684ea1a9dd..00f831d88366d00103697eac358f54c753975448 100644 (file)
@@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
                goto out;
        }
 
-       if (changed)
-               *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
+       if (changed) {
+               u32 status = le32_to_cpu(resp->status);
+
+               *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+                           status == MCC_RESP_ILLEGAL);
+       }
 
        regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
                                      __le32_to_cpu(resp->n_channels),
@@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
        }
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa,
-                        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
-               return;
-
        /* if beacon filtering isn't on mac80211 does it anyway */
        if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
                return;
index 3633f27d048ab7bd36ac99fc38cae1c1197bd0de..6fc5cc1f2b5b3e1cf7e46c9b46e01da266325d2e 100644 (file)
@@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
        }
 
        IWL_DEBUG_LAR(mvm,
-                     "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
-                     status, mcc, mcc >> 8, mcc & 0xff,
-                     !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
+                     "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
+                     status, mcc, mcc >> 8, mcc & 0xff, n_channels);
 
 exit:
        iwl_free_resp(&cmd);
index 0e2092526fae1d81623c08005bbf2f08ee8d7077..af3fba10abc195847d781832ad597a9482613ab2 100644 (file)
@@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        iwl_mvm_thermal_exit(mvm);
  out_free:
        iwl_fw_flush_dump(&mvm->fwrt);
+       iwl_fw_runtime_free(&mvm->fwrt);
 
        if (iwlmvm_mod_params.init_dbg)
                return op_mode;
@@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
 
        iwl_mvm_tof_clean(mvm);
 
+       iwl_fw_runtime_free(&mvm->fwrt);
        mutex_destroy(&mvm->mutex);
        mutex_destroy(&mvm->d0i3_suspend_mutex);
 
index aa8058264d5b5bd6d6d112eeaaf999acde15d263..d1464e3e1be21a23f0cbc2d82bd4360bced5b4ee 100644 (file)
@@ -2884,6 +2884,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
 
        wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 
+       tasklet_hrtimer_init(&data->beacon_timer,
+                            mac80211_hwsim_beacon,
+                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
        err = ieee80211_register_hw(hw);
        if (err < 0) {
                pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
@@ -2908,10 +2912,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
                                    data->debugfs,
                                    data, &hwsim_simulate_radar);
 
-       tasklet_hrtimer_init(&data->beacon_timer,
-                            mac80211_hwsim_beacon,
-                            CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
        spin_lock_bh(&hwsim_radio_lock);
        err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
                                     hwsim_rht_params);
@@ -3703,16 +3703,16 @@ static int __init init_mac80211_hwsim(void)
        if (err)
                goto out_unregister_pernet;
 
+       err = hwsim_init_netlink();
+       if (err)
+               goto out_unregister_driver;
+
        hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
        if (IS_ERR(hwsim_class)) {
                err = PTR_ERR(hwsim_class);
-               goto out_unregister_driver;
+               goto out_exit_netlink;
        }
 
-       err = hwsim_init_netlink();
-       if (err < 0)
-               goto out_unregister_driver;
-
        for (i = 0; i < radios; i++) {
                struct hwsim_new_radio_params param = { 0 };
 
@@ -3818,6 +3818,8 @@ out_free_mon:
        free_netdev(hwsim_mon);
 out_free_radios:
        mac80211_hwsim_free();
+out_exit_netlink:
+       hwsim_exit_netlink();
 out_unregister_driver:
        platform_driver_unregister(&mac80211_hwsim_driver);
 out_unregister_pernet:
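
The mac80211_hwsim reorder restores the invariant that the goto ladder unwinds in exact reverse order of initialization: hwsim_init_netlink() now runs before class_create(), so a later failure can unwind through the new out_exit_netlink label. The general shape of that pattern, with hypothetical step_*() helpers assumed declared elsewhere:

#include <linux/init.h>

static int __init example_init(void)
{
	int err;

	err = step_a_init();		/* hypothetical init steps */
	if (err)
		return err;

	err = step_b_init();
	if (err)
		goto undo_a;

	err = step_c_init();
	if (err)
		goto undo_b;

	return 0;

undo_b:				/* mirror image of the init order */
	step_b_exit();
undo_a:
	step_a_exit();
	return err;
}
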
index 0ccbcd7e887d67d1352886736411e13c0813ccfb..c30d8f5bbf2aa2a3db56818f61821a9698963d14 100644 (file)
@@ -1,6 +1,12 @@
 config MT76_CORE
        tristate
 
+config MT76_LEDS
+       bool
+       depends on MT76_CORE
+       depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS
+       default y
+
 config MT76_USB
        tristate
        depends on MT76_CORE
index 2a699e8b79bfbdb2aadc798278c03439ed53811d..7d219ff2d48027e6b7532f0ebec2163fb3a9567d 100644 (file)
@@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
        mt76_check_sband(dev, NL80211_BAND_2GHZ);
        mt76_check_sband(dev, NL80211_BAND_5GHZ);
 
-       ret = mt76_led_init(dev);
-       if (ret)
-               return ret;
+       if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+               ret = mt76_led_init(dev);
+               if (ret)
+                       return ret;
+       }
 
        return ieee80211_register_hw(hw);
 }
index 47c42c607964356f1c6ec634394278b2423a8a9e..7806963b1905293f5067ab5758c35a9f190b8c7d 100644 (file)
@@ -71,7 +71,6 @@ struct mt76x02_dev {
        struct mac_address macaddr_list[8];
 
        struct mutex phy_mutex;
-       struct mutex mutex;
 
        u8 txdone_seq;
        DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status);
index 3824290b219d5b7165a47611eb42cb6bcfe9e042..fd125722d1fb67a1fe2700d34c9f298bfb02a7fc 100644 (file)
@@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev)
        mt76x2_dfs_init_detector(dev);
 
        /* init led callbacks */
-       dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
-       dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+       if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+               dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness;
+               dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink;
+       }
 
        ret = mt76_register_device(&dev->mt76, true, mt76x02_rates,
                                   ARRAY_SIZE(mt76x02_rates));
index 034a062956681ec4f72d45501bab70c1cbe791ad..3f001bd6806ce62ad8fe1c143936549f9fdeac62 100644 (file)
@@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
        if (val != ~0 && val > 0xffff)
                return -EINVAL;
 
-       mutex_lock(&dev->mutex);
+       mutex_lock(&dev->mt76.mutex);
        mt76x2_mac_set_tx_protection(dev, val);
-       mutex_unlock(&dev->mutex);
+       mutex_unlock(&dev->mt76.mutex);
 
        return 0;
 }
index 4c2154b9e6a3e59bcb772b34031e6db860227ccb..bd10165d7eec5b4d28c22d06e7a3abad79ade277 100644 (file)
@@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func,
        struct resource res[2];
        mmc_pm_flag_t mmcflags;
        int ret = -ENOMEM;
-       int irq, wakeirq;
+       int irq, wakeirq, num_irqs;
        const char *chip_family;
 
        /* We are only able to handle the wlan function */
@@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func,
                       irqd_get_trigger_type(irq_get_irq_data(irq));
        res[0].name = "irq";
 
-       res[1].start = wakeirq;
-       res[1].flags = IORESOURCE_IRQ |
-                      irqd_get_trigger_type(irq_get_irq_data(wakeirq));
-       res[1].name = "wakeirq";
 
-       ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+       if (wakeirq > 0) {
+               res[1].start = wakeirq;
+               res[1].flags = IORESOURCE_IRQ |
+                              irqd_get_trigger_type(irq_get_irq_data(wakeirq));
+               res[1].name = "wakeirq";
+               num_irqs = 2;
+       } else {
+               num_irqs = 1;
+       }
+       ret = platform_device_add_resources(glue->core, res, num_irqs);
        if (ret) {
                dev_err(glue->dev, "can't add resources\n");
                goto out_dev_put;
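
The wl1271 fix stops registering a garbage second IRQ resource when no wake IRQ exists: res[1] is only filled in for wakeirq > 0, and the true count is passed to platform_device_add_resources(). A condensed sketch of the same shape (wakeirq <= 0 meaning "not provided" mirrors the patch):

#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_add_irqs(struct platform_device *pdev, int irq,
			    int wakeirq)
{
	struct resource res[2] = {};
	int num_irqs = 1;

	res[0].start = irq;
	res[0].flags = IORESOURCE_IRQ;
	res[0].name  = "irq";

	if (wakeirq > 0) {		/* optional: only when present */
		res[1].start = wakeirq;
		res[1].flags = IORESOURCE_IRQ;
		res[1].name  = "wakeirq";
		num_irqs = 2;
	}

	return platform_device_add_resources(pdev, res, num_irqs);
}
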
index b360e5613b9f102e74bd2a3e0bd2548d949ed951..f8948cf515ce3936bb7dc859b555a3a28b59f08b 100644 (file)
@@ -1,6 +1,7 @@
 config NTB_IDT
        tristate "IDT PCIe-switch Non-Transparent Bridge support"
        depends on PCI
+       select HWMON
        help
         This driver supports the NTB function of capable IDT PCIe-switches.
 
@@ -23,9 +24,7 @@ config NTB_IDT
         BAR settings of peer NT-functions, the BAR setups can't be done over
         kernel PCI fixups. That's why the alternative pre-initialization
         techniques like BIOS using SMBus interface or EEPROM should be
-        utilized. Additionally if one needs to have temperature sensor
-        information printed to system log, the corresponding registers must
-        be initialized within BIOS/EEPROM as well.
+        utilized.
 
         If unsure, say N.
 
index dbe72f116017ab305a0a1f19276155700f69211e..1dede87dd54fadd2a0c6a5336f708e2ea968b5d0 100644 (file)
@@ -4,7 +4,7 @@
  *
  *   GPL LICENSE SUMMARY
  *
- *   Copyright (C) 2016 T-Platforms All Rights Reserved.
+ *   Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or modify it
  *   under the terms and conditions of the GNU General Public License,
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
 #include <linux/slab.h>
 #include <linux/list.h>
 #include <linux/debugfs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
 #include <linux/ntb.h>
 
 #include "ntb_hw_idt.h"
@@ -1105,9 +1108,9 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
        }
 
        /* Allocate memory for memory window descriptors */
-       ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt,
-                               sizeof(*ret_mws), GFP_KERNEL);
-       if (IS_ERR_OR_NULL(ret_mws))
+       ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
+                              GFP_KERNEL);
+       if (!ret_mws)
                return ERR_PTR(-ENOMEM);
 
        /* Copy the info of detected memory windows */
@@ -1320,7 +1323,7 @@ static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
                idt_nt_write(ndev, bar->ltbase, (u32)addr);
                idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32));
                /* Set the custom BAR aperture limit */
-               limit = pci_resource_start(ntb->pdev, mw_cfg->bar) + size;
+               limit = pci_bus_address(ntb->pdev, mw_cfg->bar) + size;
                idt_nt_write(ndev, bar->limit, (u32)limit);
                if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
                        idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32));
@@ -1821,61 +1824,284 @@ static int idt_ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx,
  *                      7. Temperature sensor operations
  *
  *    IDT PCIe-switch has an embedded temperature sensor, which can be used to
- * warn a user-space of possible chip overheating. Since workload temperature
- * can be different on different platforms, temperature thresholds as well as
- * general sensor settings must be setup in the framework of BIOS/EEPROM
- * initializations. It includes the actual sensor enabling as well.
+ * check the current chip core temperature. Since the workload environment
+ * can differ between platforms, an offset and ADC/filter settings can be
+ * specified, though at the moment only the offset is exposed through the
+ * sysfs hwmon interface; the rest of the settings can be adjusted, for
+ * instance, by the BIOS/EEPROM firmware.
  *=============================================================================
  */
 
+/*
+ * idt_get_deg() - convert millidegree Celsius value to just degree
+ * @mdegC:     IN - millidegree Celsius value
+ *
+ * Return: Degree corresponding to the passed millidegree value
+ */
+static inline s8 idt_get_deg(long mdegC)
+{
+       return mdegC / 1000;
+}
+
+/*
+ * idt_get_deg_frac() - retrieve 0/0.5 fraction of the millidegree Celsius value
+ * @mdegC:     IN - millidegree Celsius value
+ *
+ * Return: 0/0.5 degree fraction of the passed millidegree value
+ */
+static inline u8 idt_get_deg_frac(long mdegC)
+{
+       return (mdegC % 1000) >= 500 ? 5 : 0;
+}
+
+/*
+ * idt_temp_get_fmt() - convert millidegree Celsius value to 0:7:1 format
+ * @mdegC:     IN - millidegree Celsius value
+ *
+ * Return: 0:7:1 format acceptable by the IDT temperature sensor
+ */
+static inline u8 idt_temp_get_fmt(long mdegC)
+{
+       return (idt_get_deg(mdegC) << 1) | (idt_get_deg_frac(mdegC) ? 1 : 0);
+}
+
+/*
+ * idt_get_temp_sval() - convert temp sample to signed millidegree Celsius
+ * @data:      IN - shifted to LSB 8-bits temperature sample
+ *
+ * Return: signed millidegree Celsius
+ */
+static inline long idt_get_temp_sval(u32 data)
+{
+       return ((s8)data / 2) * 1000 + (data & 0x1 ? 500 : 0);
+}
+
+/*
+ * idt_get_temp_uval() - convert temp sample to unsigned millidegree Celsius
+ * @data:      IN - shifted to LSB 8-bits temperature sample
+ *
+ * Return: unsigned millidegree Celsius
+ */
+static inline long idt_get_temp_uval(u32 data)
+{
+       return (data / 2) * 1000 + (data & 0x1 ? 500 : 0);
+}
+
 /*
  * idt_read_temp() - read temperature from chip sensor
  * @ntb:       NTB device context.
- * @val:       OUT - integer value of temperature
- * @frac:      OUT - fraction
+ * @type:      IN - type of the temperature value to read
+ * @val:       OUT - integer value of temperature in millidegree Celsius
  */
-static void idt_read_temp(struct idt_ntb_dev *ndev, unsigned char *val,
-                         unsigned char *frac)
+static void idt_read_temp(struct idt_ntb_dev *ndev,
+                         const enum idt_temp_val type, long *val)
 {
        u32 data;
 
-       /* Read the data from TEMP field of the TMPSTS register */
-       data = idt_sw_read(ndev, IDT_SW_TMPSTS);
-       data = GET_FIELD(TMPSTS_TEMP, data);
-       /* TEMP field has one fractional bit and seven integer bits */
-       *val = data >> 1;
-       *frac = ((data & 0x1) ? 5 : 0);
+       /* Alter the temperature field in accordance with the passed type */
+       switch (type) {
+       case IDT_TEMP_CUR:
+               data = GET_FIELD(TMPSTS_TEMP,
+                                idt_sw_read(ndev, IDT_SW_TMPSTS));
+               break;
+       case IDT_TEMP_LOW:
+               data = GET_FIELD(TMPSTS_LTEMP,
+                                idt_sw_read(ndev, IDT_SW_TMPSTS));
+               break;
+       case IDT_TEMP_HIGH:
+               data = GET_FIELD(TMPSTS_HTEMP,
+                                idt_sw_read(ndev, IDT_SW_TMPSTS));
+               break;
+       case IDT_TEMP_OFFSET:
+               /* This is the only field with signed 0:7:1 format */
+               data = GET_FIELD(TMPADJ_OFFSET,
+                                idt_sw_read(ndev, IDT_SW_TMPADJ));
+               *val = idt_get_temp_sval(data);
+               return;
+       default:
+               data = GET_FIELD(TMPSTS_TEMP,
+                                idt_sw_read(ndev, IDT_SW_TMPSTS));
+               break;
+       }
+
+       /* The rest of the fields accept unsigned 0:7:1 format */
+       *val = idt_get_temp_uval(data);
 }
 
 /*
- * idt_temp_isr() - temperature sensor alarm events ISR
- * @ndev:      IDT NTB hardware driver descriptor
- * @ntint_sts: NT-function interrupt status
+ * idt_write_temp() - write temperature to the chip sensor register
+ * @ntb:       NTB device context.
+ * @type:      IN - type of the temperature value to change
+ * @val:       IN - integer value of temperature in millidegree Celsius
+ */
+static void idt_write_temp(struct idt_ntb_dev *ndev,
+                          const enum idt_temp_val type, const long val)
+{
+       unsigned int reg;
+       u32 data;
+       u8 fmt;
+
+       /* Retrieve the properly formatted temperature value */
+       fmt = idt_temp_get_fmt(val);
+
+       mutex_lock(&ndev->hwmon_mtx);
+       switch (type) {
+       case IDT_TEMP_LOW:
+               reg = IDT_SW_TMPALARM;
+               data = SET_FIELD(TMPALARM_LTEMP, idt_sw_read(ndev, reg), fmt) &
+                       ~IDT_TMPALARM_IRQ_MASK;
+               break;
+       case IDT_TEMP_HIGH:
+               reg = IDT_SW_TMPALARM;
+               data = SET_FIELD(TMPALARM_HTEMP, idt_sw_read(ndev, reg), fmt) &
+                       ~IDT_TMPALARM_IRQ_MASK;
+               break;
+       case IDT_TEMP_OFFSET:
+               reg = IDT_SW_TMPADJ;
+               data = SET_FIELD(TMPADJ_OFFSET, idt_sw_read(ndev, reg), fmt);
+               break;
+       default:
+               goto inval_mutex_unlock;
+       }
+
+       idt_sw_write(ndev, reg, data);
+
+inval_mutex_unlock:
+       mutex_unlock(&ndev->hwmon_mtx);
+}
+
+/*
+ * idt_sysfs_show_temp() - print out the corresponding temperature value
+ * @dev:       Pointer to the NTB device structure
+ * @da:                Sensor device attribute structure
+ * @buf:       Buffer to print temperature out
  *
- * It handles events of temperature crossing alarm thresholds. Since reading
- * of TMPALARM register clears it up, the function doesn't analyze the
- * read value, instead the current temperature value just warningly printed to
- * log.
- * The method is called from PCIe ISR bottom-half routine.
+ * Return: Number of characters written or a negative error
  */
-static void idt_temp_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+static ssize_t idt_sysfs_show_temp(struct device *dev,
+                                  struct device_attribute *da, char *buf)
 {
-       unsigned char val, frac;
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
+       enum idt_temp_val type = attr->index;
+       long mdeg;
 
-       /* Read the current temperature value */
-       idt_read_temp(ndev, &val, &frac);
+       idt_read_temp(ndev, type, &mdeg);
+       return sprintf(buf, "%ld\n", mdeg);
+}
 
-       /* Read the temperature alarm to clean the alarm status out */
-       /*(void)idt_sw_read(ndev, IDT_SW_TMPALARM);*/
+/*
+ * idt_sysfs_set_temp() - set the corresponding temperature value
+ * @dev:       Pointer to the NTB device structure
+ * @da:                Sensor device attribute structure
+ * @buf:       Buffer holding the temperature value to set
+ * @count:     Size of the passed buffer
+ *
+ * Return: Number of characters written or a negative error
+ */
+static ssize_t idt_sysfs_set_temp(struct device *dev,
+                                 struct device_attribute *da, const char *buf,
+                                 size_t count)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
+       enum idt_temp_val type = attr->index;
+       long mdeg;
+       int ret;
 
-       /* Clean the corresponding interrupt bit */
-       idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_TMPSENSOR);
+       ret = kstrtol(buf, 10, &mdeg);
+       if (ret)
+               return ret;
+
+       /* Clamp the passed value in accordance with the type */
+       if (type == IDT_TEMP_OFFSET)
+               mdeg = clamp_val(mdeg, IDT_TEMP_MIN_OFFSET,
+                                IDT_TEMP_MAX_OFFSET);
+       else
+               mdeg = clamp_val(mdeg, IDT_TEMP_MIN_MDEG, IDT_TEMP_MAX_MDEG);
+
+       idt_write_temp(ndev, type, mdeg);
+
+       return count;
+}
+
+/*
+ * idt_sysfs_reset_hist() - reset temperature history
+ * @dev:       Pointer to the NTB device structure
+ * @da:                Sensor device attribute structure
+ * @buf:       Buffer holding the written value (unused)
+ * @count:     Size of the passed buffer
+ *
+ * Return: Number of characters written or a negative error
+ */
+static ssize_t idt_sysfs_reset_hist(struct device *dev,
+                                   struct device_attribute *da,
+                                   const char *buf, size_t count)
+{
+       struct idt_ntb_dev *ndev = dev_get_drvdata(dev);
+
+       /* Just set the maximal value to the lowest temperature field and
+        * the minimal value to the highest temperature field
+        */
+       idt_write_temp(ndev, IDT_TEMP_LOW, IDT_TEMP_MAX_MDEG);
+       idt_write_temp(ndev, IDT_TEMP_HIGH, IDT_TEMP_MIN_MDEG);
 
-       dev_dbg(&ndev->ntb.pdev->dev,
-               "Temp sensor IRQ detected %#08x", ntint_sts);
+       return count;
+}
+
+/*
+ * Hwmon IDT sysfs attributes
+ */
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, idt_sysfs_show_temp, NULL,
+                         IDT_TEMP_CUR);
+static SENSOR_DEVICE_ATTR(temp1_lowest, 0444, idt_sysfs_show_temp, NULL,
+                         IDT_TEMP_LOW);
+static SENSOR_DEVICE_ATTR(temp1_highest, 0444, idt_sysfs_show_temp, NULL,
+                         IDT_TEMP_HIGH);
+static SENSOR_DEVICE_ATTR(temp1_offset, 0644, idt_sysfs_show_temp,
+                         idt_sysfs_set_temp, IDT_TEMP_OFFSET);
+static DEVICE_ATTR(temp1_reset_history, 0200, NULL, idt_sysfs_reset_hist);
 
-       /* Print temperature value to log */
-       dev_warn(&ndev->ntb.pdev->dev, "Temperature %hhu.%hhu", val, frac);
+/*
+ * Hwmon IDT sysfs attributes group
+ */
+static struct attribute *idt_temp_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_temp1_lowest.dev_attr.attr,
+       &sensor_dev_attr_temp1_highest.dev_attr.attr,
+       &sensor_dev_attr_temp1_offset.dev_attr.attr,
+       &dev_attr_temp1_reset_history.attr,
+       NULL
+};
+ATTRIBUTE_GROUPS(idt_temp);
+
+/*
+ * idt_init_temp() - initialize temperature sensor interface
+ * @ndev:      IDT NTB hardware driver descriptor
+ *
+ * A simple sensor initialization method: it switches the sensor on and
+ * registers the resource-managed hwmon interface. Note that
+ * since the device is shared we won't disable it on remove, but leave it
+ * working until the system is powered off.
+ */
+static void idt_init_temp(struct idt_ntb_dev *ndev)
+{
+       struct device *hwmon;
+
+       /* Enable sensor if it hasn't been already */
+       idt_sw_write(ndev, IDT_SW_TMPCTL, 0x0);
+
+       /* Initialize hwmon interface fields */
+       mutex_init(&ndev->hwmon_mtx);
+
+       hwmon = devm_hwmon_device_register_with_groups(&ndev->ntb.pdev->dev,
+               ndev->swcfg->name, ndev, idt_temp_groups);
+       if (IS_ERR(hwmon)) {
+               dev_err(&ndev->ntb.pdev->dev, "Couldn't create hwmon device");
+               return;
+       }
+
+       dev_dbg(&ndev->ntb.pdev->dev, "Temperature HWmon interface registered");
 }
 
 /*=============================================================================
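
The conversion helpers above implement the sensor's 0:7:1 fixed-point format: seven integer-degree bits plus one half-degree bit. For example, 30.5 degC (30500 mdeg) encodes as (30 << 1) | 1 = 0x3d, and 0x3d decodes back to (61 / 2) * 1000 + 500 = 30500 mdeg. A stand-alone, user-space mirror of the unsigned round trip (the signed offset case handled by idt_get_temp_sval() is omitted):

#include <stdio.h>

static unsigned char temp_to_fmt(long mdeg)		/* 0:7:1 encode */
{
	return ((mdeg / 1000) << 1) | ((mdeg % 1000) >= 500 ? 1 : 0);
}

static long fmt_to_mdeg(unsigned char fmt)		/* 0:7:1 decode */
{
	return (fmt / 2) * 1000 + ((fmt & 0x1) ? 500 : 0);
}

int main(void)
{
	printf("0x%02x\n", temp_to_fmt(30500));	/* prints 0x3d */
	printf("%ld\n", fmt_to_mdeg(0x3d));	/* prints 30500 */
	return 0;
}
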
@@ -1931,7 +2157,7 @@ static int idt_init_isr(struct idt_ntb_dev *ndev)
                goto err_free_vectors;
        }
 
-       /* Unmask Message/Doorbell/SE/Temperature interrupts */
+       /* Unmask Message/Doorbell/SE interrupts */
        ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
        idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
 
@@ -1946,7 +2172,6 @@ err_free_vectors:
        return ret;
 }
 
-
 /*
  * idt_deinit_ist() - deinitialize PCIe interrupt handler
  * @ndev:      IDT NTB hardware driver descriptor
@@ -2007,12 +2232,6 @@ static irqreturn_t idt_thread_isr(int irq, void *devid)
                handled = true;
        }
 
-       /* Handle temperature sensor interrupt */
-       if (ntint_sts & IDT_NTINTSTS_TMPSENSOR) {
-               idt_temp_isr(ndev, ntint_sts);
-               handled = true;
-       }
-
        dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts);
 
        return handled ? IRQ_HANDLED : IRQ_NONE;
@@ -2123,9 +2342,9 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
                                   size_t count, loff_t *offp)
 {
        struct idt_ntb_dev *ndev = filp->private_data;
-       unsigned char temp, frac, idx, pidx, cnt;
+       unsigned char idx, pidx, cnt;
+       unsigned long irqflags;
+       long mdeg;
        ssize_t ret = 0, off = 0;
-       unsigned long irqflags;
        enum ntb_speed speed;
        enum ntb_width width;
        char *strbuf;
@@ -2274,9 +2493,10 @@ static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
        off += scnprintf(strbuf + off, size - off, "\n");
 
        /* Current temperature */
-       idt_read_temp(ndev, &temp, &frac);
+       idt_read_temp(ndev, IDT_TEMP_CUR, &mdeg);
        off += scnprintf(strbuf + off, size - off,
-               "Switch temperature\t\t- %hhu.%hhuC\n", temp, frac);
+               "Switch temperature\t\t- %hhd.%hhuC\n",
+               idt_get_deg(mdeg), idt_get_deg_frac(mdeg));
 
        /* Copy the buffer to the User Space */
        ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
@@ -2390,7 +2610,7 @@ static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
 
        /* Allocate memory for the IDT PCIe-device descriptor */
        ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
-       if (IS_ERR_OR_NULL(ndev)) {
+       if (!ndev) {
                dev_err(&pdev->dev, "Memory allocation failed for descriptor");
                return ERR_PTR(-ENOMEM);
        }
@@ -2571,6 +2791,9 @@ static int idt_pci_probe(struct pci_dev *pdev,
        /* Initialize Messaging subsystem */
        idt_init_msg(ndev);
 
+       /* Initialize hwmon interface */
+       idt_init_temp(ndev);
+
        /* Initialize IDT interrupts handler */
        ret = idt_init_isr(ndev);
        if (ret != 0)
index 856fd182f6f4f08816bd8523c338823187caf43c..2f1aa121b0cf381d788eb968f45fd2069a3b99e0 100644 (file)
@@ -4,7 +4,7 @@
  *
  *   GPL LICENSE SUMMARY
  *
- *   Copyright (C) 2016 T-Platforms All Rights Reserved.
+ *   Copyright (C) 2016-2018 T-Platforms JSC All Rights Reserved.
  *
  *   This program is free software; you can redistribute it and/or modify it
  *   under the terms and conditions of the GNU General Public License,
@@ -47,9 +47,9 @@
 #include <linux/pci_ids.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/ntb.h>
 
-
 /*
  * Macro is used to create the struct pci_device_id that matches
  * the supported IDT PCIe-switches
  * @IDT_NTINTMSK_DBELL:                Doorbell interrupt mask bit
  * @IDT_NTINTMSK_SEVENT:       Switch Event interrupt mask bit
  * @IDT_NTINTMSK_TMPSENSOR:    Temperature sensor interrupt mask bit
- * @IDT_NTINTMSK_ALL:          All the useful interrupts mask
+ * @IDT_NTINTMSK_ALL:          NTB-related interrupts mask
  */
 #define IDT_NTINTMSK_MSG               0x00000001U
 #define IDT_NTINTMSK_DBELL             0x00000002U
 #define IDT_NTINTMSK_SEVENT            0x00000008U
 #define IDT_NTINTMSK_TMPSENSOR         0x00000080U
 #define IDT_NTINTMSK_ALL \
-       (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | \
-        IDT_NTINTMSK_SEVENT | IDT_NTINTMSK_TMPSENSOR)
+       (IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | IDT_NTINTMSK_SEVENT)
 
 /*
  * NTGSIGNAL register fields related constants
 #define IDT_SWPxMSGCTL_PART_MASK       0x00000070U
 #define IDT_SWPxMSGCTL_PART_FLD                4
 
+/*
+ * TMPCTL register fields related constants
+ * @IDT_TMPCTL_LTH_MASK:       Low temperature threshold field mask
+ * @IDT_TMPCTL_LTH_FLD:                Low temperature threshold field offset
+ * @IDT_TMPCTL_MTH_MASK:       Middle temperature threshold field mask
+ * @IDT_TMPCTL_MTH_FLD:                Middle temperature threshold field offset
+ * @IDT_TMPCTL_HTH_MASK:       High temperature threshold field mask
+ * @IDT_TMPCTL_HTH_FLD:                High temperature threshold field offset
+ * @IDT_TMPCTL_PDOWN:          Temperature sensor power down
+ */
+#define IDT_TMPCTL_LTH_MASK            0x000000FFU
+#define IDT_TMPCTL_LTH_FLD             0
+#define IDT_TMPCTL_MTH_MASK            0x0000FF00U
+#define IDT_TMPCTL_MTH_FLD             8
+#define IDT_TMPCTL_HTH_MASK            0x00FF0000U
+#define IDT_TMPCTL_HTH_FLD             16
+#define IDT_TMPCTL_PDOWN               0x80000000U
+
 /*
  * TMPSTS register fields related constants
  * @IDT_TMPSTS_TEMP_MASK:      Current temperature field mask
  * @IDT_TMPSTS_TEMP_FLD:       Current temperature field offset
+ * @IDT_TMPSTS_LTEMP_MASK:     Lowest temperature field mask
+ * @IDT_TMPSTS_LTEMP_FLD:      Lowest temperature field offset
+ * @IDT_TMPSTS_HTEMP_MASK:     Highest temperature field mask
+ * @IDT_TMPSTS_HTEMP_FLD:      Highest temperature field offset
  */
 #define IDT_TMPSTS_TEMP_MASK           0x000000FFU
 #define IDT_TMPSTS_TEMP_FLD            0
+#define IDT_TMPSTS_LTEMP_MASK          0x0000FF00U
+#define IDT_TMPSTS_LTEMP_FLD           8
+#define IDT_TMPSTS_HTEMP_MASK          0x00FF0000U
+#define IDT_TMPSTS_HTEMP_FLD           16
+
+/*
+ * TMPALARM register fields related constants
+ * @IDT_TMPALARM_LTEMP_MASK:   Lowest temperature field mask
+ * @IDT_TMPALARM_LTEMP_FLD:    Lowest temperature field offset
+ * @IDT_TMPALARM_HTEMP_MASK:   Highest temperature field mask
+ * @IDT_TMPALARM_HTEMP_FLD:    Highest temperature field offset
+ * @IDT_TMPALARM_IRQ_MASK:     Alarm IRQ status mask
+ */
+#define IDT_TMPALARM_LTEMP_MASK                0x0000FF00U
+#define IDT_TMPALARM_LTEMP_FLD         8
+#define IDT_TMPALARM_HTEMP_MASK                0x00FF0000U
+#define IDT_TMPALARM_HTEMP_FLD         16
+#define IDT_TMPALARM_IRQ_MASK          0x3F000000U
+
+/*
+ * TMPADJ register fields related constants
+ * @IDT_TMPADJ_OFFSET_MASK:    Temperature value offset field mask
+ * @IDT_TMPADJ_OFFSET_FLD:     Temperature value offset field offset
+ */
+#define IDT_TMPADJ_OFFSET_MASK         0x000000FFU
+#define IDT_TMPADJ_OFFSET_FLD          0
 
 /*
  * Helper macro to get/set the corresponding field value
 #define IDT_TRANS_ALIGN                4
 #define IDT_DIR_SIZE_ALIGN     1
 
+/*
+ * IDT PCIe-switch temperature sensor value limits
+ * @IDT_TEMP_MIN_MDEG: Minimal integer value of temperature
+ * @IDT_TEMP_MAX_MDEG: Maximal integer value of temperature
+ * @IDT_TEMP_MIN_OFFSET: Minimal integer value of temperature offset
+ * @IDT_TEMP_MAX_OFFSET: Maximal integer value of temperature offset
+ */
+#define IDT_TEMP_MIN_MDEG      0
+#define IDT_TEMP_MAX_MDEG      127500
+#define IDT_TEMP_MIN_OFFSET    -64000
+#define IDT_TEMP_MAX_OFFSET    63500
+
+/*
+ * Temperature sensor values enumeration
+ * @IDT_TEMP_CUR:      Current temperature
+ * @IDT_TEMP_LOW:      Lowest historical temperature
+ * @IDT_TEMP_HIGH:     Highest historical temperature
+ * @IDT_TEMP_OFFSET:   Current temperature offset
+ */
+enum idt_temp_val {
+       IDT_TEMP_CUR,
+       IDT_TEMP_LOW,
+       IDT_TEMP_HIGH,
+       IDT_TEMP_OFFSET
+};
+
 /*
  * IDT Memory Windows type. Depending on the device settings, IDT supports
  * Direct Address Translation MW registers and Lookup Table registers
@@ -1044,6 +1117,8 @@ struct idt_ntb_peer {
  * @msg_mask_lock:     Message mask register lock
  * @gasa_lock:         GASA registers access lock
  *
+ * @hwmon_mtx:         Temperature sensor interface update mutex
+ *
  * @dbgfs_info:                DebugFS info node
  */
 struct idt_ntb_dev {
@@ -1071,6 +1146,8 @@ struct idt_ntb_dev {
        spinlock_t msg_mask_lock;
        spinlock_t gasa_lock;
 
+       struct mutex hwmon_mtx;
+
        struct dentry *dbgfs_info;
 };
 #define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb)
index 6aa57322727916bd5bc1c8e5ab13f286f8fd1b1d..2ad263f708da7ab68b12c9767058df9505501013 100644 (file)
@@ -265,7 +265,7 @@ static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
        return 0;
 }
 
-static inline int ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
+static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
 {
        u64 shift, mask;
 
index 9398959664769b5f6cd3c79e4914199d1cf35f0d..3bfdb4562408879fd04f340b05e84673c4da2232 100644 (file)
@@ -194,6 +194,8 @@ struct ntb_transport_mw {
        void __iomem *vbase;
        size_t xlat_size;
        size_t buff_size;
+       size_t alloc_size;
+       void *alloc_addr;
        void *virt_addr;
        dma_addr_t dma_addr;
 };
@@ -672,13 +674,59 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
                return;
 
        ntb_mw_clear_trans(nt->ndev, PIDX, num_mw);
-       dma_free_coherent(&pdev->dev, mw->buff_size,
-                         mw->virt_addr, mw->dma_addr);
+       dma_free_coherent(&pdev->dev, mw->alloc_size,
+                         mw->alloc_addr, mw->dma_addr);
        mw->xlat_size = 0;
        mw->buff_size = 0;
+       mw->alloc_size = 0;
+       mw->alloc_addr = NULL;
        mw->virt_addr = NULL;
 }
 
+static int ntb_alloc_mw_buffer(struct ntb_transport_mw *mw,
+                              struct device *dma_dev, size_t align)
+{
+       dma_addr_t dma_addr;
+       void *alloc_addr, *virt_addr;
+       int rc;
+
+       alloc_addr = dma_alloc_coherent(dma_dev, mw->alloc_size,
+                                       &dma_addr, GFP_KERNEL);
+       if (!alloc_addr) {
+               dev_err(dma_dev, "Unable to alloc MW buff of size %zu\n",
+                       mw->alloc_size);
+               return -ENOMEM;
+       }
+       virt_addr = alloc_addr;
+
+       /*
+        * We must ensure that the allocated memory address is BAR-size
+        * aligned in order for the XLAT register to accept the value.
+        * This is a hardware requirement. It is recommended to set up
+        * CMA for BAR sizes equal to or greater than 4 MB.
+        */
+       if (!IS_ALIGNED(dma_addr, align)) {
+               if (mw->alloc_size > mw->buff_size) {
+                       virt_addr = PTR_ALIGN(alloc_addr, align);
+                       dma_addr = ALIGN(dma_addr, align);
+               } else {
+                       rc = -ENOMEM;
+                       goto err;
+               }
+       }
+
+       mw->alloc_addr = alloc_addr;
+       mw->virt_addr = virt_addr;
+       mw->dma_addr = dma_addr;
+
+       return 0;
+
+err:
+       dma_free_coherent(dma_dev, mw->alloc_size, alloc_addr, dma_addr);
+
+       return rc;
+}
+
 static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
                      resource_size_t size)
 {
@@ -710,28 +758,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
        /* Alloc memory for receiving data.  Must be aligned */
        mw->xlat_size = xlat_size;
        mw->buff_size = buff_size;
+       mw->alloc_size = buff_size;
 
-       mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
-                                          &mw->dma_addr, GFP_KERNEL);
-       if (!mw->virt_addr) {
-               mw->xlat_size = 0;
-               mw->buff_size = 0;
-               dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
-                       buff_size);
-               return -ENOMEM;
-       }
-
-       /*
-        * we must ensure that the memory address allocated is BAR size
-        * aligned in order for the XLAT register to take the value. This
-        * is a requirement of the hardware. It is recommended to setup CMA
-        * for BAR sizes equal or greater than 4MB.
-        */
-       if (!IS_ALIGNED(mw->dma_addr, xlat_align)) {
-               dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
-                       &mw->dma_addr);
-               ntb_free_mw(nt, num_mw);
-               return -ENOMEM;
+       rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
+       if (rc) {
+               mw->alloc_size *= 2;
+               rc = ntb_alloc_mw_buffer(mw, &pdev->dev, xlat_align);
+               if (rc) {
+                       dev_err(&pdev->dev,
+                               "Unable to alloc aligned MW buff\n");
+                       mw->xlat_size = 0;
+                       mw->buff_size = 0;
+                       mw->alloc_size = 0;
+                       return rc;
+               }
        }
 
        /* Notify HW the memory location of the receive buffer */
@@ -1278,6 +1318,7 @@ static void ntb_rx_copy_callback(void *data,
                case DMA_TRANS_READ_FAILED:
                case DMA_TRANS_WRITE_FAILED:
                        entry->errors++;
+                       /* fall through */
                case DMA_TRANS_ABORTED:
                {
                        struct ntb_transport_qp *qp = entry->qp;
@@ -1533,6 +1574,7 @@ static void ntb_tx_copy_callback(void *data,
                case DMA_TRANS_READ_FAILED:
                case DMA_TRANS_WRITE_FAILED:
                        entry->errors++;
+                       /* fall through */
                case DMA_TRANS_ABORTED:
                {
                        void __iomem *offset =
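
The ntb_alloc_mw_buffer()/ntb_set_mw() change above retries with a doubled allocation when dma_alloc_coherent() returns an unaligned buffer, then aligns a window inside the oversized block. The same arithmetic as a plain-C sketch, using malloc() as a stand-in for the coherent allocator:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round x up to the next multiple of the power-of-two a, the same
 * arithmetic as the kernel's ALIGN()/PTR_ALIGN(). */
static uintptr_t align_up(uintptr_t x, uintptr_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	size_t size = 4096, align = 4096;
	void *raw, *aligned;

	/* First try: plain allocation, in the hope it is already aligned. */
	raw = malloc(size);
	if (!raw)
		return 1;

	if ((uintptr_t)raw & (align - 1)) {
		/* Fallback: double the allocation so an aligned window of
		 * 'size' bytes is guaranteed to fit inside it. */
		free(raw);
		raw = malloc(2 * size);
		if (!raw)
			return 1;
	}
	aligned = (void *)align_up((uintptr_t)raw, align);

	printf("raw %p -> aligned %p\n", raw, aligned);
	free(raw); /* always free the original pointer, not the aligned one */
	return 0;
}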
index 182258f64417b6d8165653ecffdbb77ac853a3b8..d0c621b32f72314adcd7a382fbddf78d462dfebf 100644 (file)
@@ -111,6 +111,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, resource_size_t *overlap);
 resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
 resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+               resource_size_t size);
 resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id);
 int alias_dpa_busy(struct device *dev, void *data);
index 24c64090169e6a76e5200806bb2078c9969fc1ba..6f22272e8d8014d9ccf3a7e480ea42c2c3b8b2ba 100644 (file)
@@ -649,14 +649,47 @@ static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
                        ALIGN_DOWN(phys, nd_pfn->align));
 }
 
+/*
+ * Check if pmem collides with 'System RAM' or other regions when
+ * section aligned, and trim it accordingly.
+ */
+static void trim_pfn_device(struct nd_pfn *nd_pfn, u32 *start_pad, u32 *end_trunc)
+{
+       struct nd_namespace_common *ndns = nd_pfn->ndns;
+       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+       struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+       const resource_size_t start = nsio->res.start;
+       const resource_size_t end = start + resource_size(&nsio->res);
+       resource_size_t adjust, size;
+
+       *start_pad = 0;
+       *end_trunc = 0;
+
+       adjust = start - PHYS_SECTION_ALIGN_DOWN(start);
+       size = resource_size(&nsio->res) + adjust;
+       if (region_intersects(start - adjust, size, IORESOURCE_SYSTEM_RAM,
+                               IORES_DESC_NONE) == REGION_MIXED
+                       || nd_region_conflict(nd_region, start - adjust, size))
+               *start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+
+       /* Now check that end of the range does not collide. */
+       adjust = PHYS_SECTION_ALIGN_UP(end) - end;
+       size = resource_size(&nsio->res) + adjust;
+       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+                               IORES_DESC_NONE) == REGION_MIXED
+                       || !IS_ALIGNED(end, nd_pfn->align)
+                       || nd_region_conflict(nd_region, start, size + adjust))
+               *end_trunc = end - phys_pmem_align_down(nd_pfn, end);
+}
+
 static int nd_pfn_init(struct nd_pfn *nd_pfn)
 {
        u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
        struct nd_namespace_common *ndns = nd_pfn->ndns;
-       u32 start_pad = 0, end_trunc = 0;
+       struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        resource_size_t start, size;
-       struct nd_namespace_io *nsio;
        struct nd_region *nd_region;
+       u32 start_pad, end_trunc;
        struct nd_pfn_sb *pfn_sb;
        unsigned long npfns;
        phys_addr_t offset;
@@ -688,30 +721,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 
        memset(pfn_sb, 0, sizeof(*pfn_sb));
 
-       /*
-        * Check if pmem collides with 'System RAM' when section aligned and
-        * trim it accordingly
-        */
-       nsio = to_nd_namespace_io(&ndns->dev);
-       start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
-       size = resource_size(&nsio->res);
-       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED) {
-               start = nsio->res.start;
-               start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
-       }
-
-       start = nsio->res.start;
-       size = PHYS_SECTION_ALIGN_UP(start + size) - start;
-       if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
-                               IORES_DESC_NONE) == REGION_MIXED
-                       || !IS_ALIGNED(start + resource_size(&nsio->res),
-                               nd_pfn->align)) {
-               size = resource_size(&nsio->res);
-               end_trunc = start + size - phys_pmem_align_down(nd_pfn,
-                               start + size);
-       }
-
+       trim_pfn_device(nd_pfn, &start_pad, &end_trunc);
        if (start_pad + end_trunc)
                dev_info(&nd_pfn->dev, "%s alignment collision, truncate %d bytes\n",
                                dev_name(&ndns->dev), start_pad + end_trunc);
@@ -722,7 +732,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
         * implementation will limit the pfns advertised through
         * ->direct_access() to those that are included in the memmap.
         */
-       start += start_pad;
+       start = nsio->res.start + start_pad;
        size = resource_size(&nsio->res);
        npfns = PFN_SECTION_ALIGN_UP((size - start_pad - end_trunc - SZ_8K)
                        / PAGE_SIZE);
index 174a418cb171545db6c27b0896d023aea75bdee0..e7377f1028ef687637a4a9f481899b05cc264b1f 100644 (file)
@@ -1184,6 +1184,47 @@ int nvdimm_has_cache(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_cache);
 
+struct conflict_context {
+       struct nd_region *nd_region;
+       resource_size_t start, size;
+};
+
+static int region_conflict(struct device *dev, void *data)
+{
+       struct nd_region *nd_region;
+       struct conflict_context *ctx = data;
+       resource_size_t res_end, region_end, region_start;
+
+       if (!is_memory(dev))
+               return 0;
+
+       nd_region = to_nd_region(dev);
+       if (nd_region == ctx->nd_region)
+               return 0;
+
+       res_end = ctx->start + ctx->size;
+       region_start = nd_region->ndr_start;
+       region_end = region_start + nd_region->ndr_size;
+       if (ctx->start >= region_start && ctx->start < region_end)
+               return -EBUSY;
+       if (res_end > region_start && res_end <= region_end)
+               return -EBUSY;
+       return 0;
+}
+
+int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
+               resource_size_t size)
+{
+       struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
+       struct conflict_context ctx = {
+               .nd_region = nd_region,
+               .start = start,
+               .size = size,
+       };
+
+       return device_for_each_child(&nvdimm_bus->dev, &ctx, region_conflict);
+}
+
 void __exit nd_region_devs_exit(void)
 {
        ida_destroy(&region_ida);
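
region_conflict() flags a busy range when either endpoint of the candidate [start, start + size) interval lands inside an existing region. The check, reduced to a self-contained sketch with hypothetical addresses:

#include <stdbool.h>
#include <stdio.h>

/* Same test as region_conflict(): a conflict exists when either
 * endpoint of [start, start + size) falls inside the region. */
static bool ranges_conflict(unsigned long long start, unsigned long long size,
			    unsigned long long r_start, unsigned long long r_size)
{
	unsigned long long end = start + size;
	unsigned long long r_end = r_start + r_size;

	if (start >= r_start && start < r_end)
		return true;
	if (end > r_start && end <= r_end)
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", ranges_conflict(0x1000, 0x1000, 0x1800, 0x1000)); /* 1 */
	printf("%d\n", ranges_conflict(0x1000, 0x1000, 0x3000, 0x1000)); /* 0 */
	return 0;
}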
index 2e65be8b1387af92a2b099478c36e5a28e6d7ffc..962012135b62acf7e956df6fc5780780c3f8a9c4 100644 (file)
@@ -831,6 +831,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
 static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
 {
        struct nvme_ctrl *ctrl = rq->end_io_data;
+       unsigned long flags;
+       bool startka = false;
 
        blk_mq_free_request(rq);
 
@@ -841,7 +843,13 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
                return;
        }
 
-       schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+       spin_lock_irqsave(&ctrl->lock, flags);
+       if (ctrl->state == NVME_CTRL_LIVE ||
+           ctrl->state == NVME_CTRL_CONNECTING)
+               startka = true;
+       spin_unlock_irqrestore(&ctrl->lock, flags);
+       if (startka)
+               schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
 static int nvme_keep_alive(struct nvme_ctrl *ctrl)
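
The keep-alive fix above samples ctrl->state under ctrl->lock and only re-arms ka_work for LIVE/CONNECTING controllers, doing the actual scheduling outside the lock. A rough pthread analogue of that shape (states and names invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_DELETING };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum ctrl_state state = CTRL_LIVE;

static void keep_alive_end_io(void)
{
	bool startka = false;

	/* Sample the state under the lock, decide... */
	pthread_mutex_lock(&lock);
	if (state == CTRL_LIVE || state == CTRL_CONNECTING)
		startka = true;
	pthread_mutex_unlock(&lock);

	/* ...and re-arm outside of it. */
	if (startka)
		printf("re-arm keep-alive work\n");
}

int main(void)
{
	keep_alive_end_io();   /* re-arms */
	state = CTRL_DELETING;
	keep_alive_end_io();   /* stays quiet */
	return 0;
}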
@@ -1519,8 +1527,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
        if (ns->ndev)
                nvme_nvm_update_nvm_info(ns);
 #ifdef CONFIG_NVME_MULTIPATH
-       if (ns->head->disk)
+       if (ns->head->disk) {
                nvme_update_disk_info(ns->head->disk, ns, id);
+               blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+       }
 #endif
 }
 
@@ -3312,6 +3322,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
        struct nvme_ns *ns, *next;
        LIST_HEAD(ns_list);
 
+       /* prevent racing with ns scanning */
+       flush_work(&ctrl->scan_work);
+
        /*
         * The dead states indicates the controller was not gracefully
         * disconnected. In that case, we won't be able to flush any data while
@@ -3474,7 +3487,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
        nvme_mpath_stop(ctrl);
        nvme_stop_keep_alive(ctrl);
        flush_work(&ctrl->async_event_work);
-       flush_work(&ctrl->scan_work);
        cancel_work_sync(&ctrl->fw_act_work);
        if (ctrl->ops->stop_ctrl)
                ctrl->ops->stop_ctrl(ctrl);
@@ -3583,7 +3595,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
        return 0;
 out_free_name:
-       kfree_const(dev->kobj.name);
+       kfree_const(ctrl->device->kobj.name);
 out_release_instance:
        ida_simple_remove(&nvme_instance_ida, ctrl->instance);
 out:
@@ -3605,7 +3617,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
        down_read(&ctrl->namespaces_rwsem);
 
        /* Forcibly unquiesce queues to avoid blocking dispatch */
-       if (ctrl->admin_q)
+       if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
                blk_mq_unquiesce_queue(ctrl->admin_q);
 
        list_for_each_entry(ns, &ctrl->namespaces, list)
index e52b9d3c0bd6cc22e1ac6b97736426267bb761f6..feb86b59170e49cd38b8b7c7bf3cc71a75a828c2 100644 (file)
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
 
        bool                    ioq_live;
        bool                    assoc_active;
+       atomic_t                err_work_active;
        u64                     association_id;
 
        struct list_head        ctrl_list;      /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
        struct blk_mq_tag_set   tag_set;
 
        struct delayed_work     connect_work;
+       struct work_struct      err_work;
 
        struct kref             ref;
        u32                     flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
        struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
        int i;
 
+       /* ensure we've initialized the ops once */
+       if (!(aen_op->flags & FCOP_FLAGS_AEN))
+               return;
+
        for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
                __nvme_fc_abort_op(ctrl, aen_op);
 }
@@ -1704,7 +1710,6 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
        op->fcp_req.rspaddr = &op->rsp_iu;
        op->fcp_req.rsplen = sizeof(op->rsp_iu);
        op->fcp_req.done = nvme_fc_fcpio_done;
-       op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
        op->ctrl = ctrl;
        op->queue = queue;
        op->rq = rq;
@@ -1747,11 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
        struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
        int res;
 
-       nvme_req(rq)->ctrl = &ctrl->ctrl;
        res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
        if (res)
                return res;
        op->op.fcp_req.first_sgl = &op->sgl[0];
+       op->op.fcp_req.private = &op->priv[0];
+       nvme_req(rq)->ctrl = &ctrl->ctrl;
        return res;
 }
 
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
 static void
 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
 {
-       /* only proceed if in LIVE state - e.g. on first error */
+       int active;
+
+       /*
+        * If an error (io timeout, etc.) occurs while (re)connecting,
+        * it is an error creating the new association. Start the error
+        * recovery thread if it hasn't already been started; it is
+        * expected that multiple ios may hit this path before things
+        * are cleaned up.
+        */
+       if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+               active = atomic_xchg(&ctrl->err_work_active, 1);
+               if (!active && !schedule_work(&ctrl->err_work)) {
+                       atomic_set(&ctrl->err_work_active, 0);
+                       WARN_ON(1);
+               }
+               return;
+       }
+
+       /* Otherwise, only proceed if in LIVE state - e.g. on first error */
        if (ctrl->ctrl.state != NVME_CTRL_LIVE)
                return;
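
nvme_fc_error_recovery() claims err_work with atomic_xchg() so that, out of the many ios that may fail at once, exactly one schedules the error-recovery work. The claim idiom in C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int err_work_active;

/* First caller flips 0 -> 1 and wins; racers see 1 and back off. */
static bool try_claim_err_work(void)
{
	return atomic_exchange(&err_work_active, 1) == 0;
}

int main(void)
{
	printf("%d\n", try_claim_err_work()); /* 1: schedules the work */
	printf("%d\n", try_claim_err_work()); /* 0: already pending */
	atomic_store(&err_work_active, 0);    /* as the work item does on exit */
	return 0;
}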
 
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
 {
        struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
 
+       cancel_work_sync(&ctrl->err_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
        /*
         * kill the association on the link side.  this will block
@@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
 }
 
 static void
-nvme_fc_reset_ctrl_work(struct work_struct *work)
+__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
 {
-       struct nvme_fc_ctrl *ctrl =
-               container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
-       int ret;
-
-       nvme_stop_ctrl(&ctrl->ctrl);
+       nvme_stop_keep_alive(&ctrl->ctrl);
 
        /* will block will waiting for io to terminate */
        nvme_fc_delete_association(ctrl);
 
-       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+       if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
+           !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: error_recovery: Couldn't change state "
                        "to CONNECTING\n", ctrl->cnum);
-               return;
-       }
+}
+
+static void
+nvme_fc_reset_ctrl_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+               container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
+       int ret;
+
+       __nvme_fc_terminate_io(ctrl);
+
+       nvme_stop_ctrl(&ctrl->ctrl);
 
        if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
                ret = nvme_fc_create_association(ctrl);
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
                        ctrl->cnum);
 }
 
+static void
+nvme_fc_connect_err_work(struct work_struct *work)
+{
+       struct nvme_fc_ctrl *ctrl =
+                       container_of(work, struct nvme_fc_ctrl, err_work);
+
+       __nvme_fc_terminate_io(ctrl);
+
+       atomic_set(&ctrl->err_work_active, 0);
+
+       /*
+        * Rescheduling the connection after recovering from the io
+        * error is left to the reconnect work item, which is the one
+        * that should have stalled waiting on the io whose error
+        * scheduled this work.
+        */
+}
+
 static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
        .name                   = "fc",
        .module                 = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        ctrl->cnum = idx;
        ctrl->ioq_live = false;
        ctrl->assoc_active = false;
+       atomic_set(&ctrl->err_work_active, 0);
        init_waitqueue_head(&ctrl->ioabort_wait);
 
        get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 
        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+       INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
        spin_lock_init(&ctrl->lock);
 
        /* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
        cancel_work_sync(&ctrl->ctrl.reset_work);
+       cancel_work_sync(&ctrl->err_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
 
        ctrl->ctrl.opts = NULL;
index 5e3cc8c59a394fce6ba25f1c621b26903185963d..9901afd804ce3720709c198fb54140d2a2ea3d85 100644 (file)
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* set to a default value for 512 until disk is validated */
        blk_queue_logical_block_size(q, 512);
+       blk_set_stacking_limits(&q->limits);
 
        /* we need to propagate up the VMC settings */
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
index cee79cb388aff0304909b99bd9f81523ba0da1ba..081cbdcce8803a8ddca91d21e63511846114ad7d 100644 (file)
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
 static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
                struct nvme_id_ctrl *id)
 {
+       if (ctrl->subsys->cmic & (1 << 3))
+               dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
        return 0;
 }
 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
index f30031945ee4b671033a0d80583d05c0724a096e..c33bb201b8846fae0319e1c575f9737af79a1944 100644 (file)
@@ -1663,6 +1663,9 @@ static void nvme_map_cmb(struct nvme_dev *dev)
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        int bar;
 
+       if (dev->cmb_size)
+               return;
+
        dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
        if (!dev->cmbsz)
                return;
@@ -2147,7 +2150,6 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev->dev);
 
-       nvme_release_cmb(dev);
        pci_free_irq_vectors(pdev);
 
        if (pci_is_enabled(pdev)) {
@@ -2595,6 +2597,7 @@ static void nvme_remove(struct pci_dev *pdev)
        nvme_stop_ctrl(&dev->ctrl);
        nvme_remove_namespaces(&dev->ctrl);
        nvme_dev_disable(dev, true);
+       nvme_release_cmb(dev);
        nvme_free_host_mem(dev);
        nvme_dev_remove_admin(dev);
        nvme_free_queues(dev, 0);
index d181cafedc584916d0b04db2e08dd9e0802cba0c..ab6ec7295bf900e54d82c624e4f45f30c342d5df 100644 (file)
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
        qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
        if (ib_dma_mapping_error(ibdev, qe->dma)) {
                kfree(qe->data);
+               qe->data = NULL;
                return -ENOMEM;
        }
 
@@ -823,6 +824,7 @@ out_free_tagset:
 out_free_async_qe:
        nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
                sizeof(struct nvme_command), DMA_TO_DEVICE);
+       ctrl->async_event_sqe.data = NULL;
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
index f4efe289dc7bc2caa8ce3c2a1a44b97e66cd0324..a5f9bbce863f42dcff6c23759801fa9c48d0b210 100644 (file)
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
        struct pci_dev *p2p_dev;
        int ret;
 
-       if (!ctrl->p2p_client)
+       if (!ctrl->p2p_client || !ns->use_p2pmem)
                return;
 
        if (ns->p2p_dev) {
index 39d972e2595f0dc764f2a5ac37d589422068c139..01feebec29ea2d671d2d110a8a4df7148cc2f616 100644 (file)
@@ -101,7 +101,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
                rw = READ;
        }
 
-       iov_iter_bvec(&iter, ITER_BVEC | rw, req->f.bvec, nr_segs, count);
+       iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
 
        iocb->ki_pos = pos;
        iocb->ki_filp = req->ns->file;
index ddce100be57a48f883558e147669a3d06f1046bb..583086dd9cb9a07252a46e6589ff9cc76bf99b33 100644 (file)
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
        int                     inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -530,6 +529,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
        struct nvmet_rdma_rsp *rsp =
                container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
+       struct nvmet_rdma_queue *queue = cq->cq_context;
 
        nvmet_rdma_release_rsp(rsp);
 
@@ -537,7 +537,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
                     wc->status != IB_WC_WR_FLUSH_ERR)) {
                pr_err("SEND for CQE 0x%p failed with status %s (%d).\n",
                        wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);
-               nvmet_rdma_error_comp(rsp->queue);
+               nvmet_rdma_error_comp(queue);
        }
 }
 
@@ -1274,12 +1274,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
        if (queue->host_qid == 0) {
                /* Let inflight controller teardown complete */
-               flush_workqueue(nvmet_rdma_delete_wq);
+               flush_scheduled_work();
        }
 
        ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
        if (ret) {
-               queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+               schedule_work(&queue->release_work);
                /* Destroying rdma_cm id is not needed here */
                return 0;
        }
@@ -1344,7 +1344,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
        if (disconnect) {
                rdma_disconnect(queue->cm_id);
-               queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+               schedule_work(&queue->release_work);
        }
 }
 
@@ -1374,7 +1374,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
        mutex_unlock(&nvmet_rdma_queue_mutex);
 
        pr_err("failed to connect queue %d\n", queue->idx);
-       queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+       schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1656,8 @@ static int __init nvmet_rdma_init(void)
        if (ret)
                goto err_ib_client;
 
-       nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-                       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-       if (!nvmet_rdma_delete_wq) {
-               ret = -ENOMEM;
-               goto err_unreg_transport;
-       }
-
        return 0;
 
-err_unreg_transport:
-       nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
        ib_unregister_client(&nvmet_rdma_ib_client);
        return ret;
@@ -1674,7 +1665,6 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-       destroy_workqueue(nvmet_rdma_delete_wq);
        nvmet_unregister_transport(&nvmet_rdma_ops);
        ib_unregister_client(&nvmet_rdma_ib_client);
        WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
index 9b18ce90f90733a0b4d6ceec64921488cd19aa80..27f67dfa649d08373de5d898cffb19f8e851ea93 100644 (file)
@@ -44,6 +44,7 @@ struct nvmem_cell {
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
+       struct device_node      *np;
        struct nvmem_device     *nvmem;
        struct list_head        node;
 };
@@ -298,6 +299,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell)
        mutex_lock(&nvmem_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_mutex);
+       of_node_put(cell->np);
        kfree(cell->name);
        kfree(cell);
 }
@@ -530,6 +532,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
                        return -ENOMEM;
 
                cell->nvmem = nvmem;
+               cell->np = of_node_get(child);
                cell->offset = be32_to_cpup(addr++);
                cell->bytes = be32_to_cpup(addr);
                cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
@@ -960,14 +963,13 @@ out:
 
 #if IS_ENABLED(CONFIG_OF)
 static struct nvmem_cell *
-nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
+nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
 {
        struct nvmem_cell *cell = NULL;
-       int i = 0;
 
        mutex_lock(&nvmem_mutex);
        list_for_each_entry(cell, &nvmem->cells, node) {
-               if (index == i++)
+               if (np == cell->np)
                        break;
        }
        mutex_unlock(&nvmem_mutex);
@@ -1011,7 +1013,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);
 
-       cell = nvmem_find_cell_by_index(nvmem, index);
+       cell = nvmem_find_cell_by_node(nvmem, cell_np);
        if (!cell) {
                __nvmem_device_put(nvmem);
                return ERR_PTR(-ENOENT);
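
nvmem_find_cell_by_node() replaces a positional lookup with a match on the cell's device_node pointer, which stays valid while the of_node_get()/of_node_put() pair added above holds a reference. A toy list lookup keyed by pointer identity (stand-in types, no device tree):

#include <stdio.h>

struct cell { const void *np; struct cell *next; }; /* stand-in types */

static struct cell *find_by_node(struct cell *head, const void *np)
{
	for (struct cell *c = head; c; c = c->next)
		if (c->np == np)   /* pointer identity, not position */
			return c;
	return NULL;
}

int main(void)
{
	static const char node_a[] = "A", node_b[] = "B";
	struct cell b = { node_b, NULL }, a = { node_a, &b };

	printf("found: %s\n", (const char *)find_by_node(&a, node_b)->np);
	return 0;
}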
index d023cf303d56c3a557881a55818b9ee424e74162..09692c9b32a71c8f080653af4312b56ad55467a4 100644 (file)
@@ -777,8 +777,6 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev)
                if (!(of_node_name_eq(next, "cpu") ||
                      (next->type && !of_node_cmp(next->type, "cpu"))))
                        continue;
-               if (!__of_device_is_available(next))
-                       continue;
                if (of_node_get(next))
                        break;
        }
index 0f27fad9fe940de645f61f868d0578795e073abc..5592437bb3d155aa415ee23511e404aa2afe9ba3 100644 (file)
@@ -149,9 +149,11 @@ int of_dma_configure(struct device *dev, struct device_node *np, bool force_dma)
         * set by the driver.
         */
        mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
-       dev->bus_dma_mask = mask;
        dev->coherent_dma_mask &= mask;
        *dev->dma_mask &= mask;
+       /* ...but only set bus mask if we found valid dma-ranges earlier */
+       if (!ret)
+               dev->bus_dma_mask = mask;
 
        coherent = of_dma_is_coherent(np);
        dev_dbg(dev, "device is%sdma coherent\n",
index 35c64a4295e07edc9b5f59ebaed18d3ab2ef1c52..fe6b13608e5101458254d4f522454df65dc2e8fa 100644 (file)
@@ -104,9 +104,14 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
                distance = of_read_number(matrix, 1);
                matrix++;
 
+               if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+                   (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+                       pr_err("Invalid distance[node%d -> node%d] = %d\n",
+                              nodea, nodeb, distance);
+                       return -EINVAL;
+               }
+
                numa_set_distance(nodea, nodeb, distance);
-               pr_debug("distance[node%d -> node%d] = %d\n",
-                        nodea, nodeb, distance);
 
                /* Set default distance of node B->A same as A->B */
                if (nodeb > nodea)
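
The of_numa validation added above enforces the distance-map invariants: a node's distance to itself must be LOCAL_DISTANCE, and any remote distance must be strictly larger. The rule as a tiny predicate (LOCAL_DISTANCE is 10 in the kernel):

#include <stdbool.h>
#include <stdio.h>

#define LOCAL_DISTANCE	10	/* kernel default self-distance */

static bool distance_valid(int nodea, int nodeb, int distance)
{
	if (nodea == nodeb && distance != LOCAL_DISTANCE)
		return false;	/* self-distance must be exactly local */
	if (nodea != nodeb && distance <= LOCAL_DISTANCE)
		return false;	/* remote nodes cannot be "closer" than local */
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       distance_valid(0, 0, 10),	/* 1 */
	       distance_valid(0, 1, 10),	/* 0: rejected with -EINVAL above */
	       distance_valid(0, 1, 20));	/* 1 */
	return 0;
}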
index 5a4b47958073ee4526772f35e8b5614cc3c63418..38a08805a30c64bba10519550474cac0fc83cdad 100644 (file)
@@ -579,10 +579,8 @@ int dev_pm_opp_of_add_table_indexed(struct device *dev, int index)
                 */
                count = of_count_phandle_with_args(dev->of_node,
                                                   "operating-points-v2", NULL);
-               if (count != 1)
-                       return -ENODEV;
-
-               index = 0;
+               if (count == 1)
+                       index = 0;
        }
 
        opp_table = dev_pm_opp_get_opp_table_indexed(dev, index);
index 9e5a9a3112c9cec57abcffab6c9b23640682756c..1c69c404df1149d96ef1abe24767a96077ab199b 100644 (file)
@@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data)
        int ret;
 
        vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data,
-                                         new_supply_vbb->u_volt);
+                                         new_supply_vdd->u_volt);
+
+       if (new_supply_vdd->u_volt_min < vdd_uv)
+               new_supply_vdd->u_volt_min = vdd_uv;
 
        /* Scaling up? Scale voltage before frequency */
        if (freq > old_freq) {
@@ -414,7 +417,6 @@ static struct platform_driver ti_opp_supply_driver = {
        .probe = ti_opp_supply_probe,
        .driver = {
                   .name = "ti_opp_supply",
-                  .owner = THIS_MODULE,
                   .of_match_table = of_match_ptr(ti_opp_supply_of_match),
                   },
 };
index 2cbef2d7c207afa54405e79bc2e061af87072168..88af6bff945f36cbb9b746bb92775a9b8ca073d0 100644 (file)
@@ -81,8 +81,6 @@ struct imx6_pcie {
 #define PCIE_PL_PFLR_FORCE_LINK                        (1 << 15)
 #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING        (1 << 29)
-#define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP         (1 << 4)
 
 #define PCIE_PHY_CTRL (PL_OFFSET + 0x114)
 #define PCIE_PHY_CTRL_DATA_LOC 0
@@ -711,12 +709,6 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
        return 0;
 }
 
-static int imx6_pcie_link_up(struct dw_pcie *pci)
-{
-       return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) &
-                       PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
-}
-
 static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
        .host_init = imx6_pcie_host_init,
 };
@@ -749,7 +741,7 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie,
 }
 
 static const struct dw_pcie_ops dw_pcie_ops = {
-       .link_up = imx6_pcie_link_up,
+       /* No special ops needed, but pcie-designware still expects this struct */
 };
 
 #ifdef CONFIG_PM_SLEEP
index 3724d3ef7008e88b59e398318fd212ad76b24c3a..7aa9a82b7ebd62043c5c8fd6c64c10b190cd1c9b 100644 (file)
@@ -88,7 +88,7 @@ static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
        int i;
 
        for (i = 0; i < PCIE_IATU_NUM; i++)
-               dw_pcie_disable_atu(pcie->pci, DW_PCIE_REGION_OUTBOUND, i);
+               dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND);
 }
 
 static int ls1021_pcie_link_up(struct dw_pcie *pci)
index 1e7b02221eac934ea28b72c3094587c71c4fb96b..de8635af4cde296922ca4b0afd22bc86aee261d8 100644 (file)
@@ -440,7 +440,6 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
        tbl_offset = dw_pcie_readl_dbi(pci, reg);
        bir = (tbl_offset & PCI_MSIX_TABLE_BIR);
        tbl_offset &= PCI_MSIX_TABLE_OFFSET;
-       tbl_offset >>= 3;
 
        reg = PCI_BASE_ADDRESS_0 + (4 * bir);
        bar_addr_upper = 0;
index 2a4aa64685794434f9ffdc5c38613035e63038f3..921db6f803403a27f7f4f8bb4394b9a94d95e0bd 100644 (file)
@@ -793,15 +793,10 @@ static void pci_acpi_setup(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct acpi_device *adev = ACPI_COMPANION(dev);
-       int node;
 
        if (!adev)
                return;
 
-       node = acpi_get_node(adev->handle);
-       if (node != NUMA_NO_NODE)
-               set_dev_node(dev, node);
-
        pci_acpi_optimize_delay(pci_dev, adev->handle);
 
        pci_acpi_add_pm_notifier(adev, pci_dev);
index d068f11d08a70cf3b8e86a4dd42c714ca9e2732e..c9d8e3c837de785d7960897f511328e1399239ae 100644 (file)
@@ -5556,9 +5556,13 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
        u32 lnkcap2, lnkcap;
 
        /*
-        * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
-        * Speeds Vector in Link Capabilities 2 when supported, falling
-        * back to Max Link Speed in Link Capabilities otherwise.
+        * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
+        * implementation note there recommends using the Supported Link
+        * Speeds Vector in Link Capabilities 2 when supported.
+        *
+        * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
+        * should use the Supported Link Speeds field in Link Capabilities,
+        * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
         */
        pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
        if (lnkcap2) { /* PCIe r3.0-compliant */
@@ -5574,16 +5578,10 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
        }
 
        pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
-       if (lnkcap) {
-               if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
-                       return PCIE_SPEED_16_0GT;
-               else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
-                       return PCIE_SPEED_8_0GT;
-               else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
-                       return PCIE_SPEED_5_0GT;
-               else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
-                       return PCIE_SPEED_2_5GT;
-       }
+       if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
+               return PCIE_SPEED_5_0GT;
+       else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
+               return PCIE_SPEED_2_5GT;
 
        return PCI_SPEED_UNKNOWN;
 }
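
The pcie_get_speed_cap() fix matters because Max Link Speed in Link Capabilities is a small enumeration, not a bit mask: the pre-r3.0 values are 1 (2.5 GT/s) and 2 (5.0 GT/s), so the field must be masked and compared for equality. A sketch of the corrected decode with a hypothetical LNKCAP value:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCAP_SLS		0x0000000f
#define PCI_EXP_LNKCAP_SLS_2_5GB	0x00000001
#define PCI_EXP_LNKCAP_SLS_5_0GB	0x00000002

/* Equality compare on the masked field, as in the hunk above. */
static const char *lnkcap_speed(uint32_t lnkcap)
{
	switch (lnkcap & PCI_EXP_LNKCAP_SLS) {
	case PCI_EXP_LNKCAP_SLS_5_0GB: return "5.0 GT/s";
	case PCI_EXP_LNKCAP_SLS_2_5GB: return "2.5 GT/s";
	default:                       return "unknown";
	}
}

int main(void)
{
	printf("%s\n", lnkcap_speed(0x00477c82)); /* "5.0 GT/s" */
	return 0;
}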
index dcb29cb76dc69d1a958f4ae25732048b7532e549..f78860ce884bc531860bba73bce96b83f1c444ae 100644 (file)
@@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
        struct pcie_link_state *link;
        int blacklist = !!pcie_aspm_sanity_check(pdev);
 
-       if (!aspm_support_enabled || aspm_disabled)
+       if (!aspm_support_enabled)
                return;
 
        if (pdev->link_state)
index 9ce531194f8af04c9bc96e2b387ca9ba0b6af3a5..6d4b44b569bc78e7f3548825ddf4522c183bbd7b 100644 (file)
@@ -231,6 +231,7 @@ static const struct qusb2_phy_cfg sdm845_phy_cfg = {
        .mask_core_ready = CORE_READY_STATUS,
        .has_pll_override = true,
        .autoresume_en    = BIT(0),
+       .update_tune1_with_efuse = true,
 };
 
 static const char * const qusb2_phy_vreg_names[] = {
@@ -402,10 +403,10 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
 
        /*
         * Read efuse register having TUNE2/1 parameter's high nibble.
-        * If efuse register shows value as 0x0, or if we fail to find
-        * a valid efuse register settings, then use default value
-        * as 0xB for high nibble that we have already set while
-        * configuring phy.
+        * If efuse register shows value as 0x0 (indicating value is not
+        * fused), or if we fail to find a valid efuse register setting,
+        * then use default value for high nibble that we have already
+        * set while configuring the phy.
         */
        val = nvmem_cell_read(qphy->cell, NULL);
        if (IS_ERR(val) || !val[0]) {
@@ -415,12 +416,13 @@ static void qusb2_phy_set_tune2_param(struct qusb2_phy *qphy)
 
        /* Fused TUNE1/2 value is the higher nibble only */
        if (cfg->update_tune1_with_efuse)
-               qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
-                             val[0] << 0x4);
+               qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE1],
+                                val[0] << HSTX_TRIM_SHIFT,
+                                HSTX_TRIM_MASK);
        else
-               qusb2_setbits(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
-                             val[0] << 0x4);
-
+               qusb2_write_mask(qphy->base, cfg->regs[QUSB2PHY_PORT_TUNE2],
+                                val[0] << HSTX_TRIM_SHIFT,
+                                HSTX_TRIM_MASK);
 }
 
 static int qusb2_phy_set_mode(struct phy *phy, enum phy_mode mode)
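
The switch from qusb2_setbits() to qusb2_write_mask() above matters because TUNE1/2 already holds a default high nibble (0xB): a plain OR would merge the fused value into the old one, while a masked write replaces it. A demonstration with assumed HSTX_TRIM_SHIFT/HSTX_TRIM_MASK values (a 4-bit field at bit 4, inferred from the removed `val[0] << 0x4`):

#include <stdint.h>
#include <stdio.h>

/* Assumed field definition, matching the usage in the hunk above. */
#define HSTX_TRIM_SHIFT	4
#define HSTX_TRIM_MASK	(0xf << HSTX_TRIM_SHIFT)

/* write_mask clears the field before OR-ing the new value in;
 * plain setbits would leave stale bits from the default trim. */
static uint32_t write_mask(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t reg = 0xb0;	/* default trim 0xB already programmed */
	uint32_t fused = 0x3;	/* efuse high nibble */

	printf("setbits:    0x%x\n", reg | (fused << HSTX_TRIM_SHIFT)); /* 0xb0: wrong */
	printf("write_mask: 0x%x\n", write_mask(reg, fused << HSTX_TRIM_SHIFT,
						HSTX_TRIM_MASK));       /* 0x30: right */
	return 0;
}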
index 467e8147972b0510cc285e972768e5f1e15ca212..9c85231a6dbcc856254fd75208bbe34d1784a7fb 100644 (file)
@@ -26,7 +26,8 @@ config PHY_UNIPHIER_USB3
 
 config PHY_UNIPHIER_PCIE
        tristate "Uniphier PHY driver for PCIe controller"
-       depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+       depends on ARCH_UNIPHIER || COMPILE_TEST
+       depends on OF && HAS_IOMEM
        default PCIE_UNIPHIER
        select GENERIC_PHY
        help
index 4ceb06f8a33c965aa51cb317b56e8a672b55a090..4edeb4cae72aa28ba251558a499ce5a13436d237 100644 (file)
@@ -830,7 +830,7 @@ static struct meson_bank meson_gxbb_periphs_banks[] = {
 
 static struct meson_bank meson_gxbb_aobus_banks[] = {
        /*   name    first      last       irq    pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0,  GPIOAO_13, 0, 13, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0,  GPIOAO_13, 0, 13, 0,  16, 0, 0,   0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson_gxbb_periphs_pinctrl_data = {
index 7dae1d7bf6b0a50f75c9d104f43f93972abaf27d..158f618f169570d07dcbd2b9850c72f7334ca495 100644 (file)
@@ -807,7 +807,7 @@ static struct meson_bank meson_gxl_periphs_banks[] = {
 
 static struct meson_bank meson_gxl_aobus_banks[] = {
        /*   name    first      last      irq   pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0,  GPIOAO_9, 0, 9, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0,  GPIOAO_9, 0, 9, 0,  16, 0, 0,   0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson_gxl_periphs_pinctrl_data = {
index f8b778a7d47174b902d398fba74ba1845d88b126..53d449076dee32bb64cf3f0093b4a7e9016b9fa7 100644 (file)
@@ -192,7 +192,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin,
                        dev_dbg(pc->dev, "pin %u: disable bias\n", pin);
 
                        meson_calc_reg_and_bit(bank, pin, REG_PULL, &reg, &bit);
-                       ret = regmap_update_bits(pc->reg_pull, reg,
+                       ret = regmap_update_bits(pc->reg_pullen, reg,
                                                 BIT(bit), 0);
                        if (ret)
                                return ret;
index c6d79315218fa69cadcdde9aa49d657d6916f5bb..86466173114da013ff7dff4e6a89195c9399a82d 100644 (file)
@@ -1053,7 +1053,7 @@ static struct meson_bank meson8_cbus_banks[] = {
 
 static struct meson_bank meson8_aobus_banks[] = {
        /*   name    first     last         irq    pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0, 16,  0,  0,  0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson8_cbus_pinctrl_data = {
index bb2a30964fc69a20bfe488a8904ebd6badaf8e71..647ad15d5c3c41ee538688ade979145efa459b9b 100644 (file)
@@ -906,7 +906,7 @@ static struct meson_bank meson8b_cbus_banks[] = {
 
 static struct meson_bank meson8b_aobus_banks[] = {
        /*   name    first     lastc        irq    pullen  pull    dir     out     in  */
-       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0,  0,  0, 16,  0,  0,  0, 16,  1,  0),
+       BANK("AO",   GPIOAO_0, GPIO_TEST_N, 0, 13, 0,  16, 0, 0,  0,  0,  0, 16,  1,  0),
 };
 
 static struct meson_pinctrl_data meson8b_cbus_pinctrl_data = {
index 504d252716f2e10db4914c8de651dff05106b82c..27e5dd47a01f9564fdff4c172be76b9694223e7e 100644 (file)
@@ -447,10 +447,9 @@ config PWM_TEGRA
 
 config  PWM_TIECAP
        tristate "ECAP PWM support"
-       depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE
+       depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
        help
-         PWM driver support for the ECAP APWM controller found on AM33XX
-         TI SOC
+         PWM driver support for the ECAP APWM controller found on TI SoCs
 
          To compile this driver as a module, choose M here: the module
          will be called pwm-tiecap.
index 5561b9e190f84a63513ff3b86ecbeef7461404e8..757230e1f575e618199d03be157f0b1bf864d243 100644 (file)
@@ -30,6 +30,7 @@ static const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
        .clk_rate = 19200000,
        .npwm = 1,
        .base_unit_bits = 16,
+       .other_devices_aml_touches_pwm_regs = true,
 };
 
 /* Broxton */
@@ -60,6 +61,7 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, lpwm);
 
+       dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);
 
@@ -74,13 +76,29 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
        return pwm_lpss_remove(lpwm);
 }
 
-static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops,
-                        pwm_lpss_suspend,
-                        pwm_lpss_resume);
+static int pwm_lpss_prepare(struct device *dev)
+{
+       struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
+
+       /*
+        * If another device's AML code touches the PWM regs on
+        * suspend/resume, force runtime-resume the PWM controller to
+        * allow this.
+        */
+       if (lpwm->info->other_devices_aml_touches_pwm_regs)
+               return 0; /* Force runtime-resume */
+
+       return 1; /* If runtime-suspended leave as is */
+}
+
+static const struct dev_pm_ops pwm_lpss_platform_pm_ops = {
+       .prepare = pwm_lpss_prepare,
+       SET_SYSTEM_SLEEP_PM_OPS(pwm_lpss_suspend, pwm_lpss_resume)
+};
 
 static const struct acpi_device_id pwm_lpss_acpi_match[] = {
        { "80860F09", (unsigned long)&pwm_lpss_byt_info },
        { "80862288", (unsigned long)&pwm_lpss_bsw_info },
+       { "80862289", (unsigned long)&pwm_lpss_bsw_info },
        { "80865AC8", (unsigned long)&pwm_lpss_bxt_info },
        { },
 };
index 4721a264bac2580cf8d21ee54396e0b494f1c9dc..2ac3a2aa9e53f5cab594fccdb042c1d77bcafa87 100644 (file)
 /* Size of each PWM register space if multiple */
 #define PWM_SIZE                       0x400
 
-#define MAX_PWMS                       4
-
-struct pwm_lpss_chip {
-       struct pwm_chip chip;
-       void __iomem *regs;
-       const struct pwm_lpss_boardinfo *info;
-       u32 saved_ctrl[MAX_PWMS];
-};
-
 static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
 {
        return container_of(chip, struct pwm_lpss_chip, chip);
@@ -97,7 +88,7 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
        unsigned long long on_time_div;
        unsigned long c = lpwm->info->clk_rate, base_unit_range;
        unsigned long long base_unit, freq = NSEC_PER_SEC;
-       u32 ctrl;
+       u32 orig_ctrl, ctrl;
 
        do_div(freq, period_ns);
 
@@ -114,13 +105,17 @@ static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
        do_div(on_time_div, period_ns);
        on_time_div = 255ULL - on_time_div;
 
-       ctrl = pwm_lpss_read(pwm);
+       orig_ctrl = ctrl = pwm_lpss_read(pwm);
        ctrl &= ~PWM_ON_TIME_DIV_MASK;
        ctrl &= ~(base_unit_range << PWM_BASE_UNIT_SHIFT);
        base_unit &= base_unit_range;
        ctrl |= (u32) base_unit << PWM_BASE_UNIT_SHIFT;
        ctrl |= on_time_div;
-       pwm_lpss_write(pwm, ctrl);
+
+       if (orig_ctrl != ctrl) {
+               pwm_lpss_write(pwm, ctrl);
+               pwm_lpss_write(pwm, ctrl | PWM_SW_UPDATE);
+       }
 }
 
 static inline void pwm_lpss_cond_enable(struct pwm_device *pwm, bool cond)
@@ -144,7 +139,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                                return ret;
                        }
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
                        pwm_lpss_cond_enable(pwm, lpwm->info->bypass == false);
                        ret = pwm_lpss_wait_for_update(pwm);
                        if (ret) {
@@ -157,7 +151,6 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                        if (ret)
                                return ret;
                        pwm_lpss_prepare(lpwm, pwm, state->duty_cycle, state->period);
-                       pwm_lpss_write(pwm, pwm_lpss_read(pwm) | PWM_SW_UPDATE);
                        return pwm_lpss_wait_for_update(pwm);
                }
        } else if (pwm_is_enabled(pwm)) {
@@ -168,8 +161,42 @@ static int pwm_lpss_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        return 0;
 }
 
+/* This function gets called once from pwmchip_add to get the initial state */
+static void pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+                              struct pwm_state *state)
+{
+       struct pwm_lpss_chip *lpwm = to_lpwm(chip);
+       unsigned long base_unit_range;
+       unsigned long long base_unit, freq, on_time_div;
+       u32 ctrl;
+
+       base_unit_range = BIT(lpwm->info->base_unit_bits);
+
+       ctrl = pwm_lpss_read(pwm);
+       on_time_div = 255 - (ctrl & PWM_ON_TIME_DIV_MASK);
+       base_unit = (ctrl >> PWM_BASE_UNIT_SHIFT) & (base_unit_range - 1);
+
+       freq = base_unit * lpwm->info->clk_rate;
+       do_div(freq, base_unit_range);
+       if (freq == 0)
+               state->period = NSEC_PER_SEC;
+       else
+               state->period = NSEC_PER_SEC / (unsigned long)freq;
+
+       on_time_div *= state->period;
+       do_div(on_time_div, 255);
+       state->duty_cycle = on_time_div;
+
+       state->polarity = PWM_POLARITY_NORMAL;
+       state->enabled = !!(ctrl & PWM_ENABLE);
+
+       if (state->enabled)
+               pm_runtime_get(chip->dev);
+}
+
 static const struct pwm_ops pwm_lpss_ops = {
        .apply = pwm_lpss_apply,
+       .get_state = pwm_lpss_get_state,
        .owner = THIS_MODULE,
 };
 
@@ -214,6 +241,12 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe);
 
 int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
 {
+       int i;
+
+       for (i = 0; i < lpwm->info->npwm; i++) {
+               if (pwm_is_enabled(&lpwm->chip.pwms[i]))
+                       pm_runtime_put(lpwm->chip.dev);
+       }
        return pwmchip_remove(&lpwm->chip);
 }
 EXPORT_SYMBOL_GPL(pwm_lpss_remove);
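
pwm_lpss_get_state() reverses the prepare() arithmetic: base_unit scales the input clock down to the output frequency, and on_time_div is an inverted 8-bit duty ratio. A worked example with the Cherry Trail parameters from this file (clk_rate = 19200000, base_unit_bits = 16; PWM_BASE_UNIT_SHIFT assumed to be 8 as in the driver):

#include <stdint.h>
#include <stdio.h>

#define PWM_ON_TIME_DIV_MASK	0x000000FFu
#define PWM_BASE_UNIT_SHIFT	8	/* assumed, as in pwm-lpss.c */

int main(void)
{
	const unsigned long long clk_rate = 19200000;	/* bsw_info above */
	const unsigned int base_unit_bits = 16;		/* bsw_info above */
	const unsigned long long base_unit_range = 1ULL << base_unit_bits;

	uint32_t ctrl = (0x100 << PWM_BASE_UNIT_SHIFT) | 0x7f; /* sample reg */
	unsigned long long on_time_div = 255 - (ctrl & PWM_ON_TIME_DIV_MASK);
	unsigned long long base_unit = (ctrl >> PWM_BASE_UNIT_SHIFT) &
				       (base_unit_range - 1);

	unsigned long long freq = base_unit * clk_rate / base_unit_range;
	unsigned long long period = freq ? 1000000000ULL / freq : 1000000000ULL;
	unsigned long long duty = on_time_div * period / 255;

	printf("period %llu ns, duty %llu ns\n", period, duty); /* 13333, 6692 */
	return 0;
}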
index 7a4238ad1fcb1f25390032019170759c6666ae83..3236be835bd9c948b929cd066ae74feaff2eed73 100644 (file)
 #include <linux/device.h>
 #include <linux/pwm.h>
 
-struct pwm_lpss_chip;
+#define MAX_PWMS                       4
+
+struct pwm_lpss_chip {
+       struct pwm_chip chip;
+       void __iomem *regs;
+       const struct pwm_lpss_boardinfo *info;
+       u32 saved_ctrl[MAX_PWMS];
+};
 
 struct pwm_lpss_boardinfo {
        unsigned long clk_rate;
        unsigned int npwm;
        unsigned long base_unit_bits;
        bool bypass;
+       /*
+        * On some devices the _PS0/_PS3 AML code of the GPU (GFX0) device
+        * messes with the PWM0 controllers state,
+        */
+       bool other_devices_aml_touches_pwm_regs;
 };
 
 struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
index 748f614d53755daabdd9f1529d7ba9cd602cb611..a41812fc6f95733a2568eb0d6b5c7f44fe5a1741 100644 (file)
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * R-Car PWM Timer driver
  *
  * Copyright (C) 2015 Renesas Electronics Corporation
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
  */
 
 #include <linux/clk.h>
index 29267d12fb4c9d3cf9283c38b114b00430704b09..4a855a21b782dea30c8b6e555a76132a3e306deb 100644 (file)
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * R-Mobile TPU PWM driver
  *
  * Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/clk.h>
index f8ebbece57b71ad5f03501304b8144b1341cbe7f..48c4595a0ffcecb815bf641a7ca7ae34f77475cd 100644 (file)
@@ -300,7 +300,6 @@ static const struct of_device_id tegra_pwm_of_match[] = {
        { .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc },
        { }
 };
-
 MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
 
 static const struct dev_pm_ops tegra_pwm_pm_ops = {
index 7c71cdb8a9d8f92102b5875d120b83a3364fb6d0..ceb233dd604840bd2370dbe3c0446a751f22fe6d 100644 (file)
@@ -249,6 +249,7 @@ static void pwm_export_release(struct device *child)
 static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
 {
        struct pwm_export *export;
+       char *pwm_prop[2];
        int ret;
 
        if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags))
@@ -263,7 +264,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
        export->pwm = pwm;
        mutex_init(&export->lock);
 
-       export->child.class = parent->class;
        export->child.release = pwm_export_release;
        export->child.parent = parent;
        export->child.devt = MKDEV(0, 0);
@@ -277,6 +277,10 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
                export = NULL;
                return ret;
        }
+       pwm_prop[0] = kasprintf(GFP_KERNEL, "EXPORT=pwm%u", pwm->hwpwm);
+       pwm_prop[1] = NULL;
+       kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop);
+       kfree(pwm_prop[0]);
 
        return 0;
 }
@@ -289,6 +293,7 @@ static int pwm_unexport_match(struct device *child, void *data)
 static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm)
 {
        struct device *child;
+       char *pwm_prop[2];
 
        if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags))
                return -ENODEV;
@@ -297,6 +302,11 @@ static int pwm_unexport_child(struct device *parent, struct pwm_device *pwm)
        if (!child)
                return -ENODEV;
 
+       pwm_prop[0] = kasprintf(GFP_KERNEL, "UNEXPORT=pwm%u", pwm->hwpwm);
+       pwm_prop[1] = NULL;
+       kobject_uevent_env(&parent->kobj, KOBJ_CHANGE, pwm_prop);
+       kfree(pwm_prop[0]);
+
        /* for device_find_child() */
        put_device(child);
        device_unregister(child);
index e79f2a181ad24217a3e3bc232593184b82d494fd..b9ec4a16db1f6b6fd113c5661a28aa0e9153eeaa 100644 (file)
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
        tv64.tv_sec = rtc_tm_to_time64(&tm);
 
 #if BITS_PER_LONG == 32
-       if (tv64.tv_sec > INT_MAX)
+       if (tv64.tv_sec > INT_MAX) {
+               err = -ERANGE;
                goto err_read;
+       }
 #endif
 
        err = do_settimeofday64(&tv64);
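
The rtc_hctosys() hunk now reports the 32-bit limitation instead of silently skipping the clock set: on BITS_PER_LONG == 32 builds, seconds past INT_MAX (2038-01-19) cannot be represented. The guard in isolation:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t tv_sec = INT64_C(0x80000000); /* first second past the cutoff */

	/* Mirrors the BITS_PER_LONG == 32 branch above. */
	if (tv_sec > INT_MAX)
		printf("would return -ERANGE instead of setting the clock\n");
	return 0;
}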
index df0c5776d49bb6f5553dbbf2bc97b3235beae902..a5a19ff10535463d91d39d69ced1f13110ff139d 100644 (file)
@@ -257,6 +257,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
        struct cmos_rtc *cmos = dev_get_drvdata(dev);
        unsigned char   rtc_control;
 
+       /* This is not only an rtc_op, but is also called directly */
        if (!is_valid_irq(cmos->irq))
                return -EIO;
 
@@ -452,6 +453,7 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
        unsigned char mon, mday, hrs, min, sec, rtc_control;
        int ret;
 
+       /* This is not only an rtc_op, but is also called directly */
        if (!is_valid_irq(cmos->irq))
                return -EIO;
 
@@ -516,9 +518,6 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
        struct cmos_rtc *cmos = dev_get_drvdata(dev);
        unsigned long   flags;
 
-       if (!is_valid_irq(cmos->irq))
-               return -EINVAL;
-
        spin_lock_irqsave(&rtc_lock, flags);
 
        if (enabled)
@@ -579,6 +578,12 @@ static const struct rtc_class_ops cmos_rtc_ops = {
        .alarm_irq_enable       = cmos_alarm_irq_enable,
 };
 
+static const struct rtc_class_ops cmos_rtc_ops_no_alarm = {
+       .read_time              = cmos_read_time,
+       .set_time               = cmos_set_time,
+       .proc                   = cmos_procfs,
+};
+
 /*----------------------------------------------------------------*/
 
 /*
@@ -855,9 +860,12 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
                        dev_dbg(dev, "IRQ %d is already in use\n", rtc_irq);
                        goto cleanup1;
                }
+
+               cmos_rtc.rtc->ops = &cmos_rtc_ops;
+       } else {
+               cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
        }
 
-       cmos_rtc.rtc->ops = &cmos_rtc_ops;
        cmos_rtc.rtc->nvram_old_abi = true;
        retval = rtc_register_device(cmos_rtc.rtc);
        if (retval)
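The rtc-cmos hunks above replace per-callback is_valid_irq() checks with two ops tables: when no alarm interrupt is available the driver registers cmos_rtc_ops_no_alarm, and (as I read drivers/rtc/interface.c) the RTC core then rejects alarm operations itself with -EINVAL because the callbacks are absent. Condensed selection logic (have_alarm_irq stands in for the actual probe-time condition):

    /* Sketch of the ops-table selection after this patch: */
    if (have_alarm_irq)
            cmos_rtc.rtc->ops = &cmos_rtc_ops;          /* full set, incl. alarms */
    else
            cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm; /* core answers alarm
                                                           calls with -EINVAL */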
index 2751dba850c614f452d4f297e8bdbb461e9ae15d..3e1abb4554721c496f2c16532e9ed12590946f1b 100644 (file)
@@ -213,7 +213,7 @@ static int hid_rtc_read_time(struct device *dev, struct rtc_time *tm)
        /* get a report with all values through requesting one value */
        sensor_hub_input_attr_get_raw_value(time_state->common_attributes.hsdev,
                        HID_USAGE_SENSOR_TIME, hid_time_addresses[0],
-                       time_state->info[0].report_id, SENSOR_HUB_SYNC);
+                       time_state->info[0].report_id, SENSOR_HUB_SYNC, false);
        /* wait for all values (event) */
        ret = wait_for_completion_killable_timeout(
                        &time_state->comp_last_time, HZ*6);
index 9f99a0966550b5e77672e83cc707eda26b22ede5..7cb786d76e3c1da81bf5055bf01375546a74e9ff 100644 (file)
@@ -303,6 +303,9 @@ static int pcf2127_i2c_gather_write(void *context,
        memcpy(buf + 1, val, val_size);
 
        ret = i2c_master_send(client, buf, val_size + 1);
+
+       kfree(buf);
+
        if (ret != val_size + 1)
                return ret < 0 ? ret : -EIO;
 
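The pcf2127 hunk closes a leak: the gather-write bounce buffer was never freed when i2c_master_send() failed. Freeing unconditionally, before inspecting the result, covers every path. General shape of the fix (fill() and len are illustrative):

    buf = kmalloc(len, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    fill(buf);
    ret = i2c_master_send(client, buf, len);
    kfree(buf);                     /* free on success and failure alike */
    if (ret != len)
            return ret < 0 ? ret : -EIO;
    return 0;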
index fd77e46eb3b21520f2bf155612aed0248e773884..70a006ba4d050d2d3063f6653e610372e7d2d074 100644 (file)
@@ -387,8 +387,10 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
                 * orb specified one of the unsupported formats, we defer
                 * checking for IDAWs in unsupported formats to here.
                 */
-               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+               if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw)) {
+                       kfree(p);
                        return -EOPNOTSUPP;
+               }
 
                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;
@@ -528,7 +530,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
 
        ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
        if (ret < 0)
-               goto out_init;
+               goto out_unpin;
 
        /* Translate this direct ccw to a idal ccw. */
        idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
index f47d16b5810b9154c7b8bd852039d1cdc89b33d3..a10cec0e86eb495ffd45f3854a09e1a76bf3e598 100644 (file)
@@ -22,7 +22,7 @@
 #include "vfio_ccw_private.h"
 
 struct workqueue_struct *vfio_ccw_work_q;
-struct kmem_cache *vfio_ccw_io_region;
+static struct kmem_cache *vfio_ccw_io_region;
 
 /*
  * Helpers
@@ -134,14 +134,14 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
        if (ret)
                goto out_free;
 
-       ret = vfio_ccw_mdev_reg(sch);
-       if (ret)
-               goto out_disable;
-
        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        atomic_set(&private->avail, 1);
        private->state = VFIO_CCW_STATE_STANDBY;
 
+       ret = vfio_ccw_mdev_reg(sch);
+       if (ret)
+               goto out_disable;
+
        return 0;
 
 out_disable:
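The vfio_ccw_sch_probe hunk is an ordering fix: registering the mdev before io_work, avail and state were initialized left a window in which a callback triggered by registration could observe a half-initialized private structure. The reorder applies the usual publish-last rule; a generic sketch (register_with_bus() is a hypothetical stand-in for vfio_ccw_mdev_reg()):

    INIT_WORK(&private->io_work, io_todo);  /* 1. make the object usable */
    atomic_set(&private->avail, 1);
    private->state = STATE_STANDBY;

    ret = register_with_bus(sch);           /* 2. only then publish it;  */
    if (ret)                                /*    callbacks may run now  */
            goto out_disable;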
index 048665e4f13d4695ed9c85c438e7f6b36bc3f7c9..9f5a201c4c87861f6b77c6081b03a8d58d571366 100644 (file)
@@ -775,6 +775,8 @@ static int ap_device_probe(struct device *dev)
                drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
                if (!!devres != !!drvres)
                        return -ENODEV;
+               /* (re-)init queue's state machine */
+               ap_queue_reinit_state(to_ap_queue(dev));
        }
 
        /* Add queue/card to list of active queues/cards */
@@ -807,6 +809,8 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       if (is_queue_dev(dev))
+               ap_queue_remove(to_ap_queue(dev));
        if (ap_drv->remove)
                ap_drv->remove(ap_dev);
 
@@ -1444,10 +1448,6 @@ static void ap_scan_bus(struct work_struct *unused)
                        aq->ap_dev.device.parent = &ac->ap_dev.device;
                        dev_set_name(&aq->ap_dev.device,
                                     "%02x.%04x", id, dom);
-                       /* Start with a device reset */
-                       spin_lock_bh(&aq->lock);
-                       ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
-                       spin_unlock_bh(&aq->lock);
                        /* Register device */
                        rc = device_register(&aq->ap_dev.device);
                        if (rc) {
index 3eed1b36c876d1fde221a38e676544f2c1c3679f..bfc66e4a9de14de0fd4d4da97dedfd53f605ddea 100644 (file)
@@ -254,6 +254,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
+void ap_queue_reinit_state(struct ap_queue *aq);
 
 struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
                               int comp_device_type, unsigned int functions);
index 66f7334bcb03214307fb4f3fecebf91dbb3ba341..0aa4b3ccc948c10cbbd9ac904d6da353e341fd78 100644 (file)
@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
 {
        ap_flush_queue(aq);
        del_timer_sync(&aq->timeout);
+
+       /* ap_zapq(): reset the queue to zero, which also clears the irq registration */
+       spin_lock_bh(&aq->lock);
+       ap_zapq(aq->qid);
+       aq->state = AP_STATE_BORKED;
+       spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_remove);
+
+void ap_queue_reinit_state(struct ap_queue *aq)
+{
+       spin_lock_bh(&aq->lock);
+       aq->state = AP_STATE_RESET_START;
+       ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+       spin_unlock_bh(&aq->lock);
+}
+EXPORT_SYMBOL(ap_queue_reinit_state);
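Together with the ap_bus.c hunks above, this moves the queue reset from bus scan time into the probe/remove pair. The resulting lifecycle, summarized from the diffs:

    /* AP queue lifecycle after this series (summary of the hunks above):
     *   ap_scan_bus()      -> device_register() only; no early reset
     *   ap_device_probe()  -> ap_queue_reinit_state(): set
     *                         AP_STATE_RESET_START, then drive the state
     *                         machine via ap_wait(ap_sm_event(...))
     *   ap_device_remove() -> ap_queue_remove(): flush the queue, delete
     *                         the timeout timer, ap_zapq(), then mark the
     *                         queue AP_STATE_BORKED under aq->lock
     */

This is also why the three zcrypt_*_queue_remove() hunks below drop their own ap_queue_remove() calls: the bus-level remove now does it exactly once.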
index 146f54f5cbb817a8bb3f40a7c38add038484ebc0..c50f3e86cc7487377e09211264a4d4cc43ec4b61 100644 (file)
@@ -196,7 +196,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
        struct zcrypt_queue *zq = aq->private;
 
-       ap_queue_remove(aq);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
index 546f6767673481e52ee13149af1df50545e2350e..35c7c6672713b70b33959b48322de5c0254c4a29 100644 (file)
@@ -251,7 +251,6 @@ static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
        struct zcrypt_queue *zq = aq->private;
 
-       ap_queue_remove(aq);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
index f9d4c6c7521d72705977a6386e0746a556e1ea8b..582ffa7e0f18793f83739978fa7143dd44ec382d 100644 (file)
@@ -275,7 +275,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
        struct ap_queue *aq = to_ap_queue(&ap_dev->device);
        struct zcrypt_queue *zq = aq->private;
 
-       ap_queue_remove(aq);
        if (zq)
                zcrypt_queue_unregister(zq);
 }
index f96ec68af2e58aabfa0bdb71dde1f05ff8ad4151..dcbf5c857743782871d9be3dfe51f9fee3d83d91 100644 (file)
@@ -415,9 +415,9 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
                        break;
 
                clear_bit_inv(bit, bv);
+               ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
                barrier();
                smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
-               ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
        }
 
        if (ism->sba->e) {
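The ism_handle_irq reorder matters because clearing dmbe_mask re-arms the event source: doing it only after smcd_handle_irq() left a window in which a DMB written while the handler ran would be acknowledged away unseen. Re-arm-before-handle, generically (helper names hypothetical):

    ack_and_rearm(dev, bit);   /* clear the latch first...               */
    barrier();                 /* ...make the store visible...           */
    consume_events(dev, bit);  /* ...so an event arriving mid-handler
                                  latches again instead of being lost */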
index 6843bc7ee9f24525789e3bd5100ee8278fd3e413..04e294d1d16d7ea68f8c9aea0d2f56bc5cb6ec11 100644 (file)
@@ -87,6 +87,18 @@ struct qeth_dbf_info {
 #define SENSE_RESETTING_EVENT_BYTE 1
 #define SENSE_RESETTING_EVENT_FLAG 0x80
 
+static inline u32 qeth_get_device_id(struct ccw_device *cdev)
+{
+       struct ccw_dev_id dev_id;
+       u32 id;
+
+       ccw_device_get_id(cdev, &dev_id);
+       id = dev_id.devno;
+       id |= (u32) (dev_id.ssid << 16);
+
+       return id;
+}
+
 /*
  * Common IO related definitions
  */
@@ -97,7 +109,8 @@ struct qeth_dbf_info {
 #define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
 #define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
 #define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
-#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+#define CCW_DEVID(cdev)                (qeth_get_device_id(cdev))
+#define CARD_DEVID(card)       (CCW_DEVID(CARD_RDEV(card)))
 
 /**
  * card stuff
@@ -830,6 +843,11 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline bool qeth_netdev_is_registered(struct net_device *dev)
+{
+       return dev->netdev_ops != NULL;
+}
+
 static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
                                          unsigned int elements)
 {
@@ -973,7 +991,7 @@ int qeth_wait_for_threads(struct qeth_card *, unsigned long);
 int qeth_do_run_thread(struct qeth_card *, unsigned long);
 void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
 void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *);
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
 void qeth_print_status_message(struct qeth_card *);
 int qeth_init_qdio_queues(struct qeth_card *);
 int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
@@ -1028,11 +1046,6 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 void qeth_trace_features(struct qeth_card *);
 void qeth_close_dev(struct qeth_card *);
-int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
-                         long,
-                         int (*reply_cb)(struct qeth_card *,
-                                         struct qeth_reply *, unsigned long),
-                         void *);
 int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
 struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 enum qeth_ipa_funcs,
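The new qeth_get_device_id() packs the subchannel-set id above the 16-bit device number, so a single %x in the reworked debug messages identifies the device. Worked example:

    /* bus id 0.0.d100 -> ssid 0, devno 0xd100 -> id 0x0d100, "%x" prints d100
     * bus id 0.1.d100 -> ssid 1, devno 0xd100 -> id 0x1d100
     */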
index 3274f13aad57612967cbcfd832a2349572a0b122..254065271867146bc91db048116371ac5d3b2867 100644 (file)
@@ -167,6 +167,8 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
                                return "OSD_1000";
                        case QETH_LINK_TYPE_10GBIT_ETH:
                                return "OSD_10GIG";
+                       case QETH_LINK_TYPE_25GBIT_ETH:
+                               return "OSD_25GIG";
                        case QETH_LINK_TYPE_LANE_ETH100:
                                return "OSD_FE_LANE";
                        case QETH_LINK_TYPE_LANE_TR:
@@ -554,8 +556,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
        if (!iob) {
                dev_warn(&card->gdev->dev, "The qeth device driver "
                        "failed to recover an error on the device\n");
-               QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
-                       "available\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
+                                CARD_DEVID(card));
                return -ENOMEM;
        }
        qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
@@ -563,8 +565,8 @@ static int __qeth_issue_next_read(struct qeth_card *card)
        rc = ccw_device_start(channel->ccwdev, channel->ccw,
                              (addr_t) iob, 0, 0);
        if (rc) {
-               QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
-                       "rc=%i\n", dev_name(&card->gdev->dev), rc);
+               QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+                                rc, CARD_DEVID(card));
                atomic_set(&channel->irq_pending, 0);
                card->read_or_write_problem = 1;
                qeth_schedule_recovery(card);
@@ -613,16 +615,14 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
        const char *ipa_name;
        int com = cmd->hdr.command;
        ipa_name = qeth_get_ipa_cmd_name(com);
+
        if (rc)
-               QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
-                               "x%X \"%s\"\n",
-                               ipa_name, com, dev_name(&card->gdev->dev),
-                               QETH_CARD_IFNAME(card), rc,
-                               qeth_get_ipa_msg(rc));
+               QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
+                                ipa_name, com, CARD_DEVID(card), rc,
+                                qeth_get_ipa_msg(rc));
        else
-               QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
-                               ipa_name, com, dev_name(&card->gdev->dev),
-                               QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
+                                ipa_name, com, CARD_DEVID(card));
 }
 
 static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
@@ -711,7 +711,7 @@ static int qeth_check_idx_response(struct qeth_card *card,
 
        QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
        if ((buffer[2] & 0xc0) == 0xc0) {
-               QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+               QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
                                 buffer[4]);
                QETH_CARD_TEXT(card, 2, "ckidxres");
                QETH_CARD_TEXT(card, 2, " idxterm");
@@ -972,8 +972,8 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
                QETH_CARD_TEXT(card, 2, "CGENCHK");
                dev_warn(&cdev->dev, "The qeth device driver "
                        "failed to recover an error on the device\n");
-               QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
-                       dev_name(&cdev->dev), dstat, cstat);
+               QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
+                                CCW_DEVID(cdev), dstat, cstat);
                print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
                                16, 1, irb, 64, 1);
                return 1;
@@ -1013,8 +1013,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
 
        switch (PTR_ERR(irb)) {
        case -EIO:
-               QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
-                       dev_name(&cdev->dev));
+               QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
+                                CCW_DEVID(cdev));
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
                break;
@@ -1031,8 +1031,8 @@ static long qeth_check_irb_error(struct qeth_card *card,
                }
                break;
        default:
-               QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
-                       dev_name(&cdev->dev), PTR_ERR(irb));
+               QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
+                                PTR_ERR(irb), CCW_DEVID(cdev));
                QETH_CARD_TEXT(card, 2, "ckirberr");
                QETH_CARD_TEXT(card, 2, "  rc???");
        }
@@ -1114,9 +1114,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                        dev_warn(&channel->ccwdev->dev,
                                "The qeth device driver failed to recover "
                                "an error on the device\n");
-                       QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
-                               "0x%X dstat 0x%X\n",
-                               dev_name(&channel->ccwdev->dev), cstat, dstat);
+                       QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
+                                        CCW_DEVID(channel->ccwdev), cstat,
+                                        dstat);
                        print_hex_dump(KERN_WARNING, "qeth: irb ",
                                DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
                        print_hex_dump(KERN_WARNING, "qeth: sense data ",
@@ -1890,8 +1890,8 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
        if (channel->state != CH_STATE_ACTIVATING) {
                dev_warn(&channel->ccwdev->dev, "The qeth device driver"
                        " failed to recover an error on the device\n");
-               QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
-                       dev_name(&channel->ccwdev->dev));
+               QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
+                                CCW_DEVID(channel->ccwdev));
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
                return -ETIME;
        }
@@ -1926,17 +1926,15 @@ static void qeth_idx_write_cb(struct qeth_card *card,
                                "The adapter is used exclusively by another "
                                "host\n");
                else
-                       QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
-                               " negative reply\n",
-                               dev_name(&channel->ccwdev->dev));
+                       QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+                                        CCW_DEVID(channel->ccwdev));
                goto out;
        }
        memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
        if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
-               QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
-                       "function level mismatch (sent: 0x%x, received: "
-                       "0x%x)\n", dev_name(&channel->ccwdev->dev),
-                       card->info.func_level, temp);
+               QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+                                CCW_DEVID(channel->ccwdev),
+                                card->info.func_level, temp);
                goto out;
        }
        channel->state = CH_STATE_UP;
@@ -1973,9 +1971,8 @@ static void qeth_idx_read_cb(struct qeth_card *card,
                                "insufficient authorization\n");
                        break;
                default:
-                       QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
-                               " negative reply\n",
-                               dev_name(&channel->ccwdev->dev));
+                       QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+                                        CCW_DEVID(channel->ccwdev));
                }
                QETH_CARD_TEXT_(card, 2, "idxread%c",
                        QETH_IDX_ACT_CAUSE_CODE(iob->data));
@@ -1984,10 +1981,9 @@ static void qeth_idx_read_cb(struct qeth_card *card,
 
        memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
        if (temp != qeth_peer_func_level(card->info.func_level)) {
-               QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
-                       "level mismatch (sent: 0x%x, received: 0x%x)\n",
-                       dev_name(&channel->ccwdev->dev),
-                       card->info.func_level, temp);
+               QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+                                CCW_DEVID(channel->ccwdev),
+                                card->info.func_level, temp);
                goto out;
        }
        memcpy(&card->token.issuer_rm_r,
@@ -2096,9 +2092,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
                                      (addr_t) iob, 0, 0, event_timeout);
        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
        if (rc) {
-               QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
-                       "ccw_device_start rc = %i\n",
-                       dev_name(&channel->ccwdev->dev), rc);
+               QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
+                                CARD_DEVID(card), rc);
                QETH_CARD_TEXT_(card, 2, " err%d", rc);
                spin_lock_irq(&card->lock);
                list_del_init(&reply->list);
@@ -2853,8 +2848,8 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
        } else {
                dev_warn(&card->gdev->dev,
                         "The qeth driver ran out of channel command buffers\n");
-               QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
-                                dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
+                                CARD_DEVID(card));
        }
 
        return iob;
@@ -2989,10 +2984,9 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
                return 0;
        default:
                if (cmd->hdr.return_code) {
-                       QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
-                                               "rc=%d\n",
-                                               dev_name(&card->gdev->dev),
-                                               cmd->hdr.return_code);
+                       QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+                                        CARD_DEVID(card),
+                                        cmd->hdr.return_code);
                        return 0;
                }
        }
@@ -3004,8 +2998,8 @@ static int qeth_query_ipassists_cb(struct qeth_card *card,
                card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
                card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
        } else
-               QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
-                                       "\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
+                                CARD_DEVID(card));
        return 0;
 }
 
@@ -4297,10 +4291,9 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                cmd->data.setadapterparms.hdr.return_code);
        if (cmd->data.setadapterparms.hdr.return_code !=
                                                SET_ACCESS_CTRL_RC_SUCCESS)
-               QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
-                               card->gdev->dev.kobj.name,
-                               access_ctrl_req->subcmd_code,
-                               cmd->data.setadapterparms.hdr.return_code);
+               QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
+                                access_ctrl_req->subcmd_code, CARD_DEVID(card),
+                                cmd->data.setadapterparms.hdr.return_code);
        switch (cmd->data.setadapterparms.hdr.return_code) {
        case SET_ACCESS_CTRL_RC_SUCCESS:
                if (card->options.isolation == ISOLATION_MODE_NONE) {
@@ -4312,14 +4305,14 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
                }
                break;
        case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
-               QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
-                               "deactivated\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
+                                CARD_DEVID(card));
                if (fallback)
                        card->options.isolation = card->options.prev_isolation;
                break;
        case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
-               QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
-                               " activated\n", dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
+                                CARD_DEVID(card));
                if (fallback)
                        card->options.isolation = card->options.prev_isolation;
                break;
@@ -4405,10 +4398,8 @@ int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
                rc = qeth_setadpparms_set_access_ctrl(card,
                        card->options.isolation, fallback);
                if (rc) {
-                       QETH_DBF_MESSAGE(3,
-                               "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
-                               card->gdev->dev.kobj.name,
-                               rc);
+                       QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL, %d) on device %x: send failed\n",
+                                        rc, CARD_DEVID(card));
                        rc = -EOPNOTSUPP;
                }
        } else if (card->options.isolation != ISOLATION_MODE_NONE) {
@@ -4443,7 +4434,8 @@ static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
                rc = BMCR_FULLDPLX;
                if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
                    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
-                   (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+                   (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
+                   (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
                        rc |= BMCR_SPEED100;
                break;
        case MII_BMSR: /* Basic mode status register */
@@ -4526,8 +4518,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_arp_query_info *qinfo;
-       struct qeth_snmp_cmd *snmp;
        unsigned char *data;
+       void *snmp_data;
        __u16 data_len;
 
        QETH_CARD_TEXT(card, 3, "snpcmdcb");
@@ -4535,7 +4527,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
        cmd = (struct qeth_ipa_cmd *) sdata;
        data = (unsigned char *)((char *)cmd - reply->offset);
        qinfo = (struct qeth_arp_query_info *) reply->param;
-       snmp = &cmd->data.setadapterparms.data.snmp;
 
        if (cmd->hdr.return_code) {
                QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@@ -4548,10 +4539,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
                return 0;
        }
        data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
-       if (cmd->data.setadapterparms.hdr.seq_no == 1)
-               data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
-       else
-               data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
+       if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+               snmp_data = &cmd->data.setadapterparms.data.snmp;
+               data_len -= offsetof(struct qeth_ipa_cmd,
+                                    data.setadapterparms.data.snmp);
+       } else {
+               snmp_data = &cmd->data.setadapterparms.data.snmp.request;
+               data_len -= offsetof(struct qeth_ipa_cmd,
+                                    data.setadapterparms.data.snmp.request);
+       }
 
        /* check if there is enough room in userspace */
        if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@@ -4564,16 +4560,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
        QETH_CARD_TEXT_(card, 4, "sseqn%i",
                cmd->data.setadapterparms.hdr.seq_no);
        /*copy entries to user buffer*/
-       if (cmd->data.setadapterparms.hdr.seq_no == 1) {
-               memcpy(qinfo->udata + qinfo->udata_offset,
-                      (char *)snmp,
-                      data_len + offsetof(struct qeth_snmp_cmd, data));
-               qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
-       } else {
-               memcpy(qinfo->udata + qinfo->udata_offset,
-                      (char *)&snmp->request, data_len);
-       }
+       memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
        qinfo->udata_offset += data_len;
+
        /* check if all replies received ... */
                QETH_CARD_TEXT_(card, 4, "srtot%i",
                               cmd->data.setadapterparms.hdr.used_total);
@@ -4634,8 +4623,8 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
        rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
                                    qeth_snmp_command_cb, (void *)&qinfo);
        if (rc)
-               QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
-                          QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
+                                CARD_DEVID(card), rc);
        else {
                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
                        rc = -EFAULT;
@@ -4869,8 +4858,8 @@ static void qeth_determine_capabilities(struct qeth_card *card)
 
        rc = qeth_read_conf_data(card, (void **) &prcd, &length);
        if (rc) {
-               QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
-                       dev_name(&card->gdev->dev), rc);
+               QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
+                                CARD_DEVID(card), rc);
                QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
                goto out_offline;
        }
@@ -5086,7 +5075,7 @@ static struct ccw_driver qeth_ccw_driver = {
        .remove = ccwgroup_remove_ccwdev,
 };
 
-int qeth_core_hardsetup_card(struct qeth_card *card)
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
 {
        int retries = 3;
        int rc;
@@ -5096,8 +5085,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
        qeth_update_from_chp_desc(card);
 retry:
        if (retries < 3)
-               QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
-                       dev_name(&card->gdev->dev));
+               QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
+                                CARD_DEVID(card));
        rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
        ccw_device_set_offline(CARD_DDEV(card));
        ccw_device_set_offline(CARD_WDEV(card));
@@ -5161,13 +5150,20 @@ retriable:
                if (rc == IPA_RC_LAN_OFFLINE) {
                        dev_warn(&card->gdev->dev,
                                "The LAN is offline\n");
-                       netif_carrier_off(card->dev);
+                       *carrier_ok = false;
                } else {
                        rc = -ENODEV;
                        goto out;
                }
        } else {
-               netif_carrier_on(card->dev);
+               *carrier_ok = true;
+       }
+
+       if (qeth_netdev_is_registered(card->dev)) {
+               if (*carrier_ok)
+                       netif_carrier_on(card->dev);
+               else
+                       netif_carrier_off(card->dev);
        }
 
        card->options.ipa4.supported_funcs = 0;
@@ -5201,8 +5197,8 @@ retriable:
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
                "an error on the device\n");
-       QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
-               dev_name(&card->gdev->dev), rc);
+       QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
+                        CARD_DEVID(card), rc);
        return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
@@ -5481,11 +5477,12 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
 }
 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
 
-int qeth_send_setassparms(struct qeth_card *card,
-                         struct qeth_cmd_buffer *iob, __u16 len, long data,
-                         int (*reply_cb)(struct qeth_card *,
-                                         struct qeth_reply *, unsigned long),
-                         void *reply_param)
+static int qeth_send_setassparms(struct qeth_card *card,
+                                struct qeth_cmd_buffer *iob, u16 len,
+                                long data, int (*reply_cb)(struct qeth_card *,
+                                                           struct qeth_reply *,
+                                                           unsigned long),
+                                void *reply_param)
 {
        int rc;
        struct qeth_ipa_cmd *cmd;
@@ -5501,7 +5498,6 @@ int qeth_send_setassparms(struct qeth_card *card,
        rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
        return rc;
 }
-EXPORT_SYMBOL_GPL(qeth_send_setassparms);
 
 int qeth_send_simple_setassparms_prot(struct qeth_card *card,
                                      enum qeth_ipa_funcs ipa_func,
@@ -6170,8 +6166,14 @@ static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd,
                WARN_ON_ONCE(1);
        }
 
-       /* fallthrough from high to low, to select all legal speeds: */
+       /* partially falls through, to also select the lower speeds */
        switch (maxspeed) {
+       case SPEED_25000:
+               ethtool_link_ksettings_add_link_mode(cmd, supported,
+                                                    25000baseSR_Full);
+               ethtool_link_ksettings_add_link_mode(cmd, advertising,
+                                                    25000baseSR_Full);
+               break;
        case SPEED_10000:
                ethtool_link_ksettings_add_link_mode(cmd, supported,
                                                     10000baseT_Full);
@@ -6254,6 +6256,10 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
                cmd->base.speed = SPEED_10000;
                cmd->base.port = PORT_FIBRE;
                break;
+       case QETH_LINK_TYPE_25GBIT_ETH:
+               cmd->base.speed = SPEED_25000;
+               cmd->base.port = PORT_FIBRE;
+               break;
        default:
                cmd->base.speed = SPEED_10;
                cmd->base.port = PORT_TP;
@@ -6320,6 +6326,9 @@ int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev,
        case CARD_INFO_PORTS_10G:
                cmd->base.speed = SPEED_10000;
                break;
+       case CARD_INFO_PORTS_25G:
+               cmd->base.speed = SPEED_25000;
+               break;
        }
 
        return 0;
index e85090467afe0a9e05b6d9b00355713ce53b2ef6..3e54be201b279f07b09481fd9c1c746802496c1e 100644 (file)
@@ -90,6 +90,7 @@ enum qeth_link_types {
        QETH_LINK_TYPE_GBIT_ETH     = 0x03,
        QETH_LINK_TYPE_OSN          = 0x04,
        QETH_LINK_TYPE_10GBIT_ETH   = 0x10,
+       QETH_LINK_TYPE_25GBIT_ETH   = 0x12,
        QETH_LINK_TYPE_LANE_ETH100  = 0x81,
        QETH_LINK_TYPE_LANE_TR      = 0x82,
        QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
@@ -347,6 +348,7 @@ enum qeth_card_info_port_speed {
        CARD_INFO_PORTS_100M            = 0x00000006,
        CARD_INFO_PORTS_1G              = 0x00000007,
        CARD_INFO_PORTS_10G             = 0x00000008,
+       CARD_INFO_PORTS_25G             = 0x0000000A,
 };
 
 /* (SET)DELIP(M) IPA stuff ***************************************************/
@@ -436,7 +438,7 @@ struct qeth_ipacmd_setassparms {
                __u32 flags_32bit;
                struct qeth_ipa_caps caps;
                struct qeth_checksum_cmd chksum;
-               struct qeth_arp_cache_entry add_arp_entry;
+               struct qeth_arp_cache_entry arp_entry;
                struct qeth_arp_query_data query_arp;
                struct qeth_tso_start_data tso;
                __u8 ip[16];
index 23aaf373f631e2283e7c84ddc43f913d876f7058..2914a1a69f8300a36c1bf0580094532cb9ccecd4 100644 (file)
@@ -146,11 +146,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
        QETH_CARD_TEXT(card, 2, "L2Wmac");
        rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc == -EEXIST)
-               QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
-                                mac, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
+                                CARD_DEVID(card));
        else if (rc)
-               QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
-                                mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
+                                CARD_DEVID(card), rc);
        return rc;
 }
 
@@ -163,8 +163,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
        QETH_CARD_TEXT(card, 2, "L2Rmac");
        rc = qeth_l2_send_setdelmac(card, mac, cmd);
        if (rc)
-               QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
-                                mac, QETH_CARD_IFNAME(card), rc);
+               QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %x: %d\n",
+                                CARD_DEVID(card), rc);
        return rc;
 }
 
@@ -260,9 +260,9 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 2, "L2sdvcb");
        if (cmd->hdr.return_code) {
-               QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+               QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
                                 cmd->data.setdelvlan.vlan_id,
-                                QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+                                CARD_DEVID(card), cmd->hdr.return_code);
                QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
                QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
        }
@@ -455,8 +455,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                rc = qeth_vm_request_mac(card);
                if (!rc)
                        goto out;
-               QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
-                                CARD_BUS_ID(card), rc);
+               QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
                QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
                /* fall back to alternative mechanism: */
        }
@@ -468,8 +468,8 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                rc = qeth_setadpparms_change_macaddr(card);
                if (!rc)
                        goto out;
-               QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
-                                CARD_BUS_ID(card), rc);
+               QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
                QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
                /* fall back once more: */
        }
@@ -826,7 +826,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
 
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l2_set_offline(cgdev);
-       unregister_netdev(card->dev);
+       if (qeth_netdev_is_registered(card->dev))
+               unregister_netdev(card->dev);
 }
 
 static const struct ethtool_ops qeth_l2_ethtool_ops = {
@@ -862,11 +863,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
        .ndo_set_features       = qeth_set_features
 };
 
-static int qeth_l2_setup_netdev(struct qeth_card *card)
+static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
 {
        int rc;
 
-       if (card->dev->netdev_ops)
+       if (qeth_netdev_is_registered(card->dev))
                return 0;
 
        card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -919,6 +920,9 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
        qeth_l2_request_initial_mac(card);
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        rc = register_netdev(card->dev);
+       if (!rc && carrier_ok)
+               netif_carrier_on(card->dev);
+
        if (rc)
                card->dev->netdev_ops = NULL;
        return rc;
@@ -949,6 +953,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc = 0;
        enum qeth_card_states recover_flag;
+       bool carrier_ok;
 
        mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
@@ -956,7 +961,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
        recover_flag = card->state;
-       rc = qeth_core_hardsetup_card(card);
+       rc = qeth_core_hardsetup_card(card, &carrier_ok);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
@@ -967,7 +972,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                dev_info(&card->gdev->dev,
                "The device represents a Bridge Capable Port\n");
 
-       rc = qeth_l2_setup_netdev(card);
+       rc = qeth_l2_setup_netdev(card, carrier_ok);
        if (rc)
                goto out_remove;
 
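qeth_core_hardsetup_card() now reports the link state through *carrier_ok instead of toggling the carrier itself, and only touches netif_carrier_on()/off() when the netdev is already registered; the first-time path applies the state right after register_netdev() succeeds, so the carrier is never flipped on an unregistered device. Condensed flow, from the hunks above:

    bool carrier_ok;

    rc = qeth_core_hardsetup_card(card, &carrier_ok);
    if (rc)
            goto out_remove;

    rc = qeth_l2_setup_netdev(card, carrier_ok);    /* registers the netdev
                                                       and, on success, calls
                                                       netif_carrier_on() */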
index 0b161cc1fd2e62f2251be71e939813ed58b5529c..f08b745c20073b92bd2a78da983ff2a9ade3ca77 100644 (file)
@@ -278,9 +278,6 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover)
 
        QETH_CARD_TEXT(card, 4, "clearip");
 
-       if (recover && card->options.sniffer)
-               return;
-
        spin_lock_bh(&card->ip_lock);
 
        hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
@@ -494,9 +491,8 @@ int qeth_l3_setrouting_v4(struct qeth_card *card)
                                  QETH_PROT_IPV4);
        if (rc) {
                card->options.route4.type = NO_ROUTER;
-               QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
-                       " on %s. Type set to 'no router'.\n", rc,
-                       QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+                                rc, CARD_DEVID(card));
        }
        return rc;
 }
@@ -518,9 +514,8 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
                                  QETH_PROT_IPV6);
        if (rc) {
                card->options.route6.type = NO_ROUTER;
-               QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
-                       " on %s. Type set to 'no router'.\n", rc,
-                       QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+                                rc, CARD_DEVID(card));
        }
        return rc;
 }
@@ -663,6 +658,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card,
        int rc = 0;
        int cnt = 3;
 
+       if (card->options.sniffer)
+               return 0;
 
        if (addr->proto == QETH_PROT_IPV4) {
                QETH_CARD_TEXT(card, 2, "setaddr4");
@@ -697,6 +694,9 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card,
 {
        int rc = 0;
 
+       if (card->options.sniffer)
+               return 0;
+
        if (addr->proto == QETH_PROT_IPV4) {
                QETH_CARD_TEXT(card, 2, "deladdr4");
                QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
@@ -1070,8 +1070,8 @@ qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
                }
                break;
        default:
-               QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
-                       cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+               QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
+                                cmd->data.diagass.action, CARD_DEVID(card));
        }
 
        return 0;
@@ -1517,32 +1517,25 @@ static void qeth_l3_set_rx_mode(struct net_device *dev)
        qeth_l3_handle_promisc_mode(card);
 }
 
-static const char *qeth_l3_arp_get_error_cause(int *rc)
+static int qeth_l3_arp_makerc(int rc)
 {
-       switch (*rc) {
-       case QETH_IPA_ARP_RC_FAILED:
-               *rc = -EIO;
-               return "operation failed";
+       switch (rc) {
+       case IPA_RC_SUCCESS:
+               return 0;
        case QETH_IPA_ARP_RC_NOTSUPP:
-               *rc = -EOPNOTSUPP;
-               return "operation not supported";
-       case QETH_IPA_ARP_RC_OUT_OF_RANGE:
-               *rc = -EINVAL;
-               return "argument out of range";
        case QETH_IPA_ARP_RC_Q_NOTSUPP:
-               *rc = -EOPNOTSUPP;
-               return "query operation not supported";
+               return -EOPNOTSUPP;
+       case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+               return -EINVAL;
        case QETH_IPA_ARP_RC_Q_NO_DATA:
-               *rc = -ENOENT;
-               return "no query data available";
+               return -ENOENT;
        default:
-               return "unknown error";
+               return -EIO;
        }
 }
 
 static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
 {
-       int tmp;
        int rc;
 
        QETH_CARD_TEXT(card, 3, "arpstnoe");
@@ -1560,13 +1553,10 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
        rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
                                          IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
                                          no_entries);
-       if (rc) {
-               tmp = rc;
-               QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
-                       "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
+       return qeth_l3_arp_makerc(rc);
 }
 
 static __u32 get_arp_entry_size(struct qeth_card *card,
@@ -1716,7 +1706,6 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
 {
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
-       int tmp;
        int rc;
 
        QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
@@ -1735,15 +1724,10 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
        rc = qeth_l3_send_ipa_arp_cmd(card, iob,
                           QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
                           qeth_l3_arp_query_cb, (void *)qinfo);
-       if (rc) {
-               tmp = rc;
-               QETH_DBF_MESSAGE(2,
-                       "Error while querying ARP cache on %s: %s "
-                       "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-
-       return rc;
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
+       return qeth_l3_arp_makerc(rc);
 }
 
 static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
@@ -1793,15 +1777,18 @@ out:
        return rc;
 }
 
-static int qeth_l3_arp_add_entry(struct qeth_card *card,
-                               struct qeth_arp_cache_entry *entry)
+static int qeth_l3_arp_modify_entry(struct qeth_card *card,
+                                   struct qeth_arp_cache_entry *entry,
+                                   enum qeth_arp_process_subcmds arp_cmd)
 {
+       struct qeth_arp_cache_entry *cmd_entry;
        struct qeth_cmd_buffer *iob;
-       char buf[16];
-       int tmp;
        int rc;
 
-       QETH_CARD_TEXT(card, 3, "arpadent");
+       if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
+               QETH_CARD_TEXT(card, 3, "arpadd");
+       else
+               QETH_CARD_TEXT(card, 3, "arpdel");
 
        /*
         * currently GuestLAN only supports the ARP assist function
@@ -1814,71 +1801,25 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
                return -EOPNOTSUPP;
        }
 
-       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-                                      IPA_CMD_ASS_ARP_ADD_ENTRY,
-                                      sizeof(struct qeth_arp_cache_entry),
-                                      QETH_PROT_IPV4);
+       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
+                                      sizeof(*cmd_entry), QETH_PROT_IPV4);
        if (!iob)
                return -ENOMEM;
-       rc = qeth_send_setassparms(card, iob,
-                                  sizeof(struct qeth_arp_cache_entry),
-                                  (unsigned long) entry,
-                                  qeth_setassparms_cb, NULL);
-       if (rc) {
-               tmp = rc;
-               qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
-               QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
-                       "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
-}
-
-static int qeth_l3_arp_remove_entry(struct qeth_card *card,
-                               struct qeth_arp_cache_entry *entry)
-{
-       struct qeth_cmd_buffer *iob;
-       char buf[16] = {0, };
-       int tmp;
-       int rc;
 
-       QETH_CARD_TEXT(card, 3, "arprment");
+       cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
+       ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
+       memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
+       rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
+                                arp_cmd, CARD_DEVID(card), rc);
 
-       /*
-        * currently GuestLAN only supports the ARP assist function
-        * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
-        * thus we say EOPNOTSUPP for this ARP function
-        */
-       if (card->info.guestlan)
-               return -EOPNOTSUPP;
-       if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
-               return -EOPNOTSUPP;
-       }
-       memcpy(buf, entry, 12);
-       iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-                                      IPA_CMD_ASS_ARP_REMOVE_ENTRY,
-                                      12,
-                                      QETH_PROT_IPV4);
-       if (!iob)
-               return -ENOMEM;
-       rc = qeth_send_setassparms(card, iob,
-                                  12, (unsigned long)buf,
-                                  qeth_setassparms_cb, NULL);
-       if (rc) {
-               tmp = rc;
-               memset(buf, 0, 16);
-               qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
-               QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
-                       " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
+       return qeth_l3_arp_makerc(rc);
 }
 
 static int qeth_l3_arp_flush_cache(struct qeth_card *card)
 {
        int rc;
-       int tmp;
 
        QETH_CARD_TEXT(card, 3, "arpflush");
 
@@ -1894,19 +1835,17 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
        }
        rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
                                          IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
-       if (rc) {
-               tmp = rc;
-               QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
-                       "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
-                       qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
-       }
-       return rc;
+       if (rc)
+               QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
+                                CARD_DEVID(card), rc);
+       return qeth_l3_arp_makerc(rc);
 }
 
 static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_arp_cache_entry arp_entry;
+       enum qeth_arp_process_subcmds arp_cmd;
        int rc = 0;
 
        switch (cmd) {
@@ -1925,27 +1864,16 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
                rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
                break;
        case SIOC_QETH_ARP_ADD_ENTRY:
-               if (!capable(CAP_NET_ADMIN)) {
-                       rc = -EPERM;
-                       break;
-               }
-               if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
-                                  sizeof(struct qeth_arp_cache_entry)))
-                       rc = -EFAULT;
-               else
-                       rc = qeth_l3_arp_add_entry(card, &arp_entry);
-               break;
        case SIOC_QETH_ARP_REMOVE_ENTRY:
-               if (!capable(CAP_NET_ADMIN)) {
-                       rc = -EPERM;
-                       break;
-               }
-               if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
-                                  sizeof(struct qeth_arp_cache_entry)))
-                       rc = -EFAULT;
-               else
-                       rc = qeth_l3_arp_remove_entry(card, &arp_entry);
-               break;
+               if (!capable(CAP_NET_ADMIN))
+                       return -EPERM;
+               if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+                       return -EFAULT;
+
+               arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
+                               IPA_CMD_ASS_ARP_ADD_ENTRY :
+                               IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+               return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
        case SIOC_QETH_ARP_FLUSH_CACHE:
                if (!capable(CAP_NET_ADMIN)) {
                        rc = -EPERM;
@@ -2383,12 +2311,12 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
        .ndo_neigh_setup        = qeth_l3_neigh_setup,
 };
 
-static int qeth_l3_setup_netdev(struct qeth_card *card)
+static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
 {
        unsigned int headroom;
        int rc;
 
-       if (card->dev->netdev_ops)
+       if (qeth_netdev_is_registered(card->dev))
                return 0;
 
        if (card->info.type == QETH_CARD_TYPE_OSD ||
@@ -2457,6 +2385,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 
        netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
        rc = register_netdev(card->dev);
+       if (!rc && carrier_ok)
+               netif_carrier_on(card->dev);
+
 out:
        if (rc)
                card->dev->netdev_ops = NULL;
@@ -2497,7 +2428,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
        if (cgdev->state == CCWGROUP_ONLINE)
                qeth_l3_set_offline(cgdev);
 
-       unregister_netdev(card->dev);
+       if (qeth_netdev_is_registered(card->dev))
+               unregister_netdev(card->dev);
        qeth_l3_clear_ip_htable(card, 0);
        qeth_l3_clear_ipato_list(card);
 }
@@ -2507,6 +2439,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc = 0;
        enum qeth_card_states recover_flag;
+       bool carrier_ok;
 
        mutex_lock(&card->discipline_mutex);
        mutex_lock(&card->conf_mutex);
@@ -2514,14 +2447,14 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
 
        recover_flag = card->state;
-       rc = qeth_core_hardsetup_card(card);
+       rc = qeth_core_hardsetup_card(card, &carrier_ok);
        if (rc) {
                QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
 
-       rc = qeth_l3_setup_netdev(card);
+       rc = qeth_l3_setup_netdev(card, carrier_ok);
        if (rc)
                goto out_remove;
 
index 97b6f197f0079b949e4947df00efaed4dbfd76fe..c9c57b4a0b71850a75cebbcf1a664293d7d5156a 100644 (file)
@@ -56,6 +56,7 @@ struct virtio_ccw_device {
        unsigned int revision; /* Transport revision */
        wait_queue_head_t wait_q;
        spinlock_t lock;
+       struct mutex io_lock; /* Serializes I/O requests */
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
@@ -296,6 +297,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
 
+       mutex_lock(&vcdev->io_lock);
        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
@@ -308,7 +310,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
-       return ret ? ret : vcdev->err;
+       ret = ret ? ret : vcdev->err;
+       mutex_unlock(&vcdev->io_lock);
+       return ret;
 }
 
 static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
@@ -828,6 +832,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        int ret;
        struct ccw1 *ccw;
        void *config_area;
+       unsigned long flags;
 
        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
@@ -846,11 +851,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
        if (ret)
                goto out_free;
 
+       spin_lock_irqsave(&vcdev->lock, flags);
        memcpy(vcdev->config, config_area, offset + len);
-       if (buf)
-               memcpy(buf, &vcdev->config[offset], len);
        if (vcdev->config_ready < offset + len)
                vcdev->config_ready = offset + len;
+       spin_unlock_irqrestore(&vcdev->lock, flags);
+       if (buf)
+               memcpy(buf, config_area + offset, len);
 
 out_free:
        kfree(config_area);
@@ -864,6 +871,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;
+       unsigned long flags;
 
        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
@@ -876,9 +884,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
        /* Make sure we don't overwrite fields. */
        if (vcdev->config_ready < offset)
                virtio_ccw_get_config(vdev, 0, NULL, offset);
+       spin_lock_irqsave(&vcdev->lock, flags);
        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+       spin_unlock_irqrestore(&vcdev->lock, flags);
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
@@ -1247,6 +1257,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);
+       mutex_init(&vcdev->io_lock);
 
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        dev_set_drvdata(&cdev->dev, vcdev);
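
The virtio_ccw hunks introduce two distinct locks: a new mutex that serializes whole channel I/O transactions, and the existing spinlock, which now also covers the cached config bytes, with the copy into the caller's buffer deliberately moved outside it. A compilable model of that split, using pthread mutexes in place of the kernel primitives and invented names throughout:

#include <pthread.h>
#include <string.h>

struct vcdev_model {
	pthread_mutex_t io_lock;	/* serializes I/O requests */
	pthread_mutex_t lock;		/* protects config[] and config_ready */
	unsigned char config[64];
	size_t config_ready;
};

static void do_io(struct vcdev_model *d)
{
	pthread_mutex_lock(&d->io_lock);	/* one transaction at a time */
	/* ... start channel program, wait for completion ... */
	pthread_mutex_unlock(&d->io_lock);
}

static void get_config(struct vcdev_model *d, size_t offset, void *buf,
		       size_t len, const unsigned char *config_area)
{
	/* update the cache under the lock... */
	pthread_mutex_lock(&d->lock);
	memcpy(d->config, config_area, offset + len);
	if (d->config_ready < offset + len)
		d->config_ready = offset + len;
	pthread_mutex_unlock(&d->lock);

	/* ...but fill the caller's buffer from the bounce area outside it */
	if (buf)
		memcpy(buf, config_area + offset, len);
}
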
index 5c8ed7350a04a4f65be2eefa103637f2943e5a37..a36e4cf1841d9da7fd22cb5f6491d134b9b7f96f 100644 (file)
@@ -220,6 +220,7 @@ static int d7s_probe(struct platform_device *op)
        dev_set_drvdata(&op->dev, p);
        d7s_device = p;
        err = 0;
+       of_node_put(opts);
 
 out:
        return err;
index 56e962a014939e31c7ad74687263ecd69558ef7b..b8481927bfe4048b4147e01c77c0f7f695ab90bb 100644 (file)
@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
                        for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
                                pchild->mon_type[len] = ENVCTRL_NOMON;
                        }
+                       of_node_put(root_node);
                        return;
                }
+               of_node_put(root_node);
        }
 
        /* Get the monitor channels. */
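
Both sparc hunks above fix the same leak class: of_find_node_by_path() and friends return a node with an elevated reference count, and every exit path must drop it. A tiny standalone model of the get/put discipline, with invented stand-ins for the OF helpers:

#include <stdlib.h>

struct node { int refcount; };

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	if (n && --n->refcount == 0)
		free(n);
}

static int probe(struct node *root)
{
	struct node *opts = node_get(root);	/* lookup takes a reference */
	int err = 0;

	/* ... read properties from opts ... */

	node_put(opts);		/* balance it on every return path */
	return err;
}
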
index 05293babb03106ebc2e7bb231c57967b77050823..2d655a97b959e93523f3fdd9ad12f1ef1aea60cd 100644 (file)
@@ -143,7 +143,9 @@ static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int secon
 static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
 static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
-static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+                                  unsigned char *cdb, int use_sg,
+                                  TW_SG_Entry *sglistarg);
 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
 
@@ -278,7 +280,7 @@ out:
 static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 {
        int request_id = 0;
-       char cdb[TW_MAX_CDB_LEN];
+       unsigned char cdb[TW_MAX_CDB_LEN];
        TW_SG_Entry sglist[1];
        int finished = 0, count = 0;
        TW_Command_Full *full_command_packet;
@@ -423,7 +425,7 @@ static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_H
 /* This function will read the aen queue from the isr */
 static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 {
-       char cdb[TW_MAX_CDB_LEN];
+       unsigned char cdb[TW_MAX_CDB_LEN];
        TW_SG_Entry sglist[1];
        TW_Command_Full *full_command_packet;
        int retval = 1;
@@ -1798,7 +1800,9 @@ out:
 static DEF_SCSI_QCMD(twa_scsi_queue)
 
 /* This function hands scsi cdb's to the firmware */
-static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
+static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+                                  unsigned char *cdb, int use_sg,
+                                  TW_SG_Entry *sglistarg)
 {
        TW_Command_Full *full_command_packet;
        TW_Command_Apache *command_packet;
index 266bdac7530427ea3914705446b58facf0a01014..480cf82700e9f48a225baeda0007f2e6d5f84ece 100644 (file)
@@ -287,7 +287,9 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
 } /* End twl_post_command_packet() */
 
 /* This function hands scsi cdb's to the firmware */
-static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
+static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
+                                  unsigned char *cdb, int use_sg,
+                                  TW_SG_Entry_ISO *sglistarg)
 {
        TW_Command_Full *full_command_packet;
        TW_Command_Apache *command_packet;
@@ -372,7 +374,7 @@ out:
 /* This function will read the aen queue from the isr */
 static int twl_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 {
-       char cdb[TW_MAX_CDB_LEN];
+       unsigned char cdb[TW_MAX_CDB_LEN];
        TW_SG_Entry_ISO sglist[1];
        TW_Command_Full *full_command_packet;
        int retval = 1;
@@ -554,7 +556,7 @@ out:
 static int twl_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 {
        int request_id = 0;
-       char cdb[TW_MAX_CDB_LEN];
+       unsigned char cdb[TW_MAX_CDB_LEN];
        TW_SG_Entry_ISO sglist[1];
        int finished = 0, count = 0;
        TW_Command_Full *full_command_packet;
index 70988c3812684cc8d1d573e3d75bdc30ef5d1a3f..640cd1b31a18d2bfc164e50adaf819ce27d6d754 100644 (file)
@@ -538,7 +538,7 @@ config SCSI_HPTIOP
 
 config SCSI_BUSLOGIC
        tristate "BusLogic SCSI support"
-       depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
+       depends on (PCI || ISA) && SCSI && ISA_DMA_API && VIRT_TO_BUS
        ---help---
          This is support for BusLogic MultiMaster and FlashPoint SCSI Host
          Adapters. Consult the SCSI-HOWTO, available from
@@ -578,6 +578,7 @@ config SCSI_MYRB
 config SCSI_MYRS
        tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
        depends on PCI
+       depends on !CPU_BIG_ENDIAN || COMPILE_TEST
        select RAID_ATTRS
        help
          This driver adds support for the Mylex DAC960, AcceleRAID, and
@@ -1175,12 +1176,12 @@ config SCSI_LPFC_DEBUG_FS
 
 config SCSI_SIM710
        tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
-       depends on (EISA || MCA) && SCSI
+       depends on EISA && SCSI
        select SCSI_SPI_ATTRS
        ---help---
          This driver is for NCR53c710 based SCSI host adapters.
 
-         It currently supports Compaq EISA cards and NCR MCA cards
+         It currently supports Compaq EISA cards.
 
 config SCSI_DC395x
        tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
index 8429c855701fca200b56df1453a4fc5188a9a38e..01c23d27f290b114c3f9e0b20b713edd30e2d884 100644 (file)
@@ -1198,7 +1198,7 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 
 out:
        if (!hostdata->selecting)
-               return NULL;
+               return false;
        hostdata->selecting = NULL;
        return ret;
 }
index 4d7b0e0adbf70c735f0535f911dee1ef9cc0fe73..301b3cad15f88f6d4c9022d3e763a625a35172ff 100644 (file)
@@ -269,7 +269,7 @@ static LIST_HEAD(aha152x_host_list);
 /* DEFINES */
 
 /* For PCMCIA cards, always use AUTOCONF */
-#if defined(PCMCIA) || defined(MODULE)
+#if defined(AHA152X_PCMCIA) || defined(MODULE)
 #if !defined(AUTOCONF)
 #define AUTOCONF
 #endif
@@ -297,7 +297,7 @@ CMD_INC_RESID(struct scsi_cmnd *cmd, int inc)
 
 #define DELAY_DEFAULT 1000
 
-#if defined(PCMCIA)
+#if defined(AHA152X_PCMCIA)
 #define IRQ_MIN 0
 #define IRQ_MAX 16
 #else
@@ -328,7 +328,7 @@ MODULE_AUTHOR("Jürgen Fischer");
 MODULE_DESCRIPTION(AHA152X_REVID);
 MODULE_LICENSE("GPL");
 
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
 #if defined(MODULE)
 static int io[] = {0, 0};
 module_param_hw_array(io, int, ioport, NULL, 0);
@@ -391,7 +391,7 @@ static struct isapnp_device_id id_table[] = {
 MODULE_DEVICE_TABLE(isapnp, id_table);
 #endif /* ISAPNP */
 
-#endif /* !PCMCIA */
+#endif /* !AHA152X_PCMCIA */
 
 static struct scsi_host_template aha152x_driver_template;
 
@@ -863,7 +863,7 @@ void aha152x_release(struct Scsi_Host *shpnt)
        if (shpnt->irq)
                free_irq(shpnt->irq, shpnt);
 
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
        if (shpnt->io_port)
                release_region(shpnt->io_port, IO_RANGE);
 #endif
@@ -2924,7 +2924,7 @@ static struct scsi_host_template aha152x_driver_template = {
        .slave_alloc                    = aha152x_adjust_queue,
 };
 
-#if !defined(PCMCIA)
+#if !defined(AHA152X_PCMCIA)
 static int setup_count;
 static struct aha152x_setup setup[2];
 
@@ -3392,4 +3392,4 @@ static int __init aha152x_setup(char *str)
 __setup("aha152x=", aha152x_setup);
 #endif
 
-#endif /* !PCMCIA */
+#endif /* !AHA152X_PCMCIA */
index f0e457e6884e5ca9c92d84c12c1db2cf6906104b..8df822a4a1bd6624abb8814a70e8bdc502274192 100644 (file)
@@ -904,11 +904,9 @@ static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
 {
        struct hisi_hba *hisi_hba = dq->hisi_hba;
        struct hisi_sas_slot *s, *s1, *s2 = NULL;
-       struct list_head *dq_list;
        int dlvry_queue = dq->id;
        int wp;
 
-       dq_list = &dq->list;
        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
                if (!s->ready)
                        break;
index cc36b6473e986b3191f88160ac41f24f730999d6..77a85ead483e098a8e1d837c8130bfb873dc3671 100644 (file)
@@ -1670,11 +1670,9 @@ static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
 {
        struct hisi_hba *hisi_hba = dq->hisi_hba;
        struct hisi_sas_slot *s, *s1, *s2 = NULL;
-       struct list_head *dq_list;
        int dlvry_queue = dq->id;
        int wp;
 
-       dq_list = &dq->list;
        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
                if (!s->ready)
                        break;
index bd4ce38b98d229ad56fb4a224621929b34d11640..a369450a1fa7bfd71160c06211969609cc056f15 100644 (file)
@@ -886,11 +886,9 @@ static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
 {
        struct hisi_hba *hisi_hba = dq->hisi_hba;
        struct hisi_sas_slot *s, *s1, *s2 = NULL;
-       struct list_head *dq_list;
        int dlvry_queue = dq->id;
        int wp;
 
-       dq_list = &dq->list;
        list_for_each_entry_safe(s, s1, &dq->list, delivery) {
                if (!s->ready)
                        break;
index 93c66ebad907ee0e9b99505e54aed32853ab4aae..f78d2e5c1471d3faf22d8e5a9b98e566f1531189 100644 (file)
@@ -2416,8 +2416,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
 failed:
                ISCSI_DBG_EH(session,
                             "failing session reset: Could not log back into "
-                            "%s, %s [age %d]\n", session->targetname,
-                            conn->persistent_address, session->age);
+                            "%s [age %d]\n", session->targetname,
+                            session->age);
                spin_unlock_bh(&session->frwd_lock);
                mutex_unlock(&session->eh_mutex);
                return FAILED;
index 0c8005bb0f53f271e958c4b1f51eadd7551474b8..34d311a7dbef1b5a0ad9e0cff3ef52d2f0e9103d 100644 (file)
@@ -698,6 +698,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
                rport = lpfc_ndlp_get_nrport(ndlp);
                if (rport)
                        nrport = rport->remoteport;
+               else
+                       nrport = NULL;
                spin_unlock(&phba->hbalock);
                if (!nrport)
                        continue;
index 20fa6785a0e2e882b6e63ae4914625f24ec7679b..68d62d55a3a50429eb97febc179726300618f151 100644 (file)
@@ -167,7 +167,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                       sizeof(phba->wwpn));
        }
 
-       phba->sli3_options = 0x0;
+       /*
+        * Clear all option bits except LPFC_SLI3_BG_ENABLED,
+        * which was already set in lpfc_get_cfgparam()
+        */
+       phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
 
        /* Setup and issue mailbox READ REV command */
        lpfc_read_rev(phba, pmb);
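
The lpfc hunks replace a blanket `phba->sli3_options = 0` with a mask that preserves LPFC_SLI3_BG_ENABLED, and drop that flag from the later clear as well. The masking idiom in isolation, runnable as-is (the bit values are made up):

#include <assert.h>
#include <stdint.h>

#define BG_ENABLED	(1u << 2)	/* stand-in for LPFC_SLI3_BG_ENABLED */
#define NPIV_ENABLED	(1u << 0)
#define HBQ_ENABLED	(1u << 1)

int main(void)
{
	uint32_t options = BG_ENABLED | NPIV_ENABLED | HBQ_ENABLED;

	options &= BG_ENABLED;		/* keep BG, clear everything else */
	assert(options == BG_ENABLED);
	return 0;
}
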
index 783a1540cfbea6e918fb08ee71a13642e03164f8..b9e5cd79931a21293a58e18125b2e2f650d47a73 100644 (file)
@@ -4965,7 +4965,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
                phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
                                        LPFC_SLI3_HBQ_ENABLED |
                                        LPFC_SLI3_CRP_ENABLED |
-                                       LPFC_SLI3_BG_ENABLED |
                                        LPFC_SLI3_DSS_ENABLED);
                if (rc != MBX_SUCCESS) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
index 3df1428df31727c1bad7b24904fa2b9cca670f82..311d23c727cef06c2a85cbd5e6b4f29967ef25ef 100644 (file)
@@ -790,12 +790,11 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf
        slot->n_elem = n_elem;
        slot->slot_tag = tag;
 
-       slot->buf = dma_pool_alloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
+       slot->buf = dma_pool_zalloc(mvi->dma_pool, GFP_ATOMIC, &slot->buf_dma);
        if (!slot->buf) {
                rc = -ENOMEM;
                goto err_out_tag;
        }
-       memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
 
        tei.task = task;
        tei.hdr = &mvi->slot[tag];
@@ -1906,8 +1905,7 @@ static void mvs_work_queue(struct work_struct *work)
 
                if (phy->phy_event & PHY_PLUG_OUT) {
                        u32 tmp;
-                       struct sas_identify_frame *id;
-                       id = (struct sas_identify_frame *)phy->frame_rcvd;
+
                        tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
                        phy->phy_event &= ~PHY_PLUG_OUT;
                        if (!(tmp & PHY_READY_MASK)) {
index aeb282f617c5c43fd182065e1fd9dbab7e6f69d5..0642f2d0a3bb687a1c7ca3de09e24865afa637a7 100644 (file)
@@ -1049,7 +1049,8 @@ static int myrb_get_hba_config(struct myrb_hba *cb)
                enquiry2->fw.firmware_type = '0';
                enquiry2->fw.turn_id = 0;
        }
-       sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+       snprintf(cb->fw_version, sizeof(cb->fw_version),
+               "%d.%02d-%c-%02d",
                enquiry2->fw.major_version,
                enquiry2->fw.minor_version,
                enquiry2->fw.firmware_type,
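
The myrb change swaps sprintf() for snprintf() when formatting the firmware version. The bounded form in isolation: the destination size caps the write and guarantees NUL termination even if a field is wider than expected (field values here are made up):

#include <stdio.h>

int main(void)
{
	char fw_version[16];	/* same bound snprintf() is given above */

	snprintf(fw_version, sizeof(fw_version), "%d.%02d-%c-%02d",
		 5, 6, 'D', 0);
	puts(fw_version);	/* "5.06-D-00", always NUL-terminated */
	return 0;
}
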
index 0264a2e2bc190e068832a89452215919b9b22c30..b8d54ef8cf6dfb84c6d4cb3ce2714071cc2de1d6 100644 (file)
@@ -163,9 +163,12 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
        dma_addr_t ctlr_info_addr;
        union myrs_sgl *sgl;
        unsigned char status;
-       struct myrs_ctlr_info old;
+       unsigned short ldev_present, ldev_critical, ldev_offline;
+
+       ldev_present = cs->ctlr_info->ldev_present;
+       ldev_critical = cs->ctlr_info->ldev_critical;
+       ldev_offline = cs->ctlr_info->ldev_offline;
 
-       memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
        ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
                                        sizeof(struct myrs_ctlr_info),
                                        DMA_FROM_DEVICE);
@@ -198,9 +201,9 @@ static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
                    cs->ctlr_info->rbld_active +
                    cs->ctlr_info->exp_active != 0)
                        cs->needs_update = true;
-               if (cs->ctlr_info->ldev_present != old.ldev_present ||
-                   cs->ctlr_info->ldev_critical != old.ldev_critical ||
-                   cs->ctlr_info->ldev_offline != old.ldev_offline)
+               if (cs->ctlr_info->ldev_present != ldev_present ||
+                   cs->ctlr_info->ldev_critical != ldev_critical ||
+                   cs->ctlr_info->ldev_offline != ldev_offline)
                        shost_printk(KERN_INFO, cs->host,
                                     "Logical drive count changes (%d/%d/%d)\n",
                                     cs->ctlr_info->ldev_critical,
index dba3716511c56595967c617b139ef46abd28f23e..24b89228b2414c1ab041d916d386ae347deb572c 100644 (file)
@@ -1,3 +1,3 @@
-#define PCMCIA 1
+#define AHA152X_PCMCIA 1
 #define AHA152X_STAT 1
 #include "aha152x.c"
index b28f159fdaee79fe194be3cddc81e1842059e104..0bb9ac6ece9205b8f652fa55a7f752b780012ff5 100644 (file)
@@ -218,7 +218,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
 
        mutex_lock(&ha->optrom_mutex);
        if (qla2x00_chip_is_down(vha)) {
-               mutex_unlock(&vha->hw->optrom_mutex);
+               mutex_unlock(&ha->optrom_mutex);
                return -EAGAIN;
        }
 
index c72d8012fe2aabfa7d1bea57532b2d170babe169..eb59c796a795de06af7d2fa19a64a30e022a3b87 100644 (file)
@@ -425,7 +425,7 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
        __qla24xx_handle_gpdb_event(vha, ea);
 }
 
-int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
        struct qla_work_evt *e;
 
@@ -680,7 +680,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                            fcport);
                                        break;
                                }
-                               /* drop through */
+                               /* fall through */
                        default:
                                if (fcport_is_smaller(fcport)) {
                                        /* local adapter is bigger */
@@ -1551,7 +1551,8 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
 }
 
 
-void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea)
+static void qla_handle_els_plogi_done(scsi_qla_host_t *vha,
+                                     struct event_arg *ea)
 {
        ql_dbg(ql_dbg_disc, vha, 0x2118,
            "%s %d %8phC post PRLI\n",
@@ -4762,6 +4763,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
        fcport->loop_id = FC_NO_LOOP_ID;
        qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
        fcport->supported_classes = FC_COS_UNSPECIFIED;
+       fcport->fp_speed = PORT_SPEED_UNKNOWN;
 
        fcport->ct_desc.ct_sns = dma_alloc_coherent(&vha->hw->pdev->dev,
                sizeof(struct ct_sns_pkt), &fcport->ct_desc.ct_sns_dma,
index 86fb8b21aa71085c44f403e1ee17832f01714618..032635321ad6e75a17e3c216692bd120456edae0 100644 (file)
@@ -1195,8 +1195,8 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
  * @sp: SRB command to process
  * @cmd_pkt: Command type 3 IOCB
  * @tot_dsds: Total number of segments to transfer
- * @tot_prot_dsds:
- * @fw_prot_opts:
+ * @tot_prot_dsds: Total number of segments with protection information
+ * @fw_prot_opts: Protection options to be passed to firmware
  */
 inline int
 qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
index d73b04e405902b02d0ea5822b44c105760166e72..30d3090842f856d27fb32f23e051832d8eb7b500 100644 (file)
@@ -25,7 +25,7 @@ static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -144,7 +144,7 @@ qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
 
 /**
  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
@@ -3109,7 +3109,7 @@ done:
 
 /**
  * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
index 2f3e5075ae76e8d4fccacdf3a8aee7f14de42a24..191b6b7c8747df06419b998a8fbf06541658080a 100644 (file)
@@ -3478,9 +3478,9 @@ qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
 /**
  * qla2x00_set_serdes_params() -
  * @vha: HA context
- * @sw_em_1g:
- * @sw_em_2g:
- * @sw_em_4g:
+ * @sw_em_1g: serial link options
+ * @sw_em_2g: serial link options
+ * @sw_em_4g: serial link options
  *
  * Returns
  */
index 521a513705549a9b263566c5bd9ea7b568bf4029..60f964c53c01a76023b627666548c3b9e0203aca 100644 (file)
@@ -2212,7 +2212,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
        struct bsg_job *bsg_job;
        struct fc_bsg_reply *bsg_reply;
        struct srb_iocb *iocb_job;
-       int res;
+       int res = 0;
        struct qla_mt_iocb_rsp_fx00 fstatus;
        uint8_t *fw_sts_ptr;
 
@@ -2624,7 +2624,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
  * qlafx00_multistatus_entry() - Process Multi response queue entries.
  * @vha: SCSI driver HA context
  * @rsp: response queue
- * @pkt:
+ * @pkt: received packet
  */
 static void
 qlafx00_multistatus_entry(struct scsi_qla_host *vha,
@@ -2681,12 +2681,10 @@ qlafx00_multistatus_entry(struct scsi_qla_host *vha,
  * @vha: SCSI driver HA context
  * @rsp: response queue
  * @pkt: Entry pointer
- * @estatus:
- * @etype:
  */
 static void
 qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
-                   struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
+                   struct sts_entry_fx00 *pkt)
 {
        srb_t *sp;
        struct qla_hw_data *ha = vha->hw;
@@ -2695,9 +2693,6 @@ qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
        struct req_que *req = NULL;
        int res = DID_ERROR << 16;
 
-       ql_dbg(ql_dbg_async, vha, 0x507f,
-           "type of error status in response: 0x%x\n", estatus);
-
        req = ha->req_q_map[que];
 
        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
@@ -2745,9 +2740,11 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
 
                if (pkt->entry_status != 0 &&
                    pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
+                       ql_dbg(ql_dbg_async, vha, 0x507f,
+                              "type of error status in response: 0x%x\n",
+                              pkt->entry_status);
                        qlafx00_error_entry(vha, rsp,
-                           (struct sts_entry_fx00 *)pkt, pkt->entry_status,
-                           pkt->entry_type);
+                                           (struct sts_entry_fx00 *)pkt);
                        continue;
                }
 
@@ -2867,7 +2864,7 @@ qlafx00_async_event(scsi_qla_host_t *vha)
 /**
  * qlafx00x_mbx_completion() - Process mailbox command completions.
  * @vha: SCSI driver HA context
- * @mb0:
+ * @mb0: value to be written into mailbox register 0
  */
 static void
 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
@@ -2893,7 +2890,7 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
 
 /**
  * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
index 121e18b3b9f8399ee7c63095761583db2ffc1706..f2f54806f4da9dac0aa90431e3aee47abeef2550 100644 (file)
@@ -2010,7 +2010,7 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 
 /**
  * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
index 3a2b0282df149531789bddd8cfe4d7ec69ebfbf3..fe856b602e03198686eec132fe55ed7f30676dda 100644 (file)
@@ -3878,7 +3878,7 @@ out:
 #define PF_BITS_MASK           (0xF << 16)
 /**
  * qla8044_intr_handler() - Process interrupts for the ISP8044
- * @irq:
+ * @irq: interrupt number
  * @dev_id: SCSI driver HA context
  *
  * Called by system whenever the host adapter generates an interrupt.
index 8794e54f43a95d568e88ff4944fb0301d1699271..b658b9a5eb1e172b6549d05b1e8c3aece913f715 100644 (file)
@@ -67,7 +67,7 @@ module_param(ql2xplogiabsentdevice, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xplogiabsentdevice,
                "Option to enable PLOGI to devices that are not present after "
                "a Fabric scan.  This is needed for several broken switches. "
-               "Default is 0 - no PLOGI. 1 - perfom PLOGI.");
+               "Default is 0 - no PLOGI. 1 - perform PLOGI.");
 
 int ql2xloginretrycount = 0;
 module_param(ql2xloginretrycount, int, S_IRUGO);
@@ -1800,9 +1800,15 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
                                                spin_unlock_irqrestore
                                                        (qp->qp_lock_ptr, flags);
                                                status = qla2xxx_eh_abort(
-                                                   GET_CMD_SP(sp));
+                                                       GET_CMD_SP(sp));
                                                spin_lock_irqsave
                                                        (qp->qp_lock_ptr, flags);
+                                               /*
+                                                * Get rid of extra reference caused
+                                                * by early exit from qla2xxx_eh_abort
+                                                */
+                                               if (status == FAST_IO_FAIL)
+                                                       atomic_dec(&sp->ref_count);
                                        }
                                }
                                sp->done(sp, res);
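
The __qla2x00_abort_all_cmds() hunk balances a reference that qla2xxx_eh_abort() never consumed when it exits early with FAST_IO_FAIL. The same balance requirement, reduced to a compilable model with invented names:

#include <assert.h>
#include <stdatomic.h>

#define FAST_IO_FAIL 1

/* invented stand-in for qla2xxx_eh_abort() */
static int eh_abort(atomic_int *ref_count, int early_exit)
{
	if (early_exit)
		return FAST_IO_FAIL;		/* reference not consumed */
	atomic_fetch_sub(ref_count, 1);		/* normal path consumes it */
	return 0;
}

int main(void)
{
	atomic_int ref_count = 1;

	if (eh_abort(&ref_count, 1) == FAST_IO_FAIL)
		atomic_fetch_sub(&ref_count, 1);  /* balance the early exit */
	assert(atomic_load(&ref_count) == 0);
	return 0;
}
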
index 4499c787165f14f63f9190800886b39374054352..2a3055c799fb613039fb73718ace4fd70b23886d 100644 (file)
@@ -2229,7 +2229,7 @@ qla2x00_erase_flash_sector(struct qla_hw_data *ha, uint32_t addr,
 
 /**
  * qla2x00_get_flash_manufacturer() - Read manufacturer ID from flash chip.
- * @ha:
+ * @ha: host adapter
  * @man_id: Flash manufacturer ID
  * @flash_id: Flash ID
  */
index 39828207bc1d223fc729252e127c43ac61e4f9f2..c4504740f0e2123ba410eada3b84bb3c80ce8fca 100644 (file)
@@ -4540,7 +4540,7 @@ static int qlt_issue_task_mgmt(struct fc_port *sess, u64 lun,
        case QLA_TGT_CLEAR_TS:
        case QLA_TGT_ABORT_TS:
                abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
-               /* drop through */
+               /* fall through */
        case QLA_TGT_CLEAR_ACA:
                h = qlt_find_qphint(vha, mcmd->unpacked_lun);
                mcmd->qpair = h->qpair;
@@ -6598,9 +6598,9 @@ static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
  * qla_tgt_lport_register - register lport with external module
  *
  * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
- * @phys_wwpn:
- * @npiv_wwpn:
- * @npiv_wwnn:
+ * @phys_wwpn: physical port WWPN
+ * @npiv_wwpn: NPIV WWPN
+ * @npiv_wwnn: NPIV WWNN
  * @callback:  lport initialization callback for tcm_qla2xxx code
  */
 int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
index c7fccbb8f5545e463537389bc34b607de3dc9cb3..fa6e0c3b3aa678cd1e62f91021dc89036211aa8a 100644 (file)
@@ -697,6 +697,12 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                 */
                scsi_mq_uninit_cmd(cmd);
 
+               /*
+                * the queue is still alive here, so grab a reference to
+                * prevent it from being cleaned up while we run the queue.
+                */
+               percpu_ref_get(&q->q_usage_counter);
+
                __blk_mq_end_request(req, error);
 
                if (scsi_target(sdev)->single_lun ||
@@ -704,6 +710,8 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
                        kblockd_schedule_work(&sdev->requeue_work);
                else
                        blk_mq_run_hw_queues(q, true);
+
+               percpu_ref_put(&q->q_usage_counter);
        } else {
                unsigned long flags;
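
The scsi_end_request() hunk pins the queue with percpu_ref_get()/percpu_ref_put() so that kicking the hardware queues after __blk_mq_end_request() cannot race with queue teardown. The same shape with a plain C11 atomic standing in for the percpu ref (all names invented):

#include <stdatomic.h>

struct queue_model { atomic_int usage_counter; };

static void queue_get(struct queue_model *q)
{
	atomic_fetch_add(&q->usage_counter, 1);
}

static void queue_put(struct queue_model *q)
{
	atomic_fetch_sub(&q->usage_counter, 1);
}

static void end_request(struct queue_model *q)
{
	queue_get(q);	/* the queue is alive here; pin it */

	/* __blk_mq_end_request(): may release the request's queue usage */
	/* blk_mq_run_hw_queues(): still safe, we hold our own reference */

	queue_put(q);
}

int main(void)
{
	struct queue_model q = { 0 };

	end_request(&q);
	return atomic_load(&q.usage_counter);
}
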
 
index f03dc03a42c35c31eb91ec2af88f18ddf98069de..8f88348ebe4245b8206c1b68115360e10d61c66a 100644 (file)
@@ -446,7 +446,6 @@ struct storvsc_device {
 
        bool     destroy;
        bool     drain_notify;
-       bool     open_sub_channel;
        atomic_t num_outstanding_req;
        struct Scsi_Host *host;
 
@@ -636,33 +635,38 @@ get_in_err:
 static void handle_sc_creation(struct vmbus_channel *new_sc)
 {
        struct hv_device *device = new_sc->primary_channel->device_obj;
+       struct device *dev = &device->device;
        struct storvsc_device *stor_device;
        struct vmstorage_channel_properties props;
+       int ret;
 
        stor_device = get_out_stor_device(device);
        if (!stor_device)
                return;
 
-       if (stor_device->open_sub_channel == false)
-               return;
-
        memset(&props, 0, sizeof(struct vmstorage_channel_properties));
 
-       vmbus_open(new_sc,
-                  storvsc_ringbuffer_size,
-                  storvsc_ringbuffer_size,
-                  (void *)&props,
-                  sizeof(struct vmstorage_channel_properties),
-                  storvsc_on_channel_callback, new_sc);
+       ret = vmbus_open(new_sc,
+                        storvsc_ringbuffer_size,
+                        storvsc_ringbuffer_size,
+                        (void *)&props,
+                        sizeof(struct vmstorage_channel_properties),
+                        storvsc_on_channel_callback, new_sc);
 
-       if (new_sc->state == CHANNEL_OPENED_STATE) {
-               stor_device->stor_chns[new_sc->target_cpu] = new_sc;
-               cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
+       /* In case vmbus_open() fails, we don't use the sub-channel. */
+       if (ret != 0) {
+               dev_err(dev, "Failed to open sub-channel: err=%d\n", ret);
+               return;
        }
+
+       /* Add the sub-channel to the array of available channels. */
+       stor_device->stor_chns[new_sc->target_cpu] = new_sc;
+       cpumask_set_cpu(new_sc->target_cpu, &stor_device->alloced_cpus);
 }
 
 static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
+       struct device *dev = &device->device;
        struct storvsc_device *stor_device;
        int num_cpus = num_online_cpus();
        int num_sc;
@@ -679,21 +683,11 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
        request = &stor_device->init_request;
        vstor_packet = &request->vstor_packet;
 
-       stor_device->open_sub_channel = true;
        /*
         * Establish a handler for dealing with subchannels.
         */
        vmbus_set_sc_create_callback(device->channel, handle_sc_creation);
 
-       /*
-        * Check to see if sub-channels have already been created. This
-        * can happen when this driver is re-loaded after unloading.
-        */
-
-       if (vmbus_are_subchannels_present(device->channel))
-               return;
-
-       stor_device->open_sub_channel = false;
        /*
         * Request the host to create sub-channels.
         */
@@ -710,23 +704,29 @@ static void  handle_multichannel_storage(struct hv_device *device, int max_chns)
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 
-       if (ret != 0)
+       if (ret != 0) {
+               dev_err(dev, "Failed to create sub-channel: err=%d\n", ret);
                return;
+       }
 
        t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
-       if (t == 0)
+       if (t == 0) {
+               dev_err(dev, "Failed to create sub-channel: timed out\n");
                return;
+       }
 
        if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
-           vstor_packet->status != 0)
+           vstor_packet->status != 0) {
+               dev_err(dev, "Failed to create sub-channel: op=%d, sts=%d\n",
+                       vstor_packet->operation, vstor_packet->status);
                return;
+       }
 
        /*
-        * Now that we created the sub-channels, invoke the check; this
-        * may trigger the callback.
+        * We need to do nothing here, because vmbus_process_offer()
+        * invokes channel->sc_creation_callback, which will open and use
+        * the sub-channel(s).
         */
-       stor_device->open_sub_channel = true;
-       vmbus_are_subchannels_present(device->channel);
 }
 
 static void cache_wwn(struct storvsc_device *stor_device,
@@ -1794,7 +1794,6 @@ static int storvsc_probe(struct hv_device *device,
        }
 
        stor_device->destroy = false;
-       stor_device->open_sub_channel = false;
        init_waitqueue_head(&stor_device->waiting_to_drain);
        stor_device->device = device;
        stor_device->host = host;
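
The storvsc rework drops the open_sub_channel handshake: the sub-channel creation callback now opens the channel itself, reports failures, and only then publishes the channel for I/O. A small model of that publish-after-open ordering, with invented stand-ins for the vmbus calls:

#include <stdio.h>

/* invented stand-in for vmbus_open() */
static int channel_open(int id)
{
	return id >= 0 ? 0 : -1;
}

static void on_new_subchannel(int id, int *channels, int *count)
{
	int ret = channel_open(id);

	if (ret != 0) {
		fprintf(stderr, "Failed to open sub-channel: err=%d\n", ret);
		return;			/* never publish a half-open channel */
	}
	channels[(*count)++] = id;	/* now visible to the I/O paths */
}

int main(void)
{
	int channels[4], count = 0;

	on_new_subchannel(1, channels, &count);
	return count == 1 ? 0 : 1;
}
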
index 46df707e6f2c0404a3c1c4b5f92a76e03feee622..452e19f8fb47027ab4c264f60da67b69eafdbf49 100644 (file)
@@ -20,6 +20,7 @@
 #include "unipro.h"
 #include "ufs-hisi.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
 
 static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
 {
@@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param)
 
 static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
 {
+       if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
+               pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
+               /* VS_DebugSaveConfigTime */
+               ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
+               /* sync length */
+               ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
+       }
+
        /* update */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
        /* PA_TxSkip */
index 71f73d1d1ad1fb9b7c357c65baf0f49ec9dfa780..5d2dfdb41a6ffcc6c20a88189c070a3917d8d4e3 100644 (file)
@@ -131,4 +131,10 @@ struct ufs_dev_fix {
  */
 #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME        (1 << 8)
 
+/*
+ * Some UFS devices require VS_DebugSaveConfigTime to be 0x10;
+ * enabling this quirk ensures that.
+ */
+#define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME   (1 << 9)
+
 #endif /* UFS_QUIRKS_H_ */
index 23d7cca36ff031b6463aae8a639b061bfd2c825c..f1c57cd33b5ba3bd78c7b66e0b9b4ba171f17f14 100644 (file)
@@ -231,6 +231,8 @@ static struct ufs_dev_fix ufs_fixups[] = {
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
        UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
                UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+       UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/,
+               UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME),
 
        END_FIX
 };
@@ -8099,13 +8101,6 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
                err = -ENOMEM;
                goto out_error;
        }
-
-       /*
-        * Do not use blk-mq at this time because blk-mq does not support
-        * runtime pm.
-        */
-       host->use_blk_mq = false;
-
        hba = shost_priv(host);
        hba->host = host;
        hba->dev = dev;
index 6e491023fdd88b2860b92699896569edbcfe4da6..0d6b2a88fc8e26d9297801304c0d2f1a65fbbb43 100644 (file)
@@ -1202,8 +1202,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
 
 static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
 {
-       pvscsi_shutdown_intr(adapter);
-
        if (adapter->workqueue)
                destroy_workqueue(adapter->workqueue);
 
@@ -1534,6 +1532,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 out_reset_adapter:
        ll_adapter_reset(adapter);
 out_release_resources:
+       pvscsi_shutdown_intr(adapter);
        pvscsi_release_resources(adapter);
        scsi_host_put(host);
 out_disable_device:
@@ -1542,6 +1541,7 @@ out_disable_device:
        return error;
 
 out_release_resources_and_disable:
+       pvscsi_shutdown_intr(adapter);
        pvscsi_release_resources(adapter);
        goto out_disable_device;
 }
index 7218fb963d0a111d5ccc15cc6d96d1370f9fe3ff..1382a8df6c75f8439f98261811768ca3126883af 100644 (file)
@@ -777,9 +777,6 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl,
        u8 la = txn->la;
        bool usr_msg = false;
 
-       if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)
-               return -EPROTONOSUPPORT;
-
        if (txn->mt == SLIM_MSG_MT_CORE &&
                (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION &&
                 txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW))
index 4399d1873e2d929b4897ad7fedfd8ca69d08ae72..9be41089edde7385abd89b87385376d730be7d29 100644 (file)
 #define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL          0x58
 #define SLIM_MSG_MC_RECONFIGURE_NOW              0x5F
 
-/*
- * Clock pause flag to indicate that the reconfig message
- * corresponds to clock pause sequence
- */
-#define SLIM_MSG_CLK_PAUSE_SEQ_FLG             (1U << 8)
-
 /* Clock pause values per SLIMbus spec */
 #define SLIM_CLK_FAST                          0
 #define SLIM_CLK_CONST_PHASE                   1
index 7c128132799e0cd355c7bae1298d5a776b6c8ec9..4c28fa938ac76a28ddfba64fbfe35e4e2054c3f6 100644 (file)
@@ -329,8 +329,8 @@ struct knav_range_ops {
 };
 
 struct knav_irq_info {
-       int     irq;
-       u32     cpu_map;
+       int             irq;
+       struct cpumask  *cpu_mask;
 };
 
 struct knav_range_info {
index 316e82e46f6cbff0500ba8409529dffc3d0eddbe..2f7fb2dcc1d66d130580b57d7574ecabbbdb1546 100644 (file)
@@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
 {
        struct knav_device *kdev = range->kdev;
        struct knav_acc_channel *acc;
-       unsigned long cpu_map;
+       struct cpumask *cpu_mask;
        int ret = 0, irq;
        u32 old, new;
 
        if (range->flags & RANGE_MULTI_QUEUE) {
                acc = range->acc;
                irq = range->irqs[0].irq;
-               cpu_map = range->irqs[0].cpu_map;
+               cpu_mask = range->irqs[0].cpu_mask;
        } else {
                acc = range->acc + queue;
                irq = range->irqs[queue].irq;
-               cpu_map = range->irqs[queue].cpu_map;
+               cpu_mask = range->irqs[queue].cpu_mask;
        }
 
        old = acc->open_mask;
@@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
                        acc->name, acc->name);
                ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
                                  range);
-               if (!ret && cpu_map) {
-                       ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+               if (!ret && cpu_mask) {
+                       ret = irq_set_affinity_hint(irq, cpu_mask);
                        if (ret) {
                                dev_warn(range->kdev->dev,
                                         "Failed to set IRQ affinity\n");
index b5d5673c255cad4f9326a318c5c441ba288a7591..8b418379272da7438f495b2283140e8beba7e9a1 100644 (file)
@@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range,
                          struct knav_queue_inst *inst)
 {
        unsigned queue = inst->id - range->queue_base;
-       unsigned long cpu_map;
        int ret = 0, irq;
 
        if (range->flags & RANGE_HAS_IRQ) {
                irq = range->irqs[queue].irq;
-               cpu_map = range->irqs[queue].cpu_map;
                ret = request_irq(irq, knav_queue_int_handler, 0,
                                        inst->irq_name, inst);
                if (ret)
                        return ret;
                disable_irq(irq);
-               if (cpu_map) {
-                       ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+               if (range->irqs[queue].cpu_mask) {
+                       ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
                        if (ret) {
                                dev_warn(range->kdev->dev,
                                         "Failed to set IRQ affinity\n");
@@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev,
 
                range->num_irqs++;
 
-               if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
-                       range->irqs[i].cpu_map =
-                               (oirq.args[2] & 0x0000ff00) >> 8;
+               if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+                       unsigned long mask;
+                       int bit;
+
+                       range->irqs[i].cpu_mask = devm_kzalloc(dev,
+                                                              cpumask_size(), GFP_KERNEL);
+                       if (!range->irqs[i].cpu_mask)
+                               return -ENOMEM;
+
+                       mask = (oirq.args[2] & 0x0000ff00) >> 8;
+                       for_each_set_bit(bit, &mask, BITS_PER_LONG)
+                               cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+               }
        }
 
        range->num_irqs = min(range->num_irqs, range->num_queues);
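
The knav hunks convert the one-byte CPU bitmask parsed from the device tree into a real struct cpumask allocated per IRQ, instead of casting a stack variable with to_cpumask(). The bit-expansion loop itself, reduced to portable C:

#include <stdio.h>

int main(void)
{
	/* third interrupt specifier cell, as in the hunk above */
	unsigned long oirq_arg2 = 0x00000300;
	unsigned long mask = (oirq_arg2 & 0x0000ff00) >> 8;	/* -> 0x3 */
	int bit;

	for (bit = 0; bit < 8; bit++)	/* models for_each_set_bit() */
		if (mask & (1UL << bit))
			printf("IRQ affinity includes CPU %d\n", bit);
	return 0;
}
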
index 3dc31627c655809eca5c11eca309de9ddcfe5b68..0c2867deb36fce48c74b92388d210371ebd1a6d0 100644 (file)
@@ -522,11 +522,11 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
                mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
                mtk_spi_setup_packet(master);
 
-               cnt = len / 4;
+               cnt = mdata->xfer_len / 4;
                iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
                                trans->tx_buf + mdata->num_xfered, cnt);
 
-               remainder = len % 4;
+               remainder = mdata->xfer_len % 4;
                if (remainder > 0) {
                        reg_val = 0;
                        memcpy(&reg_val,
index f024c3fc3679de30c7969b28199fabcbbd562087..2fd8881fcd65c96ca76e5932bb310605a2ff1c49 100644 (file)
@@ -1540,13 +1540,26 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:omap2_mcspi");
 
-#ifdef CONFIG_SUSPEND
-static int omap2_mcspi_suspend_noirq(struct device *dev)
+static int __maybe_unused omap2_mcspi_suspend(struct device *dev)
 {
-       return pinctrl_pm_select_sleep_state(dev);
+       struct spi_master *master = dev_get_drvdata(dev);
+       struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+       int error;
+
+       error = pinctrl_pm_select_sleep_state(dev);
+       if (error)
+               dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
+                        __func__, error);
+
+       error = spi_master_suspend(master);
+       if (error)
+               dev_warn(mcspi->dev, "%s: master suspend failed: %i\n",
+                        __func__, error);
+
+       return pm_runtime_force_suspend(dev);
 }
 
-static int omap2_mcspi_resume_noirq(struct device *dev)
+static int __maybe_unused omap2_mcspi_resume(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
        struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
@@ -1557,17 +1570,17 @@ static int omap2_mcspi_resume_noirq(struct device *dev)
                dev_warn(mcspi->dev, "%s: failed to set pins: %i\n",
                         __func__, error);
 
-       return 0;
-}
+       error = spi_master_resume(master);
+       if (error)
+               dev_warn(mcspi->dev, "%s: master resume failed: %i\n",
+                        __func__, error);
 
-#else
-#define omap2_mcspi_suspend_noirq      NULL
-#define omap2_mcspi_resume_noirq       NULL
-#endif
+       return pm_runtime_force_resume(dev);
+}
 
 static const struct dev_pm_ops omap2_mcspi_pm_ops = {
-       .suspend_noirq = omap2_mcspi_suspend_noirq,
-       .resume_noirq = omap2_mcspi_resume_noirq,
+       SET_SYSTEM_SLEEP_PM_OPS(omap2_mcspi_suspend,
+                               omap2_mcspi_resume)
        .runtime_resume = omap_mcspi_runtime_resume,
 };
 
index e90b1777528403ea43e3d575b17e90c363f5b9e4..09a940066c0ec6a931436063b8a19047a00e8005 100644 (file)
@@ -1005,35 +1005,38 @@ enum i8254_mode {
  * and INSN_DEVICE_CONFIG_GET_ROUTES.
  */
 #define NI_NAMES_BASE  0x8000u
+
+#define _TERM_N(base, n, x)    ((base) + ((x) & ((n) - 1)))
+
 /*
  * not necessarily all allowed 64 PFIs are valid--certainly not for all devices
  */
-#define NI_PFI(x)      (NI_NAMES_BASE        + ((x) & 0x3f))
+#define NI_PFI(x)              _TERM_N(NI_NAMES_BASE, 64, x)
 /* 8 trigger lines by standard, Some devices cannot talk to all eight. */
-#define TRIGGER_LINE(x)        (NI_PFI(-1)       + 1 + ((x) & 0x7))
+#define TRIGGER_LINE(x)                _TERM_N(NI_PFI(-1) + 1, 8, x)
 /* 4 RTSI shared MUXes to route signals to/from TRIGGER_LINES on NI hardware */
-#define NI_RTSI_BRD(x) (TRIGGER_LINE(-1) + 1 + ((x) & 0x3))
+#define NI_RTSI_BRD(x)         _TERM_N(TRIGGER_LINE(-1) + 1, 4, x)
 
 /* *** Counter/timer names : 8 counters max *** */
-#define NI_COUNTER_NAMES_BASE  (NI_RTSI_BRD(-1)  + 1)
-#define NI_MAX_COUNTERS               7
-#define NI_CtrSource(x)               (NI_COUNTER_NAMES_BASE + ((x) & NI_MAX_COUNTERS))
+#define NI_MAX_COUNTERS                8
+#define NI_COUNTER_NAMES_BASE  (NI_RTSI_BRD(-1)  + 1)
+#define NI_CtrSource(x)              _TERM_N(NI_COUNTER_NAMES_BASE, NI_MAX_COUNTERS, x)
 /* Gate, Aux, A,B,Z are all treated, at times as gates */
-#define NI_GATES_NAMES_BASE    (NI_CtrSource(-1) + 1)
-#define NI_CtrGate(x)         (NI_GATES_NAMES_BASE   + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrAux(x)          (NI_CtrGate(-1)   + 1  + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrA(x)            (NI_CtrAux(-1)    + 1  + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrB(x)            (NI_CtrA(-1)      + 1  + ((x) & NI_MAX_COUNTERS))
-#define NI_CtrZ(x)            (NI_CtrB(-1)      + 1  + ((x) & NI_MAX_COUNTERS))
-#define NI_GATES_NAMES_MAX     NI_CtrZ(-1)
-#define NI_CtrArmStartTrigger(x) (NI_CtrZ(-1)    + 1  + ((x) & NI_MAX_COUNTERS))
+#define NI_GATES_NAMES_BASE    (NI_CtrSource(-1) + 1)
+#define NI_CtrGate(x)          _TERM_N(NI_GATES_NAMES_BASE, NI_MAX_COUNTERS, x)
+#define NI_CtrAux(x)           _TERM_N(NI_CtrGate(-1)  + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrA(x)             _TERM_N(NI_CtrAux(-1)   + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrB(x)             _TERM_N(NI_CtrA(-1)     + 1, NI_MAX_COUNTERS, x)
+#define NI_CtrZ(x)             _TERM_N(NI_CtrB(-1)     + 1, NI_MAX_COUNTERS, x)
+#define NI_GATES_NAMES_MAX     NI_CtrZ(-1)
+#define NI_CtrArmStartTrigger(x) _TERM_N(NI_CtrZ(-1)    + 1, NI_MAX_COUNTERS, x)
 #define NI_CtrInternalOutput(x) \
-                    (NI_CtrArmStartTrigger(-1)  + 1  + ((x) & NI_MAX_COUNTERS))
+                     _TERM_N(NI_CtrArmStartTrigger(-1) + 1, NI_MAX_COUNTERS, x)
 /** external pin(s) labeled conveniently as Ctr<i>Out. */
-#define NI_CtrOut(x)  (NI_CtrInternalOutput(-1)  + 1  + ((x) & NI_MAX_COUNTERS))
+#define NI_CtrOut(x)   _TERM_N(NI_CtrInternalOutput(-1) + 1, NI_MAX_COUNTERS, x)
 /** For Buffered sampling of ctr -- x series capability. */
-#define NI_CtrSampleClock(x)   (NI_CtrOut(-1)   + 1  + ((x) & NI_MAX_COUNTERS))
-#define NI_COUNTER_NAMES_MAX   NI_CtrSampleClock(-1)
+#define NI_CtrSampleClock(x)   _TERM_N(NI_CtrOut(-1)   + 1, NI_MAX_COUNTERS, x)
+#define NI_COUNTER_NAMES_MAX   NI_CtrSampleClock(-1)
 
 enum ni_common_signal_names {
        /* PXI_Star: this is a non-NI-specific signal */
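
The _TERM_N() helper above relies on n being a power of two: (x) & (n - 1) wraps x into 0..n-1, and x == -1 selects the last slot, which is exactly how the NI_*(-1) + 1 chaining computes each successive base (and why NI_MAX_COUNTERS had to become 8). Checked standalone:

#include <assert.h>

#define TERM_N(base, n, x) ((base) + ((x) & ((n) - 1)))

int main(void)
{
	enum { BASE = 0x8000, N = 8 };

	assert(TERM_N(BASE, N, 0) == BASE);
	assert(TERM_N(BASE, N, 7) == BASE + 7);
	assert(TERM_N(BASE, N, -1) == BASE + 7);	/* last entry */
	assert(TERM_N(BASE, N, -1) + 1 == BASE + N);	/* next range's base */
	return 0;
}
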
index 2d1e0325d04d122280eb267c7e1ccd997b6988b4..5edf59ac6706d3b5cd7d23d0f945895dc1cb8f48 100644 (file)
@@ -2843,7 +2843,8 @@ static int ni_ao_insn_config(struct comedi_device *dev,
                return ni_ao_arm(dev, s);
        case INSN_CONFIG_GET_CMD_TIMING_CONSTRAINTS:
                /* we don't care about actual channels */
-               data[1] = board->ao_speed;
+               /* data[3] : chanlist_len */
+               data[1] = board->ao_speed * data[3];
                data[2] = 0;
                return 0;
        default:
index a53231b08d30ee390e9565cfc52c185023692778..e3425bf082ae986ab4c28893c11112654a9fbb2a 100644 (file)
@@ -310,6 +310,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd)
                        ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2);
                        break;
                }
+               /* fall through */
 
        case IPIPEIF_SDRAM_YUV:
                /* Set clock divider */
index ec277ece47afd87dbb61d00f66568e138657d3d1..a951b3fd1ea1dee94c7e572affcbce22dd3ee7ed 100644 (file)
@@ -5,3 +5,8 @@ Before this stateless decoder driver can leave the staging area:
 * Userspace support for the Request API needs to be reviewed;
 * Another stateless decoder driver should be submitted;
 * At least one stateless encoder driver should be submitted.
+* When queueing a request containing references to I frames, the
+  refcount of the memory for those I frames needs to be incremented
+  and decremented when the request is completed. This will likely
+  require some help from vb2. The driver should fail the request
+  if the memory/buffer is gone.
index 82558455384af6f1bbd576e0181261163c1bdce3..c912c70b3ef77e3f9794696fb95b24171671fab7 100644 (file)
@@ -108,17 +108,6 @@ static int cedrus_request_validate(struct media_request *req)
        unsigned int count;
        unsigned int i;
 
-       count = vb2_request_buffer_cnt(req);
-       if (!count) {
-               v4l2_info(&ctx->dev->v4l2_dev,
-                         "No buffer was provided with the request\n");
-               return -ENOENT;
-       } else if (count > 1) {
-               v4l2_info(&ctx->dev->v4l2_dev,
-                         "More than one buffer was provided with the request\n");
-               return -EINVAL;
-       }
-
        list_for_each_entry(obj, &req->objects, list) {
                struct vb2_buffer *vb;
 
@@ -133,6 +122,17 @@ static int cedrus_request_validate(struct media_request *req)
        if (!ctx)
                return -ENOENT;
 
+       count = vb2_request_buffer_cnt(req);
+       if (!count) {
+               v4l2_info(&ctx->dev->v4l2_dev,
+                         "No buffer was provided with the request\n");
+               return -ENOENT;
+       } else if (count > 1) {
+               v4l2_info(&ctx->dev->v4l2_dev,
+                         "More than one buffer was provided with the request\n");
+               return -EINVAL;
+       }
+
        parent_hdl = &ctx->hdl;
 
        hdl = v4l2_ctrl_request_hdl_find(req, parent_hdl);
@@ -253,7 +253,7 @@ static const struct v4l2_m2m_ops cedrus_m2m_ops = {
 
 static const struct media_device_ops cedrus_m2m_media_ops = {
        .req_validate   = cedrus_request_validate,
-       .req_queue      = vb2_m2m_request_queue,
+       .req_queue      = v4l2_m2m_request_queue,
 };
 
 static int cedrus_probe(struct platform_device *pdev)
index 6a18cf73c85eaf7463cb5a7da4f684d03f98d832..18936cdb10830ae4506435377a3342bb0c2e076e 100644 (file)
@@ -351,7 +351,7 @@ static ssize_t set_datatype_show(struct device *dev,
 
        for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
                if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
-                       return snprintf(buf, PAGE_SIZE, ch_data_type[i].name);
+                       return snprintf(buf, PAGE_SIZE, "%s", ch_data_type[i].name);
        }
        return snprintf(buf, PAGE_SIZE, "unconfigured\n");
 }
index df6ebf41bdea4dc7301414c5cb9b42e5679d5472..5831f816c17b1d37c86b3c332aeff2305930c117 100644 (file)
@@ -335,6 +335,8 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
        /* tx desc */
        src = sg->src_addr;
        for (i = 0; i < chan->desc->num_sgs; i++) {
+               tx_desc = &chan->tx_ring[chan->tx_idx];
+
                if (len > HSDMA_MAX_PLEN)
                        tlen = HSDMA_MAX_PLEN;
                else
@@ -344,7 +346,6 @@ static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
                        tx_desc->addr1 = src;
                        tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
                } else {
-                       tx_desc = &chan->tx_ring[chan->tx_idx];
                        tx_desc->addr0 = src;
                        tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
 
index b8566ed898f15afdbbd85d61289543efbc3e1bdc..aa98fbb170139ef7ba443d80ae2c8da72e9a4357 100644 (file)
@@ -82,7 +82,7 @@ static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev,
        struct property *prop;
        const char *function_name, *group_name;
        int ret;
-       int ngroups;
+       int ngroups = 0;
        unsigned int reserved_maps = 0;
 
        for_each_node_with_property(np_config, "group")
index 9d156efbc9edfe9933143464a0c689fb762635c7..4d473f008aa48f7c3f5d2735402a5edc257dbf61 100644 (file)
@@ -146,7 +146,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
                p = buff;
                p += sprintf(p, "ASSOCINFO(ReqIEs=");
                len = sec_ie[1] + 2;
-               len =  (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX - 1;
+               len =  (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
                for (i = 0; i < len; i++)
                        p += sprintf(p, "%02x", sec_ie[i]);
                p += sprintf(p, ")");
index a7374006a9fbe1eef65649377bd07f6e9aef3354..986a1d52691804fb5173b780347f907fc7a5ed3c 100644 (file)
@@ -1346,7 +1346,7 @@ sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie,
                     u8 *out_ie, uint in_len)
 {
        u8 authmode = 0, match;
-       u8 sec_ie[255], uncst_oui[4], bkup_ie[255];
+       u8 sec_ie[IW_CUSTOM_MAX], uncst_oui[4], bkup_ie[255];
        u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
        uint ielength, cnt, remove_cnt;
        int iEntry;
index 69c7abc0e3a551af5fd0328a0f6926010a32986d..8445d516c93d352cb0f83ab809154109faee5cde 100644 (file)
@@ -1565,7 +1565,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
        if (pstat->aid > 0) {
                DBG_871X("  old AID %d\n", pstat->aid);
        } else {
-               for (pstat->aid = 1; pstat->aid < NUM_STA; pstat->aid++)
+               for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
                        if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
                                break;
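
The rtl8723bs fix above is a classic inclusive-bound bug: AIDs run from 1 to NUM_STA and index sta_aid[aid - 1], so a loop condition of aid < NUM_STA could never hand out the last AID. In miniature:

#include <assert.h>
#include <string.h>

#define NUM_STA 4

int main(void)
{
	void *sta_aid[NUM_STA];
	int aid;

	memset(sta_aid, 0, sizeof(sta_aid));
	sta_aid[0] = sta_aid[1] = sta_aid[2] = (void *)1;	/* AIDs 1..3 taken */

	for (aid = 1; aid <= NUM_STA; aid++)	/* '<' would miss slot 4 */
		if (sta_aid[aid - 1] == NULL)
			break;
	assert(aid == NUM_STA);			/* the last AID is usable */
	return 0;
}
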
 
index 85077947b9b8d44b82a467a8e4cd5f86e5f5feb9..85aba8a503cd200b5fbfddc39a62932303767121 100644 (file)
@@ -109,12 +109,12 @@ static void update_recvframe_phyinfo(union recv_frame *precvframe,
        rx_bssid = get_hdr_bssid(wlanhdr);
        pkt_info.bssid_match = ((!IsFrameTypeCtrl(wlanhdr)) &&
                                !pattrib->icv_err && !pattrib->crc_err &&
-                               !ether_addr_equal(rx_bssid, my_bssid));
+                               ether_addr_equal(rx_bssid, my_bssid));
 
        rx_ra = get_ra(wlanhdr);
        my_hwaddr = myid(&padapter->eeprompriv);
        pkt_info.to_self = pkt_info.bssid_match &&
-               !ether_addr_equal(rx_ra, my_hwaddr);
+               ether_addr_equal(rx_ra, my_hwaddr);
 
 
        pkt_info.is_beacon = pkt_info.bssid_match &&
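
The two hunks above fix an inverted test: unlike memcmp(), which returns 0 on
a match, ether_addr_equal() returns true when the two addresses are
identical, so the old ! negations reported a match as a mismatch.  A minimal
sketch of the equivalence:

    #include <linux/etherdevice.h>

    /* true when rx_bssid and my_bssid name the same station */
    static bool bssid_matches(const u8 *rx_bssid, const u8 *my_bssid)
    {
            return ether_addr_equal(rx_bssid, my_bssid);
            /* memcmp() spelling of the same test:
             *     return memcmp(rx_bssid, my_bssid, ETH_ALEN) == 0;
             */
    }
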
index af2234798fa8e18f4fd2dcff394e6a9d9343b2e8..db553f2e4c0b835386330ec9773c219085b82c2f 100644 (file)
@@ -1277,7 +1277,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
 
                sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
                sinfo->tx_packets = psta->sta_stats.tx_pkts;
-
+               sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
        }
 
        /* for Ad-Hoc/AP mode */
index 28bfdbdc6e76aa1bcc4aecbb1b07c7a8f3483f32..b8631baf128d62abef43fa9336f69645bee37e96 100644 (file)
@@ -2289,7 +2289,7 @@ static int rtw_wx_read32(struct net_device *dev,
 exit:
        kfree(ptmp);
 
-       return 0;
+       return ret;
 }
 
 static int rtw_wx_write32(struct net_device *dev,
index ea789376de0f8cfa7442edb2ddcfb6cde5bb2ef0..45de21c210c1c0f361a18e1da515e8bd979b9cbb 100644 (file)
@@ -1795,6 +1795,7 @@ vchiq_compat_ioctl_await_completion(struct file *file,
        struct vchiq_await_completion32 args32;
        struct vchiq_completion_data32 completion32;
        unsigned int *msgbufcount32;
+       unsigned int msgbufcount_native;
        compat_uptr_t msgbuf32;
        void *msgbuf;
        void **msgbufptr;
@@ -1906,7 +1907,11 @@ vchiq_compat_ioctl_await_completion(struct file *file,
                         sizeof(completion32)))
                return -EFAULT;
 
-       args32.msgbufcount--;
+       if (get_user(msgbufcount_native, &args->msgbufcount))
+               return -EFAULT;
+
+       if (!msgbufcount_native)
+               args32.msgbufcount--;
 
        msgbufcount32 =
                &((struct vchiq_await_completion32 __user *)arg)->msgbufcount;
index 1227872227dc446b70b492600f52214c256098ad..36b742932c724c8e6f80ca29cb36d0d20208a9a7 100644 (file)
@@ -1245,8 +1245,7 @@ static int iscsit_do_rx_data(
                return -1;
 
        memset(&msg, 0, sizeof(struct msghdr));
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC,
-                     count->iov, count->iov_count, data);
+       iov_iter_kvec(&msg.msg_iter, READ, count->iov, count->iov_count, data);
 
        while (msg_data_left(&msg)) {
                rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
@@ -1302,8 +1301,7 @@ int tx_data(
 
        memset(&msg, 0, sizeof(struct msghdr));
 
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC,
-                     iov, iov_count, data);
+       iov_iter_kvec(&msg.msg_iter, WRITE, iov, iov_count, data);
 
        while (msg_data_left(&msg)) {
                int tx_loop = sock_sendmsg(conn->sock, &msg);
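
Both hunks track an iov_iter API change from this cycle: the iterator type
(formerly the ITER_KVEC flag) is now implied by the constructor, so callers
pass only the data direction.  A sketch of the updated calling convention:

    #include <linux/socket.h>
    #include <linux/string.h>
    #include <linux/uio.h>

    /* prepare msg to receive up to len bytes into a kernel buffer */
    static void prep_rx_msg(struct msghdr *msg, struct kvec *vec,
                            void *buf, size_t len)
    {
            memset(msg, 0, sizeof(*msg));
            vec->iov_base = buf;
            vec->iov_len = len;
            iov_iter_kvec(&msg->msg_iter, READ, vec, 1, len);
    }
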
index e46ca968009c06a2958e347104168cca32c37278..4f134b0c3e29e012cc05191545cf5ac6733f9282 100644 (file)
@@ -268,7 +268,7 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
        }
        transport_kunmap_data_sg(cmd);
 
-       target_complete_cmd(cmd, GOOD);
+       target_complete_cmd_with_length(cmd, GOOD, rd_len + 4);
        return 0;
 }
 
index 16751ae55d7b6f64b2bc9551f588f8b0e5d06cb8..49b110d1b972b671b17f0e1e1dab588628155bc8 100644 (file)
@@ -303,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                len += sg->length;
        }
 
-       iov_iter_bvec(&iter, ITER_BVEC | is_write, bvec, sgl_nents, len);
+       iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
 
        aio_cmd->cmd = cmd;
        aio_cmd->len = len;
@@ -353,7 +353,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
                len += sg->length;
        }
 
-       iov_iter_bvec(&iter, ITER_BVEC, bvec, sgl_nents, len);
+       iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
        if (is_write)
                ret = vfs_iter_write(fd, &iter, &pos, 0);
        else
@@ -490,7 +490,7 @@ fd_execute_write_same(struct se_cmd *cmd)
                len += se_dev->dev_attrib.block_size;
        }
 
-       iov_iter_bvec(&iter, ITER_BVEC, bvec, nolb, len);
+       iov_iter_bvec(&iter, READ, bvec, nolb, len);
        ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);
 
        kfree(bvec);
index 4cf33e2cc7058843fd547ffc2da8a02a262dd5e8..2cfd61d62e9730503414817d52c053c4083ba9e7 100644 (file)
@@ -205,19 +205,19 @@ void transport_subsystem_check_init(void)
        if (sub_api_initialized)
                return;
 
-       ret = request_module("target_core_iblock");
+       ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
        if (ret != 0)
                pr_err("Unable to load target_core_iblock\n");
 
-       ret = request_module("target_core_file");
+       ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
        if (ret != 0)
                pr_err("Unable to load target_core_file\n");
 
-       ret = request_module("target_core_pscsi");
+       ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
        if (ret != 0)
                pr_err("Unable to load target_core_pscsi\n");
 
-       ret = request_module("target_core_user");
+       ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
        if (ret != 0)
                pr_err("Unable to load target_core_user\n");
 
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
 void transport_generic_request_failure(struct se_cmd *cmd,
                sense_reason_t sense_reason)
 {
-       int ret = 0;
+       int ret = 0, post_ret;
 
        pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
                 sense_reason);
@@ -1790,7 +1790,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        transport_complete_task_attr(cmd);
 
        if (cmd->transport_complete_callback)
-               cmd->transport_complete_callback(cmd, false, NULL);
+               cmd->transport_complete_callback(cmd, false, &post_ret);
 
        if (transport_check_aborted_status(cmd, 1))
                return;
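
The IS_ENABLED() change in the first hunk of this file is worth unpacking:
IS_ENABLED(CONFIG_FOO) expands to a compile-time 0 or 1, so the && short-
circuits the modprobe entirely for backends that are configured out, and ret
stays 0 instead of logging a bogus load failure.  A sketch of the pattern:

    #include <linux/kconfig.h>
    #include <linux/kmod.h>
    #include <linux/printk.h>

    static void load_iblock_backend(void)
    {
            int ret;

            /* 0 when CONFIG_TCM_IBLOCK is off (call skipped), otherwise
             * the boolean result of request_module() */
            ret = IS_ENABLED(CONFIG_TCM_IBLOCK) &&
                  request_module("target_core_iblock");
            if (ret != 0)
                    pr_err("Unable to load target_core_iblock\n");
    }
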
index 92f67d40f2e96a876083a71b6287e2bf159243c6..d7105d01859ab086de87482535019408ae15c818 100644 (file)
@@ -357,7 +357,7 @@ static int armada_get_temp_legacy(struct thermal_zone_device *thermal,
        int ret;
 
        /* Validity check */
-       if (armada_is_valid(priv)) {
+       if (!armada_is_valid(priv)) {
                dev_err(priv->dev,
                        "Temperature sensor reading not valid\n");
                return -EIO;
@@ -395,7 +395,7 @@ unlock_mutex:
        return ret;
 }
 
-static struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops of_ops = {
        .get_temp = armada_get_temp,
 };
 
@@ -526,23 +526,21 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
 
        /* First memory region points towards the status register */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res)
-               return -EIO;
-
-       /*
-        * Edit the resource start address and length to map over all the
-        * registers, instead of pointing at them one by one.
-        */
-       res->start -= data->syscon_status_off;
-       res->end = res->start + max(data->syscon_status_off,
-                                   max(data->syscon_control0_off,
-                                       data->syscon_control1_off)) +
-                  sizeof(unsigned int) - 1;
-
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
 
+       /*
+        * Fix up from the old individual DT register specification to
+        * cover all the registers.  We do this by adjusting the ioremap()
+        * result, which should be fine as ioremap() deals with pages.
+        * However, validate that we do not cross a page boundary while
+        * making this adjustment.
+        */
+       if (((unsigned long)base & ~PAGE_MASK) < data->syscon_status_off)
+               return -EINVAL;
+       base -= data->syscon_status_off;
+
        priv->syscon = devm_regmap_init_mmio(&pdev->dev, base,
                                             &armada_thermal_regmap_config);
        if (IS_ERR(priv->syscon))
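
The replacement logic rests on one invariant: moving the mapping cookie back
by syscon_status_off is only legal while it stays inside the page that
ioremap() returned.  The check, with the arithmetic spelled out as a sketch:

    /* offset of the cookie within its page */
    unsigned long page_off = (unsigned long)base & ~PAGE_MASK;

    /* base - syscon_status_off would step in front of the page start
     * whenever the in-page offset is smaller than what we subtract */
    if (page_off < data->syscon_status_off)
            return -EINVAL;

    base -= data->syscon_status_off;        /* still inside the mapping */
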
index 23ad4f9f21438e45a819da46962025eeeb922590..b9d90f0ed504dc20357da58104eaf63526f73e06 100644 (file)
@@ -1,17 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Driver for Broadcom BCM2835 SoC temperature sensor
  *
  * Copyright (C) 2016 Martin Sperl
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #include <linux/clk.h>
index 1919f91fa756597ff63f6e1518d92363f402e769..e8b1570cc3888a3eff4e066c324a3d3c4d10b95c 100644 (file)
@@ -299,7 +299,7 @@ static int brcmstb_set_trips(void *data, int low, int high)
        return 0;
 }
 
-static struct thermal_zone_of_device_ops of_ops = {
+static const struct thermal_zone_of_device_ops of_ops = {
        .get_temp       = brcmstb_get_temp,
        .set_trips      = brcmstb_set_trips,
 };
index 52ff854f0d6c10898d060b78a0b8c13212f8c192..cd96994dc0947137672d4b82a3d017e1cd1c8520 100644 (file)
@@ -863,6 +863,30 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR(key, 0600, key_show, key_store);
 
+static void nvm_authenticate_start(struct tb_switch *sw)
+{
+       struct pci_dev *root_port;
+
+       /*
+        * During a host router NVM upgrade we should not allow the root
+        * port to go into D3cold, because some root ports cannot trigger
+        * PME themselves. To be on the safe side, keep the root port in
+        * D0 during the whole upgrade process.
+        */
+       root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+       if (root_port)
+               pm_runtime_get_noresume(&root_port->dev);
+}
+
+static void nvm_authenticate_complete(struct tb_switch *sw)
+{
+       struct pci_dev *root_port;
+
+       root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev);
+       if (root_port)
+               pm_runtime_put(&root_port->dev);
+}
+
 static ssize_t nvm_authenticate_show(struct device *dev,
        struct device_attribute *attr, char *buf)
 {
@@ -912,10 +936,18 @@ static ssize_t nvm_authenticate_store(struct device *dev,
 
                sw->nvm->authenticating = true;
 
-               if (!tb_route(sw))
+               if (!tb_route(sw)) {
+                       /*
+                        * Keep root port from suspending as long as the
+                        * NVM upgrade process is running.
+                        */
+                       nvm_authenticate_start(sw);
                        ret = nvm_authenticate_host(sw);
-               else
+                       if (ret)
+                               nvm_authenticate_complete(sw);
+               } else {
                        ret = nvm_authenticate_device(sw);
+               }
                pm_runtime_mark_last_busy(&sw->dev);
                pm_runtime_put_autosuspend(&sw->dev);
        }
@@ -1334,6 +1366,10 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
        if (ret <= 0)
                return ret;
 
+       /* Now we can allow root port to suspend again */
+       if (!tb_route(sw))
+               nvm_authenticate_complete(sw);
+
        if (status) {
                tb_sw_info(sw, "switch flash authentication failed\n");
                tb_switch_set_uuid(sw);
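
The nvm_authenticate_start()/nvm_authenticate_complete() pair added above is
a standard runtime-PM bracket.  A sketch of its shape, with
do_long_operation() standing in for the NVM upgrade:

    #include <linux/pm_runtime.h>

    /* keep dev powered; no wakeup needed, it is already up */
    pm_runtime_get_noresume(dev);

    ret = do_long_operation();              /* hypothetical long-running work */
    if (ret)
            pm_runtime_put(dev);            /* error path: release right away */
    /* on success the reference is dropped later, from the completion path */
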
index dd5e1cede2b5847979965aa217c29028c36f89cd..c3f933d10295eba70bc8cff4a501333109c1abeb 100644 (file)
@@ -213,17 +213,17 @@ static int mtk8250_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, data);
 
-       pm_runtime_enable(&pdev->dev);
-       if (!pm_runtime_enabled(&pdev->dev)) {
-               err = mtk8250_runtime_resume(&pdev->dev);
-               if (err)
-                       return err;
-       }
+       err = mtk8250_runtime_resume(&pdev->dev);
+       if (err)
+               return err;
 
        data->line = serial8250_register_8250_port(&uart);
        if (data->line < 0)
                return data->line;
 
+       pm_runtime_set_active(&pdev->dev);
+       pm_runtime_enable(&pdev->dev);
+
        return 0;
 }
 
@@ -234,13 +234,11 @@ static int mtk8250_remove(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        serial8250_unregister_port(data->line);
+       mtk8250_runtime_suspend(&pdev->dev);
 
        pm_runtime_disable(&pdev->dev);
        pm_runtime_put_noidle(&pdev->dev);
 
-       if (!pm_runtime_status_suspended(&pdev->dev))
-               mtk8250_runtime_suspend(&pdev->dev);
-
        return 0;
 }
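
The reordered probe path follows the usual recipe for hardware that is
powered up by hand before runtime PM takes over: resume the block, register
it, then mark it active so the PM core's bookkeeping matches the real device
state before pm_runtime_enable().  A sketch, with my_runtime_resume() as a
stand-in:

    #include <linux/pm_runtime.h>

    err = my_runtime_resume(dev);   /* hypothetical manual power-up */
    if (err)
            return err;

    /* ... register the UART port here ... */

    pm_runtime_set_active(dev);     /* tell the PM core we are already up */
    pm_runtime_enable(dev);
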
 
index baeeeaec3f030f94d7c0176647dff851e6647232..6fb312e7af713ecd3efcc4c0ef069602635f7681 100644 (file)
@@ -233,7 +233,7 @@ static void kgdboc_put_char(u8 chr)
 static int param_set_kgdboc_var(const char *kmessage,
                                const struct kernel_param *kp)
 {
-       int len = strlen(kmessage);
+       size_t len = strlen(kmessage);
 
        if (len >= MAX_CONFIG_LEN) {
                pr_err("config string too long\n");
@@ -254,7 +254,7 @@ static int param_set_kgdboc_var(const char *kmessage,
 
        strcpy(config, kmessage);
        /* Chop out \n char as a result of echo */
-       if (config[len - 1] == '\n')
+       if (len && config[len - 1] == '\n')
                config[len - 1] = '\0';
 
        if (configured == 1)
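
The added len check matters for the empty string: with len == 0,
config[len - 1] would read one byte before the buffer.  The guarded trim, as
a sketch:

    #include <linux/string.h>

    size_t len = strlen(config);

    /* drop a trailing newline left behind by echo; the len check
     * short-circuits the out-of-bounds read for an empty string */
    if (len && config[len - 1] == '\n')
            config[len - 1] = '\0';
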
index ff6ba6d86cd8bf9ba43349fde40ec2d576ffed05..cc56cb3b3ecaa222222587da30460a58b2685bd7 100644 (file)
@@ -1614,10 +1614,10 @@ static void sci_request_dma(struct uart_port *port)
                hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
                s->rx_timer.function = rx_timer_fn;
 
+               s->chan_rx_saved = s->chan_rx = chan;
+
                if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
                        sci_submit_rx(s);
-
-               s->chan_rx_saved = s->chan_rx = chan;
        }
 }
 
@@ -3102,6 +3102,7 @@ static struct uart_driver sci_uart_driver = {
 static int sci_remove(struct platform_device *dev)
 {
        struct sci_port *port = platform_get_drvdata(dev);
+       unsigned int type = port->port.type;    /* uart_remove_... clears it */
 
        sci_ports_in_use &= ~BIT(port->port.line);
        uart_remove_one_port(&sci_uart_driver, &port->port);
@@ -3112,8 +3113,7 @@ static int sci_remove(struct platform_device *dev)
                sysfs_remove_file(&dev->dev.kobj,
                                  &dev_attr_rx_fifo_trigger.attr);
        }
-       if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
-           port->port.type == PORT_HSCIF) {
+       if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
                sysfs_remove_file(&dev->dev.kobj,
                                  &dev_attr_rx_fifo_timeout.attr);
        }
index 70a4ea4eaa6e72b1191c27d66111c4ddb11d46f5..990376576970ae3607a5b645407050a4dfab82af 100644 (file)
@@ -112,6 +112,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
                mode = of_get_property(dp, mode_prop, NULL);
                if (!mode)
                        mode = "9600,8,n,1,-";
+               of_node_put(dp);
        }
 
        cflag = CREAD | HUPCL | CLOCAL;
index 7576ceace57151a21007847f14dcf091005f09bb..f438eaa682463bffe42c27d923fe8426adef0926 100644 (file)
@@ -77,7 +77,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
                else
                        cbaud += 15;
        }
-       return baud_table[cbaud];
+       return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
 }
 EXPORT_SYMBOL(tty_termios_baud_rate);
 
@@ -113,7 +113,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
                else
                        cbaud += 15;
        }
-       return baud_table[cbaud];
+       return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
 #else  /* IBSHIFT */
        return tty_termios_baud_rate(termios);
 #endif /* IBSHIFT */
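
cbaud is derived from user-controlled termios bits, so the table lookup is
now clamped; anything out of range reports 0 (B0, hang up) rather than
reading past the end of baud_table[].  The guarded read, as a sketch:

    /* n_baud_table is the number of entries in baud_table[] */
    speed_t baud = (cbaud >= n_baud_table) ? 0 : baud_table[cbaud];
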
index ee80dfbd5442b034451e396cd66608e41e8e43bb..687250ec8032359f7ccdd0bfbd2b6e8b3bb4e1f5 100644 (file)
@@ -1373,7 +1373,13 @@ err_release_lock:
        return ERR_PTR(retval);
 }
 
-static void tty_free_termios(struct tty_struct *tty)
+/**
+ * tty_save_termios() - save tty termios data in driver table
+ * @tty: tty whose termios data to save
+ *
+ * Locking: Caller guarantees serialisation with tty_init_termios().
+ */
+void tty_save_termios(struct tty_struct *tty)
 {
        struct ktermios *tp;
        int idx = tty->index;
@@ -1392,6 +1398,7 @@ static void tty_free_termios(struct tty_struct *tty)
        }
        *tp = tty->termios;
 }
+EXPORT_SYMBOL_GPL(tty_save_termios);
 
 /**
  *     tty_flush_works         -       flush all works of a tty/pty pair
@@ -1491,7 +1498,7 @@ static void release_tty(struct tty_struct *tty, int idx)
        WARN_ON(!mutex_is_locked(&tty_mutex));
        if (tty->ops->shutdown)
                tty->ops->shutdown(tty);
-       tty_free_termios(tty);
+       tty_save_termios(tty);
        tty_driver_remove_tty(tty->driver, tty);
        tty->port->itty = NULL;
        if (tty->link)
index cb6075096a5b41b6fbf5e87b6b22239c8a5082e3..044c3cbdcfa40664497d13bd00e607584eff99c7 100644 (file)
@@ -633,7 +633,8 @@ void tty_port_close(struct tty_port *port, struct tty_struct *tty,
        if (tty_port_close_start(port, tty, filp) == 0)
                return;
        tty_port_shutdown(port, tty);
-       set_bit(TTY_IO_ERROR, &tty->flags);
+       if (!port->console)
+               set_bit(TTY_IO_ERROR, &tty->flags);
        tty_port_close_end(port, tty);
        tty_port_tty_set(port, NULL);
 }
index 55370e651db31424db2330b80bd6bd4c4f9d02b8..41ec8e5010f30a544b82ca439cc5a481fe499b19 100644 (file)
@@ -1548,7 +1548,7 @@ static void csi_K(struct vc_data *vc, int vpar)
        scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
        vc->vc_need_wrap = 0;
        if (con_should_update(vc))
-               do_update_region(vc, (unsigned long) start, count);
+               do_update_region(vc, (unsigned long)(start + offset), count);
 }
 
 static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
index 85644669fbe7b013b59b24adff5fd466230a102b..0a357db4b31b37ddd3ceedd25ecde5f7572d7690 100644 (file)
@@ -961,6 +961,8 @@ int __uio_register_device(struct module *owner,
        if (ret)
                goto err_uio_dev_add_attributes;
 
+       info->uio_dev = idev;
+
        if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
                /*
                 * Note that we deliberately don't use devm_request_irq
@@ -972,11 +974,12 @@ int __uio_register_device(struct module *owner,
                 */
                ret = request_irq(info->irq, uio_interrupt,
                                  info->irq_flags, info->name, idev);
-               if (ret)
+               if (ret) {
+                       info->uio_dev = NULL;
                        goto err_request_irq;
+               }
        }
 
-       info->uio_dev = idev;
        return 0;
 
 err_request_irq:
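
Moving the info->uio_dev assignment is an ordering fix: the interrupt handler
may run the moment request_irq() returns, so every field it dereferences must
already be published, and a failed request must unpublish again.  The shape
of the fix, as a sketch:

    info->uio_dev = idev;                   /* visible to uio_interrupt() */

    ret = request_irq(info->irq, uio_interrupt,
                      info->irq_flags, info->name, idev);
    if (ret) {
            info->uio_dev = NULL;           /* roll back before bailing out */
            goto err_request_irq;
    }
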
index 47d75c20c211c6f70cad75fcae4ae6b75c458c09..1b68fed464cb96456fcd566f409608bfda34b4f1 100644 (file)
@@ -1696,6 +1696,9 @@ static const struct usb_device_id acm_ids[] = {
        { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
        .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
        },
+       { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
+       .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+       },
        { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
        .driver_info = QUIRK_CONTROL_LINE_STATE, },
        { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
index c6077d582d2966a4fbeffd092092c1bf58429db1..f76b2e0aba9d5f11d994cd06ebe217f283bada16 100644 (file)
@@ -2251,7 +2251,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
                /* descriptor may appear anywhere in config */
                err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
                                le16_to_cpu(udev->config[0].desc.wTotalLength),
-                               USB_DT_OTG, (void **) &desc);
+                               USB_DT_OTG, (void **) &desc, sizeof(*desc));
                if (err || !(desc->bmAttributes & USB_OTG_HNP))
                        return 0;
 
@@ -2794,6 +2794,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
        int i, status;
        u16 portchange, portstatus;
        struct usb_port *port_dev = hub->ports[port1 - 1];
+       int reset_recovery_time;
 
        if (!hub_is_superspeed(hub->hdev)) {
                if (warm) {
@@ -2849,7 +2850,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
                                        USB_PORT_FEAT_C_BH_PORT_RESET);
                        usb_clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_PORT_LINK_STATE);
-                       usb_clear_port_feature(hub->hdev, port1,
+
+                       if (udev)
+                               usb_clear_port_feature(hub->hdev, port1,
                                        USB_PORT_FEAT_C_CONNECTION);
 
                        /*
@@ -2885,11 +2888,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
 
 done:
        if (status == 0) {
-               /* TRSTRCY = 10 ms; plus some extra */
                if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM)
                        usleep_range(10000, 12000);
-               else
-                       msleep(10 + 40);
+               else {
+                       /* TRSTRCY = 10 ms; plus some extra */
+                       reset_recovery_time = 10 + 40;
+
+                       /* Hub needs extra delay after resetting its port. */
+                       if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET)
+                               reset_recovery_time += 100;
+
+                       msleep(reset_recovery_time);
+               }
 
                if (udev) {
                        struct usb_hcd *hcd = bus_to_hcd(udev->bus);
@@ -5153,7 +5163,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
 /* Handle notifying userspace about hub over-current events */
 static void port_over_current_notify(struct usb_port *port_dev)
 {
-       static char *envp[] = { NULL, NULL, NULL };
+       char *envp[3];
        struct device *hub_dev;
        char *port_dev_path;
 
@@ -5177,6 +5187,7 @@ static void port_over_current_notify(struct usb_port *port_dev)
        if (!envp[1])
                goto exit;
 
+       envp[2] = NULL;
        kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
 
        kfree(envp[1]);
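
Dropping static from envp[] removes a race: one shared array would let two
concurrent over-current events overwrite each other's environment strings,
while an on-stack array plus an explicit NULL terminator gives each event its
own copy.  A sketch with hypothetical payloads:

    #include <linux/kobject.h>
    #include <linux/slab.h>

    char *envp[3];

    envp[0] = "OVER_CURRENT_PORT=1";        /* hypothetical event key */
    envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u", count);
    if (!envp[1])
            return;
    envp[2] = NULL;                         /* terminate before use */

    kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
    kfree(envp[1]);
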
index 178d6c6063c0280a06c6dc3dc6611394d8233479..514c5214ddb246be61bd9c30bcfe8a5123e41f34 100644 (file)
@@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
                        case 'n':
                                flags |= USB_QUIRK_DELAY_CTRL_MSG;
                                break;
+                       case 'o':
+                               flags |= USB_QUIRK_HUB_SLOW_RESET;
+                               break;
                        /* Ignore unrecognized flag characters */
                        }
                }
@@ -206,6 +209,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Microsoft LifeCam-VX700 v2.0 */
        { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
+       { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
        { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
        { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -327,6 +333,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Midiman M-Audio Keystation 88es */
        { USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* SanDisk Ultra Fit and Ultra Flair */
+       { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
+       { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+
        /* M-Systems Flash Disk Pioneers */
        { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -380,6 +390,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Terminus Technology Inc. Hub */
+       { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET },
+
        /* Corsair K70 RGB */
        { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
 
@@ -391,6 +404,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
          USB_QUIRK_DELAY_CTRL_MSG },
 
+       /* Corsair K70 LUX RGB */
+       { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
+
        /* Corsair K70 LUX */
        { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
 
@@ -411,6 +427,11 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x2040, 0x7200), .driver_info =
                        USB_QUIRK_CONFIG_INTF_STRINGS },
 
+       /* Raydium Touchscreen */
+       { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
+
+       { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+
        /* DJI CineSSD */
        { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
 
index 79d8bd7a612e65b5c765f16e0c5567a440cdc1da..4ebfbd737905169d1b96904952576defbd01e8f1 100644 (file)
@@ -832,14 +832,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
  */
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
-                              unsigned char type, void **ptr)
+                              unsigned char type, void **ptr, size_t minsize)
 {
        struct usb_descriptor_header *header;
 
        while (size >= sizeof(struct usb_descriptor_header)) {
                header = (struct usb_descriptor_header *)buffer;
 
-               if (header->bLength < 2) {
+               if (header->bLength < 2 || header->bLength > size) {
                        printk(KERN_ERR
                                "%s: bogus descriptor, type %d length %d\n",
                                usbcore_name,
@@ -848,7 +848,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
                        return -1;
                }
 
-               if (header->bDescriptorType == type) {
+               if (header->bDescriptorType == type && header->bLength >= minsize) {
                        *ptr = header;
                        return 0;
                }
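
With the new minsize argument, __usb_get_extra_descriptor() only returns
descriptors at least as large as the structure the caller is about to
dereference, so a truncated descriptor on the wire can no longer satisfy the
search.  The caller side, as a sketch with a hypothetical consumer:

    struct usb_otg_descriptor *desc;
    int err;

    /* sizeof(*desc) guarantees desc->bmAttributes lies inside the buffer */
    err = __usb_get_extra_descriptor(buffer, buflen, USB_DT_OTG,
                                     (void **)&desc, sizeof(*desc));
    if (err == 0 && (desc->bmAttributes & USB_OTG_HNP))
            handle_hnp(desc);               /* hypothetical consumer */
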
index d257c541e51ba4e51530e5d8c3d1048794f5f013..7afc10872f1f031370727a277180f664fe8e4c56 100644 (file)
@@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci,
        dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO);
        if (!dwc2) {
                dev_err(dev, "couldn't allocate dwc2 device\n");
+               ret = -ENOMEM;
                goto err;
        }
 
index becfbb87f791dad714b5ab23049ede45f9dd63e6..2f2048aa5fde13dde439e52236620f9bab603118 100644 (file)
@@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev)
 
 err5:
        dwc3_event_buffers_cleanup(dwc);
+       dwc3_ulpi_exit(dwc);
 
 err4:
        dwc3_free_scratch_buffers(dwc);
index 1286076a8890308a66d6ef494b85169ae92d7798..842795856bf49e5093459777b252b33d67e89a96 100644 (file)
@@ -283,8 +283,10 @@ err:
 static void dwc3_pci_remove(struct pci_dev *pci)
 {
        struct dwc3_pci         *dwc = pci_get_drvdata(pci);
+       struct pci_dev          *pdev = dwc->pci;
 
-       gpiod_remove_lookup_table(&platform_bytcr_gpios);
+       if (pdev->device == PCI_DEVICE_ID_INTEL_BYT)
+               gpiod_remove_lookup_table(&platform_bytcr_gpios);
 #ifdef CONFIG_PM
        cancel_work_sync(&dwc->wakeup_work);
 #endif
index 679c12e145225899c1ac5ff3f4f8489d8552a928..9f92ee03dde7048deaca902430ae60c06018c098 100644 (file)
@@ -1081,7 +1081,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
                        /* Now prepare one extra TRB to align transfer size */
                        trb = &dep->trb_pool[dep->trb_enqueue];
                        __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr,
-                                       maxp - rem, false, 0,
+                                       maxp - rem, false, 1,
                                        req->request.stream_id,
                                        req->request.short_not_ok,
                                        req->request.no_interrupt);
@@ -1125,7 +1125,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
                /* Now prepare one extra TRB to align transfer size */
                trb = &dep->trb_pool[dep->trb_enqueue];
                __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem,
-                               false, 0, req->request.stream_id,
+                               false, 1, req->request.stream_id,
                                req->request.short_not_ok,
                                req->request.no_interrupt);
        } else if (req->request.zero && req->request.length &&
@@ -1141,7 +1141,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
                /* Now prepare one extra TRB to handle ZLP */
                trb = &dep->trb_pool[dep->trb_enqueue];
                __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
-                               false, 0, req->request.stream_id,
+                               false, 1, req->request.stream_id,
                                req->request.short_not_ok,
                                req->request.no_interrupt);
        } else {
@@ -1470,9 +1470,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                unsigned transfer_in_flight;
                unsigned started;
 
-               if (dep->flags & DWC3_EP_STALL)
-                       return 0;
-
                if (dep->number > 1)
                        trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue);
                else
@@ -1494,8 +1491,6 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
-               if (!(dep->flags & DWC3_EP_STALL))
-                       return 0;
 
                ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
@@ -2259,7 +2254,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
         * with one TRB pending in the ring. We need to manually clear HWO bit
         * from that TRB.
         */
-       if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) {
+       if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) {
                trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
                return 1;
        }
index 3ada83d81bda8d2810ab04d154b65658f57e0ba2..31e8bf3578c891303a9194c6c23df987cf59f38f 100644 (file)
@@ -215,7 +215,6 @@ struct ffs_io_data {
 
        struct mm_struct *mm;
        struct work_struct work;
-       struct work_struct cancellation_work;
 
        struct usb_ep *ep;
        struct usb_request *req;
@@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-static void ffs_aio_cancel_worker(struct work_struct *work)
-{
-       struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
-                                                  cancellation_work);
-
-       ENTER();
-
-       usb_ep_dequeue(io_data->ep, io_data->req);
-}
-
 static int ffs_aio_cancel(struct kiocb *kiocb)
 {
        struct ffs_io_data *io_data = kiocb->private;
-       struct ffs_data *ffs = io_data->ffs;
+       struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
        int value;
 
        ENTER();
 
-       if (likely(io_data && io_data->ep && io_data->req)) {
-               INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
-               queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
-               value = -EINPROGRESS;
-       } else {
+       spin_lock_irq(&epfile->ffs->eps_lock);
+
+       if (likely(io_data && io_data->ep && io_data->req))
+               value = usb_ep_dequeue(io_data->ep, io_data->req);
+       else
                value = -EINVAL;
-       }
+
+       spin_unlock_irq(&epfile->ffs->eps_lock);
 
        return value;
 }
index 1000d864929c3569162e49d83058f29eb3310706..0f026d445e316aec85c3dbcdbcb79700e39401ed 100644 (file)
@@ -401,12 +401,12 @@ done:
 static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
 {
        struct usb_request      *req;
-       struct usb_request      *tmp;
        unsigned long           flags;
 
        /* fill unused rxq slots with some skb */
        spin_lock_irqsave(&dev->req_lock, flags);
-       list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+       while (!list_empty(&dev->rx_reqs)) {
+               req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
                list_del_init(&req->list);
                spin_unlock_irqrestore(&dev->req_lock, flags);
 
@@ -1125,7 +1125,6 @@ void gether_disconnect(struct gether *link)
 {
        struct eth_dev          *dev = link->ioport;
        struct usb_request      *req;
-       struct usb_request      *tmp;
 
        WARN_ON(!dev);
        if (!dev)
@@ -1142,7 +1141,8 @@ void gether_disconnect(struct gether *link)
         */
        usb_ep_disable(link->in_ep);
        spin_lock(&dev->req_lock);
-       list_for_each_entry_safe(req, tmp, &dev->tx_reqs, list) {
+       while (!list_empty(&dev->tx_reqs)) {
+               req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
@@ -1154,7 +1154,8 @@ void gether_disconnect(struct gether *link)
 
        usb_ep_disable(link->out_ep);
        spin_lock(&dev->req_lock);
-       list_for_each_entry_safe(req, tmp, &dev->rx_reqs, list) {
+       while (!list_empty(&dev->rx_reqs)) {
+               req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
                list_del(&req->list);
 
                spin_unlock(&dev->req_lock);
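
The switch from list_for_each_entry_safe() to while (!list_empty()) is not
cosmetic: the lock is dropped inside the loop body, and the _safe iterator
only protects against deleting the current entry, not against its cached next
pointer going stale while the list is unlocked.  Re-reading the head on every
pass is the safe idiom:

    spin_lock(&dev->req_lock);
    while (!list_empty(&dev->rx_reqs)) {
            req = list_first_entry(&dev->rx_reqs, struct usb_request, list);
            list_del(&req->list);

            spin_unlock(&dev->req_lock);    /* others may touch the list now */
            usb_ep_free_request(link->out_ep, req);  /* may sleep */
            spin_lock(&dev->req_lock);
    }
    spin_unlock(&dev->req_lock);
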
index 3a16431da3211c470db38e5e2980996142073b65..fcf13ef33b312020d744cd99898294f02c1ba72a 100644 (file)
@@ -2033,6 +2033,7 @@ static inline int machine_without_vbus_sense(void)
 {
        return machine_is_omap_innovator()
                || machine_is_omap_osk()
+               || machine_is_omap_palmte()
                || machine_is_sx1()
                /* No known omap7xx boards with vbus sense */
                || cpu_is_omap7xx();
@@ -2041,7 +2042,7 @@ static inline int machine_without_vbus_sense(void)
 static int omap_udc_start(struct usb_gadget *g,
                struct usb_gadget_driver *driver)
 {
-       int             status = -ENODEV;
+       int             status;
        struct omap_ep  *ep;
        unsigned long   flags;
 
@@ -2079,6 +2080,7 @@ static int omap_udc_start(struct usb_gadget *g,
                        goto done;
                }
        } else {
+               status = 0;
                if (can_pullup(udc))
                        pullup_enable(udc);
                else
@@ -2593,9 +2595,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
 
 static void omap_udc_release(struct device *dev)
 {
-       complete(udc->done);
+       pullup_disable(udc);
+       if (!IS_ERR_OR_NULL(udc->transceiver)) {
+               usb_put_phy(udc->transceiver);
+               udc->transceiver = NULL;
+       }
+       omap_writew(0, UDC_SYSCON1);
+       remove_proc_file();
+       if (udc->dc_clk) {
+               if (udc->clk_requested)
+                       omap_udc_enable_clock(0);
+               clk_put(udc->hhc_clk);
+               clk_put(udc->dc_clk);
+       }
+       if (udc->done)
+               complete(udc->done);
        kfree(udc);
-       udc = NULL;
 }
 
 static int
@@ -2627,6 +2642,7 @@ omap_udc_setup(struct platform_device *odev, struct usb_phy *xceiv)
        udc->gadget.speed = USB_SPEED_UNKNOWN;
        udc->gadget.max_speed = USB_SPEED_FULL;
        udc->gadget.name = driver_name;
+       udc->gadget.quirk_ep_out_aligned_size = 1;
        udc->transceiver = xceiv;
 
        /* ep0 is special; put it right after the SETUP buffer */
@@ -2867,8 +2883,8 @@ bad_on_1710:
                udc->clr_halt = UDC_RESET_EP;
 
        /* USB general purpose IRQ:  ep0, state changes, dma, etc */
-       status = request_irq(pdev->resource[1].start, omap_udc_irq,
-                       0, driver_name, udc);
+       status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
+                                 omap_udc_irq, 0, driver_name, udc);
        if (status != 0) {
                ERR("can't get irq %d, err %d\n",
                        (int) pdev->resource[1].start, status);
@@ -2876,20 +2892,20 @@ bad_on_1710:
        }
 
        /* USB "non-iso" IRQ (PIO for all but ep0) */
-       status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
-                       0, "omap_udc pio", udc);
+       status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
+                                 omap_udc_pio_irq, 0, "omap_udc pio", udc);
        if (status != 0) {
                ERR("can't get irq %d, err %d\n",
                        (int) pdev->resource[2].start, status);
-               goto cleanup2;
+               goto cleanup1;
        }
 #ifdef USE_ISO
-       status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
-                       0, "omap_udc iso", udc);
+       status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
+                                 omap_udc_iso_irq, 0, "omap_udc iso", udc);
        if (status != 0) {
                ERR("can't get irq %d, err %d\n",
                        (int) pdev->resource[3].start, status);
-               goto cleanup3;
+               goto cleanup1;
        }
 #endif
        if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2900,23 +2916,8 @@ bad_on_1710:
        }
 
        create_proc_file();
-       status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
-                       omap_udc_release);
-       if (status)
-               goto cleanup4;
-
-       return 0;
-
-cleanup4:
-       remove_proc_file();
-
-#ifdef USE_ISO
-cleanup3:
-       free_irq(pdev->resource[2].start, udc);
-#endif
-
-cleanup2:
-       free_irq(pdev->resource[1].start, udc);
+       return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+                                         omap_udc_release);
 
 cleanup1:
        kfree(udc);
@@ -2943,42 +2944,15 @@ static int omap_udc_remove(struct platform_device *pdev)
 {
        DECLARE_COMPLETION_ONSTACK(done);
 
-       if (!udc)
-               return -ENODEV;
-
-       usb_del_gadget_udc(&udc->gadget);
-       if (udc->driver)
-               return -EBUSY;
-
        udc->done = &done;
 
-       pullup_disable(udc);
-       if (!IS_ERR_OR_NULL(udc->transceiver)) {
-               usb_put_phy(udc->transceiver);
-               udc->transceiver = NULL;
-       }
-       omap_writew(0, UDC_SYSCON1);
-
-       remove_proc_file();
-
-#ifdef USE_ISO
-       free_irq(pdev->resource[3].start, udc);
-#endif
-       free_irq(pdev->resource[2].start, udc);
-       free_irq(pdev->resource[1].start, udc);
+       usb_del_gadget_udc(&udc->gadget);
 
-       if (udc->dc_clk) {
-               if (udc->clk_requested)
-                       omap_udc_enable_clock(0);
-               clk_put(udc->hhc_clk);
-               clk_put(udc->dc_clk);
-       }
+       wait_for_completion(&done);
 
        release_mem_region(pdev->resource[0].start,
                        pdev->resource[0].end - pdev->resource[0].start + 1);
 
-       wait_for_completion(&done);
-
        return 0;
 }
 
index 684d6f074c3a490a291109ff9603ec4b44f84e58..09a8ebd955888d6d375d7cee3d0fc53fd1cf0d9c 100644 (file)
@@ -640,7 +640,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
        top = itr + itr_size;
        result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
                        le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
-                       USB_DT_SECURITY, (void **) &secd);
+                       USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
        if (result == -1) {
                dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
                return 0;
index 27f00160332e2186327da464d7be327d82bc3170..3c4abb5a1c3fc6bdb86e749ab62cf256381c1f75 100644 (file)
@@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev)
        struct xhci_hcd_histb *histb = platform_get_drvdata(dev);
        struct usb_hcd *hcd = histb->hcd;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        device_wakeup_disable(&dev->dev);
 
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
 
        xhci_histb_host_disable(histb);
        usb_put_hcd(hcd);
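
The same teardown dance recurs in the xhci-mtk, xhci-plat, xhci-pci and Tegra
glue below: cache the shared (USB 3) hcd, clear the field so stray port
events can be recognised as belonging to a removed bus, and drop the
reference only at the end.  The common shape:

    struct usb_hcd *shared_hcd = xhci->shared_hcd;

    usb_remove_hcd(shared_hcd);
    xhci->shared_hcd = NULL;        /* handle_port_status() checks this */
    usb_remove_hcd(hcd);            /* primary hcd goes last */
    usb_put_hcd(shared_hcd);        /* now safe to drop the reference */
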
index 12eea73d9f20b00701e22da8debdcaa1d9283c92..94aca1b5ac8a228b6ecb690f84441f37c976fbdb 100644 (file)
@@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        status |= USB_PORT_STAT_SUSPEND;
        }
        if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
-               !DEV_SUPERSPEED_ANY(raw_port_status)) {
+               !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
                if ((raw_port_status & PORT_RESET) ||
                                !(raw_port_status & PORT_PE))
                        return 0xffffffff;
@@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        time_left = wait_for_completion_timeout(
                                        &bus_state->rexit_done[wIndex],
                                        msecs_to_jiffies(
-                                               XHCI_MAX_REXIT_TIMEOUT));
+                                               XHCI_MAX_REXIT_TIMEOUT_MS));
                        spin_lock_irqsave(&xhci->lock, flags);
 
                        if (time_left) {
@@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
                        } else {
                                int port_status = readl(port->addr);
                                xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
-                                               XHCI_MAX_REXIT_TIMEOUT,
+                                               XHCI_MAX_REXIT_TIMEOUT_MS,
                                                port_status);
                                status |= USB_PORT_STAT_SUSPEND;
                                clear_bit(wIndex, &bus_state->rexit_ports);
@@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        unsigned long flags;
        struct xhci_hub *rhub;
        struct xhci_port **ports;
+       u32 portsc_buf[USB_MAXCHILDREN];
+       bool wake_enabled;
 
        rhub = xhci_get_rhub(hcd);
        ports = rhub->ports;
        max_ports = rhub->num_ports;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
+       wake_enabled = hcd->self.root_hub->do_remote_wakeup;
 
        spin_lock_irqsave(&xhci->lock, flags);
 
-       if (hcd->self.root_hub->do_remote_wakeup) {
+       if (wake_enabled) {
                if (bus_state->resuming_ports ||        /* USB2 */
                    bus_state->port_remote_wakeup) {    /* USB3 */
                        spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                        return -EBUSY;
                }
        }
-
-       port_index = max_ports;
+       /*
+        * Prepare ports for suspend, but don't write anything before all ports
+        * are checked and we know bus suspend can proceed
+        */
        bus_state->bus_suspended = 0;
+       port_index = max_ports;
        while (port_index--) {
-               /* suspend the port if the port is not suspended */
                u32 t1, t2;
-               int slot_id;
 
                t1 = readl(ports[port_index]->addr);
                t2 = xhci_port_state_to_neutral(t1);
+               portsc_buf[port_index] = 0;
 
-               if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
-                       xhci_dbg(xhci, "port %d not suspended\n", port_index);
-                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-                                       port_index + 1);
-                       if (slot_id) {
+               /* Bail out if a USB3 port has a new device in link training */
+               if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+                       bus_state->bus_suspended = 0;
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+                       return -EBUSY;
+               }
+
+               /* suspend ports in U0, or bail out for new connect changes */
+               if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+                       if ((t1 & PORT_CSC) && wake_enabled) {
+                               bus_state->bus_suspended = 0;
                                spin_unlock_irqrestore(&xhci->lock, flags);
-                               xhci_stop_device(xhci, slot_id, 1);
-                               spin_lock_irqsave(&xhci->lock, flags);
+                               xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+                               return -EBUSY;
                        }
+                       xhci_dbg(xhci, "port %d not suspended\n", port_index);
                        t2 &= ~PORT_PLS_MASK;
                        t2 |= PORT_LINK_STROBE | XDEV_U3;
                        set_bit(port_index, &bus_state->bus_suspended);
@@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup) {
+               if (wake_enabled) {
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
@@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
 
                t1 = xhci_port_state_to_neutral(t1);
                if (t1 != t2)
-                       writel(t2, ports[port_index]->addr);
+                       portsc_buf[port_index] = t2;
+       }
+
+       /* write port settings, stopping and suspending ports if needed */
+       port_index = max_ports;
+       while (port_index--) {
+               if (!portsc_buf[port_index])
+                       continue;
+               if (test_bit(port_index, &bus_state->bus_suspended)) {
+                       int slot_id;
+
+                       slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+                                                           port_index + 1);
+                       if (slot_id) {
+                               spin_unlock_irqrestore(&xhci->lock, flags);
+                               xhci_stop_device(xhci, slot_id, 1);
+                               spin_lock_irqsave(&xhci->lock, flags);
+                       }
+               }
+               writel(portsc_buf[port_index], ports[port_index]->addr);
        }
        hcd->state = HC_STATE_SUSPENDED;
        bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
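
The rewritten xhci_bus_suspend() is effectively a two-phase commit: phase one
validates every port and stages its new PORTSC value in portsc_buf[] without
touching the hardware, so any -EBUSY bail-out leaves the controller
untouched; phase two performs the writes.  The skeleton, as a sketch with
hypothetical helpers:

    u32 portsc_buf[USB_MAXCHILDREN];

    /* phase 1: validate and stage, no side effects yet */
    for (i = 0; i < max_ports; i++) {
            portsc_buf[i] = stage_port_for_suspend(i);      /* hypothetical */
            if (port_must_abort(i))                         /* hypothetical */
                    return -EBUSY;          /* nothing has been written */
    }

    /* phase 2: commit the staged values */
    for (i = 0; i < max_ports; i++)
            if (portsc_buf[i])
                    writel(portsc_buf[i], ports[i]->addr);
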
index 71d0d33c3286254b0327646720161df6ba5cc1e9..60987c787e44f457d918659140a0671ed7a717e8 100644 (file)
@@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev)
        struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev);
        struct usb_hcd  *hcd = mtk->hcd;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct usb_hcd  *shared_hcd = xhci->shared_hcd;
 
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        device_init_wakeup(&dev->dev, false);
 
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
        usb_put_hcd(hcd);
        xhci_mtk_sch_exit(mtk);
        xhci_mtk_clks_disable(mtk);
index 01c57055c0c5bf6b85b8fd45ce2d09c1a0ad3d15..a9ec7051f286414cff61cfe492b6de17a4f74462 100644 (file)
@@ -139,6 +139,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                 pdev->device == 0x43bb))
                xhci->quirks |= XHCI_SUSPEND_DELAY;
 
+       if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+           (pdev->device == 0x15e0 || pdev->device == 0x15e1))
+               xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
+
        if (pdev->vendor == PCI_VENDOR_ID_AMD)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
 
@@ -248,6 +252,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241)
                xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7;
 
+       if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM ||
+            pdev->vendor == PCI_VENDOR_ID_CAVIUM) &&
+            pdev->device == 0x9026)
+               xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT;
+
        if (xhci->quirks & XHCI_RESET_ON_RESUME)
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "QUIRK: Resetting on resume");
@@ -380,6 +389,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
        if (xhci->shared_hcd) {
                usb_remove_hcd(xhci->shared_hcd);
                usb_put_hcd(xhci->shared_hcd);
+               xhci->shared_hcd = NULL;
        }
 
        /* Workaround for spurious wakeups at shutdown with HSW */
index 32b5574ad5c56403eb876a70b492f88182096903..ef09cb06212fd367da3593092a31dd5c32d95f58 100644 (file)
@@ -362,14 +362,16 @@ static int xhci_plat_remove(struct platform_device *dev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct clk *clk = xhci->clk;
        struct clk *reg_clk = xhci->reg_clk;
+       struct usb_hcd *shared_hcd = xhci->shared_hcd;
 
        xhci->xhc_state |= XHCI_STATE_REMOVING;
 
-       usb_remove_hcd(xhci->shared_hcd);
+       usb_remove_hcd(shared_hcd);
+       xhci->shared_hcd = NULL;
        usb_phy_shutdown(hcd->usb_phy);
 
        usb_remove_hcd(hcd);
-       usb_put_hcd(xhci->shared_hcd);
+       usb_put_hcd(shared_hcd);
 
        clk_disable_unprepare(clk);
        clk_disable_unprepare(reg_clk);
index a8d92c90fb58755ad4359d94d4e52f50b949b2a0..65750582133f6a20eae8949e8a06c2c92c913881 100644 (file)
@@ -1521,6 +1521,35 @@ static void handle_device_notification(struct xhci_hcd *xhci,
                usb_wakeup_notification(udev->parent, udev->portnum);
 }
 
+/*
+ * Quirk handler for an erratum seen on the Cavium ThunderX2 xHCI
+ * controller.
+ * As per ThunderX2 erratum 129, a USB 2 device may come up as USB 1
+ * if a connection to a USB 1 device is followed by another connection
+ * to a USB 2 device.
+ *
+ * Reset the PHY after the USB device is disconnected if the device
+ * speed is less than HCD_USB3.
+ * Retry the reset sequence a maximum of 4 times, checking the PLL lock status.
+ *
+ */
+static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
+{
+       struct usb_hcd *hcd = xhci_to_hcd(xhci);
+       u32 pll_lock_check;
+       u32 retry_count = 4;
+
+       do {
+               /* Assert PHY reset */
+               writel(0x6F, hcd->regs + 0x1048);
+               udelay(10);
+               /* De-assert the PHY reset */
+               writel(0x7F, hcd->regs + 0x1048);
+               udelay(200);
+               pll_lock_check = readl(hcd->regs + 0x1070);
+       } while (!(pll_lock_check & 0x1) && --retry_count);
+}
+
 static void handle_port_status(struct xhci_hcd *xhci,
                union xhci_trb *event)
 {
@@ -1556,6 +1585,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
                goto cleanup;
        }
 
+       /* We might get interrupts after shared_hcd is removed */
+       if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
+               xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
+               bogus_port_status = true;
+               goto cleanup;
+       }
+
        hcd = port->rhub->hcd;
        bus_state = &xhci->bus_state[hcd_index(hcd)];
        hcd_portnum = port->hcd_portnum;
@@ -1639,7 +1675,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
         * RExit to a disconnect state).  If so, let the driver know it's
         * out of the RExit state.
         */
-       if (!DEV_SUPERSPEED_ANY(portsc) &&
+       if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
                        test_and_clear_bit(hcd_portnum,
                                &bus_state->rexit_ports)) {
                complete(&bus_state->rexit_done[hcd_portnum]);
@@ -1647,8 +1683,12 @@ static void handle_port_status(struct xhci_hcd *xhci,
                goto cleanup;
        }
 
-       if (hcd->speed < HCD_USB3)
+       if (hcd->speed < HCD_USB3) {
                xhci_test_and_clear_bit(xhci, port, PORT_PLC);
+               if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) &&
+                   (portsc & PORT_CSC) && !(portsc & PORT_CONNECT))
+                       xhci_cavium_reset_phy_quirk(xhci);
+       }
 
 cleanup:
        /* Update event ring dequeue pointer before dropping the lock */
@@ -2266,6 +2306,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                        goto cleanup;
                case COMP_RING_UNDERRUN:
                case COMP_RING_OVERRUN:
+               case COMP_STOPPED_LENGTH_INVALID:
                        goto cleanup;
                default:
                        xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
index 6b5db344de3011df7154f698333e97c5f62f8f69..938ff06c034959f445dc8ad764d2e8a97887c15b 100644 (file)
@@ -1303,6 +1303,7 @@ static int tegra_xusb_remove(struct platform_device *pdev)
 
        usb_remove_hcd(xhci->shared_hcd);
        usb_put_hcd(xhci->shared_hcd);
+       xhci->shared_hcd = NULL;
        usb_remove_hcd(tegra->hcd);
        usb_put_hcd(tegra->hcd);
 
index 0420eefa647a15cb5321dfa5fd95556a5a5432e5..dae3be1b9c8f01078a0f6ab259b6f4640ed5534c 100644 (file)
@@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd)
 
        /* Only halt host and free memory after both hcds are removed */
        if (!usb_hcd_is_primary_hcd(hcd)) {
-               /* usb core will free this hcd shortly, unset pointer */
-               xhci->shared_hcd = NULL;
                mutex_unlock(&xhci->mutex);
                return;
        }
@@ -970,6 +968,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        unsigned int            delay = XHCI_MAX_HALT_USEC;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        u32                     command;
+       u32                     res;
 
        if (!hcd->state)
                return 0;
@@ -1023,11 +1022,28 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        command = readl(&xhci->op_regs->command);
        command |= CMD_CSS;
        writel(command, &xhci->op_regs->command);
+       xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
                                STS_SAVE, 0, 10 * 1000)) {
-               xhci_warn(xhci, "WARN: xHC save state timeout\n");
-               spin_unlock_irq(&xhci->lock);
-               return -ETIMEDOUT;
+               /*
+                * AMD SNPS xHC 3.0 occasionally does not clear the
+                * SSS bit of USBSTS, so polling for the xHC to clear
+                * BIT(8) never succeeds and the driver assumes the
+                * controller is not responding and times out. To work
+                * around this, check that the SRE and HCE bits are not
+                * set (as per xHCI section 5.4.2) and bypass the
+                * timeout.
+                */
+               res = readl(&xhci->op_regs->status);
+               if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
+                   (((res & STS_SRE) == 0) &&
+                               ((res & STS_HCE) == 0))) {
+                       xhci->broken_suspend = 1;
+               } else {
+                       xhci_warn(xhci, "WARN: xHC save state timeout\n");
+                       spin_unlock_irq(&xhci->lock);
+                       return -ETIMEDOUT;
+               }
        }
        spin_unlock_irq(&xhci->lock);
 
@@ -1080,7 +1096,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
        set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
 
        spin_lock_irq(&xhci->lock);
-       if (xhci->quirks & XHCI_RESET_ON_RESUME)
+       if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
                hibernated = true;
 
        if (!hibernated) {
@@ -4498,6 +4514,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       /* Prevent U1 if service interval is shorter than U1 exit latency */
+       if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+               if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+                       dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
+                       return USB3_LPM_DISABLED;
+               }
+       }
+
        if (xhci->quirks & XHCI_INTEL_HOST)
                timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
        else
@@ -4554,6 +4578,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
 {
        unsigned long long timeout_ns;
 
+       /* Prevent U2 if service interval is shorter than U2 exit latency */
+       if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+               if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+                       dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
+                       return USB3_LPM_DISABLED;
+               }
+       }
+
        if (xhci->quirks & XHCI_INTEL_HOST)
                timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
        else
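
The U1/U2 hunks above compare the endpoint's service interval against the
link's exit latency. A minimal sketch of the service-interval computation
assumed here for a SuperSpeed periodic endpoint (the helper mirrors, but is
not, the driver's internal xhci_service_interval_to_ns()):

        /* ESIT of a SuperSpeed periodic endpoint: 2^(bInterval-1)
         * frames of 125 us each, expressed in nanoseconds. */
        static unsigned long long service_interval_ns(unsigned int bInterval)
        {
                return (1ULL << (bInterval - 1)) * 125000;
        }

With bInterval = 1 this yields 125000 ns; an exit latency at or above the
interval disables the corresponding LPM state, as the checks above encode.
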
index bf0b3692dc9a118d30664963e88d6757c7a5f31e..c3515bad5dbbad26efcc71f89fdd64cc28911329 100644 (file)
@@ -1680,7 +1680,7 @@ struct xhci_bus_state {
  * It can take up to 20 ms to transition from RExit to U0 on the
  * Intel Lynx Point LP xHCI host.
  */
-#define        XHCI_MAX_REXIT_TIMEOUT  (20 * 1000)
+#define        XHCI_MAX_REXIT_TIMEOUT_MS       20
 
 static inline unsigned int hcd_index(struct usb_hcd *hcd)
 {
@@ -1849,6 +1849,8 @@ struct xhci_hcd {
 #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
 #define XHCI_ZERO_64B_REGS     BIT_ULL(32)
 #define XHCI_DEFAULT_PM_RUNTIME_ALLOW  BIT_ULL(33)
+#define XHCI_RESET_PLL_ON_DISCONNECT   BIT_ULL(34)
+#define XHCI_SNPS_BROKEN_SUSPEND    BIT_ULL(35)
 
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
@@ -1878,6 +1880,8 @@ struct xhci_hcd {
        void                    *dbc;
        /* platform-specific data -- must come last */
        unsigned long           priv[0] __aligned(sizeof(s64));
+       /* Broken suspend flag for the SNPS suspend/resume issue */
+       u8                      broken_suspend;
 };
 
 /* Platform specific overrides to generic XHCI hc_driver ops */
index bd539f3058bcaa3d754e07bdb7034d4f2786e8a7..39ca31b4de4667a49bc8016d79dd95be05f970f1 100644 (file)
@@ -50,6 +50,8 @@ static const struct usb_device_id appledisplay_table[] = {
        { APPLEDISPLAY_DEVICE(0x9219) },
        { APPLEDISPLAY_DEVICE(0x921c) },
        { APPLEDISPLAY_DEVICE(0x921d) },
+       { APPLEDISPLAY_DEVICE(0x9222) },
+       { APPLEDISPLAY_DEVICE(0x9226) },
        { APPLEDISPLAY_DEVICE(0x9236) },
 
        /* Terminating entry */
index 17940589c647cfdcbcd81e7e3b0c5d30dec6e8b8..7d289302ff6cfd22578885acbbadb6a048a9f5af 100644 (file)
@@ -101,7 +101,6 @@ static int usb_console_setup(struct console *co, char *options)
                cflag |= PARENB;
                break;
        }
-       co->cflag = cflag;
 
        /*
         * no need to check the index here: if the index is wrong, console
@@ -164,6 +163,7 @@ static int usb_console_setup(struct console *co, char *options)
                        serial->type->set_termios(tty, port, &dummy);
 
                        tty_port_tty_set(&port->port, NULL);
+                       tty_save_termios(tty);
                        tty_kref_put(tty);
                }
                tty_port_set_initialized(&port->port, 1);
index d17cd95b55bbdec3874c80d0d09b449c4f1f8fa1..6b2140f966ef87e751171373a5ab8e2756e5c33f 100644 (file)
@@ -27,4 +27,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
                "USB Card Reader",
                USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
 
+UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
+               "Realtek",
+               "USB Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
+               "Realtek",
+               "USB Card Reader",
+               USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
 #endif  /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
index e36d6c73c4a4184c6246b14ab27b8e0ecb393be8..78118883f96c8ffdf868a4474ba569be59857865 100644 (file)
@@ -23,6 +23,16 @@ config TYPEC_UCSI
 
 if TYPEC_UCSI
 
+config UCSI_CCG
+       tristate "UCSI Interface Driver for Cypress CCGx"
+       depends on I2C
+       help
+         This driver enables UCSI support on platforms that expose a
+         Cypress CCGx Type-C controller over an I2C interface.
+
+         To compile the driver as a module, choose M here: the module will be
+         called ucsi_ccg.
+
 config UCSI_ACPI
        tristate "UCSI ACPI Interface Driver"
        depends on ACPI
index 7afbea5122077b3dd0cbe217ad7c839837f499b4..2f4900b26210e245a65115ed1280bab41c19f62d 100644 (file)
@@ -8,3 +8,5 @@ typec_ucsi-y                    := ucsi.o
 typec_ucsi-$(CONFIG_TRACING)   += trace.o
 
 obj-$(CONFIG_UCSI_ACPI)                += ucsi_acpi.o
+
+obj-$(CONFIG_UCSI_CCG)         += ucsi_ccg.o
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
new file mode 100644 (file)
index 0000000..de8a43b
--- /dev/null
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for Cypress CCGx Type-C controller
+ *
+ * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ *
+ * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/unaligned.h>
+#include "ucsi.h"
+
+struct ucsi_ccg {
+       struct device *dev;
+       struct ucsi *ucsi;
+       struct ucsi_ppm ppm;
+       struct i2c_client *client;
+};
+
+#define CCGX_RAB_INTR_REG                      0x06
+#define CCGX_RAB_UCSI_CONTROL                  0x39
+#define CCGX_RAB_UCSI_CONTROL_START            BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP             BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset)       (0xf000 | ((offset) & 0xff))
+
+static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+       struct i2c_client *client = uc->client;
+       const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+       unsigned char buf[2];
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = client->addr,
+                       .flags  = 0x0,
+                       .len    = sizeof(buf),
+                       .buf    = buf,
+               },
+               {
+                       .addr   = client->addr,
+                       .flags  = I2C_M_RD,
+                       .buf    = data,
+               },
+       };
+       u32 rlen, rem_len = len, max_read_len = len;
+       int status;
+
+       /* check for any max_read_len limitation on the i2c adapter */
+       if (quirks && quirks->max_read_len)
+               max_read_len = quirks->max_read_len;
+
+       while (rem_len > 0) {
+               msgs[1].buf = &data[len - rem_len];
+               rlen = min_t(u16, rem_len, max_read_len);
+               msgs[1].len = rlen;
+               put_unaligned_le16(rab, buf);
+               status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+               if (status < 0) {
+                       dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+                       return status;
+               }
+               rab += rlen;
+               rem_len -= rlen;
+       }
+
+       return 0;
+}
+
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+       struct i2c_client *client = uc->client;
+       unsigned char *buf;
+       struct i2c_msg msgs[] = {
+               {
+                       .addr   = client->addr,
+                       .flags  = 0x0,
+               }
+       };
+       int status;
+
+       buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       put_unaligned_le16(rab, buf);
+       memcpy(buf + sizeof(rab), data, len);
+
+       msgs[0].len = len + sizeof(rab);
+       msgs[0].buf = buf;
+
+       status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+       if (status < 0) {
+               dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+               kfree(buf);
+               return status;
+       }
+
+       kfree(buf);
+       return 0;
+}
+
+static int ucsi_ccg_init(struct ucsi_ccg *uc)
+{
+       unsigned int count = 10;
+       u8 data;
+       int status;
+
+       data = CCGX_RAB_UCSI_CONTROL_STOP;
+       status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+       if (status < 0)
+               return status;
+
+       data = CCGX_RAB_UCSI_CONTROL_START;
+       status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+       if (status < 0)
+               return status;
+
+       /*
+        * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI
+        * control register writes above push responses which must be cleared.
+        */
+       do {
+               status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+               if (status < 0)
+                       return status;
+
+               if (!data)
+                       return 0;
+
+               status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+               if (status < 0)
+                       return status;
+
+               usleep_range(10000, 11000);
+       } while (--count);
+
+       return -ETIMEDOUT;
+}
+
+static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+{
+       u8 *ppm = (u8 *)uc->ppm.data;
+       int status;
+       u16 rab;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
+       status = ccg_write(uc, rab, ppm +
+                          offsetof(struct ucsi_data, message_out),
+                          sizeof(uc->ppm.data->message_out));
+       if (status < 0)
+               return status;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
+       return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
+                        sizeof(uc->ppm.data->ctrl));
+}
+
+static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+{
+       u8 *ppm = (u8 *)uc->ppm.data;
+       int status;
+       u16 rab;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
+       status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
+                         sizeof(uc->ppm.data->cci));
+       if (status < 0)
+               return status;
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
+       return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
+                       sizeof(uc->ppm.data->message_in));
+}
+
+static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+{
+       int status;
+       unsigned char data;
+
+       status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+       if (status < 0)
+               return status;
+
+       return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+}
+
+static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
+{
+       struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+       int status;
+
+       status = ucsi_ccg_recv_data(uc);
+       if (status < 0)
+               return status;
+
+       /* ack interrupt to allow next command to run */
+       return ucsi_ccg_ack_interrupt(uc);
+}
+
+static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+       struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+
+       ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+       return ucsi_ccg_send_data(uc);
+}
+
+static irqreturn_t ccg_irq_handler(int irq, void *data)
+{
+       struct ucsi_ccg *uc = data;
+
+       ucsi_notify(uc->ucsi);
+
+       return IRQ_HANDLED;
+}
+
+static int ucsi_ccg_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
+{
+       struct device *dev = &client->dev;
+       struct ucsi_ccg *uc;
+       int status;
+       u16 rab;
+
+       uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
+       if (!uc)
+               return -ENOMEM;
+
+       uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
+       if (!uc->ppm.data)
+               return -ENOMEM;
+
+       uc->ppm.cmd = ucsi_ccg_cmd;
+       uc->ppm.sync = ucsi_ccg_sync;
+       uc->dev = dev;
+       uc->client = client;
+
+       /* reset ccg device and initialize ucsi */
+       status = ucsi_ccg_init(uc);
+       if (status < 0) {
+               dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
+               return status;
+       }
+
+       status = devm_request_threaded_irq(dev, client->irq, NULL,
+                                          ccg_irq_handler,
+                                          IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+                                          dev_name(dev), uc);
+       if (status < 0) {
+               dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
+               return status;
+       }
+
+       uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+       if (IS_ERR(uc->ucsi)) {
+               dev_err(uc->dev, "ucsi_register_ppm failed\n");
+               return PTR_ERR(uc->ucsi);
+       }
+
+       rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
+       status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
+                         offsetof(struct ucsi_data, version),
+                         sizeof(uc->ppm.data->version));
+       if (status < 0) {
+               ucsi_unregister_ppm(uc->ucsi);
+               return status;
+       }
+
+       i2c_set_clientdata(client, uc);
+       return 0;
+}
+
+static int ucsi_ccg_remove(struct i2c_client *client)
+{
+       struct ucsi_ccg *uc = i2c_get_clientdata(client);
+
+       ucsi_unregister_ppm(uc->ucsi);
+
+       return 0;
+}
+
+static const struct i2c_device_id ucsi_ccg_device_id[] = {
+       {"ccgx-ucsi", 0},
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+
+static struct i2c_driver ucsi_ccg_driver = {
+       .driver = {
+               .name = "ucsi_ccg",
+       },
+       .probe = ucsi_ccg_probe,
+       .remove = ucsi_ccg_remove,
+       .id_table = ucsi_ccg_device_id,
+};
+
+module_i2c_driver(ucsi_ccg_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
+MODULE_LICENSE("GPL v2");
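
The driver binds by the "ccgx-ucsi" I2C device id above. A minimal sketch of
instantiating such a device from board code, with a hypothetical bus number,
address and IRQ (real platforms would normally describe the controller via
ACPI or devicetree instead):

        static struct i2c_board_info ccg_info __initdata = {
                I2C_BOARD_INFO("ccgx-ucsi", 0x08),  /* address is hypothetical */
                .irq = 42,                          /* IRQ is hypothetical */
        };

        /* registered early via i2c_register_board_info(1, &ccg_info, 1) */
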
index 9756752c0681f99c2acb1aaf1213ea23e03055f9..45da3e01c7b03ae04b3c889e73d140baa2c9dc7f 100644 (file)
@@ -309,7 +309,7 @@ int usbip_recv(struct socket *sock, void *buf, int size)
        if (!sock || !buf || !size)
                return -EINVAL;
 
-       iov_iter_kvec(&msg.msg_iter, READ|ITER_KVEC, &iov, 1, size);
+       iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, size);
 
        usbip_dbg_xmit("enter\n");
 
index c24bb690680b4104d6621c5b984d869f51af2cd3..50dffe83714c63f180f4ceba7dddeb8498021322 100644 (file)
@@ -203,6 +203,19 @@ struct vhost_scsi {
        int vs_events_nr; /* num of pending events, protected by vq->mutex */
 };
 
+/*
+ * Context for processing request and control queue operations.
+ */
+struct vhost_scsi_ctx {
+       int head;
+       unsigned int out, in;
+       size_t req_size, rsp_size;
+       size_t out_size, in_size;
+       u8 *target, *lunp;
+       void *req;
+       struct iov_iter out_iter;
+};
+
 static struct workqueue_struct *vhost_scsi_workqueue;
 
 /* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
@@ -800,24 +813,120 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
                pr_err("Faulted on virtio_scsi_cmd_resp\n");
 }
 
+static int
+vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
+                   struct vhost_scsi_ctx *vc)
+{
+       int ret = -ENXIO;
+
+       vc->head = vhost_get_vq_desc(vq, vq->iov,
+                                    ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
+                                    NULL, NULL);
+
+       pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
+                vc->head, vc->out, vc->in);
+
+       /* On error, stop handling until the next kick. */
+       if (unlikely(vc->head < 0))
+               goto done;
+
+       /* Nothing new?  Wait for eventfd to tell us they refilled. */
+       if (vc->head == vq->num) {
+               if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
+                       vhost_disable_notify(&vs->dev, vq);
+                       ret = -EAGAIN;
+               }
+               goto done;
+       }
+
+       /*
+        * Get the size of request and response buffers.
+        * FIXME: Not correct for BIDI operation
+        */
+       vc->out_size = iov_length(vq->iov, vc->out);
+       vc->in_size = iov_length(&vq->iov[vc->out], vc->in);
+
+       /*
+        * Copy over the virtio-scsi request header, which for an
+        * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+        * single iovec may contain both the header + outgoing
+        * WRITE payloads.
+        *
+        * copy_from_iter() will advance out_iter, so that it will
+        * point at the start of the outgoing WRITE payload, if
+        * DMA_TO_DEVICE is set.
+        */
+       iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
+       ret = 0;
+
+done:
+       return ret;
+}
+
+static int
+vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
+{
+       if (unlikely(vc->in_size < vc->rsp_size)) {
+               vq_err(vq,
+                      "Response buf too small, need min %zu bytes got %zu",
+                      vc->rsp_size, vc->in_size);
+               return -EINVAL;
+       } else if (unlikely(vc->out_size < vc->req_size)) {
+               vq_err(vq,
+                      "Request buf too small, need min %zu bytes got %zu",
+                      vc->req_size, vc->out_size);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int
+vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
+                  struct vhost_scsi_tpg **tpgp)
+{
+       int ret = -EIO;
+
+       if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
+                                         &vc->out_iter))) {
+               vq_err(vq, "Faulted on copy_from_iter\n");
+       } else if (unlikely(*vc->lunp != 1)) {
+               /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+               vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
+       } else {
+               struct vhost_scsi_tpg **vs_tpg, *tpg;
+
+               vs_tpg = vq->private_data;      /* validated at handler entry */
+
+               tpg = READ_ONCE(vs_tpg[*vc->target]);
+               if (unlikely(!tpg)) {
+                       vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
+               } else {
+                       if (tpgp)
+                               *tpgp = tpg;
+                       ret = 0;
+               }
+       }
+
+       return ret;
+}
+
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
        struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
+       struct vhost_scsi_ctx vc;
        struct vhost_scsi_cmd *cmd;
-       struct iov_iter out_iter, in_iter, prot_iter, data_iter;
+       struct iov_iter in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
-       unsigned int out = 0, in = 0;
-       int head, ret, prot_bytes;
-       size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
-       size_t out_size, in_size;
+       int ret, prot_bytes;
        u16 lun;
-       u8 *target, *lunp, task_attr;
+       u8 task_attr;
        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
-       void *req, *cdb;
+       void *cdb;
 
        mutex_lock(&vq->mutex);
        /*
@@ -828,85 +937,47 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        if (!vs_tpg)
                goto out;
 
+       memset(&vc, 0, sizeof(vc));
+       vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+
        vhost_disable_notify(&vs->dev, vq);
 
        for (;;) {
-               head = vhost_get_vq_desc(vq, vq->iov,
-                                        ARRAY_SIZE(vq->iov), &out, &in,
-                                        NULL, NULL);
-               pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-                        head, out, in);
-               /* On error, stop handling until the next kick. */
-               if (unlikely(head < 0))
-                       break;
-               /* Nothing new?  Wait for eventfd to tell us they refilled. */
-               if (head == vq->num) {
-                       if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
-                               vhost_disable_notify(&vs->dev, vq);
-                               continue;
-                       }
-                       break;
-               }
-               /*
-                * Check for a sane response buffer so we can report early
-                * errors back to the guest.
-                */
-               if (unlikely(vq->iov[out].iov_len < rsp_size)) {
-                       vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
-                               " size, got %zu bytes\n", vq->iov[out].iov_len);
-                       break;
-               }
+               ret = vhost_scsi_get_desc(vs, vq, &vc);
+               if (ret)
+                       goto err;
+
                /*
                 * Setup pointers and values based upon different virtio-scsi
                 * request header if T10_PI is enabled in KVM guest.
                 */
                if (t10_pi) {
-                       req = &v_req_pi;
-                       req_size = sizeof(v_req_pi);
-                       lunp = &v_req_pi.lun[0];
-                       target = &v_req_pi.lun[1];
+                       vc.req = &v_req_pi;
+                       vc.req_size = sizeof(v_req_pi);
+                       vc.lunp = &v_req_pi.lun[0];
+                       vc.target = &v_req_pi.lun[1];
                } else {
-                       req = &v_req;
-                       req_size = sizeof(v_req);
-                       lunp = &v_req.lun[0];
-                       target = &v_req.lun[1];
+                       vc.req = &v_req;
+                       vc.req_size = sizeof(v_req);
+                       vc.lunp = &v_req.lun[0];
+                       vc.target = &v_req.lun[1];
                }
-               /*
-                * FIXME: Not correct for BIDI operation
-                */
-               out_size = iov_length(vq->iov, out);
-               in_size = iov_length(&vq->iov[out], in);
 
                /*
-                * Copy over the virtio-scsi request header, which for a
-                * ANY_LAYOUT enabled guest may span multiple iovecs, or a
-                * single iovec may contain both the header + outgoing
-                * WRITE payloads.
-                *
-                * copy_from_iter() will advance out_iter, so that it will
-                * point at the start of the outgoing WRITE payload, if
-                * DMA_TO_DEVICE is set.
+                * Validate the size of request and response buffers.
+                * Check for a sane response buffer so we can report
+                * early errors back to the guest.
                 */
-               iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
+               ret = vhost_scsi_chk_size(vq, &vc);
+               if (ret)
+                       goto err;
 
-               if (unlikely(!copy_from_iter_full(req, req_size, &out_iter))) {
-                       vq_err(vq, "Faulted on copy_from_iter\n");
-                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                       continue;
-               }
-               /* virtio-scsi spec requires byte 0 of the lun to be 1 */
-               if (unlikely(*lunp != 1)) {
-                       vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
-                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                       continue;
-               }
+               ret = vhost_scsi_get_req(vq, &vc, &tpg);
+               if (ret)
+                       goto err;
+
+               ret = -EIO;     /* bad target on any error from here on */
 
-               tpg = READ_ONCE(vs_tpg[*target]);
-               if (unlikely(!tpg)) {
-                       /* Target does not exist, fail the request */
-                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                       continue;
-               }
                /*
                 * Determine data_direction by calculating the total outgoing
                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
@@ -924,17 +995,17 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 */
                prot_bytes = 0;
 
-               if (out_size > req_size) {
+               if (vc.out_size > vc.req_size) {
                        data_direction = DMA_TO_DEVICE;
-                       exp_data_len = out_size - req_size;
-                       data_iter = out_iter;
-               } else if (in_size > rsp_size) {
+                       exp_data_len = vc.out_size - vc.req_size;
+                       data_iter = vc.out_iter;
+               } else if (vc.in_size > vc.rsp_size) {
                        data_direction = DMA_FROM_DEVICE;
-                       exp_data_len = in_size - rsp_size;
+                       exp_data_len = vc.in_size - vc.rsp_size;
 
-                       iov_iter_init(&in_iter, READ, &vq->iov[out], in,
-                                     rsp_size + exp_data_len);
-                       iov_iter_advance(&in_iter, rsp_size);
+                       iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
+                                     vc.rsp_size + exp_data_len);
+                       iov_iter_advance(&in_iter, vc.rsp_size);
                        data_iter = in_iter;
                } else {
                        data_direction = DMA_NONE;
@@ -950,21 +1021,20 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesout,"
                                                " but wrong data_direction\n");
-                                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                                       continue;
+                                       goto err;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesin,"
                                                " but wrong data_direction\n");
-                                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                                       continue;
+                                       goto err;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        /*
-                        * Set prot_iter to data_iter, and advance past any
+                        * Set prot_iter to data_iter and truncate it to
+                        * prot_bytes, and advance data_iter past any
                         * preceding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
@@ -973,6 +1043,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        if (prot_bytes) {
                                exp_data_len -= prot_bytes;
                                prot_iter = data_iter;
+                               iov_iter_truncate(&prot_iter, prot_bytes);
                                iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
@@ -996,8 +1067,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
-                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                       continue;
+                       goto err;
                }
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
@@ -1005,13 +1075,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                               PTR_ERR(cmd));
-                       vhost_scsi_send_bad_target(vs, vq, head, out);
-                       continue;
+                       goto err;
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
-               cmd->tvc_resp_iov = vq->iov[out];
-               cmd->tvc_in_iovs = in;
+               cmd->tvc_resp_iov = vq->iov[vc.out];
+               cmd->tvc_in_iovs = vc.in;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                         cmd->tvc_cdb[0], cmd->tvc_lun);
@@ -1019,14 +1088,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
                if (data_direction != DMA_NONE) {
-                       ret = vhost_scsi_mapal(cmd,
-                                              prot_bytes, &prot_iter,
-                                              exp_data_len, &data_iter);
-                       if (unlikely(ret)) {
+                       if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
+                                                     &prot_iter, exp_data_len,
+                                                     &data_iter))) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
-                               vhost_scsi_send_bad_target(vs, vq, head, out);
-                               continue;
+                               goto err;
                        }
                }
                /*
@@ -1034,7 +1101,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 * complete the virtio-scsi request in TCM callback context via
                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
-               cmd->tvc_vq_desc = head;
+               cmd->tvc_vq_desc = vc.head;
                /*
                 * Dispatch cmd descriptor for cmwq execution in process
                 * context provided by vhost_scsi_workqueue.  This also ensures
@@ -1043,6 +1110,166 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                 */
                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
                queue_work(vhost_scsi_workqueue, &cmd->work);
+               ret = 0;
+err:
+               /*
+                * ENXIO:  No more requests, or read error, wait for next kick
+                * EINVAL: Invalid response buffer, drop the request
+                * EIO:    Respond with bad target
+                * EAGAIN: Pending request
+                */
+               if (ret == -ENXIO)
+                       break;
+               else if (ret == -EIO)
+                       vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
+       }
+out:
+       mutex_unlock(&vq->mutex);
+}
+
+static void
+vhost_scsi_send_tmf_reject(struct vhost_scsi *vs,
+                          struct vhost_virtqueue *vq,
+                          struct vhost_scsi_ctx *vc)
+{
+       struct virtio_scsi_ctrl_tmf_resp __user *resp;
+       struct virtio_scsi_ctrl_tmf_resp rsp;
+       int ret;
+
+       pr_debug("%s\n", __func__);
+       memset(&rsp, 0, sizeof(rsp));
+       rsp.response = VIRTIO_SCSI_S_FUNCTION_REJECTED;
+       resp = vq->iov[vc->out].iov_base;
+       ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+       if (!ret)
+               vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+       else
+               pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
+}
+
+static void
+vhost_scsi_send_an_resp(struct vhost_scsi *vs,
+                       struct vhost_virtqueue *vq,
+                       struct vhost_scsi_ctx *vc)
+{
+       struct virtio_scsi_ctrl_an_resp __user *resp;
+       struct virtio_scsi_ctrl_an_resp rsp;
+       int ret;
+
+       pr_debug("%s\n", __func__);
+       memset(&rsp, 0, sizeof(rsp));   /* event_actual = 0 */
+       rsp.response = VIRTIO_SCSI_S_OK;
+       resp = vq->iov[vc->out].iov_base;
+       ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+       if (!ret)
+               vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
+       else
+               pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
+}
+
+static void
+vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
+{
+       union {
+               __virtio32 type;
+               struct virtio_scsi_ctrl_an_req an;
+               struct virtio_scsi_ctrl_tmf_req tmf;
+       } v_req;
+       struct vhost_scsi_ctx vc;
+       size_t typ_size;
+       int ret;
+
+       mutex_lock(&vq->mutex);
+       /*
+        * We can handle the vq only after the endpoint is setup by calling the
+        * We can handle the vq only after the endpoint is set up by the
+        * VHOST_SCSI_SET_ENDPOINT ioctl.
+       if (!vq->private_data)
+               goto out;
+
+       memset(&vc, 0, sizeof(vc));
+
+       vhost_disable_notify(&vs->dev, vq);
+
+       for (;;) {
+               ret = vhost_scsi_get_desc(vs, vq, &vc);
+               if (ret)
+                       goto err;
+
+               /*
+                * Get the request type first in order to setup
+                * other parameters dependent on the type.
+                */
+               vc.req = &v_req.type;
+               typ_size = sizeof(v_req.type);
+
+               if (unlikely(!copy_from_iter_full(vc.req, typ_size,
+                                                 &vc.out_iter))) {
+                       vq_err(vq, "Faulted on copy_from_iter tmf type\n");
+                       /*
+                        * The size of the response buffer depends on the
+                        * request type and must be validated against it.
+                        * Since the request type is not known, don't send
+                        * a response.
+                        */
+                       continue;
+               }
+
+               switch (v_req.type) {
+               case VIRTIO_SCSI_T_TMF:
+                       vc.req = &v_req.tmf;
+                       vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
+                       vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
+                       vc.lunp = &v_req.tmf.lun[0];
+                       vc.target = &v_req.tmf.lun[1];
+                       break;
+               case VIRTIO_SCSI_T_AN_QUERY:
+               case VIRTIO_SCSI_T_AN_SUBSCRIBE:
+                       vc.req = &v_req.an;
+                       vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
+                       vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
+                       vc.lunp = &v_req.an.lun[0];
+                       vc.target = NULL;
+                       break;
+               default:
+                       vq_err(vq, "Unknown control request %d", v_req.type);
+                       continue;
+               }
+
+               /*
+                * Validate the size of request and response buffers.
+                * Check for a sane response buffer so we can report
+                * early errors back to the guest.
+                */
+               ret = vhost_scsi_chk_size(vq, &vc);
+               if (ret)
+                       goto err;
+
+               /*
+                * Get the rest of the request now that its size is known.
+                */
+               vc.req += typ_size;
+               vc.req_size -= typ_size;
+
+               ret = vhost_scsi_get_req(vq, &vc, NULL);
+               if (ret)
+                       goto err;
+
+               if (v_req.type == VIRTIO_SCSI_T_TMF)
+                       vhost_scsi_send_tmf_reject(vs, vq, &vc);
+               else
+                       vhost_scsi_send_an_resp(vs, vq, &vc);
+err:
+               /*
+                * ENXIO:  No more requests, or read error, wait for next kick
+                * EINVAL: Invalid response buffer, drop the request
+                * EIO:    Respond with bad target
+                * EAGAIN: Pending request
+                */
+               if (ret == -ENXIO)
+                       break;
+               else if (ret == -EIO)
+                       vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
        }
 out:
        mutex_unlock(&vq->mutex);
@@ -1050,7 +1277,12 @@ out:
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 {
+       struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+                                               poll.work);
+       struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);
+
        pr_debug("%s: The handling func for control queue.\n", __func__);
+       vhost_scsi_ctl_handle_vq(vs, vq);
 }
 
 static void
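
The refactor above funnels every per-iteration failure through a single err
label keyed on the errno, replacing the repeated vhost_scsi_send_bad_target()
calls. The control flow, reduced to a sketch with generic names:

        for (;;) {
                ret = get_desc(vq, &vc);        /* -ENXIO / -EAGAIN here */
                if (ret)
                        goto err;
                ...
                ret = 0;
        err:
                if (ret == -ENXIO)
                        break;                  /* wait for the next kick */
                else if (ret == -EIO)
                        send_bad_target(vq, &vc);
                /* -EINVAL, -EAGAIN: drop the request and continue */
        }
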
index f52008bb8df76e2760dce41a656be7407aff4738..6b98d8e3a5bf8247784303ce890a990fb8ec1259 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/interval_tree_generic.h>
+#include <linux/nospec.h>
 
 #include "vhost.h"
 
@@ -943,10 +944,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
                if (msg->iova <= vq_msg->iova &&
                    msg->iova + msg->size - 1 >= vq_msg->iova &&
                    vq_msg->type == VHOST_IOTLB_MISS) {
-                       mutex_lock(&node->vq->mutex);
                        vhost_poll_queue(&node->vq->poll);
-                       mutex_unlock(&node->vq->mutex);
-
                        list_del(&node->node);
                        kfree(node);
                }
@@ -1387,6 +1385,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
        if (idx >= d->nvqs)
                return -ENOBUFS;
 
+       idx = array_index_nospec(idx, d->nvqs);
        vq = d->vqs[idx];
 
        mutex_lock(&vq->mutex);
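
The added array_index_nospec() clamps the user-supplied index between the
bounds check and the dependent load, closing the Spectre-v1 speculation
window. The general pattern, sketched with generic names:

        if (idx >= nr)
                return -ENOBUFS;
        idx = array_index_nospec(idx, nr);      /* no speculative OOB load */
        elem = table[idx];
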
index 34bc3ab40c6da8d5637e6e5bd19e6fb27ec27fba..98ed5be132c6a59a798346f182259b398cdfc11c 100644 (file)
@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <linux/virtio_vsock.h>
 #include <linux/vhost.h>
+#include <linux/hashtable.h>
 
 #include <net/af_vsock.h>
 #include "vhost.h"
@@ -27,14 +28,14 @@ enum {
 
 /* Used to track all the vhost_vsock instances on the system. */
 static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
 
 struct vhost_vsock {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[2];
 
-       /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
-       struct list_head list;
+       /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+       struct hlist_node hash;
 
        struct vhost_work send_pkt_work;
        spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
        return VHOST_VSOCK_DEFAULT_HOST_CID;
 }
 
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
 {
        struct vhost_vsock *vsock;
 
-       list_for_each_entry(vsock, &vhost_vsock_list, list) {
+       hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
                u32 other_cid = vsock->guest_cid;
 
                /* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
        return NULL;
 }
 
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
-       struct vhost_vsock *vsock;
-
-       spin_lock_bh(&vhost_vsock_lock);
-       vsock = __vhost_vsock_get(guest_cid);
-       spin_unlock_bh(&vhost_vsock_lock);
-
-       return vsock;
-}
-
 static void
 vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
                            struct vhost_virtqueue *vq)
@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        struct vhost_vsock *vsock;
        int len = pkt->len;
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id  */
        vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
        if (!vsock) {
+               rcu_read_unlock();
                virtio_transport_free_pkt(pkt);
                return -ENODEV;
        }
@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
        spin_unlock_bh(&vsock->send_pkt_list_lock);
 
        vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+       rcu_read_unlock();
        return len;
 }
 
@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
        struct vhost_vsock *vsock;
        struct virtio_vsock_pkt *pkt, *n;
        int cnt = 0;
+       int ret = -ENODEV;
        LIST_HEAD(freeme);
 
+       rcu_read_lock();
+
        /* Find the vhost_vsock according to guest context id  */
        vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
        if (!vsock)
-               return -ENODEV;
+               goto out;
 
        spin_lock_bh(&vsock->send_pkt_list_lock);
        list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
                        vhost_poll_queue(&tx_vq->poll);
        }
 
-       return 0;
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
 }
 
 static struct virtio_vsock_pkt *
@@ -533,10 +537,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
        spin_lock_init(&vsock->send_pkt_list_lock);
        INIT_LIST_HEAD(&vsock->send_pkt_list);
        vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
-       spin_lock_bh(&vhost_vsock_lock);
-       list_add_tail(&vsock->list, &vhost_vsock_list);
-       spin_unlock_bh(&vhost_vsock_lock);
        return 0;
 
 out:
@@ -563,13 +563,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
         * executing.
         */
 
-       if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
-               sock_set_flag(sk, SOCK_DONE);
-               vsk->peer_shutdown = SHUTDOWN_MASK;
-               sk->sk_state = SS_UNCONNECTED;
-               sk->sk_err = ECONNRESET;
-               sk->sk_error_report(sk);
-       }
+       /* If the peer is still valid, no need to reset connection */
+       if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+               return;
+
+       /* If the close timeout is pending, let it expire.  This avoids races
+        * with the timeout callback.
+        */
+       if (vsk->close_work_scheduled)
+               return;
+
+       sock_set_flag(sk, SOCK_DONE);
+       vsk->peer_shutdown = SHUTDOWN_MASK;
+       sk->sk_state = SS_UNCONNECTED;
+       sk->sk_err = ECONNRESET;
+       sk->sk_error_report(sk);
 }
 
 static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
@@ -577,9 +585,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
        struct vhost_vsock *vsock = file->private_data;
 
        spin_lock_bh(&vhost_vsock_lock);
-       list_del(&vsock->list);
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
        spin_unlock_bh(&vhost_vsock_lock);
 
+       /* Wait for other CPUs to finish using vsock */
+       synchronize_rcu();
+
        /* Iterating over all connections for all CIDs to find orphans is
         * inefficient.  Room for improvement here. */
        vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -620,12 +632,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
 
        /* Refuse if CID is already in use */
        spin_lock_bh(&vhost_vsock_lock);
-       other = __vhost_vsock_get(guest_cid);
+       other = vhost_vsock_get(guest_cid);
        if (other && other != vsock) {
                spin_unlock_bh(&vhost_vsock_lock);
                return -EADDRINUSE;
        }
+
+       if (vsock->guest_cid)
+               hash_del_rcu(&vsock->hash);
+
        vsock->guest_cid = guest_cid;
+       hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
        spin_unlock_bh(&vhost_vsock_lock);
 
        return 0;
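
Taken together, these hunks move vsock lookup to the usual RCU hash-table
discipline: readers hold the RCU read lock across lookup and use, writers
serialize on vhost_vsock_lock, and release waits for readers before freeing.
A sketch with generic names:

        rcu_read_lock();
        obj = lookup(key);              /* hash_for_each_possible_rcu() walk */
        if (obj)
                use(obj);               /* valid until rcu_read_unlock() */
        rcu_read_unlock();

        spin_lock_bh(&writer_lock);     /* writers take the spinlock */
        hash_del_rcu(&obj->hash);
        spin_unlock_bh(&writer_lock);
        synchronize_rcu();              /* drain readers before freeing */
        kfree(obj);
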
index 678b270631983a47718d5d8d78f395e9d639ec83..f9ef0673a083cb63c776ca58631710e659329ee3 100644 (file)
@@ -562,7 +562,30 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                goto err_alloc;
        }
 
-       if (!data->levels) {
+       if (data->levels) {
+               /*
+                * For the DT case, data->levels is filled only when
+                * brightness-levels is defined. For the non-DT case,
+                * data->levels can come from platform data, though that
+                * is unusual.
+                */
+               for (i = 0; i <= data->max_brightness; i++) {
+                       if (data->levels[i] > pb->scale)
+                               pb->scale = data->levels[i];
+
+                       pb->levels = data->levels;
+               }
+       } else if (!data->max_brightness) {
+               /*
+                * If no brightness levels are provided and max_brightness is
+                * not set, use the default brightness table. For the DT case,
+                * max_brightness is set to 0 when brightness levels is not
+                * max_brightness is set to 0 when brightness-levels is not
+                * set to some value.
+                */
+
+               /* Get the PWM period (in nanoseconds) */
+               pwm_get_state(pb->pwm, &state);
+
                ret = pwm_backlight_brightness_default(&pdev->dev, data,
                                                       state.period);
                if (ret < 0) {
@@ -570,13 +593,19 @@ static int pwm_backlight_probe(struct platform_device *pdev)
                                "failed to setup default brightness table\n");
                        goto err_alloc;
                }
-       }
 
-       for (i = 0; i <= data->max_brightness; i++) {
-               if (data->levels[i] > pb->scale)
-                       pb->scale = data->levels[i];
+               for (i = 0; i <= data->max_brightness; i++) {
+                       if (data->levels[i] > pb->scale)
+                               pb->scale = data->levels[i];
 
-               pb->levels = data->levels;
+                       pb->levels = data->levels;
+               }
+       } else {
+               /*
+                * This only happens in the non-DT case, where platform data
+                * sets the max_brightness value.
+                */
+               pb->scale = data->max_brightness;
        }
 
        pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
index d1c1f6283729623d86ebea846a7e506653f997c0..728ecd1eea305a50b5a899de53ac873cb8bceca2 100644 (file)
 #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
 #define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
 
+#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
+                                            __GFP_NOMEMALLOC)
+/* The order of free page blocks to report to host */
+#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1)
+/* The size of a free page block in bytes */
+#define VIRTIO_BALLOON_FREE_PAGE_SIZE \
+       (1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT))
+
 #ifdef CONFIG_BALLOON_COMPACTION
 static struct vfsmount *balloon_mnt;
 #endif
 
+enum virtio_balloon_vq {
+       VIRTIO_BALLOON_VQ_INFLATE,
+       VIRTIO_BALLOON_VQ_DEFLATE,
+       VIRTIO_BALLOON_VQ_STATS,
+       VIRTIO_BALLOON_VQ_FREE_PAGE,
+       VIRTIO_BALLOON_VQ_MAX
+};
+
 struct virtio_balloon {
        struct virtio_device *vdev;
-       struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
+       struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;
+
+       /* Balloon's own wq for cpu-intensive work items */
+       struct workqueue_struct *balloon_wq;
+       /* The free page reporting work item submitted to the balloon wq */
+       struct work_struct report_free_page_work;
 
        /* The balloon servicing is delegated to a freezable workqueue. */
        struct work_struct update_balloon_stats_work;
@@ -57,6 +78,18 @@ struct virtio_balloon {
        spinlock_t stop_update_lock;
        bool stop_update;
 
+       /* The list of allocated free pages, waiting to be given back to mm */
+       struct list_head free_page_list;
+       spinlock_t free_page_list_lock;
+       /* The number of free page blocks on the above list */
+       unsigned long num_free_page_blocks;
+       /* The cmd id received from host */
+       u32 cmd_id_received;
+       /* The cmd id that is actively in use */
+       __virtio32 cmd_id_active;
+       /* Buffer to store the stop sign */
+       __virtio32 cmd_id_stop;
+
        /* Waiting for host to ack the pages we released. */
        wait_queue_head_t acked;
 
@@ -320,17 +353,6 @@ static void stats_handle_request(struct virtio_balloon *vb)
        virtqueue_kick(vq);
 }
 
-static void virtballoon_changed(struct virtio_device *vdev)
-{
-       struct virtio_balloon *vb = vdev->priv;
-       unsigned long flags;
-
-       spin_lock_irqsave(&vb->stop_update_lock, flags);
-       if (!vb->stop_update)
-               queue_work(system_freezable_wq, &vb->update_balloon_size_work);
-       spin_unlock_irqrestore(&vb->stop_update_lock, flags);
-}
-
 static inline s64 towards_target(struct virtio_balloon *vb)
 {
        s64 target;
@@ -347,6 +369,60 @@ static inline s64 towards_target(struct virtio_balloon *vb)
        return target - vb->num_pages;
 }
 
+/* Gives back @num_to_return blocks of free pages to mm. */
+static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
+                                            unsigned long num_to_return)
+{
+       struct page *page;
+       unsigned long num_returned;
+
+       spin_lock_irq(&vb->free_page_list_lock);
+       for (num_returned = 0; num_returned < num_to_return; num_returned++) {
+               page = balloon_page_pop(&vb->free_page_list);
+               if (!page)
+                       break;
+               free_pages((unsigned long)page_address(page),
+                          VIRTIO_BALLOON_FREE_PAGE_ORDER);
+       }
+       vb->num_free_page_blocks -= num_returned;
+       spin_unlock_irq(&vb->free_page_list_lock);
+
+       return num_returned;
+}
+
+static void virtballoon_changed(struct virtio_device *vdev)
+{
+       struct virtio_balloon *vb = vdev->priv;
+       unsigned long flags;
+       s64 diff = towards_target(vb);
+
+       if (diff) {
+               spin_lock_irqsave(&vb->stop_update_lock, flags);
+               if (!vb->stop_update)
+                       queue_work(system_freezable_wq,
+                                  &vb->update_balloon_size_work);
+               spin_unlock_irqrestore(&vb->stop_update_lock, flags);
+       }
+
+       if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+               virtio_cread(vdev, struct virtio_balloon_config,
+                            free_page_report_cmd_id, &vb->cmd_id_received);
+               if (vb->cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
+                       /* Pass ULONG_MAX to give back all the free pages */
+                       return_free_pages_to_mm(vb, ULONG_MAX);
+               } else if (vb->cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
+                          vb->cmd_id_received !=
+                          virtio32_to_cpu(vdev, vb->cmd_id_active)) {
+                       spin_lock_irqsave(&vb->stop_update_lock, flags);
+                       if (!vb->stop_update) {
+                               queue_work(vb->balloon_wq,
+                                          &vb->report_free_page_work);
+                       }
+                       spin_unlock_irqrestore(&vb->stop_update_lock, flags);
+               }
+       }
+}
+
 static void update_balloon_size(struct virtio_balloon *vb)
 {
        u32 actual = vb->num_pages;
@@ -389,26 +465,44 @@ static void update_balloon_size_func(struct work_struct *work)
 
 static int init_vqs(struct virtio_balloon *vb)
 {
-       struct virtqueue *vqs[3];
-       vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
-       static const char * const names[] = { "inflate", "deflate", "stats" };
-       int err, nvqs;
+       struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
+       vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
+       const char *names[VIRTIO_BALLOON_VQ_MAX];
+       int err;
 
        /*
-        * We expect two virtqueues: inflate and deflate, and
-        * optionally stat.
+        * The inflate and deflate queues are used unconditionally. An entry
+        * in names[] is NULL when the related feature is not enabled, which
+        * causes find_vqs to skip allocating the corresponding virtqueue.
         */
-       nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
-       err = virtio_find_vqs(vb->vdev, nvqs, vqs, callbacks, names, NULL);
+       callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
+       names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
+       callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
+       names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
+       names[VIRTIO_BALLOON_VQ_STATS] = NULL;
+       names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+
+       if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+               names[VIRTIO_BALLOON_VQ_STATS] = "stats";
+               callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
+       }
+
+       if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+               names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
+               callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+       }
+
+       err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
+                                        vqs, callbacks, names, NULL, NULL);
        if (err)
                return err;
 
-       vb->inflate_vq = vqs[0];
-       vb->deflate_vq = vqs[1];
+       vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
+       vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
                struct scatterlist sg;
                unsigned int num_stats;
-               vb->stats_vq = vqs[2];
+               vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];
 
                /*
                 * Prime this virtqueue with one buffer so the hypervisor can
@@ -426,9 +520,145 @@ static int init_vqs(struct virtio_balloon *vb)
                }
                virtqueue_kick(vb->stats_vq);
        }
+
+       if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+               vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
+
+       return 0;
+}
+
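+/*
+ * Free page hinting protocol, as implemented below: the guest echoes the
+ * command id it received back to the host on the free_page_vq as an outbuf,
+ * streams free page blocks as inbufs, and finally sends cmd_id_stop to mark
+ * the end of the reporting run.
+ */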
+static int send_cmd_id_start(struct virtio_balloon *vb)
+{
+       struct scatterlist sg;
+       struct virtqueue *vq = vb->free_page_vq;
+       int err, unused;
+
+       /* Detach all the used buffers from the vq */
+       while (virtqueue_get_buf(vq, &unused))
+               ;
+
+       vb->cmd_id_active = cpu_to_virtio32(vb->vdev, vb->cmd_id_received);
+       sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
+       err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
+       if (!err)
+               virtqueue_kick(vq);
+       return err;
+}
+
+static int send_cmd_id_stop(struct virtio_balloon *vb)
+{
+       struct scatterlist sg;
+       struct virtqueue *vq = vb->free_page_vq;
+       int err, unused;
+
+       /* Detach all the used buffers from the vq */
+       while (virtqueue_get_buf(vq, &unused))
+               ;
+
+       sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
+       err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
+       if (!err)
+               virtqueue_kick(vq);
+       return err;
+}
+
+static int get_free_page_and_send(struct virtio_balloon *vb)
+{
+       struct virtqueue *vq = vb->free_page_vq;
+       struct page *page;
+       struct scatterlist sg;
+       int err, unused;
+       void *p;
+
+       /* Detach all the used buffers from the vq */
+       while (virtqueue_get_buf(vq, &unused))
+               ;
+
+       page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
+                          VIRTIO_BALLOON_FREE_PAGE_ORDER);
+       /*
+        * When the allocation returns NULL, it indicates that all the
+        * possible free pages have already been taken, so return -EINTR to
+        * stop.
+        */
+       if (!page)
+               return -EINTR;
+
+       p = page_address(page);
+       sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
+       /* There is always 1 entry reserved for the cmd id to use. */
+       if (vq->num_free > 1) {
+               err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
+               if (unlikely(err)) {
+                       free_pages((unsigned long)p,
+                                  VIRTIO_BALLOON_FREE_PAGE_ORDER);
+                       return err;
+               }
+               virtqueue_kick(vq);
+               spin_lock_irq(&vb->free_page_list_lock);
+               balloon_page_push(&vb->free_page_list, page);
+               vb->num_free_page_blocks++;
+               spin_unlock_irq(&vb->free_page_list_lock);
+       } else {
+               /*
+                * The vq has no available entry to add this page block, so
+                * just free it.
+                */
+               free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
+       }
+
+       return 0;
+}
+
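+/*
+ * Keep allocating blocks of VIRTIO_BALLOON_FREE_PAGE_ORDER pages and posting
+ * them to the host until either the allocator runs dry or the host publishes
+ * a different command id. The blocks stay on free_page_list so that they can
+ * be handed back to the mm later.
+ */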
+static int send_free_pages(struct virtio_balloon *vb)
+{
+       int err;
+       u32 cmd_id_active;
+
+       while (1) {
+               /*
+                * If a stop id or a new cmd id was just received from host,
+                * stop the reporting.
+                */
+               cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
+               if (cmd_id_active != vb->cmd_id_received)
+                       break;
+
+               /*
+                * The free page blocks are allocated and sent to host one by
+                * one.
+                */
+               err = get_free_page_and_send(vb);
+               if (err == -EINTR)
+                       break;
+               else if (unlikely(err))
+                       return err;
+       }
+
        return 0;
 }
 
+static void report_free_page_func(struct work_struct *work)
+{
+       int err;
+       struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
+                                                report_free_page_work);
+       struct device *dev = &vb->vdev->dev;
+
+       /* Start by sending the received cmd id to host with an outbuf. */
+       err = send_cmd_id_start(vb);
+       if (unlikely(err))
+               dev_err(dev, "Failed to send a start id, err = %d\n", err);
+
+       err = send_free_pages(vb);
+       if (unlikely(err))
+               dev_err(dev, "Failed to send a free page, err = %d\n", err);
+
+       /* End by sending a stop id to host with an outbuf. */
+       err = send_cmd_id_stop(vb);
+       if (unlikely(err))
+               dev_err(dev, "Failed to send a stop id, err = %d\n", err);
+}
+
 #ifdef CONFIG_BALLOON_COMPACTION
 /*
  * virtballoon_migratepage - perform the balloon page migration on behalf of
@@ -512,14 +742,23 @@ static struct file_system_type balloon_fs = {
 
 #endif /* CONFIG_BALLOON_COMPACTION */
 
-static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
-                                                 struct shrink_control *sc)
+static unsigned long shrink_free_pages(struct virtio_balloon *vb,
+                                      unsigned long pages_to_free)
 {
-       unsigned long pages_to_free, pages_freed = 0;
-       struct virtio_balloon *vb = container_of(shrinker,
-                                       struct virtio_balloon, shrinker);
+       unsigned long blocks_to_free, blocks_freed;
 
-       pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
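+       /*
+        * Hinted pages are kept in blocks of 2^VIRTIO_BALLOON_FREE_PAGE_ORDER
+        * pages, so round the request up to a whole number of blocks.
+        */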
+       pages_to_free = round_up(pages_to_free,
+                                1 << VIRTIO_BALLOON_FREE_PAGE_ORDER);
+       blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
+       blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);
+
+       return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+}
+
+static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
+                                         unsigned long pages_to_free)
+{
+       unsigned long pages_freed = 0;
 
        /*
         * One invocation of leak_balloon can deflate at most
@@ -527,12 +766,33 @@ static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
         * multiple times to deflate pages till reaching pages_to_free.
         */
        while (vb->num_pages && pages_to_free) {
+               pages_freed += leak_balloon(vb, pages_to_free) /
+                                       VIRTIO_BALLOON_PAGES_PER_PAGE;
                pages_to_free -= pages_freed;
-               pages_freed += leak_balloon(vb, pages_to_free);
        }
        update_balloon_size(vb);
 
-       return pages_freed / VIRTIO_BALLOON_PAGES_PER_PAGE;
+       return pages_freed;
+}
+
+static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
+                                                 struct shrink_control *sc)
+{
+       unsigned long pages_to_free, pages_freed = 0;
+       struct virtio_balloon *vb = container_of(shrinker,
+                                       struct virtio_balloon, shrinker);
+
+       pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;
+
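+       /*
+        * Hinted free page blocks are merely on loan to the hinting machinery,
+        * so they are presumably the cheaper thing to reclaim; shrink them
+        * first and only deflate the balloon proper for any remainder.
+        */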
+       if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+               pages_freed = shrink_free_pages(vb, pages_to_free);
+
+       if (pages_freed >= pages_to_free)
+               return pages_freed;
+
+       pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);
+
+       return pages_freed;
 }
 
 static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
@@ -540,8 +800,12 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
 {
        struct virtio_balloon *vb = container_of(shrinker,
                                        struct virtio_balloon, shrinker);
+       unsigned long count;
 
-       return vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
+       count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
+       /* Each free page block holds 2^VIRTIO_BALLOON_FREE_PAGE_ORDER pages. */
+       count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;
+
+       return count;
 }
 
 static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
@@ -561,6 +825,7 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
 static int virtballoon_probe(struct virtio_device *vdev)
 {
        struct virtio_balloon *vb;
+       __u32 poison_val;
        int err;
 
        if (!vdev->config->get) {
@@ -604,6 +869,36 @@ static int virtballoon_probe(struct virtio_device *vdev)
        }
        vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
 #endif
+       if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+               /*
+                * There is always one entry reserved for cmd id, so the ring
+                * size needs to be at least two to report free page hints.
+                */
+               if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
+                       err = -ENOSPC;
+                       goto out_del_vqs;
+               }
+               vb->balloon_wq = alloc_workqueue("balloon-wq",
+                                       WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
+               if (!vb->balloon_wq) {
+                       err = -ENOMEM;
+                       goto out_del_vqs;
+               }
+               INIT_WORK(&vb->report_free_page_work, report_free_page_func);
+               vb->cmd_id_received = VIRTIO_BALLOON_CMD_ID_STOP;
+               vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
+                                                 VIRTIO_BALLOON_CMD_ID_STOP);
+               vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
+                                                 VIRTIO_BALLOON_CMD_ID_STOP);
+               vb->num_free_page_blocks = 0;
+               spin_lock_init(&vb->free_page_list_lock);
+               INIT_LIST_HEAD(&vb->free_page_list);
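+               /*
+                * Tell the host the poison pattern in use so that it can
+                * judge whether hinted pages must retain their contents.
+                */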
+               if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
+                       memset(&poison_val, PAGE_POISON, sizeof(poison_val));
+                       virtio_cwrite(vb->vdev, struct virtio_balloon_config,
+                                     poison_val, &poison_val);
+               }
+       }
        /*
         * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
         * shrinker needs to be registered to relieve memory pressure.
@@ -611,7 +906,7 @@ static int virtballoon_probe(struct virtio_device *vdev)
        if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
                err = virtio_balloon_register_shrinker(vb);
                if (err)
-                       goto out_del_vqs;
+                       goto out_del_balloon_wq;
        }
        virtio_device_ready(vdev);
 
@@ -619,6 +914,9 @@ static int virtballoon_probe(struct virtio_device *vdev)
                virtballoon_changed(vdev);
        return 0;
 
+out_del_balloon_wq:
+       if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+               destroy_workqueue(vb->balloon_wq);
 out_del_vqs:
        vdev->config->del_vqs(vdev);
 out_free_vb:
@@ -652,6 +950,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
        cancel_work_sync(&vb->update_balloon_size_work);
        cancel_work_sync(&vb->update_balloon_stats_work);
 
+       if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
+               cancel_work_sync(&vb->report_free_page_work);
+               destroy_workqueue(vb->balloon_wq);
+       }
+
        remove_common(vb);
 #ifdef CONFIG_BALLOON_COMPACTION
        if (vb->vb_dev_info.inode)
@@ -695,6 +998,9 @@ static int virtballoon_restore(struct virtio_device *vdev)
 
 static int virtballoon_validate(struct virtio_device *vdev)
 {
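+       /*
+        * Only keep VIRTIO_BALLOON_F_PAGE_POISON when the guest really
+        * poisons freed pages; otherwise there is no pattern worth
+        * advertising to the host.
+        */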
+       if (!page_poisoning_enabled())
+               __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
+
        __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
        return 0;
 }
@@ -703,6 +1009,8 @@ static unsigned int features[] = {
        VIRTIO_BALLOON_F_MUST_TELL_HOST,
        VIRTIO_BALLOON_F_STATS_VQ,
        VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
+       VIRTIO_BALLOON_F_FREE_PAGE_HINT,
+       VIRTIO_BALLOON_F_PAGE_POISON,
 };
 
 static struct virtio_driver virtio_balloon_driver = {
index fdfc64f5aceaa807e392b887d278daf466de3eae..221b7333d067bc95c262a3eee4913ea70dfde1bd 100644 (file)
@@ -251,25 +251,10 @@ static void release_memory_resource(struct resource *resource)
        kfree(resource);
 }
 
-/*
- * Host memory not allocated to dom0. We can use this range for hotplug-based
- * ballooning.
- *
- * It's a type-less resource. Setting IORESOURCE_MEM will make resource
- * management algorithms (arch_remove_reservations()) look into guest e820,
- * which we don't want.
- */
-static struct resource hostmem_resource = {
-       .name   = "Host RAM",
-};
-
-void __attribute__((weak)) __init arch_xen_balloon_init(struct resource *res)
-{}
-
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
-       struct resource *res, *res_hostmem;
-       int ret = -ENOMEM;
+       struct resource *res;
+       int ret;
 
        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
@@ -278,42 +263,13 @@ static struct resource *additional_memory_resource(phys_addr_t size)
        res->name = "System RAM";
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
-       res_hostmem = kzalloc(sizeof(*res), GFP_KERNEL);
-       if (res_hostmem) {
-               /* Try to grab a range from hostmem */
-               res_hostmem->name = "Host memory";
-               ret = allocate_resource(&hostmem_resource, res_hostmem,
-                                       size, 0, -1,
-                                       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-       }
-
-       if (!ret) {
-               /*
-                * Insert this resource into iomem. Because hostmem_resource
-                * tracks portion of guest e820 marked as UNUSABLE noone else
-                * should try to use it.
-                */
-               res->start = res_hostmem->start;
-               res->end = res_hostmem->end;
-               ret = insert_resource(&iomem_resource, res);
-               if (ret < 0) {
-                       pr_err("Can't insert iomem_resource [%llx - %llx]\n",
-                               res->start, res->end);
-                       release_memory_resource(res_hostmem);
-                       res_hostmem = NULL;
-                       res->start = res->end = 0;
-               }
-       }
-
-       if (ret) {
-               ret = allocate_resource(&iomem_resource, res,
-                                       size, 0, -1,
-                                       PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
-               if (ret < 0) {
-                       pr_err("Cannot allocate new System RAM resource\n");
-                       kfree(res);
-                       return NULL;
-               }
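+       /*
+        * With the separate host-RAM resource tree gone, hotplug ranges are
+        * carved directly out of the general iomem pool.
+        */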
+       ret = allocate_resource(&iomem_resource, res,
+                               size, 0, -1,
+                               PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
+       if (ret < 0) {
+               pr_err("Cannot allocate new System RAM resource\n");
+               kfree(res);
+               return NULL;
        }
 
 #ifdef CONFIG_SPARSEMEM
@@ -325,7 +281,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                        pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
                               pfn, limit);
                        release_memory_resource(res);
-                       release_memory_resource(res_hostmem);
                        return NULL;
                }
        }
@@ -750,8 +705,6 @@ static int __init balloon_init(void)
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
        register_sysctl_table(xen_root);
-
-       arch_xen_balloon_init(&hostmem_resource);
 #endif
 
 #ifdef CONFIG_XEN_PV
index f15f89df1f3653675da3b84b1c7ce47debd1bac6..7ea6fb6a2e5dd78c53a79bfea140e2e297858f7d 100644 (file)
@@ -914,7 +914,7 @@ int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 
        ret = xenmem_reservation_increase(args->nr_pages, args->frames);
        if (ret != args->nr_pages) {
-               pr_debug("Failed to decrease reservation for DMA buffer\n");
+               pr_debug("Failed to increase reservation for DMA buffer\n");
                ret = -EFAULT;
        } else {
                ret = 0;
index df1ed37c3269ebd8170a21583676b80efe50a47f..de01a6d0059dc4adcb98a24197750f72b0b4ceaf 100644 (file)
 
 MODULE_LICENSE("GPL");
 
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
-                       "the privcmd-buf device per open file");
-
 struct privcmd_buf_private {
        struct mutex lock;
        struct list_head list;
-       unsigned int allocated;
 };
 
 struct privcmd_buf_vma_private {
@@ -60,13 +54,10 @@ static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
 {
        unsigned int i;
 
-       vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
        list_del(&vma_priv->list);
 
        for (i = 0; i < vma_priv->n_pages; i++)
-               if (vma_priv->pages[i])
-                       __free_page(vma_priv->pages[i]);
+               __free_page(vma_priv->pages[i]);
 
        kfree(vma_priv);
 }
@@ -146,8 +137,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        unsigned int i;
        int ret = 0;
 
-       if (!(vma->vm_flags & VM_SHARED) || count > limit ||
-           file_priv->allocated + count > limit)
+       if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
 
        vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
@@ -155,19 +145,15 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
        if (!vma_priv)
                return -ENOMEM;
 
-       vma_priv->n_pages = count;
-       count = 0;
-       for (i = 0; i < vma_priv->n_pages; i++) {
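+       /*
+        * Count n_pages as we go so that only successfully allocated pages
+        * are freed on the cleanup path.
+        */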
+       for (i = 0; i < count; i++) {
                vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!vma_priv->pages[i])
                        break;
-               count++;
+               vma_priv->n_pages++;
        }
 
        mutex_lock(&file_priv->lock);
 
-       file_priv->allocated += count;
-
        vma_priv->file_priv = file_priv;
        vma_priv->users = 1;
 
index b1092fbefa6309d2535b17b78979b6f3fa9b2b42..2e5d845b5091478252dfb1ed17395cf9c5ce870a 100644 (file)
@@ -137,13 +137,13 @@ static void pvcalls_conn_back_read(void *opaque)
        if (masked_prod < masked_cons) {
                vec[0].iov_base = data->in + masked_prod;
                vec[0].iov_len = wanted;
-               iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 1, wanted);
+               iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, wanted);
        } else {
                vec[0].iov_base = data->in + masked_prod;
                vec[0].iov_len = array_size - masked_prod;
                vec[1].iov_base = data->in;
                vec[1].iov_len = wanted - vec[0].iov_len;
-               iov_iter_kvec(&msg.msg_iter, ITER_KVEC|WRITE, vec, 2, wanted);
+               iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, wanted);
        }
 
        atomic_set(&map->read, 0);
@@ -195,13 +195,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
        if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
                vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
                vec[0].iov_len = size;
-               iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 1, size);
+               iov_iter_kvec(&msg.msg_iter, READ, vec, 1, size);
        } else {
                vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
                vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
                vec[1].iov_base = data->out;
                vec[1].iov_len = size - vec[0].iov_len;
-               iov_iter_kvec(&msg.msg_iter, ITER_KVEC|READ, vec, 2, size);
+               iov_iter_kvec(&msg.msg_iter, READ, vec, 2, size);
        }
 
        atomic_set(&map->write, 0);
index 2f11ca72a281410122ef0b4f0dc4f173ef7c6697..77224d8f3e6fe6ee17cb06f81f20be18069422a9 100644 (file)
@@ -385,8 +385,8 @@ static int create_active(struct sock_mapping *map, int *evtchn)
 out_error:
        if (*evtchn >= 0)
                xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
-       kfree(map->active.data.in);
-       kfree(map->active.ring);
+       free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
+       free_page((unsigned long)map->active.ring);
        return ret;
 }
 
index 23f1387b3ef791b515b97b2ee694dd946ff712b3..e7df65d32c9181f74cf337fe181876f05f87bfe6 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/xen/hypervisor.h>
 
 #include <xen/xen.h>
+#include <xen/xen-ops.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
index e1cbdfdb7c684fd24fdb6f25ee03f4e253e9ef58..0bcbcc20f76954e4e8e3d959628e87f3915b3ce3 100644 (file)
@@ -65,7 +65,7 @@ static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page)
        if (retval == 0)
                return retval;
 
-       iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE);
+       iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);
 
        retval = p9_client_read(fid, page_offset(page), &to, &err);
        if (err) {
@@ -175,7 +175,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
        bvec.bv_page = page;
        bvec.bv_offset = 0;
        bvec.bv_len = len;
-       iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len);
+       iov_iter_bvec(&from, WRITE, &bvec, 1, len);
 
        /* We should have writeback_fid always set */
        BUG_ON(!v9inode->writeback_fid);
index cb6c4031af552b010c2e8ff5469b9088c769b3b7..00745147329dc9b6876404011378a8a29ef1791c 100644 (file)
@@ -123,7 +123,7 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
                if (rdir->tail == rdir->head) {
                        struct iov_iter to;
                        int n;
-                       iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen);
+                       iov_iter_kvec(&to, READ, &kvec, 1, buflen);
                        n = p9_client_read(file->private_data, ctx->pos, &to,
                                           &err);
                        if (err)
index 352abc39e891a1468d3576cc199fa13b089f32f7..ac8ff8ca4c115fa0ae52ec4c8fcbe47499780d94 100644 (file)
@@ -32,7 +32,7 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
        struct iov_iter to;
        int err;
 
-       iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size);
+       iov_iter_kvec(&to, READ, &kvec, 1, buffer_size);
 
        attr_fid = p9_client_xattrwalk(fid, name, &attr_size);
        if (IS_ERR(attr_fid)) {
@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
        struct iov_iter from;
        int retval, err;
 
-       iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
+       iov_iter_kvec(&from, WRITE, &kvec, 1, value_len);
 
        p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n",
                 name, value_len, flags);
index ebba3b18e5da6dd5ff7d27cd248218937ab19a0f..701aaa9b18994a1e789adad6e7544817c320bc2b 100644 (file)
@@ -27,3 +27,15 @@ config AFS_FSCACHE
        help
          Say Y here if you want AFS data to be cached locally on disk through
          the generic filesystem cache manager
+
+config AFS_DEBUG_CURSOR
+       bool "AFS server cursor debugging"
+       depends on AFS_FS
+       help
+         Say Y here to cause the contents of a server cursor to be dumped to
+         the dmesg log if the server rotation algorithm fails to successfully
+         contact a server.
+
+         See <file:Documentation/filesystems/afs.txt> for more information.
+
+         If unsure, say N.
index 546874057bd3594bd0997d37b8801fd866461077..0738e2bf51936ed1e45ee84a0ed32a61e96c35f6 100644 (file)
@@ -17,6 +17,7 @@ kafs-y := \
        file.o \
        flock.o \
        fsclient.o \
+       fs_probe.o \
        inode.o \
        main.o \
        misc.o \
@@ -29,9 +30,13 @@ kafs-y := \
        super.o \
        netdevices.o \
        vlclient.o \
+       vl_list.o \
+       vl_probe.o \
+       vl_rotate.o \
        volume.o \
        write.o \
-       xattr.o
+       xattr.o \
+       yfsclient.o
 
 kafs-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_AFS_FS)  := kafs.o
index 55a756c60746ca7924625b99b0c2a42d429119fa..967db336d11ae016324f4f15d7cbd33b809045c2 100644 (file)
@@ -64,19 +64,25 @@ struct afs_addr_list *afs_alloc_addrlist(unsigned int nr,
 /*
  * Parse a text string consisting of delimited addresses.
  */
-struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
-                                          char delim,
-                                          unsigned short service,
-                                          unsigned short port)
+struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *net,
+                                              const char *text, size_t len,
+                                              char delim,
+                                              unsigned short service,
+                                              unsigned short port)
 {
+       struct afs_vlserver_list *vllist;
        struct afs_addr_list *alist;
        const char *p, *end = text + len;
+       const char *problem;
        unsigned int nr = 0;
+       int ret = -ENOMEM;
 
        _enter("%*.*s,%c", (int)len, (int)len, text, delim);
 
-       if (!len)
+       if (!len) {
+               _leave(" = -EDESTADDRREQ [empty]");
                return ERR_PTR(-EDESTADDRREQ);
+       }
 
        if (delim == ':' && (memchr(text, ',', len) || !memchr(text, '.', len)))
                delim = ',';
@@ -84,18 +90,24 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
        /* Count the addresses */
        p = text;
        do {
-               if (!*p)
-                       return ERR_PTR(-EINVAL);
+               if (!*p) {
+                       problem = "nul";
+                       goto inval;
+               }
                if (*p == delim)
                        continue;
                nr++;
                if (*p == '[') {
                        p++;
-                       if (p == end)
-                               return ERR_PTR(-EINVAL);
+                       if (p == end) {
+                               problem = "brace1";
+                               goto inval;
+                       }
                        p = memchr(p, ']', end - p);
-                       if (!p)
-                               return ERR_PTR(-EINVAL);
+                       if (!p) {
+                               problem = "brace2";
+                               goto inval;
+                       }
                        p++;
                        if (p >= end)
                                break;
@@ -109,10 +121,19 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
 
        _debug("%u/%u addresses", nr, AFS_MAX_ADDRESSES);
 
-       alist = afs_alloc_addrlist(nr, service, port);
-       if (!alist)
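+       /*
+        * A hand-typed address list is wrapped in a single dummy VL server
+        * record so that callers can treat it like a DNS-sourced list.
+        */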
+       vllist = afs_alloc_vlserver_list(1);
+       if (!vllist)
                return ERR_PTR(-ENOMEM);
 
+       vllist->nr_servers = 1;
+       vllist->servers[0].server = afs_alloc_vlserver("<dummy>", 7, AFS_VL_PORT);
+       if (!vllist->servers[0].server)
+               goto error_vl;
+
+       alist = afs_alloc_addrlist(nr, service, AFS_VL_PORT);
+       if (!alist)
+               goto error;
+
        /* Extract the addresses */
        p = text;
        do {
@@ -135,17 +156,21 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
                                        break;
                }
 
-               if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop))
+               if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) {
                        family = AF_INET;
-               else if (in6_pton(p, q - p, (u8 *)x, -1, &stop))
+               } else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) {
                        family = AF_INET6;
-               else
+               } else {
+                       problem = "family";
                        goto bad_address;
+               }
 
-               if (stop != q)
+               p = q;
+               if (stop != p) {
+                       problem = "nostop";
                        goto bad_address;
+               }
 
-               p = q;
                if (q < end && *q == ']')
                        p++;
 
@@ -154,18 +179,23 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
                                /* Port number specification "+1234" */
                                xport = 0;
                                p++;
-                               if (p >= end || !isdigit(*p))
+                               if (p >= end || !isdigit(*p)) {
+                                       problem = "port";
                                        goto bad_address;
+                               }
                                do {
                                        xport *= 10;
                                        xport += *p - '0';
-                                       if (xport > 65535)
+                                       if (xport > 65535) {
+                                               problem = "pval";
                                                goto bad_address;
+                                       }
                                        p++;
                                } while (p < end && isdigit(*p));
                        } else if (*p == delim) {
                                p++;
                        } else {
+                               problem = "weird";
                                goto bad_address;
                        }
                }
@@ -177,12 +207,23 @@ struct afs_addr_list *afs_parse_text_addrs(const char *text, size_t len,
 
        } while (p < end);
 
+       rcu_assign_pointer(vllist->servers[0].server->addresses, alist);
        _leave(" = [nr %u]", alist->nr_addrs);
-       return alist;
+       return vllist;
 
-bad_address:
-       kfree(alist);
+inval:
+       _leave(" = -EINVAL [%s %zu %*.*s]",
+              problem, p - text, (int)len, (int)len, text);
        return ERR_PTR(-EINVAL);
+bad_address:
+       _leave(" = -EINVAL [%s %zu %*.*s]",
+              problem, p - text, (int)len, (int)len, text);
+       ret = -EINVAL;
+error:
+       afs_put_addrlist(alist);
+error_vl:
+       afs_put_vlserverlist(net, vllist);
+       return ERR_PTR(ret);
 }
 
 /*
@@ -201,30 +242,34 @@ static int afs_cmp_addr_list(const struct afs_addr_list *a1,
 /*
 * Perform a DNS query for VL servers and build up an address list.
  */
-struct afs_addr_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
+struct afs_vlserver_list *afs_dns_query(struct afs_cell *cell, time64_t *_expiry)
 {
-       struct afs_addr_list *alist;
-       char *vllist = NULL;
+       struct afs_vlserver_list *vllist;
+       char *result = NULL;
        int ret;
 
        _enter("%s", cell->name);
 
-       ret = dns_query("afsdb", cell->name, cell->name_len,
-                       "", &vllist, _expiry);
-       if (ret < 0)
+       ret = dns_query("afsdb", cell->name, cell->name_len, "srv=1",
+                       &result, _expiry);
+       if (ret < 0) {
+               _leave(" = %d [dns]", ret);
                return ERR_PTR(ret);
-
-       alist = afs_parse_text_addrs(vllist, strlen(vllist), ',',
-                                    VL_SERVICE, AFS_VL_PORT);
-       if (IS_ERR(alist)) {
-               kfree(vllist);
-               if (alist != ERR_PTR(-ENOMEM))
-                       pr_err("Failed to parse DNS data\n");
-               return alist;
        }
 
-       kfree(vllist);
-       return alist;
+       if (*_expiry == 0)
+               *_expiry = ktime_get_real_seconds() + 60;
+
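+       /*
+        * A leading NUL byte marks a binary server list from the upcall; a
+        * plain text address list cannot begin with a NUL.
+        */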
+       if (ret > 1 && result[0] == 0)
+               vllist = afs_extract_vlserver_list(cell, result, ret);
+       else
+               vllist = afs_parse_text_addrs(cell->net, result, ret, ',',
+                                             VL_SERVICE, AFS_VL_PORT);
+       kfree(result);
+       if (IS_ERR(vllist) && vllist != ERR_PTR(-ENOMEM))
+               pr_err("Failed to parse DNS data %ld\n", PTR_ERR(vllist));
+
+       return vllist;
 }
 
 /*
@@ -258,6 +303,8 @@ void afs_merge_fs_addr4(struct afs_addr_list *alist, __be32 xdr, u16 port)
                        sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
 
        srx = &alist->addrs[i];
+       srx->srx_family = AF_RXRPC;
+       srx->transport_type = SOCK_DGRAM;
        srx->transport_len = sizeof(srx->transport.sin);
        srx->transport.sin.sin_family = AF_INET;
        srx->transport.sin.sin_port = htons(port);
@@ -296,6 +343,8 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
                        sizeof(alist->addrs[0]) * (alist->nr_addrs - i));
 
        srx = &alist->addrs[i];
+       srx->srx_family = AF_RXRPC;
+       srx->transport_type = SOCK_DGRAM;
        srx->transport_len = sizeof(srx->transport.sin6);
        srx->transport.sin6.sin6_family = AF_INET6;
        srx->transport.sin6.sin6_port = htons(port);
@@ -308,25 +357,33 @@ void afs_merge_fs_addr6(struct afs_addr_list *alist, __be32 *xdr, u16 port)
  */
 bool afs_iterate_addresses(struct afs_addr_cursor *ac)
 {
-       _enter("%hu+%hd", ac->start, (short)ac->index);
+       unsigned long set, failed;
+       int index;
 
        if (!ac->alist)
                return false;
 
-       if (ac->begun) {
-               ac->index++;
-               if (ac->index == ac->alist->nr_addrs)
-                       ac->index = 0;
+       set = ac->alist->responded;
+       failed = ac->alist->failed;
+       _enter("%lx-%lx-%lx,%d", set, failed, ac->tried, ac->index);
 
-               if (ac->index == ac->start) {
-                       ac->error = -EDESTADDRREQ;
-                       return false;
-               }
-       }
+       ac->nr_iterations++;
+
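+       /* Consider only addresses that have responded and have neither
+        * failed nor already been tried in this rotation.
+        */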
+       set &= ~(failed | ac->tried);
+
+       if (!set)
+               return false;
 
-       ac->begun = true;
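+       /* Prefer the address that last proved responsive if it is still a
+        * candidate; otherwise take the lowest remaining bit.
+        */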
+       index = READ_ONCE(ac->alist->preferred);
+       if (test_bit(index, &set))
+               goto selected;
+
+       index = __ffs(set);
+
+selected:
+       ac->index = index;
+       set_bit(index, &ac->tried);
        ac->responded = false;
-       ac->addr = &ac->alist->addrs[ac->index];
        return true;
 }
 
@@ -339,53 +396,13 @@ int afs_end_cursor(struct afs_addr_cursor *ac)
 
        alist = ac->alist;
        if (alist) {
-               if (ac->responded && ac->index != ac->start)
-                       WRITE_ONCE(alist->index, ac->index);
+               if (ac->responded &&
+                   ac->index != alist->preferred &&
+                   test_bit(ac->alist->preferred, &ac->tried))
+                       WRITE_ONCE(alist->preferred, ac->index);
                afs_put_addrlist(alist);
+               ac->alist = NULL;
        }
 
-       ac->addr = NULL;
-       ac->alist = NULL;
-       ac->begun = false;
        return ac->error;
 }
-
-/*
- * Set the address cursor for iterating over VL servers.
- */
-int afs_set_vl_cursor(struct afs_addr_cursor *ac, struct afs_cell *cell)
-{
-       struct afs_addr_list *alist;
-       int ret;
-
-       if (!rcu_access_pointer(cell->vl_addrs)) {
-               ret = wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET,
-                                 TASK_INTERRUPTIBLE);
-               if (ret < 0)
-                       return ret;
-
-               if (!rcu_access_pointer(cell->vl_addrs) &&
-                   ktime_get_real_seconds() < cell->dns_expiry)
-                       return cell->error;
-       }
-
-       read_lock(&cell->vl_addrs_lock);
-       alist = rcu_dereference_protected(cell->vl_addrs,
-                                         lockdep_is_held(&cell->vl_addrs_lock));
-       if (alist->nr_addrs > 0)
-               afs_get_addrlist(alist);
-       else
-               alist = NULL;
-       read_unlock(&cell->vl_addrs_lock);
-
-       if (!alist)
-               return -EDESTADDRREQ;
-
-       ac->alist = alist;
-       ac->addr = NULL;
-       ac->start = READ_ONCE(alist->index);
-       ac->index = ac->start;
-       ac->error = 0;
-       ac->begun = false;
-       return 0;
-}
index b4ff1f7ae4ab048a345bdbfae6ea895e31299abb..d12ffb457e4745809460707c02176d2e4a657e4b 100644 (file)
@@ -23,9 +23,9 @@
 #define AFSPATHMAX             1024    /* Maximum length of a pathname plus NUL */
 #define AFSOPAQUEMAX           1024    /* Maximum length of an opaque field */
 
-typedef unsigned                       afs_volid_t;
-typedef unsigned                       afs_vnodeid_t;
-typedef unsigned long long             afs_dataversion_t;
+typedef u64                    afs_volid_t;
+typedef u64                    afs_vnodeid_t;
+typedef u64                    afs_dataversion_t;
 
 typedef enum {
        AFSVL_RWVOL,                    /* read/write volume */
@@ -52,8 +52,9 @@ typedef enum {
  */
 struct afs_fid {
        afs_volid_t     vid;            /* volume ID */
-       afs_vnodeid_t   vnode;          /* file index within volume */
-       unsigned        unique;         /* unique ID number (file index version) */
+       afs_vnodeid_t   vnode;          /* Lower 64-bits of file index within volume */
+       u32             vnode_hi;       /* Upper 32-bits of file index */
+       u32             unique;         /* unique ID number (file index version) */
 };
 
 /*
@@ -67,14 +68,14 @@ typedef enum {
 } afs_callback_type_t;
 
 struct afs_callback {
+       time64_t                expires_at;     /* Time at which the callback expires */
        unsigned                version;        /* Callback version */
-       unsigned                expiry;         /* Time at which expires */
        afs_callback_type_t     type;           /* Type of callback */
 };
 
 struct afs_callback_break {
        struct afs_fid          fid;            /* File identifier */
-       struct afs_callback     cb;             /* Callback details */
+       //struct afs_callback   cb;             /* Callback details */
 };
 
 #define AFSCBMAX 50    /* maximum callbacks transferred per bulk op */
@@ -129,19 +130,18 @@ typedef u32 afs_access_t;
 struct afs_file_status {
        u64                     size;           /* file size */
        afs_dataversion_t       data_version;   /* current data version */
-       time_t                  mtime_client;   /* last time client changed data */
-       time_t                  mtime_server;   /* last time server changed data */
-       unsigned                abort_code;     /* Abort if bulk-fetching this failed */
-
-       afs_file_type_t         type;           /* file type */
-       unsigned                nlink;          /* link count */
-       u32                     author;         /* author ID */
-       u32                     owner;          /* owner ID */
-       u32                     group;          /* group ID */
+       struct timespec64       mtime_client;   /* Last time client changed data */
+       struct timespec64       mtime_server;   /* Last time server changed data */
+       s64                     author;         /* author ID */
+       s64                     owner;          /* owner ID */
+       s64                     group;          /* group ID */
        afs_access_t            caller_access;  /* access rights for authenticated caller */
        afs_access_t            anon_access;    /* access rights for unauthenticated caller */
        umode_t                 mode;           /* UNIX mode */
+       afs_file_type_t         type;           /* file type */
+       u32                     nlink;          /* link count */
        s32                     lock_count;     /* file lock count (0=UNLK -1=WRLCK +ve=#RDLCK) */
+       u32                     abort_code;     /* Abort if bulk-fetching this failed */
 };
 
 /*
@@ -158,25 +158,27 @@ struct afs_file_status {
  * AFS volume synchronisation information
  */
 struct afs_volsync {
-       time_t                  creation;       /* volume creation time */
+       time64_t                creation;       /* volume creation time */
 };
 
 /*
  * AFS volume status record
  */
 struct afs_volume_status {
-       u32                     vid;            /* volume ID */
-       u32                     parent_id;      /* parent volume ID */
+       afs_volid_t             vid;            /* volume ID */
+       afs_volid_t             parent_id;      /* parent volume ID */
        u8                      online;         /* true if volume currently online and available */
        u8                      in_service;     /* true if volume currently in service */
        u8                      blessed;        /* same as in_service */
        u8                      needs_salvage;  /* true if consistency checking required */
        u32                     type;           /* volume type (afs_voltype_t) */
-       u32                     min_quota;      /* minimum space set aside (blocks) */
-       u32                     max_quota;      /* maximum space this volume may occupy (blocks) */
-       u32                     blocks_in_use;  /* space this volume currently occupies (blocks) */
-       u32                     part_blocks_avail; /* space available in volume's partition */
-       u32                     part_max_blocks; /* size of volume's partition */
+       u64                     min_quota;      /* minimum space set aside (blocks) */
+       u64                     max_quota;      /* maximum space this volume may occupy (blocks) */
+       u64                     blocks_in_use;  /* space this volume currently occupies (blocks) */
+       u64                     part_blocks_avail; /* space available in volume's partition */
+       u64                     part_max_blocks; /* size of volume's partition */
+       s64                     vol_copy_date;
+       s64                     vol_backup_date;
 };
 
 #define AFS_BLOCK_SIZE 1024
index b1c31ec4523a897b0142ba8699ff48ac10f2d801..f6d0a21e8052f066482def18a8740c32032b731d 100644 (file)
@@ -49,7 +49,7 @@ static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
        struct afs_vnode *vnode = cookie_netfs_data;
        struct afs_vnode_cache_aux aux;
 
-       _enter("{%x,%x,%llx},%p,%u",
+       _enter("{%llx,%x,%llx},%p,%u",
               vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
               buffer, buflen);
 
index 5f261fbf2182b22a47fc93b7c6fee35f113e0097..1c7955f5cdaf2e776026390f615806f3e6ce535c 100644 (file)
@@ -210,12 +210,10 @@ void afs_init_callback_state(struct afs_server *server)
 /*
  * actually break a callback
  */
-void afs_break_callback(struct afs_vnode *vnode)
+void __afs_break_callback(struct afs_vnode *vnode)
 {
        _enter("");
 
-       write_seqlock(&vnode->cb_lock);
-
        clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
        if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
                vnode->cb_break++;
@@ -230,7 +228,12 @@ void afs_break_callback(struct afs_vnode *vnode)
                        afs_lock_may_be_available(vnode);
                spin_unlock(&vnode->lock);
        }
+}
 
+void afs_break_callback(struct afs_vnode *vnode)
+{
+       write_seqlock(&vnode->cb_lock);
+       __afs_break_callback(vnode);
        write_sequnlock(&vnode->cb_lock);
 }
 
@@ -310,14 +313,10 @@ void afs_break_callbacks(struct afs_server *server, size_t count,
        /* TODO: Sort the callback break list by volume ID */
 
        for (; count > 0; callbacks++, count--) {
-               _debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
+               _debug("- Fid { vl=%08llx n=%llu u=%u }",
                       callbacks->fid.vid,
                       callbacks->fid.vnode,
-                      callbacks->fid.unique,
-                      callbacks->cb.version,
-                      callbacks->cb.expiry,
-                      callbacks->cb.type
-                      );
+                      callbacks->fid.unique);
                afs_break_one_callback(server, &callbacks->fid);
        }
 
index 6127f0fcd62c4e376bd2554c1003aedb40aab471..cf445dbd5f2e05d4c716dadb3123fb397537d4e6 100644 (file)
@@ -20,6 +20,8 @@
 #include "internal.h"
 
 static unsigned __read_mostly afs_cell_gc_delay = 10;
+static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
+static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
 
 static void afs_manage_cell(struct work_struct *);
 
@@ -119,7 +121,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
  */
 static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                                       const char *name, unsigned int namelen,
-                                      const char *vllist)
+                                      const char *addresses)
 {
        struct afs_cell *cell;
        int i, ret;
@@ -134,7 +136,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
        if (namelen == 5 && memcmp(name, "@cell", 5) == 0)
                return ERR_PTR(-EINVAL);
 
-       _enter("%*.*s,%s", namelen, namelen, name, vllist);
+       _enter("%*.*s,%s", namelen, namelen, name, addresses);
 
        cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
        if (!cell) {
@@ -153,23 +155,26 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
                       (1 << AFS_CELL_FL_NO_LOOKUP_YET));
        INIT_LIST_HEAD(&cell->proc_volumes);
        rwlock_init(&cell->proc_lock);
-       rwlock_init(&cell->vl_addrs_lock);
+       rwlock_init(&cell->vl_servers_lock);
 
        /* Fill in the VL server list if we were given a list of addresses to
         * use.
         */
-       if (vllist) {
-               struct afs_addr_list *alist;
-
-               alist = afs_parse_text_addrs(vllist, strlen(vllist), ':',
-                                            VL_SERVICE, AFS_VL_PORT);
-               if (IS_ERR(alist)) {
-                       ret = PTR_ERR(alist);
+       if (addresses) {
+               struct afs_vlserver_list *vllist;
+
+               vllist = afs_parse_text_addrs(net,
+                                             addresses, strlen(addresses), ':',
+                                             VL_SERVICE, AFS_VL_PORT);
+               if (IS_ERR(vllist)) {
+                       ret = PTR_ERR(vllist);
                        goto parse_failed;
                }
 
-               rcu_assign_pointer(cell->vl_addrs, alist);
+               rcu_assign_pointer(cell->vl_servers, vllist);
                cell->dns_expiry = TIME64_MAX;
+       } else {
+               cell->dns_expiry = ktime_get_real_seconds();
        }
 
        _leave(" = %p", cell);
@@ -356,26 +361,40 @@ int afs_cell_init(struct afs_net *net, const char *rootcell)
  */
 static void afs_update_cell(struct afs_cell *cell)
 {
-       struct afs_addr_list *alist, *old;
-       time64_t now, expiry;
+       struct afs_vlserver_list *vllist, *old;
+       unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
+       unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
+       time64_t now, expiry = 0;
 
        _enter("%s", cell->name);
 
-       alist = afs_dns_query(cell, &expiry);
-       if (IS_ERR(alist)) {
-               switch (PTR_ERR(alist)) {
+       vllist = afs_dns_query(cell, &expiry);
+
+       now = ktime_get_real_seconds();
+       if (min_ttl > max_ttl)
+               max_ttl = min_ttl;
+       if (expiry < now + min_ttl)
+               expiry = now + min_ttl;
+       else if (expiry > now + max_ttl)
+               expiry = now + max_ttl;
+
+       if (IS_ERR(vllist)) {
+               switch (PTR_ERR(vllist)) {
                case -ENODATA:
-                       /* The DNS said that the cell does not exist */
+               case -EDESTADDRREQ:
+                       /* The DNS said that the cell does not exist or there
+                        * weren't any addresses to be had.
+                        */
                        set_bit(AFS_CELL_FL_NOT_FOUND, &cell->flags);
                        clear_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
-                       cell->dns_expiry = ktime_get_real_seconds() + 61;
+                       cell->dns_expiry = expiry;
                        break;
 
                case -EAGAIN:
                case -ECONNREFUSED:
                default:
                        set_bit(AFS_CELL_FL_DNS_FAIL, &cell->flags);
-                       cell->dns_expiry = ktime_get_real_seconds() + 10;
+                       cell->dns_expiry = now + 10;
                        break;
                }
 
@@ -387,12 +406,12 @@ static void afs_update_cell(struct afs_cell *cell)
                /* Exclusion on changing vl_addrs is achieved by a
                 * non-reentrant work item.
                 */
-               old = rcu_dereference_protected(cell->vl_addrs, true);
-               rcu_assign_pointer(cell->vl_addrs, alist);
+               old = rcu_dereference_protected(cell->vl_servers, true);
+               rcu_assign_pointer(cell->vl_servers, vllist);
                cell->dns_expiry = expiry;
 
                if (old)
-                       afs_put_addrlist(old);
+                       afs_put_vlserverlist(cell->net, old);
        }
 
        if (test_and_clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags))
@@ -414,7 +433,7 @@ static void afs_cell_destroy(struct rcu_head *rcu)
 
        ASSERTCMP(atomic_read(&cell->usage), ==, 0);
 
-       afs_put_addrlist(rcu_access_pointer(cell->vl_addrs));
+       afs_put_vlserverlist(cell->net, rcu_access_pointer(cell->vl_servers));
        key_put(cell->anonymous_key);
        kfree(cell);
 
index 9e51d6fe7e8f975f34f877217a28a8e99bcfa5e4..8ee5972893ed5a75583bfb2821a42636403ee086 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/ip.h>
 #include "internal.h"
 #include "afs_cm.h"
+#include "protocol_yfs.h"
 
 static int afs_deliver_cb_init_call_back_state(struct afs_call *);
 static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
@@ -30,6 +31,8 @@ static void SRXAFSCB_Probe(struct work_struct *);
 static void SRXAFSCB_ProbeUuid(struct work_struct *);
 static void SRXAFSCB_TellMeAboutYourself(struct work_struct *);
 
+static int afs_deliver_yfs_cb_callback(struct afs_call *);
+
 #define CM_NAME(name) \
        const char afs_SRXCB##name##_name[] __tracepoint_string =       \
                "CB." #name
@@ -100,13 +103,26 @@ static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
        .work           = SRXAFSCB_TellMeAboutYourself,
 };
 
+/*
+ * YFS CB.CallBack operation type
+ */
+static CM_NAME(YFS_CallBack);
+static const struct afs_call_type afs_SRXYFSCB_CallBack = {
+       .name           = afs_SRXCBYFS_CallBack_name,
+       .deliver        = afs_deliver_yfs_cb_callback,
+       .destructor     = afs_cm_destructor,
+       .work           = SRXAFSCB_CallBack,
+};
+
 /*
  * route an incoming cache manager call
  * - return T if supported, F if not
  */
 bool afs_cm_incoming_call(struct afs_call *call)
 {
-       _enter("{CB.OP %u}", call->operation_ID);
+       _enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);
+
+       call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall);
 
        switch (call->operation_ID) {
        case CBCallBack:
@@ -127,11 +143,101 @@ bool afs_cm_incoming_call(struct afs_call *call)
        case CBTellMeAboutYourself:
                call->type = &afs_SRXCBTellMeAboutYourself;
                return true;
+       case YFSCBCallBack:
+               if (call->service_id != YFS_CM_SERVICE)
+                       return false;
+               call->type = &afs_SRXYFSCB_CallBack;
+               return true;
        default:
                return false;
        }
 }
 
+/*
+ * Record a probe to the cache manager from a server.
+ */
+static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
+{
+       _enter("");
+
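+       /*
+        * The epoch quoted by the server changes when it reboots; note the
+        * first epoch seen, report a reboot when it changes and complain if
+        * different endpoints quote inconsistent epochs.
+        */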
+       if (test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags) &&
+           !test_bit(AFS_SERVER_FL_PROBING, &server->flags)) {
+               if (server->cm_epoch == call->epoch)
+                       return 0;
+
+               if (!server->probe.said_rebooted) {
+                       pr_notice("kAFS: FS rebooted %pU\n", &server->uuid);
+                       server->probe.said_rebooted = true;
+               }
+       }
+
+       spin_lock(&server->probe_lock);
+
+       if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
+               server->cm_epoch = call->epoch;
+               server->probe.cm_epoch = call->epoch;
+               goto out;
+       }
+
+       if (server->probe.cm_probed &&
+           call->epoch != server->probe.cm_epoch &&
+           !server->probe.said_inconsistent) {
+               pr_notice("kAFS: FS endpoints inconsistent %pU\n",
+                         &server->uuid);
+               server->probe.said_inconsistent = true;
+       }
+
+       if (!server->probe.cm_probed || call->epoch == server->cm_epoch)
+               server->probe.cm_epoch = server->cm_epoch;
+
+out:
+       server->probe.cm_probed = true;
+       spin_unlock(&server->probe_lock);
+       return 0;
+}
+
+/*
+ * Find the server record by peer address and record a probe to the cache
+ * manager from a server.
+ */
+static int afs_find_cm_server_by_peer(struct afs_call *call)
+{
+       struct sockaddr_rxrpc srx;
+       struct afs_server *server;
+
+       rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
+
+       server = afs_find_server(call->net, &srx);
+       if (!server) {
+               trace_afs_cm_no_server(call, &srx);
+               return 0;
+       }
+
+       call->cm_server = server;
+       return afs_record_cm_probe(call, server);
+}
+
+/*
+ * Find the server record by server UUID and record a probe to the cache
+ * manager from a server.
+ */
+static int afs_find_cm_server_by_uuid(struct afs_call *call,
+                                     struct afs_uuid *uuid)
+{
+       struct afs_server *server;
+
+       rcu_read_lock();
+       server = afs_find_server_by_uuid(call->net, call->request);
+       rcu_read_unlock();
+       if (!server) {
+               trace_afs_cm_no_server_u(call, call->request);
+               return 0;
+       }
+
+       call->cm_server = server;
+       return afs_record_cm_probe(call, server);
+}
+
 /*
  * Clean up a cache manager call.
  */
@@ -168,7 +274,6 @@ static void SRXAFSCB_CallBack(struct work_struct *work)
 static int afs_deliver_cb_callback(struct afs_call *call)
 {
        struct afs_callback_break *cb;
-       struct sockaddr_rxrpc srx;
        __be32 *bp;
        int ret, loop;
 
@@ -176,32 +281,32 @@ static int afs_deliver_cb_callback(struct afs_call *call)
 
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
                /* extract the FID array and its count in two steps */
        case 1:
                _debug("extract FID count");
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                call->count = ntohl(call->tmp);
                _debug("FID count: %u", call->count);
                if (call->count > AFSCBMAX)
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_cb_fid_count);
 
                call->buffer = kmalloc(array3_size(call->count, 3, 4),
                                       GFP_KERNEL);
                if (!call->buffer)
                        return -ENOMEM;
-               call->offset = 0;
+               afs_extract_to_buf(call, call->count * 3 * 4);
                call->unmarshall++;
 
        case 2:
                _debug("extract FID array");
-               ret = afs_extract_data(call, call->buffer,
-                                      call->count * 3 * 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -218,59 +323,46 @@ static int afs_deliver_cb_callback(struct afs_call *call)
                        cb->fid.vid     = ntohl(*bp++);
                        cb->fid.vnode   = ntohl(*bp++);
                        cb->fid.unique  = ntohl(*bp++);
-                       cb->cb.type     = AFSCM_CB_UNTYPED;
                }
 
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
                /* extract the callback array and its count in two steps */
        case 3:
                _debug("extract CB count");
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                call->count2 = ntohl(call->tmp);
                _debug("CB count: %u", call->count2);
                if (call->count2 != call->count && call->count2 != 0)
-                       return afs_protocol_error(call, -EBADMSG);
-               call->offset = 0;
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_cb_count);
+               call->_iter = &call->iter;
+               iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
                call->unmarshall++;
 
        case 4:
-               _debug("extract CB array");
-               ret = afs_extract_data(call, call->buffer,
-                                      call->count2 * 3 * 4, false);
+               _debug("extract discard %zu/%u",
+                      iov_iter_count(&call->iter), call->count2 * 3 * 4);
+
+               ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
-               _debug("unmarshall CB array");
-               cb = call->request;
-               bp = call->buffer;
-               for (loop = call->count2; loop > 0; loop--, cb++) {
-                       cb->cb.version  = ntohl(*bp++);
-                       cb->cb.expiry   = ntohl(*bp++);
-                       cb->cb.type     = ntohl(*bp++);
-               }
-
-               call->offset = 0;
                call->unmarshall++;
        case 5:
                break;
        }
 
        if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
-               return -EIO;
+               return afs_io_error(call, afs_io_error_cm_reply);
 
        /* we'll need the file server record as that tells us which set of
         * vnodes to operate upon */
-       rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
-       call->cm_server = afs_find_server(call->net, &srx);
-       if (!call->cm_server)
-               trace_afs_cm_no_server(call, &srx);
-
-       return afs_queue_call_work(call);
+       return afs_find_cm_server_by_peer(call);
 }
 
 /*
@@ -294,24 +386,18 @@ static void SRXAFSCB_InitCallBackState(struct work_struct *work)
  */
 static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
 {
-       struct sockaddr_rxrpc srx;
        int ret;
 
        _enter("");
 
-       rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
-
-       ret = afs_extract_data(call, NULL, 0, false);
+       afs_extract_discard(call, 0);
+       ret = afs_extract_data(call, false);
        if (ret < 0)
                return ret;
 
        /* we'll need the file server record as that tells us which set of
         * vnodes to operate upon */
-       call->cm_server = afs_find_server(call->net, &srx);
-       if (!call->cm_server)
-               trace_afs_cm_no_server(call, &srx);
-
-       return afs_queue_call_work(call);
+       return afs_find_cm_server_by_peer(call);
 }
 
 /*
@@ -330,16 +416,15 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
 
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
                call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
                if (!call->buffer)
                        return -ENOMEM;
+               afs_extract_to_buf(call, 11 * sizeof(__be32));
                call->unmarshall++;
 
        case 1:
                _debug("extract UUID");
-               ret = afs_extract_data(call, call->buffer,
-                                      11 * sizeof(__be32), false);
+               ret = afs_extract_data(call, false);
                switch (ret) {
                case 0:         break;
                case -EAGAIN:   return 0;
@@ -362,7 +447,6 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
                for (loop = 0; loop < 6; loop++)
                        r->node[loop] = ntohl(b[loop + 5]);
 
-               call->offset = 0;
                call->unmarshall++;
 
        case 2:
@@ -370,17 +454,11 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
        }
 
        if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
-               return -EIO;
+               return afs_io_error(call, afs_io_error_cm_reply);
 
        /* we'll need the file server record as that tells us which set of
         * vnodes to operate upon */
-       rcu_read_lock();
-       call->cm_server = afs_find_server_by_uuid(call->net, call->request);
-       rcu_read_unlock();
-       if (!call->cm_server)
-               trace_afs_cm_no_server_u(call, call->request);
-
-       return afs_queue_call_work(call);
+       return afs_find_cm_server_by_uuid(call, call->request);
 }
 
 /*
@@ -405,14 +483,14 @@ static int afs_deliver_cb_probe(struct afs_call *call)
 
        _enter("");
 
-       ret = afs_extract_data(call, NULL, 0, false);
+       afs_extract_discard(call, 0);
+       ret = afs_extract_data(call, false);
        if (ret < 0)
                return ret;
 
        if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
-               return -EIO;
-
-       return afs_queue_call_work(call);
+               return afs_io_error(call, afs_io_error_cm_reply);
+       return afs_find_cm_server_by_peer(call);
 }
 
 /*
@@ -453,16 +531,15 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
 
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
                call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
                if (!call->buffer)
                        return -ENOMEM;
+               afs_extract_to_buf(call, 11 * sizeof(__be32));
                call->unmarshall++;
 
        case 1:
                _debug("extract UUID");
-               ret = afs_extract_data(call, call->buffer,
-                                      11 * sizeof(__be32), false);
+               ret = afs_extract_data(call, false);
                switch (ret) {
                case 0:         break;
                case -EAGAIN:   return 0;
@@ -485,7 +562,6 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
                for (loop = 0; loop < 6; loop++)
                        r->node[loop] = ntohl(b[loop + 5]);
 
-               call->offset = 0;
                call->unmarshall++;
 
        case 2:
@@ -493,9 +569,8 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call)
        }
 
        if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
-               return -EIO;
-
-       return afs_queue_call_work(call);
+               return afs_io_error(call, afs_io_error_cm_reply);
+       return afs_find_cm_server_by_uuid(call, call->request);
 }
 
 /*
@@ -570,12 +645,88 @@ static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
 
        _enter("");
 
-       ret = afs_extract_data(call, NULL, 0, false);
+       afs_extract_discard(call, 0);
+       ret = afs_extract_data(call, false);
        if (ret < 0)
                return ret;
 
        if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
-               return -EIO;
+               return afs_io_error(call, afs_io_error_cm_reply);
+       return afs_find_cm_server_by_peer(call);
+}
+
+/*
+ * deliver request data to a YFS CB.CallBack call
+ */
+static int afs_deliver_yfs_cb_callback(struct afs_call *call)
+{
+       struct afs_callback_break *cb;
+       struct yfs_xdr_YFSFid *bp;
+       size_t size;
+       int ret, loop;
+
+       _enter("{%u}", call->unmarshall);
+
+       switch (call->unmarshall) {
+       case 0:
+               afs_extract_to_tmp(call);
+               call->unmarshall++;
+
+               /* extract the FID array and its count in two steps */
+       case 1:
+               _debug("extract FID count");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               call->count = ntohl(call->tmp);
+               _debug("FID count: %u", call->count);
+               if (call->count > YFSCBMAX)
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_cb_fid_count);
+
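+               /* Size the buffer from the wire YFSFid, which is larger than
+                * the three-word AFS FID.
+                */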
+               size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid));
+               call->buffer = kmalloc(size, GFP_KERNEL);
+               if (!call->buffer)
+                       return -ENOMEM;
+               afs_extract_to_buf(call, size);
+               call->unmarshall++;
+
+       case 2:
+               _debug("extract FID array");
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
+
+               _debug("unmarshall FID array");
+               call->request = kcalloc(call->count,
+                                       sizeof(struct afs_callback_break),
+                                       GFP_KERNEL);
+               if (!call->request)
+                       return -ENOMEM;
+
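+               /* Transcribe the 64-bit YFS volume and vnode numbers into
+                * the callback break records.
+                */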
+               cb = call->request;
+               bp = call->buffer;
+               for (loop = call->count; loop > 0; loop--, cb++) {
+                       cb->fid.vid     = xdr_to_u64(bp->volume);
+                       cb->fid.vnode   = xdr_to_u64(bp->vnode.lo);
+                       cb->fid.vnode_hi = ntohl(bp->vnode.hi);
+                       cb->fid.unique  = ntohl(bp->vnode.unique);
+                       bp++;
+               }
+
+               afs_extract_to_tmp(call);
+               call->unmarshall++;
+
+       case 3:
+               break;
+       }
+
+       if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
+               return afs_io_error(call, afs_io_error_cm_reply);
 
-       return afs_queue_call_work(call);
+       /* We'll need the file server record as that tells us which set of
+        * vnodes to operate upon.
+        */
+       return afs_find_cm_server_by_peer(call);
 }
index 855bf2b79fed4117559f6f011cacd3b43f74b927..8a2562e3a3163378deb576daeb6129ffa03cfafe 100644 (file)
@@ -138,6 +138,7 @@ static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
                               ntohs(dbuf->blocks[tmp].hdr.magic));
                        trace_afs_dir_check_failed(dvnode, off, i_size);
                        kunmap(page);
+                       trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
                        goto error;
                }
 
@@ -190,9 +191,11 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
 retry:
        i_size = i_size_read(&dvnode->vfs_inode);
        if (i_size < 2048)
-               return ERR_PTR(-EIO);
-       if (i_size > 2048 * 1024)
+               return ERR_PTR(afs_bad(dvnode, afs_file_error_dir_small));
+       if (i_size > 2048 * 1024) {
+               trace_afs_file_error(dvnode, -EFBIG, afs_file_error_dir_big);
                return ERR_PTR(-EFBIG);
+       }
 
        _enter("%llu", i_size);
 
@@ -315,7 +318,8 @@ content_has_grown:
 /*
  * deal with one block in an AFS directory
  */
-static int afs_dir_iterate_block(struct dir_context *ctx,
+static int afs_dir_iterate_block(struct afs_vnode *dvnode,
+                                struct dir_context *ctx,
                                 union afs_xdr_dir_block *block,
                                 unsigned blkoff)
 {
@@ -365,7 +369,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
                                       " (len %u/%zu)",
                                       blkoff / sizeof(union afs_xdr_dir_block),
                                       offset, next, tmp, nlen);
-                               return -EIO;
+                               return afs_bad(dvnode, afs_file_error_dir_over_end);
                        }
                        if (!(block->hdr.bitmap[next / 8] &
                              (1 << (next % 8)))) {
@@ -373,7 +377,7 @@ static int afs_dir_iterate_block(struct dir_context *ctx,
                                       " %u unmarked extension (len %u/%zu)",
                                       blkoff / sizeof(union afs_xdr_dir_block),
                                       offset, next, tmp, nlen);
-                               return -EIO;
+                               return afs_bad(dvnode, afs_file_error_dir_unmarked_ext);
                        }
 
                        _debug("ENT[%zu.%u]: ext %u/%zu",
@@ -442,7 +446,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
                 */
                page = req->pages[blkoff / PAGE_SIZE];
                if (!page) {
-                       ret = -EIO;
+                       ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
                        break;
                }
                mark_page_accessed(page);
@@ -455,7 +459,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
                do {
                        dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
                                               sizeof(union afs_xdr_dir_block)];
-                       ret = afs_dir_iterate_block(ctx, dblock, blkoff);
+                       ret = afs_dir_iterate_block(dvnode, ctx, dblock, blkoff);
                        if (ret != 1) {
                                kunmap(page);
                                goto out;
@@ -548,7 +552,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
        }
 
        *fid = cookie.fid;
-       _leave(" = 0 { vn=%u u=%u }", fid->vnode, fid->unique);
+       _leave(" = 0 { vn=%llu u=%u }", fid->vnode, fid->unique);
        return 0;
 }
 
@@ -826,7 +830,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
        struct key *key;
        int ret;
 
-       _enter("{%x:%u},%p{%pd},",
+       _enter("{%llx:%llu},%p{%pd},",
               dvnode->fid.vid, dvnode->fid.vnode, dentry, dentry);
 
        ASSERTCMP(d_inode(dentry), ==, NULL);
@@ -896,7 +900,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
 
        if (d_really_is_positive(dentry)) {
                vnode = AFS_FS_I(d_inode(dentry));
-               _enter("{v={%x:%u} n=%pd fl=%lx},",
+               _enter("{v={%llx:%llu} n=%pd fl=%lx},",
                       vnode->fid.vid, vnode->fid.vnode, dentry,
                       vnode->flags);
        } else {
@@ -965,7 +969,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
                /* if the vnode ID has changed, then the dirent points to a
                 * different file */
                if (fid.vnode != vnode->fid.vnode) {
-                       _debug("%pd: dirent changed [%u != %u]",
+                       _debug("%pd: dirent changed [%llu != %llu]",
                               dentry, fid.vnode,
                               vnode->fid.vnode);
                        goto not_found;
@@ -1071,8 +1075,6 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
        if (fc->ac.error < 0)
                return;
 
-       d_drop(new_dentry);
-
        inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
                         newfid, newstatus, newcb, fc->cbi);
        if (IS_ERR(inode)) {
@@ -1085,7 +1087,8 @@ static void afs_vnode_new_inode(struct afs_fs_cursor *fc,
 
        vnode = AFS_FS_I(inode);
        set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
-       d_add(new_dentry, inode);
+       afs_vnode_commit_status(fc, vnode, 0);
+       d_instantiate(new_dentry, inode);
 }
 
 /*
@@ -1104,7 +1107,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        mode |= S_IFDIR;
 
-       _enter("{%x:%u},{%pd},%ho",
+       _enter("{%llx:%llu},{%pd},%ho",
               dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
 
        key = afs_request_key(dvnode->volume->cell);
@@ -1169,12 +1172,12 @@ static void afs_dir_remove_subdir(struct dentry *dentry)
 static int afs_rmdir(struct inode *dir, struct dentry *dentry)
 {
        struct afs_fs_cursor fc;
-       struct afs_vnode *dvnode = AFS_FS_I(dir);
+       struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
        struct key *key;
        u64 data_version = dvnode->status.data_version;
        int ret;
 
-       _enter("{%x:%u},{%pd}",
+       _enter("{%llx:%llu},{%pd}",
               dvnode->fid.vid, dvnode->fid.vnode, dentry);
 
        key = afs_request_key(dvnode->volume->cell);
@@ -1183,11 +1186,19 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
                goto error;
        }
 
+       /* Try to make sure we have a callback promise on the victim. */
+       if (d_really_is_positive(dentry)) {
+               vnode = AFS_FS_I(d_inode(dentry));
+               ret = afs_validate(vnode, key);
+               if (ret < 0)
+                       goto error_key;
+       }
+
        ret = -ERESTARTSYS;
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
-                       afs_fs_remove(&fc, dentry->d_name.name, true,
+                       afs_fs_remove(&fc, vnode, dentry->d_name.name, true,
                                      data_version);
                }
 
@@ -1201,6 +1212,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
                }
        }
 
+error_key:
        key_put(key);
 error:
        return ret;
@@ -1231,7 +1243,9 @@ static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
        if (d_really_is_positive(dentry)) {
                struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
 
-               if (dir_valid) {
+               if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+                       /* Already done */
+               } else if (dir_valid) {
                        drop_nlink(&vnode->vfs_inode);
                        if (vnode->vfs_inode.i_nlink == 0) {
                                set_bit(AFS_VNODE_DELETED, &vnode->flags);
@@ -1260,13 +1274,13 @@ static int afs_dir_remove_link(struct dentry *dentry, struct key *key,
 static int afs_unlink(struct inode *dir, struct dentry *dentry)
 {
        struct afs_fs_cursor fc;
-       struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
+       struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode = NULL;
        struct key *key;
        unsigned long d_version = (unsigned long)dentry->d_fsdata;
        u64 data_version = dvnode->status.data_version;
        int ret;
 
-       _enter("{%x:%u},{%pd}",
+       _enter("{%llx:%llu},{%pd}",
               dvnode->fid.vid, dvnode->fid.vnode, dentry);
 
        if (dentry->d_name.len >= AFSNAMEMAX)
@@ -1290,7 +1304,18 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
        if (afs_begin_vnode_operation(&fc, dvnode, key)) {
                while (afs_select_fileserver(&fc)) {
                        fc.cb_break = afs_calc_vnode_cb_break(dvnode);
-                       afs_fs_remove(&fc, dentry->d_name.name, false,
+
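+                       /* Try the YFS RemoveFile2 op first; if the server
+                        * aborts with RXGEN_OPCODE, note that it lacks the
+                        * op and fall back to the plain remove below.
+                        */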
+                       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc.cbi->server->flags) &&
+                           !test_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags)) {
+                               yfs_fs_remove_file2(&fc, vnode, dentry->d_name.name,
+                                                   data_version);
+                               if (fc.ac.error != -ECONNABORTED ||
+                                   fc.ac.abort_code != RXGEN_OPCODE)
+                                       continue;
+                               set_bit(AFS_SERVER_FL_NO_RM2, &fc.cbi->server->flags);
+                       }
+
+                       afs_fs_remove(&fc, vnode, dentry->d_name.name, false,
                                      data_version);
                }
 
@@ -1330,7 +1355,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 
        mode |= S_IFREG;
 
-       _enter("{%x:%u},{%pd},%ho,",
+       _enter("{%llx:%llu},{%pd},%ho,",
               dvnode->fid.vid, dvnode->fid.vnode, dentry, mode);
 
        ret = -ENAMETOOLONG;
@@ -1393,7 +1418,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
        dvnode = AFS_FS_I(dir);
        data_version = dvnode->status.data_version;
 
-       _enter("{%x:%u},{%x:%u},{%pd}",
+       _enter("{%llx:%llu},{%llx:%llu},{%pd}",
               vnode->fid.vid, vnode->fid.vnode,
               dvnode->fid.vid, dvnode->fid.vnode,
               dentry);
@@ -1464,7 +1489,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
        u64 data_version = dvnode->status.data_version;
        int ret;
 
-       _enter("{%x:%u},{%pd},%s",
+       _enter("{%llx:%llu},{%pd},%s",
               dvnode->fid.vid, dvnode->fid.vnode, dentry,
               content);
 
@@ -1540,7 +1565,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
        orig_data_version = orig_dvnode->status.data_version;
        new_data_version = new_dvnode->status.data_version;
 
-       _enter("{%x:%u},{%x:%u},{%x:%u},{%pd}",
+       _enter("{%llx:%llu},{%llx:%llu},{%llx:%llu},{%pd}",
               orig_dvnode->fid.vid, orig_dvnode->fid.vnode,
               vnode->fid.vid, vnode->fid.vnode,
               new_dvnode->fid.vid, new_dvnode->fid.vnode,
@@ -1607,7 +1632,7 @@ static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
 {
        struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
 
-       _enter("{{%x:%u}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
 
        set_page_private(page, 0);
        ClearPagePrivate(page);
index f29c6dade7f6250348b886b44b8be150199f78f7..a9ba81ddf1546272d4a5cbb7e0885326c250c6ff 100644 (file)
@@ -46,7 +46,7 @@ static int afs_probe_cell_name(struct dentry *dentry)
                return 0;
        }
 
-       ret = dns_query("afsdb", name, len, "", NULL, NULL);
+       ret = dns_query("afsdb", name, len, "srv=1", NULL, NULL);
        if (ret == -ENODATA)
                ret = -EDESTADDRREQ;
        return ret;
@@ -62,7 +62,7 @@ struct inode *afs_try_auto_mntpt(struct dentry *dentry, struct inode *dir)
        struct inode *inode;
        int ret = -ENOENT;
 
-       _enter("%p{%pd}, {%x:%u}",
+       _enter("%p{%pd}, {%llx:%llu}",
               dentry, dentry, vnode->fid.vid, vnode->fid.vnode);
 
        if (!test_bit(AFS_VNODE_AUTOCELL, &vnode->flags))
index 7d4f26198573d7f6a4dffb7ff4a82ee0f8fbb573..d6bc3f5d784b5676185070ae208345dd2eef3325 100644 (file)
@@ -121,7 +121,7 @@ int afs_open(struct inode *inode, struct file *file)
        struct key *key;
        int ret;
 
-       _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
 
        key = afs_request_key(vnode->volume->cell);
        if (IS_ERR(key)) {
@@ -170,7 +170,7 @@ int afs_release(struct inode *inode, struct file *file)
        struct afs_vnode *vnode = AFS_FS_I(inode);
        struct afs_file *af = file->private_data;
 
-       _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
 
        if ((file->f_mode & FMODE_WRITE))
                return vfs_fsync(file, 0);
@@ -228,7 +228,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *de
        struct afs_fs_cursor fc;
        int ret;
 
-       _enter("%s{%x:%u.%u},%x,,,",
+       _enter("%s{%llx:%llu.%u},%x,,,",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
@@ -634,7 +634,7 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
        unsigned long priv;
 
-       _enter("{{%x:%u}[%lu],%lx},%x",
+       _enter("{{%llx:%llu}[%lu],%lx},%x",
               vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
               gfp_flags);
 
index dc62d15a964b8809d7028d33a393c41b6963242b..0568fd98682109e0dd686d5097bf9fe2e40ecfe1 100644 (file)
@@ -29,7 +29,7 @@ static const struct file_lock_operations afs_lock_ops = {
  */
 void afs_lock_may_be_available(struct afs_vnode *vnode)
 {
-       _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
 
        queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
 }
@@ -76,7 +76,7 @@ static int afs_set_lock(struct afs_vnode *vnode, struct key *key,
        struct afs_fs_cursor fc;
        int ret;
 
-       _enter("%s{%x:%u.%u},%x,%u",
+       _enter("%s{%llx:%llu.%u},%x,%u",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
@@ -107,7 +107,7 @@ static int afs_extend_lock(struct afs_vnode *vnode, struct key *key)
        struct afs_fs_cursor fc;
        int ret;
 
-       _enter("%s{%x:%u.%u},%x",
+       _enter("%s{%llx:%llu.%u},%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
@@ -138,7 +138,7 @@ static int afs_release_lock(struct afs_vnode *vnode, struct key *key)
        struct afs_fs_cursor fc;
        int ret;
 
-       _enter("%s{%x:%u.%u},%x",
+       _enter("%s{%llx:%llu.%u},%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
@@ -175,7 +175,7 @@ void afs_lock_work(struct work_struct *work)
        struct key *key;
        int ret;
 
-       _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
 
        spin_lock(&vnode->lock);
 
@@ -192,7 +192,7 @@ again:
                ret = afs_release_lock(vnode, vnode->lock_key);
                if (ret < 0)
                        printk(KERN_WARNING "AFS:"
-                              " Failed to release lock on {%x:%x} error %d\n",
+                              " Failed to release lock on {%llx:%llx} error %d\n",
                               vnode->fid.vid, vnode->fid.vnode, ret);
 
                spin_lock(&vnode->lock);
@@ -229,7 +229,7 @@ again:
                key_put(key);
 
                if (ret < 0)
-                       pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n",
+                       pr_warning("AFS: Failed to extend lock on {%llx:%llx} error %d\n",
                                   vnode->fid.vid, vnode->fid.vnode, ret);
 
                spin_lock(&vnode->lock);
@@ -430,7 +430,7 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
        struct key *key = afs_file_key(file);
        int ret;
 
-       _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+       _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
 
        /* only whole-file locks are supported */
        if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
@@ -582,7 +582,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
        struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
        int ret;
 
-       _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+       _enter("{%llx:%llu},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
 
        /* Flush all pending writes before doing anything with locks. */
        vfs_fsync(file, 0);
@@ -639,7 +639,7 @@ int afs_lock(struct file *file, int cmd, struct file_lock *fl)
 {
        struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
 
-       _enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
+       _enter("{%llx:%llu},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
               fl->fl_type, fl->fl_flags,
               (long long) fl->fl_start, (long long) fl->fl_end);
@@ -662,7 +662,7 @@ int afs_flock(struct file *file, int cmd, struct file_lock *fl)
 {
        struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
 
-       _enter("{%x:%u},%d,{t=%x,fl=%x}",
+       _enter("{%llx:%llu},%d,{t=%x,fl=%x}",
               vnode->fid.vid, vnode->fid.vnode, cmd,
               fl->fl_type, fl->fl_flags);
 
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
new file mode 100644 (file)
index 0000000..fde6b4d
--- /dev/null
@@ -0,0 +1,279 @@
+/* AFS fileserver probing
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "afs_fs.h"
+#include "internal.h"
+#include "protocol_yfs.h"
+
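+/*
+ * Note the completion of one address probe.  Returns true when the last
+ * outstanding probe for the server finishes and the PROBING flag is cleared.
+ */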
+static bool afs_fs_probe_done(struct afs_server *server)
+{
+       if (!atomic_dec_and_test(&server->probe_outstanding))
+               return false;
+
+       wake_up_var(&server->probe_outstanding);
+       clear_bit_unlock(AFS_SERVER_FL_PROBING, &server->flags);
+       wake_up_bit(&server->flags, AFS_SERVER_FL_PROBING);
+       return true;
+}
+
+/*
+ * Process the result of probing a fileserver.  This is called after successful
+ * or failed delivery of an FS.GetCapabilities operation.
+ */
+void afs_fileserver_probe_result(struct afs_call *call)
+{
+       struct afs_addr_list *alist = call->alist;
+       struct afs_server *server = call->reply[0];
+       unsigned int server_index = (long)call->reply[1];
+       unsigned int index = call->addr_ix;
+       unsigned int rtt = UINT_MAX;
+       bool have_result = false;
+       u64 _rtt;
+       int ret = call->error;
+
+       _enter("%pU,%u", &server->uuid, index);
+
+       spin_lock(&server->probe_lock);
+
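+       /* Classify the result: an abort still counts as the server
+        * responding, whereas local, routing and timeout failures mark this
+        * address as failed.
+        */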
+       switch (ret) {
+       case 0:
+               server->probe.error = 0;
+               goto responded;
+       case -ECONNABORTED:
+               if (!server->probe.responded) {
+                       server->probe.abort_code = call->abort_code;
+                       server->probe.error = ret;
+               }
+               goto responded;
+       case -ENOMEM:
+       case -ENONET:
+               server->probe.local_failure = true;
+               afs_io_error(call, afs_io_error_fs_probe_fail);
+               goto out;
+       case -ECONNRESET: /* Responded, but call expired. */
+       case -ERFKILL:
+       case -EADDRNOTAVAIL:
+       case -ENETUNREACH:
+       case -EHOSTUNREACH:
+       case -EHOSTDOWN:
+       case -ECONNREFUSED:
+       case -ETIMEDOUT:
+       case -ETIME:
+       default:
+               clear_bit(index, &alist->responded);
+               set_bit(index, &alist->failed);
+               if (!server->probe.responded &&
+                   (server->probe.error == 0 ||
+                    server->probe.error == -ETIMEDOUT ||
+                    server->probe.error == -ETIME))
+                       server->probe.error = ret;
+               afs_io_error(call, afs_io_error_fs_probe_fail);
+               goto out;
+       }
+
+responded:
+       set_bit(index, &alist->responded);
+       clear_bit(index, &alist->failed);
+
+       if (call->service_id == YFS_FS_SERVICE) {
+               server->probe.is_yfs = true;
+               set_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+               alist->addrs[index].srx_service = call->service_id;
+       } else {
+               server->probe.not_yfs = true;
+               if (!server->probe.is_yfs) {
+                       clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
+                       alist->addrs[index].srx_service = call->service_id;
+               }
+       }
+
+       /* Get the RTT and scale it to fit into a 32-bit value that represents
+        * over a minute of time so that we can access it with one instruction
+        * on a 32-bit system.
+        */
+       _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
+       _rtt /= 64;
+       rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
+       if (rtt < server->probe.rtt) {
+               server->probe.rtt = rtt;
+               alist->preferred = index;
+               have_result = true;
+       }
+
+       smp_wmb(); /* Set rtt before responded. */
+       server->probe.responded = true;
+       set_bit(AFS_SERVER_FL_PROBED, &server->flags);
+out:
+       spin_unlock(&server->probe_lock);
+
+       _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
+              server_index, index, &alist->addrs[index].transport,
+              (unsigned int)rtt, ret);
+
+       have_result |= afs_fs_probe_done(server);
+       if (have_result) {
+               server->probe.have_result = true;
+               wake_up_var(&server->probe.have_result);
+               wake_up_all(&server->probe_wq);
+       }
+}
+
+/*
+ * Probe all of a fileserver's addresses to find out the best route and to
+ * query its capabilities.
+ */
+static bool afs_do_probe_fileserver(struct afs_net *net,
+                                    struct afs_server *server,
+                                    struct key *key,
+                                    unsigned int server_index,
+                                    struct afs_error *_e)
+{
+       struct afs_addr_cursor ac = {
+               .index = 0,
+       };
+       bool in_progress = false;
+       int err;
+
+       _enter("%pU", &server->uuid);
+
+       read_lock(&server->fs_lock);
+       ac.alist = rcu_dereference_protected(server->addresses,
+                                            lockdep_is_held(&server->fs_lock));
+       read_unlock(&server->fs_lock);
+
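+       /* One probe call is sent to each address; probe_outstanding is
+        * counted back down as the replies and failures come in.
+        */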
+       atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+       memset(&server->probe, 0, sizeof(server->probe));
+       server->probe.rtt = UINT_MAX;
+
+       for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
+               err = afs_fs_get_capabilities(net, server, &ac, key, server_index,
+                                             true);
+               if (err == -EINPROGRESS)
+                       in_progress = true;
+               else
+                       afs_prioritise_error(_e, err, ac.abort_code);
+       }
+
+       if (!in_progress)
+               afs_fs_probe_done(server);
+       return in_progress;
+}
+
+/*
+ * Send off probes to all unprobed servers.
+ */
+int afs_probe_fileservers(struct afs_net *net, struct key *key,
+                         struct afs_server_list *list)
+{
+       struct afs_server *server;
+       struct afs_error e;
+       bool in_progress = false;
+       int i;
+
+       e.error = 0;
+       e.responded = false;
+       for (i = 0; i < list->nr_servers; i++) {
+               server = list->servers[i].server;
+               if (test_bit(AFS_SERVER_FL_PROBED, &server->flags))
+                       continue;
+
+               if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &server->flags) &&
+                   afs_do_probe_fileserver(net, server, key, i, &e))
+                       in_progress = true;
+       }
+
+       return in_progress ? 0 : e.error;
+}
+
+/*
+ * Wait for the first as-yet untried fileserver to respond.
+ */
+int afs_wait_for_fs_probes(struct afs_server_list *slist, unsigned long untried)
+{
+       struct wait_queue_entry *waits;
+       struct afs_server *server;
+       unsigned int rtt = UINT_MAX;
+       bool have_responders = false;
+       int pref = -1, i;
+
+       _enter("%u,%lx", slist->nr_servers, untried);
+
+       /* Only wait for servers that have a probe outstanding. */
+       for (i = 0; i < slist->nr_servers; i++) {
+               if (test_bit(i, &untried)) {
+                       server = slist->servers[i].server;
+                       if (!test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+                               __clear_bit(i, &untried);
+                       if (server->probe.responded)
+                               have_responders = true;
+               }
+       }
+       if (have_responders || !untried)
+               return 0;
+
+       waits = kmalloc(array_size(slist->nr_servers, sizeof(*waits)), GFP_KERNEL);
+       if (!waits)
+               return -ENOMEM;
+
+       for (i = 0; i < slist->nr_servers; i++) {
+               if (test_bit(i, &untried)) {
+                       server = slist->servers[i].server;
+                       init_waitqueue_entry(&waits[i], current);
+                       add_wait_queue(&server->probe_wq, &waits[i]);
+               }
+       }
+
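+       /* Sleep until one of the untried servers responds, all of their
+        * probes complete or we get signalled.
+        */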
+       for (;;) {
+               bool still_probing = false;
+
+               set_current_state(TASK_INTERRUPTIBLE);
+               for (i = 0; i < slist->nr_servers; i++) {
+                       if (test_bit(i, &untried)) {
+                               server = slist->servers[i].server;
+                               if (server->probe.responded)
+                                       goto stop;
+                               if (test_bit(AFS_SERVER_FL_PROBING, &server->flags))
+                                       still_probing = true;
+                       }
+               }
+
+               if (!still_probing || unlikely(signal_pending(current)))
+                       goto stop;
+               schedule();
+       }
+
+stop:
+       set_current_state(TASK_RUNNING);
+
+       for (i = 0; i < slist->nr_servers; i++) {
+               if (test_bit(i, &untried)) {
+                       server = slist->servers[i].server;
+                       if (server->probe.responded &&
+                           server->probe.rtt < rtt) {
+                               pref = i;
+                               rtt = server->probe.rtt;
+                       }
+
+                       remove_wait_queue(&server->probe_wq, &waits[i]);
+               }
+       }
+
+       kfree(waits);
+
+       if (pref == -1 && signal_pending(current))
+               return -ERESTARTSYS;
+
+       if (pref >= 0)
+               slist->preferred = pref;
+       return 0;
+}
index 50929cb91732f5adec19706788e6a31aeb8beb03..ca08c83168f5fbf1f7f6b52c8c3ff769bf70cf04 100644 (file)
 #include "internal.h"
 #include "afs_fs.h"
 #include "xdr_fs.h"
+#include "protocol_yfs.h"
 
 static const struct afs_fid afs_zero_fid;
 
-/*
- * We need somewhere to discard into in case the server helpfully returns more
- * than we asked for in FS.FetchData{,64}.
- */
-static u8 afs_discard_buffer[64];
-
 static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
 {
        call->cbi = afs_get_cb_interest(cbi);
@@ -75,8 +70,7 @@ void afs_update_inode_from_status(struct afs_vnode *vnode,
        struct timespec64 t;
        umode_t mode;
 
-       t.tv_sec = status->mtime_client;
-       t.tv_nsec = 0;
+       t = status->mtime_client;
        vnode->vfs_inode.i_ctime = t;
        vnode->vfs_inode.i_mtime = t;
        vnode->vfs_inode.i_atime = t;
@@ -96,7 +90,7 @@ void afs_update_inode_from_status(struct afs_vnode *vnode,
        if (!(flags & AFS_VNODE_NOT_YET_SET)) {
                if (expected_version &&
                    *expected_version != status->data_version) {
-                       _debug("vnode modified %llx on {%x:%u} [exp %llx]",
+                       _debug("vnode modified %llx on {%llx:%llu} [exp %llx]",
                               (unsigned long long) status->data_version,
                               vnode->fid.vid, vnode->fid.vnode,
                               (unsigned long long) *expected_version);
@@ -170,7 +164,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
                if (type != status->type &&
                    vnode &&
                    !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
-                       pr_warning("Vnode %x:%x:%x changed type %u to %u\n",
+                       pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
                                   vnode->fid.vid,
                                   vnode->fid.vnode,
                                   vnode->fid.unique,
@@ -200,8 +194,10 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
        EXTRACT_M(mode);
        EXTRACT_M(group);
 
-       status->mtime_client = ntohl(xdr->mtime_client);
-       status->mtime_server = ntohl(xdr->mtime_server);
+       status->mtime_client.tv_sec = ntohl(xdr->mtime_client);
+       status->mtime_client.tv_nsec = 0;
+       status->mtime_server.tv_sec = ntohl(xdr->mtime_server);
+       status->mtime_server.tv_nsec = 0;
        status->lock_count   = ntohl(xdr->lock_count);
 
        size  = (u64)ntohl(xdr->size_lo);
@@ -233,7 +229,7 @@ static int xdr_decode_AFSFetchStatus(struct afs_call *call,
 
 bad:
        xdr_dump_bad(*_bp);
-       return afs_protocol_error(call, -EBADMSG);
+       return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
 }
 
 /*
@@ -273,7 +269,7 @@ static void xdr_decode_AFSCallBack(struct afs_call *call,
 
        write_seqlock(&vnode->cb_lock);
 
-       if (call->cb_break == afs_cb_break_sum(vnode, cbi)) {
+       if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) {
                vnode->cb_version       = ntohl(*bp++);
                cb_expiry               = ntohl(*bp++);
                vnode->cb_type          = ntohl(*bp++);
@@ -293,13 +289,19 @@ static void xdr_decode_AFSCallBack(struct afs_call *call,
        *_bp = bp;
 }
 
-static void xdr_decode_AFSCallBack_raw(const __be32 **_bp,
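+/*
+ * Convert a relative expiry time in seconds into an absolute expiry time
+ * based on the moment the reply was received.
+ */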
+static ktime_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
+{
+       return ktime_add_ns(call->reply_time, expiry * NSEC_PER_SEC);
+}
+
+static void xdr_decode_AFSCallBack_raw(struct afs_call *call,
+                                      const __be32 **_bp,
                                       struct afs_callback *cb)
 {
        const __be32 *bp = *_bp;
 
        cb->version     = ntohl(*bp++);
-       cb->expiry      = ntohl(*bp++);
+       cb->expires_at  = xdr_decode_expiry(call, ntohl(*bp++));
        cb->type        = ntohl(*bp++);
        *_bp = bp;
 }
@@ -311,14 +313,18 @@ static void xdr_decode_AFSVolSync(const __be32 **_bp,
                                  struct afs_volsync *volsync)
 {
        const __be32 *bp = *_bp;
+       u32 creation;
 
-       volsync->creation = ntohl(*bp++);
+       creation = ntohl(*bp++);
        bp++; /* spare2 */
        bp++; /* spare3 */
        bp++; /* spare4 */
        bp++; /* spare5 */
        bp++; /* spare6 */
        *_bp = bp;
+
+       if (volsync)
+               volsync->creation = creation;
 }
 
 /*
@@ -379,6 +385,8 @@ static void xdr_decode_AFSFetchVolumeStatus(const __be32 **_bp,
        vs->blocks_in_use       = ntohl(*bp++);
        vs->part_blocks_avail   = ntohl(*bp++);
        vs->part_max_blocks     = ntohl(*bp++);
+       vs->vol_copy_date       = 0;
+       vs->vol_backup_date     = 0;
        *_bp = bp;
 }
 
@@ -395,16 +403,16 @@ static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
        if (ret < 0)
                return ret;
 
-       _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       if (afs_decode_status(call, &bp, &vnode->status, vnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
        xdr_decode_AFSCallBack(call, vnode, &bp);
-       if (call->reply[1])
-               xdr_decode_AFSVolSync(&bp, call->reply[1]);
+       xdr_decode_AFSVolSync(&bp, call->reply[1]);
 
        _leave(" = 0 [done]");
        return 0;
@@ -431,7 +439,10 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
-       _enter(",%x,{%x:%u},,",
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_fetch_file_status(fc, volsync, new_inode);
+
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
        call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode,
@@ -445,6 +456,7 @@ int afs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsy
        call->reply[0] = vnode;
        call->reply[1] = volsync;
        call->expected_version = new_inode ? 1 : vnode->status.data_version;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -468,139 +480,117 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
        struct afs_read *req = call->reply[2];
        const __be32 *bp;
        unsigned int size;
-       void *buffer;
        int ret;
 
-       _enter("{%u,%zu/%u;%llu/%llu}",
-              call->unmarshall, call->offset, call->count,
-              req->remain, req->actual_len);
+       _enter("{%u,%zu/%llu}",
+              call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
 
        switch (call->unmarshall) {
        case 0:
                req->actual_len = 0;
-               call->offset = 0;
+               req->index = 0;
+               req->offset = req->pos & (PAGE_SIZE - 1);
                call->unmarshall++;
-               if (call->operation_ID != FSFETCHDATA64) {
-                       call->unmarshall++;
-                       goto no_msw;
+               if (call->operation_ID == FSFETCHDATA64) {
+                       afs_extract_to_tmp64(call);
+               } else {
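+                       /* Zero what is presumably the upper half of the
+                        * 64-bit length so that both variants can be read
+                        * back below as a single be64.
+                        */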
+                       call->tmp_u = htonl(0);
+                       afs_extract_to_tmp(call);
                }
 
-               /* extract the upper part of the returned data length of an
-                * FSFETCHDATA64 op (which should always be 0 using this
-                * client) */
-       case 1:
-               _debug("extract data length (MSW)");
-               ret = afs_extract_data(call, &call->tmp, 4, true);
-               if (ret < 0)
-                       return ret;
-
-               req->actual_len = ntohl(call->tmp);
-               req->actual_len <<= 32;
-               call->offset = 0;
-               call->unmarshall++;
-
-       no_msw:
                /* extract the returned data length */
-       case 2:
+       case 1:
                _debug("extract data length");
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
-               req->actual_len |= ntohl(call->tmp);
+               req->actual_len = be64_to_cpu(call->tmp64);
                _debug("DATA length: %llu", req->actual_len);
-
-               req->remain = req->actual_len;
-               call->offset = req->pos & (PAGE_SIZE - 1);
-               req->index = 0;
-               if (req->actual_len == 0)
+               req->remain = min(req->len, req->actual_len);
+               if (req->remain == 0)
                        goto no_more_data;
+
                call->unmarshall++;
 
        begin_page:
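+               /* Map at most one page per pass into a single-element bvec
+                * so that the payload is written straight into the target
+                * page.
+                */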
                ASSERTCMP(req->index, <, req->nr_pages);
-               if (req->remain > PAGE_SIZE - call->offset)
-                       size = PAGE_SIZE - call->offset;
+               if (req->remain > PAGE_SIZE - req->offset)
+                       size = PAGE_SIZE - req->offset;
                else
                        size = req->remain;
-               call->count = call->offset + size;
-               ASSERTCMP(call->count, <=, PAGE_SIZE);
-               req->remain -= size;
+               call->bvec[0].bv_len = size;
+               call->bvec[0].bv_offset = req->offset;
+               call->bvec[0].bv_page = req->pages[req->index];
+               iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+               ASSERTCMP(size, <=, PAGE_SIZE);
 
                /* extract the returned data */
-       case 3:
-               _debug("extract data %llu/%llu %zu/%u",
-                      req->remain, req->actual_len, call->offset, call->count);
+       case 2:
+               _debug("extract data %zu/%llu",
+                      iov_iter_count(&call->iter), req->remain);
 
-               buffer = kmap(req->pages[req->index]);
-               ret = afs_extract_data(call, buffer, call->count, true);
-               kunmap(req->pages[req->index]);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
-               if (call->offset == PAGE_SIZE) {
+               req->remain -= call->bvec[0].bv_len;
+               req->offset += call->bvec[0].bv_len;
+               ASSERTCMP(req->offset, <=, PAGE_SIZE);
+               if (req->offset == PAGE_SIZE) {
+                       req->offset = 0;
                        if (req->page_done)
                                req->page_done(call, req);
                        req->index++;
-                       if (req->remain > 0) {
-                               call->offset = 0;
-                               if (req->index >= req->nr_pages) {
-                                       call->unmarshall = 4;
-                                       goto begin_discard;
-                               }
+                       if (req->remain > 0)
                                goto begin_page;
-                       }
                }
-               goto no_more_data;
+
+               ASSERTCMP(req->remain, ==, 0);
+               if (req->actual_len <= req->len)
+                       goto no_more_data;
 
                /* Discard any excess data the server gave us */
-       begin_discard:
-       case 4:
-               size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
-               call->count = size;
-               _debug("extract discard %llu/%llu %zu/%u",
-                      req->remain, req->actual_len, call->offset, call->count);
-
-               call->offset = 0;
-               ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
-               req->remain -= call->offset;
+               iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
+               call->unmarshall = 3;
+       case 3:
+               _debug("extract discard %zu/%llu",
+                      iov_iter_count(&call->iter), req->actual_len - req->len);
+
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
-               if (req->remain > 0)
-                       goto begin_discard;
 
        no_more_data:
-               call->offset = 0;
-               call->unmarshall = 5;
+               call->unmarshall = 4;
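+               /* The reply trailer comprises the status (21 words),
+                * callback (3 words) and volsync (6 words) records.
+                */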
+               afs_extract_to_buf(call, (21 + 3 + 6) * 4);
 
                /* extract the metadata */
-       case 5:
-               ret = afs_extract_data(call, call->buffer,
-                                      (21 + 3 + 6) * 4, false);
+       case 4:
+               ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
                bp = call->buffer;
-               if (afs_decode_status(call, &bp, &vnode->status, vnode,
-                                     &vnode->status.data_version, req) < 0)
-                       return afs_protocol_error(call, -EBADMSG);
+               ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                                       &vnode->status.data_version, req);
+               if (ret < 0)
+                       return ret;
                xdr_decode_AFSCallBack(call, vnode, &bp);
-               if (call->reply[1])
-                       xdr_decode_AFSVolSync(&bp, call->reply[1]);
+               xdr_decode_AFSVolSync(&bp, call->reply[1]);
 
-               call->offset = 0;
                call->unmarshall++;
 
-       case 6:
+       case 5:
                break;
        }
 
        for (; req->index < req->nr_pages; req->index++) {
-               if (call->count < PAGE_SIZE)
+               if (req->offset < PAGE_SIZE)
                        zero_user_segment(req->pages[req->index],
-                                         call->count, PAGE_SIZE);
+                                         req->offset, PAGE_SIZE);
                if (req->page_done)
                        req->page_done(call, req);
-               call->count = 0;
+               req->offset = 0;
        }
 
        _leave(" = 0 [done]");
@@ -653,6 +643,7 @@ static int afs_fs_fetch_data64(struct afs_fs_cursor *fc, struct afs_read *req)
        call->reply[1] = NULL; /* volsync */
        call->reply[2] = req;
        call->expected_version = vnode->status.data_version;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -682,6 +673,9 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_fetch_data(fc, req);
+
        if (upper_32_bits(req->pos) ||
            upper_32_bits(req->len) ||
            upper_32_bits(req->pos + req->len))
@@ -698,6 +692,7 @@ int afs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
        call->reply[1] = NULL; /* volsync */
        call->reply[2] = req;
        call->expected_version = vnode->status.data_version;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -733,11 +728,14 @@ static int afs_deliver_fs_create_vnode(struct afs_call *call)
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
        xdr_decode_AFSFid(&bp, call->reply[1]);
-       if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) < 0 ||
-           afs_decode_status(call, &bp, &vnode->status, vnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
-       xdr_decode_AFSCallBack_raw(&bp, call->reply[3]);
+       ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_AFSCallBack_raw(call, &bp, call->reply[3]);
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        _leave(" = 0 [done]");
@@ -778,6 +776,15 @@ int afs_fs_create(struct afs_fs_cursor *fc,
        size_t namesz, reqsz, padsz;
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags)) {
+               if (S_ISDIR(mode))
+                       return yfs_fs_make_dir(fc, name, mode, current_data_version,
+                                              newfid, newstatus, newcb);
+               else
+                       return yfs_fs_create_file(fc, name, mode, current_data_version,
+                                                 newfid, newstatus, newcb);
+       }
+
        _enter("");
 
        namesz = strlen(name);
@@ -796,6 +803,7 @@ int afs_fs_create(struct afs_fs_cursor *fc,
        call->reply[2] = newstatus;
        call->reply[3] = newcb;
        call->expected_version = current_data_version + 1;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -839,9 +847,10 @@ static int afs_deliver_fs_remove(struct afs_call *call)
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       if (afs_decode_status(call, &bp, &vnode->status, vnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        _leave(" = 0 [done]");
@@ -868,15 +877,18 @@ static const struct afs_call_type afs_RXFSRemoveDir = {
 /*
  * remove a file or directory
  */
-int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
-                 u64 current_data_version)
+int afs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+                 const char *name, bool isdir, u64 current_data_version)
 {
-       struct afs_vnode *vnode = fc->vnode;
+       struct afs_vnode *dvnode = fc->vnode;
        struct afs_call *call;
-       struct afs_net *net = afs_v2net(vnode);
+       struct afs_net *net = afs_v2net(dvnode);
        size_t namesz, reqsz, padsz;
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_remove(fc, vnode, name, isdir, current_data_version);
+
        _enter("");
 
        namesz = strlen(name);
@@ -890,15 +902,16 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
                return -ENOMEM;
 
        call->key = fc->key;
-       call->reply[0] = vnode;
+       call->reply[0] = dvnode;
+       call->reply[1] = vnode;
        call->expected_version = current_data_version + 1;
 
        /* marshall the parameters */
        bp = call->request;
        *bp++ = htonl(isdir ? FSREMOVEDIR : FSREMOVEFILE);
-       *bp++ = htonl(vnode->fid.vid);
-       *bp++ = htonl(vnode->fid.vnode);
-       *bp++ = htonl(vnode->fid.unique);
+       *bp++ = htonl(dvnode->fid.vid);
+       *bp++ = htonl(dvnode->fid.vnode);
+       *bp++ = htonl(dvnode->fid.unique);
        *bp++ = htonl(namesz);
        memcpy(bp, name, namesz);
        bp = (void *) bp + namesz;
@@ -908,7 +921,7 @@ int afs_fs_remove(struct afs_fs_cursor *fc, const char *name, bool isdir,
        }
 
        afs_use_fs_server(call, fc->cbi);
-       trace_afs_make_fs_call(call, &vnode->fid);
+       trace_afs_make_fs_call(call, &dvnode->fid);
        return afs_make_call(&fc->ac, call, GFP_NOFS, false);
 }
 
@@ -929,10 +942,13 @@ static int afs_deliver_fs_link(struct afs_call *call)
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       if (afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL) < 0 ||
-           afs_decode_status(call, &bp, &dvnode->status, dvnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       ret = afs_decode_status(call, &bp, &dvnode->status, dvnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        _leave(" = 0 [done]");
@@ -961,6 +977,9 @@ int afs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
        size_t namesz, reqsz, padsz;
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_link(fc, vnode, name, current_data_version);
+
        _enter("");
 
        namesz = strlen(name);
@@ -1016,10 +1035,13 @@ static int afs_deliver_fs_symlink(struct afs_call *call)
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
        xdr_decode_AFSFid(&bp, call->reply[1]);
-       if (afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL) ||
-           afs_decode_status(call, &bp, &vnode->status, vnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        _leave(" = 0 [done]");
@@ -1052,6 +1074,10 @@ int afs_fs_symlink(struct afs_fs_cursor *fc,
        size_t namesz, reqsz, padsz, c_namesz, c_padsz;
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_symlink(fc, name, contents, current_data_version,
+                                     newfid, newstatus);
+
        _enter("");
 
        namesz = strlen(name);
@@ -1122,13 +1148,16 @@ static int afs_deliver_fs_rename(struct afs_call *call)
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       if (afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
-       if (new_dvnode != orig_dvnode &&
-           afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
-                             &call->expected_version_2, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       if (new_dvnode != orig_dvnode) {
+               ret = afs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
+                                       &call->expected_version_2, NULL);
+               if (ret < 0)
+                       return ret;
+       }
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        _leave(" = 0 [done]");
@@ -1161,6 +1190,12 @@ int afs_fs_rename(struct afs_fs_cursor *fc,
        size_t reqsz, o_namesz, o_padsz, n_namesz, n_padsz;
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_rename(fc, orig_name,
+                                    new_dvnode, new_name,
+                                    current_orig_data_version,
+                                    current_new_data_version);
+
        _enter("");
 
        o_namesz = strlen(orig_name);
@@ -1231,9 +1266,10 @@ static int afs_deliver_fs_store_data(struct afs_call *call)
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       if (afs_decode_status(call, &bp, &vnode->status, vnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        afs_pages_written_back(vnode, call);
@@ -1273,7 +1309,7 @@ static int afs_fs_store_data64(struct afs_fs_cursor *fc,
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
-       _enter(",%x,{%x:%u},,",
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
        call = afs_alloc_flat_call(net, &afs_RXFSStoreData64,
@@ -1330,7 +1366,10 @@ int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
        loff_t size, pos, i_size;
        __be32 *bp;
 
-       _enter(",%x,{%x:%u},,",
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_store_data(fc, mapping, first, last, offset, to);
+
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
        size = (loff_t)to - (loff_t)offset;
@@ -1407,9 +1446,10 @@ static int afs_deliver_fs_store_status(struct afs_call *call)
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       if (afs_decode_status(call, &bp, &vnode->status, vnode,
-                             &call->expected_version, NULL) < 0)
-               return afs_protocol_error(call, -EBADMSG);
+       ret = afs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
        /* xdr_decode_AFSVolSync(&bp, call->reply[X]); */
 
        _leave(" = 0 [done]");
@@ -1451,7 +1491,7 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
-       _enter(",%x,{%x:%u},,",
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
        ASSERT(attr->ia_valid & ATTR_SIZE);
@@ -1498,7 +1538,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
-       _enter(",%x,{%x:%u},,",
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
        ASSERT(attr->ia_valid & ATTR_SIZE);
@@ -1544,10 +1584,13 @@ int afs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_setattr(fc, attr);
+
        if (attr->ia_valid & ATTR_SIZE)
                return afs_fs_setattr_size(fc, attr);
 
-       _enter(",%x,{%x:%u},,",
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
 
        call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus,
@@ -1581,164 +1624,114 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
 {
        const __be32 *bp;
        char *p;
+       u32 size;
        int ret;
 
        _enter("{%u}", call->unmarshall);
 
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
                call->unmarshall++;
+               afs_extract_to_buf(call, 12 * 4);
 
                /* extract the returned status record */
        case 1:
                _debug("extract status");
-               ret = afs_extract_data(call, call->buffer,
-                                      12 * 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                bp = call->buffer;
                xdr_decode_AFSFetchVolumeStatus(&bp, call->reply[1]);
-               call->offset = 0;
                call->unmarshall++;
+               afs_extract_to_tmp(call);
 
                /* extract the volume name length */
        case 2:
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                call->count = ntohl(call->tmp);
                _debug("volname length: %u", call->count);
                if (call->count >= AFSNAMEMAX)
-                       return afs_protocol_error(call, -EBADMSG);
-               call->offset = 0;
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_volname_len);
+               size = (call->count + 3) & ~3; /* It's padded */
+               afs_extract_begin(call, call->reply[2], size);
                call->unmarshall++;
 
                /* extract the volume name */
        case 3:
                _debug("extract volname");
-               if (call->count > 0) {
-                       ret = afs_extract_data(call, call->reply[2],
-                                              call->count, true);
-                       if (ret < 0)
-                               return ret;
-               }
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
 
                p = call->reply[2];
                p[call->count] = 0;
                _debug("volname '%s'", p);
-
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
-               /* extract the volume name padding */
-               if ((call->count & 3) == 0) {
-                       call->unmarshall++;
-                       goto no_volname_padding;
-               }
-               call->count = 4 - (call->count & 3);
-
-       case 4:
-               ret = afs_extract_data(call, call->buffer,
-                                      call->count, true);
-               if (ret < 0)
-                       return ret;
-
-               call->offset = 0;
-               call->unmarshall++;
-       no_volname_padding:
-
                /* extract the offline message length */
-       case 5:
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+       case 4:
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                call->count = ntohl(call->tmp);
                _debug("offline msg length: %u", call->count);
                if (call->count >= AFSNAMEMAX)
-                       return afs_protocol_error(call, -EBADMSG);
-               call->offset = 0;
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_offline_msg_len);
+               size = (call->count + 3) & ~3; /* It's padded */
+               afs_extract_begin(call, call->reply[2], size);
                call->unmarshall++;
 
                /* extract the offline message */
-       case 6:
+       case 5:
                _debug("extract offline");
-               if (call->count > 0) {
-                       ret = afs_extract_data(call, call->reply[2],
-                                              call->count, true);
-                       if (ret < 0)
-                               return ret;
-               }
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
 
                p = call->reply[2];
                p[call->count] = 0;
                _debug("offline '%s'", p);
 
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
-               /* extract the offline message padding */
-               if ((call->count & 3) == 0) {
-                       call->unmarshall++;
-                       goto no_offline_padding;
-               }
-               call->count = 4 - (call->count & 3);
-
-       case 7:
-               ret = afs_extract_data(call, call->buffer,
-                                      call->count, true);
-               if (ret < 0)
-                       return ret;
-
-               call->offset = 0;
-               call->unmarshall++;
-       no_offline_padding:
-
                /* extract the message of the day length */
-       case 8:
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+       case 6:
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                call->count = ntohl(call->tmp);
                _debug("motd length: %u", call->count);
                if (call->count >= AFSNAMEMAX)
-                       return afs_protocol_error(call, -EBADMSG);
-               call->offset = 0;
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_motd_len);
+               size = (call->count + 3) & ~3; /* It's padded */
+               afs_extract_begin(call, call->reply[2], size);
                call->unmarshall++;
 
                /* extract the message of the day */
-       case 9:
+       case 7:
                _debug("extract motd");
-               if (call->count > 0) {
-                       ret = afs_extract_data(call, call->reply[2],
-                                              call->count, true);
-                       if (ret < 0)
-                               return ret;
-               }
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
 
                p = call->reply[2];
                p[call->count] = 0;
                _debug("motd '%s'", p);
 
-               call->offset = 0;
                call->unmarshall++;
 
-               /* extract the message of the day padding */
-               call->count = (4 - (call->count & 3)) & 3;
-
-       case 10:
-               ret = afs_extract_data(call, call->buffer,
-                                      call->count, false);
-               if (ret < 0)
-                       return ret;
-
-               call->offset = 0;
-               call->unmarshall++;
-       case 11:
+       case 8:
                break;
        }
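
The rewritten delivery function above drops the separate padding states (the old cases 4, 7 and 10) because each string is now extracted together with its XDR padding in one step: size = (call->count + 3) & ~3 rounds the wire length up to the next 4-byte boundary. A standalone check of that rounding, using plain C only:

#include <assert.h>
#include <stdint.h>

/* XDR pads opaque strings to a 4-byte boundary; this mirrors the
 * "size = (call->count + 3) & ~3" computation above. */
static uint32_t xdr_padded_len(uint32_t count)
{
	return (count + 3) & ~(uint32_t)3;
}

int main(void)
{
	assert(xdr_padded_len(0) == 0);
	assert(xdr_padded_len(1) == 4);
	assert(xdr_padded_len(4) == 4);
	assert(xdr_padded_len(5) == 8);
	return 0;
}
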
 
@@ -1778,6 +1771,9 @@ int afs_fs_get_volume_status(struct afs_fs_cursor *fc,
        __be32 *bp;
        void *tmpbuf;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_get_volume_status(fc, vs);
+
        _enter("");
 
        tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL);
@@ -1867,6 +1863,9 @@ int afs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_set_lock(fc, type);
+
        _enter("");
 
        call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
@@ -1899,6 +1898,9 @@ int afs_fs_extend_lock(struct afs_fs_cursor *fc)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_extend_lock(fc);
+
        _enter("");
 
        call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
@@ -1930,6 +1932,9 @@ int afs_fs_release_lock(struct afs_fs_cursor *fc)
        struct afs_net *net = afs_v2net(vnode);
        __be32 *bp;
 
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_release_lock(fc);
+
        _enter("");
 
        call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
@@ -2004,19 +2009,16 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
        u32 count;
        int ret;
 
-       _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count);
+       _enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter));
 
-again:
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
                /* Extract the capabilities word count */
        case 1:
-               ret = afs_extract_data(call, &call->tmp,
-                                      1 * sizeof(__be32),
-                                      true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -2024,24 +2026,17 @@ again:
 
                call->count = count;
                call->count2 = count;
-               call->offset = 0;
+               iov_iter_discard(&call->iter, READ, count * sizeof(__be32));
                call->unmarshall++;
 
                /* Extract capabilities words */
        case 2:
-               count = min(call->count, 16U);
-               ret = afs_extract_data(call, call->buffer,
-                                      count * sizeof(__be32),
-                                      call->count > 16);
+               ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
                /* TODO: Examine capabilities */
 
-               call->count -= count;
-               if (call->count > 0)
-                       goto again;
-               call->offset = 0;
                call->unmarshall++;
                break;
        }
@@ -2050,6 +2045,14 @@ again:
        return 0;
 }
 
+static void afs_destroy_fs_get_capabilities(struct afs_call *call)
+{
+       struct afs_server *server = call->reply[0];
+
+       afs_put_server(call->net, server);
+       afs_flat_call_destructor(call);
+}
+
 /*
  * FS.GetCapabilities operation type
  */
@@ -2057,7 +2060,8 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
        .name           = "FS.GetCapabilities",
        .op             = afs_FS_GetCapabilities,
        .deliver        = afs_deliver_fs_get_capabilities,
-       .destructor     = afs_flat_call_destructor,
+       .done           = afs_fileserver_probe_result,
+       .destructor     = afs_destroy_fs_get_capabilities,
 };
 
 /*
@@ -2067,7 +2071,9 @@ static const struct afs_call_type afs_RXFSGetCapabilities = {
 int afs_fs_get_capabilities(struct afs_net *net,
                            struct afs_server *server,
                            struct afs_addr_cursor *ac,
-                           struct key *key)
+                           struct key *key,
+                           unsigned int server_index,
+                           bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -2079,6 +2085,10 @@ int afs_fs_get_capabilities(struct afs_net *net,
                return -ENOMEM;
 
        call->key = key;
+       call->reply[0] = afs_get_server(server);
+       call->reply[1] = (void *)(long)server_index;
+       call->upgrade = true;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -2086,7 +2096,7 @@ int afs_fs_get_capabilities(struct afs_net *net,
 
        /* Can't take a ref on server */
        trace_afs_make_fs_call(call, NULL);
-       return afs_make_call(ac, call, GFP_NOFS, false);
+       return afs_make_call(ac, call, GFP_NOFS, async);
 }
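
The capability probe above becomes asynchronous: it pins the server (afs_get_server() into reply[0], released by the new afs_destroy_fs_get_capabilities() destructor) and smuggles the server index through reply[1] as a pointer-sized integer. A standalone illustration of that void-pointer round-trip, which is safe only for values that fit in a long:

#include <assert.h>

int main(void)
{
	unsigned int server_index = 3;

	/* store, as "call->reply[1] = (void *)(long)server_index" does */
	void *slot = (void *)(long)server_index;

	/* retrieve on the reply side */
	unsigned int back = (unsigned int)(long)slot;

	assert(back == 3);
	return 0;
}
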
 
 /*
@@ -2097,7 +2107,7 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
        struct afs_file_status *status = call->reply[1];
        struct afs_callback *callback = call->reply[2];
        struct afs_volsync *volsync = call->reply[3];
-       struct afs_vnode *vnode = call->reply[0];
+       struct afs_fid *fid = call->reply[0];
        const __be32 *bp;
        int ret;
 
@@ -2105,21 +2115,16 @@ static int afs_deliver_fs_fetch_status(struct afs_call *call)
        if (ret < 0)
                return ret;
 
-       _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu}", fid->vid, fid->vnode);
 
        /* unmarshall the reply once we've received all of it */
        bp = call->buffer;
-       afs_decode_status(call, &bp, status, vnode,
-                         &call->expected_version, NULL);
-       callback[call->count].version   = ntohl(bp[0]);
-       callback[call->count].expiry    = ntohl(bp[1]);
-       callback[call->count].type      = ntohl(bp[2]);
-       if (vnode)
-               xdr_decode_AFSCallBack(call, vnode, &bp);
-       else
-               bp += 3;
-       if (volsync)
-               xdr_decode_AFSVolSync(&bp, volsync);
+       ret = afs_decode_status(call, &bp, status, NULL,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_AFSCallBack_raw(call, &bp, callback);
+       xdr_decode_AFSVolSync(&bp, volsync);
 
        _leave(" = 0 [done]");
        return 0;
@@ -2148,7 +2153,10 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
        struct afs_call *call;
        __be32 *bp;
 
-       _enter(",%x,{%x:%u},,",
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_fetch_status(fc, net, fid, status, callback, volsync);
+
+       _enter(",%x,{%llx:%llu},,",
               key_serial(fc->key), fid->vid, fid->vnode);
 
        call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
@@ -2158,11 +2166,12 @@ int afs_fs_fetch_status(struct afs_fs_cursor *fc,
        }
 
        call->key = fc->key;
-       call->reply[0] = NULL; /* vnode for fid[0] */
+       call->reply[0] = fid;
        call->reply[1] = status;
        call->reply[2] = callback;
        call->reply[3] = volsync;
        call->expected_version = 1; /* vnode->status.data_version */
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -2193,38 +2202,40 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
 
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
                /* Extract the file status count and array in two steps */
        case 1:
                _debug("extract status count");
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                tmp = ntohl(call->tmp);
                _debug("status count: %u/%u", tmp, call->count2);
                if (tmp != call->count2)
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_ibulkst_count);
 
                call->count = 0;
                call->unmarshall++;
        more_counts:
-               call->offset = 0;
+               afs_extract_to_buf(call, 21 * sizeof(__be32));
 
        case 2:
                _debug("extract status array %u", call->count);
-               ret = afs_extract_data(call, call->buffer, 21 * 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                bp = call->buffer;
                statuses = call->reply[1];
-               if (afs_decode_status(call, &bp, &statuses[call->count],
-                                     call->count == 0 ? vnode : NULL,
-                                     NULL, NULL) < 0)
-                       return afs_protocol_error(call, -EBADMSG);
+               ret = afs_decode_status(call, &bp, &statuses[call->count],
+                                       call->count == 0 ? vnode : NULL,
+                                       NULL, NULL);
+               if (ret < 0)
+                       return ret;
 
                call->count++;
                if (call->count < call->count2)
@@ -2232,27 +2243,28 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
 
                call->count = 0;
                call->unmarshall++;
-               call->offset = 0;
+               afs_extract_to_tmp(call);
 
                /* Extract the callback count and array in two steps */
        case 3:
                _debug("extract CB count");
-               ret = afs_extract_data(call, &call->tmp, 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                tmp = ntohl(call->tmp);
                _debug("CB count: %u", tmp);
                if (tmp != call->count2)
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_ibulkst_cb_count);
                call->count = 0;
                call->unmarshall++;
        more_cbs:
-               call->offset = 0;
+               afs_extract_to_buf(call, 3 * sizeof(__be32));
 
        case 4:
                _debug("extract CB array");
-               ret = afs_extract_data(call, call->buffer, 3 * 4, true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -2260,7 +2272,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
                bp = call->buffer;
                callbacks = call->reply[2];
                callbacks[call->count].version  = ntohl(bp[0]);
-               callbacks[call->count].expiry   = ntohl(bp[1]);
+               callbacks[call->count].expires_at = xdr_decode_expiry(call, ntohl(bp[1]));
                callbacks[call->count].type     = ntohl(bp[2]);
                statuses = call->reply[1];
                if (call->count == 0 && vnode && statuses[0].abort_code == 0)
@@ -2269,19 +2281,17 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
                if (call->count < call->count2)
                        goto more_cbs;
 
-               call->offset = 0;
+               afs_extract_to_buf(call, 6 * sizeof(__be32));
                call->unmarshall++;
 
        case 5:
-               ret = afs_extract_data(call, call->buffer, 6 * 4, false);
+               ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
                bp = call->buffer;
-               if (call->reply[3])
-                       xdr_decode_AFSVolSync(&bp, call->reply[3]);
+               xdr_decode_AFSVolSync(&bp, call->reply[3]);
 
-               call->offset = 0;
                call->unmarshall++;
 
        case 6:
@@ -2317,7 +2327,11 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
        __be32 *bp;
        int i;
 
-       _enter(",%x,{%x:%u},%u",
+       if (test_bit(AFS_SERVER_FL_IS_YFS, &fc->cbi->server->flags))
+               return yfs_fs_inline_bulk_status(fc, net, fids, statuses, callbacks,
+                                                nr_fids, volsync);
+
+       _enter(",%x,{%llx:%llu},%u",
               key_serial(fc->key), fids[0].vid, fids[0].vnode, nr_fids);
 
        call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus,
@@ -2334,6 +2348,7 @@ int afs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
        call->reply[2] = callbacks;
        call->reply[3] = volsync;
        call->count2 = nr_fids;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
index 479b7fdda1244f5bf210694e275826cba99b5553..6b17d362041426967dd5fdc875a80c46d4e30742 100644 (file)
@@ -82,7 +82,7 @@ static int afs_inode_init_from_status(struct afs_vnode *vnode, struct key *key)
        default:
                printk("kAFS: AFS vnode with undefined type\n");
                read_sequnlock_excl(&vnode->cb_lock);
-               return afs_protocol_error(NULL, -EBADMSG);
+               return afs_protocol_error(NULL, -EBADMSG, afs_eproto_file_type);
        }
 
        inode->i_blocks         = 0;
@@ -100,7 +100,7 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
        struct afs_fs_cursor fc;
        int ret;
 
-       _enter("%s,{%x:%u.%u,S=%lx}",
+       _enter("%s,{%llx:%llu.%u,S=%lx}",
               vnode->volume->name,
               vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique,
               vnode->flags);
@@ -127,9 +127,9 @@ int afs_fetch_status(struct afs_vnode *vnode, struct key *key, bool new_inode)
 int afs_iget5_test(struct inode *inode, void *opaque)
 {
        struct afs_iget_data *data = opaque;
+       struct afs_vnode *vnode = AFS_FS_I(inode);
 
-       return inode->i_ino == data->fid.vnode &&
-               inode->i_generation == data->fid.unique;
+       return memcmp(&vnode->fid, &data->fid, sizeof(data->fid)) == 0;
 }
 
 /*
@@ -150,11 +150,14 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
        struct afs_iget_data *data = opaque;
        struct afs_vnode *vnode = AFS_FS_I(inode);
 
-       inode->i_ino = data->fid.vnode;
-       inode->i_generation = data->fid.unique;
        vnode->fid = data->fid;
        vnode->volume = data->volume;
 
+       /* YFS supports 96-bit vnode IDs, but Linux only supports
+        * 64-bit inode numbers.
+        */
+       inode->i_ino = data->fid.vnode;
+       inode->i_generation = data->fid.unique;
        return 0;
 }
 
@@ -193,7 +196,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root)
                return ERR_PTR(-ENOMEM);
        }
 
-       _debug("GOT INODE %p { ino=%lu, vl=%x, vn=%x, u=%x }",
+       _debug("GOT INODE %p { ino=%lu, vl=%llx, vn=%llx, u=%x }",
               inode, inode->i_ino, data.fid.vid, data.fid.vnode,
               data.fid.unique);
 
@@ -252,8 +255,8 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
 
        key.vnode_id            = vnode->fid.vnode;
        key.unique              = vnode->fid.unique;
-       key.vnode_id_ext[0]     = 0;
-       key.vnode_id_ext[1]     = 0;
+       key.vnode_id_ext[0]     = vnode->fid.vnode >> 32;
+       key.vnode_id_ext[1]     = vnode->fid.vnode_hi;
        aux.data_version        = vnode->status.data_version;
 
        vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
@@ -277,7 +280,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
        struct inode *inode;
        int ret;
 
-       _enter(",{%x:%u.%u},,", fid->vid, fid->vnode, fid->unique);
+       _enter(",{%llx:%llu.%u},,", fid->vid, fid->vnode, fid->unique);
 
        as = sb->s_fs_info;
        data.volume = as->volume;
@@ -289,7 +292,7 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
                return ERR_PTR(-ENOMEM);
        }
 
-       _debug("GOT INODE %p { vl=%x vn=%x, u=%x }",
+       _debug("GOT INODE %p { vl=%llx vn=%llx, u=%x }",
               inode, fid->vid, fid->vnode, fid->unique);
 
        vnode = AFS_FS_I(inode);
@@ -314,11 +317,11 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
                         * didn't give us a callback) */
                        vnode->cb_version = 0;
                        vnode->cb_type = 0;
-                       vnode->cb_expires_at = 0;
+                       vnode->cb_expires_at = ktime_get();
                } else {
                        vnode->cb_version = cb->version;
                        vnode->cb_type = cb->type;
-                       vnode->cb_expires_at = cb->expiry;
+                       vnode->cb_expires_at = cb->expires_at;
                        vnode->cb_interest = afs_get_cb_interest(cbi);
                        set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
                }
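
The callback fields above change meaning: cb->expiry, a relative countdown taken off the wire, becomes cb->expires_at, an absolute time produced by xdr_decode_expiry() (called in an earlier hunk but not shown here). Presumably it anchors the wire value to when the reply arrived, which is why callers now set call->want_reply_time. A userspace sketch of that conversion, under that assumption:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical mirror of xdr_decode_expiry(): turn the server's relative
 * expiry (seconds) into an absolute expiry anchored at the reply time. */
static int64_t decode_expiry(int64_t reply_time_secs, uint32_t expiry_secs)
{
	return reply_time_secs + expiry_secs;
}

int main(void)
{
	int64_t reply_time = (int64_t)time(NULL);

	printf("callback expires at %lld\n",
	       (long long)decode_expiry(reply_time, 300));
	return 0;
}
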
@@ -352,7 +355,7 @@ bad_inode:
  */
 void afs_zap_data(struct afs_vnode *vnode)
 {
-       _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+       _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
 
 #ifdef CONFIG_AFS_FSCACHE
        fscache_invalidate(vnode->cache);
@@ -379,10 +382,10 @@ void afs_zap_data(struct afs_vnode *vnode)
 int afs_validate(struct afs_vnode *vnode, struct key *key)
 {
        time64_t now = ktime_get_real_seconds();
-       bool valid = false;
+       bool valid;
        int ret;
 
-       _enter("{v={%x:%u} fl=%lx},%x",
+       _enter("{v={%llx:%llu} fl=%lx},%x",
               vnode->fid.vid, vnode->fid.vnode, vnode->flags,
               key_serial(key));
 
@@ -399,15 +402,21 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
                        vnode->cb_v_break = vnode->volume->cb_v_break;
                        valid = false;
                } else if (vnode->status.type == AFS_FTYPE_DIR &&
-                          test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) &&
-                          vnode->cb_expires_at - 10 > now) {
-                       valid = true;
-               } else if (!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) &&
-                          vnode->cb_expires_at - 10 > now) {
+                          (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
+                           vnode->cb_expires_at - 10 <= now)) {
+                       valid = false;
+               } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
+                          vnode->cb_expires_at - 10 <= now) {
+                       valid = false;
+               } else {
                        valid = true;
                }
        } else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
                valid = true;
+       } else {
+               vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
+               vnode->cb_v_break = vnode->volume->cb_v_break;
+               valid = false;
        }
 
        read_sequnlock_excl(&vnode->cb_lock);
@@ -501,7 +510,7 @@ void afs_evict_inode(struct inode *inode)
 
        vnode = AFS_FS_I(inode);
 
-       _enter("{%x:%u.%d}",
+       _enter("{%llx:%llu.%d}",
               vnode->fid.vid,
               vnode->fid.vnode,
               vnode->fid.unique);
@@ -550,7 +559,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
        struct key *key;
        int ret;
 
-       _enter("{%x:%u},{n=%pd},%x",
+       _enter("{%llx:%llu},{n=%pd},%x",
               vnode->fid.vid, vnode->fid.vnode, dentry,
               attr->ia_valid);
 
index 72de1f157d20235b4c2a103d5f098b5b9cf7dd1f..8871b9e8645f15ce0963745813c6de62cc8ea786 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/backing-dev.h>
 #include <linux/uuid.h>
 #include <linux/mm_types.h>
+#include <linux/dns_resolver.h>
 #include <net/net_namespace.h>
 #include <net/netns/generic.h>
 #include <net/sock.h>
@@ -75,10 +76,13 @@ struct afs_addr_list {
        u32                     version;        /* Version */
        unsigned char           max_addrs;
        unsigned char           nr_addrs;
-       unsigned char           index;          /* Address currently in use */
+       unsigned char           preferred;      /* Preferred address */
        unsigned char           nr_ipv4;        /* Number of IPv4 addresses */
+       enum dns_record_source  source:8;
+       enum dns_lookup_status  status:8;
        unsigned long           probed;         /* Mask of addrs that have been probed */
-       unsigned long           yfs;            /* Mask of servers that are YFS */
+       unsigned long           failed;         /* Mask of addrs that failed locally/ICMP */
+       unsigned long           responded;      /* Mask of addrs that responded */
        struct sockaddr_rxrpc   addrs[];
 #define AFS_MAX_ADDRESSES ((unsigned int)(sizeof(unsigned long) * 8))
 };
@@ -88,6 +92,7 @@ struct afs_addr_list {
  */
 struct afs_call {
        const struct afs_call_type *type;       /* type of call */
+       struct afs_addr_list    *alist;         /* Address is alist[addr_ix] */
        wait_queue_head_t       waitq;          /* processes awaiting completion */
        struct work_struct      async_work;     /* async I/O processor */
        struct work_struct      work;           /* actual work processor */
@@ -98,16 +103,22 @@ struct afs_call {
        struct afs_cb_interest  *cbi;           /* Callback interest for server used */
        void                    *request;       /* request data (first part) */
        struct address_space    *mapping;       /* Pages being written from */
+       struct iov_iter         iter;           /* Buffer iterator */
+       struct iov_iter         *_iter;         /* Iterator currently in use */
+       union { /* Convenience for ->iter */
+               struct kvec     kvec[1];
+               struct bio_vec  bvec[1];
+       };
        void                    *buffer;        /* reply receive buffer */
        void                    *reply[4];      /* Where to put the reply */
        pgoff_t                 first;          /* first page in mapping to deal with */
        pgoff_t                 last;           /* last page in mapping to deal with */
-       size_t                  offset;         /* offset into received data store */
        atomic_t                usage;
        enum afs_call_state     state;
        spinlock_t              state_lock;
        int                     error;          /* error code */
        u32                     abort_code;     /* Remote abort ID or 0 */
+       u32                     epoch;
        unsigned                request_size;   /* size of request data */
        unsigned                reply_max;      /* maximum size of reply */
        unsigned                first_offset;   /* offset into mapping[first] */
@@ -117,19 +128,28 @@ struct afs_call {
                unsigned        count2;         /* count used in unmarshalling */
        };
        unsigned char           unmarshall;     /* unmarshalling phase */
+       unsigned char           addr_ix;        /* Address in ->alist */
        bool                    incoming;       /* T if incoming call */
        bool                    send_pages;     /* T if data from mapping should be sent */
        bool                    need_attention; /* T if RxRPC poked us */
        bool                    async;          /* T if asynchronous */
        bool                    ret_reply0;     /* T if should return reply[0] on success */
        bool                    upgrade;        /* T to request service upgrade */
+       bool                    want_reply_time; /* T if want reply_time */
        u16                     service_id;     /* Actual service ID (after upgrade) */
        unsigned int            debug_id;       /* Trace ID */
        u32                     operation_ID;   /* operation ID for an incoming call */
        u32                     count;          /* count for use in unmarshalling */
-       __be32                  tmp;            /* place to extract temporary data */
+       union {                                 /* place to extract temporary data */
+               struct {
+                       __be32  tmp_u;
+                       __be32  tmp;
+               } __attribute__((packed));
+               __be64          tmp64;
+       };
        afs_dataversion_t       expected_version; /* Updated version expected from store */
        afs_dataversion_t       expected_version_2; /* 2nd updated version expected from store */
+       ktime_t                 reply_time;     /* Time of first reply packet */
 };
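
The packed union replacing the single tmp word lets the extractor pull either one or two big-endian 32-bit words into the same scratch space: a 64-bit quantity lands in tmp64, with tmp_u/tmp overlaying its two halves in wire order. A standalone check that the overlay has no padding (same layout trick, stand-in types):

#include <assert.h>
#include <stdint.h>
#include <string.h>

union scratch {
	struct {
		uint32_t tmp_u;		/* first 32-bit word off the wire */
		uint32_t tmp;		/* second word */
	} __attribute__((packed));
	uint64_t tmp64;			/* both words as one 64-bit field */
};

int main(void)
{
	union scratch s;
	const uint8_t wire[8] = { 0, 0, 0, 1, 0, 0, 0, 2 };

	assert(sizeof(s) == 8);		/* halves exactly overlay tmp64 */
	memcpy(&s, wire, sizeof(wire));	/* as an 8-byte extraction would */
	assert(s.tmp_u != 0 || s.tmp != 0);
	return 0;
}
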
 
 struct afs_call_type {
@@ -146,6 +166,9 @@ struct afs_call_type {
 
        /* Work function */
        void (*work)(struct work_struct *work);
+
+       /* Call done function (gets called immediately on success or failure) */
+       void (*done)(struct afs_call *call);
 };
 
 /*
@@ -185,6 +208,7 @@ struct afs_read {
        refcount_t              usage;
        unsigned int            index;          /* Which page we're reading into */
        unsigned int            nr_pages;
+       unsigned int            offset;         /* offset into current page */
        void (*page_done)(struct afs_call *, struct afs_read *);
        struct page             **pages;
        struct page             *array[];
@@ -343,12 +367,69 @@ struct afs_cell {
        rwlock_t                proc_lock;
 
        /* VL server list. */
-       rwlock_t                vl_addrs_lock;  /* Lock on vl_addrs */
-       struct afs_addr_list    __rcu *vl_addrs; /* List of VL servers */
+       rwlock_t                vl_servers_lock; /* Lock on vl_servers */
+       struct afs_vlserver_list __rcu *vl_servers;
+
        u8                      name_len;       /* Length of name */
        char                    name[64 + 1];   /* Cell name, case-flattened and NUL-padded */
 };
 
+/*
+ * Volume Location server record.
+ */
+struct afs_vlserver {
+       struct rcu_head         rcu;
+       struct afs_addr_list    __rcu *addresses; /* List of addresses for this VL server */
+       unsigned long           flags;
+#define AFS_VLSERVER_FL_PROBED 0               /* The VL server has been probed */
+#define AFS_VLSERVER_FL_PROBING        1               /* VL server is being probed */
+#define AFS_VLSERVER_FL_IS_YFS 2               /* Server is YFS not AFS */
+       rwlock_t                lock;           /* Lock on addresses */
+       atomic_t                usage;
+
+       /* Probe state */
+       wait_queue_head_t       probe_wq;
+       atomic_t                probe_outstanding;
+       spinlock_t              probe_lock;
+       struct {
+               unsigned int    rtt;            /* RTT as ktime/64 */
+               u32             abort_code;
+               short           error;
+               bool            have_result;
+               bool            responded:1;
+               bool            is_yfs:1;
+               bool            not_yfs:1;
+               bool            local_failure:1;
+       } probe;
+
+       u16                     port;
+       u16                     name_len;       /* Length of name */
+       char                    name[];         /* Server name, case-flattened */
+};
+
+/*
+ * Weighted list of Volume Location servers.
+ */
+struct afs_vlserver_entry {
+       u16                     priority;       /* Preference (as SRV) */
+       u16                     weight;         /* Weight (as SRV) */
+       enum dns_record_source  source:8;
+       enum dns_lookup_status  status:8;
+       struct afs_vlserver     *server;
+};
+
+struct afs_vlserver_list {
+       struct rcu_head         rcu;
+       atomic_t                usage;
+       u8                      nr_servers;
+       u8                      index;          /* Server currently in use */
+       u8                      preferred;      /* Preferred server */
+       enum dns_record_source  source:8;
+       enum dns_lookup_status  status:8;
+       rwlock_t                lock;
+       struct afs_vlserver_entry servers[];
+};
+
 /*
  * Cached VLDB entry.
  *
@@ -403,8 +484,12 @@ struct afs_server {
 #define AFS_SERVER_FL_PROBING  6               /* Fileserver is being probed */
 #define AFS_SERVER_FL_NO_IBULK 7               /* Fileserver doesn't support FS.InlineBulkStatus */
 #define AFS_SERVER_FL_MAY_HAVE_CB 8            /* May have callbacks on this fileserver */
+#define AFS_SERVER_FL_IS_YFS   9               /* Server is YFS not AFS */
+#define AFS_SERVER_FL_NO_RM2   10              /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAVE_EPOCH 11            /* ->epoch is valid */
        atomic_t                usage;
        u32                     addr_version;   /* Address list version */
+       u32                     cm_epoch;       /* Server RxRPC epoch */
 
        /* file service access */
        rwlock_t                fs_lock;        /* access lock */
@@ -413,6 +498,26 @@ struct afs_server {
        struct hlist_head       cb_volumes;     /* List of volume interests on this server */
        unsigned                cb_s_break;     /* Break-everything counter. */
        rwlock_t                cb_break_lock;  /* Volume finding lock */
+
+       /* Probe state */
+       wait_queue_head_t       probe_wq;
+       atomic_t                probe_outstanding;
+       spinlock_t              probe_lock;
+       struct {
+               unsigned int    rtt;            /* RTT as ktime/64 */
+               u32             abort_code;
+               u32             cm_epoch;
+               short           error;
+               bool            have_result;
+               bool            responded:1;
+               bool            is_yfs:1;
+               bool            not_yfs:1;
+               bool            local_failure:1;
+               bool            no_epoch:1;
+               bool            cm_probed:1;
+               bool            said_rebooted:1;
+               bool            said_inconsistent:1;
+       } probe;
 };
 
 /*
@@ -447,8 +552,8 @@ struct afs_server_entry {
 
 struct afs_server_list {
        refcount_t              usage;
-       unsigned short          nr_servers;
-       unsigned short          index;          /* Server currently in use */
+       unsigned char           nr_servers;
+       unsigned char           preferred;      /* Preferred server */
        unsigned short          vnovol_mask;    /* Servers to be skipped due to VNOVOL */
        unsigned int            seq;            /* Set to ->servers_seq when installed */
        rwlock_t                lock;
@@ -550,6 +655,15 @@ struct afs_vnode {
        afs_callback_type_t     cb_type;        /* type of callback */
 };
 
+static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode)
+{
+#ifdef CONFIG_AFS_FSCACHE
+       return vnode->cache;
+#else
+       return NULL;
+#endif
+}
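
afs_vnode_cache() above is the usual pattern for compiling a feature out: with CONFIG_AFS_FSCACHE unset it degrades to a constant NULL, so callers need no #ifdef of their own. The same pattern, standalone:

#include <stdio.h>

struct cookie { int id; };

struct vnode {
#ifdef CONFIG_AFS_FSCACHE
	struct cookie *cache;
#endif
	int placeholder;
};

/* Returns the cache cookie, or NULL when the feature is compiled out. */
static inline struct cookie *vnode_cache(struct vnode *v)
{
#ifdef CONFIG_AFS_FSCACHE
	return v->cache;
#else
	(void)v;
	return NULL;
#endif
}

int main(void)
{
	struct vnode v = { 0 };

	printf("cache=%p\n", (void *)vnode_cache(&v));
	return 0;
}
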
+
 /*
  * cached security record for one user's attempt to access a vnode
  */
@@ -581,18 +695,44 @@ struct afs_interface {
        unsigned        mtu;            /* MTU of interface */
 };
 
+/*
+ * Error prioritisation and accumulation.
+ */
+struct afs_error {
+       short   error;                  /* Accumulated error */
+       bool    responded;              /* T if server responded */
+};
+
 /*
  * Cursor for iterating over a server's address list.
  */
 struct afs_addr_cursor {
        struct afs_addr_list    *alist;         /* Current address list (pins ref) */
-       struct sockaddr_rxrpc   *addr;
+       unsigned long           tried;          /* Tried addresses */
+       signed char             index;          /* Current address */
+       bool                    responded;      /* T if the current address responded */
+       unsigned short          nr_iterations;  /* Number of address iterations */
+       short                   error;
        u32                     abort_code;
-       unsigned short          start;          /* Starting point in alist->addrs[] */
-       unsigned short          index;          /* Wrapping offset from start to current addr */
+};
+
+/*
+ * Cursor for iterating over a set of volume location servers.
+ */
+struct afs_vl_cursor {
+       struct afs_addr_cursor  ac;
+       struct afs_cell         *cell;          /* The cell we're querying */
+       struct afs_vlserver_list *server_list;  /* Current server list (pins ref) */
+       struct afs_vlserver     *server;        /* Server on which this resides */
+       struct key              *key;           /* Key for the server */
+       unsigned long           untried;        /* Bitmask of untried servers */
+       short                   index;          /* Current server */
        short                   error;
-       bool                    begun;          /* T if we've begun iteration */
-       bool                    responded;      /* T if the current address responded */
+       unsigned short          flags;
+#define AFS_VL_CURSOR_STOP     0x0001          /* Set to cease iteration */
+#define AFS_VL_CURSOR_RETRY    0x0002          /* Set to do a retry */
+#define AFS_VL_CURSOR_RETRIED  0x0004          /* Set if started a retry */
+       unsigned short          nr_iterations;  /* Number of server iterations */
 };
 
 /*
@@ -604,10 +744,11 @@ struct afs_fs_cursor {
        struct afs_server_list  *server_list;   /* Current server list (pins ref) */
        struct afs_cb_interest  *cbi;           /* Server on which this resides (pins ref) */
        struct key              *key;           /* Key for the server */
+       unsigned long           untried;        /* Bitmask of untried servers */
        unsigned int            cb_break;       /* cb_break + cb_s_break before the call */
        unsigned int            cb_break_2;     /* cb_break + cb_s_break (2nd vnode) */
-       unsigned char           start;          /* Initial index in server list */
-       unsigned char           index;          /* Number of servers tried beyond start */
+       short                   index;          /* Current server */
+       short                   error;
        unsigned short          flags;
 #define AFS_FS_CURSOR_STOP     0x0001          /* Set to cease iteration */
 #define AFS_FS_CURSOR_VBUSY    0x0002          /* Set if seen VBUSY */
@@ -615,6 +756,7 @@ struct afs_fs_cursor {
 #define AFS_FS_CURSOR_VNOVOL   0x0008          /* Set if seen VNOVOL */
 #define AFS_FS_CURSOR_CUR_ONLY 0x0010          /* Set if current server only (file lock held) */
 #define AFS_FS_CURSOR_NO_VSLEEP        0x0020          /* Set to prevent sleep on VBUSY, VOFFLINE, ... */
+       unsigned short          nr_iterations;  /* Number of server iterations */
 };
 
 /*
@@ -640,12 +782,12 @@ extern struct afs_addr_list *afs_alloc_addrlist(unsigned int,
                                                unsigned short,
                                                unsigned short);
 extern void afs_put_addrlist(struct afs_addr_list *);
-extern struct afs_addr_list *afs_parse_text_addrs(const char *, size_t, char,
-                                                 unsigned short, unsigned short);
-extern struct afs_addr_list *afs_dns_query(struct afs_cell *, time64_t *);
+extern struct afs_vlserver_list *afs_parse_text_addrs(struct afs_net *,
+                                                     const char *, size_t, char,
+                                                     unsigned short, unsigned short);
+extern struct afs_vlserver_list *afs_dns_query(struct afs_cell *, time64_t *);
 extern bool afs_iterate_addresses(struct afs_addr_cursor *);
 extern int afs_end_cursor(struct afs_addr_cursor *);
-extern int afs_set_vl_cursor(struct afs_addr_cursor *, struct afs_cell *);
 
 extern void afs_merge_fs_addr4(struct afs_addr_list *, __be32, u16);
 extern void afs_merge_fs_addr6(struct afs_addr_list *, __be32 *, u16);
@@ -668,6 +810,7 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
  * callback.c
  */
 extern void afs_init_callback_state(struct afs_server *);
+extern void __afs_break_callback(struct afs_vnode *);
 extern void afs_break_callback(struct afs_vnode *);
 extern void afs_break_callbacks(struct afs_server *, size_t, struct afs_callback_break*);
 
@@ -688,10 +831,13 @@ static inline unsigned int afs_calc_vnode_cb_break(struct afs_vnode *vnode)
        return vnode->cb_break + vnode->cb_s_break + vnode->cb_v_break;
 }
 
-static inline unsigned int afs_cb_break_sum(struct afs_vnode *vnode,
-                                           struct afs_cb_interest *cbi)
+static inline bool afs_cb_is_broken(unsigned int cb_break,
+                                   const struct afs_vnode *vnode,
+                                   const struct afs_cb_interest *cbi)
 {
-       return vnode->cb_break + cbi->server->cb_s_break + vnode->volume->cb_v_break;
+       return !cbi || cb_break != (vnode->cb_break +
+                                   cbi->server->cb_s_break +
+                                   vnode->volume->cb_v_break);
 }
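
afs_cb_is_broken() above encodes the break-counter scheme: the caller snapshots the sum of the per-vnode, per-server and per-volume break counters before issuing an RPC (afs_calc_vnode_cb_break(), shown earlier in this header), and the callback promise counts as broken if that sum has moved since, or if there is no callback interest at all. A standalone model of the check:

#include <assert.h>

struct counters { unsigned cb_break, cb_s_break, cb_v_break; };

/* Mirror of the summed snapshot the cursor takes before a call. */
static unsigned cb_sum(const struct counters *c)
{
	return c->cb_break + c->cb_s_break + c->cb_v_break;
}

int main(void)
{
	struct counters c = { 0, 0, 0 };
	unsigned snapshot = cb_sum(&c);	/* taken before making the call */

	assert(cb_sum(&c) == snapshot);	/* nothing broke: promise holds */
	c.cb_s_break++;			/* server-wide callback break */
	assert(cb_sum(&c) != snapshot);	/* promise now counts as broken */
	return 0;
}
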
 
 /*
@@ -781,7 +927,7 @@ extern int afs_fs_give_up_callbacks(struct afs_net *, struct afs_server *);
 extern int afs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
 extern int afs_fs_create(struct afs_fs_cursor *, const char *, umode_t, u64,
                         struct afs_fid *, struct afs_file_status *, struct afs_callback *);
-extern int afs_fs_remove(struct afs_fs_cursor *, const char *, bool, u64);
+extern int afs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64);
 extern int afs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
 extern int afs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
                          struct afs_fid *, struct afs_file_status *);
@@ -797,7 +943,7 @@ extern int afs_fs_release_lock(struct afs_fs_cursor *);
 extern int afs_fs_give_up_all_callbacks(struct afs_net *, struct afs_server *,
                                        struct afs_addr_cursor *, struct key *);
 extern int afs_fs_get_capabilities(struct afs_net *, struct afs_server *,
-                                  struct afs_addr_cursor *, struct key *);
+                                  struct afs_addr_cursor *, struct key *, unsigned int, bool);
 extern int afs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
                                     struct afs_fid *, struct afs_file_status *,
                                     struct afs_callback *, unsigned int,
@@ -806,6 +952,13 @@ extern int afs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
                               struct afs_fid *, struct afs_file_status *,
                               struct afs_callback *, struct afs_volsync *);
 
+/*
+ * fs_probe.c
+ */
+extern void afs_fileserver_probe_result(struct afs_call *);
+extern int afs_probe_fileservers(struct afs_net *, struct key *, struct afs_server_list *);
+extern int afs_wait_for_fs_probes(struct afs_server_list *, unsigned long);
+
 /*
  * inode.c
  */
@@ -870,6 +1023,7 @@ static inline void __afs_stat(atomic_t *s)
  * misc.c
  */
 extern int afs_abort_to_error(u32);
+extern void afs_prioritise_error(struct afs_error *, int, u32);
 
 /*
  * mntpt.c
@@ -922,7 +1076,6 @@ extern int __net_init afs_open_socket(struct afs_net *);
 extern void __net_exit afs_close_socket(struct afs_net *);
 extern void afs_charge_preallocation(struct work_struct *);
 extern void afs_put_call(struct afs_call *);
-extern int afs_queue_call_work(struct afs_call *);
 extern long afs_make_call(struct afs_addr_cursor *, struct afs_call *, gfp_t, bool);
 extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
                                            const struct afs_call_type *,
@@ -930,12 +1083,39 @@ extern struct afs_call *afs_alloc_flat_call(struct afs_net *,
 extern void afs_flat_call_destructor(struct afs_call *);
 extern void afs_send_empty_reply(struct afs_call *);
 extern void afs_send_simple_reply(struct afs_call *, const void *, size_t);
-extern int afs_extract_data(struct afs_call *, void *, size_t, bool);
-extern int afs_protocol_error(struct afs_call *, int);
+extern int afs_extract_data(struct afs_call *, bool);
+extern int afs_protocol_error(struct afs_call *, int, enum afs_eproto_cause);
+
+static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
+{
+       call->kvec[0].iov_base = buf;
+       call->kvec[0].iov_len = size;
+       iov_iter_kvec(&call->iter, READ, call->kvec, 1, size);
+}
+
+static inline void afs_extract_to_tmp(struct afs_call *call)
+{
+       afs_extract_begin(call, &call->tmp, sizeof(call->tmp));
+}
+
+static inline void afs_extract_to_tmp64(struct afs_call *call)
+{
+       afs_extract_begin(call, &call->tmp64, sizeof(call->tmp64));
+}
+
+static inline void afs_extract_discard(struct afs_call *call, size_t size)
+{
+       iov_iter_discard(&call->iter, READ, size);
+}
+
+static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
+{
+       afs_extract_begin(call, call->buffer, size);
+}
 
 static inline int afs_transfer_reply(struct afs_call *call)
 {
-       return afs_extract_data(call, call->buffer, call->reply_max, false);
+       return afs_extract_data(call, false);
 }
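These helpers replace the old (buf, count, offset) bookkeeping with a persistent iov_iter kept in the call record: a deliver routine primes the iterator with one of the afs_extract_*() helpers, then calls afs_extract_data(), which returns -EAGAIN until the requested span has fully arrived. A hedged sketch of a two-phase deliver routine built on them; the use of call->unmarshall mirrors the existing deliver routines, but the routine itself is illustrative:

    static int example_deliver(struct afs_call *call)
    {
        int ret;

        switch (call->unmarshall) {
        case 0:
            afs_extract_to_tmp(call);           /* want a 32-bit length word */
            call->unmarshall++;
            /* Fall through */
        case 1:
            ret = afs_extract_data(call, true); /* more data follows */
            if (ret < 0)
                return ret;                     /* includes -EAGAIN: not all here yet */
            afs_extract_to_buf(call, ntohl(call->tmp));
            call->unmarshall++;
            /* Fall through */
        case 2:
            ret = afs_extract_data(call, false); /* final piece */
            if (ret < 0)
                return ret;
            break;
        }
        return 0;
    }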
 
 static inline bool afs_check_call_state(struct afs_call *call,
@@ -1012,7 +1192,6 @@ extern void afs_put_server(struct afs_net *, struct afs_server *);
 extern void afs_manage_servers(struct work_struct *);
 extern void afs_servers_timer(struct timer_list *);
 extern void __net_exit afs_purge_servers(struct afs_net *);
-extern bool afs_probe_fileserver(struct afs_fs_cursor *);
 extern bool afs_check_server_record(struct afs_fs_cursor *, struct afs_server *);
 
 /*
@@ -1039,14 +1218,51 @@ extern void afs_fs_exit(void);
 /*
  * vlclient.c
  */
-extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *,
-                                                        struct afs_addr_cursor *,
-                                                        struct key *, const char *, int);
-extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *, struct afs_addr_cursor *,
-                                               struct key *, const uuid_t *);
-extern int afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *);
-extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *, struct afs_addr_cursor *,
-                                                    struct key *, const uuid_t *);
+extern struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *,
+                                                        const char *, int);
+extern struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *, const uuid_t *);
+extern int afs_vl_get_capabilities(struct afs_net *, struct afs_addr_cursor *, struct key *,
+                                  struct afs_vlserver *, unsigned int, bool);
+extern struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *, const uuid_t *);
+
+/*
+ * vl_probe.c
+ */
+extern void afs_vlserver_probe_result(struct afs_call *);
+extern int afs_send_vl_probes(struct afs_net *, struct key *, struct afs_vlserver_list *);
+extern int afs_wait_for_vl_probes(struct afs_vlserver_list *, unsigned long);
+
+/*
+ * vl_rotate.c
+ */
+extern bool afs_begin_vlserver_operation(struct afs_vl_cursor *,
+                                        struct afs_cell *, struct key *);
+extern bool afs_select_vlserver(struct afs_vl_cursor *);
+extern bool afs_select_current_vlserver(struct afs_vl_cursor *);
+extern int afs_end_vlserver_operation(struct afs_vl_cursor *);
+
+/*
+ * vlserver_list.c
+ */
+static inline struct afs_vlserver *afs_get_vlserver(struct afs_vlserver *vlserver)
+{
+       atomic_inc(&vlserver->usage);
+       return vlserver;
+}
+
+static inline struct afs_vlserver_list *afs_get_vlserverlist(struct afs_vlserver_list *vllist)
+{
+       if (vllist)
+               atomic_inc(&vllist->usage);
+       return vllist;
+}
+
+extern struct afs_vlserver *afs_alloc_vlserver(const char *, size_t, unsigned short);
+extern void afs_put_vlserver(struct afs_net *, struct afs_vlserver *);
+extern struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int);
+extern void afs_put_vlserverlist(struct afs_net *, struct afs_vlserver_list *);
+extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
+                                                          const void *, size_t);
 
 /*
  * volume.c
@@ -1089,6 +1305,36 @@ extern int afs_launder_page(struct page *);
 extern const struct xattr_handler *afs_xattr_handlers[];
 extern ssize_t afs_listxattr(struct dentry *, char *, size_t);
 
+/*
+ * yfsclient.c
+ */
+extern int yfs_fs_fetch_file_status(struct afs_fs_cursor *, struct afs_volsync *, bool);
+extern int yfs_fs_fetch_data(struct afs_fs_cursor *, struct afs_read *);
+extern int yfs_fs_create_file(struct afs_fs_cursor *, const char *, umode_t, u64,
+                             struct afs_fid *, struct afs_file_status *, struct afs_callback *);
+extern int yfs_fs_make_dir(struct afs_fs_cursor *, const char *, umode_t, u64,
+                        struct afs_fid *, struct afs_file_status *, struct afs_callback *);
+extern int yfs_fs_remove_file2(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
+extern int yfs_fs_remove(struct afs_fs_cursor *, struct afs_vnode *, const char *, bool, u64);
+extern int yfs_fs_link(struct afs_fs_cursor *, struct afs_vnode *, const char *, u64);
+extern int yfs_fs_symlink(struct afs_fs_cursor *, const char *, const char *, u64,
+                         struct afs_fid *, struct afs_file_status *);
+extern int yfs_fs_rename(struct afs_fs_cursor *, const char *,
+                        struct afs_vnode *, const char *, u64, u64);
+extern int yfs_fs_store_data(struct afs_fs_cursor *, struct address_space *,
+                            pgoff_t, pgoff_t, unsigned, unsigned);
+extern int yfs_fs_setattr(struct afs_fs_cursor *, struct iattr *);
+extern int yfs_fs_get_volume_status(struct afs_fs_cursor *, struct afs_volume_status *);
+extern int yfs_fs_set_lock(struct afs_fs_cursor *, afs_lock_type_t);
+extern int yfs_fs_extend_lock(struct afs_fs_cursor *);
+extern int yfs_fs_release_lock(struct afs_fs_cursor *);
+extern int yfs_fs_fetch_status(struct afs_fs_cursor *, struct afs_net *,
+                              struct afs_fid *, struct afs_file_status *,
+                              struct afs_callback *, struct afs_volsync *);
+extern int yfs_fs_inline_bulk_status(struct afs_fs_cursor *, struct afs_net *,
+                                    struct afs_fid *, struct afs_file_status *,
+                                    struct afs_callback *, unsigned int,
+                                    struct afs_volsync *);
 
 /*
  * Miscellaneous inline functions.
@@ -1120,6 +1366,17 @@ static inline void afs_check_for_remote_deletion(struct afs_fs_cursor *fc,
        }
 }
 
+static inline int afs_io_error(struct afs_call *call, enum afs_io_error where)
+{
+       trace_afs_io_error(call->debug_id, -EIO, where);
+       return -EIO;
+}
+
+static inline int afs_bad(struct afs_vnode *vnode, enum afs_file_error where)
+{
+       trace_afs_file_error(vnode, -EIO, where);
+       return -EIO;
+}
 
 /*****************************************************************************/
 /*
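Since cell->vl_servers is RCU-managed, a user of the new vlserver-list helpers above would typically dereference the list inside an RCU read lock and pin it with afs_get_vlserverlist() if it has to outlive the critical section. A minimal sketch, error handling elided and purely illustrative:

    struct afs_vlserver_list *vllist;

    rcu_read_lock();
    vllist = afs_get_vlserverlist(rcu_dereference(cell->vl_servers));
    rcu_read_unlock();

    /* ... walk vllist->servers[i].server ... */

    afs_put_vlserverlist(cell->net, vllist);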
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 700a5fa7f4ece2151c68f87ee58bd6d2e8e97dc3..bbb1fd51b019ead4d6cd5aeee6b26861dd73fe04 100644
@@ -118,3 +118,55 @@ int afs_abort_to_error(u32 abort_code)
        default:                return -EREMOTEIO;
        }
 }
+
+/*
+ * Select the error to report from a set of errors.
+ */
+void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code)
+{
+       switch (error) {
+       case 0:
+               return;
+       default:
+               if (e->error == -ETIMEDOUT ||
+                   e->error == -ETIME)
+                       return;
+       case -ETIMEDOUT:
+       case -ETIME:
+               if (e->error == -ENOMEM ||
+                   e->error == -ENONET)
+                       return;
+       case -ENOMEM:
+       case -ENONET:
+               if (e->error == -ERFKILL)
+                       return;
+       case -ERFKILL:
+               if (e->error == -EADDRNOTAVAIL)
+                       return;
+       case -EADDRNOTAVAIL:
+               if (e->error == -ENETUNREACH)
+                       return;
+       case -ENETUNREACH:
+               if (e->error == -EHOSTUNREACH)
+                       return;
+       case -EHOSTUNREACH:
+               if (e->error == -EHOSTDOWN)
+                       return;
+       case -EHOSTDOWN:
+               if (e->error == -ECONNREFUSED)
+                       return;
+       case -ECONNREFUSED:
+               if (e->error == -ECONNRESET)
+                       return;
+       case -ECONNRESET: /* Responded, but call expired. */
+               if (e->responded)
+                       return;
+               e->error = error;
+               return;
+
+       case -ECONNABORTED:
+               e->responded = true;
+               e->error = afs_abort_to_error(abort_code);
+               return;
+       }
+}
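The fall-throughs are deliberate: they encode a severity ladder in which each case overwrites anything listed above it and defers to anything listed below it, with a server abort (-ECONNABORTED) converted via afs_abort_to_error() and always winning. A small worked example of aggregating per-address failures (values invented):

    struct afs_error e = { .error = -EDESTADDRREQ, .responded = false };

    afs_prioritise_error(&e, -ETIMEDOUT, 0);     /* recorded */
    afs_prioritise_error(&e, -ECONNREFUSED, 0);  /* refusal outranks timeout */
    afs_prioritise_error(&e, -ETIMEDOUT, 0);     /* ignored: lower priority */
    /* e.error is now -ECONNREFUSED */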
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 99fd13500a97f9e77e2cbf603ae012bc460d1a2f..2e51c6994148f30f4ec8d858b1e318b1d58980c0 100644
@@ -130,9 +130,10 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
                        goto error_no_page;
                }
 
-               ret = -EIO;
-               if (PageError(page))
+               if (PageError(page)) {
+                       ret = afs_bad(AFS_FS_I(d_inode(mntpt)), afs_file_error_mntpt);
                        goto error;
+               }
 
                buf = kmap_atomic(page);
                memcpy(devname, buf, size);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 9101f62707af2da3dbff5e33c6067d0cafbb9013..be2ee3bbd0a953349ccba4a30eecbd2366b840c1 100644
 #include <linux/uaccess.h>
 #include "internal.h"
 
+struct afs_vl_seq_net_private {
+       struct seq_net_private          seq;    /* Must be first */
+       struct afs_vlserver_list        *vllist;
+};
+
 static inline struct afs_net *afs_seq2net(struct seq_file *m)
 {
        return afs_net(seq_file_net(m));
@@ -32,16 +37,24 @@ static inline struct afs_net *afs_seq2net_single(struct seq_file *m)
  */
 static int afs_proc_cells_show(struct seq_file *m, void *v)
 {
-       struct afs_cell *cell = list_entry(v, struct afs_cell, proc_link);
+       struct afs_vlserver_list *vllist;
+       struct afs_cell *cell;
 
        if (v == SEQ_START_TOKEN) {
                /* display header on line 1 */
-               seq_puts(m, "USE NAME\n");
+               seq_puts(m, "USE    TTL SV NAME\n");
                return 0;
        }
 
+       cell = list_entry(v, struct afs_cell, proc_link);
+       vllist = rcu_dereference(cell->vl_servers);
+
        /* display one cell per line on subsequent lines */
-       seq_printf(m, "%3u %s\n", atomic_read(&cell->usage), cell->name);
+       seq_printf(m, "%3u %6lld %2u %s\n",
+                  atomic_read(&cell->usage),
+                  cell->dns_expiry - ktime_get_real_seconds(),
+                  vllist ? vllist->nr_servers : 0,
+                  cell->name);
        return 0;
 }
 
@@ -208,7 +221,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
                return 0;
        }
 
-       seq_printf(m, "%3d %08x %s\n",
+       seq_printf(m, "%3d %08llx %s\n",
                   atomic_read(&vol->usage), vol->vid,
                   afs_vol_types[vol->type]);
 
@@ -247,61 +260,102 @@ static const struct seq_operations afs_proc_cell_volumes_ops = {
        .show   = afs_proc_cell_volumes_show,
 };
 
+static const char *const dns_record_sources[NR__dns_record_source + 1] = {
+       [DNS_RECORD_UNAVAILABLE]        = "unav",
+       [DNS_RECORD_FROM_CONFIG]        = "cfg",
+       [DNS_RECORD_FROM_DNS_A]         = "A",
+       [DNS_RECORD_FROM_DNS_AFSDB]     = "AFSDB",
+       [DNS_RECORD_FROM_DNS_SRV]       = "SRV",
+       [DNS_RECORD_FROM_NSS]           = "nss",
+       [NR__dns_record_source]         = "[weird]"
+};
+
+static const char *const dns_lookup_statuses[NR__dns_lookup_status + 1] = {
+       [DNS_LOOKUP_NOT_DONE]           = "no-lookup",
+       [DNS_LOOKUP_GOOD]               = "good",
+       [DNS_LOOKUP_GOOD_WITH_BAD]      = "good/bad",
+       [DNS_LOOKUP_BAD]                = "bad",
+       [DNS_LOOKUP_GOT_NOT_FOUND]      = "not-found",
+       [DNS_LOOKUP_GOT_LOCAL_FAILURE]  = "local-failure",
+       [DNS_LOOKUP_GOT_TEMP_FAILURE]   = "temp-failure",
+       [DNS_LOOKUP_GOT_NS_FAILURE]     = "ns-failure",
+       [NR__dns_lookup_status]         = "[weird]"
+};
+
 /*
  * Display the list of Volume Location servers we're using for a cell.
  */
 static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v)
 {
-       struct sockaddr_rxrpc *addr = v;
+       const struct afs_vl_seq_net_private *priv = m->private;
+       const struct afs_vlserver_list *vllist = priv->vllist;
+       const struct afs_vlserver_entry *entry;
+       const struct afs_vlserver *vlserver;
+       const struct afs_addr_list *alist;
+       int i;
 
-       /* display header on line 1 */
-       if (v == (void *)1) {
-               seq_puts(m, "ADDRESS\n");
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(m, "# source %s, status %s\n",
+                          dns_record_sources[vllist->source],
+                          dns_lookup_statuses[vllist->status]);
                return 0;
        }
 
-       /* display one cell per line on subsequent lines */
-       seq_printf(m, "%pISp\n", &addr->transport);
+       entry = v;
+       vlserver = entry->server;
+       alist = rcu_dereference(vlserver->addresses);
+
+       seq_printf(m, "%s [p=%hu w=%hu s=%s,%s]:\n",
+                  vlserver->name, entry->priority, entry->weight,
+                  dns_record_sources[alist ? alist->source : entry->source],
+                  dns_lookup_statuses[alist ? alist->status : entry->status]);
+       if (alist) {
+               for (i = 0; i < alist->nr_addrs; i++)
+                       seq_printf(m, " %c %pISpc\n",
+                                  alist->preferred == i ? '>' : '-',
+                                  &alist->addrs[i].transport);
+       }
        return 0;
 }
 
 static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
        __acquires(rcu)
 {
-       struct afs_addr_list *alist;
+       struct afs_vl_seq_net_private *priv = m->private;
+       struct afs_vlserver_list *vllist;
        struct afs_cell *cell = PDE_DATA(file_inode(m->file));
        loff_t pos = *_pos;
 
        rcu_read_lock();
 
-       alist = rcu_dereference(cell->vl_addrs);
+       vllist = rcu_dereference(cell->vl_servers);
+       priv->vllist = vllist;
 
-       /* allow for the header line */
-       if (!pos)
-               return (void *) 1;
-       pos--;
+       if (pos < 0)
+               *_pos = pos = 0;
+       if (pos == 0)
+               return SEQ_START_TOKEN;
 
-       if (!alist || pos >= alist->nr_addrs)
+       if (!vllist || pos - 1 >= vllist->nr_servers)
                return NULL;
 
-       return alist->addrs + pos;
+       return &vllist->servers[pos - 1];
 }
 
 static void *afs_proc_cell_vlservers_next(struct seq_file *m, void *v,
                                          loff_t *_pos)
 {
-       struct afs_addr_list *alist;
-       struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+       struct afs_vl_seq_net_private *priv = m->private;
+       struct afs_vlserver_list *vllist = priv->vllist;
        loff_t pos;
 
-       alist = rcu_dereference(cell->vl_addrs);
-
        pos = *_pos;
-       (*_pos)++;
-       if (!alist || pos >= alist->nr_addrs)
+       pos++;
+       *_pos = pos;
+       if (!vllist || pos - 1 >= vllist->nr_servers)
                return NULL;
 
-       return alist->addrs + pos;
+       return &vllist->servers[pos - 1];
 }
 
 static void afs_proc_cell_vlservers_stop(struct seq_file *m, void *v)
@@ -337,11 +391,11 @@ static int afs_proc_servers_show(struct seq_file *m, void *v)
                   &server->uuid,
                   atomic_read(&server->usage),
                   &alist->addrs[0].transport,
-                  alist->index == 0 ? "*" : "");
+                  alist->preferred == 0 ? "*" : "");
        for (i = 1; i < alist->nr_addrs; i++)
                seq_printf(m, "                                         %pISpc%s\n",
                           &alist->addrs[i].transport,
-                          alist->index == i ? "*" : "");
+                          alist->preferred == i ? "*" : "");
        return 0;
 }
 
@@ -562,7 +616,7 @@ int afs_proc_cell_setup(struct afs_cell *cell)
 
        if (!proc_create_net_data("vlservers", 0444, dir,
                                  &afs_proc_cell_vlservers_ops,
-                                 sizeof(struct seq_net_private),
+                                 sizeof(struct afs_vl_seq_net_private),
                                  cell) ||
            !proc_create_net_data("volumes", 0444, dir,
                                  &afs_proc_cell_volumes_ops,
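For reference, with the new format strings the proc files read roughly as follows; the cell name, counts and addresses are all made up, and the paths assume the /proc/net/afs layout this file sets up:

    # cat /proc/net/afs/cells
    USE    TTL SV NAME
      3     59  2 example.org

    # cat /proc/net/afs/example.org/vlservers
    # source AFSDB, status good
    vl1.example.org [p=0 w=0 s=A,good]:
     > 192.0.2.1:7003
     - 192.0.2.2:7003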
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
new file mode 100644
index 0000000..07bc10f
--- /dev/null
+++ b/fs/afs/protocol_yfs.h
@@ -0,0 +1,163 @@
+/* YFS protocol bits
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define YFS_FS_SERVICE 2500
+#define YFS_CM_SERVICE 2501
+
+#define YFSCBMAX 1024
+
+enum YFS_CM_Operations {
+       YFSCBProbe              = 206,  /* probe client */
+       YFSCBGetLock            = 207,  /* get contents of CM lock table */
+       YFSCBXStatsVersion      = 209,  /* get version of extended statistics */
+       YFSCBGetXStats          = 210,  /* get contents of extended statistics data */
+       YFSCBInitCallBackState3 = 213,  /* initialise callback state, version 3 */
+       YFSCBProbeUuid          = 214,  /* check the client hasn't rebooted */
+       YFSCBGetServerPrefs     = 215,
+       YFSCBGetCellServDV      = 216,
+       YFSCBGetLocalCell       = 217,
+       YFSCBGetCacheConfig     = 218,
+       YFSCBGetCellByNum       = 65537,
+       YFSCBTellMeAboutYourself = 65538, /* get client capabilities */
+       YFSCBCallBack           = 64204,
+};
+
+enum YFS_FS_Operations {
+       YFSFETCHACL             = 64131, /* YFS Fetch file ACL */
+       YFSFETCHSTATUS          = 64132, /* YFS Fetch file status */
+       YFSSTOREACL             = 64134, /* YFS Store file ACL */
+       YFSSTORESTATUS          = 64135, /* YFS Store file status */
+       YFSREMOVEFILE           = 64136, /* YFS Remove a file */
+       YFSCREATEFILE           = 64137, /* YFS Create a file */
+       YFSRENAME               = 64138, /* YFS Rename or move a file or directory */
+       YFSSYMLINK              = 64139, /* YFS Create a symbolic link */
+       YFSLINK                 = 64140, /* YFS Create a hard link */
+       YFSMAKEDIR              = 64141, /* YFS Create a directory */
+       YFSREMOVEDIR            = 64142, /* YFS Remove a directory */
+       YFSGETVOLUMESTATUS      = 64149, /* YFS Get volume status information */
+       YFSSETVOLUMESTATUS      = 64150, /* YFS Set volume status information */
+       YFSSETLOCK              = 64156, /* YFS Request a file lock */
+       YFSEXTENDLOCK           = 64157, /* YFS Extend a file lock */
+       YFSRELEASELOCK          = 64158, /* YFS Release a file lock */
+       YFSLOOKUP               = 64161, /* YFS lookup file in directory */
+       YFSFLUSHCPS             = 64165,
+       YFSFETCHOPAQUEACL       = 64168,
+       YFSWHOAMI               = 64170,
+       YFSREMOVEACL            = 64171,
+       YFSREMOVEFILE2          = 64173,
+       YFSSTOREOPAQUEACL2      = 64174,
+       YFSINLINEBULKSTATUS     = 64536, /* YFS Fetch multiple file statuses with errors */
+       YFSFETCHDATA64          = 64537, /* YFS Fetch file data */
+       YFSSTOREDATA64          = 64538, /* YFS Store file data */
+       YFSUPDATESYMLINK        = 64540,
+};
+
+struct yfs_xdr_u64 {
+       __be32                  msw;
+       __be32                  lsw;
+} __packed;
+
+static inline u64 xdr_to_u64(const struct yfs_xdr_u64 x)
+{
+       return ((u64)ntohl(x.msw) << 32) | ntohl(x.lsw);
+}
+
+static inline struct yfs_xdr_u64 u64_to_xdr(const u64 x)
+{
+       return (struct yfs_xdr_u64){ .msw = htonl(x >> 32), .lsw = htonl(x) };
+}
+
+struct yfs_xdr_vnode {
+       struct yfs_xdr_u64      lo;
+       __be32                  hi;
+       __be32                  unique;
+} __packed;
+
+struct yfs_xdr_YFSFid {
+       struct yfs_xdr_u64      volume;
+       struct yfs_xdr_vnode    vnode;
+} __packed;
+
+
+struct yfs_xdr_YFSFetchStatus {
+       __be32                  type;
+       __be32                  nlink;
+       struct yfs_xdr_u64      size;
+       struct yfs_xdr_u64      data_version;
+       struct yfs_xdr_u64      author;
+       struct yfs_xdr_u64      owner;
+       struct yfs_xdr_u64      group;
+       __be32                  mode;
+       __be32                  caller_access;
+       __be32                  anon_access;
+       struct yfs_xdr_vnode    parent;
+       __be32                  data_access_protocol;
+       struct yfs_xdr_u64      mtime_client;
+       struct yfs_xdr_u64      mtime_server;
+       __be32                  lock_count;
+       __be32                  abort_code;
+} __packed;
+
+struct yfs_xdr_YFSCallBack {
+       __be32                  version;
+       struct yfs_xdr_u64      expiration_time;
+       __be32                  type;
+} __packed;
+
+struct yfs_xdr_YFSStoreStatus {
+       __be32                  mask;
+       __be32                  mode;
+       struct yfs_xdr_u64      mtime_client;
+       struct yfs_xdr_u64      owner;
+       struct yfs_xdr_u64      group;
+} __packed;
+
+struct yfs_xdr_RPCFlags {
+       __be32                  rpc_flags;
+} __packed;
+
+struct yfs_xdr_YFSVolSync {
+       struct yfs_xdr_u64      vol_creation_date;
+       struct yfs_xdr_u64      vol_update_date;
+       struct yfs_xdr_u64      max_quota;
+       struct yfs_xdr_u64      blocks_in_use;
+       struct yfs_xdr_u64      blocks_avail;
+} __packed;
+
+enum yfs_volume_type {
+       yfs_volume_type_ro = 0,
+       yfs_volume_type_rw = 1,
+};
+
+#define yfs_FVSOnline          0x1
+#define yfs_FVSInservice       0x2
+#define yfs_FVSBlessed         0x4
+#define yfs_FVSNeedsSalvage    0x8
+
+struct yfs_xdr_YFSFetchVolumeStatus {
+       struct yfs_xdr_u64      vid;
+       struct yfs_xdr_u64      parent_id;
+       __be32                  flags;
+       __be32                  type;
+       struct yfs_xdr_u64      max_quota;
+       struct yfs_xdr_u64      blocks_in_use;
+       struct yfs_xdr_u64      part_blocks_avail;
+       struct yfs_xdr_u64      part_max_blocks;
+       struct yfs_xdr_u64      vol_copy_date;
+       struct yfs_xdr_u64      vol_backup_date;
+} __packed;
+
+struct yfs_xdr_YFSStoreVolumeStatus {
+       __be32                  mask;
+       struct yfs_xdr_u64      min_quota;
+       struct yfs_xdr_u64      max_quota;
+       struct yfs_xdr_u64      file_quota;
+} __packed;
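All 64-bit quantities in the YFS wire format travel as two big-endian 32-bit words, most significant first, which is what the xdr_to_u64()/u64_to_xdr() helpers above pack and unpack. A quick illustrative round-trip:

    u64 v = 0x0000000100000002ULL;
    struct yfs_xdr_u64 x = u64_to_xdr(v);  /* x.msw = htonl(1), x.lsw = htonl(2) */

    WARN_ON(xdr_to_u64(x) != v);           /* converts back exactly */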
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 1faef56b12bd3f9591b2acc29ce89c6689e224e6..c3ae324781f846b8122b6b0a80085efaafdcaaba 100644
 #include "internal.h"
 #include "afs_fs.h"
 
-/*
- * Initialise a filesystem server cursor for iterating over FS servers.
- */
-static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode)
-{
-       memset(fc, 0, sizeof(*fc));
-}
-
 /*
  * Begin an operation on the fileserver.
  *
@@ -35,13 +27,14 @@ static void afs_init_fs_cursor(struct afs_fs_cursor *fc, struct afs_vnode *vnode
 bool afs_begin_vnode_operation(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
                               struct key *key)
 {
-       afs_init_fs_cursor(fc, vnode);
+       memset(fc, 0, sizeof(*fc));
        fc->vnode = vnode;
        fc->key = key;
        fc->ac.error = SHRT_MAX;
+       fc->error = -EDESTADDRREQ;
 
        if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
-               fc->ac.error = -EINTR;
+               fc->error = -EINTR;
                fc->flags |= AFS_FS_CURSOR_STOP;
                return false;
        }
@@ -65,12 +58,15 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
        fc->server_list = afs_get_serverlist(vnode->volume->servers);
        read_unlock(&vnode->volume->servers_lock);
 
+       fc->untried = (1UL << fc->server_list->nr_servers) - 1;
+       fc->index = READ_ONCE(fc->server_list->preferred);
+
        cbi = vnode->cb_interest;
        if (cbi) {
                /* See if the vnode's preferred record is still available */
                for (i = 0; i < fc->server_list->nr_servers; i++) {
                        if (fc->server_list->servers[i].cb_interest == cbi) {
-                               fc->start = i;
+                               fc->index = i;
                                goto found_interest;
                        }
                }
@@ -80,7 +76,7 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
                 * and have to return an error.
                 */
                if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
-                       fc->ac.error = -ESTALE;
+                       fc->error = -ESTALE;
                        return false;
                }
 
@@ -94,12 +90,9 @@ static bool afs_start_fs_iteration(struct afs_fs_cursor *fc,
 
                afs_put_cb_interest(afs_v2net(vnode), cbi);
                cbi = NULL;
-       } else {
-               fc->start = READ_ONCE(fc->server_list->index);
        }
 
 found_interest:
-       fc->index = fc->start;
        return true;
 }
 
@@ -117,7 +110,7 @@ static void afs_busy(struct afs_volume *volume, u32 abort_code)
        default:                m = "busy";             break;
        }
 
-       pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m);
+       pr_notice("kAFS: Volume %llu '%s' is %s\n", volume->vid, volume->name, m);
 }
 
 /*
@@ -127,7 +120,7 @@ static bool afs_sleep_and_retry(struct afs_fs_cursor *fc)
 {
        msleep_interruptible(1000);
        if (signal_pending(current)) {
-               fc->ac.error = -ERESTARTSYS;
+               fc->error = -ERESTARTSYS;
                return false;
        }
 
@@ -143,27 +136,33 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
        struct afs_addr_list *alist;
        struct afs_server *server;
        struct afs_vnode *vnode = fc->vnode;
+       struct afs_error e;
+       u32 rtt;
+       int error = fc->ac.error, i;
 
-       _enter("%u/%u,%u/%u,%d,%d",
-              fc->index, fc->start,
-              fc->ac.index, fc->ac.start,
-              fc->ac.error, fc->ac.abort_code);
+       _enter("%lx[%d],%lx[%d],%d,%d",
+              fc->untried, fc->index,
+              fc->ac.tried, fc->ac.index,
+              error, fc->ac.abort_code);
 
        if (fc->flags & AFS_FS_CURSOR_STOP) {
                _leave(" = f [stopped]");
                return false;
        }
 
+       fc->nr_iterations++;
+
        /* Evaluate the result of the previous operation, if there was one. */
-       switch (fc->ac.error) {
+       switch (error) {
        case SHRT_MAX:
                goto start;
 
        case 0:
        default:
                /* Success or local failure.  Stop. */
+               fc->error = error;
                fc->flags |= AFS_FS_CURSOR_STOP;
-               _leave(" = f [okay/local %d]", fc->ac.error);
+               _leave(" = f [okay/local %d]", error);
                return false;
 
        case -ECONNABORTED:
@@ -178,7 +177,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         * - May indicate that the fileserver couldn't attach to the vol.
                         */
                        if (fc->flags & AFS_FS_CURSOR_VNOVOL) {
-                               fc->ac.error = -EREMOTEIO;
+                               fc->error = -EREMOTEIO;
                                goto next_server;
                        }
 
@@ -187,12 +186,12 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                        write_unlock(&vnode->volume->servers_lock);
 
                        set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
-                       fc->ac.error = afs_check_volume_status(vnode->volume, fc->key);
-                       if (fc->ac.error < 0)
-                               goto failed;
+                       error = afs_check_volume_status(vnode->volume, fc->key);
+                       if (error < 0)
+                               goto failed_set_error;
 
                        if (test_bit(AFS_VOLUME_DELETED, &vnode->volume->flags)) {
-                               fc->ac.error = -ENOMEDIUM;
+                               fc->error = -ENOMEDIUM;
                                goto failed;
                        }
 
@@ -200,7 +199,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         * it's the fileserver having trouble.
                         */
                        if (vnode->volume->servers == fc->server_list) {
-                               fc->ac.error = -EREMOTEIO;
+                               fc->error = -EREMOTEIO;
                                goto next_server;
                        }
 
@@ -215,7 +214,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                case VONLINE:
                case VDISKFULL:
                case VOVERQUOTA:
-                       fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
+                       fc->error = afs_abort_to_error(fc->ac.abort_code);
                        goto next_server;
 
                case VOFFLINE:
@@ -224,11 +223,11 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                                clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
                        }
                        if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
-                               fc->ac.error = -EADV;
+                               fc->error = -EADV;
                                goto failed;
                        }
                        if (fc->flags & AFS_FS_CURSOR_CUR_ONLY) {
-                               fc->ac.error = -ESTALE;
+                               fc->error = -ESTALE;
                                goto failed;
                        }
                        goto busy;
@@ -240,7 +239,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         * have a file lock we need to maintain.
                         */
                        if (fc->flags & AFS_FS_CURSOR_NO_VSLEEP) {
-                               fc->ac.error = -EBUSY;
+                               fc->error = -EBUSY;
                                goto failed;
                        }
                        if (!test_and_set_bit(AFS_VOLUME_BUSY, &vnode->volume->flags)) {
@@ -269,16 +268,16 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         * honour, just in case someone sets up a loop.
                         */
                        if (fc->flags & AFS_FS_CURSOR_VMOVED) {
-                               fc->ac.error = -EREMOTEIO;
+                               fc->error = -EREMOTEIO;
                                goto failed;
                        }
                        fc->flags |= AFS_FS_CURSOR_VMOVED;
 
                        set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags);
                        set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
-                       fc->ac.error = afs_check_volume_status(vnode->volume, fc->key);
-                       if (fc->ac.error < 0)
-                               goto failed;
+                       error = afs_check_volume_status(vnode->volume, fc->key);
+                       if (error < 0)
+                               goto failed_set_error;
 
                        /* If the server list didn't change, then the VLDB is
                         * out of sync with the fileservers.  This is hopefully
@@ -290,7 +289,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                         * TODO: Retry a few times with sleeps.
                         */
                        if (vnode->volume->servers == fc->server_list) {
-                               fc->ac.error = -ENOMEDIUM;
+                               fc->error = -ENOMEDIUM;
                                goto failed;
                        }
 
@@ -299,20 +298,28 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
                default:
                        clear_bit(AFS_VOLUME_OFFLINE, &vnode->volume->flags);
                        clear_bit(AFS_VOLUME_BUSY, &vnode->volume->flags);
-                       fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
+                       fc->error = afs_abort_to_error(fc->ac.abort_code);
                        goto failed;
                }
 
+       case -ETIMEDOUT:
+       case -ETIME:
+               if (fc->error != -EDESTADDRREQ)
+                       goto iterate_address;
+               /* Fall through */
+       case -ERFKILL:
+       case -EADDRNOTAVAIL:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
+       case -EHOSTDOWN:
        case -ECONNREFUSED:
-       case -ETIMEDOUT:
-       case -ETIME:
                _debug("no conn");
+               fc->error = error;
                goto iterate_address;
 
        case -ECONNRESET:
                _debug("call reset");
+               fc->error = error;
                goto failed;
        }
 
@@ -328,15 +335,57 @@ start:
        /* See if we need to do an update of the volume record.  Note that the
         * volume may have moved or even have been deleted.
         */
-       fc->ac.error = afs_check_volume_status(vnode->volume, fc->key);
-       if (fc->ac.error < 0)
-               goto failed;
+       error = afs_check_volume_status(vnode->volume, fc->key);
+       if (error < 0)
+               goto failed_set_error;
 
        if (!afs_start_fs_iteration(fc, vnode))
                goto failed;
 
-use_server:
-       _debug("use");
+       _debug("__ VOL %llx __", vnode->volume->vid);
+       error = afs_probe_fileservers(afs_v2net(vnode), fc->key, fc->server_list);
+       if (error < 0)
+               goto failed_set_error;
+
+pick_server:
+       _debug("pick [%lx]", fc->untried);
+
+       error = afs_wait_for_fs_probes(fc->server_list, fc->untried);
+       if (error < 0)
+               goto failed_set_error;
+
+       /* Pick the untried server with the lowest RTT.  If we have outstanding
+        * callbacks, we stick with the server we're already using if we can.
+        */
+       if (fc->cbi) {
+               _debug("cbi %u", fc->index);
+               if (test_bit(fc->index, &fc->untried))
+                       goto selected_server;
+               afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
+               fc->cbi = NULL;
+               _debug("nocbi");
+       }
+
+       fc->index = -1;
+       rtt = U32_MAX;
+       for (i = 0; i < fc->server_list->nr_servers; i++) {
+               struct afs_server *s = fc->server_list->servers[i].server;
+
+               if (!test_bit(i, &fc->untried) || !s->probe.responded)
+                       continue;
+               if (s->probe.rtt < rtt) {
+                       fc->index = i;
+                       rtt = s->probe.rtt;
+               }
+       }
+
+       if (fc->index == -1)
+               goto no_more_servers;
+
+selected_server:
+       _debug("use %d", fc->index);
+       __clear_bit(fc->index, &fc->untried);
+
        /* We're starting on a different fileserver from the list.  We need to
         * check it, create a callback intercept, find its address list and
         * probe its capabilities before we use it.
@@ -354,10 +403,10 @@ use_server:
         * break request before we've finished decoding the reply and
         * installing the vnode.
         */
-       fc->ac.error = afs_register_server_cb_interest(vnode, fc->server_list,
-                                                      fc->index);
-       if (fc->ac.error < 0)
-               goto failed;
+       error = afs_register_server_cb_interest(vnode, fc->server_list,
+                                               fc->index);
+       if (error < 0)
+               goto failed_set_error;
 
        fc->cbi = afs_get_cb_interest(vnode->cb_interest);
 
@@ -369,66 +418,53 @@ use_server:
 
        memset(&fc->ac, 0, sizeof(fc->ac));
 
-       /* Probe the current fileserver if we haven't done so yet. */
-       if (!test_bit(AFS_SERVER_FL_PROBED, &server->flags)) {
-               fc->ac.alist = afs_get_addrlist(alist);
-
-               if (!afs_probe_fileserver(fc)) {
-                       switch (fc->ac.error) {
-                       case -ENOMEM:
-                       case -ERESTARTSYS:
-                       case -EINTR:
-                               goto failed;
-                       default:
-                               goto next_server;
-                       }
-               }
-       }
-
        if (!fc->ac.alist)
                fc->ac.alist = alist;
        else
                afs_put_addrlist(alist);
 
-       fc->ac.start = READ_ONCE(alist->index);
-       fc->ac.index = fc->ac.start;
+       fc->ac.index = -1;
 
 iterate_address:
        ASSERT(fc->ac.alist);
-       _debug("iterate %d/%d", fc->ac.index, fc->ac.alist->nr_addrs);
        /* Iterate over the current server's address list to try and find an
         * address on which it will respond to us.
         */
        if (!afs_iterate_addresses(&fc->ac))
                goto next_server;
 
+       _debug("address [%u] %u/%u", fc->index, fc->ac.index, fc->ac.alist->nr_addrs);
+
        _leave(" = t");
        return true;
 
 next_server:
        _debug("next");
        afs_end_cursor(&fc->ac);
-       afs_put_cb_interest(afs_v2net(vnode), fc->cbi);
-       fc->cbi = NULL;
-       fc->index++;
-       if (fc->index >= fc->server_list->nr_servers)
-               fc->index = 0;
-       if (fc->index != fc->start)
-               goto use_server;
+       goto pick_server;
 
+no_more_servers:
        /* That's all the servers poked to no good effect.  Try again if some
         * of them were busy.
         */
        if (fc->flags & AFS_FS_CURSOR_VBUSY)
                goto restart_from_beginning;
 
-       fc->ac.error = -EDESTADDRREQ;
-       goto failed;
+       e.error = -EDESTADDRREQ;
+       e.responded = false;
+       for (i = 0; i < fc->server_list->nr_servers; i++) {
+               struct afs_server *s = fc->server_list->servers[i].server;
+
+               afs_prioritise_error(&e, READ_ONCE(s->probe.error),
+                                    s->probe.abort_code);
+       }
+
+       error = e.error;
 
+failed_set_error:
+       fc->error = error;
 failed:
        fc->flags |= AFS_FS_CURSOR_STOP;
        afs_end_cursor(&fc->ac);
-       _leave(" = f [failed %d]", fc->ac.error);
+       _leave(" = f [failed %d]", fc->error);
        return false;
 }
 
@@ -442,13 +478,14 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
        struct afs_vnode *vnode = fc->vnode;
        struct afs_cb_interest *cbi = vnode->cb_interest;
        struct afs_addr_list *alist;
+       int error = fc->ac.error;
 
        _enter("");
 
-       switch (fc->ac.error) {
+       switch (error) {
        case SHRT_MAX:
                if (!cbi) {
-                       fc->ac.error = -ESTALE;
+                       fc->error = -ESTALE;
                        fc->flags |= AFS_FS_CURSOR_STOP;
                        return false;
                }
@@ -461,35 +498,40 @@ bool afs_select_current_fileserver(struct afs_fs_cursor *fc)
                afs_get_addrlist(alist);
                read_unlock(&cbi->server->fs_lock);
                if (!alist) {
-                       fc->ac.error = -ESTALE;
+                       fc->error = -ESTALE;
                        fc->flags |= AFS_FS_CURSOR_STOP;
                        return false;
                }
 
                memset(&fc->ac, 0, sizeof(fc->ac));
                fc->ac.alist = alist;
-               fc->ac.start = READ_ONCE(alist->index);
-               fc->ac.index = fc->ac.start;
+               fc->ac.index = -1;
                goto iterate_address;
 
        case 0:
        default:
                /* Success or local failure.  Stop. */
+               fc->error = error;
                fc->flags |= AFS_FS_CURSOR_STOP;
-               _leave(" = f [okay/local %d]", fc->ac.error);
+               _leave(" = f [okay/local %d]", error);
                return false;
 
        case -ECONNABORTED:
+               fc->error = afs_abort_to_error(fc->ac.abort_code);
                fc->flags |= AFS_FS_CURSOR_STOP;
                _leave(" = f [abort]");
                return false;
 
+       case -ERFKILL:
+       case -EADDRNOTAVAIL:
        case -ENETUNREACH:
        case -EHOSTUNREACH:
+       case -EHOSTDOWN:
        case -ECONNREFUSED:
        case -ETIMEDOUT:
        case -ETIME:
                _debug("no conn");
+               fc->error = error;
                goto iterate_address;
        }
 
@@ -506,13 +548,67 @@ iterate_address:
        return false;
 }
 
+/*
+ * Dump cursor state in the case of the error being EDESTADDRREQ.
+ */
+static void afs_dump_edestaddrreq(const struct afs_fs_cursor *fc)
+{
+       static int count;
+       int i;
+
+       if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3)
+               return;
+       count++;
+
+       rcu_read_lock();
+
+       pr_notice("EDESTADDR occurred\n");
+       pr_notice("FC: cbb=%x cbb2=%x fl=%hx err=%hd\n",
+                 fc->cb_break, fc->cb_break_2, fc->flags, fc->error);
+       pr_notice("FC: ut=%lx ix=%d ni=%u\n",
+                 fc->untried, fc->index, fc->nr_iterations);
+
+       if (fc->server_list) {
+               const struct afs_server_list *sl = fc->server_list;
+               pr_notice("FC: SL nr=%u pr=%u vnov=%hx\n",
+                         sl->nr_servers, sl->preferred, sl->vnovol_mask);
+               for (i = 0; i < sl->nr_servers; i++) {
+                       const struct afs_server *s = sl->servers[i].server;
+                       pr_notice("FC: server fl=%lx av=%u %pU\n",
+                                 s->flags, s->addr_version, &s->uuid);
+                       if (s->addresses) {
+                               const struct afs_addr_list *a =
+                                       rcu_dereference(s->addresses);
+                               pr_notice("FC:  - av=%u nr=%u/%u/%u pr=%u\n",
+                                         a->version,
+                                         a->nr_ipv4, a->nr_addrs, a->max_addrs,
+                                         a->preferred);
+                               pr_notice("FC:  - pr=%lx R=%lx F=%lx\n",
+                                         a->probed, a->responded, a->failed);
+                               if (a == fc->ac.alist)
+                                       pr_notice("FC:  - current\n");
+                       }
+               }
+       }
+
+       pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
+                 fc->ac.tried, fc->ac.index, fc->ac.abort_code, fc->ac.error,
+                 fc->ac.responded, fc->ac.nr_iterations);
+       rcu_read_unlock();
+}
+
 /*
  * Tidy up a filesystem cursor and unlock the vnode.
  */
 int afs_end_vnode_operation(struct afs_fs_cursor *fc)
 {
        struct afs_net *net = afs_v2net(fc->vnode);
-       int ret;
+
+       if (fc->error == -EDESTADDRREQ ||
+           fc->error == -EADDRNOTAVAIL ||
+           fc->error == -ENETUNREACH ||
+           fc->error == -EHOSTUNREACH)
+               afs_dump_edestaddrreq(fc);
 
        mutex_unlock(&fc->vnode->io_lock);
 
@@ -520,9 +616,8 @@ int afs_end_vnode_operation(struct afs_fs_cursor *fc)
        afs_put_cb_interest(net, fc->cbi);
        afs_put_serverlist(net, fc->server_list);
 
-       ret = fc->ac.error;
-       if (ret == -ECONNABORTED)
-               afs_abort_to_error(fc->ac.abort_code);
+       if (fc->error == -ECONNABORTED)
+               fc->error = afs_abort_to_error(fc->ac.abort_code);
 
-       return fc->ac.error;
+       return fc->error;
 }
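The rotation now tracks candidates with a bitmask seeded with one bit per server and picks the untried server with the lowest probed RTT, instead of walking a start/index ring. In miniature, with three hypothetical servers and made-up probe results:

    unsigned long untried = (1UL << 3) - 1;  /* servers 0-2 untried: 0b111 */
    u32 rtt[3] = { 40000, 12000, 75000 };    /* as filled in by fs_probe */
    u32 best_rtt = U32_MAX;
    int i, best = -1;

    for (i = 0; i < 3; i++) {
        if (!test_bit(i, &untried))
            continue;
        if (rtt[i] < best_rtt) {
            best = i;
            best_rtt = rtt[i];
        }
    }
    /* best == 1; clear its bit before issuing the call */
    __clear_bit(best, &untried);             /* untried is now 0b101 */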
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 77a83790a31f38c9e25ffeaa1c190eb8958e7fa9..a7b44863d502e95cbb28a1f7ed2f2a17d7ba1043 100644
@@ -16,6 +16,7 @@
 #include <net/af_rxrpc.h>
 #include "internal.h"
 #include "afs_cm.h"
+#include "protocol_yfs.h"
 
 struct workqueue_struct *afs_async_calls;
 
@@ -75,6 +76,18 @@ int afs_open_socket(struct afs_net *net)
        if (ret < 0)
                goto error_2;
 
+       srx.srx_service = YFS_CM_SERVICE;
+       ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
+       if (ret < 0)
+               goto error_2;
+
+       /* Ideally, we'd turn on service upgrade here, but we can't because
+        * OpenAFS is buggy and leaks the userStatus field from packet to
+        * packet and between FS packets and CB packets - so if we try to do an
+        * upgrade on an FS packet, OpenAFS will leak that into the CB packet
+        * it sends back to us.
+        */
+
        rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
                                           afs_rx_discard_new_call);
 
@@ -143,6 +156,7 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
        INIT_WORK(&call->async_work, afs_process_async_call);
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->state_lock);
+       call->_iter = &call->iter;
 
        o = atomic_inc_return(&net->nr_outstanding_calls);
        trace_afs_call(call, afs_call_trace_alloc, 1, o,
@@ -176,6 +190,7 @@ void afs_put_call(struct afs_call *call)
 
                afs_put_server(call->net, call->cm_server);
                afs_put_cb_interest(call->net, call->cbi);
+               afs_put_addrlist(call->alist);
                kfree(call->request);
 
                trace_afs_call(call, afs_call_trace_free, 0, o,
@@ -189,21 +204,22 @@ void afs_put_call(struct afs_call *call)
 }
 
 /*
- * Queue the call for actual work.  Returns 0 unconditionally for convenience.
+ * Queue the call for actual work.
  */
-int afs_queue_call_work(struct afs_call *call)
+static void afs_queue_call_work(struct afs_call *call)
 {
-       int u = atomic_inc_return(&call->usage);
+       if (call->type->work) {
+               int u = atomic_inc_return(&call->usage);
 
-       trace_afs_call(call, afs_call_trace_work, u,
-                      atomic_read(&call->net->nr_outstanding_calls),
-                      __builtin_return_address(0));
+               trace_afs_call(call, afs_call_trace_work, u,
+                              atomic_read(&call->net->nr_outstanding_calls),
+                              __builtin_return_address(0));
 
-       INIT_WORK(&call->work, call->type->work);
+               INIT_WORK(&call->work, call->type->work);
 
-       if (!queue_work(afs_wq, &call->work))
-               afs_put_call(call);
-       return 0;
+               if (!queue_work(afs_wq, &call->work))
+                       afs_put_call(call);
+       }
 }
 
 /*
@@ -233,6 +249,7 @@ struct afs_call *afs_alloc_flat_call(struct afs_net *net,
                        goto nomem_free;
        }
 
+       afs_extract_to_buf(call, call->reply_max);
        call->operation_ID = type->op;
        init_waitqueue_head(&call->waitq);
        return call;
@@ -286,7 +303,7 @@ static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
                offset = 0;
        }
 
-       iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
+       iov_iter_bvec(&msg->msg_iter, WRITE, bv, nr, bytes);
 }
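The recurring WRITE | ITER_BVEC -> WRITE (and READ | ITER_KVEC -> READ) churn in this file tracks the 4.20 iov_iter rework: the constructors now imply the iterator type themselves and take only the data direction. Setting up a kvec iterator is therefore just:

    struct kvec iov = { .iov_base = buf, .iov_len = len };
    struct iov_iter iter;

    iov_iter_kvec(&iter, READ, &iov, 1, len);  /* type implied by constructor */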
 
 /*
@@ -342,7 +359,7 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
 long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
                   gfp_t gfp, bool async)
 {
-       struct sockaddr_rxrpc *srx = ac->addr;
+       struct sockaddr_rxrpc *srx = &ac->alist->addrs[ac->index];
        struct rxrpc_call *rxcall;
        struct msghdr msg;
        struct kvec iov[1];
@@ -359,6 +376,8 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
               atomic_read(&call->net->nr_outstanding_calls));
 
        call->async = async;
+       call->addr_ix = ac->index;
+       call->alist = afs_get_addrlist(ac->alist);
 
        /* Work out the length we're going to transmit.  This is awkward for
         * calls such as FS.StoreData where there's an extra injection of data
@@ -390,6 +409,7 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
                                         call->debug_id);
        if (IS_ERR(rxcall)) {
                ret = PTR_ERR(rxcall);
+               call->error = ret;
                goto error_kill_call;
        }
 
@@ -401,8 +421,7 @@ long afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call,
 
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1,
-                     call->request_size);
+       iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = MSG_WAITALL | (call->send_pages ? MSG_MORE : 0);
@@ -432,7 +451,7 @@ error_do_abort:
                rxrpc_kernel_abort_call(call->net->socket, rxcall,
                                        RX_USER_ABORT, ret, "KSD");
        } else {
-               iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, NULL, 0, 0);
+               iov_iter_kvec(&msg.msg_iter, READ, NULL, 0, 0);
                rxrpc_kernel_recv_data(call->net->socket, rxcall,
                                       &msg.msg_iter, false,
                                       &call->abort_code, &call->service_id);
@@ -442,6 +461,8 @@ error_do_abort:
        call->error = ret;
        trace_afs_call_done(call);
 error_kill_call:
+       if (call->type->done)
+               call->type->done(call);
        afs_put_call(call);
        ac->error = ret;
        _leave(" = %d", ret);
@@ -466,14 +487,12 @@ static void afs_deliver_to_call(struct afs_call *call)
               state == AFS_CALL_SV_AWAIT_ACK
               ) {
                if (state == AFS_CALL_SV_AWAIT_ACK) {
-                       struct iov_iter iter;
-
-                       iov_iter_kvec(&iter, READ | ITER_KVEC, NULL, 0, 0);
+                       iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
                        ret = rxrpc_kernel_recv_data(call->net->socket,
-                                                    call->rxcall, &iter, false,
-                                                    &remote_abort,
+                                                    call->rxcall, &call->iter,
+                                                    false, &remote_abort,
                                                     &call->service_id);
-                       trace_afs_recv_data(call, 0, 0, false, ret);
+                       trace_afs_receive_data(call, &call->iter, false, ret);
 
                        if (ret == -EINPROGRESS || ret == -EAGAIN)
                                return;
@@ -485,10 +504,17 @@ static void afs_deliver_to_call(struct afs_call *call)
                        return;
                }
 
+               if (call->want_reply_time &&
+                   rxrpc_kernel_get_reply_time(call->net->socket,
+                                               call->rxcall,
+                                               &call->reply_time))
+                       call->want_reply_time = false;
+
                ret = call->type->deliver(call);
                state = READ_ONCE(call->state);
                switch (ret) {
                case 0:
+                       afs_queue_call_work(call);
                        if (state == AFS_CALL_CL_PROC_REPLY) {
                                if (call->cbi)
                                        set_bit(AFS_SERVER_FL_MAY_HAVE_CB,
@@ -500,7 +526,6 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -EINPROGRESS:
                case -EAGAIN:
                        goto out;
-               case -EIO:
                case -ECONNABORTED:
                        ASSERTCMP(state, ==, AFS_CALL_COMPLETE);
                        goto done;
@@ -509,6 +534,10 @@ static void afs_deliver_to_call(struct afs_call *call)
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
                                                abort_code, ret, "KIV");
                        goto local_abort;
+               case -EIO:
+                       pr_err("kAFS: Call %u in bad state %u\n",
+                              call->debug_id, state);
+                       /* Fall through */
                case -ENODATA:
                case -EBADMSG:
                case -EMSGSIZE:
@@ -517,12 +546,14 @@ static void afs_deliver_to_call(struct afs_call *call)
                        if (state != AFS_CALL_CL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, -EBADMSG, "KUM");
+                                               abort_code, ret, "KUM");
                        goto local_abort;
                }
        }
 
 done:
+       if (call->type->done)
+               call->type->done(call);
        if (state == AFS_CALL_COMPLETE && call->incoming)
                afs_put_call(call);
 out:
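The ->done hook gives a call type a completion notification that runs exactly once, on success or failure; judging from the declarations added to internal.h, the new probe code collects its results this way. A hedged sketch of a call type wired up like that; the type name and deliver routine are hypothetical:

    static const struct afs_call_type example_probe_call_type = {
        .name       = "EX.Probe",
        .deliver    = example_deliver,              /* hypothetical */
        .done       = afs_fileserver_probe_result,
        .destructor = afs_flat_call_destructor,
    };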
@@ -545,6 +576,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 {
        signed long rtt2, timeout;
        long ret;
+       bool stalled = false;
        u64 rtt;
        u32 life, last_life;
 
@@ -578,12 +610,20 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
 
                life = rxrpc_kernel_check_life(call->net->socket, call->rxcall);
                if (timeout == 0 &&
-                   life == last_life && signal_pending(current))
+                   life == last_life && signal_pending(current)) {
+                       if (stalled)
                                break;
+                       __set_current_state(TASK_RUNNING);
+                       rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
+                       timeout = rtt2;
+                       stalled = true;
+                       continue;
+               }
 
                if (life != last_life) {
                        timeout = rtt2;
                        last_life = life;
+                       stalled = false;
                }
 
                timeout = schedule_timeout(timeout);
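rxrpc_kernel_check_life() yields a counter that advances whenever the call sees traffic, so an unchanged value after a full timeout means the call is genuinely silent, not merely slow. Rather than letting the first pending signal kill a possibly healthy call, the loop pings the peer once and grants one more RTT-scaled wait; condensed, the added logic amounts to:

    if (timeout == 0 && life == last_life && signal_pending(current)) {
        if (stalled)
            break;                      /* stalled twice in a row: give up */
        __set_current_state(TASK_RUNNING);
        rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
        timeout = rtt2;                 /* one more round trip's grace */
        stalled = true;
        continue;
    }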
@@ -728,6 +768,7 @@ void afs_charge_preallocation(struct work_struct *work)
                        call->async = true;
                        call->state = AFS_CALL_SV_AWAIT_OP_ID;
                        init_waitqueue_head(&call->waitq);
+                       afs_extract_to_tmp(call);
                }
 
                if (rxrpc_kernel_charge_accept(net->socket,
@@ -773,18 +814,15 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
 {
        int ret;
 
-       _enter("{%zu}", call->offset);
-
-       ASSERTCMP(call->offset, <, 4);
+       _enter("{%zu}", iov_iter_count(call->_iter));
 
        /* the operation ID forms the first four bytes of the request data */
-       ret = afs_extract_data(call, &call->tmp, 4, true);
+       ret = afs_extract_data(call, true);
        if (ret < 0)
                return ret;
 
        call->operation_ID = ntohl(call->tmp);
        afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST);
-       call->offset = 0;
 
        /* ask the cache manager to route the call (it'll change the call type
         * if successful) */
@@ -825,7 +863,7 @@ void afs_send_empty_reply(struct afs_call *call)
 
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
+       iov_iter_kvec(&msg.msg_iter, WRITE, NULL, 0, 0);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
@@ -864,7 +902,7 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        iov[0].iov_len          = len;
        msg.msg_name            = NULL;
        msg.msg_namelen         = 0;
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iov, 1, len);
+       iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, len);
        msg.msg_control         = NULL;
        msg.msg_controllen      = 0;
        msg.msg_flags           = 0;
@@ -888,30 +926,19 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
 /*
  * Extract a piece of data from the received data socket buffers.
  */
-int afs_extract_data(struct afs_call *call, void *buf, size_t count,
-                    bool want_more)
+int afs_extract_data(struct afs_call *call, bool want_more)
 {
        struct afs_net *net = call->net;
-       struct iov_iter iter;
-       struct kvec iov;
+       struct iov_iter *iter = call->_iter;
        enum afs_call_state state;
        u32 remote_abort = 0;
        int ret;
 
-       _enter("{%s,%zu},,%zu,%d",
-              call->type->name, call->offset, count, want_more);
-
-       ASSERTCMP(call->offset, <=, count);
-
-       iov.iov_base = buf + call->offset;
-       iov.iov_len = count - call->offset;
-       iov_iter_kvec(&iter, ITER_KVEC | READ, &iov, 1, count - call->offset);
+       _enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more);
 
-       ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, &iter,
+       ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
                                     want_more, &remote_abort,
                                     &call->service_id);
-       call->offset += (count - call->offset) - iov_iter_count(&iter);
-       trace_afs_recv_data(call, count, call->offset, want_more, ret);
        if (ret == 0 || ret == -EAGAIN)
                return ret;
 
@@ -926,7 +953,7 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
                        break;
                case AFS_CALL_COMPLETE:
                        kdebug("prem complete %d", call->error);
-                       return -EIO;
+                       return afs_io_error(call, afs_io_error_extract);
                default:
                        break;
                }
@@ -940,8 +967,9 @@ int afs_extract_data(struct afs_call *call, void *buf, size_t count,
 /*
  * Log protocol error production.
  */
-noinline int afs_protocol_error(struct afs_call *call, int error)
+noinline int afs_protocol_error(struct afs_call *call, int error,
+                               enum afs_eproto_cause cause)
 {
-       trace_afs_protocol_error(call, error, __builtin_return_address(0));
+       trace_afs_protocol_error(call, error, cause);
        return error;
 }
index 81dfedb7879ff9bf56ab4fcca26ef6c90d835de2..5f58a9a17e694a09dbe0d0b70d9dbc0cc9833aa4 100644 (file)
@@ -126,7 +126,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
        bool changed = false;
        int i, j;
 
-       _enter("{%x:%u},%x,%x",
+       _enter("{%llx:%llu},%x,%x",
               vnode->fid.vid, vnode->fid.vnode, key_serial(key), caller_access);
 
        rcu_read_lock();
@@ -147,7 +147,8 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                                        break;
                                }
 
-                               if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest)) {
+                               if (afs_cb_is_broken(cb_break, vnode,
+                                                    vnode->cb_interest)) {
                                        changed = true;
                                        break;
                                }
@@ -177,7 +178,7 @@ void afs_cache_permit(struct afs_vnode *vnode, struct key *key,
                }
        }
 
-       if (cb_break != afs_cb_break_sum(vnode, vnode->cb_interest))
+       if (afs_cb_is_broken(cb_break, vnode, vnode->cb_interest))
                goto someone_else_changed_it;
 
        /* We need a ref on any permits list we want to copy as we'll have to
@@ -256,7 +257,7 @@ found:
 
        spin_lock(&vnode->lock);
        zap = rcu_access_pointer(vnode->permit_cache);
-       if (cb_break == afs_cb_break_sum(vnode, vnode->cb_interest) &&
+       if (!afs_cb_is_broken(cb_break, vnode, vnode->cb_interest) &&
            zap == permits)
                rcu_assign_pointer(vnode->permit_cache, replacement);
        else
@@ -289,7 +290,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key,
        bool valid = false;
        int i, ret;
 
-       _enter("{%x:%u},%x",
+       _enter("{%llx:%llu},%x",
               vnode->fid.vid, vnode->fid.vnode, key_serial(key));
 
        /* check the permits to see if we've got one yet */
@@ -349,7 +350,7 @@ int afs_permission(struct inode *inode, int mask)
        if (mask & MAY_NOT_BLOCK)
                return -ECHILD;
 
-       _enter("{{%x:%u},%lx},%x,",
+       _enter("{{%llx:%llu},%lx},%x,",
               vnode->fid.vid, vnode->fid.vnode, vnode->flags, mask);
 
        key = afs_request_key(vnode->volume->cell);
index 1d329e6981d515c06bb5b711a1e3880226c2cce8..642afa2e9783c4f95284980dd8054610fa4d49cf 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include "afs_fs.h"
 #include "internal.h"
+#include "protocol_yfs.h"
 
 static unsigned afs_server_gc_delay = 10;      /* Server record timeout in seconds */
 static unsigned afs_server_update_delay = 30;  /* Time till VLDB recheck in secs */
@@ -230,6 +231,8 @@ static struct afs_server *afs_alloc_server(struct afs_net *net,
        rwlock_init(&server->fs_lock);
        INIT_HLIST_HEAD(&server->cb_volumes);
        rwlock_init(&server->cb_break_lock);
+       init_waitqueue_head(&server->probe_wq);
+       spin_lock_init(&server->probe_lock);
 
        afs_inc_servers_outstanding(net);
        _leave(" = %p", server);
@@ -246,41 +249,23 @@ enomem:
 static struct afs_addr_list *afs_vl_lookup_addrs(struct afs_cell *cell,
                                                 struct key *key, const uuid_t *uuid)
 {
-       struct afs_addr_cursor ac;
-       struct afs_addr_list *alist;
+       struct afs_vl_cursor vc;
+       struct afs_addr_list *alist = NULL;
        int ret;
 
-       ret = afs_set_vl_cursor(&ac, cell);
-       if (ret < 0)
-               return ERR_PTR(ret);
-
-       while (afs_iterate_addresses(&ac)) {
-               if (test_bit(ac.index, &ac.alist->yfs))
-                       alist = afs_yfsvl_get_endpoints(cell->net, &ac, key, uuid);
-               else
-                       alist = afs_vl_get_addrs_u(cell->net, &ac, key, uuid);
-               switch (ac.error) {
-               case 0:
-                       afs_end_cursor(&ac);
-                       return alist;
-               case -ECONNABORTED:
-                       ac.error = afs_abort_to_error(ac.abort_code);
-                       goto error;
-               case -ENOMEM:
-               case -ENONET:
-                       goto error;
-               case -ENETUNREACH:
-               case -EHOSTUNREACH:
-               case -ECONNREFUSED:
-                       break;
-               default:
-                       ac.error = -EIO;
-                       goto error;
+       ret = -ERESTARTSYS;
+       if (afs_begin_vlserver_operation(&vc, cell, key)) {
+               while (afs_select_vlserver(&vc)) {
+                       if (test_bit(AFS_VLSERVER_FL_IS_YFS, &vc.server->flags))
+                               alist = afs_yfsvl_get_endpoints(&vc, uuid);
+                       else
+                               alist = afs_vl_get_addrs_u(&vc, uuid);
                }
+
+               ret = afs_end_vlserver_operation(&vc);
        }
 
-error:
-       return ERR_PTR(afs_end_cursor(&ac));
+       return ret < 0 ? ERR_PTR(ret) : alist;
 }
 
 /*
@@ -382,9 +367,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
        struct afs_addr_list *alist = rcu_access_pointer(server->addresses);
        struct afs_addr_cursor ac = {
                .alist  = alist,
-               .start  = alist->index,
-               .index  = 0,
-               .addr   = &alist->addrs[alist->index],
+               .index  = alist->preferred,
                .error  = 0,
        };
        _enter("%p", server);
@@ -392,6 +375,9 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
        if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
                afs_fs_give_up_all_callbacks(net, server, &ac, NULL);
 
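+       /* Wait for any outstanding probe calls to complete before the server
+        * record is disposed of.
+        */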
+       wait_var_event(&server->probe_outstanding,
+                      atomic_read(&server->probe_outstanding) == 0);
+
        call_rcu(&server->rcu, afs_server_rcu);
        afs_dec_servers_outstanding(net);
 }
@@ -524,99 +510,6 @@ void afs_purge_servers(struct afs_net *net)
        _leave("");
 }
 
-/*
- * Probe a fileserver to find its capabilities.
- *
- * TODO: Try service upgrade.
- */
-static bool afs_do_probe_fileserver(struct afs_fs_cursor *fc)
-{
-       _enter("");
-
-       fc->ac.addr = NULL;
-       fc->ac.start = READ_ONCE(fc->ac.alist->index);
-       fc->ac.index = fc->ac.start;
-       fc->ac.error = 0;
-       fc->ac.begun = false;
-
-       while (afs_iterate_addresses(&fc->ac)) {
-               afs_fs_get_capabilities(afs_v2net(fc->vnode), fc->cbi->server,
-                                       &fc->ac, fc->key);
-               switch (fc->ac.error) {
-               case 0:
-                       afs_end_cursor(&fc->ac);
-                       set_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags);
-                       return true;
-               case -ECONNABORTED:
-                       fc->ac.error = afs_abort_to_error(fc->ac.abort_code);
-                       goto error;
-               case -ENOMEM:
-               case -ENONET:
-                       goto error;
-               case -ENETUNREACH:
-               case -EHOSTUNREACH:
-               case -ECONNREFUSED:
-               case -ETIMEDOUT:
-               case -ETIME:
-                       break;
-               default:
-                       fc->ac.error = -EIO;
-                       goto error;
-               }
-       }
-
-error:
-       afs_end_cursor(&fc->ac);
-       return false;
-}
-
-/*
- * If we haven't already, try probing the fileserver to get its capabilities.
- * We try not to instigate parallel probes, but it's possible that the parallel
- * probes will fail due to authentication failure when ours would succeed.
- *
- * TODO: Try sending an anonymous probe if an authenticated probe fails.
- */
-bool afs_probe_fileserver(struct afs_fs_cursor *fc)
-{
-       bool success;
-       int ret, retries = 0;
-
-       _enter("");
-
-retry:
-       if (test_bit(AFS_SERVER_FL_PROBED, &fc->cbi->server->flags)) {
-               _leave(" = t");
-               return true;
-       }
-
-       if (!test_and_set_bit_lock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags)) {
-               success = afs_do_probe_fileserver(fc);
-               clear_bit_unlock(AFS_SERVER_FL_PROBING, &fc->cbi->server->flags);
-               wake_up_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING);
-               _leave(" = t");
-               return success;
-       }
-
-       _debug("wait");
-       ret = wait_on_bit(&fc->cbi->server->flags, AFS_SERVER_FL_PROBING,
-                         TASK_INTERRUPTIBLE);
-       if (ret == -ERESTARTSYS) {
-               fc->ac.error = ret;
-               _leave(" = f [%d]", ret);
-               return false;
-       }
-
-       retries++;
-       if (retries == 4) {
-               fc->ac.error = -ESTALE;
-               _leave(" = f [stale]");
-               return false;
-       }
-       _debug("retry");
-       goto retry;
-}
-
 /*
  * Get an update for a server's address list.
  */
index 8a5760aa583213a608d686b60f0782ecbed648e1..95d0761cdb34ef3c0a214693651292ba08b2def1 100644 (file)
@@ -118,11 +118,11 @@ bool afs_annotate_server_list(struct afs_server_list *new,
        return false;
 
 changed:
-       /* Maintain the same current server as before if possible. */
-       cur = old->servers[old->index].server;
+       /* Maintain the same preferred server as before if possible. */
+       cur = old->servers[old->preferred].server;
        for (j = 0; j < new->nr_servers; j++) {
                if (new->servers[j].server == cur) {
-                       new->index = j;
+                       new->preferred = j;
                        break;
                }
        }
index 4d3e274207fb7aa05aa320b957a03911984cf67d..dcd07fe99871b9b38541293b164ac1cf8328a005 100644 (file)
@@ -406,10 +406,11 @@ static int afs_fill_super(struct super_block *sb,
                inode = afs_iget_pseudo_dir(sb, true);
                sb->s_flags     |= SB_RDONLY;
        } else {
-               sprintf(sb->s_id, "%u", as->volume->vid);
+               sprintf(sb->s_id, "%llu", as->volume->vid);
                afs_activate_volume(as->volume);
                fid.vid         = as->volume->vid;
                fid.vnode       = 1;
+               fid.vnode_hi    = 0;
                fid.unique      = 1;
                inode = afs_iget(sb, params->key, &fid, NULL, NULL, NULL);
        }
@@ -663,7 +664,7 @@ static void afs_destroy_inode(struct inode *inode)
 {
        struct afs_vnode *vnode = AFS_FS_I(inode);
 
-       _enter("%p{%x:%u}", inode, vnode->fid.vid, vnode->fid.vnode);
+       _enter("%p{%llx:%llu}", inode, vnode->fid.vid, vnode->fid.vnode);
 
        _debug("DESTROY INODE %p", inode);
 
diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c
new file mode 100644 (file)
index 0000000..b4f1a84
--- /dev/null
@@ -0,0 +1,340 @@
+/* AFS vlserver list management.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len,
+                                       unsigned short port)
+{
+       struct afs_vlserver *vlserver;
+
+       vlserver = kzalloc(struct_size(vlserver, name, name_len + 1),
+                          GFP_KERNEL);
+       if (vlserver) {
+               atomic_set(&vlserver->usage, 1);
+               rwlock_init(&vlserver->lock);
+               init_waitqueue_head(&vlserver->probe_wq);
+               spin_lock_init(&vlserver->probe_lock);
+               vlserver->name_len = name_len;
+               vlserver->port = port;
+               memcpy(vlserver->name, name, name_len);
+       }
+       return vlserver;
+}
+
+static void afs_vlserver_rcu(struct rcu_head *rcu)
+{
+       struct afs_vlserver *vlserver = container_of(rcu, struct afs_vlserver, rcu);
+
+       afs_put_addrlist(rcu_access_pointer(vlserver->addresses));
+       kfree_rcu(vlserver, rcu);
+}
+
+void afs_put_vlserver(struct afs_net *net, struct afs_vlserver *vlserver)
+{
+       if (vlserver) {
+               unsigned int u = atomic_dec_return(&vlserver->usage);
+               //_debug("VL PUT %p{%u}", vlserver, u);
+
+               if (u == 0)
+                       call_rcu(&vlserver->rcu, afs_vlserver_rcu);
+       }
+}
+
+struct afs_vlserver_list *afs_alloc_vlserver_list(unsigned int nr_servers)
+{
+       struct afs_vlserver_list *vllist;
+
+       vllist = kzalloc(struct_size(vllist, servers, nr_servers), GFP_KERNEL);
+       if (vllist) {
+               atomic_set(&vllist->usage, 1);
+               rwlock_init(&vllist->lock);
+       }
+
+       return vllist;
+}
+
+void afs_put_vlserverlist(struct afs_net *net, struct afs_vlserver_list *vllist)
+{
+       if (vllist) {
+               unsigned int u = atomic_dec_return(&vllist->usage);
+
+               //_debug("VLLS PUT %p{%u}", vllist, u);
+               if (u == 0) {
+                       int i;
+
+                       for (i = 0; i < vllist->nr_servers; i++) {
+                               afs_put_vlserver(net, vllist->servers[i].server);
+                       }
+                       kfree_rcu(vllist, rcu);
+               }
+       }
+}
+
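+/* Read a 16-bit little-endian value from the DNS payload and advance the
+ * cursor past it.
+ */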
+static u16 afs_extract_le16(const u8 **_b)
+{
+       u16 val;
+
+       val  = (u16)*(*_b)++ << 0;
+       val |= (u16)*(*_b)++ << 8;
+       return val;
+}
+
+/*
+ * Build a VL server address list from a DNS-queried server list.
+ */
+static struct afs_addr_list *afs_extract_vl_addrs(const u8 **_b, const u8 *end,
+                                                 u8 nr_addrs, u16 port)
+{
+       struct afs_addr_list *alist;
+       const u8 *b = *_b;
+       int ret = -EINVAL;
+
+       alist = afs_alloc_addrlist(nr_addrs, VL_SERVICE, port);
+       if (!alist)
+               return ERR_PTR(-ENOMEM);
+       if (nr_addrs == 0)
+               return alist;
+
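+       /* Require at least one byte of payload (the address type octet) for
+        * each remaining address before parsing another record.
+        */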
+       for (; nr_addrs > 0 && end - b >= nr_addrs; nr_addrs--) {
+               struct dns_server_list_v1_address hdr;
+               __be32 x[4];
+
+               hdr.address_type = *b++;
+
+               switch (hdr.address_type) {
+               case DNS_ADDRESS_IS_IPV4:
+                       if (end - b < 4) {
+                               _leave(" = -EINVAL [short inet]");
+                               goto error;
+                       }
+                       memcpy(x, b, 4);
+                       afs_merge_fs_addr4(alist, x[0], port);
+                       b += 4;
+                       break;
+
+               case DNS_ADDRESS_IS_IPV6:
+                       if (end - b < 16) {
+                               _leave(" = -EINVAL [short inet6]");
+                               goto error;
+                       }
+                       memcpy(x, b, 16);
+                       afs_merge_fs_addr6(alist, x, port);
+                       b += 16;
+                       break;
+
+               default:
+                       _leave(" = -EADDRNOTAVAIL [unknown af %u]",
+                              hdr.address_type);
+                       ret = -EADDRNOTAVAIL;
+                       goto error;
+               }
+       }
+
+       /* Start with IPv6 if available. */
+       if (alist->nr_ipv4 < alist->nr_addrs)
+               alist->preferred = alist->nr_ipv4;
+
+       *_b = b;
+       return alist;
+
+error:
+       *_b = b;
+       afs_put_addrlist(alist);
+       return ERR_PTR(ret);
+}
+
+/*
+ * Build a VL server list from a DNS-queried server list.
+ */
+struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *cell,
+                                                   const void *buffer,
+                                                   size_t buffer_size)
+{
+       const struct dns_server_list_v1_header *hdr = buffer;
+       struct dns_server_list_v1_server bs;
+       struct afs_vlserver_list *vllist, *previous;
+       struct afs_addr_list *addrs;
+       struct afs_vlserver *server;
+       const u8 *b = buffer, *end = buffer + buffer_size;
+       int ret = -ENOMEM, nr_servers, i, j;
+
+       _enter("");
+
+       /* Check that it's a server list, v1 */
+       if (end - b < sizeof(*hdr) ||
+           hdr->hdr.content != DNS_PAYLOAD_IS_SERVER_LIST ||
+           hdr->hdr.version != 1) {
+               pr_notice("kAFS: Got DNS record [%u,%u] len %zu\n",
+                         hdr->hdr.content, hdr->hdr.version, end - b);
+               ret = -EDESTADDRREQ;
+               goto dump;
+       }
+
+       nr_servers = hdr->nr_servers;
+
+       vllist = afs_alloc_vlserver_list(nr_servers);
+       if (!vllist)
+               return ERR_PTR(-ENOMEM);
+
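+       /* Treat out-of-range source and status values as unknown. */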
+       vllist->source = (hdr->source < NR__dns_record_source) ?
+               hdr->source : NR__dns_record_source;
+       vllist->status = (hdr->status < NR__dns_lookup_status) ?
+               hdr->status : NR__dns_lookup_status;
+
+       read_lock(&cell->vl_servers_lock);
+       previous = afs_get_vlserverlist(
+               rcu_dereference_protected(cell->vl_servers,
+                                         lockdep_is_held(&cell->vl_servers_lock)));
+       read_unlock(&cell->vl_servers_lock);
+
+       b += sizeof(*hdr);
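+       /* The per-server records are little-endian and unaligned, so unpack
+        * them field by field.
+        */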
+       while (end - b >= sizeof(bs)) {
+               bs.name_len     = afs_extract_le16(&b);
+               bs.priority     = afs_extract_le16(&b);
+               bs.weight       = afs_extract_le16(&b);
+               bs.port         = afs_extract_le16(&b);
+               bs.source       = *b++;
+               bs.status       = *b++;
+               bs.protocol     = *b++;
+               bs.nr_addrs     = *b++;
+
+               _debug("extract %u %u %u %u %u %u %*.*s",
+                      bs.name_len, bs.priority, bs.weight,
+                      bs.port, bs.protocol, bs.nr_addrs,
+                      bs.name_len, bs.name_len, b);
+
+               if (end - b < bs.name_len)
+                       break;
+
+               ret = -EPROTONOSUPPORT;
+               if (bs.protocol == DNS_SERVER_PROTOCOL_UNSPECIFIED) {
+                       bs.protocol = DNS_SERVER_PROTOCOL_UDP;
+               } else if (bs.protocol != DNS_SERVER_PROTOCOL_UDP) {
+                       _leave(" = [proto %u]", bs.protocol);
+                       goto error;
+               }
+
+               if (bs.port == 0)
+                       bs.port = AFS_VL_PORT;
+               if (bs.source > NR__dns_record_source)
+                       bs.source = NR__dns_record_source;
+               if (bs.status > NR__dns_lookup_status)
+                       bs.status = NR__dns_lookup_status;
+
+               server = NULL;
+               if (previous) {
+                       /* See if we can update an old server record */
+                       for (i = 0; i < previous->nr_servers; i++) {
+                               struct afs_vlserver *p = previous->servers[i].server;
+
+                               if (p->name_len == bs.name_len &&
+                                   p->port == bs.port &&
+                                   strncasecmp(b, p->name, bs.name_len) == 0) {
+                                       server = afs_get_vlserver(p);
+                                       break;
+                               }
+                       }
+               }
+
+               if (!server) {
+                       ret = -ENOMEM;
+                       server = afs_alloc_vlserver(b, bs.name_len, bs.port);
+                       if (!server)
+                               goto error;
+               }
+
+               b += bs.name_len;
+
+               /* Extract the addresses - note that we can't skip this as we
+                * have to advance the payload pointer.
+                */
+               addrs = afs_extract_vl_addrs(&b, end, bs.nr_addrs, bs.port);
+               if (IS_ERR(addrs)) {
+                       ret = PTR_ERR(addrs);
+                       goto error_2;
+               }
+
+               if (vllist->nr_servers >= nr_servers) {
+                       _debug("skip %u >= %u", vllist->nr_servers, nr_servers);
+                       afs_put_addrlist(addrs);
+                       afs_put_vlserver(cell->net, server);
+                       continue;
+               }
+
+               addrs->source = bs.source;
+               addrs->status = bs.status;
+
+               if (addrs->nr_addrs == 0) {
+                       afs_put_addrlist(addrs);
+                       if (!rcu_access_pointer(server->addresses)) {
+                               afs_put_vlserver(cell->net, server);
+                               continue;
+                       }
+               } else {
+                       struct afs_addr_list *old = addrs;
+
+                       write_lock(&server->lock);
+                       rcu_swap_protected(server->addresses, old,
+                                          lockdep_is_held(&server->lock));
+                       write_unlock(&server->lock);
+                       afs_put_addrlist(old);
+               }
+
+               /* TODO: Might want to check for duplicates */
+
+               /* Insertion-sort by priority and weight */
+               for (j = 0; j < vllist->nr_servers; j++) {
+                       if (bs.priority < vllist->servers[j].priority)
+                               break; /* Lower preferable */
+                       if (bs.priority == vllist->servers[j].priority &&
+                           bs.weight > vllist->servers[j].weight)
+                               break; /* Higher preferable */
+               }
+
+               if (j < vllist->nr_servers) {
+                       memmove(vllist->servers + j + 1,
+                               vllist->servers + j,
+                               (vllist->nr_servers - j) * sizeof(struct afs_vlserver_entry));
+               }
+
+               clear_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+
+               vllist->servers[j].priority = bs.priority;
+               vllist->servers[j].weight = bs.weight;
+               vllist->servers[j].server = server;
+               vllist->nr_servers++;
+       }
+
+       if (b != end) {
+               _debug("parse error %zd", b - end);
+               goto error;
+       }
+
+       afs_put_vlserverlist(cell->net, previous);
+       _leave(" = ok [%u]", vllist->nr_servers);
+       return vllist;
+
+error_2:
+       afs_put_vlserver(cell->net, server);
+error:
+       afs_put_vlserverlist(cell->net, vllist);
+       afs_put_vlserverlist(cell->net, previous);
+dump:
+       if (ret != -ENOMEM) {
+               printk(KERN_DEBUG "DNS: at %zu\n", (const void *)b - buffer);
+               print_hex_dump_bytes("DNS: ", DUMP_PREFIX_NONE, buffer, buffer_size);
+       }
+       return ERR_PTR(ret);
+}
diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c
new file mode 100644 (file)
index 0000000..f0b0329
--- /dev/null
@@ -0,0 +1,282 @@
+/* AFS vlserver probing
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "afs_fs.h"
+#include "internal.h"
+#include "protocol_yfs.h"
+
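+/* Note the completion of one probe call.  Returns true if this was the last
+ * outstanding probe for the server, in which case the waiters are woken and
+ * the probing flag is cleared.
+ */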
+static bool afs_vl_probe_done(struct afs_vlserver *server)
+{
+       if (!atomic_dec_and_test(&server->probe_outstanding))
+               return false;
+
+       wake_up_var(&server->probe_outstanding);
+       clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);
+       wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);
+       return true;
+}
+
+/*
+ * Process the result of probing a vlserver.  This is called after successful
+ * or failed delivery of a VL.GetCapabilities operation.
+ */
+void afs_vlserver_probe_result(struct afs_call *call)
+{
+       struct afs_addr_list *alist = call->alist;
+       struct afs_vlserver *server = call->reply[0];
+       unsigned int server_index = (long)call->reply[1];
+       unsigned int index = call->addr_ix;
+       unsigned int rtt = UINT_MAX;
+       bool have_result = false;
+       u64 _rtt;
+       int ret = call->error;
+
+       _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code);
+
+       spin_lock(&server->probe_lock);
+
+       switch (ret) {
+       case 0:
+               server->probe.error = 0;
+               goto responded;
+       case -ECONNABORTED:
+               if (!server->probe.responded) {
+                       server->probe.abort_code = call->abort_code;
+                       server->probe.error = ret;
+               }
+               goto responded;
+       case -ENOMEM:
+       case -ENONET:
+               server->probe.local_failure = true;
+               afs_io_error(call, afs_io_error_vl_probe_fail);
+               goto out;
+       case -ECONNRESET: /* Responded, but call expired. */
+       case -ERFKILL:
+       case -EADDRNOTAVAIL:
+       case -ENETUNREACH:
+       case -EHOSTUNREACH:
+       case -EHOSTDOWN:
+       case -ECONNREFUSED:
+       case -ETIMEDOUT:
+       case -ETIME:
+       default:
+               clear_bit(index, &alist->responded);
+               set_bit(index, &alist->failed);
+               if (!server->probe.responded &&
+                   (server->probe.error == 0 ||
+                    server->probe.error == -ETIMEDOUT ||
+                    server->probe.error == -ETIME))
+                       server->probe.error = ret;
+               afs_io_error(call, afs_io_error_vl_probe_fail);
+               goto out;
+       }
+
+responded:
+       set_bit(index, &alist->responded);
+       clear_bit(index, &alist->failed);
+
+       if (call->service_id == YFS_VL_SERVICE) {
+               server->probe.is_yfs = true;
+               set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
+               alist->addrs[index].srx_service = call->service_id;
+       } else {
+               server->probe.not_yfs = true;
+               if (!server->probe.is_yfs) {
+                       clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);
+                       alist->addrs[index].srx_service = call->service_id;
+               }
+       }
+
+       /* Get the RTT and scale it to fit into a 32-bit value that represents
+        * over a minute of time so that we can access it with one instruction
+        * on a 32-bit system.
+        */
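+       /* Assuming the raw RTT is in nanoseconds, UINT_MAX in these 64ns
+        * units covers roughly 275 seconds.
+        */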
+       _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
+       _rtt /= 64;
+       rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt;
+       if (rtt < server->probe.rtt) {
+               server->probe.rtt = rtt;
+               alist->preferred = index;
+               have_result = true;
+       }
+
+       smp_wmb(); /* Set rtt before responded. */
+       server->probe.responded = true;
+       set_bit(AFS_VLSERVER_FL_PROBED, &server->flags);
+out:
+       spin_unlock(&server->probe_lock);
+
+       _debug("probe [%u][%u] %pISpc rtt=%u ret=%d",
+              server_index, index, &alist->addrs[index].transport,
+              (unsigned int)rtt, ret);
+
+       have_result |= afs_vl_probe_done(server);
+       if (have_result) {
+               server->probe.have_result = true;
+               wake_up_var(&server->probe.have_result);
+               wake_up_all(&server->probe_wq);
+       }
+}
+
+/*
+ * Probe all of a vlserver's addresses to find out the best route and to
+ * query its capabilities.
+ */
+static bool afs_do_probe_vlserver(struct afs_net *net,
+                                 struct afs_vlserver *server,
+                                 struct key *key,
+                                 unsigned int server_index,
+                                 struct afs_error *_e)
+{
+       struct afs_addr_cursor ac = {
+               .index = 0,
+       };
+       bool in_progress = false;
+       int err;
+
+       _enter("%s", server->name);
+
+       read_lock(&server->lock);
+       ac.alist = rcu_dereference_protected(server->addresses,
+                                            lockdep_is_held(&server->lock));
+       read_unlock(&server->lock);
+
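+       /* One probe call is dispatched per address; the counter gates
+        * overall completion.
+        */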
+       atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
+       memset(&server->probe, 0, sizeof(server->probe));
+       server->probe.rtt = UINT_MAX;
+
+       for (ac.index = 0; ac.index < ac.alist->nr_addrs; ac.index++) {
+               err = afs_vl_get_capabilities(net, &ac, key, server,
+                                             server_index, true);
+               if (err == -EINPROGRESS)
+                       in_progress = true;
+               else
+                       afs_prioritise_error(_e, err, ac.abort_code);
+       }
+
+       if (!in_progress)
+               afs_vl_probe_done(server);
+       return in_progress;
+}
+
+/*
+ * Send off probes to all unprobed servers.
+ */
+int afs_send_vl_probes(struct afs_net *net, struct key *key,
+                      struct afs_vlserver_list *vllist)
+{
+       struct afs_vlserver *server;
+       struct afs_error e;
+       bool in_progress = false;
+       int i;
+
+       e.error = 0;
+       e.responded = false;
+       for (i = 0; i < vllist->nr_servers; i++) {
+               server = vllist->servers[i].server;
+               if (test_bit(AFS_VLSERVER_FL_PROBED, &server->flags))
+                       continue;
+
+               if (!test_and_set_bit_lock(AFS_VLSERVER_FL_PROBING, &server->flags) &&
+                   afs_do_probe_vlserver(net, server, key, i, &e))
+                       in_progress = true;
+       }
+
+       return in_progress ? 0 : e.error;
+}
+
+/*
+ * Wait for the first as-yet untried server to respond.
+ */
+int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist,
+                          unsigned long untried)
+{
+       struct wait_queue_entry *waits;
+       struct afs_vlserver *server;
+       unsigned int rtt = UINT_MAX;
+       bool have_responders = false;
+       int pref = -1, i;
+
+       _enter("%u,%lx", vllist->nr_servers, untried);
+
+       /* Only wait for servers that have a probe outstanding. */
+       for (i = 0; i < vllist->nr_servers; i++) {
+               if (test_bit(i, &untried)) {
+                       server = vllist->servers[i].server;
+                       if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
+                               __clear_bit(i, &untried);
+                       if (server->probe.responded)
+                               have_responders = true;
+               }
+       }
+       if (have_responders || !untried)
+               return 0;
+
+       waits = kmalloc(array_size(vllist->nr_servers, sizeof(*waits)), GFP_KERNEL);
+       if (!waits)
+               return -ENOMEM;
+
+       for (i = 0; i < vllist->nr_servers; i++) {
+               if (test_bit(i, &untried)) {
+                       server = vllist->servers[i].server;
+                       init_waitqueue_entry(&waits[i], current);
+                       add_wait_queue(&server->probe_wq, &waits[i]);
+               }
+       }
+
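+       /* Sleep until a probed server responds, all outstanding probes
+        * complete or a signal arrives.  The task state is set before the
+        * conditions are rechecked so that a wakeup cannot be missed.
+        */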
+       for (;;) {
+               bool still_probing = false;
+
+               set_current_state(TASK_INTERRUPTIBLE);
+               for (i = 0; i < vllist->nr_servers; i++) {
+                       if (test_bit(i, &untried)) {
+                               server = vllist->servers[i].server;
+                               if (server->probe.responded)
+                                       goto stop;
+                               if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags))
+                                       still_probing = true;
+                       }
+               }
+
+               if (!still_probing || unlikely(signal_pending(current)))
+                       goto stop;
+               schedule();
+       }
+
+stop:
+       set_current_state(TASK_RUNNING);
+
+       for (i = 0; i < vllist->nr_servers; i++) {
+               if (test_bit(i, &untried)) {
+                       server = vllist->servers[i].server;
+                       if (server->probe.responded &&
+                           server->probe.rtt < rtt) {
+                               pref = i;
+                               rtt = server->probe.rtt;
+                       }
+
+                       remove_wait_queue(&server->probe_wq, &waits[i]);
+               }
+       }
+
+       kfree(waits);
+
+       if (pref == -1 && signal_pending(current))
+               return -ERESTARTSYS;
+
+       if (pref >= 0)
+               vllist->preferred = pref;
+
+       _leave(" = 0 [%u]", pref);
+       return 0;
+}
diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c
new file mode 100644 (file)
index 0000000..7adde83
--- /dev/null
@@ -0,0 +1,325 @@
+/* Handle vlserver selection and rotation.
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include "internal.h"
+#include "afs_vl.h"
+
+/*
+ * Begin an operation on a volume location server.
+ */
+bool afs_begin_vlserver_operation(struct afs_vl_cursor *vc, struct afs_cell *cell,
+                                 struct key *key)
+{
+       memset(vc, 0, sizeof(*vc));
+       vc->cell = cell;
+       vc->key = key;
+       vc->error = -EDESTADDRREQ;
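+       /* SHRT_MAX marks the cursor as not yet having made a call; it is
+        * matched by the corresponding case in afs_select_vlserver().
+        */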
+       vc->ac.error = SHRT_MAX;
+
+       if (signal_pending(current)) {
+               vc->error = -EINTR;
+               vc->flags |= AFS_VL_CURSOR_STOP;
+               return false;
+       }
+
+       return true;
+}
+
+/*
+ * Begin iteration through a server list, starting with the last used server if
+ * possible, or the last recorded good server if not.
+ */
+static bool afs_start_vl_iteration(struct afs_vl_cursor *vc)
+{
+       struct afs_cell *cell = vc->cell;
+
+       if (wait_on_bit(&cell->flags, AFS_CELL_FL_NO_LOOKUP_YET,
+                       TASK_INTERRUPTIBLE)) {
+               vc->error = -ERESTARTSYS;
+               return false;
+       }
+
+       read_lock(&cell->vl_servers_lock);
+       vc->server_list = afs_get_vlserverlist(
+               rcu_dereference_protected(cell->vl_servers,
+                                         lockdep_is_held(&cell->vl_servers_lock)));
+       read_unlock(&cell->vl_servers_lock);
+       if (!vc->server_list || !vc->server_list->nr_servers)
+               return false;
+
+       vc->untried = (1UL << vc->server_list->nr_servers) - 1;
+       vc->index = -1;
+       return true;
+}
+
+/*
+ * Select the vlserver to use.  May be called multiple times to rotate
+ * through the vlservers.
+ */
+bool afs_select_vlserver(struct afs_vl_cursor *vc)
+{
+       struct afs_addr_list *alist;
+       struct afs_vlserver *vlserver;
+       struct afs_error e;
+       u32 rtt;
+       int error = vc->ac.error, i;
+
+       _enter("%lx[%d],%lx[%d],%d,%d",
+              vc->untried, vc->index,
+              vc->ac.tried, vc->ac.index,
+              error, vc->ac.abort_code);
+
+       if (vc->flags & AFS_VL_CURSOR_STOP) {
+               _leave(" = f [stopped]");
+               return false;
+       }
+
+       vc->nr_iterations++;
+
+       /* Evaluate the result of the previous operation, if there was one. */
+       switch (error) {
+       case SHRT_MAX:
+               goto start;
+
+       default:
+       case 0:
+               /* Success or local failure.  Stop. */
+               vc->error = error;
+               vc->flags |= AFS_VL_CURSOR_STOP;
+               _leave(" = f [okay/local %d]", vc->ac.error);
+               return false;
+
+       case -ECONNABORTED:
+               /* The far side rejected the operation on some grounds.  This
+                * might involve the server being busy or the volume having been moved.
+                */
+               switch (vc->ac.abort_code) {
+               case AFSVL_IO:
+               case AFSVL_BADVOLOPER:
+               case AFSVL_NOMEM:
+                       /* The server went weird. */
+                       vc->error = -EREMOTEIO;
+                       //write_lock(&vc->cell->vl_servers_lock);
+                       //vc->server_list->weird_mask |= 1 << vc->index;
+                       //write_unlock(&vc->cell->vl_servers_lock);
+                       goto next_server;
+
+               default:
+                       vc->error = afs_abort_to_error(vc->ac.abort_code);
+                       goto failed;
+               }
+
+       case -ERFKILL:
+       case -EADDRNOTAVAIL:
+       case -ENETUNREACH:
+       case -EHOSTUNREACH:
+       case -EHOSTDOWN:
+       case -ECONNREFUSED:
+       case -ETIMEDOUT:
+       case -ETIME:
+               _debug("no conn %d", error);
+               vc->error = error;
+               goto iterate_address;
+
+       case -ECONNRESET:
+               _debug("call reset");
+               vc->error = error;
+               vc->flags |= AFS_VL_CURSOR_RETRY;
+               goto next_server;
+       }
+
+restart_from_beginning:
+       _debug("restart");
+       afs_end_cursor(&vc->ac);
+       afs_put_vlserverlist(vc->cell->net, vc->server_list);
+       vc->server_list = NULL;
+       if (vc->flags & AFS_VL_CURSOR_RETRIED)
+               goto failed;
+       vc->flags |= AFS_VL_CURSOR_RETRIED;
+start:
+       _debug("start");
+
+       if (!afs_start_vl_iteration(vc))
+               goto failed;
+
+       error = afs_send_vl_probes(vc->cell->net, vc->key, vc->server_list);
+       if (error < 0)
+               goto failed_set_error;
+
+pick_server:
+       _debug("pick [%lx]", vc->untried);
+
+       error = afs_wait_for_vl_probes(vc->server_list, vc->untried);
+       if (error < 0)
+               goto failed_set_error;
+
+       /* Pick the untried server with the lowest RTT. */
+       vc->index = vc->server_list->preferred;
+       if (test_bit(vc->index, &vc->untried))
+               goto selected_server;
+
+       vc->index = -1;
+       rtt = U32_MAX;
+       for (i = 0; i < vc->server_list->nr_servers; i++) {
+               struct afs_vlserver *s = vc->server_list->servers[i].server;
+
+               if (!test_bit(i, &vc->untried) || !s->probe.responded)
+                       continue;
+               if (s->probe.rtt < rtt) {
+                       vc->index = i;
+                       rtt = s->probe.rtt;
+               }
+       }
+
+       if (vc->index == -1)
+               goto no_more_servers;
+
+selected_server:
+       _debug("use %d", vc->index);
+       __clear_bit(vc->index, &vc->untried);
+
+       /* We're starting on a different vlserver from the list.  We need to
+        * check it, find its address list and probe its capabilities before we
+        * use it.
+        */
+       ASSERTCMP(vc->ac.alist, ==, NULL);
+       vlserver = vc->server_list->servers[vc->index].server;
+       vc->server = vlserver;
+
+       _debug("USING VLSERVER: %s", vlserver->name);
+
+       read_lock(&vlserver->lock);
+       alist = rcu_dereference_protected(vlserver->addresses,
+                                         lockdep_is_held(&vlserver->lock));
+       afs_get_addrlist(alist);
+       read_unlock(&vlserver->lock);
+
+       memset(&vc->ac, 0, sizeof(vc->ac));
+
+       if (!vc->ac.alist)
+               vc->ac.alist = alist;
+       else
+               afs_put_addrlist(alist);
+
+       vc->ac.index = -1;
+
+iterate_address:
+       ASSERT(vc->ac.alist);
+       /* Iterate over the current server's address list to try and find an
+        * address on which it will respond to us.
+        */
+       if (!afs_iterate_addresses(&vc->ac))
+               goto next_server;
+
+       _debug("VL address %d/%d", vc->ac.index, vc->ac.alist->nr_addrs);
+
+       _leave(" = t %pISpc", &vc->ac.alist->addrs[vc->ac.index].transport);
+       return true;
+
+next_server:
+       _debug("next");
+       afs_end_cursor(&vc->ac);
+       goto pick_server;
+
+no_more_servers:
+       /* That's all the servers poked to no good effect.  Try again if some
+        * of them were busy.
+        */
+       if (vc->flags & AFS_VL_CURSOR_RETRY)
+               goto restart_from_beginning;
+
+       e.error = -EDESTADDRREQ;
+       e.responded = false;
+       for (i = 0; i < vc->server_list->nr_servers; i++) {
+               struct afs_vlserver *s = vc->server_list->servers[i].server;
+
+               afs_prioritise_error(&e, READ_ONCE(s->probe.error),
+                                    s->probe.abort_code);
+       }
+
+failed_set_error:
+       vc->error = error;
+failed:
+       vc->flags |= AFS_VL_CURSOR_STOP;
+       afs_end_cursor(&vc->ac);
+       _leave(" = f [failed %d]", vc->error);
+       return false;
+}
+
+/*
+ * Dump cursor state in the case of the error being EDESTADDRREQ.
+ */
+static void afs_vl_dump_edestaddrreq(const struct afs_vl_cursor *vc)
+{
+       static int count;
+       int i;
+
+       if (!IS_ENABLED(CONFIG_AFS_DEBUG_CURSOR) || count > 3)
+               return;
+       count++;
+
+       rcu_read_lock();
+       pr_notice("EDESTADDR occurred\n");
+       pr_notice("VC: ut=%lx ix=%u ni=%hu fl=%hx err=%hd\n",
+                 vc->untried, vc->index, vc->nr_iterations, vc->flags, vc->error);
+
+       if (vc->server_list) {
+               const struct afs_vlserver_list *sl = vc->server_list;
+               pr_notice("VC: SL nr=%u ix=%u\n",
+                         sl->nr_servers, sl->index);
+               for (i = 0; i < sl->nr_servers; i++) {
+                       const struct afs_vlserver *s = sl->servers[i].server;
+                       pr_notice("VC: server %s+%hu fl=%lx E=%hd\n",
+                                 s->name, s->port, s->flags, s->probe.error);
+                       if (s->addresses) {
+                               const struct afs_addr_list *a =
+                                       rcu_dereference(s->addresses);
+                               pr_notice("VC:  - nr=%u/%u/%u pf=%u\n",
+                                         a->nr_ipv4, a->nr_addrs, a->max_addrs,
+                                         a->preferred);
+                               pr_notice("VC:  - pr=%lx R=%lx F=%lx\n",
+                                         a->probed, a->responded, a->failed);
+                               if (a == vc->ac.alist)
+                                       pr_notice("VC:  - current\n");
+                       }
+               }
+       }
+
+       pr_notice("AC: t=%lx ax=%u ac=%d er=%d r=%u ni=%u\n",
+                 vc->ac.tried, vc->ac.index, vc->ac.abort_code, vc->ac.error,
+                 vc->ac.responded, vc->ac.nr_iterations);
+       rcu_read_unlock();
+}
+
+/*
+ * Tidy up a volume location server cursor.
+ */
+int afs_end_vlserver_operation(struct afs_vl_cursor *vc)
+{
+       struct afs_net *net = vc->cell->net;
+
+       if (vc->error == -EDESTADDRREQ ||
+           vc->error == -EADDRNOTAVAIL ||
+           vc->error == -ENETUNREACH ||
+           vc->error == -EHOSTUNREACH)
+               afs_vl_dump_edestaddrreq(vc);
+
+       afs_end_cursor(&vc->ac);
+       afs_put_vlserverlist(net, vc->server_list);
+
+       if (vc->error == -ECONNABORTED)
+               vc->error = afs_abort_to_error(vc->ac.abort_code);
+
+       return vc->error;
+}
index c3b740813fc719850ca188f892d4f653352e8600..c3d9e5a5f67eeb13670b372b04f5b5dc4241282d 100644 (file)
@@ -128,14 +128,13 @@ static const struct afs_call_type afs_RXVLGetEntryByNameU = {
  * Dispatch a get volume entry by name or ID operation (uuid variant).  If the
  * volname is a decimal number then it's a volume ID not a volume name.
  */
-struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net,
-                                                 struct afs_addr_cursor *ac,
-                                                 struct key *key,
+struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_vl_cursor *vc,
                                                  const char *volname,
                                                  int volnamesz)
 {
        struct afs_vldb_entry *entry;
        struct afs_call *call;
+       struct afs_net *net = vc->cell->net;
        size_t reqsz, padsz;
        __be32 *bp;
 
@@ -155,7 +154,7 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net,
                return ERR_PTR(-ENOMEM);
        }
 
-       call->key = key;
+       call->key = vc->key;
        call->reply[0] = entry;
        call->ret_reply0 = true;
 
@@ -168,7 +167,7 @@ struct afs_vldb_entry *afs_vl_get_entry_by_name_u(struct afs_net *net,
                memset((void *)bp + volnamesz, 0, padsz);
 
        trace_afs_make_vl_call(call);
-       return (struct afs_vldb_entry *)afs_make_call(ac, call, GFP_KERNEL, false);
+       return (struct afs_vldb_entry *)afs_make_call(&vc->ac, call, GFP_KERNEL, false);
 }
 
 /*
@@ -187,19 +186,18 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
        u32 uniquifier, nentries, count;
        int i, ret;
 
-       _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count);
+       _enter("{%u,%zu/%u}",
+              call->unmarshall, iov_iter_count(call->_iter), call->count);
 
-again:
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
+               afs_extract_to_buf(call,
+                                  sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32));
                call->unmarshall++;
 
                /* Extract the returned uuid, uniquifier, nentries and blkaddrs size */
        case 1:
-               ret = afs_extract_data(call, call->buffer,
-                                      sizeof(struct afs_uuid__xdr) + 3 * sizeof(__be32),
-                                      true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -216,28 +214,28 @@ again:
                call->reply[0] = alist;
                call->count = count;
                call->count2 = nentries;
-               call->offset = 0;
                call->unmarshall++;
 
+       more_entries:
+               count = min(call->count, 4U);
+               afs_extract_to_buf(call, count * sizeof(__be32));
+
                /* Extract entries */
        case 2:
-               count = min(call->count, 4U);
-               ret = afs_extract_data(call, call->buffer,
-                                      count * sizeof(__be32),
-                                      call->count > 4);
+               ret = afs_extract_data(call, call->count > 4);
                if (ret < 0)
                        return ret;
 
                alist = call->reply[0];
                bp = call->buffer;
+               count = min(call->count, 4U);
                for (i = 0; i < count; i++)
                        if (alist->nr_addrs < call->count2)
                                afs_merge_fs_addr4(alist, *bp++, AFS_FS_PORT);
 
                call->count -= count;
                if (call->count > 0)
-                       goto again;
-               call->offset = 0;
+                       goto more_entries;
                call->unmarshall++;
                break;
        }
@@ -267,14 +265,13 @@ static const struct afs_call_type afs_RXVLGetAddrsU = {
  * Dispatch an operation to get the addresses for a server, where the server is
  * nominated by UUID.
  */
-struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
-                                        struct afs_addr_cursor *ac,
-                                        struct key *key,
+struct afs_addr_list *afs_vl_get_addrs_u(struct afs_vl_cursor *vc,
                                         const uuid_t *uuid)
 {
        struct afs_ListAddrByAttributes__xdr *r;
        const struct afs_uuid *u = (const struct afs_uuid *)uuid;
        struct afs_call *call;
+       struct afs_net *net = vc->cell->net;
        __be32 *bp;
        int i;
 
@@ -286,7 +283,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
        if (!call)
                return ERR_PTR(-ENOMEM);
 
-       call->key = key;
+       call->key = vc->key;
        call->reply[0] = NULL;
        call->ret_reply0 = true;
 
@@ -307,7 +304,7 @@ struct afs_addr_list *afs_vl_get_addrs_u(struct afs_net *net,
                r->uuid.node[i] = htonl(u->node[i]);
 
        trace_afs_make_vl_call(call);
-       return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false);
+       return (struct afs_addr_list *)afs_make_call(&vc->ac, call, GFP_KERNEL, false);
 }
 
 /*
@@ -318,54 +315,51 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call)
        u32 count;
        int ret;
 
-       _enter("{%u,%zu/%u}", call->unmarshall, call->offset, call->count);
+       _enter("{%u,%zu/%u}",
+              call->unmarshall, iov_iter_count(call->_iter), call->count);
 
-again:
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
+               afs_extract_to_tmp(call);
                call->unmarshall++;
 
                /* Extract the capabilities word count */
        case 1:
-               ret = afs_extract_data(call, &call->tmp,
-                                      1 * sizeof(__be32),
-                                      true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                count = ntohl(call->tmp);
-
                call->count = count;
                call->count2 = count;
-               call->offset = 0;
+
                call->unmarshall++;
+               afs_extract_discard(call, count * sizeof(__be32));
 
                /* Extract capabilities words */
        case 2:
-               count = min(call->count, 16U);
-               ret = afs_extract_data(call, call->buffer,
-                                      count * sizeof(__be32),
-                                      call->count > 16);
+               ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
 
                /* TODO: Examine capabilities */
 
-               call->count -= count;
-               if (call->count > 0)
-                       goto again;
-               call->offset = 0;
                call->unmarshall++;
                break;
        }
 
-       call->reply[0] = (void *)(unsigned long)call->service_id;
-
        _leave(" = 0 [done]");
        return 0;
 }
 
+static void afs_destroy_vl_get_capabilities(struct afs_call *call)
+{
+       struct afs_vlserver *server = call->reply[0];
+
+       afs_put_vlserver(call->net, server);
+       afs_flat_call_destructor(call);
+}
+
 /*
  * VL.GetCapabilities operation type
  */
@@ -373,11 +367,12 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
        .name           = "VL.GetCapabilities",
        .op             = afs_VL_GetCapabilities,
        .deliver        = afs_deliver_vl_get_capabilities,
-       .destructor     = afs_flat_call_destructor,
+       .done           = afs_vlserver_probe_result,
+       .destructor     = afs_destroy_vl_get_capabilities,
 };
 
 /*
- * Probe a fileserver for the capabilities that it supports.  This can
+ * Probe a volume server for the capabilities that it supports.  This can
  * return up to 196 words.
  *
  * We use this to probe for service upgrade to determine what the server at the
@@ -385,7 +380,10 @@ static const struct afs_call_type afs_RXVLGetCapabilities = {
  */
 int afs_vl_get_capabilities(struct afs_net *net,
                            struct afs_addr_cursor *ac,
-                           struct key *key)
+                           struct key *key,
+                           struct afs_vlserver *server,
+                           unsigned int server_index,
+                           bool async)
 {
        struct afs_call *call;
        __be32 *bp;
@@ -397,9 +395,10 @@ int afs_vl_get_capabilities(struct afs_net *net,
                return -ENOMEM;
 
        call->key = key;
-       call->upgrade = true; /* Let's see if this is a YFS server */
-       call->reply[0] = (void *)VLGETCAPABILITIES;
-       call->ret_reply0 = true;
+       call->reply[0] = afs_get_vlserver(server);
+       call->reply[1] = (void *)(long)server_index;
+       call->upgrade = true;
+       call->want_reply_time = true;
 
        /* marshall the parameters */
        bp = call->request;
@@ -407,7 +406,7 @@ int afs_vl_get_capabilities(struct afs_net *net,
 
        /* Can't take a ref on server */
        trace_afs_make_vl_call(call);
-       return afs_make_call(ac, call, GFP_KERNEL, false);
+       return afs_make_call(ac, call, GFP_KERNEL, async);
 }
 
 /*
@@ -426,22 +425,19 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
        u32 uniquifier, size;
        int ret;
 
-       _enter("{%u,%zu/%u,%u}", call->unmarshall, call->offset, call->count, call->count2);
+       _enter("{%u,%zu,%u}",
+              call->unmarshall, iov_iter_count(call->_iter), call->count2);
 
-again:
        switch (call->unmarshall) {
        case 0:
-               call->offset = 0;
+               afs_extract_to_buf(call, sizeof(uuid_t) + 3 * sizeof(__be32));
                call->unmarshall = 1;
 
                /* Extract the returned uuid, uniquifier, fsEndpoints count and
                 * either the first fsEndpoint type or the volEndpoints
                 * count if there are no fsEndpoints. */
        case 1:
-               ret = afs_extract_data(call, call->buffer,
-                                      sizeof(uuid_t) +
-                                      3 * sizeof(__be32),
-                                      true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -451,22 +447,19 @@ again:
                call->count2    = ntohl(*bp); /* Type or next count */
 
                if (call->count > YFS_MAXENDPOINTS)
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_yvl_fsendpt_num);
 
                alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
                if (!alist)
                        return -ENOMEM;
                alist->version = uniquifier;
                call->reply[0] = alist;
-               call->offset = 0;
 
                if (call->count == 0)
                        goto extract_volendpoints;
 
-               call->unmarshall = 2;
-
-               /* Extract fsEndpoints[] entries */
-       case 2:
+       next_fsendpoint:
                switch (call->count2) {
                case YFS_ENDPOINT_IPV4:
                        size = sizeof(__be32) * (1 + 1 + 1);
@@ -475,11 +468,17 @@ again:
                        size = sizeof(__be32) * (1 + 4 + 1);
                        break;
                default:
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_yvl_fsendpt_type);
                }
 
                size += sizeof(__be32);
-               ret = afs_extract_data(call, call->buffer, size, true);
+               afs_extract_to_buf(call, size);
+               call->unmarshall = 2;
+
+               /* Extract fsEndpoints[] entries */
+       case 2:
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -488,18 +487,21 @@ again:
                switch (call->count2) {
                case YFS_ENDPOINT_IPV4:
                        if (ntohl(bp[0]) != sizeof(__be32) * 2)
-                               return afs_protocol_error(call, -EBADMSG);
+                               return afs_protocol_error(call, -EBADMSG,
+                                                         afs_eproto_yvl_fsendpt4_len);
                        afs_merge_fs_addr4(alist, bp[1], ntohl(bp[2]));
                        bp += 3;
                        break;
                case YFS_ENDPOINT_IPV6:
                        if (ntohl(bp[0]) != sizeof(__be32) * 5)
-                               return afs_protocol_error(call, -EBADMSG);
+                               return afs_protocol_error(call, -EBADMSG,
+                                                         afs_eproto_yvl_fsendpt6_len);
                        afs_merge_fs_addr6(alist, bp + 1, ntohl(bp[5]));
                        bp += 6;
                        break;
                default:
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_yvl_fsendpt_type);
                }
 
                /* Got either the type of the next entry or the count of
@@ -507,10 +509,9 @@ again:
                 */
                call->count2 = ntohl(*bp++);
 
-               call->offset = 0;
                call->count--;
                if (call->count > 0)
-                       goto again;
+                       goto next_fsendpoint;
 
        extract_volendpoints:
                /* Extract the list of volEndpoints. */
@@ -518,8 +519,10 @@ again:
                if (!call->count)
                        goto end;
                if (call->count > YFS_MAXENDPOINTS)
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_yvl_vlendpt_type);
 
+               afs_extract_to_buf(call, 1 * sizeof(__be32));
                call->unmarshall = 3;
 
                /* Extract the type of volEndpoints[0].  Normally we would
@@ -527,17 +530,14 @@ again:
                 * data of the current one, but this is the first...
                 */
        case 3:
-               ret = afs_extract_data(call, call->buffer, sizeof(__be32), true);
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
                bp = call->buffer;
-               call->count2 = ntohl(*bp++);
-               call->offset = 0;
-               call->unmarshall = 4;
 
-               /* Extract volEndpoints[] entries */
-       case 4:
+       next_volendpoint:
+               call->count2 = ntohl(*bp++);
                switch (call->count2) {
                case YFS_ENDPOINT_IPV4:
                        size = sizeof(__be32) * (1 + 1 + 1);
@@ -546,12 +546,18 @@ again:
                        size = sizeof(__be32) * (1 + 4 + 1);
                        break;
                default:
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_yvl_vlendpt_type);
                }
 
                if (call->count > 1)
-                       size += sizeof(__be32);
-               ret = afs_extract_data(call, call->buffer, size, true);
+                       size += sizeof(__be32); /* Get next type too */
+               afs_extract_to_buf(call, size);
+               call->unmarshall = 4;
+
+               /* Extract volEndpoints[] entries */
+       case 4:
+               ret = afs_extract_data(call, true);
                if (ret < 0)
                        return ret;
 
@@ -559,34 +565,35 @@ again:
                switch (call->count2) {
                case YFS_ENDPOINT_IPV4:
                        if (ntohl(bp[0]) != sizeof(__be32) * 2)
-                               return afs_protocol_error(call, -EBADMSG);
+                               return afs_protocol_error(call, -EBADMSG,
+                                                         afs_eproto_yvl_vlendpt4_len);
                        bp += 3;
                        break;
                case YFS_ENDPOINT_IPV6:
                        if (ntohl(bp[0]) != sizeof(__be32) * 5)
-                               return afs_protocol_error(call, -EBADMSG);
+                               return afs_protocol_error(call, -EBADMSG,
+                                                         afs_eproto_yvl_vlendpt6_len);
                        bp += 6;
                        break;
                default:
-                       return afs_protocol_error(call, -EBADMSG);
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_yvl_vlendpt_type);
                }
 
                /* Got either the type of the next entry or the count of
                 * volEndpoints if no more fsEndpoints.
                 */
-               call->offset = 0;
                call->count--;
-               if (call->count > 0) {
-                       call->count2 = ntohl(*bp++);
-                       goto again;
-               }
+               if (call->count > 0)
+                       goto next_volendpoint;
 
        end:
+               afs_extract_discard(call, 0);
                call->unmarshall = 5;
 
                /* Done */
        case 5:
-               ret = afs_extract_data(call, call->buffer, 0, false);
+               ret = afs_extract_data(call, false);
                if (ret < 0)
                        return ret;
                call->unmarshall = 6;
@@ -596,11 +603,6 @@ again:
        }
 
        alist = call->reply[0];
-
-       /* Start with IPv6 if available. */
-       if (alist->nr_ipv4 < alist->nr_addrs)
-               alist->index = alist->nr_ipv4;
-
        _leave(" = 0 [done]");
        return 0;
 }
@@ -619,12 +621,11 @@ static const struct afs_call_type afs_YFSVLGetEndpoints = {
  * Dispatch an operation to get the addresses for a server, where the server is
  * nominated by UUID.
  */
-struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net,
-                                             struct afs_addr_cursor *ac,
-                                             struct key *key,
+struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_vl_cursor *vc,
                                              const uuid_t *uuid)
 {
        struct afs_call *call;
+       struct afs_net *net = vc->cell->net;
        __be32 *bp;
 
        _enter("");
@@ -635,7 +636,7 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net,
        if (!call)
                return ERR_PTR(-ENOMEM);
 
-       call->key = key;
+       call->key = vc->key;
        call->reply[0] = NULL;
        call->ret_reply0 = true;
 
@@ -646,5 +647,5 @@ struct afs_addr_list *afs_yfsvl_get_endpoints(struct afs_net *net,
        memcpy(bp, uuid, sizeof(*uuid)); /* Type opr_uuid */
 
        trace_afs_make_vl_call(call);
-       return (struct afs_addr_list *)afs_make_call(ac, call, GFP_KERNEL, false);
+       return (struct afs_addr_list *)afs_make_call(&vc->ac, call, GFP_KERNEL, false);
 }
index 3037bd01f617d13b1589d823cb6bdc112014bdca..00975ed3640f8ae535d7438d2076ef681434f378 100644 (file)
@@ -74,55 +74,19 @@ static struct afs_vldb_entry *afs_vl_lookup_vldb(struct afs_cell *cell,
                                                 const char *volname,
                                                 size_t volnamesz)
 {
-       struct afs_addr_cursor ac;
-       struct afs_vldb_entry *vldb;
+       struct afs_vldb_entry *vldb = ERR_PTR(-EDESTADDRREQ);
+       struct afs_vl_cursor vc;
        int ret;
 
-       ret = afs_set_vl_cursor(&ac, cell);
-       if (ret < 0)
-               return ERR_PTR(ret);
-
-       while (afs_iterate_addresses(&ac)) {
-               if (!test_bit(ac.index, &ac.alist->probed)) {
-                       ret = afs_vl_get_capabilities(cell->net, &ac, key);
-                       switch (ret) {
-                       case VL_SERVICE:
-                               clear_bit(ac.index, &ac.alist->yfs);
-                               set_bit(ac.index, &ac.alist->probed);
-                               ac.addr->srx_service = ret;
-                               break;
-                       case YFS_VL_SERVICE:
-                               set_bit(ac.index, &ac.alist->yfs);
-                               set_bit(ac.index, &ac.alist->probed);
-                               ac.addr->srx_service = ret;
-                               break;
-                       }
-               }
-               
-               vldb = afs_vl_get_entry_by_name_u(cell->net, &ac, key,
-                                                 volname, volnamesz);
-               switch (ac.error) {
-               case 0:
-                       afs_end_cursor(&ac);
-                       return vldb;
-               case -ECONNABORTED:
-                       ac.error = afs_abort_to_error(ac.abort_code);
-                       goto error;
-               case -ENOMEM:
-               case -ENONET:
-                       goto error;
-               case -ENETUNREACH:
-               case -EHOSTUNREACH:
-               case -ECONNREFUSED:
-                       break;
-               default:
-                       ac.error = -EIO;
-                       goto error;
-               }
+       if (!afs_begin_vlserver_operation(&vc, cell, key))
+               return ERR_PTR(-ERESTARTSYS);
+
+       while (afs_select_vlserver(&vc)) {
+               vldb = afs_vl_get_entry_by_name_u(&vc, volname, volnamesz);
        }
 
-error:
-       return ERR_PTR(afs_end_cursor(&ac));
+       ret = afs_end_vlserver_operation(&vc);
+       return ret < 0 ? ERR_PTR(ret) : vldb;
 }
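
The lookup is now a plain rotation loop: begin the operation, let the cursor step through servers until one attempt succeeds or everything is exhausted, then collapse the accumulated error. The real helpers (afs_begin_vlserver_operation() and friends, defined elsewhere in this series) also handle probing, address iteration and busy/retry states; the standalone sketch below shows only the control-flow shape, with invented names.

#include <stdbool.h>
#include <stdio.h>

struct demo_cursor {
	int	index;		/* next server to try */
	int	nr_servers;
	int	error;		/* last failure; 0 once something worked */
};

static void demo_begin(struct demo_cursor *vc, int nr_servers)
{
	vc->index = 0;
	vc->nr_servers = nr_servers;
	vc->error = -1;		/* nothing has succeeded yet */
}

static bool demo_select(struct demo_cursor *vc)
{
	/* Keep going while the last attempt failed and servers remain. */
	return vc->error != 0 && vc->index < vc->nr_servers;
}

static void demo_try(struct demo_cursor *vc)
{
	vc->error = (vc->index == 2) ? 0 : -1;	/* pretend server 2 works */
	vc->index++;
}

static int demo_end(struct demo_cursor *vc)
{
	return vc->error;	/* 0 on success, else the last error */
}

int main(void)
{
	struct demo_cursor vc;

	demo_begin(&vc, 4);
	while (demo_select(&vc))
		demo_try(&vc);
	printf("result = %d\n", demo_end(&vc));
	return 0;
}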
 
 /*
@@ -270,7 +234,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
        /* We look up an ID by passing it as a decimal string in the
         * operation's name parameter.
         */
-       idsz = sprintf(idbuf, "%u", volume->vid);
+       idsz = sprintf(idbuf, "%llu", volume->vid);
 
        vldb = afs_vl_lookup_vldb(volume->cell, key, idbuf, idsz);
        if (IS_ERR(vldb)) {
index 19c04caf3c012bd777d20e31355357154cc38a83..72efcfcf9f95efd2b5cae1257a8d01247367ebeb 100644 (file)
@@ -33,10 +33,21 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
                         loff_t pos, unsigned int len, struct page *page)
 {
        struct afs_read *req;
+       size_t p;
+       void *data;
        int ret;
 
        _enter(",,%llu", (unsigned long long)pos);
 
+       if (pos >= vnode->vfs_inode.i_size) {
+               p = pos & ~PAGE_MASK;
+               ASSERTCMP(p + len, <=, PAGE_SIZE);
+               data = kmap(page);
+               memset(data + p, 0, len);
+               kunmap(page);
+               return 0;
+       }
+
        req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
                      GFP_KERNEL);
        if (!req)
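
The new short-circuit above avoids a server fetch when the page region being prepared lies wholly beyond the current i_size; the region is simply zeroed in place. A standalone illustration of the offset arithmetic, assuming 4096-byte pages:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long pos = 3 * PAGE_SIZE + 8;	/* beyond i_size */
	unsigned long long p = pos & ~PAGE_MASK;	/* offset within page */
	unsigned int len = 100;

	assert(p + len <= PAGE_SIZE);			/* the ASSERTCMP */
	printf("zero %u bytes at in-page offset %llu\n", len, p);
	return 0;
}
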
@@ -81,7 +92,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index = pos >> PAGE_SHIFT;
        int ret;
 
-       _enter("{%x:%u},{%lx},%u,%u",
+       _enter("{%llx:%llu},{%lx},%u,%u",
               vnode->fid.vid, vnode->fid.vnode, index, from, to);
 
        /* We want to store information about how much of a page is altered in
@@ -181,7 +192,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
        loff_t i_size, maybe_i_size;
        int ret;
 
-       _enter("{%x:%u},{%lx}",
+       _enter("{%llx:%llu},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, page->index);
 
        maybe_i_size = pos + copied;
@@ -230,7 +241,7 @@ static void afs_kill_pages(struct address_space *mapping,
        struct pagevec pv;
        unsigned count, loop;
 
-       _enter("{%x:%u},%lx-%lx",
+       _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);
 
        pagevec_init(&pv);
@@ -272,7 +283,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
        struct pagevec pv;
        unsigned count, loop;
 
-       _enter("{%x:%u},%lx-%lx",
+       _enter("{%llx:%llu},%lx-%lx",
               vnode->fid.vid, vnode->fid.vnode, first, last);
 
        pagevec_init(&pv);
@@ -314,7 +325,7 @@ static int afs_store_data(struct address_space *mapping,
        struct list_head *p;
        int ret = -ENOKEY, ret2;
 
-       _enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
+       _enter("%s{%llx:%llu.%u},%lx,%lx,%x,%x",
               vnode->volume->name,
               vnode->fid.vid,
               vnode->fid.vnode,
@@ -533,6 +544,7 @@ no_more:
        case -ENOENT:
        case -ENOMEDIUM:
        case -ENXIO:
+               trace_afs_file_error(vnode, ret, afs_file_error_writeback_fail);
                afs_kill_pages(mapping, first, last);
                mapping_set_error(mapping, ret);
                break;
@@ -675,7 +687,7 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
        unsigned count, loop;
        pgoff_t first = call->first, last = call->last;
 
-       _enter("{%x:%u},{%lx-%lx}",
+       _enter("{%llx:%llu},{%lx-%lx}",
               vnode->fid.vid, vnode->fid.vnode, first, last);
 
        pagevec_init(&pv);
@@ -714,7 +726,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
        ssize_t result;
        size_t count = iov_iter_count(from);
 
-       _enter("{%x.%u},{%zu},",
+       _enter("{%llx:%llu},{%zu},",
               vnode->fid.vid, vnode->fid.vnode, count);
 
        if (IS_SWAPFILE(&vnode->vfs_inode)) {
@@ -742,7 +754,7 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        struct inode *inode = file_inode(file);
        struct afs_vnode *vnode = AFS_FS_I(inode);
 
-       _enter("{%x:%u},{n=%pD},%d",
+       _enter("{%llx:%llu},{n=%pD},%d",
               vnode->fid.vid, vnode->fid.vnode, file,
               datasync);
 
@@ -760,7 +772,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        struct afs_vnode *vnode = AFS_FS_I(inode);
        unsigned long priv;
 
-       _enter("{{%x:%u}},{%lx}",
+       _enter("{{%llx:%llu}},{%lx}",
               vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
 
        sb_start_pagefault(inode->i_sb);
index cfcc674e64a55bc52be024d873a5b70fa85fafc1..a2cdf25573e2419592b7460aafc7a6da019293a9 100644 (file)
@@ -72,7 +72,7 @@ static int afs_xattr_get_fid(const struct xattr_handler *handler,
        char text[8 + 1 + 8 + 1 + 8 + 1];
        size_t len;
 
-       len = sprintf(text, "%x:%x:%x",
+       len = sprintf(text, "%llx:%llx:%x",
                      vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
        if (size == 0)
                return len;
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
new file mode 100644 (file)
index 0000000..12658c1
--- /dev/null
@@ -0,0 +1,2184 @@
+/* YFS File Server client stubs
+ *
+ * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/circ_buf.h>
+#include <linux/iversion.h>
+#include "internal.h"
+#include "afs_fs.h"
+#include "xdr_fs.h"
+#include "protocol_yfs.h"
+
+static const struct afs_fid afs_zero_fid;
+
+static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
+{
+       call->cbi = afs_get_cb_interest(cbi);
+}
+
+#define xdr_size(x) (sizeof(*x) / sizeof(__be32))
+
+static void xdr_decode_YFSFid(const __be32 **_bp, struct afs_fid *fid)
+{
+       const struct yfs_xdr_YFSFid *x = (const void *)*_bp;
+
+       fid->vid        = xdr_to_u64(x->volume);
+       fid->vnode      = xdr_to_u64(x->vnode.lo);
+       fid->vnode_hi   = ntohl(x->vnode.hi);
+       fid->unique     = ntohl(x->vnode.unique);
+       *_bp += xdr_size(x);
+}
+
+static __be32 *xdr_encode_u32(__be32 *bp, u32 n)
+{
+       *bp++ = htonl(n);
+       return bp;
+}
+
+static __be32 *xdr_encode_u64(__be32 *bp, u64 n)
+{
+       struct yfs_xdr_u64 *x = (void *)bp;
+
+       *x = u64_to_xdr(n);
+       return bp + xdr_size(x);
+}
+
+static __be32 *xdr_encode_YFSFid(__be32 *bp, struct afs_fid *fid)
+{
+       struct yfs_xdr_YFSFid *x = (void *)bp;
+
+       x->volume       = u64_to_xdr(fid->vid);
+       x->vnode.lo     = u64_to_xdr(fid->vnode);
+       x->vnode.hi     = htonl(fid->vnode_hi);
+       x->vnode.unique = htonl(fid->unique);
+       return bp + xdr_size(x);
+}
+
+static size_t xdr_strlen(unsigned int len)
+{
+       return sizeof(__be32) + round_up(len, sizeof(__be32));
+}
+
+static __be32 *xdr_encode_string(__be32 *bp, const char *p, unsigned int len)
+{
+       bp = xdr_encode_u32(bp, len);
+       bp = memcpy(bp, p, len);
+       if (len & 3) {
+               unsigned int pad = 4 - (len & 3);
+
+               memset((u8 *)bp + len, 0, pad);
+               len += pad;
+       }
+
+       return bp + len / sizeof(__be32);
+}
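
An XDR string is a 4-byte big-endian length word followed by the bytes, zero-padded up to a 4-byte boundary; that is the layout xdr_strlen() sizes and xdr_encode_string() emits. A standalone worked example:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *s = "abcde";
	uint32_t len = strlen(s);		/* 5 */
	uint32_t be_len = htonl(len);
	unsigned char wire[4 + 8] = { 0 };	/* xdr_strlen(5) == 12 */

	memcpy(wire, &be_len, sizeof(be_len));	/* length word */
	memcpy(wire + 4, s, len);		/* bytes; 3 pad bytes stay 0 */
	printf("on-wire size: %zu bytes\n", sizeof(wire));
	return 0;
}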
+
+static s64 linux_to_yfs_time(const struct timespec64 *t)
+{
+       /* Convert to 100ns intervals. */
+       return (u64)t->tv_sec * 10000000 + t->tv_nsec/100;
+}
+
+static __be32 *xdr_encode_YFSStoreStatus_mode(__be32 *bp, mode_t mode)
+{
+       struct yfs_xdr_YFSStoreStatus *x = (void *)bp;
+
+       x->mask         = htonl(AFS_SET_MODE);
+       x->mode         = htonl(mode & S_IALLUGO);
+       x->mtime_client = u64_to_xdr(0);
+       x->owner        = u64_to_xdr(0);
+       x->group        = u64_to_xdr(0);
+       return bp + xdr_size(x);
+}
+
+static __be32 *xdr_encode_YFSStoreStatus_mtime(__be32 *bp, const struct timespec64 *t)
+{
+       struct yfs_xdr_YFSStoreStatus *x = (void *)bp;
+       s64 mtime = linux_to_yfs_time(t);
+
+       x->mask         = htonl(AFS_SET_MTIME);
+       x->mode         = htonl(0);
+       x->mtime_client = u64_to_xdr(mtime);
+       x->owner        = u64_to_xdr(0);
+       x->group        = u64_to_xdr(0);
+       return bp + xdr_size(x);
+}
+
+/*
+ * Convert a signed 100ns-resolution 64-bit time into a timespec.
+ */
+static struct timespec64 yfs_time_to_linux(s64 t)
+{
+       struct timespec64 ts;
+       u64 abs_t;
+
+       /*
+        * Unfortunately, we cannot use normal 64-bit division on a 32-bit
+        * arch, and the alternative, do_div(), does not work with negative
+        * numbers, so those have to be special-cased.
+        */
+       if (t < 0) {
+               abs_t = -t;
+               ts.tv_nsec = (time64_t)(do_div(abs_t, 10000000) * 100);
+               ts.tv_nsec = -ts.tv_nsec;
+               ts.tv_sec = -abs_t;
+       } else {
+               abs_t = t;
+               ts.tv_nsec = (time64_t)do_div(abs_t, 10000000) * 100;
+               ts.tv_sec = abs_t;
+       }
+
+       return ts;
+}
+
+static struct timespec64 xdr_to_time(const struct yfs_xdr_u64 xdr)
+{
+       s64 t = xdr_to_u64(xdr);
+
+       return yfs_time_to_linux(t);
+}
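
YFS carries times as signed 64-bit counts of 100ns intervals. In userspace the same conversion can use plain 64-bit division and modulus; the kernel routine above goes through do_div() only because 32-bit architectures lack native 64-bit division, as its comment notes. A quick standalone check of the negative case:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t t = -12345678901LL;	/* 100ns units */
	int64_t sec, nsec;

	if (t < 0) {
		uint64_t abs_t = (uint64_t)-t;

		nsec = -(int64_t)(abs_t % 10000000) * 100;
		sec  = -(int64_t)(abs_t / 10000000);
	} else {
		sec  = t / 10000000;
		nsec = (t % 10000000) * 100;
	}
	/* Prints: -1234 s, -567890100 ns */
	printf("%" PRId64 " s, %" PRId64 " ns\n", sec, nsec);
	return 0;
}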
+
+static void yfs_check_req(struct afs_call *call, __be32 *bp)
+{
+       size_t len = (void *)bp - call->request;
+
+       if (len > call->request_size)
+               pr_err("kAFS: %s: Request buffer overflow (%zu>%u)\n",
+                      call->type->name, len, call->request_size);
+       else if (len < call->request_size)
+               pr_warning("kAFS: %s: Request buffer underflow (%zu<%u)\n",
+                          call->type->name, len, call->request_size);
+}
+
+/*
+ * Dump a bad file status record.
+ */
+static void xdr_dump_bad(const __be32 *bp)
+{
+       __be32 x[4];
+       int i;
+
+       pr_notice("YFS XDR: Bad status record\n");
+       for (i = 0; i < 5 * 4 * 4; i += 16) {
+               memcpy(x, bp, 16);
+               bp += 4;
+               pr_notice("%03x: %08x %08x %08x %08x\n",
+                         i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3]));
+       }
+
+       memcpy(x, bp, 4);
+       pr_notice("0x50: %08x\n", ntohl(x[0]));
+}
+
+/*
+ * Decode a YFSFetchStatus block
+ */
+static int xdr_decode_YFSFetchStatus(struct afs_call *call,
+                                    const __be32 **_bp,
+                                    struct afs_file_status *status,
+                                    struct afs_vnode *vnode,
+                                    const afs_dataversion_t *expected_version,
+                                    struct afs_read *read_req)
+{
+       const struct yfs_xdr_YFSFetchStatus *xdr = (const void *)*_bp;
+       u32 type;
+       u8 flags = 0;
+
+       status->abort_code = ntohl(xdr->abort_code);
+       if (status->abort_code != 0) {
+               if (vnode && status->abort_code == VNOVNODE) {
+                       set_bit(AFS_VNODE_DELETED, &vnode->flags);
+                       status->nlink = 0;
+                       __afs_break_callback(vnode);
+               }
+               return 0;
+       }
+
+       type = ntohl(xdr->type);
+       switch (type) {
+       case AFS_FTYPE_FILE:
+       case AFS_FTYPE_DIR:
+       case AFS_FTYPE_SYMLINK:
+               if (type != status->type &&
+                   vnode &&
+                   !test_bit(AFS_VNODE_UNSET, &vnode->flags)) {
+                       pr_warning("Vnode %llx:%llx:%x changed type %u to %u\n",
+                                  vnode->fid.vid,
+                                  vnode->fid.vnode,
+                                  vnode->fid.unique,
+                                  status->type, type);
+                       goto bad;
+               }
+               status->type = type;
+               break;
+       default:
+               goto bad;
+       }
+
+#define EXTRACT_M4(FIELD)                                      \
+       do {                                                    \
+               u32 x = ntohl(xdr->FIELD);                      \
+               if (status->FIELD != x) {                       \
+                       flags |= AFS_VNODE_META_CHANGED;        \
+                       status->FIELD = x;                      \
+               }                                               \
+       } while (0)
+
+#define EXTRACT_M8(FIELD)                                      \
+       do {                                                    \
+               u64 x = xdr_to_u64(xdr->FIELD);                 \
+               if (status->FIELD != x) {                       \
+                       flags |= AFS_VNODE_META_CHANGED;        \
+                       status->FIELD = x;                      \
+               }                                               \
+       } while (0)
+
+#define EXTRACT_D8(FIELD)                                      \
+       do {                                                    \
+               u64 x = xdr_to_u64(xdr->FIELD);                 \
+               if (status->FIELD != x) {                       \
+                       flags |= AFS_VNODE_DATA_CHANGED;        \
+                       status->FIELD = x;                      \
+               }                                               \
+       } while (0)
+
+       EXTRACT_M4(nlink);
+       EXTRACT_D8(size);
+       EXTRACT_D8(data_version);
+       EXTRACT_M8(author);
+       EXTRACT_M8(owner);
+       EXTRACT_M8(group);
+       EXTRACT_M4(mode);
+       EXTRACT_M4(caller_access); /* call ticket dependent */
+       EXTRACT_M4(anon_access);
+
+       status->mtime_client = xdr_to_time(xdr->mtime_client);
+       status->mtime_server = xdr_to_time(xdr->mtime_server);
+       status->lock_count   = ntohl(xdr->lock_count);
+
+       if (read_req) {
+               read_req->data_version = status->data_version;
+               read_req->file_size = status->size;
+       }
+
+       *_bp += xdr_size(xdr);
+
+       if (vnode) {
+               if (test_bit(AFS_VNODE_UNSET, &vnode->flags))
+                       flags |= AFS_VNODE_NOT_YET_SET;
+               afs_update_inode_from_status(vnode, status, expected_version,
+                                            flags);
+       }
+
+       return 0;
+
+bad:
+       xdr_dump_bad(*_bp);
+       return afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
+}
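
For reference, EXTRACT_M4(nlink) above expands to the block below; the M4/M8/D8 variants differ only in field width and in whether a change raises AFS_VNODE_META_CHANGED or AFS_VNODE_DATA_CHANGED:

do {
	u32 x = ntohl(xdr->nlink);
	if (status->nlink != x) {
		flags |= AFS_VNODE_META_CHANGED;
		status->nlink = x;
	}
} while (0);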
+
+/*
+ * Decode the file status.  We need to lock the target vnode if we're going to
+ * update its status so that stat() sees the attributes update atomically.
+ */
+static int yfs_decode_status(struct afs_call *call,
+                            const __be32 **_bp,
+                            struct afs_file_status *status,
+                            struct afs_vnode *vnode,
+                            const afs_dataversion_t *expected_version,
+                            struct afs_read *read_req)
+{
+       int ret;
+
+       if (!vnode)
+               return xdr_decode_YFSFetchStatus(call, _bp, status, vnode,
+                                                expected_version, read_req);
+
+       write_seqlock(&vnode->cb_lock);
+       ret = xdr_decode_YFSFetchStatus(call, _bp, status, vnode,
+                                       expected_version, read_req);
+       write_sequnlock(&vnode->cb_lock);
+       return ret;
+}
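
The wrapper takes cb_lock as a write seqlock so that readers of the status (stat() and friends) retry rather than observe a half-applied update. Below is a standalone userspace analogue of that retry discipline; it deliberately glosses over the memory-ordering details that the kernel's seqlock primitives handle, and all names are invented.

#include <stdatomic.h>
#include <stdio.h>

struct demo_vnode {
	atomic_uint	seq;		/* odd => writer active */
	long long	size;
	long long	data_version;
};

/* Writer side - the role write_seqlock() plays above. */
static void demo_update(struct demo_vnode *v, long long sz, long long dv)
{
	atomic_fetch_add(&v->seq, 1);	/* sequence goes odd */
	v->size = sz;
	v->data_version = dv;
	atomic_fetch_add(&v->seq, 1);	/* sequence goes even again */
}

/* Reader side - retries instead of blocking, so it never returns a
 * half-written status. */
static void demo_read(struct demo_vnode *v, long long *sz, long long *dv)
{
	unsigned int seq;

	do {
		while ((seq = atomic_load(&v->seq)) & 1)
			;		/* writer in progress: spin */
		*sz = v->size;
		*dv = v->data_version;
	} while (atomic_load(&v->seq) != seq);	/* retry if it moved */
}

int main(void)
{
	struct demo_vnode v = { 0 };
	long long sz, dv;

	demo_update(&v, 4096, 7);
	demo_read(&v, &sz, &dv);
	printf("size=%lld dv=%lld\n", sz, dv);
	return 0;
}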
+
+/*
+ * Decode a YFSCallBack block
+ */
+static void xdr_decode_YFSCallBack(struct afs_call *call,
+                                  struct afs_vnode *vnode,
+                                  const __be32 **_bp)
+{
+       struct yfs_xdr_YFSCallBack *xdr = (void *)*_bp;
+       struct afs_cb_interest *old, *cbi = call->cbi;
+       u64 cb_expiry;
+
+       write_seqlock(&vnode->cb_lock);
+
+       if (!afs_cb_is_broken(call->cb_break, vnode, cbi)) {
+               cb_expiry = xdr_to_u64(xdr->expiration_time);
+               do_div(cb_expiry, 10 * 1000 * 1000);
+               vnode->cb_version       = ntohl(xdr->version);
+               vnode->cb_type          = ntohl(xdr->type);
+               vnode->cb_expires_at    = cb_expiry + ktime_get_real_seconds();
+               old = vnode->cb_interest;
+               if (old != call->cbi) {
+                       vnode->cb_interest = cbi;
+                       cbi = old;
+               }
+               set_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+       }
+
+       write_sequnlock(&vnode->cb_lock);
+       call->cbi = cbi;
+       *_bp += xdr_size(xdr);
+}
+
+static void xdr_decode_YFSCallBack_raw(const __be32 **_bp,
+                                      struct afs_callback *cb)
+{
+       struct yfs_xdr_YFSCallBack *x = (void *)*_bp;
+       u64 cb_expiry;
+
+       cb_expiry = xdr_to_u64(x->expiration_time);
+       do_div(cb_expiry, 10 * 1000 * 1000);
+       cb->version     = ntohl(x->version);
+       cb->type        = ntohl(x->type);
+       cb->expires_at  = cb_expiry + ktime_get_real_seconds();
+
+       *_bp += xdr_size(x);
+}
+
+/*
+ * Decode a YFSVolSync block
+ */
+static void xdr_decode_YFSVolSync(const __be32 **_bp,
+                                 struct afs_volsync *volsync)
+{
+       struct yfs_xdr_YFSVolSync *x = (void *)*_bp;
+       u64 creation;
+
+       if (volsync) {
+               creation = xdr_to_u64(x->vol_creation_date);
+               do_div(creation, 10 * 1000 * 1000);
+               volsync->creation = creation;
+       }
+
+       *_bp += xdr_size(x);
+}
+
+/*
+ * Encode the requested attributes into a YFSStoreStatus block
+ */
+static __be32 *xdr_encode_YFS_StoreStatus(__be32 *bp, struct iattr *attr)
+{
+       struct yfs_xdr_YFSStoreStatus *x = (void *)bp;
+       s64 mtime = 0, owner = 0, group = 0;
+       u32 mask = 0, mode = 0;
+
+       if (attr->ia_valid & ATTR_MTIME) {
+               mask |= AFS_SET_MTIME;
+               mtime = linux_to_yfs_time(&attr->ia_mtime);
+       }
+
+       if (attr->ia_valid & ATTR_UID) {
+               mask |= AFS_SET_OWNER;
+               owner = from_kuid(&init_user_ns, attr->ia_uid);
+       }
+
+       if (attr->ia_valid & ATTR_GID) {
+               mask |= AFS_SET_GROUP;
+               group = from_kgid(&init_user_ns, attr->ia_gid);
+       }
+
+       if (attr->ia_valid & ATTR_MODE) {
+               mask |= AFS_SET_MODE;
+               mode = attr->ia_mode & S_IALLUGO;
+       }
+
+       x->mask         = htonl(mask);
+       x->mode         = htonl(mode);
+       x->mtime_client = u64_to_xdr(mtime);
+       x->owner        = u64_to_xdr(owner);
+       x->group        = u64_to_xdr(group);
+       return bp + xdr_size(x);
+}
+
+/*
+ * Decode a YFSFetchVolumeStatus block.
+ */
+static void xdr_decode_YFSFetchVolumeStatus(const __be32 **_bp,
+                                           struct afs_volume_status *vs)
+{
+       const struct yfs_xdr_YFSFetchVolumeStatus *x = (const void *)*_bp;
+       u32 flags;
+
+       vs->vid                 = xdr_to_u64(x->vid);
+       vs->parent_id           = xdr_to_u64(x->parent_id);
+       flags                   = ntohl(x->flags);
+       vs->online              = flags & yfs_FVSOnline;
+       vs->in_service          = flags & yfs_FVSInservice;
+       vs->blessed             = flags & yfs_FVSBlessed;
+       vs->needs_salvage       = flags & yfs_FVSNeedsSalvage;
+       vs->type                = ntohl(x->type);
+       vs->min_quota           = 0;
+       vs->max_quota           = xdr_to_u64(x->max_quota);
+       vs->blocks_in_use       = xdr_to_u64(x->blocks_in_use);
+       vs->part_blocks_avail   = xdr_to_u64(x->part_blocks_avail);
+       vs->part_max_blocks     = xdr_to_u64(x->part_max_blocks);
+       vs->vol_copy_date       = xdr_to_u64(x->vol_copy_date);
+       vs->vol_backup_date     = xdr_to_u64(x->vol_backup_date);
+       *_bp += sizeof(*x) / sizeof(__be32);
+}
+
+/*
+ * Deliver reply data to a YFS.FetchStatus.
+ */
+static int yfs_deliver_fs_fetch_status_vnode(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSCallBack(call, vnode, &bp);
+       xdr_decode_YFSVolSync(&bp, call->reply[1]);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.FetchStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSFetchStatus_vnode = {
+       .name           = "YFS.FetchStatus(vnode)",
+       .op             = yfs_FS_FetchStatus,
+       .deliver        = yfs_deliver_fs_fetch_status_vnode,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for a file.
+ */
+int yfs_fs_fetch_file_status(struct afs_fs_cursor *fc, struct afs_volsync *volsync,
+                            bool new_inode)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       _enter(",%x,{%llx:%llu},,",
+              key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSCallBack) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call) {
+               fc->ac.error = -ENOMEM;
+               return -ENOMEM;
+       }
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->reply[1] = volsync;
+       call->expected_version = new_inode ? 1 : vnode->status.data_version;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       yfs_check_req(call, bp);
+
+       call->cb_break = fc->cb_break;
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.FetchData64.
+ */
+static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       struct afs_read *req = call->reply[2];
+       const __be32 *bp;
+       unsigned int size;
+       int ret;
+
+       _enter("{%u,%zu/%llu}",
+              call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
+
+       switch (call->unmarshall) {
+       case 0:
+               req->actual_len = 0;
+               req->index = 0;
+               req->offset = req->pos & (PAGE_SIZE - 1);
+               afs_extract_to_tmp64(call);
+               call->unmarshall++;
+
+               /* extract the returned data length */
+       case 1:
+               _debug("extract data length");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               req->actual_len = be64_to_cpu(call->tmp64);
+               _debug("DATA length: %llu", req->actual_len);
+               req->remain = min(req->len, req->actual_len);
+               if (req->remain == 0)
+                       goto no_more_data;
+
+               call->unmarshall++;
+
+       begin_page:
+               ASSERTCMP(req->index, <, req->nr_pages);
+               if (req->remain > PAGE_SIZE - req->offset)
+                       size = PAGE_SIZE - req->offset;
+               else
+                       size = req->remain;
+               call->bvec[0].bv_len = size;
+               call->bvec[0].bv_offset = req->offset;
+               call->bvec[0].bv_page = req->pages[req->index];
+               iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
+               ASSERTCMP(size, <=, PAGE_SIZE);
+
+               /* extract the returned data */
+       case 2:
+               _debug("extract data %zu/%llu",
+                      iov_iter_count(&call->iter), req->remain);
+
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+               req->remain -= call->bvec[0].bv_len;
+               req->offset += call->bvec[0].bv_len;
+               ASSERTCMP(req->offset, <=, PAGE_SIZE);
+               if (req->offset == PAGE_SIZE) {
+                       req->offset = 0;
+                       if (req->page_done)
+                               req->page_done(call, req);
+                       req->index++;
+                       if (req->remain > 0)
+                               goto begin_page;
+               }
+
+               ASSERTCMP(req->remain, ==, 0);
+               if (req->actual_len <= req->len)
+                       goto no_more_data;
+
+               /* Discard any excess data the server gave us */
+               iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
+               call->unmarshall = 3;
+       case 3:
+               _debug("extract discard %zu/%llu",
+                      iov_iter_count(&call->iter), req->actual_len - req->len);
+
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+       no_more_data:
+               call->unmarshall = 4;
+               afs_extract_to_buf(call,
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSCallBack) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+
+               /* extract the metadata */
+       case 4:
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
+
+               bp = call->buffer;
+               ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                                       &vnode->status.data_version, req);
+               if (ret < 0)
+                       return ret;
+               xdr_decode_YFSCallBack(call, vnode, &bp);
+               xdr_decode_YFSVolSync(&bp, call->reply[1]);
+
+               call->unmarshall++;
+
+       case 5:
+               break;
+       }
+
+       for (; req->index < req->nr_pages; req->index++) {
+               if (req->offset < PAGE_SIZE)
+                       zero_user_segment(req->pages[req->index],
+                                         req->offset, PAGE_SIZE);
+               if (req->page_done)
+                       req->page_done(call, req);
+               req->offset = 0;
+       }
+
+       _leave(" = 0 [done]");
+       return 0;
+}
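
The begin_page step above slices the read into one bvec segment per page: each pass transfers min(remain, PAGE_SIZE - offset) bytes and advances to the next page once the current one fills. The same arithmetic, standalone (4096-byte pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned long long remain = 10000;	/* bytes the server sent */
	unsigned int offset = 100, index = 0;	/* start mid-page */

	while (remain) {
		unsigned int size = PAGE_SIZE - offset;

		if (size > remain)
			size = (unsigned int)remain;
		printf("page %u: %u bytes at offset %u\n", index, size, offset);
		remain -= size;
		offset += size;
		if (offset == PAGE_SIZE) {	/* page filled: advance */
			offset = 0;
			index++;
		}
	}
	return 0;
}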
+
+static void yfs_fetch_data_destructor(struct afs_call *call)
+{
+       struct afs_read *req = call->reply[2];
+
+       afs_put_read(req);
+       afs_flat_call_destructor(call);
+}
+
+/*
+ * YFS.FetchData64 operation type
+ */
+static const struct afs_call_type yfs_RXYFSFetchData64 = {
+       .name           = "YFS.FetchData64",
+       .op             = yfs_FS_FetchData64,
+       .deliver        = yfs_deliver_fs_fetch_data64,
+       .destructor     = yfs_fetch_data_destructor,
+};
+
+/*
+ * Fetch data from a file.
+ */
+int yfs_fs_fetch_data(struct afs_fs_cursor *fc, struct afs_read *req)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       _enter(",%x,{%llx:%llu},%llx,%llx",
+              key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode,
+              req->pos, req->len);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(struct yfs_xdr_u64) * 2,
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSCallBack) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->reply[1] = NULL; /* volsync */
+       call->reply[2] = req;
+       call->expected_version = vnode->status.data_version;
+       call->want_reply_time = true;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSFETCHDATA64);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_u64(bp, req->pos);
+       bp = xdr_encode_u64(bp, req->len);
+       yfs_check_req(call, bp);
+
+       refcount_inc(&req->usage);
+       call->cb_break = fc->cb_break;
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data for YFS.CreateFile or YFS.MakeDir.
+ */
+static int yfs_deliver_fs_create_vnode(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_YFSFid(&bp, call->reply[1]);
+       ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSCallBack_raw(&bp, call->reply[3]);
+       xdr_decode_YFSVolSync(&bp, NULL);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.CreateFile and YFS.MakeDir operation types
+ */
+static const struct afs_call_type afs_RXFSCreateFile = {
+       .name           = "YFS.CreateFile",
+       .op             = yfs_FS_CreateFile,
+       .deliver        = yfs_deliver_fs_create_vnode,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Create a file.
+ */
+int yfs_fs_create_file(struct afs_fs_cursor *fc,
+                      const char *name,
+                      umode_t mode,
+                      u64 current_data_version,
+                      struct afs_fid *newfid,
+                      struct afs_file_status *newstatus,
+                      struct afs_callback *newcb)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       size_t namesz, reqsz, rplsz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       reqsz = (sizeof(__be32) +
+                sizeof(__be32) +
+                sizeof(struct yfs_xdr_YFSFid) +
+                xdr_strlen(namesz) +
+                sizeof(struct yfs_xdr_YFSStoreStatus) +
+                sizeof(__be32));
+       rplsz = (sizeof(struct yfs_xdr_YFSFid) +
+                sizeof(struct yfs_xdr_YFSFetchStatus) +
+                sizeof(struct yfs_xdr_YFSFetchStatus) +
+                sizeof(struct yfs_xdr_YFSCallBack) +
+                sizeof(struct yfs_xdr_YFSVolSync));
+
+       call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz);
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->reply[1] = newfid;
+       call->reply[2] = newstatus;
+       call->reply[3] = newcb;
+       call->expected_version = current_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSCREATEFILE);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_string(bp, name, namesz);
+       bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+       bp = xdr_encode_u32(bp, 0); /* ViceLockType */
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+static const struct afs_call_type yfs_RXFSMakeDir = {
+       .name           = "YFS.MakeDir",
+       .op             = yfs_FS_MakeDir,
+       .deliver        = yfs_deliver_fs_create_vnode,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Make a directory.
+ */
+int yfs_fs_make_dir(struct afs_fs_cursor *fc,
+                   const char *name,
+                   umode_t mode,
+                   u64 current_data_version,
+                   struct afs_fid *newfid,
+                   struct afs_file_status *newstatus,
+                   struct afs_callback *newcb)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       size_t namesz, reqsz, rplsz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       reqsz = (sizeof(__be32) +
+                sizeof(struct yfs_xdr_RPCFlags) +
+                sizeof(struct yfs_xdr_YFSFid) +
+                xdr_strlen(namesz) +
+                sizeof(struct yfs_xdr_YFSStoreStatus));
+       rplsz = (sizeof(struct yfs_xdr_YFSFid) +
+                sizeof(struct yfs_xdr_YFSFetchStatus) +
+                sizeof(struct yfs_xdr_YFSFetchStatus) +
+                sizeof(struct yfs_xdr_YFSCallBack) +
+                sizeof(struct yfs_xdr_YFSVolSync));
+
+       call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz);
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->reply[1] = newfid;
+       call->reply[2] = newstatus;
+       call->reply[3] = newcb;
+       call->expected_version = current_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSMAKEDIR);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_string(bp, name, namesz);
+       bp = xdr_encode_YFSStoreStatus_mode(bp, mode);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.RemoveFile2 operation.
+ */
+static int yfs_deliver_fs_remove_file2(struct afs_call *call)
+{
+       struct afs_vnode *dvnode = call->reply[0];
+       struct afs_vnode *vnode = call->reply[1];
+       struct afs_fid fid;
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+
+       xdr_decode_YFSFid(&bp, &fid);
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       /* Was deleted if vnode->status.abort_code == VNOVNODE. */
+
+       xdr_decode_YFSVolSync(&bp, NULL);
+       return 0;
+}
+
+/*
+ * YFS.RemoveFile2 operation type.
+ */
+static const struct afs_call_type yfs_RXYFSRemoveFile2 = {
+       .name           = "YFS.RemoveFile2",
+       .op             = yfs_FS_RemoveFile2,
+       .deliver        = yfs_deliver_fs_remove_file2,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Remove a file and retrieve new file status.
+ */
+int yfs_fs_remove_file2(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+                       const char *name, u64 current_data_version)
+{
+       struct afs_vnode *dvnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(dvnode);
+       size_t namesz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2,
+                                  sizeof(__be32) +
+                                  sizeof(struct yfs_xdr_RPCFlags) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  xdr_strlen(namesz),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = dvnode;
+       call->reply[1] = vnode;
+       call->expected_version = current_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSREMOVEFILE2);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+       bp = xdr_encode_string(bp, name, namesz);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &dvnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.RemoveFile or YFS.RemoveDir operation.
+ */
+static int yfs_deliver_fs_remove(struct afs_call *call)
+{
+       struct afs_vnode *dvnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+
+       xdr_decode_YFSVolSync(&bp, NULL);
+       return 0;
+}
+
+/*
+ * YFS.RemoveDir and YFS.RemoveFile operation types.
+ */
+static const struct afs_call_type yfs_RXYFSRemoveFile = {
+       .name           = "YFS.RemoveFile",
+       .op             = yfs_FS_RemoveFile,
+       .deliver        = yfs_deliver_fs_remove,
+       .destructor     = afs_flat_call_destructor,
+};
+
+static const struct afs_call_type yfs_RXYFSRemoveDir = {
+       .name           = "YFS.RemoveDir",
+       .op             = yfs_FS_RemoveDir,
+       .deliver        = yfs_deliver_fs_remove,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Remove a file or directory.
+ */
+int yfs_fs_remove(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+                 const char *name, bool isdir, u64 current_data_version)
+{
+       struct afs_vnode *dvnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(dvnode);
+       size_t namesz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       call = afs_alloc_flat_call(
+               net, isdir ? &yfs_RXYFSRemoveDir : &yfs_RXYFSRemoveFile,
+               sizeof(__be32) +
+               sizeof(struct yfs_xdr_RPCFlags) +
+               sizeof(struct yfs_xdr_YFSFid) +
+               xdr_strlen(namesz),
+               sizeof(struct yfs_xdr_YFSFetchStatus) +
+               sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = dvnode;
+       call->reply[1] = vnode;
+       call->expected_version = current_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, isdir ? YFSREMOVEDIR : YFSREMOVEFILE);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+       bp = xdr_encode_string(bp, name, namesz);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &dvnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.Link operation.
+ */
+static int yfs_deliver_fs_link(struct afs_call *call)
+{
+       struct afs_vnode *dvnode = call->reply[0], *vnode = call->reply[1];
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       ret = yfs_decode_status(call, &bp, &dvnode->status, dvnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSVolSync(&bp, NULL);
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.Link operation type.
+ */
+static const struct afs_call_type yfs_RXYFSLink = {
+       .name           = "YFS.Link",
+       .op             = yfs_FS_Link,
+       .deliver        = yfs_deliver_fs_link,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Make a hard link.
+ */
+int yfs_fs_link(struct afs_fs_cursor *fc, struct afs_vnode *vnode,
+               const char *name, u64 current_data_version)
+{
+       struct afs_vnode *dvnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       size_t namesz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       call = afs_alloc_flat_call(net, &yfs_RXYFSLink,
+                                  sizeof(__be32) +
+                                  sizeof(struct yfs_xdr_RPCFlags) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  xdr_strlen(namesz) +
+                                  sizeof(struct yfs_xdr_YFSFid),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = dvnode;
+       call->reply[1] = vnode;
+       call->expected_version = current_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSLINK);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+       bp = xdr_encode_string(bp, name, namesz);
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.Symlink operation.
+ */
+static int yfs_deliver_fs_symlink(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       xdr_decode_YFSFid(&bp, call->reply[1]);
+       ret = yfs_decode_status(call, &bp, call->reply[2], NULL, NULL, NULL);
+       if (ret < 0)
+               return ret;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSVolSync(&bp, NULL);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.Symlink operation type
+ */
+static const struct afs_call_type yfs_RXYFSSymlink = {
+       .name           = "YFS.Symlink",
+       .op             = yfs_FS_Symlink,
+       .deliver        = yfs_deliver_fs_symlink,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Create a symbolic link.
+ */
+int yfs_fs_symlink(struct afs_fs_cursor *fc,
+                  const char *name,
+                  const char *contents,
+                  u64 current_data_version,
+                  struct afs_fid *newfid,
+                  struct afs_file_status *newstatus)
+{
+       struct afs_vnode *dvnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(dvnode);
+       size_t namesz, contents_sz;
+       __be32 *bp;
+
+       _enter("");
+
+       namesz = strlen(name);
+       contents_sz = strlen(contents);
+       call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink,
+                                  sizeof(__be32) +
+                                  sizeof(struct yfs_xdr_RPCFlags) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  xdr_strlen(namesz) +
+                                  xdr_strlen(contents_sz) +
+                                  sizeof(struct yfs_xdr_YFSStoreStatus),
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = dvnode;
+       call->reply[1] = newfid;
+       call->reply[2] = newstatus;
+       call->expected_version = current_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSSYMLINK);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &dvnode->fid);
+       bp = xdr_encode_string(bp, name, namesz);
+       bp = xdr_encode_string(bp, contents, contents_sz);
+       bp = xdr_encode_YFSStoreStatus_mode(bp, S_IRWXUGO);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &dvnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.Rename operation.
+ */
+static int yfs_deliver_fs_rename(struct afs_call *call)
+{
+       struct afs_vnode *orig_dvnode = call->reply[0];
+       struct afs_vnode *new_dvnode = call->reply[1];
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &orig_dvnode->status, orig_dvnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       if (new_dvnode != orig_dvnode) {
+               ret = yfs_decode_status(call, &bp, &new_dvnode->status, new_dvnode,
+                                       &call->expected_version_2, NULL);
+               if (ret < 0)
+                       return ret;
+       }
+
+       xdr_decode_YFSVolSync(&bp, NULL);
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.Rename operation type
+ */
+static const struct afs_call_type yfs_RXYFSRename = {
+       .name           = "YFS.Rename",
+       .op             = yfs_FS_Rename,
+       .deliver        = yfs_deliver_fs_rename,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Rename a file or directory.
+ */
+int yfs_fs_rename(struct afs_fs_cursor *fc,
+                 const char *orig_name,
+                 struct afs_vnode *new_dvnode,
+                 const char *new_name,
+                 u64 current_orig_data_version,
+                 u64 current_new_data_version)
+{
+       struct afs_vnode *orig_dvnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(orig_dvnode);
+       size_t o_namesz, n_namesz;
+       __be32 *bp;
+
+       _enter("");
+
+       o_namesz = strlen(orig_name);
+       n_namesz = strlen(new_name);
+       call = afs_alloc_flat_call(net, &yfs_RXYFSRename,
+                                  sizeof(__be32) +
+                                  sizeof(struct yfs_xdr_RPCFlags) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  xdr_strlen(o_namesz) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  xdr_strlen(n_namesz),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = orig_dvnode;
+       call->reply[1] = new_dvnode;
+       call->expected_version = current_orig_data_version + 1;
+       call->expected_version_2 = current_new_data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSRENAME);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &orig_dvnode->fid);
+       bp = xdr_encode_string(bp, orig_name, o_namesz);
+       bp = xdr_encode_YFSFid(bp, &new_dvnode->fid);
+       bp = xdr_encode_string(bp, new_name, n_namesz);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &orig_dvnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
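
Every mutating RPC in this file primes call->expected_version with the data version the server should report after exactly one change (the current version plus one); rename is the only operation that tracks two directories, hence the additional expected_version_2. A purely illustrative sketch of the comparison this enables — the actual check happens inside yfs_decode_status(), which is outside this section:

/* Illustrative only: how an expected-version check might be consumed. */
static bool data_version_as_expected(u64 expected, u64 reported)
{
	/* One bump (+1) is our own operation; anything else means a third
	 * party also changed the directory and cached contents must be
	 * revalidated. */
	return reported == expected;
}
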
+
+/*
+ * Deliver reply data to a YFS.StoreData64 operation.
+ */
+static int yfs_deliver_fs_store_data(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       _enter("");
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSVolSync(&bp, NULL);
+
+       afs_pages_written_back(vnode, call);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.StoreData64 operation type.
+ */
+static const struct afs_call_type yfs_RXYFSStoreData64 = {
+       .name           = "YFS.StoreData64",
+       .op             = yfs_FS_StoreData64,
+       .deliver        = yfs_deliver_fs_store_data,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Store a set of pages to a large file.
+ */
+int yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping,
+                     pgoff_t first, pgoff_t last,
+                     unsigned offset, unsigned to)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       loff_t size, pos, i_size;
+       __be32 *bp;
+
+       _enter(",%x,{%llx:%llu},,",
+              key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+       size = (loff_t)to - (loff_t)offset;
+       if (first != last)
+               size += (loff_t)(last - first) << PAGE_SHIFT;
+       pos = (loff_t)first << PAGE_SHIFT;
+       pos += offset;
+
+       i_size = i_size_read(&vnode->vfs_inode);
+       if (pos + size > i_size)
+               i_size = size + pos;
+
+       _debug("size %llx, at %llx, i_size %llx",
+              (unsigned long long)size, (unsigned long long)pos,
+              (unsigned long long)i_size);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64,
+                                  sizeof(__be32) +
+                                  sizeof(__be32) +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(struct yfs_xdr_YFSStoreStatus) +
+                                  sizeof(struct yfs_xdr_u64) * 3,
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->mapping = mapping;
+       call->reply[0] = vnode;
+       call->first = first;
+       call->last = last;
+       call->first_offset = offset;
+       call->last_to = to;
+       call->send_pages = true;
+       call->expected_version = vnode->status.data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSSTOREDATA64);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_YFSStoreStatus_mtime(bp, &vnode->vfs_inode.i_mtime);
+       bp = xdr_encode_u64(bp, pos);
+       bp = xdr_encode_u64(bp, size);
+       bp = xdr_encode_u64(bp, i_size);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
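
The extent to be stored is derived from a page range plus byte offsets into the first and last pages. As a worked example with 4 KiB pages: first = 2, last = 4, offset = 512 and to = 1024 give pos = 2 * 4096 + 512 = 8704 and size = (1024 - 512) + (4 - 2) * 4096 = 8704, i.e. the write runs from byte 512 of page 2 up to byte 1024 of page 4. A self-contained model of the same arithmetic:

/* Standalone model of the pos/size computation above (4 KiB pages assumed). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t first = 2, last = 4;		/* page indices */
	unsigned int offset = 512, to = 1024;	/* bounds within first/last page */

	uint64_t size = (uint64_t)to - offset;
	if (first != last)
		size += (last - first) << PAGE_SHIFT;
	uint64_t pos = ((uint64_t)first << PAGE_SHIFT) + offset;

	/* prints: pos=8704 size=8704 end=17408 */
	printf("pos=%" PRIu64 " size=%" PRIu64 " end=%" PRIu64 "\n",
	       pos, size, pos + size);
	return 0;
}
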
+
+/*
+ * Deliver reply data to a YFS.StoreStatus operation.
+ */
+static int yfs_deliver_fs_store_status(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       _enter("");
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSVolSync(&bp, NULL);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.StoreStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSStoreStatus = {
+       .name           = "YFS.StoreStatus",
+       .op             = yfs_FS_StoreStatus,
+       .deliver        = yfs_deliver_fs_store_status,
+       .destructor     = afs_flat_call_destructor,
+};
+
+static const struct afs_call_type yfs_RXYFSStoreData64_as_Status = {
+       .name           = "YFS.StoreData64",
+       .op             = yfs_FS_StoreData64,
+       .deliver        = yfs_deliver_fs_store_status,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Set the attributes on a file, using YFS.StoreData64 rather than
+ * YFS.StoreStatus so that the file size can be changed as well.
+ */
+static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       _enter(",%x,{%llx:%llu},,",
+              key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(struct yfs_xdr_YFSStoreStatus) +
+                                  sizeof(struct yfs_xdr_u64) * 3,
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->expected_version = vnode->status.data_version + 1;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSSTOREDATA64);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_YFS_StoreStatus(bp, attr);
+       bp = xdr_encode_u64(bp, 0);             /* position of start of write */
+       bp = xdr_encode_u64(bp, 0);             /* size of write */
+       bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Set the attributes on a file, using YFS.StoreData64 if there's a change in
+ * file size, and YFS.StoreStatus otherwise.
+ */
+int yfs_fs_setattr(struct afs_fs_cursor *fc, struct iattr *attr)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       if (attr->ia_valid & ATTR_SIZE)
+               return yfs_fs_setattr_size(fc, attr);
+
+       _enter(",%x,{%llx:%llu},,",
+              key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(struct yfs_xdr_YFSStoreStatus),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->expected_version = vnode->status.data_version;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSSTORESTATUS);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_YFS_StoreStatus(bp, attr);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.GetVolumeStatus operation.
+ */
+static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
+{
+       const __be32 *bp;
+       char *p;
+       u32 size;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       switch (call->unmarshall) {
+       case 0:
+               call->unmarshall++;
+               afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
+
+               /* extract the returned status record */
+       case 1:
+               _debug("extract status");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               bp = call->buffer;
+               xdr_decode_YFSFetchVolumeStatus(&bp, call->reply[1]);
+               call->unmarshall++;
+               afs_extract_to_tmp(call);
+
+               /* extract the volume name length */
+       case 2:
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               call->count = ntohl(call->tmp);
+               _debug("volname length: %u", call->count);
+               if (call->count >= AFSNAMEMAX)
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_volname_len);
+               size = (call->count + 3) & ~3; /* It's padded */
+               afs_extract_begin(call, call->reply[2], size);
+               call->unmarshall++;
+
+               /* extract the volume name */
+       case 3:
+               _debug("extract volname");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               p = call->reply[2];
+               p[call->count] = 0;
+               _debug("volname '%s'", p);
+               afs_extract_to_tmp(call);
+               call->unmarshall++;
+
+               /* extract the offline message length */
+       case 4:
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               call->count = ntohl(call->tmp);
+               _debug("offline msg length: %u", call->count);
+               if (call->count >= AFSNAMEMAX)
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_offline_msg_len);
+               size = (call->count + 3) & ~3; /* It's padded */
+               afs_extract_begin(call, call->reply[2], size);
+               call->unmarshall++;
+
+               /* extract the offline message */
+       case 5:
+               _debug("extract offline");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               p = call->reply[2];
+               p[call->count] = 0;
+               _debug("offline '%s'", p);
+
+               afs_extract_to_tmp(call);
+               call->unmarshall++;
+
+               /* extract the message of the day length */
+       case 6:
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               call->count = ntohl(call->tmp);
+               _debug("motd length: %u", call->count);
+               if (call->count >= AFSNAMEMAX)
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_motd_len);
+               size = (call->count + 3) & ~3; /* It's padded */
+               afs_extract_begin(call, call->reply[2], size);
+               call->unmarshall++;
+
+               /* extract the message of the day */
+       case 7:
+               _debug("extract motd");
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
+
+               p = call->reply[2];
+               p[call->count] = 0;
+               _debug("motd '%s'", p);
+
+               call->unmarshall++;
+
+       case 8:
+               break;
+       }
+
+       _leave(" = 0 [done]");
+       return 0;
+}
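
Each variable-length field above arrives as a 32-bit length followed by data padded to a 4-byte boundary, which is why every step rounds with "(call->count + 3) & ~3" before extracting. The rounding in isolation:

#include <assert.h>

/* XDR-style strings occupy a whole number of 4-byte words on the wire. */
static unsigned int xdr_padded_len(unsigned int count)
{
	return (count + 3) & ~3u;
}

int main(void)
{
	assert(xdr_padded_len(0) == 0);
	assert(xdr_padded_len(1) == 4);
	assert(xdr_padded_len(4) == 4);
	assert(xdr_padded_len(5) == 8);
	return 0;
}
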
+
+/*
+ * Destroy a YFS.GetVolumeStatus call.
+ */
+static void yfs_get_volume_status_call_destructor(struct afs_call *call)
+{
+       kfree(call->reply[2]);
+       call->reply[2] = NULL;
+       afs_flat_call_destructor(call);
+}
+
+/*
+ * YFS.GetVolumeStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSGetVolumeStatus = {
+       .name           = "YFS.GetVolumeStatus",
+       .op             = yfs_FS_GetVolumeStatus,
+       .deliver        = yfs_deliver_fs_get_volume_status,
+       .destructor     = yfs_get_volume_status_call_destructor,
+};
+
+/*
+ * Fetch the status of a volume.
+ */
+int yfs_fs_get_volume_status(struct afs_fs_cursor *fc,
+                            struct afs_volume_status *vs)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+       void *tmpbuf;
+
+       _enter("");
+
+       tmpbuf = kmalloc(AFSOPAQUEMAX, GFP_KERNEL);
+       if (!tmpbuf)
+               return -ENOMEM;
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_u64),
+                                  sizeof(struct yfs_xdr_YFSFetchVolumeStatus) +
+                                  sizeof(__be32));
+       if (!call) {
+               kfree(tmpbuf);
+               return -ENOMEM;
+       }
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+       call->reply[1] = vs;
+       call->reply[2] = tmpbuf;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSGETVOLUMESTATUS);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_u64(bp, vnode->fid.vid);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.SetLock, YFS.ExtendLock or YFS.ReleaseLock
+ */
+static int yfs_deliver_fs_xxxx_lock(struct afs_call *call)
+{
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, &vnode->status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSVolSync(&bp, NULL);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.SetLock operation type
+ */
+static const struct afs_call_type yfs_RXYFSSetLock = {
+       .name           = "YFS.SetLock",
+       .op             = yfs_FS_SetLock,
+       .deliver        = yfs_deliver_fs_xxxx_lock,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.ExtendLock operation type
+ */
+static const struct afs_call_type yfs_RXYFSExtendLock = {
+       .name           = "YFS.ExtendLock",
+       .op             = yfs_FS_ExtendLock,
+       .deliver        = yfs_deliver_fs_xxxx_lock,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * YFS.ReleaseLock operation type
+ */
+static const struct afs_call_type yfs_RXYFSReleaseLock = {
+       .name           = "YFS.ReleaseLock",
+       .op             = yfs_FS_ReleaseLock,
+       .deliver        = yfs_deliver_fs_xxxx_lock,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Set a lock on a file
+ */
+int yfs_fs_set_lock(struct afs_fs_cursor *fc, afs_lock_type_t type)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       _enter("");
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid) +
+                                  sizeof(__be32),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSSETLOCK);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       bp = xdr_encode_u32(bp, type);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Extend a lock on a file
+ */
+int yfs_fs_extend_lock(struct afs_fs_cursor *fc)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       _enter("");
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSEXTENDLOCK);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Release a lock on a file
+ */
+int yfs_fs_release_lock(struct afs_fs_cursor *fc)
+{
+       struct afs_vnode *vnode = fc->vnode;
+       struct afs_call *call;
+       struct afs_net *net = afs_v2net(vnode);
+       __be32 *bp;
+
+       _enter("");
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call)
+               return -ENOMEM;
+
+       call->key = fc->key;
+       call->reply[0] = vnode;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSRELEASELOCK);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, &vnode->fid);
+       yfs_check_req(call, bp);
+
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &vnode->fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.FetchStatus operation with no vnode.
+ */
+static int yfs_deliver_fs_fetch_status(struct afs_call *call)
+{
+       struct afs_file_status *status = call->reply[1];
+       struct afs_callback *callback = call->reply[2];
+       struct afs_volsync *volsync = call->reply[3];
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       int ret;
+
+       ret = afs_transfer_reply(call);
+       if (ret < 0)
+               return ret;
+
+       _enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
+
+       /* unmarshall the reply once we've received all of it */
+       bp = call->buffer;
+       ret = yfs_decode_status(call, &bp, status, vnode,
+                               &call->expected_version, NULL);
+       if (ret < 0)
+               return ret;
+       xdr_decode_YFSCallBack_raw(&bp, callback);
+       xdr_decode_YFSVolSync(&bp, volsync);
+
+       _leave(" = 0 [done]");
+       return 0;
+}
+
+/*
+ * YFS.FetchStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSFetchStatus = {
+       .name           = "YFS.FetchStatus",
+       .op             = yfs_FS_FetchStatus,
+       .deliver        = yfs_deliver_fs_fetch_status,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for a fid without needing a vnode handle.
+ */
+int yfs_fs_fetch_status(struct afs_fs_cursor *fc,
+                       struct afs_net *net,
+                       struct afs_fid *fid,
+                       struct afs_file_status *status,
+                       struct afs_callback *callback,
+                       struct afs_volsync *volsync)
+{
+       struct afs_call *call;
+       __be32 *bp;
+
+       _enter(",%x,{%llx:%llu},,",
+              key_serial(fc->key), fid->vid, fid->vnode);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus,
+                                  sizeof(__be32) * 2 +
+                                  sizeof(struct yfs_xdr_YFSFid),
+                                  sizeof(struct yfs_xdr_YFSFetchStatus) +
+                                  sizeof(struct yfs_xdr_YFSCallBack) +
+                                  sizeof(struct yfs_xdr_YFSVolSync));
+       if (!call) {
+               fc->ac.error = -ENOMEM;
+               return -ENOMEM;
+       }
+
+       call->key = fc->key;
+       call->reply[0] = NULL; /* vnode for fid[0] */
+       call->reply[1] = status;
+       call->reply[2] = callback;
+       call->reply[3] = volsync;
+       call->expected_version = 1; /* vnode->status.data_version */
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSFETCHSTATUS);
+       bp = xdr_encode_u32(bp, 0); /* RPC flags */
+       bp = xdr_encode_YFSFid(bp, fid);
+       yfs_check_req(call, bp);
+
+       call->cb_break = fc->cb_break;
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, fid);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
+
+/*
+ * Deliver reply data to a YFS.InlineBulkStatus call
+ */
+static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
+{
+       struct afs_file_status *statuses;
+       struct afs_callback *callbacks;
+       struct afs_vnode *vnode = call->reply[0];
+       const __be32 *bp;
+       u32 tmp;
+       int ret;
+
+       _enter("{%u}", call->unmarshall);
+
+       switch (call->unmarshall) {
+       case 0:
+               afs_extract_to_tmp(call);
+               call->unmarshall++;
+
+               /* Extract the file status count and array in two steps */
+       case 1:
+               _debug("extract status count");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               tmp = ntohl(call->tmp);
+               _debug("status count: %u/%u", tmp, call->count2);
+               if (tmp != call->count2)
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_ibulkst_count);
+
+               call->count = 0;
+               call->unmarshall++;
+       more_counts:
+               afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
+
+       case 2:
+               _debug("extract status array %u", call->count);
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               bp = call->buffer;
+               statuses = call->reply[1];
+               ret = yfs_decode_status(call, &bp, &statuses[call->count],
+                                       call->count == 0 ? vnode : NULL,
+                                       NULL, NULL);
+               if (ret < 0)
+                       return ret;
+
+               call->count++;
+               if (call->count < call->count2)
+                       goto more_counts;
+
+               call->count = 0;
+               call->unmarshall++;
+               afs_extract_to_tmp(call);
+
+               /* Extract the callback count and array in two steps */
+       case 3:
+               _debug("extract CB count");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               tmp = ntohl(call->tmp);
+               _debug("CB count: %u", tmp);
+               if (tmp != call->count2)
+                       return afs_protocol_error(call, -EBADMSG,
+                                                 afs_eproto_ibulkst_cb_count);
+               call->count = 0;
+               call->unmarshall++;
+       more_cbs:
+               afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
+
+       case 4:
+               _debug("extract CB array");
+               ret = afs_extract_data(call, true);
+               if (ret < 0)
+                       return ret;
+
+               _debug("unmarshall CB array");
+               bp = call->buffer;
+               callbacks = call->reply[2];
+               xdr_decode_YFSCallBack_raw(&bp, &callbacks[call->count]);
+               statuses = call->reply[1];
+               if (call->count == 0 && vnode && statuses[0].abort_code == 0) {
+                       bp = call->buffer;
+                       xdr_decode_YFSCallBack(call, vnode, &bp);
+               }
+               call->count++;
+               if (call->count < call->count2)
+                       goto more_cbs;
+
+               afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
+               call->unmarshall++;
+
+       case 5:
+               ret = afs_extract_data(call, false);
+               if (ret < 0)
+                       return ret;
+
+               bp = call->buffer;
+               xdr_decode_YFSVolSync(&bp, call->reply[3]);
+
+               call->unmarshall++;
+
+       case 6:
+               break;
+       }
+
+       _leave(" = 0 [done]");
+       return 0;
+}
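
The unmarshaller above consumes the reply as a count word followed by that many fixed-size records, re-arming the extractor once per record and cross-checking the count against what was requested (call->count2). The same two-phase shape over a flat buffer, as a hedged standalone sketch with hypothetical names:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

struct record { uint32_t w[4]; };	/* stand-in for a fixed-size XDR record */

/* Returns the number of records parsed, or -1 if the sender's count does
 * not match what we asked for (mirroring the afs_protocol_error() check). */
static int parse_counted_array(const uint8_t *buf, size_t buflen,
			       uint32_t expected, struct record *out)
{
	uint32_t count;

	if (buflen < sizeof(count))
		return -1;
	memcpy(&count, buf, sizeof(count));
	count = ntohl(count);
	if (count != expected)
		return -1;
	buf += sizeof(count);
	buflen -= sizeof(count);
	if (buflen < (size_t)count * sizeof(struct record))
		return -1;
	for (uint32_t i = 0; i < count; i++)
		memcpy(&out[i], buf + i * sizeof(struct record),
		       sizeof(struct record));
	return (int)count;
}
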
+
+/*
+ * YFS.InlineBulkStatus operation type
+ */
+static const struct afs_call_type yfs_RXYFSInlineBulkStatus = {
+       .name           = "YFS.InlineBulkStatus",
+       .op             = yfs_FS_InlineBulkStatus,
+       .deliver        = yfs_deliver_fs_inline_bulk_status,
+       .destructor     = afs_flat_call_destructor,
+};
+
+/*
+ * Fetch the status information for up to 1024 files
+ */
+int yfs_fs_inline_bulk_status(struct afs_fs_cursor *fc,
+                             struct afs_net *net,
+                             struct afs_fid *fids,
+                             struct afs_file_status *statuses,
+                             struct afs_callback *callbacks,
+                             unsigned int nr_fids,
+                             struct afs_volsync *volsync)
+{
+       struct afs_call *call;
+       __be32 *bp;
+       int i;
+
+       _enter(",%x,{%llx:%llu},%u",
+              key_serial(fc->key), fids[0].vid, fids[0].vnode, nr_fids);
+
+       call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus,
+                                  sizeof(__be32) +
+                                  sizeof(__be32) +
+                                  sizeof(__be32) +
+                                  sizeof(struct yfs_xdr_YFSFid) * nr_fids,
+                                  sizeof(struct yfs_xdr_YFSFetchStatus));
+       if (!call) {
+               fc->ac.error = -ENOMEM;
+               return -ENOMEM;
+       }
+
+       call->key = fc->key;
+       call->reply[0] = NULL; /* vnode for fid[0] */
+       call->reply[1] = statuses;
+       call->reply[2] = callbacks;
+       call->reply[3] = volsync;
+       call->count2 = nr_fids;
+
+       /* marshall the parameters */
+       bp = call->request;
+       bp = xdr_encode_u32(bp, YFSINLINEBULKSTATUS);
+       bp = xdr_encode_u32(bp, 0); /* RPCFlags */
+       bp = xdr_encode_u32(bp, nr_fids);
+       for (i = 0; i < nr_fids; i++)
+               bp = xdr_encode_YFSFid(bp, &fids[i]);
+       yfs_check_req(call, bp);
+
+       call->cb_break = fc->cb_break;
+       afs_use_fs_server(call, fc->cbi);
+       trace_afs_make_fs_call(call, &fids[0]);
+       return afs_make_call(&fc->ac, call, GFP_NOFS, false);
+}
index 301e6314183b66756077fed9e37630cc5436ff40..97f9835929256bfd6027d6958e14fb3ec64d34da 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
                ret = ioprio_check_cap(iocb->aio_reqprio);
                if (ret) {
                        pr_debug("aio ioprio check cap error: %d\n", ret);
+                       fput(req->ki_filp);
                        return ret;
                }
 
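The one-line fs/aio.c fix above plugs a file reference leak: aio_prep_rw() takes a reference on the request's file earlier in the function, so the early return on a failed ioprio capability check must drop it. A hedged sketch of the acquire/release-on-error shape it restores (surrounding details of aio_prep_rw() elided):

static int prep(struct kiocb *req, const struct iocb *iocb)
{
	int ret;

	req->ki_filp = fget(iocb->aio_fildes);	/* reference taken here */
	if (unlikely(!req->ki_filp))
		return -EBADF;

	ret = ioprio_check_cap(iocb->aio_reqprio);
	if (ret) {
		fput(req->ki_filp);	/* every error exit must drop it */
		return ret;
	}
	return 0;
}
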
index 9a69392f1fb375c4c16b07332248edf7da57c9b6..d81c148682e715a9f0ed4937185dd3b5538a9603 100644 (file)
@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
 
        s->s_magic = BFS_MAGIC;
 
-       if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
+       if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
+           le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
                printf("Superblock is corrupted\n");
                goto out1;
        }
@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
                                        sizeof(struct bfs_inode)
                                        + BFS_ROOT_INO - 1;
        imap_len = (info->si_lasti / 8) + 1;
-       info->si_imap = kzalloc(imap_len, GFP_KERNEL);
-       if (!info->si_imap)
+       info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
+       if (!info->si_imap) {
+               printf("Cannot allocate %u bytes\n", imap_len);
                goto out1;
+       }
        for (i = 0; i < BFS_ROOT_INO; i++)
                set_bit(i, info->si_imap);
 
index 38b8ce05cbc7e693d5d4b835eb586c586680ef0c..a80b4f0ee7c4f172d19b4df1c8c330e4c0738557 100644 (file)
@@ -349,7 +349,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
        dio->size = 0;
        dio->multi_bio = false;
-       dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
+       dio->should_dirty = is_read && iter_is_iovec(iter);
 
        blk_start_plug(&plug);
        for (;;) {
index 68ca41dbbef387f93de28ca845c144fc37bbab79..68f322f600a0677813867c5278b5aca9c23a6e8f 100644 (file)
@@ -3163,6 +3163,9 @@ void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
 int __init btrfs_init_cachep(void);
 void __cold btrfs_destroy_cachep(void);
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+                             struct btrfs_root *root, int *new,
+                             struct btrfs_path *path);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                         struct btrfs_root *root, int *was_new);
 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
@@ -3201,9 +3204,6 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
                                struct btrfs_ioctl_space_info *space);
 void btrfs_update_ioctl_balance_args(struct btrfs_fs_info *fs_info,
                               struct btrfs_ioctl_balance_args *bargs);
-int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
-                           struct file *dst_file, loff_t dst_loff,
-                           u64 olen);
 
 /* file.c */
 int __init btrfs_auto_defrag_init(void);
@@ -3233,8 +3233,9 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
                      size_t num_pages, loff_t pos, size_t write_bytes,
                      struct extent_state **cached);
 int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
-int btrfs_clone_file_range(struct file *file_in, loff_t pos_in,
-                          struct file *file_out, loff_t pos_out, u64 len);
+loff_t btrfs_remap_file_range(struct file *file_in, loff_t pos_in,
+                             struct file *file_out, loff_t pos_out,
+                             loff_t len, unsigned int remap_flags);
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
index b0ab41da91d122b8cd8c8010cd56973af2afd240..6d776717d8b39b566e6ec14f479648ad6f788905 100644 (file)
@@ -477,9 +477,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
        int mirror_num = 0;
        int failed_mirror = 0;
 
-       clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
        while (1) {
+               clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = read_extent_buffer_pages(io_tree, eb, WAIT_COMPLETE,
                                               mirror_num);
                if (!ret) {
@@ -493,15 +493,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_fs_info *fs_info,
                                break;
                }
 
-               /*
-                * This buffer's crc is fine, but its contents are corrupted, so
-                * there is no reason to read the other copies, they won't be
-                * any less wrong.
-                */
-               if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags) ||
-                   ret == -EUCLEAN)
-                       break;
-
                num_copies = btrfs_num_copies(fs_info,
                                              eb->start, eb->len);
                if (num_copies == 1)
@@ -1664,9 +1655,8 @@ static int cleaner_kthread(void *arg)
        struct btrfs_root *root = arg;
        struct btrfs_fs_info *fs_info = root->fs_info;
        int again;
-       struct btrfs_trans_handle *trans;
 
-       do {
+       while (1) {
                again = 0;
 
                /* Make the cleaner go to sleep early. */
@@ -1715,42 +1705,16 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(fs_info);
 sleep:
+               if (kthread_should_park())
+                       kthread_parkme();
+               if (kthread_should_stop())
+                       return 0;
                if (!again) {
                        set_current_state(TASK_INTERRUPTIBLE);
-                       if (!kthread_should_stop())
-                               schedule();
+                       schedule();
                        __set_current_state(TASK_RUNNING);
                }
-       } while (!kthread_should_stop());
-
-       /*
-        * Transaction kthread is stopped before us and wakes us up.
-        * However we might have started a new transaction and COWed some
-        * tree blocks when deleting unused block groups for example. So
-        * make sure we commit the transaction we started to have a clean
-        * shutdown when evicting the btree inode - if it has dirty pages
-        * when we do the final iput() on it, eviction will trigger a
-        * writeback for it which will fail with null pointer dereferences
-        * since work queues and other resources were already released and
-        * destroyed by the time the iput/eviction/writeback is made.
-        */
-       trans = btrfs_attach_transaction(root);
-       if (IS_ERR(trans)) {
-               if (PTR_ERR(trans) != -ENOENT)
-                       btrfs_err(fs_info,
-                                 "cleaner transaction attach returned %ld",
-                                 PTR_ERR(trans));
-       } else {
-               int ret;
-
-               ret = btrfs_commit_transaction(trans);
-               if (ret)
-                       btrfs_err(fs_info,
-                                 "cleaner open transaction commit returned %d",
-                                 ret);
        }
-
-       return 0;
 }
 
 static int transaction_kthread(void *arg)
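
The rewrite above turns the cleaner into a parkable kthread: rather than committing a final transaction on its own way out, it can now be frozen at a well-defined point with kthread_park() — see the call added to close_ctree() in the next hunk — and only stopped later. A minimal parkable loop using the same primitives:

/* Sketch of a parkable kthread body: kthread_park() from another thread
 * makes kthread_should_park() true; kthread_parkme() then blocks until
 * unparked or stopped. do_one_pass() is a hypothetical unit of work. */
static int worker(void *arg)
{
	while (1) {
		do_one_pass(arg);

		if (kthread_should_park())
			kthread_parkme();	/* sleeps here while parked */
		if (kthread_should_stop())
			return 0;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		__set_current_state(TASK_RUNNING);
	}
}
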
@@ -3931,6 +3895,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
        int ret;
 
        set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+       /*
+        * We don't want the cleaner to start new transactions, add more delayed
+        * iputs, etc. while we're closing. We can't use kthread_stop() yet
+        * because that frees the task_struct, and the transaction kthread might
+        * still try to wake up the cleaner.
+        */
+       kthread_park(fs_info->cleaner_kthread);
 
        /* wait for the qgroup rescan worker to stop */
        btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3958,9 +3929,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
 
        if (!sb_rdonly(fs_info->sb)) {
                /*
-                * If the cleaner thread is stopped and there are
-                * block groups queued for removal, the deletion will be
-                * skipped when we quit the cleaner thread.
+                * The cleaner kthread is stopped, so do one final pass over
+                * unused block groups.
                 */
                btrfs_delete_unused_bgs(fs_info);
 
@@ -4359,13 +4329,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
        unpin = pinned_extents;
 again:
        while (1) {
+               /*
+                * The btrfs_finish_extent_commit() may get the same range as
+                * ours between find_first_extent_bit and clear_extent_dirty.
+                * Hence, hold the unused_bg_unpin_mutex to avoid double
+                * unpinning of the same extent range.
+                */
+               mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY, NULL);
-               if (ret)
+               if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        break;
+               }
 
                clear_extent_dirty(unpin, start, end);
                btrfs_error_unpin_extent_range(fs_info, start, end);
+               mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                cond_resched();
        }
 
index 97c7a086f7bd69f1dddb68fbdbf5c8bcc995024c..58e93bce30362dc0584c212ba18822ed13fde723 100644 (file)
@@ -2088,6 +2088,30 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        atomic_inc(&root->log_batch);
 
+       /*
+        * Before we acquired the inode's lock, someone may have dirtied more
+        * pages in the target range. We need to make sure that writeback for
+        * any such pages does not start while we are logging the inode, because
+        * if it does, any of the following might happen when we are not doing a
+        * full inode sync:
+        *
+        * 1) We log an extent after its writeback finishes but before its
+        *    checksums are added to the csum tree, leading to -EIO errors
+        *    when attempting to read the extent after a log replay.
+        *
+        * 2) We can end up logging an extent before its writeback finishes.
+        *    Therefore after the log replay we will have a file extent item
+        *    pointing to an unwritten extent (and no data checksums as well).
+        *
+        * So trigger writeback for any eventual new dirty pages and then we
+        * wait for all ordered extents to complete below.
+        */
+       ret = start_ordered_ops(inode, start, end);
+       if (ret) {
+               inode_unlock(inode);
+               goto out;
+       }
+
        /*
         * We have to do this here to avoid the priority inversion of waiting on
         * IO of a lower priority task while holding a transaction open.
@@ -3298,8 +3322,7 @@ const struct file_operations btrfs_file_operations = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_compat_ioctl,
 #endif
-       .clone_file_range = btrfs_clone_file_range,
-       .dedupe_file_range = btrfs_dedupe_file_range,
+       .remap_file_range = btrfs_remap_file_range,
 };
 
 void __cold btrfs_auto_defrag_exit(void)
index 4ba0aedc878bd4422e67d47879b1de297ed848c8..74aa552f47930699aa2a81134c3200bf2f5d5c5e 100644 (file)
@@ -75,7 +75,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
         * sure NOFS is set to keep us from deadlocking.
         */
        nofs_flag = memalloc_nofs_save();
-       inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+       inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+       btrfs_release_path(path);
        memalloc_nofs_restore(nofs_flag);
        if (IS_ERR(inode))
                return inode;
@@ -838,6 +839,25 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
        path->search_commit_root = 1;
        path->skip_locking = 1;
 
+       /*
+        * We must pass a path with search_commit_root set to btrfs_iget in
+        * order to avoid a deadlock when allocating extents for the tree root.
+        *
+        * When we are COWing an extent buffer from the tree root, when looking
+        * for a free extent, at extent-tree.c:find_free_extent(), we can find
+        * a block group whose free space cache is not loaded. When we find one
+        * we must load its space cache which requires reading its free space
+        * cache's inode item from the root tree. If this inode item is located
+        * in the same leaf that we started COWing before, then we end up in
+        * deadlock on the extent buffer (trying to read lock it when we
+        * previously write locked it).
+        *
+        * It's safe to read the inode item using the commit root because
+        * block groups, once loaded, stay in memory forever (until they are
+        * removed) as well as their space caches once loaded. New block groups
+        * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
+        * we will never try to read their inode item while the fs is mounted.
+        */
        inode = lookup_free_space_inode(fs_info, block_group, path);
        if (IS_ERR(inode)) {
                btrfs_free_path(path);
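
The comment block above is the heart of this change: looking up the free-space inode while COWing the tree root must search the commit root, with locking skipped, through a path the caller already owns. The calling convention the new btrfs_iget_path() enables, condensed from the hunks above:

/* Condensed from the change above: one pre-configured path is reused for
 * the inode lookup so it searches the commit root without taking locks. */
struct btrfs_path *path = btrfs_alloc_path();
if (!path)
	return -ENOMEM;
path->search_commit_root = 1;
path->skip_locking = 1;

inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
btrfs_release_path(path);	/* release before the path is reused */
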
index d3df5b52278cea06c05384ef7bf63aaed6b5404f..9ea4c6f0352f06e828a400890c50122c7ec33ee5 100644 (file)
@@ -1531,12 +1531,11 @@ out_check:
        }
        btrfs_release_path(path);
 
-       if (cur_offset <= end && cow_start == (u64)-1) {
+       if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
-               cur_offset = end;
-       }
 
        if (cow_start != (u64)-1) {
+               cur_offset = end;
                ret = cow_file_range(inode, locked_page, cow_start, end, end,
                                     page_started, nr_written, 1, NULL);
                if (ret)
@@ -3570,10 +3569,11 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
 /*
  * read an inode from the btree into the in-memory inode
  */
-static int btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode,
+                                  struct btrfs_path *in_path)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_path *path;
+       struct btrfs_path *path = in_path;
        struct extent_buffer *leaf;
        struct btrfs_inode_item *inode_item;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -3589,15 +3589,18 @@ static int btrfs_read_locked_inode(struct inode *inode)
        if (!ret)
                filled = true;
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       if (!path) {
+               path = btrfs_alloc_path();
+               if (!path)
+                       return -ENOMEM;
+       }
 
        memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
 
        ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
        if (ret) {
-               btrfs_free_path(path);
+               if (path != in_path)
+                       btrfs_free_path(path);
                return ret;
        }
 
@@ -3722,7 +3725,8 @@ cache_acl:
                                  btrfs_ino(BTRFS_I(inode)),
                                  root->root_key.objectid, ret);
        }
-       btrfs_free_path(path);
+       if (path != in_path)
+               btrfs_free_path(path);
 
        if (!maybe_acls)
                cache_no_acl(inode);
@@ -5644,8 +5648,9 @@ static struct inode *btrfs_iget_locked(struct super_block *s,
 /* Get an inode object given its location and corresponding root.
  * Returns in *is_new if the inode was read from disk
  */
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-                        struct btrfs_root *root, int *new)
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+                             struct btrfs_root *root, int *new,
+                             struct btrfs_path *path)
 {
        struct inode *inode;
 
@@ -5656,7 +5661,7 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
        if (inode->i_state & I_NEW) {
                int ret;
 
-               ret = btrfs_read_locked_inode(inode);
+               ret = btrfs_read_locked_inode(inode, path);
                if (!ret) {
                        inode_tree_add(inode);
                        unlock_new_inode(inode);
@@ -5678,6 +5683,12 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
        return inode;
 }
 
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+                        struct btrfs_root *root, int *new)
+{
+       return btrfs_iget_path(s, location, root, new, NULL);
+}
+
 static struct inode *new_simple_dir(struct super_block *s,
                                    struct btrfs_key *key,
                                    struct btrfs_root *root)
index a990a904513929d8e735ba00d3f120dbed975f6c..802a628e9f7d7fe629a76e8d108b75c04ed4246e 100644 (file)
@@ -3488,6 +3488,8 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
                        const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
 
                        len = round_down(i_size_read(src), sz) - loff;
+                       if (len == 0)
+                               return 0;
                        olen = len;
                }
        }
@@ -3629,26 +3631,6 @@ out_unlock:
        return ret;
 }
 
-int btrfs_dedupe_file_range(struct file *src_file, loff_t src_loff,
-                           struct file *dst_file, loff_t dst_loff,
-                           u64 olen)
-{
-       struct inode *src = file_inode(src_file);
-       struct inode *dst = file_inode(dst_file);
-       u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
-
-       if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
-               /*
-                * Btrfs does not support blocksize < page_size. As a
-                * result, btrfs_cmp_data() won't correctly handle
-                * this situation without an update.
-                */
-               return -EINVAL;
-       }
-
-       return btrfs_extent_same(src, src_loff, olen, dst, dst_loff);
-}
-
 static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
                                     struct inode *inode,
                                     u64 endoff,
@@ -4277,9 +4259,17 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
                goto out_unlock;
        if (len == 0)
                olen = len = src->i_size - off;
-       /* if we extend to eof, continue to block boundary */
-       if (off + len == src->i_size)
+       /*
+        * If we extend to eof, continue to block boundary if and only if the
+        * destination end offset matches the destination file's size, otherwise
+        * we would be corrupting data by placing the eof block into the middle
+        * of a file.
+        */
+       if (off + len == src->i_size) {
+               if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+                       goto out_unlock;
                len = ALIGN(src->i_size, bs) - off;
+       }
 
        if (len == 0) {
                ret = 0;
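
A worked example of the condition added above, with a 4096-byte block size: cloning all of a 10000-byte source (off = 0, len = 10000) rounds len up to 12288 so the partial EOF block comes along, which is only safe if destoff + len lands at or past the destination's current EOF; anywhere earlier, the zeroed tail of that block would overwrite live data mid-file. The check in isolation:

/* Standalone restatement of the new safety check: a block-rounded clone
 * of a source EOF block must not land inside the destination file. */
#include <stdbool.h>
#include <stdint.h>

static bool eof_block_clone_ok(uint64_t off, uint64_t len, uint64_t src_size,
			       uint64_t destoff, uint64_t dst_size, uint64_t bs)
{
	if (off + len != src_size)
		return true;		/* clone does not reach source EOF */
	if (len % bs == 0)
		return true;		/* no partial EOF block involved */
	return destoff + len >= dst_size;
}
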
@@ -4350,10 +4340,34 @@ out_unlock:
        return ret;
 }
 
-int btrfs_clone_file_range(struct file *src_file, loff_t off,
-               struct file *dst_file, loff_t destoff, u64 len)
+loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
+               struct file *dst_file, loff_t destoff, loff_t len,
+               unsigned int remap_flags)
 {
-       return btrfs_clone_files(dst_file, src_file, off, len, destoff);
+       int ret;
+
+       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+               return -EINVAL;
+
+       if (remap_flags & REMAP_FILE_DEDUP) {
+               struct inode *src = file_inode(src_file);
+               struct inode *dst = file_inode(dst_file);
+               u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
+
+               if (WARN_ON_ONCE(bs < PAGE_SIZE)) {
+                       /*
+                        * Btrfs does not support blocksize < page_size. As a
+                        * result, btrfs_cmp_data() won't correctly handle
+                        * this situation without an update.
+                        */
+                       return -EINVAL;
+               }
+
+               ret = btrfs_extent_same(src, off, len, dst, destoff);
+       } else {
+               ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);
+       }
+       return ret < 0 ? ret : len;
 }
 
 static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
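
The hunk above folds the old clone and dedupe entry points into the single ->remap_file_range operation. A minimal sketch of the calling convention it implements, for a hypothetical filesystem (the myfs_ names are placeholders, not btrfs code):

	loff_t myfs_remap_file_range(struct file *src_file, loff_t off,
				     struct file *dst_file, loff_t destoff,
				     loff_t len, unsigned int remap_flags)
	{
		int ret;

		/* Reject flags this filesystem does not understand. */
		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
			return -EINVAL;

		ret = (remap_flags & REMAP_FILE_DEDUP)
			? myfs_extent_same(src_file, off, len, dst_file, destoff)
			: myfs_clone_range(dst_file, src_file, off, len, destoff);

		/* On success the handler now returns bytes remapped, not 0. */
		return ret < 0 ? ret : len;
	}
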
index 45868fd76209024dc2cfc790801ffac9133a3ef1..f70825af6438e9c48ebe782f6b8049d27ae807fc 100644 (file)
@@ -2659,7 +2659,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
        int i;
        u64 *i_qgroups;
        struct btrfs_fs_info *fs_info = trans->fs_info;
-       struct btrfs_root *quota_root = fs_info->quota_root;
+       struct btrfs_root *quota_root;
        struct btrfs_qgroup *srcgroup;
        struct btrfs_qgroup *dstgroup;
        u32 level_size = 0;
@@ -2669,6 +2669,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
        if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
                goto out;
 
+       quota_root = fs_info->quota_root;
        if (!quota_root) {
                ret = -EINVAL;
                goto out;
index 924116f654a110cb9e23a878002bed0ae967029e..a3f75b8926d4474aa1093ffdd852d422b853e2d2 100644 (file)
@@ -3959,6 +3959,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
 restart:
                if (update_backref_cache(trans, &rc->backref_cache)) {
                        btrfs_end_transaction(trans);
+                       trans = NULL;
                        continue;
                }
 
index 094cc1444a90caaf0a597bd6c4a16ae265462701..5be83b5a1b43121234c6d0edda9e1c0566f30132 100644 (file)
@@ -3340,7 +3340,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
        kfree(m);
 }
 
-static void tail_append_pending_moves(struct pending_dir_move *moves,
+static void tail_append_pending_moves(struct send_ctx *sctx,
+                                     struct pending_dir_move *moves,
                                      struct list_head *stack)
 {
        if (list_empty(&moves->list)) {
@@ -3351,6 +3352,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
                list_add_tail(&moves->list, stack);
                list_splice_tail(&list, stack);
        }
+       if (!RB_EMPTY_NODE(&moves->node)) {
+               rb_erase(&moves->node, &sctx->pending_dir_moves);
+               RB_CLEAR_NODE(&moves->node);
+       }
 }
 
 static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3365,7 +3370,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
                return 0;
 
        INIT_LIST_HEAD(&stack);
-       tail_append_pending_moves(pm, &stack);
+       tail_append_pending_moves(sctx, pm, &stack);
 
        while (!list_empty(&stack)) {
                pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3376,7 +3381,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
                        goto out;
                pm = get_pending_dir_moves(sctx, parent_ino);
                if (pm)
-                       tail_append_pending_moves(pm, &stack);
+                       tail_append_pending_moves(sctx, pm, &stack);
        }
        return 0;
 
index b362b45dd7578ff2517baf86ac8779a211aba098..645fc81e2a948eab03431a08c0c4b062ce7fd25e 100644 (file)
@@ -1916,7 +1916,7 @@ restore:
 }
 
 /* Used to sort the devices by max_avail(descending sort) */
-static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
                                       const void *dev_info2)
 {
        if (((struct btrfs_device_info *)dev_info1)->max_avail >
@@ -1945,8 +1945,8 @@ static inline void btrfs_descending_sort_devices(
  * The helper to calc the free space on the devices that can be used to store
  * file data.
  */
-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
-                                      u64 *free_bytes)
+static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+                                             u64 *free_bytes)
 {
        struct btrfs_device_info *devices_info;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -2237,6 +2237,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
        vol = memdup_user((void __user *)arg, sizeof(*vol));
        if (IS_ERR(vol))
                return PTR_ERR(vol);
+       vol->name[BTRFS_PATH_NAME_MAX] = '\0';
 
        switch (cmd) {
        case BTRFS_IOC_SCAN_DEV:
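
The one-liner added above guards against userspace handing in a name field with no terminating NUL. A hedged userspace-style illustration of the same pattern (the struct layout and NAME_MAX_LEN are stand-ins, not the real btrfs_ioctl_vol_args):

	#include <string.h>

	#define NAME_MAX_LEN 255             /* stand-in for BTRFS_PATH_NAME_MAX */

	struct vol_args {
		long long fd;
		char name[NAME_MAX_LEN + 1];   /* room reserved for the NUL */
	};

	static void sanitize(struct vol_args *vol)
	{
		/* The struct is copied verbatim from userspace, so the array
		 * may arrive unterminated; clamping the last byte makes any
		 * later strlen() or "%s" use of vol->name safe. */
		vol->name[NAME_MAX_LEN] = '\0';
	}
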
index cab0b1f1f741797c32b8d399194554642ecf8a64..1a4e2b101ef24a09c63793f6e2d94f84443957cd 100644 (file)
@@ -389,13 +389,11 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
 
        /*
         * Here we don't really care about alignment since extent allocator can
-        * handle it.  We care more about the size, as if one block group is
-        * larger than maximum size, it's must be some obvious corruption.
+        * handle it.  We care more about the size.
         */
-       if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+       if (key->offset == 0) {
                block_group_err(fs_info, leaf, slot,
-                       "invalid block group size, have %llu expect (0, %llu]",
-                               key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+                               "invalid block group size 0");
                return -EUCLEAN;
        }
 
@@ -440,7 +438,7 @@ static int check_block_group_item(struct btrfs_fs_info *fs_info,
            type != (BTRFS_BLOCK_GROUP_METADATA |
                           BTRFS_BLOCK_GROUP_DATA)) {
                block_group_err(fs_info, leaf, slot,
-"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
                        type, hweight64(type),
                        BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
                        BTRFS_BLOCK_GROUP_SYSTEM,
index e07f3376b7dfc0c9350117bf3db956781e57d45e..a5ce99a6c936558f820d94a63d1301a111f8486b 100644 (file)
@@ -4396,6 +4396,23 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        logged_end = end;
 
        list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+               /*
+                * Skip extents outside our logging range. It's important to do
+                * it for correctness because if we don't ignore them, we may
+                * log them before their ordered extent completes, and therefore
+                * we could log them without logging their respective checksums
+                * (the checksum items are added to the csum tree at the very
+                * end of btrfs_finish_ordered_io()). Also leave such extents
+                * outside of our range in the list, since we may have another
+                * ranged fsync in the near future that needs them. If an extent
+                * outside our range corresponds to a hole, log it to avoid
+                * leaving gaps between extents (fsck will complain when we are
+                * not using the NO_HOLES feature).
+                */
+               if ((em->start > end || em->start + em->len <= start) &&
+                   em->block_start != EXTENT_MAP_HOLE)
+                       continue;
+
                list_del_init(&em->list);
                /*
                 * Just an arbitrary number, this can be really CPU intensive
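
To make the skip condition above concrete, a small standalone check of the disjointness predicate (ranges are illustrative; end is inclusive, as in the hunk):

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Mirrors the test in the hunk: extent [es, es + el) lies entirely
	 * outside the log range [lstart, lend] (lend inclusive). */
	static bool outside_range(uint64_t es, uint64_t el,
				  uint64_t lstart, uint64_t lend)
	{
		return es > lend || es + el <= lstart;
	}

	int main(void)
	{
		/* log range [4096, 8191] */
		assert(outside_range(8192, 4096, 4096, 8191)); /* starts after end  */
		assert(outside_range(0, 4096, 4096, 8191));    /* ends before start */
		assert(!outside_range(0, 8192, 4096, 8191));   /* overlaps range    */
		return 0;
	}
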
index d60d61e8ed7de495bddd0bc799f16c2606a4c68b..1286c2b95498de47d2ba08b57a93901bdf4367bd 100644 (file)
@@ -3060,6 +3060,11 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
         */
        bio = bio_alloc(GFP_NOIO, 1);
 
+       if (wbc) {
+               wbc_init_bio(wbc, bio);
+               wbc_account_io(wbc, bh->b_page, bh->b_size);
+       }
+
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio_set_dev(bio, bh->b_bdev);
        bio->bi_write_hint = write_hint;
@@ -3079,11 +3084,6 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
                op_flags |= REQ_PRIO;
        bio_set_op_attrs(bio, op, op_flags);
 
-       if (wbc) {
-               wbc_init_bio(wbc, bio);
-               wbc_account_io(wbc, bh->b_page, bh->b_size);
-       }
-
        submit_bio(bio);
        return 0;
 }
index 95983c744164a830661f105cd7b5ca54645a043f..1645fcfd9691c33bb58114546c262911edfdf25c 100644 (file)
@@ -244,11 +244,13 @@ wait_for_old_object:
 
        ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
 
-       cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_retry);
+       cache->cache.ops->put_object(&xobject->fscache,
+               (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
        goto try_again;
 
 requeue:
-       cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
+       cache->cache.ops->put_object(&xobject->fscache,
+               (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
        _leave(" = -ETIMEDOUT");
        return -ETIMEDOUT;
 }
@@ -336,7 +338,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
 try_again:
        /* first step is to make up a grave dentry in the graveyard */
        sprintf(nbuffer, "%08x%08x",
-               (uint32_t) get_seconds(),
+               (uint32_t) ktime_get_real_seconds(),
                (uint32_t) atomic_inc_return(&cache->gravecounter));
 
        /* do the multiway lock magic */
index 40f7595aad10f20666df7741b8ce8dce3db37b6e..8a577409d030b79757d546388bdbe88bbe124391 100644 (file)
@@ -535,7 +535,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
+                               put_page(backpage);
+                               backpage = NULL;
                                put_page(netpage);
+                               netpage = NULL;
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
@@ -608,7 +611,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
+                               put_page(backpage);
+                               backpage = NULL;
                                put_page(netpage);
+                               netpage = NULL;
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
@@ -962,11 +968,8 @@ void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
        __releases(&object->fscache.cookie->lock)
 {
        struct cachefiles_object *object;
-       struct cachefiles_cache *cache;
 
        object = container_of(_object, struct cachefiles_object, fscache);
-       cache = container_of(object->fscache.cache,
-                            struct cachefiles_cache, cache);
 
        _enter("%p,{%lu}", object, page->index);
 
index 0a29a00aed2eba522c5b11ac509e8020d56d625b..511e6c68156a72355c63113205a20a1672c311e9 100644 (file)
@@ -135,7 +135,8 @@ int cachefiles_update_object_xattr(struct cachefiles_object *object,
        struct dentry *dentry = object->dentry;
        int ret;
 
-       ASSERT(dentry);
+       if (!dentry)
+               return -ESTALE;
 
        _enter("%p,#%d", object, auxdata->len);
 
index f788496fafcc9eeab5907cdcd7c68b16e0aecf8a..189df668b6a0cf0dc2d000184d170ce58d248076 100644 (file)
@@ -615,7 +615,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 
                more = len < iov_iter_count(to);
 
-               if (unlikely(to->type & ITER_PIPE)) {
+               if (unlikely(iov_iter_is_pipe(to))) {
                        ret = iov_iter_get_pages_alloc(to, &pages, len,
                                                       &page_off);
                        if (ret <= 0) {
@@ -662,7 +662,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                        ret += zlen;
                }
 
-               if (unlikely(to->type & ITER_PIPE)) {
+               if (unlikely(iov_iter_is_pipe(to))) {
                        if (ret > 0) {
                                iov_iter_advance(to, ret);
                                off += ret;
@@ -815,7 +815,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
                                aio_req->total_len = rc + zlen;
                        }
 
-                       iov_iter_bvec(&i, ITER_BVEC, osd_data->bvec_pos.bvecs,
+                       iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs,
                                      osd_data->bvec_pos.iter.bi_size);
                        iov_iter_advance(&i, rc);
@@ -1038,8 +1038,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                                int zlen = min_t(size_t, len - ret,
                                                 size - pos - ret);
 
-                               iov_iter_bvec(&i, ITER_BVEC, bvecs, num_pages,
-                                             len);
+                               iov_iter_bvec(&i, READ, bvecs, num_pages, len);
                                iov_iter_advance(&i, ret);
                                iov_iter_zero(zlen, &i);
                                ret += zlen;
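
A recurring change in this merge: the iov_iter constructors no longer take the iterator type OR-ed into the direction argument, and open-coded type tests become helpers. A before/after sketch, assuming only the signatures implied by the calls above (handle_pipe_case() is a placeholder):

	/* before: type flag and direction shared one argument */
	iov_iter_bvec(&i, ITER_BVEC | READ, bvecs, num_pages, len);
	if (unlikely(to->type & ITER_PIPE))
		handle_pipe_case();

	/* after: the constructor implies the type; pass only READ or WRITE */
	iov_iter_bvec(&i, READ, bvecs, num_pages, len);
	if (unlikely(iov_iter_is_pipe(to)))
		handle_pipe_case();
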
@@ -1932,10 +1931,17 @@ static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
        if (!prealloc_cf)
                return -ENOMEM;
 
-       /* Start by sync'ing the source file */
+       /* Start by sync'ing the source and destination files */
        ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
-       if (ret < 0)
+       if (ret < 0) {
+               dout("failed to write src file (%zd)\n", ret);
+               goto out;
+       }
+       ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
+       if (ret < 0) {
+               dout("failed to write dst file (%zd)\n", ret);
                goto out;
+       }
 
        /*
         * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
index 67a9aeb2f4ecdc66ea3cfd6131bf0e4082cb0691..bd13a3267ae03c401d7b0dd0c1f37626bbc42b0a 100644 (file)
@@ -80,12 +80,8 @@ static int parse_reply_info_in(void **p, void *end,
        info->symlink = *p;
        *p += info->symlink_len;
 
-       if (features & CEPH_FEATURE_DIRLAYOUTHASH)
-               ceph_decode_copy_safe(p, end, &info->dir_layout,
-                                     sizeof(info->dir_layout), bad);
-       else
-               memset(&info->dir_layout, 0, sizeof(info->dir_layout));
-
+       ceph_decode_copy_safe(p, end, &info->dir_layout,
+                             sizeof(info->dir_layout), bad);
        ceph_decode_32_safe(p, end, info->xattr_len, bad);
        ceph_decode_need(p, end, info->xattr_len, bad);
        info->xattr_data = *p;
@@ -3182,10 +3178,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        recon_state.pagelist = pagelist;
        if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
                recon_state.msg_version = 3;
-       else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
-               recon_state.msg_version = 2;
        else
-               recon_state.msg_version = 1;
+               recon_state.msg_version = 2;
        err = iterate_session_caps(session, encode_caps_cb, &recon_state);
        if (err < 0)
                goto fail;
index 32d4f13784ba5da85e420a565297eff6b3bf132a..03f4d24db8fe009dc4384b83162979c34f11d1e0 100644 (file)
@@ -237,7 +237,8 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
                ceph_put_snap_realm(mdsc, realm);
                realm = next;
        }
-       ceph_put_snap_realm(mdsc, realm);
+       if (realm)
+               ceph_put_snap_realm(mdsc, realm);
        up_read(&mdsc->snap_rwsem);
 
        return exceeded;
index abcd78e332feb05ad8fa7f540d14917d2835f68b..85dadb93c9926cad276833c82c5045f71e52ca16 100644 (file)
@@ -133,7 +133,7 @@ config CIFS_XATTR
 
 config CIFS_POSIX
         bool "CIFS POSIX Extensions"
-        depends on CIFS_XATTR
+        depends on CIFS && CIFS_ALLOW_INSECURE_LEGACY && CIFS_XATTR
         help
           Enabling this option will cause the cifs client to attempt to
          negotiate a newer dialect with servers, such as Samba 3.0.5
index 3e812428ac8d95210c78ac51bb84852b9ec9ad32..ba178b09de0b48f1ce0e2285899f66de91cd2741 100644 (file)
@@ -145,6 +145,58 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
                seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
 }
 
+static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
+{
+       struct list_head *stmp, *tmp, *tmp1, *tmp2;
+       struct TCP_Server_Info *server;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
+       struct cifsFileInfo *cfile;
+
+       seq_puts(m, "# Version:1\n");
+       seq_puts(m, "# Format:\n");
+       seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
+#ifdef CONFIG_CIFS_DEBUG2
+       seq_printf(m, " <filename> <mid>\n");
+#else
+       seq_printf(m, " <filename>\n");
+#endif /* CIFS_DEBUG2 */
+       spin_lock(&cifs_tcp_ses_lock);
+       list_for_each(stmp, &cifs_tcp_ses_list) {
+               server = list_entry(stmp, struct TCP_Server_Info,
+                                   tcp_ses_list);
+               list_for_each(tmp, &server->smb_ses_list) {
+                       ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+                       list_for_each(tmp1, &ses->tcon_list) {
+                               tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+                               spin_lock(&tcon->open_file_lock);
+                               list_for_each(tmp2, &tcon->openFileList) {
+                                       cfile = list_entry(tmp2, struct cifsFileInfo,
+                                                    tlist);
+                                       seq_printf(m,
+                                               "0x%x 0x%llx 0x%x %d %d %d %s",
+                                               tcon->tid,
+                                               cfile->fid.persistent_fid,
+                                               cfile->f_flags,
+                                               cfile->count,
+                                               cfile->pid,
+                                               from_kuid(&init_user_ns, cfile->uid),
+                                               cfile->dentry->d_name.name);
+#ifdef CONFIG_CIFS_DEBUG2
+                                       seq_printf(m, " 0x%llx\n", cfile->fid.mid);
+#else
+                                       seq_printf(m, "\n");
+#endif /* CIFS_DEBUG2 */
+                               }
+                               spin_unlock(&tcon->open_file_lock);
+                       }
+               }
+       }
+       spin_unlock(&cifs_tcp_ses_lock);
+       seq_putc(m, '\n');
+       return 0;
+}
+
 static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
 {
        struct list_head *tmp1, *tmp2, *tmp3;
@@ -565,6 +617,9 @@ cifs_proc_init(void)
        proc_create_single("DebugData", 0, proc_fs_cifs,
                        cifs_debug_data_proc_show);
 
+       proc_create_single("open_files", 0400, proc_fs_cifs,
+                       cifs_debug_files_proc_show);
+
        proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_fops);
        proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_fops);
        proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_fops);
@@ -601,6 +656,7 @@ cifs_proc_clean(void)
                return;
 
        remove_proc_entry("DebugData", proc_fs_cifs);
+       remove_proc_entry("open_files", proc_fs_cifs);
        remove_proc_entry("cifsFYI", proc_fs_cifs);
        remove_proc_entry("traceSMB", proc_fs_cifs);
        remove_proc_entry("Stats", proc_fs_cifs);
index b611fc2e8984e0fce26c08367b1306764685054a..7f01c6e607918d4e1356a9e7d305113777910089 100644 (file)
@@ -147,8 +147,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
                sprintf(dp, ";sec=krb5");
        else if (server->sec_mskerberos)
                sprintf(dp, ";sec=mskrb5");
-       else
-               goto out;
+       else {
+               cifs_dbg(VFS, "unknown or missing server auth type, use krb5\n");
+               sprintf(dp, ";sec=krb5");
+       }
 
        dp = description + strlen(description);
        sprintf(dp, ";uid=0x%x",
index 7de9603c54f10383c086f99dcfc5a5f17a8c716e..865706edb307dfd04fe66822b26c2461f87d49ab 100644 (file)
@@ -992,17 +992,21 @@ const struct inode_operations cifs_symlink_inode_ops = {
        .listxattr = cifs_listxattr,
 };
 
-static int cifs_clone_file_range(struct file *src_file, loff_t off,
-               struct file *dst_file, loff_t destoff, u64 len)
+static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
+               struct file *dst_file, loff_t destoff, loff_t len,
+               unsigned int remap_flags)
 {
        struct inode *src_inode = file_inode(src_file);
        struct inode *target_inode = file_inode(dst_file);
        struct cifsFileInfo *smb_file_src = src_file->private_data;
-       struct cifsFileInfo *smb_file_target = dst_file->private_data;
-       struct cifs_tcon *target_tcon = tlink_tcon(smb_file_target->tlink);
+       struct cifsFileInfo *smb_file_target;
+       struct cifs_tcon *target_tcon;
        unsigned int xid;
        int rc;
 
+       if (remap_flags & ~REMAP_FILE_ADVISORY)
+               return -EINVAL;
+
        cifs_dbg(FYI, "clone range\n");
 
        xid = get_xid();
@@ -1013,6 +1017,9 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
                goto out;
        }
 
+       smb_file_target = dst_file->private_data;
+       target_tcon = tlink_tcon(smb_file_target->tlink);
+
        /*
         * Note: cifs case is easier than btrfs since server responsible for
         * checks for proper open modes and file type and if it wants
@@ -1042,7 +1049,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off,
        unlock_two_nondirectories(src_inode, target_inode);
 out:
        free_xid(xid);
-       return rc;
+       return rc < 0 ? rc : len;
 }
 
 ssize_t cifs_file_copychunk_range(unsigned int xid,
@@ -1151,7 +1158,7 @@ const struct file_operations cifs_file_ops = {
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
 };
@@ -1170,15 +1177,14 @@ const struct file_operations cifs_file_strict_ops = {
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_direct_ops = {
-       /* BB reevaluate whether they can be done with directio, no cache */
-       .read_iter = cifs_user_readv,
-       .write_iter = cifs_user_writev,
+       .read_iter = cifs_direct_readv,
+       .write_iter = cifs_direct_writev,
        .open = cifs_open,
        .release = cifs_close,
        .lock = cifs_lock,
@@ -1189,7 +1195,7 @@ const struct file_operations cifs_file_direct_ops = {
        .splice_write = iter_file_splice_write,
        .unlocked_ioctl  = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
@@ -1208,7 +1214,7 @@ const struct file_operations cifs_file_nobrl_ops = {
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
 };
@@ -1226,15 +1232,14 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
        .llseek = cifs_llseek,
        .unlocked_ioctl = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
 };
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
-       /* BB reevaluate whether they can be done with directio, no cache */
-       .read_iter = cifs_user_readv,
-       .write_iter = cifs_user_writev,
+       .read_iter = cifs_direct_readv,
+       .write_iter = cifs_direct_writev,
        .open = cifs_open,
        .release = cifs_close,
        .fsync = cifs_fsync,
@@ -1244,7 +1249,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
        .splice_write = iter_file_splice_write,
        .unlocked_ioctl  = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .llseek = cifs_llseek,
        .setlease = cifs_setlease,
        .fallocate = cifs_fallocate,
@@ -1256,7 +1261,7 @@ const struct file_operations cifs_dir_ops = {
        .read    = generic_read_dir,
        .unlocked_ioctl  = cifs_ioctl,
        .copy_file_range = cifs_copy_file_range,
-       .clone_file_range = cifs_clone_file_range,
+       .remap_file_range = cifs_remap_file_range,
        .llseek = generic_file_llseek,
        .fsync = cifs_dir_fsync,
 };
index 24e265a51874653401d0ecce7fa74b2b582aa62c..4c3b5cfccc49aef63af46583f771c7d9302dca19 100644 (file)
@@ -101,8 +101,10 @@ extern int cifs_open(struct inode *inode, struct file *file);
 extern int cifs_close(struct inode *inode, struct file *file);
 extern int cifs_closedir(struct inode *inode, struct file *file);
 extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
+extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
 extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
 extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
+extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
 extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
 extern int cifs_lock(struct file *, int, struct file_lock *);
 extern int cifs_fsync(struct file *, loff_t, loff_t, int);
index ed1e0fcb69e3f606b8426091d63febea9e28dc29..38ab0fca49e1dafe66bf7dcc233e67ef42b6b301 100644 (file)
@@ -1125,6 +1125,9 @@ struct cifs_fid {
        __u8 create_guid[16];
        struct cifs_pending_open *pending_open;
        unsigned int epoch;
+#ifdef CONFIG_CIFS_DEBUG2
+       __u64 mid;
+#endif /* CIFS_DEBUG2 */
        bool purge_cache;
 };
 
@@ -1183,6 +1186,11 @@ struct cifs_aio_ctx {
        unsigned int            len;
        unsigned int            total_len;
        bool                    should_dirty;
+       /*
+        * Indicates if this aio_ctx is for direct_io.
+        * If yes, iter is a copy of the user-passed iov_iter.
+        */
+       bool                    direct_io;
 };
 
 struct cifs_readdata;
index 1ce733f3582f66702c18db771fac1416d702a706..79d842e7240c7cff027eebb5915cded50e8b8e3c 100644 (file)
@@ -1539,6 +1539,9 @@ struct reparse_symlink_data {
        char    PathBuffer[0];
 } __attribute__((packed));
 
+/* Flag above */
+#define SYMLINK_FLAG_RELATIVE 0x00000001
+
 /* For IO_REPARSE_TAG_NFS */
 #define NFS_SPECFILE_LNK       0x00000000014B4E4C
 #define NFS_SPECFILE_CHR       0x0000000000524843
index d82f0cc7175508e9881873a2ee9c8a53c1a72944..6f24f129a75135d2da48f414698d7a5439c11960 100644 (file)
@@ -589,7 +589,7 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
 {
        struct msghdr smb_msg;
        struct kvec iov = {.iov_base = buf, .iov_len = to_read};
-       iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read);
+       iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
 
        return cifs_readv_from_socket(server, &smb_msg);
 }
@@ -601,7 +601,7 @@ cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
        struct msghdr smb_msg;
        struct bio_vec bv = {
                .bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
-       iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read);
+       iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
        return cifs_readv_from_socket(server, &smb_msg);
 }
 
index 3713d22b95a7011bda1e8701e5aea54fee96ea2a..907e85d65bb4e09b5fdc8f7c1e6c35ef56b519ad 100644 (file)
@@ -174,7 +174,7 @@ cifs_bp_rename_retry:
 
                cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
                memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
-               full_path[dfsplen] = '\\';
+               full_path[dfsplen] = dirsep;
                for (i = 0; i < pplen-1; i++)
                        if (full_path[dfsplen+1+i] == '/')
                                full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
index c620d4b5d5d4c84448cc44258ae1dd8ae36ce3a8..c9bc56b1baac2deac379103f1a4a32da45747b9c 100644 (file)
@@ -1005,7 +1005,7 @@ cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
  * Set the byte-range lock (mandatory style). Returns:
  * 1) 0, if we set the lock and don't need to request to the server;
  * 2) 1, if no locks prevent us but we need to request to the server;
- * 3) -EACCESS, if there is a lock that prevents us and wait is false.
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
  */
 static int
 cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
@@ -2537,6 +2537,54 @@ wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
        return 0;
 }
 
+static int
+cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
+       struct cifs_aio_ctx *ctx)
+{
+       unsigned int wsize, credits;
+       int rc;
+       struct TCP_Server_Info *server =
+               tlink_tcon(wdata->cfile->tlink)->ses->server;
+
+       /*
+        * Wait for credits to resend this wdata.
+        * Note: we are attempting to resend the whole wdata, not in segments.
+        */
+       do {
+               rc = server->ops->wait_mtu_credits(
+                       server, wdata->bytes, &wsize, &credits);
+
+               if (rc)
+                       goto out;
+
+               if (wsize < wdata->bytes) {
+                       add_credits_and_wake_if(server, credits, 0);
+                       msleep(1000);
+               }
+       } while (wsize < wdata->bytes);
+
+       rc = -EAGAIN;
+       while (rc == -EAGAIN) {
+               rc = 0;
+               if (wdata->cfile->invalidHandle)
+                       rc = cifs_reopen_file(wdata->cfile, false);
+               if (!rc)
+                       rc = server->ops->async_writev(wdata,
+                                       cifs_uncached_writedata_release);
+       }
+
+       if (!rc) {
+               list_add_tail(&wdata->list, wdata_list);
+               return 0;
+       }
+
+       add_credits_and_wake_if(server, wdata->credits, 0);
+out:
+       kref_put(&wdata->refcount, cifs_uncached_writedata_release);
+
+       return rc;
+}
+
 static int
 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                     struct cifsFileInfo *open_file,
@@ -2551,6 +2599,8 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
        loff_t saved_offset = offset;
        pid_t pid;
        struct TCP_Server_Info *server;
+       struct page **pagevec;
+       size_t start;
 
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
                pid = open_file->pid;
@@ -2567,38 +2617,79 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                if (rc)
                        break;
 
-               nr_pages = get_numpages(wsize, len, &cur_len);
-               wdata = cifs_writedata_alloc(nr_pages,
+               if (ctx->direct_io) {
+                       ssize_t result;
+
+                       result = iov_iter_get_pages_alloc(
+                               from, &pagevec, wsize, &start);
+                       if (result < 0) {
+                               cifs_dbg(VFS,
+                                       "direct_writev couldn't get user pages "
+                                       "(rc=%zd) iter type %d iov_offset %zd "
+                                       "count %zd\n",
+                                       result, from->type,
+                                       from->iov_offset, from->count);
+                               dump_stack();
+                               break;
+                       }
+                       cur_len = (size_t)result;
+                       iov_iter_advance(from, cur_len);
+
+                       nr_pages =
+                               (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
+
+                       wdata = cifs_writedata_direct_alloc(pagevec,
                                             cifs_uncached_writev_complete);
-               if (!wdata) {
-                       rc = -ENOMEM;
-                       add_credits_and_wake_if(server, credits, 0);
-                       break;
-               }
+                       if (!wdata) {
+                               rc = -ENOMEM;
+                               add_credits_and_wake_if(server, credits, 0);
+                               break;
+                       }
 
-               rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
-               if (rc) {
-                       kfree(wdata);
-                       add_credits_and_wake_if(server, credits, 0);
-                       break;
-               }
 
-               num_pages = nr_pages;
-               rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
-               if (rc) {
-                       for (i = 0; i < nr_pages; i++)
-                               put_page(wdata->pages[i]);
-                       kfree(wdata);
-                       add_credits_and_wake_if(server, credits, 0);
-                       break;
-               }
+                       wdata->page_offset = start;
+                       wdata->tailsz =
+                               nr_pages > 1 ?
+                                       cur_len - (PAGE_SIZE - start) -
+                                       (nr_pages - 2) * PAGE_SIZE :
+                                       cur_len;
+               } else {
+                       nr_pages = get_numpages(wsize, len, &cur_len);
+                       wdata = cifs_writedata_alloc(nr_pages,
+                                            cifs_uncached_writev_complete);
+                       if (!wdata) {
+                               rc = -ENOMEM;
+                               add_credits_and_wake_if(server, credits, 0);
+                               break;
+                       }
 
-               /*
-                * Bring nr_pages down to the number of pages we actually used,
-                * and free any pages that we didn't use.
-                */
-               for ( ; nr_pages > num_pages; nr_pages--)
-                       put_page(wdata->pages[nr_pages - 1]);
+                       rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+                       if (rc) {
+                               kfree(wdata);
+                               add_credits_and_wake_if(server, credits, 0);
+                               break;
+                       }
+
+                       num_pages = nr_pages;
+                       rc = wdata_fill_from_iovec(
+                               wdata, from, &cur_len, &num_pages);
+                       if (rc) {
+                               for (i = 0; i < nr_pages; i++)
+                                       put_page(wdata->pages[i]);
+                               kfree(wdata);
+                               add_credits_and_wake_if(server, credits, 0);
+                               break;
+                       }
+
+                       /*
+                        * Bring nr_pages down to the number of pages we
+                        * actually used, and free any pages that we didn't use.
+                        */
+                       for ( ; nr_pages > num_pages; nr_pages--)
+                               put_page(wdata->pages[nr_pages - 1]);
+
+                       wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
+               }
 
                wdata->sync_mode = WB_SYNC_ALL;
                wdata->nr_pages = nr_pages;
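
The tailsz arithmetic in the direct-I/O branch above is easy to mis-read; a standalone check with illustrative numbers (start is the offset into the first pinned page):

	#include <assert.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	int main(void)
	{
		/* 10000 bytes starting 100 bytes into the first page span
		 * 3 pages: a partial head, one full page, and a tail. */
		size_t start = 100, cur_len = 10000;
		size_t nr_pages = (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
		size_t tailsz = nr_pages > 1
			? cur_len - (PAGE_SIZE - start) - (nr_pages - 2) * PAGE_SIZE
			: cur_len;

		assert(nr_pages == 3);
		assert(tailsz == 1908);  /* 10000 - 3996 (head) - 4096 (middle) */
		return 0;
	}
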
@@ -2607,7 +2698,6 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
                wdata->pid = pid;
                wdata->bytes = cur_len;
                wdata->pagesz = PAGE_SIZE;
-               wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
                wdata->credits = credits;
                wdata->ctx = ctx;
                kref_get(&ctx->refcount);
@@ -2682,13 +2772,18 @@ restart_loop:
                                INIT_LIST_HEAD(&tmp_list);
                                list_del_init(&wdata->list);
 
-                               iov_iter_advance(&tmp_from,
+                               if (ctx->direct_io)
+                                       rc = cifs_resend_wdata(
+                                               wdata, &tmp_list, ctx);
+                               else {
+                                       iov_iter_advance(&tmp_from,
                                                 wdata->offset - ctx->pos);
 
-                               rc = cifs_write_from_iter(wdata->offset,
+                                       rc = cifs_write_from_iter(wdata->offset,
                                                wdata->bytes, &tmp_from,
                                                ctx->cfile, cifs_sb, &tmp_list,
                                                ctx);
+                               }
 
                                list_splice(&tmp_list, &ctx->list);
 
@@ -2701,8 +2796,9 @@ restart_loop:
                kref_put(&wdata->refcount, cifs_uncached_writedata_release);
        }
 
-       for (i = 0; i < ctx->npages; i++)
-               put_page(ctx->bv[i].bv_page);
+       if (!ctx->direct_io)
+               for (i = 0; i < ctx->npages; i++)
+                       put_page(ctx->bv[i].bv_page);
 
        cifs_stats_bytes_written(tcon, ctx->total_len);
        set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
@@ -2717,7 +2813,8 @@ restart_loop:
                complete(&ctx->done);
 }
 
-ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+static ssize_t __cifs_writev(
+       struct kiocb *iocb, struct iov_iter *from, bool direct)
 {
        struct file *file = iocb->ki_filp;
        ssize_t total_written = 0;
@@ -2726,13 +2823,18 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
        struct cifs_sb_info *cifs_sb;
        struct cifs_aio_ctx *ctx;
        struct iov_iter saved_from = *from;
+       size_t len = iov_iter_count(from);
        int rc;
 
        /*
-        * BB - optimize the way when signing is disabled. We can drop this
-        * extra memory-to-memory copying and use iovec buffers for constructing
-        * write request.
+        * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
+        * In this case, fall back to the non-direct write function.
+        * This could be improved by getting pages directly in ITER_KVEC.
         */
+       if (direct && from->type & ITER_KVEC) {
+               cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
+               direct = false;
+       }
 
        rc = generic_write_checks(iocb, from);
        if (rc <= 0)
@@ -2756,10 +2858,16 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
 
        ctx->pos = iocb->ki_pos;
 
-       rc = setup_aio_ctx_iter(ctx, from, WRITE);
-       if (rc) {
-               kref_put(&ctx->refcount, cifs_aio_ctx_release);
-               return rc;
+       if (direct) {
+               ctx->direct_io = true;
+               ctx->iter = *from;
+               ctx->len = len;
+       } else {
+               rc = setup_aio_ctx_iter(ctx, from, WRITE);
+               if (rc) {
+                       kref_put(&ctx->refcount, cifs_aio_ctx_release);
+                       return rc;
+               }
        }
 
        /* grab a lock here due to read response handlers can access ctx */
@@ -2809,6 +2917,16 @@ ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
        return total_written;
 }
 
+ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
+{
+       return __cifs_writev(iocb, from, true);
+}
+
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
+{
+       return __cifs_writev(iocb, from, false);
+}
+
 static ssize_t
 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
 {
@@ -2979,7 +3097,6 @@ cifs_uncached_readdata_release(struct kref *refcount)
        kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
        for (i = 0; i < rdata->nr_pages; i++) {
                put_page(rdata->pages[i]);
-               rdata->pages[i] = NULL;
        }
        cifs_readdata_release(refcount);
 }
@@ -3004,7 +3121,7 @@ cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
                size_t copy = min_t(size_t, remaining, PAGE_SIZE);
                size_t written;
 
-               if (unlikely(iter->type & ITER_PIPE)) {
+               if (unlikely(iov_iter_is_pipe(iter))) {
                        void *addr = kmap_atomic(page);
 
                        written = copy_to_iter(addr, copy, iter);
@@ -3106,6 +3223,55 @@ cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
        return uncached_fill_pages(server, rdata, iter, iter->count);
 }
 
+static int cifs_resend_rdata(struct cifs_readdata *rdata,
+                       struct list_head *rdata_list,
+                       struct cifs_aio_ctx *ctx)
+{
+       unsigned int rsize, credits;
+       int rc;
+       struct TCP_Server_Info *server =
+               tlink_tcon(rdata->cfile->tlink)->ses->server;
+
+       /*
+        * Wait for credits to resend this rdata.
+        * Note: we are attempting to resend the whole rdata, not in segments.
+        */
+       do {
+               rc = server->ops->wait_mtu_credits(server, rdata->bytes,
+                                               &rsize, &credits);
+
+               if (rc)
+                       goto out;
+
+               if (rsize < rdata->bytes) {
+                       add_credits_and_wake_if(server, credits, 0);
+                       msleep(1000);
+               }
+       } while (rsize < rdata->bytes);
+
+       rc = -EAGAIN;
+       while (rc == -EAGAIN) {
+               rc = 0;
+               if (rdata->cfile->invalidHandle)
+                       rc = cifs_reopen_file(rdata->cfile, true);
+               if (!rc)
+                       rc = server->ops->async_readv(rdata);
+       }
+
+       if (!rc) {
+               /* Add to aio pending list */
+               list_add_tail(&rdata->list, rdata_list);
+               return 0;
+       }
+
+       add_credits_and_wake_if(server, rdata->credits, 0);
+out:
+       kref_put(&rdata->refcount,
+               cifs_uncached_readdata_release);
+
+       return rc;
+}
+
 static int
 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                     struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
@@ -3117,6 +3283,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
        int rc;
        pid_t pid;
        struct TCP_Server_Info *server;
+       struct page **pagevec;
+       size_t start;
+       struct iov_iter direct_iov = ctx->iter;
 
        server = tlink_tcon(open_file->tlink)->ses->server;
 
@@ -3125,6 +3294,9 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
        else
                pid = current->tgid;
 
+       if (ctx->direct_io)
+               iov_iter_advance(&direct_iov, offset - ctx->pos);
+
        do {
                rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
                                                   &rsize, &credits);
@@ -3132,20 +3304,59 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                        break;
 
                cur_len = min_t(const size_t, len, rsize);
-               npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
 
-               /* allocate a readdata struct */
-               rdata = cifs_readdata_alloc(npages,
+               if (ctx->direct_io) {
+                       ssize_t result;
+
+                       result = iov_iter_get_pages_alloc(
+                                       &direct_iov, &pagevec,
+                                       cur_len, &start);
+                       if (result < 0) {
+                               cifs_dbg(VFS,
+                                       "couldn't get user pages (cur_len=%zd)"
+                                       " iter type %d"
+                                       " iov_offset %zd count %zd\n",
+                                       result, direct_iov.type,
+                                       direct_iov.iov_offset,
+                                       direct_iov.count);
+                               dump_stack();
+                               break;
+                       }
+                       cur_len = (size_t)result;
+                       iov_iter_advance(&direct_iov, cur_len);
+
+                       rdata = cifs_readdata_direct_alloc(
+                                       pagevec, cifs_uncached_readv_complete);
+                       if (!rdata) {
+                               add_credits_and_wake_if(server, credits, 0);
+                               rc = -ENOMEM;
+                               break;
+                       }
+
+                       npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
+                       rdata->page_offset = start;
+                       rdata->tailsz = npages > 1 ?
+                               cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
+                               cur_len;
+
+               } else {
+
+                       npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
+                       /* allocate a readdata struct */
+                       rdata = cifs_readdata_alloc(npages,
                                            cifs_uncached_readv_complete);
-               if (!rdata) {
-                       add_credits_and_wake_if(server, credits, 0);
-                       rc = -ENOMEM;
-                       break;
-               }
+                       if (!rdata) {
+                               add_credits_and_wake_if(server, credits, 0);
+                               rc = -ENOMEM;
+                               break;
+                       }
 
-               rc = cifs_read_allocate_pages(rdata, npages);
-               if (rc)
-                       goto error;
+                       rc = cifs_read_allocate_pages(rdata, npages);
+                       if (rc)
+                               goto error;
+
+                       rdata->tailsz = PAGE_SIZE;
+               }
 
                rdata->cfile = cifsFileInfo_get(open_file);
                rdata->nr_pages = npages;
@@ -3153,7 +3364,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
                rdata->bytes = cur_len;
                rdata->pid = pid;
                rdata->pagesz = PAGE_SIZE;
-               rdata->tailsz = PAGE_SIZE;
                rdata->read_into_pages = cifs_uncached_read_into_pages;
                rdata->copy_into_pages = cifs_uncached_copy_into_pages;
                rdata->credits = credits;
@@ -3167,9 +3377,11 @@ error:
                if (rc) {
                        add_credits_and_wake_if(server, rdata->credits, 0);
                        kref_put(&rdata->refcount,
-                                cifs_uncached_readdata_release);
-                       if (rc == -EAGAIN)
+                               cifs_uncached_readdata_release);
+                       if (rc == -EAGAIN) {
+                               iov_iter_revert(&direct_iov, cur_len);
                                continue;
+                       }
                        break;
                }
 
@@ -3225,45 +3437,62 @@ again:
                                 * reading.
                                 */
                                if (got_bytes && got_bytes < rdata->bytes) {
-                                       rc = cifs_readdata_to_iov(rdata, to);
+                                       rc = 0;
+                                       if (!ctx->direct_io)
+                                               rc = cifs_readdata_to_iov(rdata, to);
                                        if (rc) {
                                                kref_put(&rdata->refcount,
-                                               cifs_uncached_readdata_release);
+                                                       cifs_uncached_readdata_release);
                                                continue;
                                        }
                                }
 
-                               rc = cifs_send_async_read(
+                               if (ctx->direct_io) {
+                                       /*
+                                        * Re-use the rdata, as this is
+                                        * direct I/O
+                                        */
+                                       rc = cifs_resend_rdata(
+                                               rdata,
+                                               &tmp_list, ctx);
+                               } else {
+                                       rc = cifs_send_async_read(
                                                rdata->offset + got_bytes,
                                                rdata->bytes - got_bytes,
                                                rdata->cfile, cifs_sb,
                                                &tmp_list, ctx);
 
+                                       kref_put(&rdata->refcount,
+                                               cifs_uncached_readdata_release);
+                               }
+
                                list_splice(&tmp_list, &ctx->list);
 
-                               kref_put(&rdata->refcount,
-                                        cifs_uncached_readdata_release);
                                goto again;
                        } else if (rdata->result)
                                rc = rdata->result;
-                       else
+                       else if (!ctx->direct_io)
                                rc = cifs_readdata_to_iov(rdata, to);
 
                        /* if there was a short read -- discard anything left */
                        if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
                                rc = -ENODATA;
+
+                       ctx->total_len += rdata->got_bytes;
                }
                list_del_init(&rdata->list);
                kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }
 
-       for (i = 0; i < ctx->npages; i++) {
-               if (ctx->should_dirty)
-                       set_page_dirty(ctx->bv[i].bv_page);
-               put_page(ctx->bv[i].bv_page);
-       }
+       if (!ctx->direct_io) {
+               for (i = 0; i < ctx->npages; i++) {
+                       if (ctx->should_dirty)
+                               set_page_dirty(ctx->bv[i].bv_page);
+                       put_page(ctx->bv[i].bv_page);
+               }
 
-       ctx->total_len = ctx->len - iov_iter_count(to);
+               ctx->total_len = ctx->len - iov_iter_count(to);
+       }
 
        cifs_stats_bytes_read(tcon, ctx->total_len);
 
@@ -3281,18 +3510,28 @@ again:
                complete(&ctx->done);
 }
 
-ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+static ssize_t __cifs_readv(
+       struct kiocb *iocb, struct iov_iter *to, bool direct)
 {
-       struct file *file = iocb->ki_filp;
-       ssize_t rc;
        size_t len;
-       ssize_t total_read = 0;
-       loff_t offset = iocb->ki_pos;
+       struct file *file = iocb->ki_filp;
        struct cifs_sb_info *cifs_sb;
-       struct cifs_tcon *tcon;
        struct cifsFileInfo *cfile;
+       struct cifs_tcon *tcon;
+       ssize_t rc, total_read = 0;
+       loff_t offset = iocb->ki_pos;
        struct cifs_aio_ctx *ctx;
 
+       /*
+        * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
+        * so fall back to the data copy read path.
+        * This could be improved by getting pages directly in ITER_KVEC.
+        */
+       if (direct && to->type & ITER_KVEC) {
+               cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
+               direct = false;
+       }
+
        len = iov_iter_count(to);
        if (!len)
                return 0;
@@ -3316,17 +3555,23 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
        if (!is_sync_kiocb(iocb))
                ctx->iocb = iocb;
 
-       if (to->type == ITER_IOVEC)
+       if (iter_is_iovec(to))
                ctx->should_dirty = true;
 
-       rc = setup_aio_ctx_iter(ctx, to, READ);
-       if (rc) {
-               kref_put(&ctx->refcount, cifs_aio_ctx_release);
-               return rc;
+       if (direct) {
+               ctx->pos = offset;
+               ctx->direct_io = true;
+               ctx->iter = *to;
+               ctx->len = len;
+       } else {
+               rc = setup_aio_ctx_iter(ctx, to, READ);
+               if (rc) {
+                       kref_put(&ctx->refcount, cifs_aio_ctx_release);
+                       return rc;
+               }
+               len = ctx->len;
        }
 
-       len = ctx->len;
-
        /* grab a lock here due to read response handlers can access ctx */
        mutex_lock(&ctx->aio_mutex);
 
@@ -3368,6 +3613,16 @@ ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
        return rc;
 }
 
+ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
+{
+       return __cifs_readv(iocb, to, true);
+}
+
+ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
+{
+       return __cifs_readv(iocb, to, false);
+}
+
 ssize_t
 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
 {
index 1023d78673fb14b38ba32c793edee92b119053e9..a81a9df997c1c113e0c00e8d0695d46fc05060f6 100644 (file)
@@ -1320,8 +1320,8 @@ cifs_drop_nlink(struct inode *inode)
 /*
  * If d_inode(dentry) is null (usually meaning the cached dentry
  * is a negative dentry) then we would attempt a standard SMB delete, but
- * if that fails we can not attempt the fall back mechanisms on EACCESS
- * but will return the EACCESS to the caller. Note that the VFS does not call
+ * if that fails we can not attempt the fall back mechanisms on EACCES
+ * but will return the EACCES to the caller. Note that the VFS does not call
  * unlink on negative dentries currently.
  */
 int cifs_unlink(struct inode *dir, struct dentry *dentry)
index fc43d5d25d1df1cb6bda302520172f5788d13c55..8a41f4eba7264fd37584b92bbcaa3035b487c649 100644 (file)
@@ -788,7 +788,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
        struct page **pages = NULL;
        struct bio_vec *bv = NULL;
 
-       if (iter->type & ITER_KVEC) {
+       if (iov_iter_is_kvec(iter)) {
                memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
                ctx->len = count;
                iov_iter_advance(iter, count);
@@ -859,7 +859,7 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
        ctx->bv = bv;
        ctx->len = saved_len - count;
        ctx->npages = npages;
-       iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
+       iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
        return 0;
 }
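
The hunks above track an iov_iter API change threaded through this merge: the ITER_* type flags are no longer ORed into the direction argument. A minimal sketch of the new initializer calls (the wrapper function and buffer setup are assumed, the initializer names are from this diff):

        #include <linux/uio.h>

        /* Post-change initializers: only the data direction is passed;
         * the iterator type is implied by the function itself. */
        static void init_iters(struct iov_iter *ki, struct iov_iter *bi,
                               const struct kvec *kv, const struct bio_vec *bv,
                               size_t len)
        {
                iov_iter_kvec(ki, READ, kv, 1, len);    /* was READ | ITER_KVEC */
                iov_iter_bvec(bi, WRITE, bv, 1, len);   /* was WRITE | ITER_BVEC */
        }
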
 
index f85fc5aa2710821189e7c95ca201470e9f02b4f4..225fec1cfa673360d794058e5acb1d5737acdd32 100644 (file)
@@ -747,6 +747,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
        int rc = 0;
        unsigned int ea_name_len = ea_name ? strlen(ea_name) : 0;
        char *name, *value;
+       size_t buf_size = dst_size;
        size_t name_len, value_len, user_name_len;
 
        while (src_size > 0) {
@@ -782,9 +783,10 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
                        /* 'user.' plus a terminating null */
                        user_name_len = 5 + 1 + name_len;
 
-                       rc += user_name_len;
-
-                       if (dst_size >= user_name_len) {
+                       if (buf_size == 0) {
+                               /* skip copy - calc size only */
+                               rc += user_name_len;
+                       } else if (dst_size >= user_name_len) {
                                dst_size -= user_name_len;
                                memcpy(dst, "user.", 5);
                                dst += 5;
@@ -792,8 +794,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size,
                                dst += name_len;
                                *dst = 0;
                                ++dst;
-                       } else if (dst_size == 0) {
-                               /* skip copy - calc size only */
+                               rc += user_name_len;
                        } else {
                                /* stop before overrun buffer */
                                rc = -ERANGE;
@@ -1078,6 +1079,9 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
 
        cfile->fid.persistent_fid = fid->persistent_fid;
        cfile->fid.volatile_fid = fid->volatile_fid;
+#ifdef CONFIG_CIFS_DEBUG2
+       cfile->fid.mid = fid->mid;
+#endif /* CONFIG_CIFS_DEBUG2 */
        server->ops->set_oplock_level(cinode, oplock, fid->epoch,
                                      &fid->purge_cache);
        cinode->can_cache_brlcks = CIFS_CACHE_WRITE(cinode);
@@ -3152,13 +3156,13 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
                        return 0;
                }
 
-               iov_iter_bvec(&iter, WRITE | ITER_BVEC, bvec, npages, data_len);
+               iov_iter_bvec(&iter, WRITE, bvec, npages, data_len);
        } else if (buf_len >= data_offset + data_len) {
                /* read response payload is in buf */
                WARN_ONCE(npages > 0, "read data can be either in buf or in pages");
                iov.iov_base = buf + data_offset;
                iov.iov_len = data_len;
-               iov_iter_kvec(&iter, WRITE | ITER_KVEC, &iov, 1, data_len);
+               iov_iter_kvec(&iter, WRITE, &iov, 1, data_len);
        } else {
                /* read response payload cannot be in both buf and pages */
                WARN_ONCE(1, "buf can not contain only a part of read data");
index 7d7b016fe8bb0e9edd5523a739e94e37db7291d6..27f86537a5d11acebc45263236d9cdf131215337 100644 (file)
@@ -1512,7 +1512,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
        cifs_small_buf_release(req);
        rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
-
+       trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
        if (rc != 0) {
                if (tcon) {
                        cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
@@ -1559,6 +1559,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
        if (tcon->ses->server->ops->validate_negotiate)
                rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
 tcon_exit:
+
        free_rsp_buf(resp_buftype, rsp);
        kfree(unc_path);
        return rc;
@@ -2308,6 +2309,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        atomic_inc(&tcon->num_remote_opens);
        oparms->fid->persistent_fid = rsp->PersistentFileId;
        oparms->fid->volatile_fid = rsp->VolatileFileId;
+#ifdef CONFIG_CIFS_DEBUG2
+       oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
+#endif /* CONFIG_CIFS_DEBUG2 */
 
        if (buf) {
                memcpy(buf, &rsp->CreationTime, 32);
index f753f424d7f111454e52ab651b321b8bf2011e38..5671d5ee7f58f68d62243eea12f6fe934311b6d2 100644 (file)
@@ -842,6 +842,41 @@ struct fsctl_get_integrity_information_rsp {
 /* Integrity flags for above */
 #define FSCTL_INTEGRITY_FLAG_CHECKSUM_ENFORCEMENT_OFF  0x00000001
 
+/* Reparse structures - see MS-FSCC 2.1.2 */
+
+/* struct fsctl_reparse_info_req is empty, only response structs (see below) */
+
+struct reparse_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    DataBuffer[0]; /* Variable Length */
+} __packed;
+
+struct reparse_guid_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __u8    ReparseGuid[16];
+       __u8    DataBuffer[0]; /* Variable Length */
+} __packed;
+
+struct reparse_mount_point_data_buffer {
+       __le32  ReparseTag;
+       __le16  ReparseDataLength;
+       __u16   Reserved;
+       __le16  SubstituteNameOffset;
+       __le16  SubstituteNameLength;
+       __le16  PrintNameOffset;
+       __le16  PrintNameLength;
+       __u8    PathBuffer[0]; /* Variable Length */
+} __packed;
+
+/* See MS-FSCC 2.1.2.4 and cifspdu.h for struct reparse_symlink_data */
+
+/* See MS-FSCC 2.1.2.6 and cifspdu.h for struct reparse_posix_data */
+
+
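
A hedged helper sketch for the structures above (the function is illustrative, not part of the patch, and assumes the usual kernel type headers): per MS-FSCC, ReparseDataLength counts only the bytes that follow the fixed 8-byte header, so the total buffer length is the header size plus that field.

        static inline u32 reparse_buf_total_len(const struct reparse_data_buffer *rdb)
        {
                /* sizeof(*rdb) is the 8-byte Tag/Length/Reserved header;
                 * the flexible DataBuffer[] adds ReparseDataLength bytes. */
                return sizeof(*rdb) + le16_to_cpu(rdb->ReparseDataLength);
        }
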
 /* See MS-DFSC 2.2.2 */
 struct fsctl_get_dfs_referral_req {
        __le16 MaxReferralLevel;
index 5e282368cc4a9f3c43c8f4d402ce918f23753846..e94a8d1d08a3cedba8886f690a767739f97111d1 100644 (file)
@@ -2054,14 +2054,22 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
 
        info->smbd_recv_pending++;
 
-       switch (msg->msg_iter.type) {
-       case READ | ITER_KVEC:
+       if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+               /* It's a bug in the upper layer to get here */
+               cifs_dbg(VFS, "CIFS: invalid msg iter dir %u\n",
+                        iov_iter_rw(&msg->msg_iter));
+               rc = -EINVAL;
+               goto out;
+       }
+
+       switch (iov_iter_type(&msg->msg_iter)) {
+       case ITER_KVEC:
                buf = msg->msg_iter.kvec->iov_base;
                to_read = msg->msg_iter.kvec->iov_len;
                rc = smbd_recv_buf(info, buf, to_read);
                break;
 
-       case READ | ITER_BVEC:
+       case ITER_BVEC:
                page = msg->msg_iter.bvec->bv_page;
                page_offset = msg->msg_iter.bvec->bv_offset;
                to_read = msg->msg_iter.bvec->bv_len;
@@ -2071,10 +2079,11 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
        default:
                /* It's a bug in the upper layer to get here */
                cifs_dbg(VFS, "CIFS: invalid msg type %d\n",
-                       msg->msg_iter.type);
+                        iov_iter_type(&msg->msg_iter));
                rc = -EINVAL;
        }
 
+out:
        info->smbd_recv_pending--;
        wake_up(&info->wait_smbd_recv_pending);
 
index cce8414fe7ec2b5e3af9c0660c6db033828cb86f..fb049809555fea9b3e2cc072b735ac0adfdfe1ae 100644 (file)
@@ -373,6 +373,48 @@ DEFINE_EVENT(smb3_enter_exit_class, smb3_##name,  \
 DEFINE_SMB3_ENTER_EXIT_EVENT(enter);
 DEFINE_SMB3_ENTER_EXIT_EVENT(exit_done);
 
+/*
+ * For SMB2/SMB3 tree connect
+ */
+
+DECLARE_EVENT_CLASS(smb3_tcon_class,
+       TP_PROTO(unsigned int xid,
+               __u32   tid,
+               __u64   sesid,
+               const char *unc_name,
+               int     rc),
+       TP_ARGS(xid, tid, sesid, unc_name, rc),
+       TP_STRUCT__entry(
+               __field(unsigned int, xid)
+               __field(__u32, tid)
+               __field(__u64, sesid)
+               __field(const char *,  unc_name)
+               __field(int, rc)
+       ),
+       TP_fast_assign(
+               __entry->xid = xid;
+               __entry->tid = tid;
+               __entry->sesid = sesid;
+               __entry->unc_name = unc_name;
+               __entry->rc = rc;
+       ),
+       TP_printk("xid=%u sid=0x%llx tid=0x%x unc_name=%s rc=%d",
+               __entry->xid, __entry->sesid, __entry->tid,
+               __entry->unc_name, __entry->rc)
+)
+
+#define DEFINE_SMB3_TCON_EVENT(name)          \
+DEFINE_EVENT(smb3_tcon_class, smb3_##name,    \
+       TP_PROTO(unsigned int xid,              \
+               __u32   tid,                    \
+               __u64   sesid,                  \
+               const char *unc_name,           \
+               int     rc),                    \
+       TP_ARGS(xid, tid, sesid, unc_name, rc))
+
+DEFINE_SMB3_TCON_EVENT(tcon);
+
 /*
  * For smb2/smb3 open call
  */
index f8112433f0c8b3fa90911bc8451b631a5b9b1ef8..83ff0c25710d0861efad84a38d1da01948bbef54 100644 (file)
@@ -316,8 +316,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                        .iov_base = &rfc1002_marker,
                        .iov_len  = 4
                };
-               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
-                             1, 4);
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE, &hiov, 1, 4);
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
                        goto uncork;
@@ -338,8 +337,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                        size += iov[i].iov_len;
                }
 
-               iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
-                             iov, n_vec, size);
+               iov_iter_kvec(&smb_msg.msg_iter, WRITE, iov, n_vec, size);
 
                rc = smb_send_kvec(server, &smb_msg, &sent);
                if (rc < 0)
@@ -355,7 +353,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
                        rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
                                             &bvec.bv_offset);
 
-                       iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+                       iov_iter_bvec(&smb_msg.msg_iter, WRITE,
                                      &bvec, 1, bvec.bv_len);
                        rc = smb_send_kvec(server, &smb_msg, &sent);
                        if (rc < 0)
index 616e36ea6aaab6baf9fd3210aaa425c2398e491b..48132eca3761de2b4cdf7c6c75ab8efda8cf7a26 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
        return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
 }
 
-static void *dax_make_page_entry(struct page *page)
-{
-       pfn_t pfn = page_to_pfn_t(page);
-       return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
-}
-
 static bool dax_is_locked(void *entry)
 {
        return xa_to_value(entry) & DAX_LOCKED;
@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
        return 0;
 }
 
-static int dax_is_pmd_entry(void *entry)
+static unsigned long dax_is_pmd_entry(void *entry)
 {
        return xa_to_value(entry) & DAX_PMD;
 }
 
-static int dax_is_pte_entry(void *entry)
+static bool dax_is_pte_entry(void *entry)
 {
        return !(xa_to_value(entry) & DAX_PMD);
 }
@@ -222,9 +216,8 @@ static void *get_unlocked_entry(struct xa_state *xas)
        ewait.wait.func = wake_exceptional_entry_func;
 
        for (;;) {
-               entry = xas_load(xas);
-               if (!entry || xa_is_internal(entry) ||
-                               WARN_ON_ONCE(!xa_is_value(entry)) ||
+               entry = xas_find_conflict(xas);
+               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
                                !dax_is_locked(entry))
                        return entry;
 
@@ -239,6 +232,34 @@ static void *get_unlocked_entry(struct xa_state *xas)
        }
 }
 
+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages).
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+       struct wait_exceptional_entry_queue ewait;
+       wait_queue_head_t *wq;
+
+       init_wait(&ewait.wait);
+       ewait.wait.func = wake_exceptional_entry_func;
+
+       wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+       prepare_to_wait_exclusive(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+       xas_unlock_irq(xas);
+       schedule();
+       finish_wait(wq, &ewait.wait);
+
+       /*
+        * Entry lock waits are exclusive. Wake up the next waiter since
+        * we aren't sure we will acquire the entry lock and thus may
+        * never get the chance to wake the next waiter on unlock.
+        */
+       if (waitqueue_active(wq))
+               __wake_up(wq, TASK_NORMAL, 1, &ewait.key);
+}
+
 static void put_unlocked_entry(struct xa_state *xas, void *entry)
 {
        /* If we were the only waiter woken, wake the next one */
@@ -255,6 +276,7 @@ static void dax_unlock_entry(struct xa_state *xas, void *entry)
 {
        void *old;
 
+       BUG_ON(dax_is_locked(entry));
        xas_reset(xas);
        xas_lock_irq(xas);
        old = xas_store(xas, entry);
@@ -352,16 +374,27 @@ static struct page *dax_busy_page(void *entry)
        return NULL;
 }
 
-bool dax_lock_mapping_entry(struct page *page)
+/**
+ * dax_lock_page - Lock the DAX entry corresponding to a page
+ * @page: The page whose entry we want to lock
+ *
+ * Context: Process context.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
+ */
+dax_entry_t dax_lock_page(struct page *page)
 {
        XA_STATE(xas, NULL, 0);
        void *entry;
 
+       /* Ensure page->mapping isn't freed while we look at it */
+       rcu_read_lock();
        for (;;) {
                struct address_space *mapping = READ_ONCE(page->mapping);
 
-               if (!dax_mapping(mapping))
-                       return false;
+               entry = NULL;
+               if (!mapping || !dax_mapping(mapping))
+                       break;
 
                /*
                 * In the device-dax case there's no need to lock, a
@@ -370,8 +403,9 @@ bool dax_lock_mapping_entry(struct page *page)
                 * otherwise we would not have a valid pfn_to_page()
                 * translation.
                 */
+               entry = (void *)~0UL;
                if (S_ISCHR(mapping->host->i_mode))
-                       return true;
+                       break;
 
                xas.xa = &mapping->i_pages;
                xas_lock_irq(&xas);
@@ -382,20 +416,20 @@ bool dax_lock_mapping_entry(struct page *page)
                xas_set(&xas, page->index);
                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
-                       entry = get_unlocked_entry(&xas);
-                       /* Did the page move while we slept? */
-                       if (dax_to_pfn(entry) != page_to_pfn(page)) {
-                               xas_unlock_irq(&xas);
-                               continue;
-                       }
+                       rcu_read_unlock();
+                       wait_entry_unlocked(&xas, entry);
+                       rcu_read_lock();
+                       continue;
                }
                dax_lock_entry(&xas, entry);
                xas_unlock_irq(&xas);
-               return true;
+               break;
        }
+       rcu_read_unlock();
+       return (dax_entry_t)entry;
 }
 
-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
        struct address_space *mapping = page->mapping;
        XA_STATE(xas, &mapping->i_pages, page->index);
@@ -403,7 +437,7 @@ void dax_unlock_mapping_entry(struct page *page)
        if (S_ISCHR(mapping->host->i_mode))
                return;
 
-       dax_unlock_entry(&xas, dax_make_page_entry(page));
+       dax_unlock_entry(&xas, (void *)cookie);
 }
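
A hedged usage sketch of the reworked API (the wrapper is hypothetical): dax_lock_page() now returns an opaque dax_entry_t cookie, 0 on failure, which must be handed back to dax_unlock_page(), replacing the old bool-returning dax_lock_mapping_entry()/dax_unlock_mapping_entry() pair.

        #include <linux/dax.h>

        static bool with_dax_entry_locked(struct page *page)
        {
                dax_entry_t cookie = dax_lock_page(page);

                if (!cookie)
                        return false;
                /* ... inspect the page while its entry is held locked ... */
                dax_unlock_page(page, cookie);
                return true;
        }
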
 
 /*
@@ -445,11 +479,9 @@ static void *grab_mapping_entry(struct xa_state *xas,
 retry:
        xas_lock_irq(xas);
        entry = get_unlocked_entry(xas);
-       if (xa_is_internal(entry))
-               goto fallback;
 
        if (entry) {
-               if (WARN_ON_ONCE(!xa_is_value(entry))) {
+               if (!xa_is_value(entry)) {
                        xas_set_err(xas, EIO);
                        goto out_unlock;
                }
@@ -1628,8 +1660,7 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
        /* Did we race with someone splitting entry or so? */
        if (!entry ||
            (order == 0 && !dax_is_pte_entry(entry)) ||
-           (order == PMD_ORDER && (xa_is_internal(entry) ||
-                                   !dax_is_pmd_entry(entry)))) {
+           (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
                put_unlocked_entry(&xas, entry);
                xas_unlock_irq(&xas);
                trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
index 093fb54cd3163d96b03ebf3b0bea4a83a26bb02a..41a0e97252aed1f28a566a76ff7cc342c9606fae 100644 (file)
@@ -325,8 +325,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags)
                 */
                dio->iocb->ki_pos += transferred;
 
-               if (dio->op == REQ_OP_WRITE)
-                       ret = generic_write_sync(dio->iocb,  transferred);
+               if (ret > 0 && dio->op == REQ_OP_WRITE)
+                       ret = generic_write_sync(dio->iocb, ret);
                dio->iocb->ki_complete(dio->iocb, ret, 0);
        }
 
@@ -1313,7 +1313,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
        spin_lock_init(&dio->bio_lock);
        dio->refcount = 1;
 
-       dio->should_dirty = (iter->type == ITER_IOVEC);
+       dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
        sdio.iter = iter;
        sdio.final_block_in_request = end >> blkbits;
 
index a5e4a221435c04bdf97e1219ca3bd022ca864e6c..76976d6e50f93ac4fad8be97ab0566bcfce6c775 100644 (file)
@@ -674,7 +674,7 @@ static int receive_from_sock(struct connection *con)
                nvec = 2;
        }
        len = iov[0].iov_len + iov[1].iov_len;
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nvec, len);
+       iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);
 
        r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
        if (ret <= 0)
index 41cf2fbee50da4cb9ec0999a967d7d777fb7fc45..906839a4da8ff8215f7a18f26030cfbc18b9b0e3 100644 (file)
@@ -101,6 +101,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_name:
+                       kfree(opts->dev_name);
                        opts->dev_name = match_strdup(&args[0]);
                        if (unlikely(!opts->dev_name)) {
                                EXOFS_ERR("Error allocating dev_name");
@@ -117,7 +118,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
                                          EXOFS_MIN_PID);
                                return -EINVAL;
                        }
-                       s_pid = 1;
+                       s_pid = true;
                        break;
                case Opt_to:
                        if (match_int(&args[0], &option))
@@ -866,8 +867,10 @@ static struct dentry *exofs_mount(struct file_system_type *type,
        int ret;
 
        ret = parse_options(data, &opts);
-       if (ret)
+       if (ret) {
+               kfree(opts.dev_name);
                return ERR_PTR(ret);
+       }
 
        if (!opts.dev_name)
                opts.dev_name = dev_name;
index 645158dc33f1fc86bfcca570361002c540068584..c69927bed4effd6002f2021ca66c1b641acebc85 100644 (file)
@@ -77,7 +77,7 @@ static bool dentry_connected(struct dentry *dentry)
                struct dentry *parent = dget_parent(dentry);
 
                dput(dentry);
-               if (IS_ROOT(dentry)) {
+               if (dentry == parent) {
                        dput(parent);
                        return false;
                }
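
The fix avoids touching the dentry after dput(): dget_parent() on the filesystem root returns the root itself, so comparing the two pointers detects the top of the tree without the IS_ROOT() dereference of a possibly stale dentry. A hedged sketch of the corrected walk (the function name is made up):

        #include <linux/dcache.h>

        static void walk_to_root(struct dentry *dentry)
        {
                dget(dentry);
                for (;;) {
                        struct dentry *parent = dget_parent(dentry);

                        dput(dentry);   /* dentry may be stale from here on */
                        if (dentry == parent) {
                                dput(parent);   /* reached the root */
                                return;
                        }
                        dentry = parent;
                }
        }
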
@@ -147,6 +147,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
        tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
        if (IS_ERR(tmp)) {
                dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
+               err = PTR_ERR(tmp);
                goto out_err;
        }
        if (tmp != dentry) {
index cb91baa4275d8150664e05d8a1877ca87fca4ff9..eb11502e3fcd4f10e67fdafdf4f9e7e06e04a5b5 100644 (file)
@@ -892,6 +892,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
        if (sb->s_magic != EXT2_SUPER_MAGIC)
                goto cantfind_ext2;
 
+       opts.s_mount_opt = 0;
        /* Set defaults before we parse the mount options */
        def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
        if (def_mount_opts & EXT2_DEFM_DEBUG)
index 62d9a659a8ff4e7a3a9556449e35fecc25612e43..dd8f10db82e992da8b8f4fb37699d7bf4a87cf4d 100644 (file)
@@ -612,9 +612,9 @@ skip_replace:
        }
 
 cleanup:
-       brelse(bh);
        if (!(bh && header == HDR(bh)))
                kfree(header);
+       brelse(bh);
        up_write(&EXT2_I(inode)->xattr_sem);
 
        return error;
index 12f90d48ba6137c05673dd918feb6aca9281f4d2..3f89d0ab08fc4c8355d207c4f27f7019f08122bd 100644 (file)
 
 #include <linux/compiler.h>
 
-/* Until this gets included into linux/compiler-gcc.h */
-#ifndef __nonstring
-#if defined(GCC_VERSION) && (GCC_VERSION >= 80000)
-#define __nonstring __attribute__((nonstring))
-#else
-#define __nonstring
-#endif
-#endif
-
 /*
  * The fourth extended filesystem constants/structures
  */
index 2addcb8730e19afedcabac390e775124e6a66559..014f6a698cb712a25929f135238cb88614360d31 100644 (file)
@@ -1216,7 +1216,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
        if (IS_ERR(bitmap_bh))
-               return (struct inode *) bitmap_bh;
+               return ERR_CAST(bitmap_bh);
 
        /* Having the inode bit set should be a 100% indicator that this
         * is a valid orphan (no e2fsck run on fs).  Orphans also include
index 05f01fbd9c7fb868ecf5502cb6217e862461ef8e..22a9d81597206ce8c7aca27ab5df823c3493c991 100644 (file)
@@ -5835,9 +5835,10 @@ int ext4_mark_iloc_dirty(handle_t *handle,
 {
        int err = 0;
 
-       if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+       if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+               put_bh(iloc->bh);
                return -EIO;
-
+       }
        if (IS_I_VERSION(inode))
                inode_inc_iversion(inode);
 
index 67a38532032ae89cfcbaec35fe139992cbf2875e..437f71fe83ae557ad36af1092026aba6d58ccbc6 100644 (file)
@@ -126,6 +126,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
        if (!is_dx_block && type == INDEX) {
                ext4_error_inode(inode, func, line, block,
                       "directory leaf block found instead of index block");
+               brelse(bh);
                return ERR_PTR(-EFSCORRUPTED);
        }
        if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -1556,7 +1557,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
 
        bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
        if (IS_ERR(bh))
-               return (struct dentry *) bh;
+               return ERR_CAST(bh);
        inode = NULL;
        if (bh) {
                __u32 ino = le32_to_cpu(de->inode);
@@ -1600,7 +1601,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
 
        bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL);
        if (IS_ERR(bh))
-               return (struct dentry *) bh;
+               return ERR_CAST(bh);
        if (!bh)
                return ERR_PTR(-ENOENT);
        ino = le32_to_cpu(de->inode);
@@ -2811,7 +2812,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
                        list_del_init(&EXT4_I(inode)->i_orphan);
                        mutex_unlock(&sbi->s_orphan_lock);
                }
-       }
+       } else
+               brelse(iloc.bh);
+
        jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
        jbd_debug(4, "orphan inode %lu will point to %d\n",
                        inode->i_ino, NEXT_ORPHAN(inode));
index 2aa62d58d8dd87e095bcb61f84aa78ef755463a1..db7590178dfcf1a4b59ee3c44deaa89a21de8ca6 100644 (file)
@@ -374,13 +374,13 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
        bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
        if (!bio)
                return -ENOMEM;
+       wbc_init_bio(io->io_wbc, bio);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio_set_dev(bio, bh->b_bdev);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
-       wbc_init_bio(io->io_wbc, bio);
        return 0;
 }
 
index ebbc663d07985038ef17520fb41c5fae0e5d3637..a5efee34415fe5a6560522f5b3c48b08c3aac0d8 100644 (file)
@@ -459,16 +459,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
 
                BUFFER_TRACE(bh, "get_write_access");
                err = ext4_journal_get_write_access(handle, bh);
-               if (err)
+               if (err) {
+                       brelse(bh);
                        return err;
+               }
                ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
                           first_cluster, first_cluster - start, count2);
                ext4_set_bits(bh->b_data, first_cluster - start, count2);
 
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (unlikely(err))
                        return err;
-               brelse(bh);
        }
 
        return 0;
@@ -605,7 +607,6 @@ handle_bb:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
-                       bh = NULL;
                        goto out;
                }
                overhead = ext4_group_overhead_blocks(sb, group);
@@ -618,9 +619,9 @@ handle_bb:
                ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
                                     sb->s_blocksize * 8, bh->b_data);
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (err)
                        goto out;
-               brelse(bh);
 
 handle_ib:
                if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -635,18 +636,16 @@ handle_ib:
                bh = bclean(handle, sb, block);
                if (IS_ERR(bh)) {
                        err = PTR_ERR(bh);
-                       bh = NULL;
                        goto out;
                }
 
                ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
                                     sb->s_blocksize * 8, bh->b_data);
                err = ext4_handle_dirty_metadata(handle, NULL, bh);
+               brelse(bh);
                if (err)
                        goto out;
-               brelse(bh);
        }
-       bh = NULL;
 
        /* Mark group tables in block bitmap */
        for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -685,7 +684,6 @@ handle_ib:
        }
 
 out:
-       brelse(bh);
        err2 = ext4_journal_stop(handle);
        if (err2 && !err)
                err = err2;
@@ -873,6 +871,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
        if (unlikely(err)) {
                ext4_std_error(sb, err);
+               iloc.bh = NULL;
                goto exit_inode;
        }
        brelse(dind);
@@ -924,6 +923,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
                                     sizeof(struct buffer_head *),
                                     GFP_NOFS);
        if (!n_group_desc) {
+               brelse(gdb_bh);
                err = -ENOMEM;
                ext4_warning(sb, "not enough memory for %lu groups",
                             gdb_num + 1);
@@ -939,8 +939,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
        kvfree(o_group_desc);
        BUFFER_TRACE(gdb_bh, "get_write_access");
        err = ext4_journal_get_write_access(handle, gdb_bh);
-       if (unlikely(err))
-               brelse(gdb_bh);
        return err;
 }
 
@@ -1124,8 +1122,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
                           backup_block, backup_block -
                           ext4_group_first_block_no(sb, group));
                BUFFER_TRACE(bh, "get_write_access");
-               if ((err = ext4_journal_get_write_access(handle, bh)))
+               if ((err = ext4_journal_get_write_access(handle, bh))) {
+                       brelse(bh);
                        break;
+               }
                lock_buffer(bh);
                memcpy(bh->b_data, data, size);
                if (rest)
@@ -2023,7 +2023,7 @@ retry:
 
        err = ext4_alloc_flex_bg_array(sb, n_group + 1);
        if (err)
-               return err;
+               goto out;
 
        err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
        if (err)
@@ -2059,6 +2059,10 @@ retry:
                n_blocks_count_retry = 0;
                free_flex_gd(flex_gd);
                flex_gd = NULL;
+               if (resize_inode) {
+                       iput(resize_inode);
+                       resize_inode = NULL;
+               }
                goto retry;
        }
 
index a221f1cdf70464db0d6551236eb5ecf0f2b76326..53ff6c2a26ed999e008f17c3b0170af9a05e158e 100644 (file)
@@ -4075,6 +4075,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_groups_count = blocks_count;
        sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
                        (EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+           le32_to_cpu(es->s_inodes_count)) {
+               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+                        le32_to_cpu(es->s_inodes_count),
+                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+               ret = -EINVAL;
+               goto failed_mount;
+       }
        db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
                   EXT4_DESC_PER_BLOCK(sb);
        if (ext4_has_feature_meta_bg(sb)) {
@@ -4094,14 +4102,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                ret = -ENOMEM;
                goto failed_mount;
        }
-       if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
-           le32_to_cpu(es->s_inodes_count)) {
-               ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
-                        le32_to_cpu(es->s_inodes_count),
-                        ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
-               ret = -EINVAL;
-               goto failed_mount;
-       }
 
        bgl_lock_init(sbi->s_blockgroup_lock);
 
@@ -4510,6 +4510,7 @@ failed_mount6:
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+       percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
 failed_mount5:
        ext4_ext_release(sb);
        ext4_release_system_zone(sb);
index f36fc5d5b257438666641028b8660c0ce3462e91..7643d52c776c61188ca6ac4a7ddd18d303c96e47 100644 (file)
@@ -1031,10 +1031,8 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
        inode_lock(ea_inode);
 
        ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
-       if (ret) {
-               iloc.bh = NULL;
+       if (ret)
                goto out;
-       }
 
        ref_count = ext4_xattr_inode_get_ref(ea_inode);
        ref_count += ref_change;
@@ -1080,12 +1078,10 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
        }
 
        ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
-       iloc.bh = NULL;
        if (ret)
                ext4_warning_inode(ea_inode,
                                   "ext4_mark_iloc_dirty() failed ret=%d", ret);
 out:
-       brelse(iloc.bh);
        inode_unlock(ea_inode);
        return ret;
 }
@@ -1388,6 +1384,12 @@ retry:
                bh = ext4_getblk(handle, ea_inode, block, 0);
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
+               if (!bh) {
+                       WARN_ON_ONCE(1);
+                       EXT4_ERROR_INODE(ea_inode,
+                                        "ext4_getblk() return bh = NULL");
+                       return -EFSCORRUPTED;
+               }
                ret = ext4_journal_get_write_access(handle, bh);
                if (ret)
                        goto out;
@@ -2276,8 +2278,10 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
        if (!bh)
                return ERR_PTR(-EIO);
        error = ext4_xattr_check_block(inode, bh);
-       if (error)
+       if (error) {
+               brelse(bh);
                return ERR_PTR(error);
+       }
        return bh;
 }
 
@@ -2397,6 +2401,8 @@ retry_inode:
                        error = ext4_xattr_block_set(handle, inode, &i, &bs);
                } else if (error == -ENOSPC) {
                        if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+                               brelse(bs.bh);
+                               bs.bh = NULL;
                                error = ext4_xattr_block_find(inode, &i, &bs);
                                if (error)
                                        goto cleanup;
@@ -2617,6 +2623,8 @@ out:
        kfree(buffer);
        if (is)
                brelse(is->iloc.bh);
+       if (bs)
+               brelse(bs->bh);
        kfree(is);
        kfree(bs);
 
@@ -2696,7 +2704,6 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
                               struct ext4_inode *raw_inode, handle_t *handle)
 {
        struct ext4_xattr_ibody_header *header;
-       struct buffer_head *bh;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        static unsigned int mnt_count;
        size_t min_offs;
@@ -2737,13 +2744,17 @@ retry:
         * EA block can hold new_extra_isize bytes.
         */
        if (EXT4_I(inode)->i_file_acl) {
+               struct buffer_head *bh;
+
                bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
                error = -EIO;
                if (!bh)
                        goto cleanup;
                error = ext4_xattr_check_block(inode, bh);
-               if (error)
+               if (error) {
+                       brelse(bh);
                        goto cleanup;
+               }
                base = BHDR(bh);
                end = bh->b_data + bh->b_size;
                min_offs = end - base;
index 9edc920f651f3929f9ad4f27e061df728c424794..6d9cb1719de5c010f20ef6ecea70aaec09c54a92 100644 (file)
@@ -730,6 +730,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
 
        if (awaken)
                wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
+       if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
+               wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+
 
        /* Prevent a race with our last child, which has to signal EV_CLEARED
         * before dropping our spinlock.
index ae813e609932168ec2e74056fb598aca0ad65907..a5e516a40e7a359cdae8b2bf289175e971b6e6c9 100644 (file)
@@ -165,9 +165,13 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
 
 static void fuse_drop_waiting(struct fuse_conn *fc)
 {
-       if (fc->connected) {
-               atomic_dec(&fc->num_waiting);
-       } else if (atomic_dec_and_test(&fc->num_waiting)) {
+       /*
+        * lockless check of fc->connected is okay, because atomic_dec_and_test()
+        * provides a memory barrier matched with the one in fuse_wait_aborted()
+        * to ensure no wake-up is missed.
+        */
+       if (atomic_dec_and_test(&fc->num_waiting) &&
+           !READ_ONCE(fc->connected)) {
                /* wake up aborters */
                wake_up_all(&fc->blocked_waitq);
        }
@@ -1768,8 +1772,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        req->in.args[1].size = total_len;
 
        err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
-       if (err)
+       if (err) {
                fuse_retrieve_end(fc, req);
+               fuse_put_request(fc, req);
+       }
 
        return err;
 }
@@ -2219,6 +2225,8 @@ EXPORT_SYMBOL_GPL(fuse_abort_conn);
 
 void fuse_wait_aborted(struct fuse_conn *fc)
 {
+       /* matches implicit memory barrier in fuse_drop_waiting() */
+       smp_mb();
        wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
 }
 
index 58dbc39fea639ef5e0dc95dd830e47c9763ffe29..b52f9baaa3e7b9c98478a8c115748ae71fb7b0e1 100644 (file)
@@ -1275,7 +1275,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
        ssize_t ret = 0;
 
        /* Special case for kernel I/O: can copy directly into the buffer */
-       if (ii->type & ITER_KVEC) {
+       if (iov_iter_is_kvec(ii)) {
                unsigned long user_addr = fuse_get_user_addr(ii);
                size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
 
@@ -2924,10 +2924,12 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
        }
 
        if (io->async) {
+               bool blocking = io->blocking;
+
                fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
 
                /* we have a non-extending, async request, so return */
-               if (!io->blocking)
+               if (!blocking)
                        return -EIOCBQUEUED;
 
                wait_for_completion(&wait);
index a683d9b27d76033a191b72f81528a7b255de4f08..9a4a15d646ebb2f556828c410cb38c0bd1f30dd5 100644 (file)
@@ -826,7 +826,7 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (ret)
                goto unlock;
-       iomap->private = dibh;
+       mp->mp_bh[0] = dibh;
 
        if (gfs2_is_stuffed(ip)) {
                if (flags & IOMAP_WRITE) {
@@ -863,9 +863,6 @@ unstuff:
        len = lblock_stop - lblock + 1;
        iomap->length = len << inode->i_blkbits;
 
-       get_bh(dibh);
-       mp->mp_bh[0] = dibh;
-
        height = ip->i_height;
        while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
                height++;
@@ -898,8 +895,6 @@ out:
        iomap->bdev = inode->i_sb->s_bdev;
 unlock:
        up_read(&ip->i_rw_mutex);
-       if (ret && dibh)
-               brelse(dibh);
        return ret;
 
 do_alloc:
@@ -980,9 +975,9 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos,
 
 static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                                  loff_t length, unsigned flags,
-                                 struct iomap *iomap)
+                                 struct iomap *iomap,
+                                 struct metapath *mp)
 {
-       struct metapath mp = { .mp_aheight = 1, };
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
@@ -996,9 +991,9 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
        unstuff = gfs2_is_stuffed(ip) &&
                  pos + length > gfs2_max_stuffed_size(ip);
 
-       ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
+       ret = gfs2_iomap_get(inode, pos, length, flags, iomap, mp);
        if (ret)
-               goto out_release;
+               goto out_unlock;
 
        alloc_required = unstuff || iomap->type == IOMAP_HOLE;
 
@@ -1013,7 +1008,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 
                ret = gfs2_quota_lock_check(ip, &ap);
                if (ret)
-                       goto out_release;
+                       goto out_unlock;
 
                ret = gfs2_inplace_reserve(ip, &ap);
                if (ret)
@@ -1038,17 +1033,15 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                ret = gfs2_unstuff_dinode(ip, NULL);
                if (ret)
                        goto out_trans_end;
-               release_metapath(&mp);
-               brelse(iomap->private);
-               iomap->private = NULL;
+               release_metapath(mp);
                ret = gfs2_iomap_get(inode, iomap->offset, iomap->length,
-                                    flags, iomap, &mp);
+                                    flags, iomap, mp);
                if (ret)
                        goto out_trans_end;
        }
 
        if (iomap->type == IOMAP_HOLE) {
-               ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
+               ret = gfs2_iomap_alloc(inode, iomap, flags, mp);
                if (ret) {
                        gfs2_trans_end(sdp);
                        gfs2_inplace_release(ip);
@@ -1056,7 +1049,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
                        goto out_qunlock;
                }
        }
-       release_metapath(&mp);
        if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip))
                iomap->page_done = gfs2_iomap_journaled_page_done;
        return 0;
@@ -1069,10 +1061,7 @@ out_trans_fail:
 out_qunlock:
        if (alloc_required)
                gfs2_quota_unlock(ip);
-out_release:
-       if (iomap->private)
-               brelse(iomap->private);
-       release_metapath(&mp);
+out_unlock:
        gfs2_write_unlock(inode);
        return ret;
 }
@@ -1088,10 +1077,10 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
 
        trace_gfs2_iomap_start(ip, pos, length, flags);
        if ((flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)) {
-               ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap);
+               ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
        } else {
                ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
-               release_metapath(&mp);
+
                /*
                 * Silently fall back to buffered I/O for stuffed files or if
                 * we've hit a hole (see gfs2_file_direct_write).
@@ -1100,6 +1089,11 @@ static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                    iomap->type != IOMAP_MAPPED)
                        ret = -ENOTBLK;
        }
+       if (!ret) {
+               get_bh(mp.mp_bh[0]);
+               iomap->private = mp.mp_bh[0];
+       }
+       release_metapath(&mp);
        trace_gfs2_iomap_end(ip, iomap, ret);
        return ret;
 }
@@ -1908,10 +1902,16 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
                        if (ret < 0)
                                goto out;
 
-                       /* issue read-ahead on metadata */
-                       if (mp.mp_aheight > 1) {
-                               for (; ret > 1; ret--) {
-                                       metapointer_range(&mp, mp.mp_aheight - ret,
+                       /* On the first pass, issue read-ahead on metadata. */
+                       if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
+                               unsigned int height = mp.mp_aheight - 1;
+
+                               /* No read-ahead for data blocks. */
+                               if (mp.mp_aheight - 1 == strip_h)
+                                       height--;
+
+                               for (; height >= mp.mp_aheight - ret; height--) {
+                                       metapointer_range(&mp, height,
                                                          start_list, start_aligned,
                                                          end_list, end_aligned,
                                                          &start, &end);
index ffe3032b1043deafc6730158f3d262d7a66279af..b08a530433adfb56560fedbf4d741205afdfbd41 100644 (file)
@@ -733,6 +733,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 
                if (gl) {
                        glock_clear_object(gl, rgd);
+                       gfs2_rgrp_brelse(rgd);
                        gfs2_glock_put(gl);
                }
 
@@ -1174,7 +1175,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
  * @rgd: the struct gfs2_rgrpd describing the RG to read in
  *
  * Read in all of a Resource Group's header and bitmap blocks.
- * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
+ * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
  *
  * Returns: errno
  */
index 98b96ffb95ed3d71dbf598b9aadc1e4363ce68da..19017d2961734fd7701aa1ac31c6a94d8a65c71b 100644 (file)
@@ -338,13 +338,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
                nidx -= len * 8;
                i = node->next;
-               hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
                        pr_crit("unable to free bnode %u. bmap not found!\n",
                                node->this);
+                       hfs_bnode_put(node);
                        return;
                }
+               hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
index 236efe51eca6790e8bf05b679f92683f4c1aa35a..66774f4cb4fd5e34ae98de0d9a8ef9528bcfb47c 100644 (file)
@@ -466,14 +466,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
 
                nidx -= len * 8;
                i = node->next;
-               hfs_bnode_put(node);
                if (!i) {
                        /* panic */;
                        pr_crit("unable to free bnode %u. "
                                        "bmap not found!\n",
                                node->this);
+                       hfs_bnode_put(node);
                        return;
                }
+               hfs_bnode_put(node);
                node = hfs_bnode_find(tree, i);
                if (IS_ERR(node))
                        return;
index 9e198f00b64c6f59e7e4a50b1bc8ddf4ec73cdbf..35d2108d567c25d0de1376a84e8a2a4333859ddf 100644 (file)
@@ -730,8 +730,11 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
                return LRU_REMOVED;
        }
 
-       /* recently referenced inodes get one more pass */
-       if (inode->i_state & I_REFERENCED) {
+       /*
+        * Recently referenced inodes and inodes with many attached pages
+        * get one more pass.
+        */
+       if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
index 2005529af560891043170b4d86ed05c2a62f19eb..d64f622cac8b8f7a9cb0fdd842ce0f788f857c8d 100644 (file)
@@ -223,6 +223,7 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
                             u64 off, u64 olen, u64 destoff)
 {
        struct fd src_file = fdget(srcfd);
+       loff_t cloned;
        int ret;
 
        if (!src_file.file)
@@ -230,7 +231,14 @@ static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
        ret = -EXDEV;
        if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
                goto fdput;
-       ret = vfs_clone_file_range(src_file.file, off, dst_file, destoff, olen);
+       cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
+                                     olen, 0);
+       if (cloned < 0)
+               ret = cloned;
+       else if (olen && cloned != olen)
+               ret = -EINVAL;
+       else
+               ret = 0;
 fdput:
        fdput(src_file);
        return ret;
@@ -669,6 +677,9 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
                return ioctl_fiemap(filp, arg);
 
        case FIGETBSZ:
+               /* anon_bdev filesystems may not have a block size */
+               if (!inode->i_sb->s_blocksize)
+                       return -EINVAL;
                return put_user(inode->i_sb->s_blocksize, argp);
 
        case FICLONE:
index 90c2febc93acc716d1db6f69ca1e287f7cbac6f5..d6bc98ae8d3503870c1c23f8fe1277cdb4476d85 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/dax.h>
 #include <linux/sched/signal.h>
-#include <linux/swap.h>
 
 #include "internal.h"
 
@@ -143,13 +142,14 @@ static void
 iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
                loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
 {
+       loff_t orig_pos = *pos;
+       loff_t isize = i_size_read(inode);
        unsigned block_bits = inode->i_blkbits;
        unsigned block_size = (1 << block_bits);
        unsigned poff = offset_in_page(*pos);
        unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
        unsigned first = poff >> block_bits;
        unsigned last = (poff + plen - 1) >> block_bits;
-       unsigned end = offset_in_page(i_size_read(inode)) >> block_bits;
 
        /*
         * If the block size is smaller than the page size we need to check the
@@ -184,8 +184,12 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
         * handle both halves separately so that we properly zero data in the
         * page cache for blocks that are entirely outside of i_size.
         */
-       if (first <= end && last > end)
-               plen -= (last - end) * block_size;
+       if (orig_pos <= isize && orig_pos + length > isize) {
+               unsigned end = offset_in_page(isize - 1) >> block_bits;
+
+               if (first <= end && last > end)
+                       plen -= (last - end) * block_size;
+       }
 
        *offp = poff;
        *lenp = plen;
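
A worked example of the clamp above, assuming 4096-byte pages, 1024-byte blocks (block_bits = 10), i_size = 5120, and a read of the second page (orig_pos = 4096, length = 4096):

        /* The page straddles EOF since 4096 <= 5120 < 8192, so:
         *
         *      end  = offset_in_page(5120 - 1) >> 10 = 1023 >> 10 = 0
         *      plen = 4096 - (last - end) * 1024 = 4096 - 3 * 1024 = 1024
         *
         * and only the block actually containing data below EOF is read.
         * The old "offset_in_page(i_size_read(inode)) >> block_bits" form
         * evaluated to 0 for any page-aligned i_size and wrongly clamped
         * pages sitting entirely below EOF; the orig_pos/isize guard
         * limits the clamp to the page that contains EOF.
         */
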
@@ -1581,7 +1585,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
        struct bio *bio;
        bool need_zeroout = false;
        bool use_fua = false;
-       int nr_pages, ret;
+       int nr_pages, ret = 0;
        size_t copied = 0;
 
        if ((pos | length | align) & ((1 << blkbits) - 1))
@@ -1597,12 +1601,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 
        if (iomap->flags & IOMAP_F_NEW) {
                need_zeroout = true;
-       } else {
+       } else if (iomap->type == IOMAP_MAPPED) {
                /*
-                * Use a FUA write if we need datasync semantics, this
-                * is a pure data IO that doesn't require any metadata
-                * updates and the underlying device supports FUA. This
-                * allows us to avoid cache flushes on IO completion.
+                * Use a FUA write if we need datasync semantics; this is a pure
+                * data IO that doesn't require any metadata updates (including
+                * after IO completion such as unwritten extent conversion) and
+                * the underlying device supports FUA. This allows us to avoid
+                * cache flushes on IO completion.
                 */
                if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
                    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
@@ -1645,8 +1650,14 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
 
                ret = bio_iov_iter_get_pages(bio, &iter);
                if (unlikely(ret)) {
+                       /*
+                        * We have to stop part way through an IO. We must fall
+                        * through to the sub-block tail zeroing here, otherwise
+                        * this short IO may expose stale data in the tail of
+                        * the block we haven't written data to.
+                        */
                        bio_put(bio);
-                       return copied ? copied : ret;
+                       goto zero_tail;
                }
 
                n = bio->bi_iter.bi_size;
@@ -1677,13 +1688,21 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
                dio->submit.cookie = submit_bio(bio);
        } while (nr_pages);
 
-       if (need_zeroout) {
+       /*
+        * We need to zeroout the tail of a sub-block write if the extent type
+        * requires zeroing or the write extends beyond EOF. If we don't zero
+        * the block tail in the latter case, we can expose stale data via mmap
+        * reads of the EOF block.
+        */
+zero_tail:
+       if (need_zeroout ||
+           ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
                /* zero out from the end of the write to the end of the block */
                pad = pos & (fs_block_size - 1);
                if (pad)
                        iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
        }
-       return copied;
+       return copied ? copied : ret;
 }
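
A worked example of the tail zeroing, assuming a 4096-byte filesystem block: a direct write extending the file that stops short at pos = 6144 leaves the second block half-written.

        /*
         *      pad = 6144 & (4096 - 1) = 2048
         *      iomap_dio_zero() then clears 4096 - 2048 = 2048 bytes,
         *
         * so the unwritten tail of the EOF block is zeroed rather than
         * left holding stale data observable through mmap reads.
         */
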
 
 static loff_t
@@ -1795,7 +1814,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                if (pos >= dio->i_size)
                        goto out_free_dio;
 
-               if (iter->type == ITER_IOVEC)
+               if (iter_is_iovec(iter) && iov_iter_rw(iter) == READ)
                        dio->flags |= IOMAP_DIO_DIRTY;
        } else {
                flags |= IOMAP_WRITE;
index 98d27da43304706f4c8dcc572a397d89ff34cef2..a7f91265ea671d0f6ebe59d2b9fb0f91bd6155cf 100644 (file)
@@ -695,9 +695,6 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
 
        hlist_for_each_entry(mp, chain, m_hash) {
                if (mp->m_dentry == dentry) {
-                       /* might be worth a WARN_ON() */
-                       if (d_unlinked(dentry))
-                               return ERR_PTR(-ENOENT);
                        mp->m_count++;
                        return mp;
                }
@@ -711,6 +708,9 @@ static struct mountpoint *get_mountpoint(struct dentry *dentry)
        int ret;
 
        if (d_mountpoint(dentry)) {
+               /* might be worth a WARN_ON() */
+               if (d_unlinked(dentry))
+                       return ERR_PTR(-ENOENT);
 mountpoint:
                read_seqlock_excl(&mount_lock);
                mp = lookup_mountpoint(dentry);
@@ -1540,8 +1540,13 @@ static int do_umount(struct mount *mnt, int flags)
 
        namespace_lock();
        lock_mount_hash();
-       event++;
 
+       /* Recheck MNT_LOCKED with the locks held */
+       retval = -EINVAL;
+       if (mnt->mnt.mnt_flags & MNT_LOCKED)
+               goto out;
+
+       event++;
        if (flags & MNT_DETACH) {
                if (!list_empty(&mnt->mnt_list))
                        umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1555,6 +1560,7 @@ static int do_umount(struct mount *mnt, int flags)
                        retval = 0;
                }
        }
+out:
        unlock_mount_hash();
        namespace_unlock();
        return retval;
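
The umount path now treats the unlocked MNT_LOCKED test as a fast-path hint and repeats it under lock_mount_hash(), since the flag can change in between. A generic sketch of the pattern (all names hypothetical):

        #include <linux/errno.h>
        #include <linux/spinlock.h>

        struct obj {
                spinlock_t lock;
                bool locked;
        };

        static int try_remove(struct obj *o)
        {
                if (READ_ONCE(o->locked))       /* check optimistically */
                        return -EINVAL;

                spin_lock(&o->lock);
                if (o->locked) {                /* recheck with the lock held */
                        spin_unlock(&o->lock);
                        return -EINVAL;
                }
                /* ... actually detach the object ... */
                spin_unlock(&o->lock);
                return 0;
        }
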
@@ -1645,7 +1651,7 @@ int ksys_umount(char __user *name, int flags)
                goto dput_and_out;
        if (!check_mnt(mnt))
                goto dput_and_out;
-       if (mnt->mnt.mnt_flags & MNT_LOCKED)
+       if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
                goto dput_and_out;
        retval = -EPERM;
        if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1728,8 +1734,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
                for (s = r; s; s = next_mnt(s, r)) {
                        if (!(flag & CL_COPY_UNBINDABLE) &&
                            IS_MNT_UNBINDABLE(s)) {
-                               s = skip_mnt_tree(s);
-                               continue;
+                               if (s->mnt.mnt_flags & MNT_LOCKED) {
+                                       /* Both unbindable and locked. */
+                                       q = ERR_PTR(-EPERM);
+                                       goto out;
+                               } else {
+                                       s = skip_mnt_tree(s);
+                                       continue;
+                               }
                        }
                        if (!(flag & CL_COPY_MNT_NS_FILE) &&
                            is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1782,7 +1794,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
 {
        namespace_lock();
        lock_mount_hash();
-       umount_tree(real_mount(mnt), UMOUNT_SYNC);
+       umount_tree(real_mount(mnt), 0);
        unlock_mount_hash();
        namespace_unlock();
 }
index fa515d5ea5ba12e0e47fb0c6e3c67ea898bc0003..3159673549540f063124d62a6cc7098f86ec6ae8 100644 (file)
@@ -66,7 +66,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
 out_iput:
        rcu_read_unlock();
        trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
-       iput(inode);
+       nfs_iput_and_deactive(inode);
 out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
        return res->status;
@@ -108,7 +108,7 @@ __be32 nfs4_callback_recall(void *argp, void *resp,
        }
        trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
                        &args->stateid, -ntohl(res));
-       iput(inode);
+       nfs_iput_and_deactive(inode);
 out:
        dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
        return res;
@@ -686,20 +686,24 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
 {
        struct cb_offloadargs *args = data;
        struct nfs_server *server;
-       struct nfs4_copy_state *copy;
+       struct nfs4_copy_state *copy, *tmp_copy;
        bool found = false;
 
+       copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+       if (!copy)
+               return htonl(NFS4ERR_SERVERFAULT);
+
        spin_lock(&cps->clp->cl_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
                                client_link) {
-               list_for_each_entry(copy, &server->ss_copies, copies) {
+               list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
                        if (memcmp(args->coa_stateid.other,
-                                       copy->stateid.other,
+                                       tmp_copy->stateid.other,
                                        sizeof(args->coa_stateid.other)))
                                continue;
-                       nfs4_copy_cb_args(copy, args);
-                       complete(&copy->completion);
+                       nfs4_copy_cb_args(tmp_copy, args);
+                       complete(&tmp_copy->completion);
                        found = true;
                        goto out;
                }
@@ -707,15 +711,11 @@ __be32 nfs4_callback_offload(void *data, void *dummy,
 out:
        rcu_read_unlock();
        if (!found) {
-               copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
-               if (!copy) {
-                       spin_unlock(&cps->clp->cl_lock);
-                       return htonl(NFS4ERR_SERVERFAULT);
-               }
                memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
                nfs4_copy_cb_args(copy, args);
                list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
-       }
+       } else
+               kfree(copy);
        spin_unlock(&cps->clp->cl_lock);
 
        return 0;
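
The allocation moves in front of spin_lock() because kzalloc(..., GFP_NOFS)
may sleep, which is not allowed inside a spinlock section; the spare is
simply freed on the path that finds an existing entry. The general shape
(illustrative names):

    struct item *it = kzalloc(sizeof(*it), GFP_NOFS); /* may sleep: do it first */

    if (!it)
            return -ENOMEM;

    spin_lock(&pending_lock);
    if (find_pending_locked(key)) {
            spin_unlock(&pending_lock);
            kfree(it);                      /* already queued: drop the spare */
            return 0;
    }
    list_add_tail(&it->node, &pending);
    spin_unlock(&pending_lock);
    return 0;

The same transformation is applied to handle_async_copy() further down.
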
index 07b83956057627913ac64b44307d19a5765e03e4..6ec2f78c1e191ef3b7b3666fe458856d1fc547bc 100644 (file)
@@ -850,16 +850,23 @@ nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
 {
        struct nfs_delegation *delegation;
-       struct inode *res = NULL;
+       struct inode *freeme, *res = NULL;
 
        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
-                       res = igrab(delegation->inode);
+                       freeme = igrab(delegation->inode);
+                       if (freeme && nfs_sb_active(freeme->i_sb))
+                               res = freeme;
                        spin_unlock(&delegation->lock);
                        if (res != NULL)
                                return res;
+                       if (freeme) {
+                               rcu_read_unlock();
+                               iput(freeme);
+                               rcu_read_lock();
+                       }
                        return ERR_PTR(-EAGAIN);
                }
                spin_unlock(&delegation->lock);
index aa12c3063baec60bad7483004379c2edd7800f5e..33824a0a57bfe5de9e31f4d13e4d2eebc3b7b2df 100644 (file)
@@ -98,8 +98,11 @@ struct nfs_direct_req {
        struct pnfs_ds_commit_info ds_cinfo;    /* Storage for cinfo */
        struct work_struct      work;
        int                     flags;
+       /* for write */
 #define NFS_ODIRECT_DO_COMMIT          (1)     /* an unstable reply was received */
 #define NFS_ODIRECT_RESCHED_WRITES     (2)     /* write verification failed */
+       /* for read */
+#define NFS_ODIRECT_SHOULD_DIRTY       (3)     /* dirty user-space page after read */
        struct nfs_writeverf    verf;           /* unstable write verifier */
 };
 
@@ -412,7 +415,8 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;
 
-               if (!PageCompound(page) && bytes < hdr->good_bytes)
+               if (!PageCompound(page) && bytes < hdr->good_bytes &&
+                   (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
                        set_page_dirty(page);
                bytes += req->wb_bytes;
                nfs_list_remove_request(req);
@@ -587,6 +591,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
        if (!is_sync_kiocb(iocb))
                dreq->iocb = iocb;
 
+       if (iter_is_iovec(iter))
+               dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
+
        nfs_start_io_direct(inode);
 
        NFS_I(inode)->read_io += count;
index 86bcba40ca61b27ee6228dc900783439facdf680..310d7500f66528cc65e13f258480b2be600dea5c 100644 (file)
@@ -1361,12 +1361,7 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
                                task))
                return;
 
-       if (ff_layout_read_prepare_common(task, hdr))
-               return;
-
-       if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
-                       hdr->args.lock_context, FMODE_READ) == -EIO)
-               rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+       ff_layout_read_prepare_common(task, hdr);
 }
 
 static void ff_layout_read_call_done(struct rpc_task *task, void *data)
@@ -1542,12 +1537,7 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
                                task))
                return;
 
-       if (ff_layout_write_prepare_common(task, hdr))
-               return;
-
-       if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
-                       hdr->args.lock_context, FMODE_WRITE) == -EIO)
-               rpc_exit(task, -EIO); /* lost lock, terminate I/O */
+       ff_layout_write_prepare_common(task, hdr);
 }
 
 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
@@ -1742,6 +1732,11 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
        fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
        if (fh)
                hdr->args.fh = fh;
+
+       if (vers == 4 &&
+               !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+               goto out_failed;
+
        /*
         * Note that if we ever decide to split across DSes,
         * then we may need to handle dense-like offsets.
@@ -1804,6 +1799,10 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
        if (fh)
                hdr->args.fh = fh;
 
+       if (vers == 4 &&
+               !nfs4_ff_layout_select_ds_stateid(lseg, idx, &hdr->args.stateid))
+               goto out_failed;
+
        /*
         * Note that if we ever decide to split across DSes,
         * then we may need to handle dense-like offsets.
index 411798346e48360c85a0db1c47d2eb5edb79001f..de50a342d5a50503198bc0f8980e36599fec48f3 100644 (file)
@@ -215,6 +215,10 @@ unsigned int ff_layout_fetch_ds_ioerr(struct pnfs_layout_hdr *lo,
                unsigned int maxnum);
 struct nfs_fh *
 nfs4_ff_layout_select_ds_fh(struct pnfs_layout_segment *lseg, u32 mirror_idx);
+int
+nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
+                               u32 mirror_idx,
+                               nfs4_stateid *stateid);
 
 struct nfs4_pnfs_ds *
 nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
index 74d8d53524382abbaf3b961d81f37ac7fa249d53..d23347389626e4abb11d6be3352e1189d0c1213c 100644 (file)
@@ -370,6 +370,25 @@ out:
        return fh;
 }
 
+int
+nfs4_ff_layout_select_ds_stateid(struct pnfs_layout_segment *lseg,
+                               u32 mirror_idx,
+                               nfs4_stateid *stateid)
+{
+       struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, mirror_idx);
+
+       if (!ff_layout_mirror_valid(lseg, mirror, false)) {
+               pr_err_ratelimited("NFS: %s: No data server for mirror offset index %d\n",
+                       __func__, mirror_idx);
+               goto out;
+       }
+
+       nfs4_stateid_copy(stateid, &mirror->stateid);
+       return 1;
+out:
+       return 0;
+}
+
 /**
  * nfs4_ff_layout_prepare_ds - prepare a DS connection for an RPC call
  * @lseg: the layout segment we're operating on
index ac5b784a1de05c864958f1a16646fac034b402d5..fed06fd9998d322a202befd46f8c2546dedbba84 100644 (file)
@@ -137,31 +137,32 @@ static int handle_async_copy(struct nfs42_copy_res *res,
                             struct file *dst,
                             nfs4_stateid *src_stateid)
 {
-       struct nfs4_copy_state *copy;
+       struct nfs4_copy_state *copy, *tmp_copy;
        int status = NFS4_OK;
        bool found_pending = false;
        struct nfs_open_context *ctx = nfs_file_open_context(dst);
 
+       copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
+       if (!copy)
+               return -ENOMEM;
+
        spin_lock(&server->nfs_client->cl_lock);
-       list_for_each_entry(copy, &server->nfs_client->pending_cb_stateids,
+       list_for_each_entry(tmp_copy, &server->nfs_client->pending_cb_stateids,
                                copies) {
-               if (memcmp(&res->write_res.stateid, &copy->stateid,
+               if (memcmp(&res->write_res.stateid, &tmp_copy->stateid,
                                NFS4_STATEID_SIZE))
                        continue;
                found_pending = true;
-               list_del(&copy->copies);
+               list_del(&tmp_copy->copies);
                break;
        }
        if (found_pending) {
                spin_unlock(&server->nfs_client->cl_lock);
+               kfree(copy);
+               copy = tmp_copy;
                goto out;
        }
 
-       copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
-       if (!copy) {
-               spin_unlock(&server->nfs_client->cl_lock);
-               return -ENOMEM;
-       }
        memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
        init_completion(&copy->completion);
        copy->parent_state = ctx->state;
index 8d59c9655ec4800c95cde103c578aa1396826386..1b994b52751892cc419bfeae906ee09c300d3196 100644 (file)
@@ -41,6 +41,8 @@ enum nfs4_client_state {
        NFS4CLNT_MOVED,
        NFS4CLNT_LEASE_MOVED,
        NFS4CLNT_DELEGATION_EXPIRED,
+       NFS4CLNT_RUN_MANAGER,
+       NFS4CLNT_DELEGRETURN_RUNNING,
 };
 
 #define NFS4_RENEW_TIMEOUT             0x01
index 4288a6ecaf756361bf134af1b790cb15fd9c02d6..46d691ba04bc8fdb38b28beca6eb445c50b7443a 100644 (file)
@@ -180,8 +180,9 @@ static long nfs42_fallocate(struct file *filep, int mode, loff_t offset, loff_t
        return nfs42_proc_allocate(filep, offset, len);
 }
 
-static int nfs42_clone_file_range(struct file *src_file, loff_t src_off,
-               struct file *dst_file, loff_t dst_off, u64 count)
+static loff_t nfs42_remap_file_range(struct file *src_file, loff_t src_off,
+               struct file *dst_file, loff_t dst_off, loff_t count,
+               unsigned int remap_flags)
 {
        struct inode *dst_inode = file_inode(dst_file);
        struct nfs_server *server = NFS_SERVER(dst_inode);
@@ -190,6 +191,9 @@ static int nfs42_clone_file_range(struct file *src_file, loff_t src_off,
        bool same_inode = false;
        int ret;
 
+       if (remap_flags & ~REMAP_FILE_ADVISORY)
+               return -EINVAL;
+
        /* check alignment w.r.t. clone_blksize */
        ret = -EINVAL;
        if (bs) {
@@ -240,7 +244,7 @@ out_unlock:
                inode_unlock(src_inode);
        }
 out:
-       return ret;
+       return ret < 0 ? ret : count;
 }
 #endif /* CONFIG_NFS_V4_2 */
 
@@ -262,7 +266,7 @@ const struct file_operations nfs4_file_operations = {
        .copy_file_range = nfs4_copy_file_range,
        .llseek         = nfs4_file_llseek,
        .fallocate      = nfs42_fallocate,
-       .clone_file_range = nfs42_clone_file_range,
+       .remap_file_range = nfs42_remap_file_range,
 #else
        .llseek         = nfs_file_llseek,
 #endif
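
These hunks track the VFS-wide migration from separate ->clone_file_range
and ->dedupe_file_range hooks to a single ->remap_file_range that takes a
remap_flags argument and returns a loff_t: a negative errno on failure, the
number of bytes remapped on success. A minimal sketch of the new contract
(do_the_remap() is a placeholder for the filesystem's own logic):

    static loff_t foo_remap_file_range(struct file *file_in, loff_t pos_in,
                                       struct file *file_out, loff_t pos_out,
                                       loff_t len, unsigned int remap_flags)
    {
            int ret;

            /* Reject flags this implementation does not understand. */
            if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
                    return -EINVAL;

            ret = do_the_remap(file_in, pos_in, file_out, pos_out, len);

            /* The hook now reports how much was remapped, not just 0. */
            return ret < 0 ? ret : len;
    }

The ocfs2 conversion later in this merge follows the same shape.
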
index db84b4adbc491d7cd62e782ac7440a71c3a6c764..867457d6dfbe54060ae6f152ca2a07dfa605ff71 100644 (file)
@@ -3788,7 +3788,7 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
        }
 
        /*
-        * -EACCESS could mean that the user doesn't have correct permissions
+        * -EACCES could mean that the user doesn't have correct permissions
         * to access the mount.  It could also mean that we tried to mount
         * with a gss auth flavor, but rpc.gssd isn't running.  Either way,
         * existing mount programs don't handle -EACCES very well so it should
index 62ae0fd345ad6751d5dbbf1ab8aefca85eff9e68..d8decf2ec48fa15131dd2135f0c266678c56eec5 100644 (file)
@@ -1210,6 +1210,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
        struct task_struct *task;
        char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
 
+       set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
        if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
                return;
        __module_get(THIS_MODULE);
@@ -2503,6 +2504,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
        /* Ensure exclusive access to NFSv4 state */
        do {
+               clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
                if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
                        section = "purge state";
                        status = nfs4_purge_lease(clp);
@@ -2593,19 +2595,24 @@ static void nfs4_state_manager(struct nfs_client *clp)
                }
 
                nfs4_end_drain_session(clp);
-               if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
-                       nfs_client_return_marked_delegations(clp);
-                       continue;
+               nfs4_clear_state_manager_bit(clp);
+
+               if (!test_and_set_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state)) {
+                       if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
+                               nfs_client_return_marked_delegations(clp);
+                               set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
+                       }
+                       clear_bit(NFS4CLNT_DELEGRETURN_RUNNING, &clp->cl_state);
                }
 
-               nfs4_clear_state_manager_bit(clp);
                /* Did we race with an attempt to give us more work? */
-               if (clp->cl_state == 0)
-                       break;
+               if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
+                       return;
                if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
-                       break;
-       } while (refcount_read(&clp->cl_count) > 1);
-       return;
+                       return;
+       } while (refcount_read(&clp->cl_count) > 1 && !signalled());
+       goto out_drain;
+
 out_error:
        if (strlen(section))
                section_sep = ": ";
@@ -2613,6 +2620,7 @@ out_error:
                        " with error %d\n", section_sep, section,
                        clp->cl_hostname, -status);
        ssleep(1);
+out_drain:
        nfs4_end_drain_session(clp);
        nfs4_clear_state_manager_bit(clp);
 }
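
The reworked loop closes a lost-wakeup window: previously the manager
cleared NFS4CLNT_MANAGER_RUNNING and then looked at clp->cl_state == 0, so
work queued in between could be missed. Now nfs4_schedule_state_manager()
publishes work through a dedicated NFS4CLNT_RUN_MANAGER bit and the worker
re-tests it after dropping MANAGER_RUNNING. The handoff in outline (an
illustrative condensation of the two functions above):

    /* scheduler side */
    set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);          /* publish work */
    if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
            return;                                         /* worker active */
    /* ... start the manager thread ... */

    /* worker side, per iteration */
    clear_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
    /* ... drain all currently pending state work ... */
    nfs4_clear_state_manager_bit(clp);                      /* drop RUNNING */
    if (!test_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state))
            return;                                         /* nothing new */
    if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state))
            return;                                         /* lost the race */
    /* loop again */
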
index edff074d38c75c19a06a6ae5c634ba1fd1688d98..d505990dac7c9137b33120762b4606af655a0fc2 100644 (file)
@@ -1038,6 +1038,9 @@ nfsd4_verify_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 {
        __be32 status;
 
+       if (!cstate->save_fh.fh_dentry)
+               return nfserr_nofilehandle;
+
        status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->save_fh,
                                            src_stateid, RD_STATE, src, NULL);
        if (status) {
index 2751976704e9388239fbb3742001e261ca09bdfe..eb67098117b4c09eeaa1bf81f08139deba4dd23a 100644 (file)
@@ -541,8 +541,12 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
 __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
                u64 dst_pos, u64 count)
 {
-       return nfserrno(vfs_clone_file_range(src, src_pos, dst, dst_pos,
-                                            count));
+       loff_t cloned;
+
+       cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
+       if (count && cloned != count)
+               cloned = -EINVAL;
+       return nfserrno(cloned < 0 ? cloned : 0);
 }
 
 ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
@@ -923,7 +927,7 @@ __be32 nfsd_readv(struct svc_rqst *rqstp, struct svc_fh *fhp,
        int host_err;
 
        trace_nfsd_read_vector(rqstp, fhp, offset, *count);
-       iov_iter_kvec(&iter, READ | ITER_KVEC, vec, vlen, *count);
+       iov_iter_kvec(&iter, READ, vec, vlen, *count);
        host_err = vfs_iter_read(file, &iter, &offset, 0);
        return nfsd_finish_read(rqstp, fhp, file, offset, count, host_err);
 }
@@ -999,7 +1003,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
        if (stable && !use_wgather)
                flags |= RWF_SYNC;
 
-       iov_iter_kvec(&iter, WRITE | ITER_KVEC, vec, vlen, *cnt);
+       iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
        host_err = vfs_iter_write(file, &iter, &pos, flags);
        if (host_err < 0)
                goto out_nfserr;
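
Both iov_iter_kvec() calls shrink because the iov_iter constructors were
changed upstream to take only the data direction; the iterator type
(ITER_KVEC here) is implied by which constructor is called. The new call
shape, sketched:

    struct kvec vec = { .iov_base = buf, .iov_len = len };
    struct iov_iter iter;

    iov_iter_kvec(&iter, READ, &vec, 1, len);   /* no ITER_KVEC in the flags */

The o2net hunk below is the same mechanical conversion.
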
index de99db518571bc3dd2334de753e2ffdb51e1b884..f2129a5d9f23720e039f354f7c90a6dce46a6954 100644 (file)
@@ -266,9 +266,7 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
                return;
 
        if (nbh == NULL) {      /* blocksize == pagesize */
-               xa_lock_irq(&btnc->i_pages);
-               __xa_erase(&btnc->i_pages, newkey);
-               xa_unlock_irq(&btnc->i_pages);
+               xa_erase_irq(&btnc->i_pages, newkey);
                unlock_page(ctxt->bh->b_page);
        } else
                brelse(nbh);
index 5769cf3ff035a4b500154eb4e1a4027d3245cbe3..e08a6647267b17d927641c2ef8f34de01c9bbcb3 100644 (file)
@@ -115,12 +115,12 @@ static bool fanotify_should_send_event(struct fsnotify_iter_info *iter_info,
                        continue;
                mark = iter_info->marks[type];
                /*
-                * if the event is for a child and this inode doesn't care about
-                * events on the child, don't send it!
+                * If the event is for a child and this mark doesn't care about
+                * events on a child, don't send it!
                 */
-               if (type == FSNOTIFY_OBJ_TYPE_INODE &&
-                   (event_mask & FS_EVENT_ON_CHILD) &&
-                   !(mark->mask & FS_EVENT_ON_CHILD))
+               if (event_mask & FS_EVENT_ON_CHILD &&
+                   (type != FSNOTIFY_OBJ_TYPE_INODE ||
+                    !(mark->mask & FS_EVENT_ON_CHILD)))
                        continue;
 
                marks_mask |= mark->mask;
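
The rewritten condition inverts the old one: previously only inode marks
could be skipped for child events, which let mount (and sb) marks receive
"on child" events they never asked for. After the change an
FS_EVENT_ON_CHILD event survives the filter only for an inode mark whose
mask opted in. Restated as a predicate (helper name illustrative):

    static bool mark_wants_event(unsigned int type, __u32 event_mask,
                                 __u32 mark_mask)
    {
            if (!(event_mask & FS_EVENT_ON_CHILD))
                    return true;            /* not a child event: no filter */

            /* Child events: inode marks only, and only if they opted in. */
            return type == FSNOTIFY_OBJ_TYPE_INODE &&
                   (mark_mask & FS_EVENT_ON_CHILD);
    }
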
index 2172ba516c61d536f5f05045e0a47d7dae30cfc1..d2c34900ae05da81e941b2a2d7503714ec09d8d0 100644 (file)
@@ -167,9 +167,9 @@ int __fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask
        parent = dget_parent(dentry);
        p_inode = parent->d_inode;
 
-       if (unlikely(!fsnotify_inode_watches_children(p_inode)))
+       if (unlikely(!fsnotify_inode_watches_children(p_inode))) {
                __fsnotify_update_child_dentry_flags(p_inode);
-       else if (p_inode->i_fsnotify_mask & mask) {
+       } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) {
                struct name_snapshot name;
 
                /* we are notifying a parent so come up with the new mask which
@@ -339,6 +339,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is,
                sb = mnt->mnt.mnt_sb;
                mnt_or_sb_mask = mnt->mnt_fsnotify_mask | sb->s_fsnotify_mask;
        }
+       /* An event "on child" is not intended for a mount/sb mark */
+       if (mask & FS_EVENT_ON_CHILD)
+               mnt_or_sb_mask = 0;
 
        /*
         * Optimization: srcu_read_lock() has a memory barrier which can
index 4690cd75d8d7948a056fe899bc4600ade10b8566..3986c7a1f6a88c2b0ed421a1f6fbc45c7a68405c 100644 (file)
@@ -312,7 +312,7 @@ static struct dentry *ntfs_get_parent(struct dentry *child_dent)
        /* Get the mft record of the inode belonging to the child dentry. */
        mrec = map_mft_record(ni);
        if (IS_ERR(mrec))
-               return (struct dentry *)mrec;
+               return ERR_CAST(mrec);
        /* Find the first file name attribute in the mft record. */
        ctx = ntfs_attr_get_search_ctx(ni, mrec);
        if (unlikely(!ctx)) {
index da578ad4c08f4b5f5f66d3e7f3e5b5af77cff812..eb1ce30412dc3e09d1fbd4c8e890d86c2d0a4c9f 100644 (file)
@@ -2411,8 +2411,16 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
-       if (bytes > 0 && private)
-               ret = ocfs2_dio_end_io_write(inode, private, offset, bytes);
+       if (bytes <= 0)
+               mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
+                                (long long)bytes);
+       if (private) {
+               if (bytes > 0)
+                       ret = ocfs2_dio_end_io_write(inode, private, offset,
+                                                    bytes);
+               else
+                       ocfs2_dio_free_write_ctx(inode, private);
+       }
 
        ocfs2_iocb_clear_rw_locked(iocb);
 
index 1d098c3c00e023540d6f0665720390647945af58..4ebbd57cbf8460da741860a4e657ab5fa60f6d7b 100644 (file)
@@ -99,25 +99,34 @@ out:
        return ret;
 }
 
+/* Caller must provide a bhs[] whose entries are either all NULL or all
+ * non-NULL, so that read failures are easier to handle.
+ */
 int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                           unsigned int nr, struct buffer_head *bhs[])
 {
        int status = 0;
        unsigned int i;
        struct buffer_head *bh;
+       int new_bh = 0;
 
        trace_ocfs2_read_blocks_sync((unsigned long long)block, nr);
 
        if (!nr)
                goto bail;
 
+       /* Don't put a buffer head and reset it to NULL if it was allocated
+        * by the caller, since the caller can't be aware of this alteration!
+        */
+       new_bh = (bhs[0] == NULL);
+
        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
                        bhs[i] = sb_getblk(osb->sb, block++);
                        if (bhs[i] == NULL) {
                                status = -ENOMEM;
                                mlog_errno(status);
-                               goto bail;
+                               break;
                        }
                }
                bh = bhs[i];
@@ -158,9 +167,26 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                submit_bh(REQ_OP_READ, 0, bh);
        }
 
+read_failure:
        for (i = nr; i > 0; i--) {
                bh = bhs[i - 1];
 
+               if (unlikely(status)) {
+                       if (new_bh && bh) {
+                               /* If a middle bh fails, let the previous bh
+                                * finish its read and then put it to
+                                * avoid a bh leak
+                                */
+                               if (!buffer_jbd(bh))
+                                       wait_on_buffer(bh);
+                               put_bh(bh);
+                               bhs[i - 1] = NULL;
+                       } else if (bh && buffer_uptodate(bh)) {
+                               clear_buffer_uptodate(bh);
+                       }
+                       continue;
+               }
+
                /* No need to wait on the buffer if it's managed by JBD. */
                if (!buffer_jbd(bh))
                        wait_on_buffer(bh);
@@ -170,8 +196,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
                         * so we can safely record this and loop back
                         * to cleanup the other buffers. */
                        status = -EIO;
-                       put_bh(bh);
-                       bhs[i - 1] = NULL;
+                       goto read_failure;
                }
        }
 
@@ -179,6 +204,9 @@ bail:
        return status;
 }
 
+/* Caller must provide a bhs[] whose entries are either all NULL or all
+ * non-NULL, so that read failures are easier to handle.
+ */
 int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                      struct buffer_head *bhs[], int flags,
                      int (*validate)(struct super_block *sb,
@@ -188,6 +216,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
        int i, ignore_cache = 0;
        struct buffer_head *bh;
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+       int new_bh = 0;
 
        trace_ocfs2_read_blocks_begin(ci, (unsigned long long)block, nr, flags);
 
@@ -213,6 +242,11 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                goto bail;
        }
 
+       /* Don't put a buffer head and reset it to NULL if it was allocated
+        * by the caller, since the caller can't be aware of this alteration!
+        */
+       new_bh = (bhs[0] == NULL);
+
        ocfs2_metadata_cache_io_lock(ci);
        for (i = 0 ; i < nr ; i++) {
                if (bhs[i] == NULL) {
@@ -221,7 +255,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                                ocfs2_metadata_cache_io_unlock(ci);
                                status = -ENOMEM;
                                mlog_errno(status);
-                               goto bail;
+                               /* Don't forget to put the previous bhs! */
+                               break;
                        }
                }
                bh = bhs[i];
@@ -316,16 +351,27 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                }
        }
 
-       status = 0;
-
+read_failure:
        for (i = (nr - 1); i >= 0; i--) {
                bh = bhs[i];
 
                if (!(flags & OCFS2_BH_READAHEAD)) {
-                       if (status) {
-                               /* Clear the rest of the buffers on error */
-                               put_bh(bh);
-                               bhs[i] = NULL;
+                       if (unlikely(status)) {
+                               /* Clear the buffers on error, including those
+                                * whose reads already succeeded
+                                */
+                               if (new_bh && bh) {
+                                       /* If a middle bh fails, let the
+                                        * previous bh finish its read and
+                                        * then put it to avoid a bh leak
+                                        */
+                                       if (!buffer_jbd(bh))
+                                               wait_on_buffer(bh);
+                                       put_bh(bh);
+                                       bhs[i] = NULL;
+                               } else if (bh && buffer_uptodate(bh)) {
+                                       clear_buffer_uptodate(bh);
+                               }
                                continue;
                        }
                        /* We know this can't have changed as we hold the
@@ -343,9 +389,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                                 * uptodate. */
                                status = -EIO;
                                clear_buffer_needs_validate(bh);
-                               put_bh(bh);
-                               bhs[i] = NULL;
-                               continue;
+                               goto read_failure;
                        }
 
                        if (buffer_needs_validate(bh)) {
@@ -355,11 +399,8 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
                                BUG_ON(buffer_jbd(bh));
                                clear_buffer_needs_validate(bh);
                                status = validate(sb, bh);
-                               if (status) {
-                                       put_bh(bh);
-                                       bhs[i] = NULL;
-                                       continue;
-                               }
+                               if (status)
+                                       goto read_failure;
                        }
                }
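
Across both readers the error unwind is centralized at the read_failure
label with two invariants: a buffer whose read was already submitted must be
waited on before put_bh(), or the completing I/O would touch a freed bh, and
only buffers the function allocated itself (new_bh) are put, while
caller-provided ones are merely marked not-uptodate. The unwind in outline
(condensed from the hunks above):

    for (i = nr; i > 0; i--) {
            struct buffer_head *bh = bhs[i - 1];

            if (!bh)
                    continue;
            if (new_bh) {
                    if (!buffer_jbd(bh))
                            wait_on_buffer(bh); /* let in-flight read finish */
                    put_bh(bh);
                    bhs[i - 1] = NULL;          /* we allocated it: drop it */
            } else if (buffer_uptodate(bh)) {
                    clear_buffer_uptodate(bh);  /* caller's bh: just poison */
            }
    }
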
 
index 308ea0eb35fd112f29a5546c23dba6f80e60c787..a396096a5099f93e95f43b2db71d4f64ce996448 100644 (file)
@@ -178,6 +178,15 @@ do {                                                                       \
                              ##__VA_ARGS__);                           \
 } while (0)
 
+#define mlog_ratelimited(mask, fmt, ...)                               \
+do {                                                                   \
+       static DEFINE_RATELIMIT_STATE(_rs,                              \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST);         \
+       if (__ratelimit(&_rs))                                          \
+               mlog(mask, fmt, ##__VA_ARGS__);                         \
+} while (0)
+
 #define mlog_errno(st) ({                                              \
        int _st = (st);                                                 \
        if (_st != -ERESTARTSYS && _st != -EINTR &&                     \
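
The static DEFINE_RATELIMIT_STATE inside the do/while gives every expansion
of the macro its own rate-limit bucket, the same trick printk_ratelimited()
uses, so one noisy call site cannot starve another. Usage is the same as
mlog(); e.g. the ocfs2_dio_end_io() hunk earlier in this series:

    mlog_ratelimited(ML_ERROR, "Direct IO failed, bytes = %lld",
                     (long long)bytes);
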
index 7d9eea7d4a87339a58a1f17066c476af59d93642..e9f236af1927d3aaf4bf495ae4a35234c5c94041 100644 (file)
@@ -916,7 +916,7 @@ static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
 {
        struct kvec vec = { .iov_len = len, .iov_base = data, };
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, len);
+       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, len);
        return sock_recvmsg(sock, &msg, MSG_DONTWAIT);
 }
 
index b048d4fa3959081bd1a857f0283d398b84515752..c121abbdfc7dbcfb28675aa7e62a4cb9a70633a1 100644 (file)
@@ -1897,8 +1897,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
                                /* On error, skip the f_pos to the
                                   next block. */
                                ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
-                               brelse(bh);
-                               continue;
+                               break;
                        }
                        if (le64_to_cpu(de->inode)) {
                                unsigned char d_type = DT_UNKNOWN;
index 933aac5da193415643b34a33e14db4fdb6fc29b5..7c835824247eb7a64446467b03080f07f67aa90e 100644 (file)
@@ -2123,10 +2123,10 @@ static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
 
 /* LVB only has room for 64 bits of time here so we pack it for
  * now. */
-static u64 ocfs2_pack_timespec(struct timespec *spec)
+static u64 ocfs2_pack_timespec(struct timespec64 *spec)
 {
        u64 res;
-       u64 sec = spec->tv_sec;
+       u64 sec = clamp_t(time64_t, spec->tv_sec, 0, 0x3ffffffffull);
        u32 nsec = spec->tv_nsec;
 
        res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
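
With OCFS2_SEC_SHIFT leaving 30 bits for nanoseconds, the seconds field gets
34 bits, so clamp_t() pins tv_sec to [0, 0x3ffffffff] instead of letting a
large time64_t value silently corrupt the packed LVB. A stand-alone
round-trip demonstration (shift and mask values assumed from that layout;
illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define SEC_SHIFT 30                        /* low 30 bits hold nsec */
    #define NSEC_MASK ((1ULL << SEC_SHIFT) - 1)
    #define SEC_MAX   0x3ffffffffULL            /* 34-bit seconds field */

    static uint64_t pack(int64_t sec, uint32_t nsec)
    {
            uint64_t s = sec < 0 ? 0 : (uint64_t)sec;

            if (s > SEC_MAX)
                    s = SEC_MAX;                /* clamp, don't wrap */
            return (s << SEC_SHIFT) | (nsec & NSEC_MASK);
    }

    int main(void)
    {
            uint64_t v = pack(INT64_MAX, 123456789);

            printf("sec=%llu nsec=%llu\n",
                   (unsigned long long)(v >> SEC_SHIFT),
                   (unsigned long long)(v & NSEC_MASK));
            return 0;
    }
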
@@ -2142,7 +2142,6 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
        struct ocfs2_meta_lvb *lvb;
-       struct timespec ts;
 
        lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
 
@@ -2163,15 +2162,12 @@ static void __ocfs2_stuff_meta_lvb(struct inode *inode)
        lvb->lvb_igid      = cpu_to_be32(i_gid_read(inode));
        lvb->lvb_imode     = cpu_to_be16(inode->i_mode);
        lvb->lvb_inlink    = cpu_to_be16(inode->i_nlink);
-       ts = timespec64_to_timespec(inode->i_atime);
        lvb->lvb_iatime_packed  =
-               cpu_to_be64(ocfs2_pack_timespec(&ts));
-       ts = timespec64_to_timespec(inode->i_ctime);
+               cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
        lvb->lvb_ictime_packed =
-               cpu_to_be64(ocfs2_pack_timespec(&ts));
-       ts = timespec64_to_timespec(inode->i_mtime);
+               cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
        lvb->lvb_imtime_packed =
-               cpu_to_be64(ocfs2_pack_timespec(&ts));
+               cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
        lvb->lvb_iattr    = cpu_to_be32(oi->ip_attr);
        lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
        lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
@@ -2180,7 +2176,7 @@ out:
        mlog_meta_lvb(0, lockres);
 }
 
-static void ocfs2_unpack_timespec(struct timespec *spec,
+static void ocfs2_unpack_timespec(struct timespec64 *spec,
                                  u64 packed_time)
 {
        spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
@@ -2189,7 +2185,6 @@ static void ocfs2_unpack_timespec(struct timespec *spec,
 
 static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
 {
-       struct timespec ts;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
        struct ocfs2_meta_lvb *lvb;
@@ -2217,15 +2212,12 @@ static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
        i_gid_write(inode, be32_to_cpu(lvb->lvb_igid));
        inode->i_mode    = be16_to_cpu(lvb->lvb_imode);
        set_nlink(inode, be16_to_cpu(lvb->lvb_inlink));
-       ocfs2_unpack_timespec(&ts,
+       ocfs2_unpack_timespec(&inode->i_atime,
                              be64_to_cpu(lvb->lvb_iatime_packed));
-       inode->i_atime = timespec_to_timespec64(ts);
-       ocfs2_unpack_timespec(&ts,
+       ocfs2_unpack_timespec(&inode->i_mtime,
                              be64_to_cpu(lvb->lvb_imtime_packed));
-       inode->i_mtime = timespec_to_timespec64(ts);
-       ocfs2_unpack_timespec(&ts,
+       ocfs2_unpack_timespec(&inode->i_ctime,
                              be64_to_cpu(lvb->lvb_ictime_packed));
-       inode->i_ctime = timespec_to_timespec64(ts);
        spin_unlock(&oi->ip_lock);
 }
 
@@ -3603,7 +3595,7 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
         * we can recover correctly from node failure. Otherwise, we may get
         * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set.
         */
-       if (!ocfs2_is_o2cb_active() &&
+       if (ocfs2_userspace_stack(osb) &&
            lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
                lvb = 1;
 
index 9f88188060db9c7fa59e6882ecf33b55cf921788..4bf8d5854b2711ebcac47c5c1f38d49f120d0eb5 100644 (file)
@@ -125,10 +125,10 @@ check_err:
 
 check_gen:
        if (handle->ih_generation != inode->i_generation) {
-               iput(inode);
                trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
                                                  handle->ih_generation,
                                                  inode->i_generation);
+               iput(inode);
                result = ERR_PTR(-ESTALE);
                goto bail;
        }
index 9fa35cb6f6e0b5b38023f45512fc75200be4c694..d640c5f8a85da8fc4ba030b295db0233b377ea7f 100644 (file)
@@ -2343,7 +2343,7 @@ static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
 
        written = __generic_file_write_iter(iocb, from);
        /* buffered aio wouldn't have proper lock coverage today */
-       BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
+       BUG_ON(written == -EIOCBQUEUED && !direct_io);
 
        /*
         * deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
@@ -2463,7 +2463,7 @@ static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
        trace_generic_file_read_iter_ret(ret);
 
        /* buffered aio wouldn't have proper lock coverage today */
-       BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
+       BUG_ON(ret == -EIOCBQUEUED && !direct_io);
 
        /* see ocfs2_file_write_iter */
        if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
@@ -2527,24 +2527,79 @@ out:
        return offset;
 }
 
-static int ocfs2_file_clone_range(struct file *file_in,
-                                 loff_t pos_in,
-                                 struct file *file_out,
-                                 loff_t pos_out,
-                                 u64 len)
+static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
+                                    struct file *file_out, loff_t pos_out,
+                                    loff_t len, unsigned int remap_flags)
 {
-       return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
-                                        len, false);
-}
+       struct inode *inode_in = file_inode(file_in);
+       struct inode *inode_out = file_inode(file_out);
+       struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
+       struct buffer_head *in_bh = NULL, *out_bh = NULL;
+       bool same_inode = (inode_in == inode_out);
+       loff_t remapped = 0;
+       ssize_t ret;
 
-static int ocfs2_file_dedupe_range(struct file *file_in,
-                                  loff_t pos_in,
-                                  struct file *file_out,
-                                  loff_t pos_out,
-                                  u64 len)
-{
-       return ocfs2_reflink_remap_range(file_in, pos_in, file_out, pos_out,
-                                         len, true);
+       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+               return -EINVAL;
+       if (!ocfs2_refcount_tree(osb))
+               return -EOPNOTSUPP;
+       if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+               return -EROFS;
+
+       /* Lock both files against IO */
+       ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
+       if (ret)
+               return ret;
+
+       /* Check file eligibility and prepare for block sharing. */
+       ret = -EINVAL;
+       if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
+           (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
+               goto out_unlock;
+
+       ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
+                       &len, remap_flags);
+       if (ret < 0 || len == 0)
+               goto out_unlock;
+
+       /* Lock out changes to the allocation maps and remap. */
+       down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
+       if (!same_inode)
+               down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
+                                 SINGLE_DEPTH_NESTING);
+
+       /* Zap any page cache for the destination file's range. */
+       truncate_inode_pages_range(&inode_out->i_data,
+                                  round_down(pos_out, PAGE_SIZE),
+                                  round_up(pos_out + len, PAGE_SIZE) - 1);
+
+       remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
+                       inode_out, out_bh, pos_out, len);
+       up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
+       if (!same_inode)
+               up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
+       if (remapped < 0) {
+               ret = remapped;
+               mlog_errno(ret);
+               goto out_unlock;
+       }
+
+       /*
+        * Empty the extent map so that we may get the right extent
+        * record from the disk.
+        */
+       ocfs2_extent_map_trunc(inode_in, 0);
+       ocfs2_extent_map_trunc(inode_out, 0);
+
+       ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_unlock;
+       }
+
+out_unlock:
+       ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
+       return remapped > 0 ? remapped : ret;
 }
 
 const struct inode_operations ocfs2_file_iops = {
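
Unlike the old helper, which zapped the destination page cache from pos_out
to PAGE_ALIGN(pos_out + len) - 1, the new code also rounds the start down,
round_down(pos_out, PAGE_SIZE) through round_up(pos_out + len, PAGE_SIZE) - 1,
so partially covered edge pages on both sides are dropped and cannot serve
stale data after the remap. For example, with 4096-byte pages, pos_out = 5000
and len = 100 now invalidates bytes 4096 through 8191.
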
@@ -2586,8 +2641,7 @@ const struct file_operations ocfs2_fops = {
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ocfs2_fallocate,
-       .clone_file_range = ocfs2_file_clone_range,
-       .dedupe_file_range = ocfs2_file_dedupe_range,
+       .remap_file_range = ocfs2_remap_file_range,
 };
 
 const struct file_operations ocfs2_dops = {
@@ -2633,8 +2687,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
        .splice_read    = generic_file_splice_read,
        .splice_write   = iter_file_splice_write,
        .fallocate      = ocfs2_fallocate,
-       .clone_file_range = ocfs2_file_clone_range,
-       .dedupe_file_range = ocfs2_file_dedupe_range,
+       .remap_file_range = ocfs2_remap_file_range,
 };
 
 const struct file_operations ocfs2_dops_no_plocks = {
index bd3475694e83a06501a055e73fd1403f81123eef..b63c97f4318e063889fe1ca203d19092c1abbedf 100644 (file)
@@ -1378,15 +1378,23 @@ static int __ocfs2_recovery_thread(void *arg)
        int rm_quota_used = 0, i;
        struct ocfs2_quota_recovery *qrec;
 
+       /* Whether quota is supported. */
+       int quota_enabled = OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
+                       OCFS2_FEATURE_RO_COMPAT_USRQUOTA)
+               || OCFS2_HAS_RO_COMPAT_FEATURE(osb->sb,
+                       OCFS2_FEATURE_RO_COMPAT_GRPQUOTA);
+
        status = ocfs2_wait_on_mount(osb);
        if (status < 0) {
                goto bail;
        }
 
-       rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
-       if (!rm_quota) {
-               status = -ENOMEM;
-               goto bail;
+       if (quota_enabled) {
+               rm_quota = kcalloc(osb->max_slots, sizeof(int), GFP_NOFS);
+               if (!rm_quota) {
+                       status = -ENOMEM;
+                       goto bail;
+               }
        }
 restart:
        status = ocfs2_super_lock(osb, 1);
@@ -1422,9 +1430,14 @@ restart:
                 * then quota usage would be out of sync until some node takes
                 * the slot. So we remember which nodes need quota recovery
                 * and when everything else is done, we recover quotas. */
-               for (i = 0; i < rm_quota_used && rm_quota[i] != slot_num; i++);
-               if (i == rm_quota_used)
-                       rm_quota[rm_quota_used++] = slot_num;
+               if (quota_enabled) {
+                       for (i = 0; i < rm_quota_used
+                                       && rm_quota[i] != slot_num; i++)
+                               ;
+
+                       if (i == rm_quota_used)
+                               rm_quota[rm_quota_used++] = slot_num;
+               }
 
                status = ocfs2_recover_node(osb, node_num, slot_num);
 skip_recovery:
@@ -1452,16 +1465,19 @@ skip_recovery:
        /* Now it is right time to recover quotas... We have to do this under
         * superblock lock so that no one can start using the slot (and crash)
         * before we recover it */
-       for (i = 0; i < rm_quota_used; i++) {
-               qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
-               if (IS_ERR(qrec)) {
-                       status = PTR_ERR(qrec);
-                       mlog_errno(status);
-                       continue;
+       if (quota_enabled) {
+               for (i = 0; i < rm_quota_used; i++) {
+                       qrec = ocfs2_begin_quota_recovery(osb, rm_quota[i]);
+                       if (IS_ERR(qrec)) {
+                               status = PTR_ERR(qrec);
+                               mlog_errno(status);
+                               continue;
+                       }
+                       ocfs2_queue_recovery_completion(osb->journal,
+                                       rm_quota[i],
+                                       NULL, NULL, qrec,
+                                       ORPHAN_NEED_TRUNCATE);
                }
-               ocfs2_queue_recovery_completion(osb->journal, rm_quota[i],
-                                               NULL, NULL, qrec,
-                                               ORPHAN_NEED_TRUNCATE);
        }
 
        ocfs2_super_unlock(osb, 1);
@@ -1483,7 +1499,8 @@ bail:
 
        mutex_unlock(&osb->recovery_lock);
 
-       kfree(rm_quota);
+       if (quota_enabled)
+               kfree(rm_quota);
 
        /* no one is calling kthread_stop() for us so the kthread() api
         * requires that we call do_exit().  And it isn't exported, but
index 7eb3b0a6347ef74990589ac47f5b4823441c8135..1565dd8e8856ee62d68e4fa63df6e8cc5b79d48c 100644 (file)
@@ -25,6 +25,7 @@
 #include "ocfs2_ioctl.h"
 
 #include "alloc.h"
+#include "localalloc.h"
 #include "aops.h"
 #include "dlmglue.h"
 #include "extent_map.h"
@@ -156,18 +157,14 @@ out:
 }
 
 /*
- * lock allocators, and reserving appropriate number of bits for
- * meta blocks and data clusters.
- *
- * in some cases, we don't need to reserve clusters, just let data_ac
- * be NULL.
+ * lock allocator, and reserve appropriate number of bits for
+ * meta blocks.
  */
-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
                                        struct ocfs2_extent_tree *et,
                                        u32 clusters_to_move,
                                        u32 extents_to_split,
                                        struct ocfs2_alloc_context **meta_ac,
-                                       struct ocfs2_alloc_context **data_ac,
                                        int extra_blocks,
                                        int *credits)
 {
@@ -192,13 +189,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
                goto out;
        }
 
-       if (data_ac) {
-               ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
-               if (ret) {
-                       mlog_errno(ret);
-                       goto out;
-               }
-       }
 
        *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
 
@@ -233,6 +223,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
        struct ocfs2_refcount_tree *ref_tree = NULL;
        u32 new_phys_cpos, new_len;
        u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
+       int need_free = 0;
 
        if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
                BUG_ON(!ocfs2_is_refcount_inode(inode));
@@ -257,10 +248,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
                }
        }
 
-       ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
-                                                &context->meta_ac,
-                                                &context->data_ac,
-                                                extra_blocks, &credits);
+       ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+                                               *len, 1,
+                                               &context->meta_ac,
+                                               extra_blocks, &credits);
        if (ret) {
                mlog_errno(ret);
                goto out;
@@ -283,6 +274,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
                }
        }
 
+       /*
+        * Make sure ocfs2_reserve_clusters() is called after
+        * __ocfs2_flush_truncate_log(); otherwise a deadlock on the
+        * global bitmap may happen.
+        */
+       ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_unlock_mutex;
+       }
+
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
@@ -308,6 +314,7 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
                if (!partial) {
                        context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
                        ret = -ENOSPC;
+                       need_free = 1;
                        goto out_commit;
                }
        }
@@ -332,6 +339,20 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
                mlog_errno(ret);
 
 out_commit:
+       if (need_free && context->data_ac) {
+               struct ocfs2_alloc_context *data_ac = context->data_ac;
+
+               if (context->data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+                       ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+                                       new_phys_cpos, new_len);
+               else
+                       ocfs2_free_clusters(handle,
+                                       data_ac->ac_inode,
+                                       data_ac->ac_bh,
+                                       ocfs2_clusters_to_blocks(osb->sb, new_phys_cpos),
+                                       new_len);
+       }
+
        ocfs2_commit_trans(osb, handle);
 
 out_unlock_mutex:
@@ -600,9 +621,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
                }
        }
 
-       ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
-                                                &context->meta_ac,
-                                                NULL, extra_blocks, &credits);
+       ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+                                               len, 1,
+                                               &context->meta_ac,
+                                               extra_blocks, &credits);
        if (ret) {
                mlog_errno(ret);
                goto out;
index 1114ef02e7803f0594ea6744fa8a39ffe0de68c0..a35259eebc56739b59bf7ffb5029e647dc11ad0f 100644 (file)
@@ -4466,9 +4466,9 @@ out:
 }
 
 /* Update destination inode size, if necessary. */
-static int ocfs2_reflink_update_dest(struct inode *dest,
-                                    struct buffer_head *d_bh,
-                                    loff_t newlen)
+int ocfs2_reflink_update_dest(struct inode *dest,
+                             struct buffer_head *d_bh,
+                             loff_t newlen)
 {
        handle_t *handle;
        int ret;
@@ -4505,14 +4505,14 @@ out_commit:
 }
 
 /* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
-static int ocfs2_reflink_remap_extent(struct inode *s_inode,
-                                     struct buffer_head *s_bh,
-                                     loff_t pos_in,
-                                     struct inode *t_inode,
-                                     struct buffer_head *t_bh,
-                                     loff_t pos_out,
-                                     loff_t len,
-                                     struct ocfs2_cached_dealloc_ctxt *dealloc)
+static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
+                                        struct buffer_head *s_bh,
+                                        loff_t pos_in,
+                                        struct inode *t_inode,
+                                        struct buffer_head *t_bh,
+                                        loff_t pos_out,
+                                        loff_t len,
+                                        struct ocfs2_cached_dealloc_ctxt *dealloc)
 {
        struct ocfs2_extent_tree s_et;
        struct ocfs2_extent_tree t_et;
@@ -4520,8 +4520,9 @@ static int ocfs2_reflink_remap_extent(struct inode *s_inode,
        struct buffer_head *ref_root_bh = NULL;
        struct ocfs2_refcount_tree *ref_tree;
        struct ocfs2_super *osb;
+       loff_t remapped_bytes = 0;
        loff_t pstart, plen;
-       u32 p_cluster, num_clusters, slast, spos, tpos;
+       u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
        unsigned int ext_flags;
        int ret = 0;
 
@@ -4603,30 +4604,34 @@ static int ocfs2_reflink_remap_extent(struct inode *s_inode,
 next_loop:
                spos += num_clusters;
                tpos += num_clusters;
+               remapped_clus += num_clusters;
        }
 
-out:
-       return ret;
+       goto out;
 out_unlock_refcount:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        brelse(ref_root_bh);
-       return ret;
+out:
+       remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
+       remapped_bytes = min_t(loff_t, len, remapped_bytes);
+
+       return remapped_bytes > 0 ? remapped_bytes : ret;
 }
 
 /* Set up refcount tree and remap s_inode to t_inode. */
-static int ocfs2_reflink_remap_blocks(struct inode *s_inode,
-                                     struct buffer_head *s_bh,
-                                     loff_t pos_in,
-                                     struct inode *t_inode,
-                                     struct buffer_head *t_bh,
-                                     loff_t pos_out,
-                                     loff_t len)
+loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
+                                 struct buffer_head *s_bh,
+                                 loff_t pos_in,
+                                 struct inode *t_inode,
+                                 struct buffer_head *t_bh,
+                                 loff_t pos_out,
+                                 loff_t len)
 {
        struct ocfs2_cached_dealloc_ctxt dealloc;
        struct ocfs2_super *osb;
        struct ocfs2_dinode *dis;
        struct ocfs2_dinode *dit;
-       int ret;
+       loff_t ret;
 
        osb = OCFS2_SB(s_inode->i_sb);
        dis = (struct ocfs2_dinode *)s_bh->b_data;
@@ -4698,7 +4703,7 @@ static int ocfs2_reflink_remap_blocks(struct inode *s_inode,
        /* Actually remap extents now. */
        ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
                                         pos_out, len, &dealloc);
-       if (ret) {
+       if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }
@@ -4713,10 +4718,10 @@ out:
 }
 
 /* Lock an inode and grab a bh pointing to the inode. */
-static int ocfs2_reflink_inodes_lock(struct inode *s_inode,
-                                    struct buffer_head **bh1,
-                                    struct inode *t_inode,
-                                    struct buffer_head **bh2)
+int ocfs2_reflink_inodes_lock(struct inode *s_inode,
+                             struct buffer_head **bh1,
+                             struct inode *t_inode,
+                             struct buffer_head **bh2)
 {
        struct inode *inode1;
        struct inode *inode2;
@@ -4801,10 +4806,10 @@ out_i1:
 }
 
 /* Unlock both inodes and release buffers. */
-static void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
-                                       struct buffer_head *s_bh,
-                                       struct inode *t_inode,
-                                       struct buffer_head *t_bh)
+void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
+                                struct buffer_head *s_bh,
+                                struct inode *t_inode,
+                                struct buffer_head *t_bh)
 {
        ocfs2_inode_unlock(s_inode, 1);
        ocfs2_rw_unlock(s_inode, 1);
@@ -4816,82 +4821,3 @@ static void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
        }
        unlock_two_nondirectories(s_inode, t_inode);
 }
-
-/* Link a range of blocks from one file to another. */
-int ocfs2_reflink_remap_range(struct file *file_in,
-                             loff_t pos_in,
-                             struct file *file_out,
-                             loff_t pos_out,
-                             u64 len,
-                             bool is_dedupe)
-{
-       struct inode *inode_in = file_inode(file_in);
-       struct inode *inode_out = file_inode(file_out);
-       struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
-       struct buffer_head *in_bh = NULL, *out_bh = NULL;
-       bool same_inode = (inode_in == inode_out);
-       ssize_t ret;
-
-       if (!ocfs2_refcount_tree(osb))
-               return -EOPNOTSUPP;
-       if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
-               return -EROFS;
-
-       /* Lock both files against IO */
-       ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
-       if (ret)
-               return ret;
-
-       /* Check file eligibility and prepare for block sharing. */
-       ret = -EINVAL;
-       if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
-           (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
-               goto out_unlock;
-
-       ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
-                       &len, is_dedupe);
-       if (ret <= 0)
-               goto out_unlock;
-
-       /* Lock out changes to the allocation maps and remap. */
-       down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
-       if (!same_inode)
-               down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
-                                 SINGLE_DEPTH_NESTING);
-
-       ret = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in, inode_out,
-                                        out_bh, pos_out, len);
-
-       /* Zap any page cache for the destination file's range. */
-       if (!ret)
-               truncate_inode_pages_range(&inode_out->i_data, pos_out,
-                                          PAGE_ALIGN(pos_out + len) - 1);
-
-       up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
-       if (!same_inode)
-               up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_unlock;
-       }
-
-       /*
-        * Empty the extent map so that we may get the right extent
-        * record from the disk.
-        */
-       ocfs2_extent_map_trunc(inode_in, 0);
-       ocfs2_extent_map_trunc(inode_out, 0);
-
-       ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
-       if (ret) {
-               mlog_errno(ret);
-               goto out_unlock;
-       }
-
-       ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
-       return 0;
-
-out_unlock:
-       ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
-       return ret;
-}
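
The ocfs2 hunks above switch the remap helpers from a 0/-errno convention to returning the number of bytes remapped as a loff_t, so a partial remap can be reported instead of being treated as all-or-nothing. A minimal caller sketch under the new convention (hypothetical fragment; names taken from the prototypes above):

	loff_t remapped;

	remapped = ocfs2_reflink_remap_blocks(s_inode, s_bh, pos_in,
					      t_inode, t_bh, pos_out, len);
	if (remapped < 0)
		return remapped;		/* -errno */
	if (remapped < len)			/* short remap, not an error */
		pr_debug("remapped %lld of %lld bytes\n",
			 (long long)remapped, (long long)len);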
index 4af55bf4b35b977355fd1df6826e1ddf493f9736..e9e862be4a1e51cb55fe129d996bf08731d8c220 100644 (file)
@@ -115,11 +115,23 @@ int ocfs2_reflink_ioctl(struct inode *inode,
                        const char __user *oldname,
                        const char __user *newname,
                        bool preserve);
-int ocfs2_reflink_remap_range(struct file *file_in,
-                             loff_t pos_in,
-                             struct file *file_out,
-                             loff_t pos_out,
-                             u64 len,
-                             bool is_dedupe);
+loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
+                                 struct buffer_head *s_bh,
+                                 loff_t pos_in,
+                                 struct inode *t_inode,
+                                 struct buffer_head *t_bh,
+                                 loff_t pos_out,
+                                 loff_t len);
+int ocfs2_reflink_inodes_lock(struct inode *s_inode,
+                             struct buffer_head **bh1,
+                             struct inode *t_inode,
+                             struct buffer_head **bh2);
+void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
+                                struct buffer_head *s_bh,
+                                struct inode *t_inode,
+                                struct buffer_head *t_bh);
+int ocfs2_reflink_update_dest(struct inode *dest,
+                             struct buffer_head *d_bh,
+                             loff_t newlen);
 
 #endif /* OCFS2_REFCOUNTTREE_H */
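
These helpers lose their static linkage so that the remap entry point can live outside refcounttree.c. A hedged sketch of the sequence the exported API supports (myfs_remap_sketch is hypothetical; a real caller must also run the generic VFS prep step):

	static loff_t myfs_remap_sketch(struct inode *s_inode, struct inode *t_inode,
					loff_t pos_in, loff_t pos_out, loff_t len)
	{
		struct buffer_head *s_bh = NULL, *t_bh = NULL;
		loff_t remapped;
		int err;

		err = ocfs2_reflink_inodes_lock(s_inode, &s_bh, t_inode, &t_bh);
		if (err)
			return err;

		remapped = ocfs2_reflink_remap_blocks(s_inode, s_bh, pos_in,
						      t_inode, t_bh, pos_out, len);
		if (remapped > 0)
			err = ocfs2_reflink_update_dest(t_inode, t_bh,
							pos_out + remapped);

		ocfs2_reflink_inodes_unlock(s_inode, s_bh, t_inode, t_bh);
		if (remapped < 0)
			return remapped;
		return err ? err : remapped;
	}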
index d6c350ba25b96ec9886cdc11b46a94fb17769261..c4b029c43464e0d14424a8a9af216d9168ca4bc9 100644 (file)
@@ -48,12 +48,6 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
  */
 static struct ocfs2_stack_plugin *active_stack;
 
-inline int ocfs2_is_o2cb_active(void)
-{
-       return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
-}
-EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
-
 static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
 {
        struct ocfs2_stack_plugin *p;
index e3036e1790e86da7b4e13dcb0b8c88e3f19b6d50..f2dce10fae543c254dcb4e6628d357b60a3ac16c 100644 (file)
@@ -298,9 +298,6 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
 int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
 void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
 
-/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
-int ocfs2_is_o2cb_active(void);
-
 extern struct kset *ocfs2_kset;
 
 #endif  /* STACKGLUE_H */
index 5e65d818937bb1f03ba2f964780a5fd5bfb1f880..fe53381b26b1841c3cae47dbe630d400e6021af8 100644 (file)
@@ -25,7 +25,7 @@ static int read_one_page(struct page *page)
        struct iov_iter to;
        struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
 
-       iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);
+       iov_iter_bvec(&to, READ, &bv, 1, PAGE_SIZE);
 
        gossip_debug(GOSSIP_INODE_DEBUG,
                    "orangefs_readpage called with page %p\n",
index 1cc797a08a5b5f7eb6c002862c834177e5d6f93c..9e62dcf06fc4a911dd777d2c7ff0a992190b5a0f 100644 (file)
@@ -125,6 +125,7 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        struct file *new_file;
        loff_t old_pos = 0;
        loff_t new_pos = 0;
+       loff_t cloned;
        int error = 0;
 
        if (len == 0)
@@ -141,11 +142,10 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
        }
 
        /* Try to use clone_file_range to clone up within the same fs */
-       error = do_clone_file_range(old_file, 0, new_file, 0, len);
-       if (!error)
+       cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
+       if (cloned == len)
                goto out;
        /* Couldn't clone, so now we try to copy the data */
-       error = 0;
 
        /* FIXME: copy up sparse files efficiently */
        while (len) {
@@ -395,7 +395,6 @@ struct ovl_copy_up_ctx {
        struct dentry *destdir;
        struct qstr destname;
        struct dentry *workdir;
-       bool tmpfile;
        bool origin;
        bool indexed;
        bool metacopy;
@@ -440,63 +439,6 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
        return err;
 }
 
-static int ovl_install_temp(struct ovl_copy_up_ctx *c, struct dentry *temp,
-                           struct dentry **newdentry)
-{
-       int err;
-       struct dentry *upper;
-       struct inode *udir = d_inode(c->destdir);
-
-       upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
-       if (IS_ERR(upper))
-               return PTR_ERR(upper);
-
-       if (c->tmpfile)
-               err = ovl_do_link(temp, udir, upper);
-       else
-               err = ovl_do_rename(d_inode(c->workdir), temp, udir, upper, 0);
-
-       if (!err)
-               *newdentry = dget(c->tmpfile ? upper : temp);
-       dput(upper);
-
-       return err;
-}
-
-static struct dentry *ovl_get_tmpfile(struct ovl_copy_up_ctx *c)
-{
-       int err;
-       struct dentry *temp;
-       const struct cred *old_creds = NULL;
-       struct cred *new_creds = NULL;
-       struct ovl_cattr cattr = {
-               /* Can't properly set mode on creation because of the umask */
-               .mode = c->stat.mode & S_IFMT,
-               .rdev = c->stat.rdev,
-               .link = c->link
-       };
-
-       err = security_inode_copy_up(c->dentry, &new_creds);
-       temp = ERR_PTR(err);
-       if (err < 0)
-               goto out;
-
-       if (new_creds)
-               old_creds = override_creds(new_creds);
-
-       if (c->tmpfile)
-               temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
-       else
-               temp = ovl_create_temp(c->workdir, &cattr);
-out:
-       if (new_creds) {
-               revert_creds(old_creds);
-               put_cred(new_creds);
-       }
-
-       return temp;
-}
-
 static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
 {
        int err;
@@ -548,51 +490,148 @@ static int ovl_copy_up_inode(struct ovl_copy_up_ctx *c, struct dentry *temp)
        return err;
 }
 
-static int ovl_copy_up_locked(struct ovl_copy_up_ctx *c)
+struct ovl_cu_creds {
+       const struct cred *old;
+       struct cred *new;
+};
+
+static int ovl_prep_cu_creds(struct dentry *dentry, struct ovl_cu_creds *cc)
+{
+       int err;
+
+       cc->old = cc->new = NULL;
+       err = security_inode_copy_up(dentry, &cc->new);
+       if (err < 0)
+               return err;
+
+       if (cc->new)
+               cc->old = override_creds(cc->new);
+
+       return 0;
+}
+
+static void ovl_revert_cu_creds(struct ovl_cu_creds *cc)
+{
+       if (cc->new) {
+               revert_creds(cc->old);
+               put_cred(cc->new);
+       }
+}
+
+/*
+ * Copyup using workdir to prepare a temp file.  Used when copying up
+ * directories, special files, or when the upper fs doesn't support O_TMPFILE.
+ */
+static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
 {
-       struct inode *udir = c->destdir->d_inode;
        struct inode *inode;
-       struct dentry *newdentry = NULL;
-       struct dentry *temp;
+       struct inode *udir = d_inode(c->destdir), *wdir = d_inode(c->workdir);
+       struct dentry *temp, *upper;
+       struct ovl_cu_creds cc;
        int err;
+       struct ovl_cattr cattr = {
+               /* Can't properly set mode on creation because of the umask */
+               .mode = c->stat.mode & S_IFMT,
+               .rdev = c->stat.rdev,
+               .link = c->link
+       };
+
+       err = ovl_lock_rename_workdir(c->workdir, c->destdir);
+       if (err)
+               return err;
+
+       err = ovl_prep_cu_creds(c->dentry, &cc);
+       if (err)
+               goto unlock;
 
-       temp = ovl_get_tmpfile(c);
+       temp = ovl_create_temp(c->workdir, &cattr);
+       ovl_revert_cu_creds(&cc);
+
+       err = PTR_ERR(temp);
        if (IS_ERR(temp))
-               return PTR_ERR(temp);
+               goto unlock;
 
        err = ovl_copy_up_inode(c, temp);
        if (err)
-               goto out;
+               goto cleanup;
 
        if (S_ISDIR(c->stat.mode) && c->indexed) {
                err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp);
                if (err)
-                       goto out;
+                       goto cleanup;
        }
 
-       if (c->tmpfile) {
-               inode_lock_nested(udir, I_MUTEX_PARENT);
-               err = ovl_install_temp(c, temp, &newdentry);
-               inode_unlock(udir);
-       } else {
-               err = ovl_install_temp(c, temp, &newdentry);
-       }
+       upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+       err = PTR_ERR(upper);
+       if (IS_ERR(upper))
+               goto cleanup;
+
+       err = ovl_do_rename(wdir, temp, udir, upper, 0);
+       dput(upper);
        if (err)
-               goto out;
+               goto cleanup;
 
        if (!c->metacopy)
                ovl_set_upperdata(d_inode(c->dentry));
        inode = d_inode(c->dentry);
-       ovl_inode_update(inode, newdentry);
+       ovl_inode_update(inode, temp);
        if (S_ISDIR(inode->i_mode))
                ovl_set_flag(OVL_WHITEOUTS, inode);
+unlock:
+       unlock_rename(c->workdir, c->destdir);
 
-out:
-       if (err && !c->tmpfile)
-               ovl_cleanup(d_inode(c->workdir), temp);
-       dput(temp);
        return err;
 
+cleanup:
+       ovl_cleanup(wdir, temp);
+       dput(temp);
+       goto unlock;
+}
+
+/* Copyup using O_TMPFILE, which does not require cross-dir locking */
+static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
+{
+       struct inode *udir = d_inode(c->destdir);
+       struct dentry *temp, *upper;
+       struct ovl_cu_creds cc;
+       int err;
+
+       err = ovl_prep_cu_creds(c->dentry, &cc);
+       if (err)
+               return err;
+
+       temp = ovl_do_tmpfile(c->workdir, c->stat.mode);
+       ovl_revert_cu_creds(&cc);
+
+       if (IS_ERR(temp))
+               return PTR_ERR(temp);
+
+       err = ovl_copy_up_inode(c, temp);
+       if (err)
+               goto out_dput;
+
+       inode_lock_nested(udir, I_MUTEX_PARENT);
+
+       upper = lookup_one_len(c->destname.name, c->destdir, c->destname.len);
+       err = PTR_ERR(upper);
+       if (!IS_ERR(upper)) {
+               err = ovl_do_link(temp, udir, upper);
+               dput(upper);
+       }
+       inode_unlock(udir);
+
+       if (err)
+               goto out_dput;
+
+       if (!c->metacopy)
+               ovl_set_upperdata(d_inode(c->dentry));
+       ovl_inode_update(d_inode(c->dentry), temp);
+
+       return 0;
+
+out_dput:
+       dput(temp);
+       return err;
 }
 
 /*
@@ -646,18 +685,10 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
        }
 
        /* Should we copyup with O_TMPFILE or with workdir? */
-       if (S_ISREG(c->stat.mode) && ofs->tmpfile) {
-               c->tmpfile = true;
-               err = ovl_copy_up_locked(c);
-       } else {
-               err = ovl_lock_rename_workdir(c->workdir, c->destdir);
-               if (!err) {
-                       err = ovl_copy_up_locked(c);
-                       unlock_rename(c->workdir, c->destdir);
-               }
-       }
-
-
+       if (S_ISREG(c->stat.mode) && ofs->tmpfile)
+               err = ovl_copy_up_tmpfile(c);
+       else
+               err = ovl_copy_up_workdir(c);
        if (err)
                goto out;
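
The split above keeps the two copy-up strategies cleanly apart: ovl_copy_up_workdir() renames a temp file out of the workdir under the cross-directory rename lock, while ovl_copy_up_tmpfile() needs no such lock because an O_TMPFILE file has no name until it is linked in. A rough userspace analogy of the tmpfile path (not the kernel code; uses the unprivileged /proc/self/fd linkat trick):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Create an anonymous file in dir, fill it, then give it a name.  If
 * anything fails before linkat(), the file simply vanishes: there is no
 * temp name to clean up, which is why no workdir locking is needed. */
int copy_up_analogy(const char *dir, const char *name)
{
	char proc[64];
	int fd = open(dir, O_TMPFILE | O_WRONLY, 0600);

	if (fd < 0)
		return -1;
	/* ... write the data and set attributes here ... */
	snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, proc, AT_FDCWD, name, AT_SYMLINK_FOLLOW) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}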
 
index 276914ae3c60aaf297747f03b58d4582df335ec9..c6289147c7871f165b70f5ca8e13668f3010d4b0 100644 (file)
@@ -414,13 +414,12 @@ static int ovl_set_upper_acl(struct dentry *upperdentry, const char *name,
        if (!IS_ENABLED(CONFIG_FS_POSIX_ACL) || !acl)
                return 0;
 
-       size = posix_acl_to_xattr(NULL, acl, NULL, 0);
+       size = posix_acl_xattr_size(acl->a_count);
        buffer = kmalloc(size, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;
 
-       size = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
-       err = size;
+       err = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
        if (err < 0)
                goto out_free;
 
@@ -463,6 +462,10 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        if (IS_ERR(upper))
                goto out_unlock;
 
+       err = -ESTALE;
+       if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
+               goto out_dput;
+
        newdentry = ovl_create_temp(workdir, cattr);
        err = PTR_ERR(newdentry);
        if (IS_ERR(newdentry))
@@ -652,7 +655,6 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
                    struct dentry *new)
 {
        int err;
-       bool locked = false;
        struct inode *inode;
 
        err = ovl_want_write(old);
@@ -663,13 +665,17 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
        if (err)
                goto out_drop_write;
 
+       err = ovl_copy_up(new->d_parent);
+       if (err)
+               goto out_drop_write;
+
        if (ovl_is_metacopy_dentry(old)) {
                err = ovl_set_redirect(old, false);
                if (err)
                        goto out_drop_write;
        }
 
-       err = ovl_nlink_start(old, &locked);
+       err = ovl_nlink_start(old);
        if (err)
                goto out_drop_write;
 
@@ -682,7 +688,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
        if (err)
                iput(inode);
 
-       ovl_nlink_end(old, locked);
+       ovl_nlink_end(old);
 out_drop_write:
        ovl_drop_write(old);
 out:
@@ -807,7 +813,6 @@ static bool ovl_pure_upper(struct dentry *dentry)
 static int ovl_do_remove(struct dentry *dentry, bool is_dir)
 {
        int err;
-       bool locked = false;
        const struct cred *old_cred;
        struct dentry *upperdentry;
        bool lower_positive = ovl_lower_positive(dentry);
@@ -828,7 +833,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
        if (err)
                goto out_drop_write;
 
-       err = ovl_nlink_start(dentry, &locked);
+       err = ovl_nlink_start(dentry);
        if (err)
                goto out_drop_write;
 
@@ -844,7 +849,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                else
                        drop_nlink(dentry->d_inode);
        }
-       ovl_nlink_end(dentry, locked);
+       ovl_nlink_end(dentry);
 
        /*
         * Copy ctime
@@ -1008,7 +1013,6 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                      unsigned int flags)
 {
        int err;
-       bool locked = false;
        struct dentry *old_upperdir;
        struct dentry *new_upperdir;
        struct dentry *olddentry;
@@ -1017,6 +1021,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
        bool old_opaque;
        bool new_opaque;
        bool cleanup_whiteout = false;
+       bool update_nlink = false;
        bool overwrite = !(flags & RENAME_EXCHANGE);
        bool is_dir = d_is_dir(old);
        bool new_is_dir = d_is_dir(new);
@@ -1074,10 +1079,12 @@ static int ovl_rename(struct inode *olddir, struct dentry *old,
                err = ovl_copy_up(new);
                if (err)
                        goto out_drop_write;
-       } else {
-               err = ovl_nlink_start(new, &locked);
+       } else if (d_inode(new)) {
+               err = ovl_nlink_start(new);
                if (err)
                        goto out_drop_write;
+
+               update_nlink = true;
        }
 
        old_cred = ovl_override_creds(old->d_sb);
@@ -1206,7 +1213,8 @@ out_unlock:
        unlock_rename(new_upperdir, old_upperdir);
 out_revert_creds:
        revert_creds(old_cred);
-       ovl_nlink_end(new, locked);
+       if (update_nlink)
+               ovl_nlink_end(new);
 out_drop_write:
        ovl_drop_write(old);
 out:
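
With the bool *locked out-parameter gone, the contract is simpler: ovl_nlink_start() returns with the overlay inode lock held exactly when it returns 0, and every successful start is paired with ovl_nlink_end(), which the ovl_rename() hunk above tracks with its update_nlink flag. A hypothetical caller fragment:

	err = ovl_nlink_start(dentry);
	if (err)
		return err;		/* lock is not held on failure */

	/* ... update upper and overlay inode nlink ... */

	ovl_nlink_end(dentry);		/* always drops the lock */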
index 986313da0c8895352d2216f0fb0b78d3854064fb..84dd957efa24a17e8a66416117790896fd1d04ff 100644 (file)
@@ -434,14 +434,14 @@ enum ovl_copyop {
        OVL_DEDUPE,
 };
 
-static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in,
+static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
                            struct file *file_out, loff_t pos_out,
-                           u64 len, unsigned int flags, enum ovl_copyop op)
+                           loff_t len, unsigned int flags, enum ovl_copyop op)
 {
        struct inode *inode_out = file_inode(file_out);
        struct fd real_in, real_out;
        const struct cred *old_cred;
-       ssize_t ret;
+       loff_t ret;
 
        ret = ovl_real_fdget(file_out, &real_out);
        if (ret)
@@ -462,12 +462,13 @@ static ssize_t ovl_copyfile(struct file *file_in, loff_t pos_in,
 
        case OVL_CLONE:
                ret = vfs_clone_file_range(real_in.file, pos_in,
-                                          real_out.file, pos_out, len);
+                                          real_out.file, pos_out, len, flags);
                break;
 
        case OVL_DEDUPE:
                ret = vfs_dedupe_file_range_one(real_in.file, pos_in,
-                                               real_out.file, pos_out, len);
+                                               real_out.file, pos_out, len,
+                                               flags);
                break;
        }
        revert_creds(old_cred);
@@ -489,26 +490,31 @@ static ssize_t ovl_copy_file_range(struct file *file_in, loff_t pos_in,
                            OVL_COPY);
 }
 
-static int ovl_clone_file_range(struct file *file_in, loff_t pos_in,
-                               struct file *file_out, loff_t pos_out, u64 len)
+static loff_t ovl_remap_file_range(struct file *file_in, loff_t pos_in,
+                                  struct file *file_out, loff_t pos_out,
+                                  loff_t len, unsigned int remap_flags)
 {
-       return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0,
-                           OVL_CLONE);
-}
+       enum ovl_copyop op;
+
+       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+               return -EINVAL;
+
+       if (remap_flags & REMAP_FILE_DEDUP)
+               op = OVL_DEDUPE;
+       else
+               op = OVL_CLONE;
 
-static int ovl_dedupe_file_range(struct file *file_in, loff_t pos_in,
-                                struct file *file_out, loff_t pos_out, u64 len)
-{
        /*
         * Don't copy up because of a dedupe request; this wouldn't make sense
         * most of the time (data would be duplicated instead of deduplicated).
         */
-       if (!ovl_inode_upper(file_inode(file_in)) ||
-           !ovl_inode_upper(file_inode(file_out)))
+       if (op == OVL_DEDUPE &&
+           (!ovl_inode_upper(file_inode(file_in)) ||
+            !ovl_inode_upper(file_inode(file_out))))
                return -EPERM;
 
-       return ovl_copyfile(file_in, pos_in, file_out, pos_out, len, 0,
-                           OVL_DEDUPE);
+       return ovl_copyfile(file_in, pos_in, file_out, pos_out, len,
+                           remap_flags, op);
 }
 
 const struct file_operations ovl_file_operations = {
@@ -525,6 +531,5 @@ const struct file_operations ovl_file_operations = {
        .compat_ioctl   = ovl_compat_ioctl,
 
        .copy_file_range        = ovl_copy_file_range,
-       .clone_file_range       = ovl_clone_file_range,
-       .dedupe_file_range      = ovl_dedupe_file_range,
+       .remap_file_range       = ovl_remap_file_range,
 };
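
This is the VFS-wide change running through this merge: the separate ->clone_file_range and ->dedupe_file_range methods collapse into one ->remap_file_range that returns bytes remapped (or -errno) and dispatches on remap_flags. A sketch of the shape a filesystem implementation now takes (myfs_* names are hypothetical):

	static loff_t myfs_remap_file_range(struct file *file_in, loff_t pos_in,
					    struct file *file_out, loff_t pos_out,
					    loff_t len, unsigned int remap_flags)
	{
		/* Reject flags this implementation does not understand. */
		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
			return -EINVAL;

		if (remap_flags & REMAP_FILE_DEDUP)
			return myfs_dedupe_range(file_in, pos_in,
						 file_out, pos_out, len);
		return myfs_clone_range(file_in, pos_in, file_out, pos_out, len);
	}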
index 3b7ed5d2279c6a8efde8180471bde94ef1020964..6bcc9dedc342cc7cf141abbc5220f4a0aa5ce1da 100644 (file)
@@ -286,13 +286,22 @@ int ovl_permission(struct inode *inode, int mask)
        if (err)
                return err;
 
-       old_cred = ovl_override_creds(inode->i_sb);
-       if (!upperinode &&
-           !special_file(realinode->i_mode) && mask & MAY_WRITE) {
+       /* No need to check access on the underlying inode for special files */
+       if (special_file(realinode->i_mode))
+               return 0;
+
+       /* No need to check the underlying inode for execute permission */
+       mask &= ~MAY_EXEC;
+       if ((mask & (MAY_READ | MAY_WRITE)) == 0)
+               return 0;
+
+       /* Lower files get copied up, so turn write access into read */
+       if (!upperinode && mask & MAY_WRITE) {
                mask &= ~(MAY_WRITE | MAY_APPEND);
-               /* Make sure mounter can read file for copy up later */
                mask |= MAY_READ;
        }
+
+       old_cred = ovl_override_creds(inode->i_sb);
        err = inode_permission(realinode, mask);
        revert_creds(old_cred);
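
The reordering above means the underlying filesystem is never consulted for exec or for special files, only for read/write. A condensed restatement of the mask transform (hypothetical helper, same logic as the hunk):

	static int ovl_real_mask_sketch(int mask, bool has_upper)
	{
		mask &= ~MAY_EXEC;
		if (!(mask & (MAY_READ | MAY_WRITE)))
			return 0;	/* nothing to check underneath */
		if (!has_upper && (mask & MAY_WRITE)) {
			/* copy-up will need to read the lower file */
			mask &= ~(MAY_WRITE | MAY_APPEND);
			mask |= MAY_READ;
		}
		return mask;
	}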
 
index 9c0ca6a7becfbe56e15efd596fbc6540b4bbd859..efd372312ef1000709827eab0291b760ea224ae1 100644 (file)
@@ -422,8 +422,10 @@ int ovl_verify_set_fh(struct dentry *dentry, const char *name,
 
        fh = ovl_encode_real_fh(real, is_upper);
        err = PTR_ERR(fh);
-       if (IS_ERR(fh))
+       if (IS_ERR(fh)) {
+               fh = NULL;
                goto fail;
+       }
 
        err = ovl_verify_fh(dentry, name, fh);
        if (set && err == -ENODATA)
index a3c0d95843121e92a103a6b07628feb853c31399..5e45cb3630a06f37059d044412c3c089d84e28fe 100644 (file)
@@ -271,8 +271,8 @@ bool ovl_test_flag(unsigned long flag, struct inode *inode);
 bool ovl_inuse_trylock(struct dentry *dentry);
 void ovl_inuse_unlock(struct dentry *dentry);
 bool ovl_need_index(struct dentry *dentry);
-int ovl_nlink_start(struct dentry *dentry, bool *locked);
-void ovl_nlink_end(struct dentry *dentry, bool locked);
+int ovl_nlink_start(struct dentry *dentry);
+void ovl_nlink_end(struct dentry *dentry);
 int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir);
 int ovl_check_metacopy_xattr(struct dentry *dentry);
 bool ovl_is_metacopy_dentry(struct dentry *dentry);
@@ -290,6 +290,16 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb)
        return ofs->xino_bits;
 }
 
+static inline int ovl_inode_lock(struct inode *inode)
+{
+       return mutex_lock_interruptible(&OVL_I(inode)->lock);
+}
+
+static inline void ovl_inode_unlock(struct inode *inode)
+{
+       mutex_unlock(&OVL_I(inode)->lock);
+}
+
 
 /* namei.c */
 int ovl_check_fh_len(struct ovl_fh *fh, int fh_len);
index 30adc9d408a0df84455b86811ee468faf439c0c1..0116735cc32147ca3972275e2baa8a52d296f63f 100644 (file)
@@ -472,6 +472,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 {
        char *p;
        int err;
+       bool metacopy_opt = false, redirect_opt = false;
 
        config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
        if (!config->redirect_mode)
@@ -516,6 +517,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
                        config->redirect_mode = match_strdup(&args[0]);
                        if (!config->redirect_mode)
                                return -ENOMEM;
+                       redirect_opt = true;
                        break;
 
                case OPT_INDEX_ON:
@@ -548,6 +550,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
 
                case OPT_METACOPY_ON:
                        config->metacopy = true;
+                       metacopy_opt = true;
                        break;
 
                case OPT_METACOPY_OFF:
@@ -572,13 +575,32 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config)
        if (err)
                return err;
 
-       /* metacopy feature with upper requires redirect_dir=on */
-       if (config->upperdir && config->metacopy && !config->redirect_dir) {
-               pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=on\", falling back to metacopy=off.\n");
-               config->metacopy = false;
-       } else if (config->metacopy && !config->redirect_follow) {
-               pr_warn("overlayfs: metadata only copy up requires \"redirect_dir=follow\" on non-upper mount, falling back to metacopy=off.\n");
-               config->metacopy = false;
+       /*
+        * This is to make the logic below simpler.  It doesn't make any other
+        * difference, since config->redirect_dir is only used for upper.
+        */
+       if (!config->upperdir && config->redirect_follow)
+               config->redirect_dir = true;
+
+       /* Resolve metacopy -> redirect_dir dependency */
+       if (config->metacopy && !config->redirect_dir) {
+               if (metacopy_opt && redirect_opt) {
+                       pr_err("overlayfs: conflicting options: metacopy=on,redirect_dir=%s\n",
+                              config->redirect_mode);
+                       return -EINVAL;
+               }
+               if (redirect_opt) {
+                       /*
+                        * There was an explicit redirect_dir=... that resulted
+                        * in this conflict.
+                        */
+                       pr_info("overlayfs: disabling metacopy due to redirect_dir=%s\n",
+                               config->redirect_mode);
+                       config->metacopy = false;
+               } else {
+                       /* Automatically enable redirect otherwise. */
+                       config->redirect_follow = config->redirect_dir = true;
+               }
        }
 
        return 0;
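
The dependency resolution above, summarized (metacopy can also come enabled from the module default, not just the mount option):

	explicit metacopy=on + explicit conflicting redirect_dir=	-> -EINVAL
	explicit redirect_dir=, metacopy enabled by default		-> metacopy forced off
	metacopy on, redirect mode left at its default			-> redirect_dir/follow auto-enabled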
@@ -1175,9 +1197,29 @@ out:
        return err;
 }
 
+static bool ovl_lower_uuid_ok(struct ovl_fs *ofs, const uuid_t *uuid)
+{
+       unsigned int i;
+
+       if (!ofs->config.nfs_export && !(ofs->config.index && ofs->upper_mnt))
+               return true;
+
+       for (i = 0; i < ofs->numlowerfs; i++) {
+               /*
+                * We use uuid to associate an overlay lower file handle with a
+                * lower layer, so we can accept lower fs with null uuid as long
+                * as all lower layers with null uuid are on the same fs.
+                */
+               if (uuid_equal(&ofs->lower_fs[i].sb->s_uuid, uuid))
+                       return false;
+       }
+       return true;
+}
+
 /* Get a unique fsid for the layer */
-static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
+static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
 {
+       struct super_block *sb = path->mnt->mnt_sb;
        unsigned int i;
        dev_t dev;
        int err;
@@ -1191,6 +1233,14 @@ static int ovl_get_fsid(struct ovl_fs *ofs, struct super_block *sb)
                        return i + 1;
        }
 
+       if (!ovl_lower_uuid_ok(ofs, &sb->s_uuid)) {
+               ofs->config.index = false;
+               ofs->config.nfs_export = false;
+               pr_warn("overlayfs: %s uuid detected in lower fs '%pd2', falling back to index=off,nfs_export=off.\n",
+                       uuid_is_null(&sb->s_uuid) ? "null" : "conflicting",
+                       path->dentry);
+       }
+
        err = get_anon_bdev(&dev);
        if (err) {
                pr_err("overlayfs: failed to get anonymous bdev for lowerpath\n");
@@ -1225,7 +1275,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
                struct vfsmount *mnt;
                int fsid;
 
-               err = fsid = ovl_get_fsid(ofs, stack[i].mnt->mnt_sb);
+               err = fsid = ovl_get_fsid(ofs, &stack[i]);
                if (err < 0)
                        goto out;
 
index ace4fe4c39a9307aa6008702f0195a92af74627c..7c01327b1852053c58feff725c3d5aac9e6b9717 100644 (file)
@@ -65,8 +65,7 @@ struct super_block *ovl_same_sb(struct super_block *sb)
  */
 int ovl_can_decode_fh(struct super_block *sb)
 {
-       if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry ||
-           uuid_is_null(&sb->s_uuid))
+       if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry)
                return 0;
 
        return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN;
@@ -522,13 +521,13 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags)
 
 int ovl_copy_up_start(struct dentry *dentry, int flags)
 {
-       struct ovl_inode *oi = OVL_I(d_inode(dentry));
+       struct inode *inode = d_inode(dentry);
        int err;
 
-       err = mutex_lock_interruptible(&oi->lock);
+       err = ovl_inode_lock(inode);
        if (!err && ovl_already_copied_up_locked(dentry, flags)) {
                err = 1; /* Already copied up */
-               mutex_unlock(&oi->lock);
+               ovl_inode_unlock(inode);
        }
 
        return err;
@@ -536,7 +535,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags)
 
 void ovl_copy_up_end(struct dentry *dentry)
 {
-       mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+       ovl_inode_unlock(d_inode(dentry));
 }
 
 bool ovl_check_origin_xattr(struct dentry *dentry)
@@ -739,14 +738,14 @@ fail:
  * Operations that change overlay inode and upper inode nlink need to be
  * synchronized with copy up for persistent nlink accounting.
  */
-int ovl_nlink_start(struct dentry *dentry, bool *locked)
+int ovl_nlink_start(struct dentry *dentry)
 {
-       struct ovl_inode *oi = OVL_I(d_inode(dentry));
+       struct inode *inode = d_inode(dentry);
        const struct cred *old_cred;
        int err;
 
-       if (!d_inode(dentry))
-               return 0;
+       if (WARN_ON(!inode))
+               return -ENOENT;
 
        /*
         * With inodes index is enabled, we store the union overlay nlink
@@ -768,11 +767,11 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
                        return err;
        }
 
-       err = mutex_lock_interruptible(&oi->lock);
+       err = ovl_inode_lock(inode);
        if (err)
                return err;
 
-       if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, d_inode(dentry)))
+       if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode))
                goto out;
 
        old_cred = ovl_override_creds(dentry->d_sb);
@@ -787,27 +786,24 @@ int ovl_nlink_start(struct dentry *dentry, bool *locked)
 
 out:
        if (err)
-               mutex_unlock(&oi->lock);
-       else
-               *locked = true;
+               ovl_inode_unlock(inode);
 
        return err;
 }
 
-void ovl_nlink_end(struct dentry *dentry, bool locked)
+void ovl_nlink_end(struct dentry *dentry)
 {
-       if (locked) {
-               if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) &&
-                   d_inode(dentry)->i_nlink == 0) {
-                       const struct cred *old_cred;
+       struct inode *inode = d_inode(dentry);
 
-                       old_cred = ovl_override_creds(dentry->d_sb);
-                       ovl_cleanup_index(dentry);
-                       revert_creds(old_cred);
-               }
+       if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) {
+               const struct cred *old_cred;
 
-               mutex_unlock(&OVL_I(d_inode(dentry))->lock);
+               old_cred = ovl_override_creds(dentry->d_sb);
+               ovl_cleanup_index(dentry);
+               revert_creds(old_cred);
        }
+
+       ovl_inode_unlock(inode);
 }
 
 int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir)
index 7e9f07bf260d20bb0a0cd4cd6b6b4abe82b23e20..ce34654794472d0a7b8c2574340c18cc7d594f7a 100644 (file)
@@ -2905,6 +2905,21 @@ static int proc_pid_patch_state(struct seq_file *m, struct pid_namespace *ns,
 }
 #endif /* CONFIG_LIVEPATCH */
 
+#ifdef CONFIG_STACKLEAK_METRICS
+static int proc_stack_depth(struct seq_file *m, struct pid_namespace *ns,
+                               struct pid *pid, struct task_struct *task)
+{
+       unsigned long prev_depth = THREAD_SIZE -
+                               (task->prev_lowest_stack & (THREAD_SIZE - 1));
+       unsigned long depth = THREAD_SIZE -
+                               (task->lowest_stack & (THREAD_SIZE - 1));
+
+       seq_printf(m, "previous stack depth: %lu\nstack depth: %lu\n",
+                                                       prev_depth, depth);
+       return 0;
+}
+#endif /* CONFIG_STACKLEAK_METRICS */
+
 /*
  * Thread groups
  */
@@ -3006,6 +3021,9 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_LIVEPATCH
        ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
 #endif
+#ifdef CONFIG_STACKLEAK_METRICS
+       ONE("stack_depth", S_IRUGO, proc_stack_depth),
+#endif
 };
 
 static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
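
CONFIG_STACKLEAK_METRICS exposes the stack-depth watermarks recorded by the stackleak instrumentation through procfs. A userspace sketch that reads the new file (assumes a kernel built with the option):

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/stack_depth", "r");

	if (!f) {
		perror("fopen");	/* likely CONFIG_STACKLEAK_METRICS=n */
		return 1;
	}
	/* Two lines: "previous stack depth: N" and "stack depth: N". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}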
index ffcff6516e89cccfcbbc138a6d7331e4ca9fb63b..e02a9039b5ea4410d3b3a986d7f4b6566e196eff 100644 (file)
@@ -816,17 +816,14 @@ static int ramoops_probe(struct platform_device *pdev)
 
        cxt->pstore.data = cxt;
        /*
-        * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
-        * have to handle dumps, we must have at least record_size buffer. And
-        * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
-        * ZERO_SIZE_PTR).
+        * Since bufsize is only used for dmesg crash dumps, it
+        * must match the size of the dprz record (after PRZ header
+        * and ECC bytes have been accounted for).
         */
-       if (cxt->console_size)
-               cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
-       cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
-       cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
+       cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+       cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
        if (!cxt->pstore.buf) {
-               pr_err("cannot allocate pstore buffer\n");
+               pr_err("cannot allocate pstore crash dump buffer\n");
                err = -ENOMEM;
                goto fail_clear;
        }
index 603794b207ebad39946e96bcbfff73332fc9a0a0..58f30537c47a0a9d04cdba4abbd78188379ae463 100644 (file)
@@ -1407,7 +1407,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
                goto fput_in;
        if (!(out.file->f_mode & FMODE_WRITE))
                goto fput_out;
-       retval = -EINVAL;
        in_inode = file_inode(in.file);
        out_inode = file_inode(out.file);
        out_pos = out.file->f_pos;
@@ -1588,11 +1587,15 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in,
         * Try cloning first, this is supported by more file systems, and
         * more efficient if both clone and copy are supported (e.g. NFS).
         */
-       if (file_in->f_op->clone_file_range) {
-               ret = file_in->f_op->clone_file_range(file_in, pos_in,
-                               file_out, pos_out, len);
-               if (ret == 0) {
-                       ret = len;
+       if (file_in->f_op->remap_file_range) {
+               loff_t cloned;
+
+               cloned = file_in->f_op->remap_file_range(file_in, pos_in,
+                               file_out, pos_out,
+                               min_t(loff_t, MAX_RW_COUNT, len),
+                               REMAP_FILE_CAN_SHORTEN);
+               if (cloned > 0) {
+                       ret = cloned;
                        goto done;
                }
        }
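
vfs_copy_file_range() now probes ->remap_file_range with REMAP_FILE_CAN_SHORTEN first, so a reflink-capable filesystem can satisfy the copy without moving any data. From userspace this all sits behind copy_file_range(2); a sketch of the usual loop (the kernel decides whether it reflinks or copies):

#define _GNU_SOURCE
#include <unistd.h>

/* Copy len bytes at the current file offsets; returns 0 on success,
 * -1 on error with errno set.  A single call may transfer less than
 * requested, hence the loop. */
int copy_all(int fd_in, int fd_out, size_t len)
{
	while (len > 0) {
		ssize_t n = copy_file_range(fd_in, NULL, fd_out, NULL, len, 0);

		if (n < 0)
			return -1;
		if (n == 0)
			break;		/* EOF on the input file */
		len -= (size_t)n;
	}
	return 0;
}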
@@ -1686,11 +1689,12 @@ out2:
        return ret;
 }
 
-static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
+static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
+                            bool write)
 {
        struct inode *inode = file_inode(file);
 
-       if (unlikely(pos < 0))
+       if (unlikely(pos < 0 || len < 0))
                return -EINVAL;
 
         if (unlikely((loff_t) (pos + len) < 0))
@@ -1708,22 +1712,150 @@ static int clone_verify_area(struct file *file, loff_t pos, u64 len, bool write)
 
        return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
 }
+/*
+ * Ensure that we don't remap a partial EOF block in the middle of something
+ * else.  Assume that the offsets have already been checked for block
+ * alignment.
+ *
+ * For deduplication we always scale down to the previous block because we
+ * can't meaningfully compare post-EOF contents.
+ *
+ * For clone we only link a partial EOF block above the destination file's EOF.
+ *
+ * Shorten the request if possible.
+ */
+static int generic_remap_check_len(struct inode *inode_in,
+                                  struct inode *inode_out,
+                                  loff_t pos_out,
+                                  loff_t *len,
+                                  unsigned int remap_flags)
+{
+       u64 blkmask = i_blocksize(inode_in) - 1;
+       loff_t new_len = *len;
+
+       if ((*len & blkmask) == 0)
+               return 0;
+
+       if ((remap_flags & REMAP_FILE_DEDUP) ||
+           pos_out + *len < i_size_read(inode_out))
+               new_len &= ~blkmask;
+
+       if (new_len == *len)
+               return 0;
+
+       if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
+               *len = new_len;
+               return 0;
+       }
+
+       return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
+}
+
+/*
+ * Read a page's worth of file data into the page cache.  Return the page
+ * locked.
+ */
+static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+{
+       struct page *page;
+
+       page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
+       if (IS_ERR(page))
+               return page;
+       if (!PageUptodate(page)) {
+               put_page(page);
+               return ERR_PTR(-EIO);
+       }
+       lock_page(page);
+       return page;
+}
+
+/*
+ * Compare extents of two files to see if they are the same.
+ * Caller must have locked both inodes to prevent write races.
+ */
+static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
+                                        struct inode *dest, loff_t destoff,
+                                        loff_t len, bool *is_same)
+{
+       loff_t src_poff;
+       loff_t dest_poff;
+       void *src_addr;
+       void *dest_addr;
+       struct page *src_page;
+       struct page *dest_page;
+       loff_t cmp_len;
+       bool same;
+       int error;
+
+       error = -EINVAL;
+       same = true;
+       while (len) {
+               src_poff = srcoff & (PAGE_SIZE - 1);
+               dest_poff = destoff & (PAGE_SIZE - 1);
+               cmp_len = min(PAGE_SIZE - src_poff,
+                             PAGE_SIZE - dest_poff);
+               cmp_len = min(cmp_len, len);
+               if (cmp_len <= 0)
+                       goto out_error;
+
+               src_page = vfs_dedupe_get_page(src, srcoff);
+               if (IS_ERR(src_page)) {
+                       error = PTR_ERR(src_page);
+                       goto out_error;
+               }
+               dest_page = vfs_dedupe_get_page(dest, destoff);
+               if (IS_ERR(dest_page)) {
+                       error = PTR_ERR(dest_page);
+                       unlock_page(src_page);
+                       put_page(src_page);
+                       goto out_error;
+               }
+               src_addr = kmap_atomic(src_page);
+               dest_addr = kmap_atomic(dest_page);
+
+               flush_dcache_page(src_page);
+               flush_dcache_page(dest_page);
+
+               if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
+                       same = false;
+
+               kunmap_atomic(dest_addr);
+               kunmap_atomic(src_addr);
+               unlock_page(dest_page);
+               unlock_page(src_page);
+               put_page(dest_page);
+               put_page(src_page);
+
+               if (!same)
+                       break;
+
+               srcoff += cmp_len;
+               destoff += cmp_len;
+               len -= cmp_len;
+       }
+
+       *is_same = same;
+       return 0;
+
+out_error:
+       return error;
+}
 
 /*
  * Check that the two inodes are eligible for cloning, the ranges make
  * sense, and then flush all dirty data.  Caller must ensure that the
  * inodes have been locked against any other modifications.
  *
- * Returns: 0 for "nothing to clone", 1 for "something to clone", or
- * the usual negative error code.
+ * If there's an error, then the usual negative error code is returned.
+ * Otherwise returns 0 with *len set to the request length.
  */
-int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
-                              struct inode *inode_out, loff_t pos_out,
-                              u64 *len, bool is_dedupe)
+int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+                                 struct file *file_out, loff_t pos_out,
+                                 loff_t *len, unsigned int remap_flags)
 {
-       loff_t bs = inode_out->i_sb->s_blocksize;
-       loff_t blen;
-       loff_t isize;
+       struct inode *inode_in = file_inode(file_in);
+       struct inode *inode_out = file_inode(file_out);
        bool same_inode = (inode_in == inode_out);
        int ret;
 
@@ -1740,50 +1872,24 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
        if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
                return -EINVAL;
 
-       /* Are we going all the way to the end? */
-       isize = i_size_read(inode_in);
-       if (isize == 0)
-               return 0;
-
        /* Zero length dedupe exits immediately; reflink goes to EOF. */
        if (*len == 0) {
-               if (is_dedupe || pos_in == isize)
+               loff_t isize = i_size_read(inode_in);
+
+               if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
                        return 0;
                if (pos_in > isize)
                        return -EINVAL;
                *len = isize - pos_in;
+               if (*len == 0)
+                       return 0;
        }
 
-       /* Ensure offsets don't wrap and the input is inside i_size */
-       if (pos_in + *len < pos_in || pos_out + *len < pos_out ||
-           pos_in + *len > isize)
-               return -EINVAL;
-
-       /* Don't allow dedupe past EOF in the dest file */
-       if (is_dedupe) {
-               loff_t  disize;
-
-               disize = i_size_read(inode_out);
-               if (pos_out >= disize || pos_out + *len > disize)
-                       return -EINVAL;
-       }
-
-       /* If we're linking to EOF, continue to the block boundary. */
-       if (pos_in + *len == isize)
-               blen = ALIGN(isize, bs) - pos_in;
-       else
-               blen = *len;
-
-       /* Only reflink if we're aligned to block boundaries */
-       if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_in + blen, bs) ||
-           !IS_ALIGNED(pos_out, bs) || !IS_ALIGNED(pos_out + blen, bs))
-               return -EINVAL;
-
-       /* Don't allow overlapped reflink within the same file */
-       if (same_inode) {
-               if (pos_out + blen > pos_in && pos_out < pos_in + blen)
-                       return -EINVAL;
-       }
+       /* Check that we don't violate system file offset limits. */
+       ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
+                       remap_flags);
+       if (ret)
+               return ret;
 
        /* Wait for the completion of any pending IOs on both files */
        inode_dio_wait(inode_in);
@@ -1803,7 +1909,7 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
        /*
         * Check that the extents are the same.
         */
-       if (is_dedupe) {
+       if (remap_flags & REMAP_FILE_DEDUP) {
                bool            is_same = false;
 
                ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
@@ -1814,16 +1920,43 @@ int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
                        return -EBADE;
        }
 
-       return 1;
+       ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
+                       remap_flags);
+       if (ret)
+               return ret;
+
+       /* If we can't alter the file contents (a dedupe request), we're done. */
+       if (!(remap_flags & REMAP_FILE_DEDUP)) {
+               /* Update the timestamps, since we can alter file contents. */
+               if (!(file_out->f_mode & FMODE_NOCMTIME)) {
+                       ret = file_update_time(file_out);
+                       if (ret)
+                               return ret;
+               }
+
+               /*
+                * Clear the security bits if the process is not being run by
+                * root.  This keeps people from modifying setuid and setgid
+                * binaries.
+                */
+               ret = file_remove_privs(file_out);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
 }
-EXPORT_SYMBOL(vfs_clone_file_prep_inodes);
+EXPORT_SYMBOL(generic_remap_file_range_prep);
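
generic_remap_file_range_prep() folds the old vfs_clone_file_prep_inodes() checks together with the dedupe content compare, range shortening, timestamp update, and privilege stripping, so a filesystem calls one helper between locking the inodes and doing the actual remap. A hedged sketch of the expected call site (myfs_remap_blocks is hypothetical; locking elided):

	loff_t ret;

	/* inodes already locked; fs-specific eligibility checks done */
	ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
					    pos_out, &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;	/* error, or nothing left to remap */

	ret = myfs_remap_blocks(file_in, pos_in, file_out, pos_out, len);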
 
-int do_clone_file_range(struct file *file_in, loff_t pos_in,
-                       struct file *file_out, loff_t pos_out, u64 len)
+loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+                          struct file *file_out, loff_t pos_out,
+                          loff_t len, unsigned int remap_flags)
 {
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
-       int ret;
+       loff_t ret;
+
+       WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
 
        if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
                return -EISDIR;
@@ -1843,155 +1976,76 @@ int do_clone_file_range(struct file *file_in, loff_t pos_in,
            (file_out->f_flags & O_APPEND))
                return -EBADF;
 
-       if (!file_in->f_op->clone_file_range)
+       if (!file_in->f_op->remap_file_range)
                return -EOPNOTSUPP;
 
-       ret = clone_verify_area(file_in, pos_in, len, false);
+       ret = remap_verify_area(file_in, pos_in, len, false);
        if (ret)
                return ret;
 
-       ret = clone_verify_area(file_out, pos_out, len, true);
+       ret = remap_verify_area(file_out, pos_out, len, true);
        if (ret)
                return ret;
 
-       if (pos_in + len > i_size_read(inode_in))
-               return -EINVAL;
-
-       ret = file_in->f_op->clone_file_range(file_in, pos_in,
-                       file_out, pos_out, len);
-       if (!ret) {
-               fsnotify_access(file_in);
-               fsnotify_modify(file_out);
-       }
+       ret = file_in->f_op->remap_file_range(file_in, pos_in,
+                       file_out, pos_out, len, remap_flags);
+       if (ret < 0)
+               return ret;
 
+       fsnotify_access(file_in);
+       fsnotify_modify(file_out);
        return ret;
 }
 EXPORT_SYMBOL(do_clone_file_range);
 
-int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-                        struct file *file_out, loff_t pos_out, u64 len)
+loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+                           struct file *file_out, loff_t pos_out,
+                           loff_t len, unsigned int remap_flags)
 {
-       int ret;
+       loff_t ret;
 
        file_start_write(file_out);
-       ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len);
+       ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
+                                 remap_flags);
        file_end_write(file_out);
 
        return ret;
 }
 EXPORT_SYMBOL(vfs_clone_file_range);
 
-/*
- * Read a page's worth of file data into the page cache.  Return the page
- * locked.
- */
-static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+/* Check whether we are allowed to dedupe the destination file */
+static bool allow_file_dedupe(struct file *file)
 {
-       struct address_space *mapping;
-       struct page *page;
-       pgoff_t n;
-
-       n = offset >> PAGE_SHIFT;
-       mapping = inode->i_mapping;
-       page = read_mapping_page(mapping, n, NULL);
-       if (IS_ERR(page))
-               return page;
-       if (!PageUptodate(page)) {
-               put_page(page);
-               return ERR_PTR(-EIO);
-       }
-       lock_page(page);
-       return page;
+       if (capable(CAP_SYS_ADMIN))
+               return true;
+       if (file->f_mode & FMODE_WRITE)
+               return true;
+       if (uid_eq(current_fsuid(), file_inode(file)->i_uid))
+               return true;
+       if (!inode_permission(file_inode(file), MAY_WRITE))
+               return true;
+       return false;
 }
 
-/*
- * Compare extents of two files to see if they are the same.
- * Caller must have locked both inodes to prevent write races.
- */
-int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
-                                 struct inode *dest, loff_t destoff,
-                                 loff_t len, bool *is_same)
+loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+                                struct file *dst_file, loff_t dst_pos,
+                                loff_t len, unsigned int remap_flags)
 {
-       loff_t src_poff;
-       loff_t dest_poff;
-       void *src_addr;
-       void *dest_addr;
-       struct page *src_page;
-       struct page *dest_page;
-       loff_t cmp_len;
-       bool same;
-       int error;
+       loff_t ret;
 
-       error = -EINVAL;
-       same = true;
-       while (len) {
-               src_poff = srcoff & (PAGE_SIZE - 1);
-               dest_poff = destoff & (PAGE_SIZE - 1);
-               cmp_len = min(PAGE_SIZE - src_poff,
-                             PAGE_SIZE - dest_poff);
-               cmp_len = min(cmp_len, len);
-               if (cmp_len <= 0)
-                       goto out_error;
-
-               src_page = vfs_dedupe_get_page(src, srcoff);
-               if (IS_ERR(src_page)) {
-                       error = PTR_ERR(src_page);
-                       goto out_error;
-               }
-               dest_page = vfs_dedupe_get_page(dest, destoff);
-               if (IS_ERR(dest_page)) {
-                       error = PTR_ERR(dest_page);
-                       unlock_page(src_page);
-                       put_page(src_page);
-                       goto out_error;
-               }
-               src_addr = kmap_atomic(src_page);
-               dest_addr = kmap_atomic(dest_page);
-
-               flush_dcache_page(src_page);
-               flush_dcache_page(dest_page);
-
-               if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
-                       same = false;
-
-               kunmap_atomic(dest_addr);
-               kunmap_atomic(src_addr);
-               unlock_page(dest_page);
-               unlock_page(src_page);
-               put_page(dest_page);
-               put_page(src_page);
-
-               if (!same)
-                       break;
-
-               srcoff += cmp_len;
-               destoff += cmp_len;
-               len -= cmp_len;
-       }
-
-       *is_same = same;
-       return 0;
-
-out_error:
-       return error;
-}
-EXPORT_SYMBOL(vfs_dedupe_file_range_compare);
-
-int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
-                             struct file *dst_file, loff_t dst_pos, u64 len)
-{
-       s64 ret;
+       WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
+                                    REMAP_FILE_CAN_SHORTEN));
 
        ret = mnt_want_write_file(dst_file);
        if (ret)
                return ret;
 
-       ret = clone_verify_area(dst_file, dst_pos, len, true);
+       ret = remap_verify_area(dst_file, dst_pos, len, true);
        if (ret < 0)
                goto out_drop_write;
 
-       ret = -EINVAL;
-       if (!(capable(CAP_SYS_ADMIN) || (dst_file->f_mode & FMODE_WRITE)))
+       ret = -EPERM;
+       if (!allow_file_dedupe(dst_file))
                goto out_drop_write;
 
        ret = -EXDEV;
@@ -2003,11 +2057,16 @@ int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
                goto out_drop_write;
 
        ret = -EINVAL;
-       if (!dst_file->f_op->dedupe_file_range)
+       if (!dst_file->f_op->remap_file_range)
                goto out_drop_write;
 
-       ret = dst_file->f_op->dedupe_file_range(src_file, src_pos,
-                                               dst_file, dst_pos, len);
+       if (len == 0) {
+               ret = 0;
+               goto out_drop_write;
+       }
+
+       ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
+                       dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
 out_drop_write:
        mnt_drop_write_file(dst_file);
 
@@ -2024,7 +2083,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
        int i;
        int ret;
        u16 count = same->dest_count;
-       int deduped;
+       loff_t deduped;
 
        if (!(file->f_mode & FMODE_READ))
                return -EINVAL;
@@ -2035,17 +2094,18 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
        off = same->src_offset;
        len = same->src_length;
 
-       ret = -EISDIR;
        if (S_ISDIR(src->i_mode))
-               goto out;
+               return -EISDIR;
 
-       ret = -EINVAL;
        if (!S_ISREG(src->i_mode))
-               goto out;
+               return -EINVAL;
 
-       ret = clone_verify_area(file, off, len, false);
+       if (!file->f_op->remap_file_range)
+               return -EOPNOTSUPP;
+
+       ret = remap_verify_area(file, off, len, false);
        if (ret < 0)
-               goto out;
+               return ret;
        ret = 0;
 
        if (off + len > i_size_read(src))
@@ -2075,7 +2135,8 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
                }
 
                deduped = vfs_dedupe_file_range_one(file, off, dst_file,
-                                                   info->dest_offset, len);
+                                                   info->dest_offset, len,
+                                                   REMAP_FILE_CAN_SHORTEN);
                if (deduped == -EBADE)
                        info->status = FILE_DEDUPE_RANGE_DIFFERS;
                else if (deduped < 0)
@@ -2087,10 +2148,8 @@ next_fdput:
                fdput(dst_fd);
 next_loop:
                if (fatal_signal_pending(current))
-                       goto out;
+                       break;
        }
-
-out:
        return ret;
 }
 EXPORT_SYMBOL(vfs_dedupe_file_range);
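For context, a minimal userspace sketch of the FIDEDUPERANGE path that ends in vfs_dedupe_file_range() above; the struct and ioctl names are the established UAPI from <linux/fs.h>, while the descriptors and lengths are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Ask the kernel to share one destination range with the source. */
    static int dedupe_one(int src_fd, int dst_fd, __u64 len)
    {
            struct file_dedupe_range *arg;
            int ret;

            arg = calloc(1, sizeof(*arg) +
                            sizeof(struct file_dedupe_range_info));
            if (!arg)
                    return -1;

            arg->src_offset = 0;
            arg->src_length = len;
            arg->dest_count = 1;
            arg->info[0].dest_fd = dst_fd;
            arg->info[0].dest_offset = 0;

            ret = ioctl(src_fd, FIDEDUPERANGE, arg);
            if (ret == 0 && arg->info[0].status == FILE_DEDUPE_RANGE_DIFFERS)
                    fprintf(stderr, "contents differ, nothing shared\n");
            else if (ret == 0)
                    /* With REMAP_FILE_CAN_SHORTEN this may be < len. */
                    printf("%llu bytes deduped\n",
                           (unsigned long long)arg->info[0].bytes_deduped);
            free(arg);
            return ret;
    }

Note how vfs_dedupe_file_range() maps -EBADE from the per-range call to FILE_DEDUPE_RANGE_DIFFERS instead of failing the whole ioctl.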
index b3daa971f59771d6adf248a192db7d6e3121b015..de2ede048473cef81bec9161cf1842b70835ea63 100644 (file)
@@ -301,7 +301,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos,
        struct kiocb kiocb;
        int idx, ret;
 
-       iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len);
+       iov_iter_pipe(&to, READ, pipe, len);
        idx = to.idx;
        init_sync_kiocb(&kiocb, in);
        kiocb.ki_pos = *ppos;
@@ -386,7 +386,7 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
         */
        offset = *ppos & ~PAGE_MASK;
 
-       iov_iter_pipe(&to, ITER_PIPE | READ, pipe, len + offset);
+       iov_iter_pipe(&to, READ, pipe, len + offset);
 
        res = iov_iter_get_pages_alloc(&to, &pages, len + offset, &base);
        if (res <= 0)
@@ -745,8 +745,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
                        left -= this_len;
                }
 
-               iov_iter_bvec(&from, ITER_BVEC | WRITE, array, n,
-                             sd.total_len - left);
+               iov_iter_bvec(&from, WRITE, array, n, sd.total_len - left);
                ret = vfs_iter_write(out, &from, &sd.pos, 0);
                if (ret <= 0)
                        break;
@@ -946,11 +945,16 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
        sd->flags &= ~SPLICE_F_NONBLOCK;
        more = sd->flags & SPLICE_F_MORE;
 
+       WARN_ON_ONCE(pipe->nrbufs != 0);
+
        while (len) {
                size_t read_len;
                loff_t pos = sd->pos, prev_pos = pos;
 
-               ret = do_splice_to(in, &pos, pipe, len, flags);
+               /* Don't try to read more than the pipe has space for. */
+               read_len = min_t(size_t, len,
+                                (pipe->buffers - pipe->nrbufs) << PAGE_SHIFT);
+               ret = do_splice_to(in, &pos, pipe, read_len, flags);
                if (unlikely(ret <= 0))
                        goto out_release;
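To make the clamp concrete, a standalone sketch of the same arithmetic (the 16-buffer pipe default and 4 KiB page size are assumptions about a typical configuration, not values taken from this patch):

    #include <stddef.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages */

    static size_t clamp_to_pipe(size_t len, unsigned int buffers,
                                unsigned int nrbufs)
    {
            size_t room = (size_t)(buffers - nrbufs) << PAGE_SHIFT;

            return len < room ? len : room;
    }

    /* clamp_to_pipe(1 << 20, 16, 0) == 65536: with the pipe drained
     * (nrbufs == 0, as the WARN_ON_ONCE above asserts), a single
     * do_splice_to() call is capped at 64 KiB and the surrounding
     * loop iterates instead of over-reading. */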
 
index 499a20a5a0107f3ca8942485a7a5b3bc58737793..273736f41be386c9f466c5512bd395732fade3aa 100644 (file)
@@ -275,7 +275,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
                 }
         }
        brelse(bh);
-       return 0;
+       return err;
 }
 
 int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
index bbc78549be4cc3f0536033624723666ab13ef0d0..529856fbccd0ee5f6559519a3d9db12cf932a775 100644 (file)
@@ -7,6 +7,7 @@ config UBIFS_FS
        select CRYPTO if UBIFS_FS_ZLIB
        select CRYPTO_LZO if UBIFS_FS_LZO
        select CRYPTO_DEFLATE if UBIFS_FS_ZLIB
+       select CRYPTO_HASH_INFO
        depends on MTD_UBI
        help
          UBIFS is a file system for flash devices which works on top of UBI.
@@ -85,3 +86,13 @@ config UBIFS_FS_SECURITY
          the extended attribute support in advance.
 
          If you are not using a security module, say N.
+
+config UBIFS_FS_AUTHENTICATION
+       bool "UBIFS authentication support"
+       select CRYPTO_HMAC
+       help
+         Enable authentication support for UBIFS. This feature offers
+         protection against offline changes to both the data and the metadata
+         of the filesystem. If you say yes here, you should also select a
+         hashing algorithm such as sha256; hash algorithms are not selected
+         automatically since there are many different options.
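A sketch of the user-side setup this option enables, assuming the auth_key= and auth_hash_name= mount options and the "logon" key type described in Documentation/filesystems/ubifs-authentication.md from this series; the key description "ubifs:rootfs" and the paths are example values:

    #include <sys/mount.h>
    #include <keyutils.h>

    static int mount_authenticated(const void *key, size_t key_len)
    {
            /* ubifs_init_authentication() requires a "logon" key. */
            if (add_key("logon", "ubifs:rootfs", key, key_len,
                        KEY_SPEC_SESSION_KEYRING) < 0)
                    return -1;

            return mount("ubi0:rootfs", "/mnt", "ubifs", 0,
                         "auth_key=ubifs:rootfs,auth_hash_name=sha256");
    }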
index 6197d7e539e42d872a07763e524e1aad284d7a82..5f838319c8d533858dcf42b34631275ac8683797 100644 (file)
@@ -8,3 +8,4 @@ ubifs-y += recovery.o ioctl.o lpt_commit.o tnc_misc.o debug.o
 ubifs-y += misc.o
 ubifs-$(CONFIG_UBIFS_FS_ENCRYPTION) += crypto.o
 ubifs-$(CONFIG_UBIFS_FS_XATTR) += xattr.o
+ubifs-$(CONFIG_UBIFS_FS_AUTHENTICATION) += auth.o
diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
new file mode 100644 (file)
index 0000000..124e965
--- /dev/null
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is part of UBIFS.
+ *
+ * Copyright (C) 2018 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
+ */
+
+/*
+ * This file implements various helper functions for UBIFS authentication
+ * support.
+ */
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/algapi.h>
+#include <keys/user-type.h>
+
+#include "ubifs.h"
+
+/**
+ * ubifs_node_calc_hash - calculate the hash of a UBIFS node
+ * @c: UBIFS file-system description object
+ * @node: the node to calculate a hash for
+ * @hash: the returned hash
+ *
+ * Returns 0 for success or a negative error code otherwise.
+ */
+int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *node,
+                           u8 *hash)
+{
+       const struct ubifs_ch *ch = node;
+       SHASH_DESC_ON_STACK(shash, c->hash_tfm);
+       int err;
+
+       shash->tfm = c->hash_tfm;
+       shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_shash_digest(shash, node, le32_to_cpu(ch->len), hash);
+       if (err < 0)
+               return err;
+       return 0;
+}
+
+/**
+ * ubifs_hash_calc_hmac - calculate a HMAC from a hash
+ * @c: UBIFS file-system description object
+ * @hash: the hash to calculate a HMAC for
+ * @hmac: the returned HMAC
+ *
+ * Returns 0 for success or a negative error code otherwise.
+ */
+static int ubifs_hash_calc_hmac(const struct ubifs_info *c, const u8 *hash,
+                                u8 *hmac)
+{
+       SHASH_DESC_ON_STACK(shash, c->hmac_tfm);
+       int err;
+
+       shash->tfm = c->hmac_tfm;
+       shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_shash_digest(shash, hash, c->hash_len, hmac);
+       if (err < 0)
+               return err;
+       return 0;
+}
+
+/**
+ * ubifs_prepare_auth_node - Prepare an authentication node
+ * @c: UBIFS file-system description object
+ * @node: the authentication node to be prepared
+ * @inhash: a hash descriptor covering the previous nodes
+ *
+ * This function prepares an authentication node for writing onto flash.
+ * It creates a HMAC from the given input hash and writes it to the node.
+ *
+ * Returns 0 for success or a negative error code otherwise.
+ */
+int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
+                            struct shash_desc *inhash)
+{
+       SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+       struct ubifs_auth_node *auth = node;
+       u8 *hash;
+       int err;
+
+       hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS);
+       if (!hash)
+               return -ENOMEM;
+
+       hash_desc->tfm = c->hash_tfm;
+       hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ubifs_shash_copy_state(c, inhash, hash_desc);
+
+       err = crypto_shash_final(hash_desc, hash);
+       if (err)
+               goto out;
+
+       err = ubifs_hash_calc_hmac(c, hash, auth->hmac);
+       if (err)
+               goto out;
+
+       auth->ch.node_type = UBIFS_AUTH_NODE;
+       ubifs_prepare_node(c, auth, ubifs_auth_node_sz(c), 0);
+
+       err = 0;
+out:
+       kfree(hash);
+
+       return err;
+}
+
+static struct shash_desc *ubifs_get_desc(const struct ubifs_info *c,
+                                        struct crypto_shash *tfm)
+{
+       struct shash_desc *desc;
+       int err;
+
+       if (!ubifs_authenticated(c))
+               return NULL;
+
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+       if (!desc)
+               return ERR_PTR(-ENOMEM);
+
+       desc->tfm = tfm;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_shash_init(desc);
+       if (err) {
+               kfree(desc);
+               return ERR_PTR(err);
+       }
+
+       return desc;
+}
+
+/**
+ * __ubifs_hash_get_desc - get a descriptor suitable for hashing a node
+ * @c: UBIFS file-system description object
+ *
+ * This function returns a descriptor suitable for hashing a node. Free it
+ * after use with kfree().
+ */
+struct shash_desc *__ubifs_hash_get_desc(const struct ubifs_info *c)
+{
+       return ubifs_get_desc(c, c->hash_tfm);
+}
+
+/**
+ * __ubifs_shash_final - finalize shash
+ * @c: UBIFS file-system description object
+ * @desc: the descriptor
+ * @out: the output hash
+ *
+ * Simple wrapper around crypto_shash_final(), safe to be called with
+ * disabled authentication.
+ */
+int __ubifs_shash_final(const struct ubifs_info *c, struct shash_desc *desc,
+                       u8 *out)
+{
+       if (ubifs_authenticated(c))
+               return crypto_shash_final(desc, out);
+
+       return 0;
+}
+
+/**
+ * ubifs_bad_hash - Report hash mismatches
+ * @c: UBIFS file-system description object
+ * @node: the node
+ * @hash: the expected hash
+ * @lnum: the LEB @node was read from
+ * @offs: offset in LEB @node was read from
+ *
+ * This function reports a hash mismatch when a node has a different hash than
+ * expected.
+ */
+void ubifs_bad_hash(const struct ubifs_info *c, const void *node, const u8 *hash,
+                   int lnum, int offs)
+{
+       int len = min(c->hash_len, 20);
+       int cropped = len != c->hash_len;
+       const char *cont = cropped ? "..." : "";
+
+       u8 calc[UBIFS_HASH_ARR_SZ];
+
+       __ubifs_node_calc_hash(c, node, calc);
+
+       ubifs_err(c, "hash mismatch on node at LEB %d:%d", lnum, offs);
+       ubifs_err(c, "hash expected:   %*ph%s", len, hash, cont);
+       ubifs_err(c, "hash calculated: %*ph%s", len, calc, cont);
+}
+
+/**
+ * __ubifs_node_check_hash - check the hash of a node against given hash
+ * @c: UBIFS file-system description object
+ * @node: the node
+ * @expected: the expected hash
+ *
+ * This function calculates a hash over a node and compares it to the given hash.
+ * Returns 0 if both hashes are equal or authentication is disabled; otherwise
+ * a negative error code is returned.
+ */
+int __ubifs_node_check_hash(const struct ubifs_info *c, const void *node,
+                           const u8 *expected)
+{
+       u8 calc[UBIFS_HASH_ARR_SZ];
+       int err;
+
+       err = __ubifs_node_calc_hash(c, node, calc);
+       if (err)
+               return err;
+
+       if (ubifs_check_hash(c, expected, calc))
+               return -EPERM;
+
+       return 0;
+}
+
+/**
+ * ubifs_init_authentication - initialize UBIFS authentication support
+ * @c: UBIFS file-system description object
+ *
+ * This function returns 0 for success or a negative error code otherwise.
+ */
+int ubifs_init_authentication(struct ubifs_info *c)
+{
+       struct key *keyring_key;
+       const struct user_key_payload *ukp;
+       int err;
+       char hmac_name[CRYPTO_MAX_ALG_NAME];
+
+       if (!c->auth_hash_name) {
+               ubifs_err(c, "authentication hash name needed with authentication");
+               return -EINVAL;
+       }
+
+       c->auth_hash_algo = match_string(hash_algo_name, HASH_ALGO__LAST,
+                                        c->auth_hash_name);
+       if ((int)c->auth_hash_algo < 0) {
+               ubifs_err(c, "Unknown hash algo %s specified",
+                         c->auth_hash_name);
+               return -EINVAL;
+       }
+
+       snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+                c->auth_hash_name);
+
+       keyring_key = request_key(&key_type_logon, c->auth_key_name, NULL);
+
+       if (IS_ERR(keyring_key)) {
+               ubifs_err(c, "Failed to request key: %ld",
+                         PTR_ERR(keyring_key));
+               return PTR_ERR(keyring_key);
+       }
+
+       down_read(&keyring_key->sem);
+
+       if (keyring_key->type != &key_type_logon) {
+               ubifs_err(c, "key type must be logon");
+               err = -ENOKEY;
+               goto out;
+       }
+
+       ukp = user_key_payload_locked(keyring_key);
+       if (!ukp) {
+               /* key was revoked before we acquired its semaphore */
+               err = -EKEYREVOKED;
+               goto out;
+       }
+
+       c->hash_tfm = crypto_alloc_shash(c->auth_hash_name, 0,
+                                        CRYPTO_ALG_ASYNC);
+       if (IS_ERR(c->hash_tfm)) {
+               err = PTR_ERR(c->hash_tfm);
+               ubifs_err(c, "Can not allocate %s: %d",
+                         c->auth_hash_name, err);
+               goto out;
+       }
+
+       c->hash_len = crypto_shash_digestsize(c->hash_tfm);
+       if (c->hash_len > UBIFS_HASH_ARR_SZ) {
+               ubifs_err(c, "hash %s is bigger than maximum allowed hash size (%d > %d)",
+                         c->auth_hash_name, c->hash_len, UBIFS_HASH_ARR_SZ);
+               err = -EINVAL;
+               goto out_free_hash;
+       }
+
+       c->hmac_tfm = crypto_alloc_shash(hmac_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(c->hmac_tfm)) {
+               err = PTR_ERR(c->hmac_tfm);
+               ubifs_err(c, "Can not allocate %s: %d", hmac_name, err);
+               goto out_free_hash;
+       }
+
+       c->hmac_desc_len = crypto_shash_digestsize(c->hmac_tfm);
+       if (c->hmac_desc_len > UBIFS_HMAC_ARR_SZ) {
+               ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)",
+                         hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ);
+               err = -EINVAL;
+               goto out_free_hash;
+       }
+
+       err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen);
+       if (err)
+               goto out_free_hmac;
+
+       c->authenticated = true;
+
+       c->log_hash = ubifs_hash_get_desc(c);
+       if (IS_ERR(c->log_hash)) {
+               err = PTR_ERR(c->log_hash);
+               goto out_free_hmac;
+       }
+
+       err = 0;
+
+out_free_hmac:
+       if (err)
+               crypto_free_shash(c->hmac_tfm);
+out_free_hash:
+       if (err)
+               crypto_free_shash(c->hash_tfm);
+out:
+       up_read(&keyring_key->sem);
+       key_put(keyring_key);
+
+       return err;
+}
+
+/**
+ * __ubifs_exit_authentication - release resource
+ * @c: UBIFS file-system description object
+ *
+ * This function releases the authentication related resources.
+ */
+void __ubifs_exit_authentication(struct ubifs_info *c)
+{
+       if (!ubifs_authenticated(c))
+               return;
+
+       crypto_free_shash(c->hmac_tfm);
+       crypto_free_shash(c->hash_tfm);
+       kfree(c->log_hash);
+}
+
+/**
+ * ubifs_node_calc_hmac - calculate the HMAC of a UBIFS node
+ * @c: UBIFS file-system description object
+ * @node: the node to insert a HMAC into.
+ * @len: the length of the node
+ * @ofs_hmac: the offset in the node where the HMAC is inserted
+ * @hmac: returned HMAC
+ *
+ * This function calculates a HMAC of a UBIFS node. The HMAC is expected to be
+ * embedded into the node, so this area is not covered by the HMAC. Also not
+ * covered are the UBIFS_NODE_MAGIC and the CRC of the node.
+ */
+static int ubifs_node_calc_hmac(const struct ubifs_info *c, const void *node,
+                               int len, int ofs_hmac, void *hmac)
+{
+       SHASH_DESC_ON_STACK(shash, c->hmac_tfm);
+       int hmac_len = c->hmac_desc_len;
+       int err;
+
+       ubifs_assert(c, ofs_hmac > 8);
+       ubifs_assert(c, ofs_hmac + hmac_len < len);
+
+       shash->tfm = c->hmac_tfm;
+       shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_shash_init(shash);
+       if (err)
+               return err;
+
+       /* behind common node header CRC up to HMAC begin */
+       err = crypto_shash_update(shash, node + 8, ofs_hmac - 8);
+       if (err < 0)
+               return err;
+
+       /* behind HMAC, if any */
+       if (len - ofs_hmac - hmac_len > 0) {
+               err = crypto_shash_update(shash, node + ofs_hmac + hmac_len,
+                           len - ofs_hmac - hmac_len);
+               if (err < 0)
+                       return err;
+       }
+
+       return crypto_shash_final(shash, hmac);
+}
+
+/**
+ * __ubifs_node_insert_hmac - insert a HMAC into a UBIFS node
+ * @c: UBIFS file-system description object
+ * @node: the node to insert a HMAC into.
+ * @len: the length of the node
+ * @ofs_hmac: the offset in the node where the HMAC is inserted
+ *
+ * This function inserts a HMAC at offset @ofs_hmac into the node given in
+ * @node.
+ *
+ * This function returns 0 for success or a negative error code otherwise.
+ */
+int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *node, int len,
+                           int ofs_hmac)
+{
+       return ubifs_node_calc_hmac(c, node, len, ofs_hmac, node + ofs_hmac);
+}
+
+/**
+ * __ubifs_node_verify_hmac - verify the HMAC of a UBIFS node
+ * @c: UBIFS file-system description object
+ * @node: the node whose HMAC is to be verified
+ * @len: the length of the node
+ * @ofs_hmac: the offset in the node where the HMAC is located
+ *
+ * This function verifies the HMAC at offset @ofs_hmac of the node given in
+ * @node. Returns 0 if successful or a negative error code otherwise.
+ */
+int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *node,
+                            int len, int ofs_hmac)
+{
+       int hmac_len = c->hmac_desc_len;
+       u8 *hmac;
+       int err;
+
+       hmac = kmalloc(hmac_len, GFP_NOFS);
+       if (!hmac)
+               return -ENOMEM;
+
+       err = ubifs_node_calc_hmac(c, node, len, ofs_hmac, hmac);
+       if (err) {
+               kfree(hmac);
+               return err;
+       }
+
+       err = crypto_memneq(hmac, node + ofs_hmac, hmac_len);
+
+       kfree(hmac);
+
+       if (!err)
+               return 0;
+
+       return -EPERM;
+}
+
+int __ubifs_shash_copy_state(const struct ubifs_info *c, struct shash_desc *src,
+                            struct shash_desc *target)
+{
+       u8 *state;
+       int err;
+
+       state = kmalloc(crypto_shash_descsize(src->tfm), GFP_NOFS);
+       if (!state)
+               return -ENOMEM;
+
+       err = crypto_shash_export(src, state);
+       if (err)
+               goto out;
+
+       err = crypto_shash_import(target, state);
+
+out:
+       kfree(state);
+
+       return err;
+}
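The export/import pair above effectively forks a running hash: a snapshot can be finalized while the original descriptor keeps accumulating nodes, which is exactly what ubifs_prepare_auth_node() relies on. A hypothetical caller, to illustrate the pattern:

    /* Hypothetical helper: finalize a snapshot of a running hash
     * without disturbing the original descriptor. */
    static int ubifs_peek_hash(struct ubifs_info *c,
                               struct shash_desc *running, u8 *out)
    {
            SHASH_DESC_ON_STACK(snap, c->hash_tfm);
            int err;

            snap->tfm = c->hash_tfm;
            snap->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

            err = ubifs_shash_copy_state(c, running, snap);
            if (err)
                    return err;

            return crypto_shash_final(snap, out);
    }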
+
+/**
+ * ubifs_hmac_wkm - Create a HMAC of the well known message
+ * @c: UBIFS file-system description object
+ * @hmac: The HMAC of the well known message
+ *
+ * This function creates a HMAC of a well known message. This is used
+ * to check if the provided key is suitable to authenticate a UBIFS
+ * image. This is only a convenience for the user, to provide a better
+ * error message when the wrong key is provided.
+ *
+ * This function returns 0 for success or a negative error code otherwise.
+ */
+int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac)
+{
+       SHASH_DESC_ON_STACK(shash, c->hmac_tfm);
+       int err;
+       const char well_known_message[] = "UBIFS";
+
+       if (!ubifs_authenticated(c))
+               return 0;
+
+       shash->tfm = c->hmac_tfm;
+       shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       err = crypto_shash_init(shash);
+       if (err)
+               return err;
+
+       err = crypto_shash_update(shash, well_known_message,
+                                 sizeof(well_known_message) - 1);
+       if (err < 0)
+               return err;
+
+       err = crypto_shash_final(shash, hmac);
+       if (err)
+               return err;
+       return 0;
+}
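A hedged sketch of the mount-time check ubifs_hmac_wkm() is meant for; the real caller lives in the superblock code outside this hunk, and the sup->hmac_wkm field name is an assumption here:

    /* Assumed caller: compare the freshly computed well-known-message
     * HMAC against the reference stored in the superblock node. */
    u8 hmac_wkm[UBIFS_HMAC_ARR_SZ];
    int err;

    err = ubifs_hmac_wkm(c, hmac_wkm);
    if (!err && crypto_memneq(hmac_wkm, sup->hmac_wkm, c->hmac_desc_len)) {
            ubifs_err(c, "provided key does not fit");
            err = -ENOKEY;
    }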
index 564e330d05b146df6d8c848742b08f02820b14c3..c49ff50fdceb1d8f90f059099cd87d6e4334555b 100644 (file)
@@ -165,6 +165,8 @@ const char *dbg_ntype(int type)
                return "commit start node";
        case UBIFS_ORPH_NODE:
                return "orphan node";
+       case UBIFS_AUTH_NODE:
+               return "auth node";
        default:
                return "unknown node";
        }
@@ -542,6 +544,10 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
                               (unsigned long long)le64_to_cpu(orph->inos[i]));
                break;
        }
+       case UBIFS_AUTH_NODE:
+       {
+               break;
+       }
        default:
                pr_err("node type %d was not recognized\n",
                       (int)ch->node_type);
index d2680e0b4a36f38826f253d33c1b7d258b21bb2d..bf75fdc76fc357f7d8da299405fce8455a829e8d 100644 (file)
@@ -254,7 +254,8 @@ static int sort_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
                             snod->type == UBIFS_DATA_NODE ||
                             snod->type == UBIFS_DENT_NODE ||
                             snod->type == UBIFS_XENT_NODE ||
-                            snod->type == UBIFS_TRUN_NODE);
+                            snod->type == UBIFS_TRUN_NODE ||
+                            snod->type == UBIFS_AUTH_NODE);
 
                if (snod->type != UBIFS_INO_NODE  &&
                    snod->type != UBIFS_DATA_NODE &&
@@ -364,12 +365,13 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
 
        /* Write nodes to their new location. Use the first-fit strategy */
        while (1) {
-               int avail;
+               int avail, moved = 0;
                struct ubifs_scan_node *snod, *tmp;
 
                /* Move data nodes */
                list_for_each_entry_safe(snod, tmp, &sleb->nodes, list) {
-                       avail = c->leb_size - wbuf->offs - wbuf->used;
+                       avail = c->leb_size - wbuf->offs - wbuf->used -
+                                       ubifs_auth_node_sz(c);
                        if  (snod->len > avail)
                                /*
                                 * Do not skip data nodes in order to optimize
@@ -377,14 +379,21 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
                                 */
                                break;
 
+                       err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
+                                                snod->node, snod->len);
+                       if (err)
+                               goto out;
+
                        err = move_node(c, sleb, snod, wbuf);
                        if (err)
                                goto out;
+                       moved = 1;
                }
 
                /* Move non-data nodes */
                list_for_each_entry_safe(snod, tmp, &nondata, list) {
-                       avail = c->leb_size - wbuf->offs - wbuf->used;
+                       avail = c->leb_size - wbuf->offs - wbuf->used -
+                                       ubifs_auth_node_sz(c);
                        if (avail < min)
                                break;
 
@@ -402,9 +411,41 @@ static int move_nodes(struct ubifs_info *c, struct ubifs_scan_leb *sleb)
                                continue;
                        }
 
+                       err = ubifs_shash_update(c, c->jheads[GCHD].log_hash,
+                                                snod->node, snod->len);
+                       if (err)
+                               goto out;
+
                        err = move_node(c, sleb, snod, wbuf);
                        if (err)
                                goto out;
+                       moved = 1;
+               }
+
+               if (ubifs_authenticated(c) && moved) {
+                       struct ubifs_auth_node *auth;
+
+                       auth = kmalloc(ubifs_auth_node_sz(c), GFP_NOFS);
+                       if (!auth) {
+                               err = -ENOMEM;
+                               goto out;
+                       }
+
+                       err = ubifs_prepare_auth_node(c, auth,
+                                               c->jheads[GCHD].log_hash);
+                       if (err) {
+                               kfree(auth);
+                               goto out;
+                       }
+
+                       err = ubifs_wbuf_write_nolock(wbuf, auth,
+                                                     ubifs_auth_node_sz(c));
+                       if (err) {
+                               kfree(auth);
+                               goto out;
+                       }
+
+                       ubifs_add_dirt(c, wbuf->lnum, ubifs_auth_node_sz(c));
                }
 
                if (list_empty(&sleb->nodes) && list_empty(&nondata))
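The reworked avail computation reserves ubifs_auth_node_sz() up front, so the authentication node appended after a round of moves is guaranteed to fit. With illustrative numbers (LEB size and auth node size depend on the flash geometry and the chosen hash):

    int leb_size = 131072;          /* 128 KiB LEB, say             */
    int offs = 122880, used = 0;    /* write-buffer position        */
    int auth_sz = 64;               /* ubifs_auth_node_sz(c), say   */
    int avail = leb_size - offs - used - auth_sz;   /* 8128 bytes   */
    /* Nodes larger than avail are not moved, so the trailing auth
     * node written by move_nodes() always has room. */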
index 099bec94b82079f8fbd03f0fb74aee2180d1dab3..d124117efd42dc8cea8aa25c8c455012b8fbaebd 100644 (file)
@@ -365,20 +365,8 @@ static unsigned long long next_sqnum(struct ubifs_info *c)
        return sqnum;
 }
 
-/**
- * ubifs_prepare_node - prepare node to be written to flash.
- * @c: UBIFS file-system description object
- * @node: the node to pad
- * @len: node length
- * @pad: if the buffer has to be padded
- *
- * This function prepares node at @node to be written to the media - it
- * calculates node CRC, fills the common header, and adds proper padding up to
- * the next minimum I/O unit if @pad is not zero.
- */
-void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
+void ubifs_init_node(struct ubifs_info *c, void *node, int len, int pad)
 {
-       uint32_t crc;
        struct ubifs_ch *ch = node;
        unsigned long long sqnum = next_sqnum(c);
 
@@ -389,8 +377,6 @@ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
        ch->group_type = UBIFS_NO_NODE_GROUP;
        ch->sqnum = cpu_to_le64(sqnum);
        ch->padding[0] = ch->padding[1] = 0;
-       crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
-       ch->crc = cpu_to_le32(crc);
 
        if (pad) {
                len = ALIGN(len, 8);
@@ -399,6 +385,68 @@ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
        }
 }
 
+void ubifs_crc_node(struct ubifs_info *c, void *node, int len)
+{
+       struct ubifs_ch *ch = node;
+       uint32_t crc;
+
+       crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8);
+       ch->crc = cpu_to_le32(crc);
+}
+
+/**
+ * ubifs_prepare_node_hmac - prepare node to be written to flash.
+ * @c: UBIFS file-system description object
+ * @node: the node to pad
+ * @len: node length
+ * @hmac_offs: offset of the HMAC in the node
+ * @pad: if the buffer has to be padded
+ *
+ * This function prepares node at @node to be written to the media - it
+ * calculates node CRC, fills the common header, and adds proper padding up to
+ * the next minimum I/O unit if @pad is not zero. If @hmac_offs is positive,
+ * then a HMAC is inserted into the node at the given offset.
+ *
+ * This function returns 0 for success or a negative error code otherwise.
+ */
+int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
+                           int hmac_offs, int pad)
+{
+       int err;
+
+       ubifs_init_node(c, node, len, pad);
+
+       if (hmac_offs > 0) {
+               err = ubifs_node_insert_hmac(c, node, len, hmac_offs);
+               if (err)
+                       return err;
+       }
+
+       ubifs_crc_node(c, node, len);
+
+       return 0;
+}
+
+/**
+ * ubifs_prepare_node - prepare node to be written to flash.
+ * @c: UBIFS file-system description object
+ * @node: the node to pad
+ * @len: node length
+ * @pad: if the buffer has to be padded
+ *
+ * This function prepares node at @node to be written to the media - it
+ * calculates node CRC, fills the common header, and adds proper padding up to
+ * the next minimum I/O unit if @pad is not zero.
+ */
+void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad)
+{
+       /*
+        * Deliberately ignore return value since this function can only fail
+        * when an HMAC offset is given.
+        */
+       ubifs_prepare_node_hmac(c, node, len, 0, pad);
+}
+
 /**
  * ubifs_prep_grp_node - prepare node of a group to be written to flash.
  * @c: UBIFS file-system description object
@@ -849,12 +897,13 @@ out:
 }
 
 /**
- * ubifs_write_node - write node to the media.
+ * ubifs_write_node_hmac - write node to the media.
  * @c: UBIFS file-system description object
  * @buf: the node to write
  * @len: node length
  * @lnum: logical eraseblock number
  * @offs: offset within the logical eraseblock
+ * @hmac_offs: offset of the HMAC within the node
  *
  * This function automatically fills node magic number, assigns sequence
  * number, and calculates node CRC checksum. The length of the @buf buffer has
@@ -862,8 +911,8 @@ out:
  * appends padding node and padding bytes if needed. Returns zero in case of
  * success and a negative error code in case of failure.
  */
-int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
-                    int offs)
+int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
+                         int offs, int hmac_offs)
 {
        int err, buf_len = ALIGN(len, c->min_io_size);
 
@@ -878,7 +927,10 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
        if (c->ro_error)
                return -EROFS;
 
-       ubifs_prepare_node(c, buf, len, 1);
+       err = ubifs_prepare_node_hmac(c, buf, len, hmac_offs, 1);
+       if (err)
+               return err;
+
        err = ubifs_leb_write(c, lnum, buf, offs, buf_len);
        if (err)
                ubifs_dump_node(c, buf);
@@ -886,6 +938,26 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
        return err;
 }
 
+/**
+ * ubifs_write_node - write node to the media.
+ * @c: UBIFS file-system description object
+ * @buf: the node to write
+ * @len: node length
+ * @lnum: logical eraseblock number
+ * @offs: offset within the logical eraseblock
+ *
+ * This function automatically fills node magic number, assigns sequence
+ * number, and calculates node CRC checksum. The length of the @buf buffer has
+ * to be aligned to the minimal I/O unit size. This function automatically
+ * appends padding node and padding bytes if needed. Returns zero in case of
+ * success and a negative error code in case of failure.
+ */
+int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
+                    int offs)
+{
+       return ubifs_write_node_hmac(c, buf, len, lnum, offs, -1);
+}
+
 /**
  * ubifs_read_node_wbuf - read node from the media or write-buffer.
  * @wbuf: wbuf to check for un-written data
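A sketch of a caller of the new HMAC-aware writer, for a node that embeds its HMAC at a fixed offset; the master-node layout used here is an assumption, since the corresponding hunks are not part of this excerpt:

    /* Assumed caller: @hmac_offs tells ubifs_prepare_node_hmac()
     * which bytes to skip while computing the MAC. */
    err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, lnum, offs,
                                offsetof(struct ubifs_mst_node, hmac));
    if (err)
            return err;

Note the ordering inside ubifs_prepare_node_hmac(): the HMAC is inserted before ubifs_crc_node() runs, so the CRC covers the HMAC while, as ubifs_node_calc_hmac() documents, the HMAC does not cover the CRC.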
index 802565a17733ce4b0e304df7daffda9517252bca..729dc76c83dffb850354521f96f7cdfabf860051 100644 (file)
@@ -90,6 +90,12 @@ static inline void zero_trun_node_unused(struct ubifs_trun_node *trun)
        memset(trun->padding, 0, 12);
 }
 
+static void ubifs_add_auth_dirt(struct ubifs_info *c, int lnum)
+{
+       if (ubifs_authenticated(c))
+               ubifs_add_dirt(c, lnum, ubifs_auth_node_sz(c));
+}
+
 /**
  * reserve_space - reserve space in the journal.
  * @c: UBIFS file-system description object
@@ -228,34 +234,33 @@ out_return:
        return err;
 }
 
-/**
- * write_node - write node to a journal head.
- * @c: UBIFS file-system description object
- * @jhead: journal head
- * @node: node to write
- * @len: node length
- * @lnum: LEB number written is returned here
- * @offs: offset written is returned here
- *
- * This function writes a node to reserved space of journal head @jhead.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
-                     int *lnum, int *offs)
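+/**
+ * ubifs_hash_nodes - hash a buffer of nodes and prepare its trailing auth node
+ * @c: UBIFS file-system description object
+ * @node: buffer containing a sequence of nodes, with room for an
+ *        authentication node reserved at the end
+ * @len: length of the buffer, including the reserved authentication node
+ * @hash: the running journal head hash to update with each node
+ *
+ * This function feeds every node in the buffer into @hash and then fills the
+ * reserved tail of the buffer with an authentication node derived from the
+ * updated hash state. Returns 0 for success or a negative error code
+ * otherwise.
+ */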
+static int ubifs_hash_nodes(struct ubifs_info *c, void *node,
+                            int len, struct shash_desc *hash)
 {
-       struct ubifs_wbuf *wbuf = &c->jheads[jhead].wbuf;
+       int auth_node_size = ubifs_auth_node_sz(c);
+       int err;
 
-       ubifs_assert(c, jhead != GCHD);
+       while (1) {
+               const struct ubifs_ch *ch = node;
+               int nodelen = le32_to_cpu(ch->len);
 
-       *lnum = c->jheads[jhead].wbuf.lnum;
-       *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
+               ubifs_assert(c, len >= auth_node_size);
 
-       dbg_jnl("jhead %s, LEB %d:%d, len %d",
-               dbg_jhead(jhead), *lnum, *offs, len);
-       ubifs_prepare_node(c, node, len, 0);
+               if (len == auth_node_size)
+                       break;
+
+               ubifs_assert(c, len > nodelen);
+               ubifs_assert(c, ch->magic == cpu_to_le32(UBIFS_NODE_MAGIC));
 
-       return ubifs_wbuf_write_nolock(wbuf, node, len);
+               err = ubifs_shash_update(c, hash, (void *)node, nodelen);
+               if (err)
+                       return err;
+
+               node += ALIGN(nodelen, 8);
+               len -= ALIGN(nodelen, 8);
+       }
+
+       return ubifs_prepare_auth_node(c, node, hash);
 }
 
 /**
@@ -268,9 +273,9 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
  * @offs: offset written is returned here
  * @sync: non-zero if the write-buffer has to by synchronized
  *
- * This function is the same as 'write_node()' but it does not assume the
- * buffer it is writing is a node, so it does not prepare it (which means
- * initializing common header and calculating CRC).
+ * This function writes data to the reserved space of journal head @jhead.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
  */
 static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
                      int *lnum, int *offs, int sync)
@@ -285,6 +290,12 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
        dbg_jnl("jhead %s, LEB %d:%d, len %d",
                dbg_jhead(jhead), *lnum, *offs, len);
 
+       if (ubifs_authenticated(c)) {
+               err = ubifs_hash_nodes(c, buf, len, c->jheads[jhead].log_hash);
+               if (err)
+                       return err;
+       }
+
        err = ubifs_wbuf_write_nolock(wbuf, buf, len);
        if (err)
                return err;
@@ -548,6 +559,9 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
        struct ubifs_dent_node *dent;
        struct ubifs_ino_node *ino;
        union ubifs_key dent_key, ino_key;
+       u8 hash_dent[UBIFS_HASH_ARR_SZ];
+       u8 hash_ino[UBIFS_HASH_ARR_SZ];
+       u8 hash_ino_host[UBIFS_HASH_ARR_SZ];
 
        ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
 
@@ -570,7 +584,10 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 
        len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
        /* Make sure to also account for extended attributes */
-       len += host_ui->data_len;
+       if (ubifs_authenticated(c))
+               len += ALIGN(host_ui->data_len, 8) + ubifs_auth_node_sz(c);
+       else
+               len += host_ui->data_len;
 
        dent = kzalloc(len, GFP_NOFS);
        if (!dent)
@@ -602,11 +619,21 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
 
        zero_dent_node_unused(dent);
        ubifs_prep_grp_node(c, dent, dlen, 0);
+       err = ubifs_node_calc_hash(c, dent, hash_dent);
+       if (err)
+               goto out_release;
 
        ino = (void *)dent + aligned_dlen;
        pack_inode(c, ino, inode, 0);
+       err = ubifs_node_calc_hash(c, ino, hash_ino);
+       if (err)
+               goto out_release;
+
        ino = (void *)ino + aligned_ilen;
        pack_inode(c, ino, dir, 1);
+       err = ubifs_node_calc_hash(c, ino, hash_ino_host);
+       if (err)
+               goto out_release;
 
        if (last_reference) {
                err = ubifs_add_orphan(c, inode->i_ino);
@@ -628,6 +655,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
        }
        release_head(c, BASEHD);
        kfree(dent);
+       ubifs_add_auth_dirt(c, lnum);
 
        if (deletion) {
                if (nm->hash)
@@ -638,7 +666,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
                        goto out_ro;
                err = ubifs_add_dirt(c, lnum, dlen);
        } else
-               err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen, nm);
+               err = ubifs_tnc_add_nm(c, &dent_key, lnum, dent_offs, dlen,
+                                      hash_dent, nm);
        if (err)
                goto out_ro;
 
@@ -650,14 +679,14 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
         */
        ino_key_init(c, &ino_key, inode->i_ino);
        ino_offs = dent_offs + aligned_dlen;
-       err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen);
+       err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, ilen, hash_ino);
        if (err)
                goto out_ro;
 
        ino_key_init(c, &ino_key, dir->i_ino);
        ino_offs += aligned_ilen;
        err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
-                           UBIFS_INO_NODE_SZ + host_ui->data_len);
+                           UBIFS_INO_NODE_SZ + host_ui->data_len, hash_ino_host);
        if (err)
                goto out_ro;
 
@@ -706,10 +735,12 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
                         const union ubifs_key *key, const void *buf, int len)
 {
        struct ubifs_data_node *data;
-       int err, lnum, offs, compr_type, out_len, compr_len;
+       int err, lnum, offs, compr_type, out_len, compr_len, auth_len;
        int dlen = COMPRESSED_DATA_NODE_BUF_SZ, allocated = 1;
+       int write_len;
        struct ubifs_inode *ui = ubifs_inode(inode);
        bool encrypted = ubifs_crypt_is_encrypted(inode);
+       u8 hash[UBIFS_HASH_ARR_SZ];
 
        dbg_jnlk(key, "ino %lu, blk %u, len %d, key ",
                (unsigned long)key_inum(c, key), key_block(c, key), len);
@@ -718,7 +749,9 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
        if (encrypted)
                dlen += UBIFS_CIPHER_BLOCK_SIZE;
 
-       data = kmalloc(dlen, GFP_NOFS | __GFP_NOWARN);
+       auth_len = ubifs_auth_node_sz(c);
+
+       data = kmalloc(dlen + auth_len, GFP_NOFS | __GFP_NOWARN);
        if (!data) {
                /*
                 * Fall-back to the write reserve buffer. Note, we might be
@@ -757,20 +790,33 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
        }
 
        dlen = UBIFS_DATA_NODE_SZ + out_len;
+       if (ubifs_authenticated(c))
+               write_len = ALIGN(dlen, 8) + auth_len;
+       else
+               write_len = dlen;
+
        data->compr_type = cpu_to_le16(compr_type);
 
        /* Make reservation before allocating sequence numbers */
-       err = make_reservation(c, DATAHD, dlen);
+       err = make_reservation(c, DATAHD, write_len);
        if (err)
                goto out_free;
 
-       err = write_node(c, DATAHD, data, dlen, &lnum, &offs);
+       ubifs_prepare_node(c, data, dlen, 0);
+       err = write_head(c, DATAHD, data, write_len, &lnum, &offs, 0);
+       if (err)
+               goto out_release;
+
+       err = ubifs_node_calc_hash(c, data, hash);
        if (err)
                goto out_release;
+
        ubifs_wbuf_add_ino_nolock(&c->jheads[DATAHD].wbuf, key_inum(c, key));
        release_head(c, DATAHD);
 
-       err = ubifs_tnc_add(c, key, lnum, offs, dlen);
+       ubifs_add_auth_dirt(c, lnum);
+
+       err = ubifs_tnc_add(c, key, lnum, offs, dlen, hash);
        if (err)
                goto out_ro;
 
@@ -808,7 +854,9 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
        int err, lnum, offs;
        struct ubifs_ino_node *ino;
        struct ubifs_inode *ui = ubifs_inode(inode);
-       int sync = 0, len = UBIFS_INO_NODE_SZ, last_reference = !inode->i_nlink;
+       int sync = 0, write_len, ilen = UBIFS_INO_NODE_SZ;
+       int last_reference = !inode->i_nlink;
+       u8 hash[UBIFS_HASH_ARR_SZ];
 
        dbg_jnl("ino %lu, nlink %u", inode->i_ino, inode->i_nlink);
 
@@ -817,20 +865,30 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
         * need to synchronize the write-buffer either.
         */
        if (!last_reference) {
-               len += ui->data_len;
+               ilen += ui->data_len;
                sync = IS_SYNC(inode);
        }
-       ino = kmalloc(len, GFP_NOFS);
+
+       if (ubifs_authenticated(c))
+               write_len = ALIGN(ilen, 8) + ubifs_auth_node_sz(c);
+       else
+               write_len = ilen;
+
+       ino = kmalloc(write_len, GFP_NOFS);
        if (!ino)
                return -ENOMEM;
 
        /* Make reservation before allocating sequence numbers */
-       err = make_reservation(c, BASEHD, len);
+       err = make_reservation(c, BASEHD, write_len);
        if (err)
                goto out_free;
 
        pack_inode(c, ino, inode, 1);
-       err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
+       err = ubifs_node_calc_hash(c, ino, hash);
+       if (err)
+               goto out_release;
+
+       err = write_head(c, BASEHD, ino, write_len, &lnum, &offs, sync);
        if (err)
                goto out_release;
        if (!sync)
@@ -838,17 +896,19 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
                                          inode->i_ino);
        release_head(c, BASEHD);
 
+       ubifs_add_auth_dirt(c, lnum);
+
        if (last_reference) {
                err = ubifs_tnc_remove_ino(c, inode->i_ino);
                if (err)
                        goto out_ro;
                ubifs_delete_orphan(c, inode->i_ino);
-               err = ubifs_add_dirt(c, lnum, len);
+               err = ubifs_add_dirt(c, lnum, ilen);
        } else {
                union ubifs_key key;
 
                ino_key_init(c, &key, inode->i_ino);
-               err = ubifs_tnc_add(c, &key, lnum, offs, len);
+               err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash);
        }
        if (err)
                goto out_ro;
@@ -958,6 +1018,10 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
        int aligned_dlen1, aligned_dlen2;
        int twoparents = (fst_dir != snd_dir);
        void *p;
+       u8 hash_dent1[UBIFS_HASH_ARR_SZ];
+       u8 hash_dent2[UBIFS_HASH_ARR_SZ];
+       u8 hash_p1[UBIFS_HASH_ARR_SZ];
+       u8 hash_p2[UBIFS_HASH_ARR_SZ];
 
        ubifs_assert(c, ubifs_inode(fst_dir)->data_len == 0);
        ubifs_assert(c, ubifs_inode(snd_dir)->data_len == 0);
@@ -973,6 +1037,8 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
        if (twoparents)
                len += plen;
 
+       len += ubifs_auth_node_sz(c);
+
        dent1 = kzalloc(len, GFP_NOFS);
        if (!dent1)
                return -ENOMEM;
@@ -993,6 +1059,9 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
        set_dent_cookie(c, dent1);
        zero_dent_node_unused(dent1);
        ubifs_prep_grp_node(c, dent1, dlen1, 0);
+       err = ubifs_node_calc_hash(c, dent1, hash_dent1);
+       if (err)
+               goto out_release;
 
        /* Make new dent for 2nd entry */
        dent2 = (void *)dent1 + aligned_dlen1;
@@ -1006,14 +1075,26 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
        set_dent_cookie(c, dent2);
        zero_dent_node_unused(dent2);
        ubifs_prep_grp_node(c, dent2, dlen2, 0);
+       err = ubifs_node_calc_hash(c, dent2, hash_dent2);
+       if (err)
+               goto out_release;
 
        p = (void *)dent2 + aligned_dlen2;
-       if (!twoparents)
+       if (!twoparents) {
                pack_inode(c, p, fst_dir, 1);
-       else {
+               err = ubifs_node_calc_hash(c, p, hash_p1);
+               if (err)
+                       goto out_release;
+       } else {
                pack_inode(c, p, fst_dir, 0);
+               err = ubifs_node_calc_hash(c, p, hash_p1);
+               if (err)
+                       goto out_release;
                p += ALIGN(plen, 8);
                pack_inode(c, p, snd_dir, 1);
+               err = ubifs_node_calc_hash(c, p, hash_p2);
+               if (err)
+                       goto out_release;
        }
 
        err = write_head(c, BASEHD, dent1, len, &lnum, &offs, sync);
@@ -1027,28 +1108,30 @@ int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
        }
        release_head(c, BASEHD);
 
+       ubifs_add_auth_dirt(c, lnum);
+
        dent_key_init(c, &key, snd_dir->i_ino, snd_nm);
-       err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, snd_nm);
+       err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, snd_nm);
        if (err)
                goto out_ro;
 
        offs += aligned_dlen1;
        dent_key_init(c, &key, fst_dir->i_ino, fst_nm);
-       err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, fst_nm);
+       err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, fst_nm);
        if (err)
                goto out_ro;
 
        offs += aligned_dlen2;
 
        ino_key_init(c, &key, fst_dir->i_ino);
-       err = ubifs_tnc_add(c, &key, lnum, offs, plen);
+       err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p1);
        if (err)
                goto out_ro;
 
        if (twoparents) {
                offs += ALIGN(plen, 8);
                ino_key_init(c, &key, snd_dir->i_ino);
-               err = ubifs_tnc_add(c, &key, lnum, offs, plen);
+               err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_p2);
                if (err)
                        goto out_ro;
        }
@@ -1101,6 +1184,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        int last_reference = !!(new_inode && new_inode->i_nlink == 0);
        int move = (old_dir != new_dir);
        struct ubifs_inode *uninitialized_var(new_ui);
+       u8 hash_old_dir[UBIFS_HASH_ARR_SZ];
+       u8 hash_new_dir[UBIFS_HASH_ARR_SZ];
+       u8 hash_new_inode[UBIFS_HASH_ARR_SZ];
+       u8 hash_dent1[UBIFS_HASH_ARR_SZ];
+       u8 hash_dent2[UBIFS_HASH_ARR_SZ];
 
        ubifs_assert(c, ubifs_inode(old_dir)->data_len == 0);
        ubifs_assert(c, ubifs_inode(new_dir)->data_len == 0);
@@ -1123,6 +1211,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        len = aligned_dlen1 + aligned_dlen2 + ALIGN(ilen, 8) + ALIGN(plen, 8);
        if (move)
                len += plen;
+
+       len += ubifs_auth_node_sz(c);
+
        dent = kzalloc(len, GFP_NOFS);
        if (!dent)
                return -ENOMEM;
@@ -1143,6 +1234,9 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        set_dent_cookie(c, dent);
        zero_dent_node_unused(dent);
        ubifs_prep_grp_node(c, dent, dlen1, 0);
+       err = ubifs_node_calc_hash(c, dent, hash_dent1);
+       if (err)
+               goto out_release;
 
        dent2 = (void *)dent + aligned_dlen1;
        dent2->ch.node_type = UBIFS_DENT_NODE;
@@ -1162,19 +1256,36 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        set_dent_cookie(c, dent2);
        zero_dent_node_unused(dent2);
        ubifs_prep_grp_node(c, dent2, dlen2, 0);
+       err = ubifs_node_calc_hash(c, dent2, hash_dent2);
+       if (err)
+               goto out_release;
 
        p = (void *)dent2 + aligned_dlen2;
        if (new_inode) {
                pack_inode(c, p, new_inode, 0);
+               err = ubifs_node_calc_hash(c, p, hash_new_inode);
+               if (err)
+                       goto out_release;
+
                p += ALIGN(ilen, 8);
        }
 
-       if (!move)
+       if (!move) {
                pack_inode(c, p, old_dir, 1);
-       else {
+               err = ubifs_node_calc_hash(c, p, hash_old_dir);
+               if (err)
+                       goto out_release;
+       } else {
                pack_inode(c, p, old_dir, 0);
+               err = ubifs_node_calc_hash(c, p, hash_old_dir);
+               if (err)
+                       goto out_release;
+
                p += ALIGN(plen, 8);
                pack_inode(c, p, new_dir, 1);
+               err = ubifs_node_calc_hash(c, p, hash_new_dir);
+               if (err)
+                       goto out_release;
        }
 
        if (last_reference) {
@@ -1200,15 +1311,17 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        }
        release_head(c, BASEHD);
 
+       ubifs_add_auth_dirt(c, lnum);
+
        dent_key_init(c, &key, new_dir->i_ino, new_nm);
-       err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, new_nm);
+       err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen1, hash_dent1, new_nm);
        if (err)
                goto out_ro;
 
        offs += aligned_dlen1;
        if (whiteout) {
                dent_key_init(c, &key, old_dir->i_ino, old_nm);
-               err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, old_nm);
+               err = ubifs_tnc_add_nm(c, &key, lnum, offs, dlen2, hash_dent2, old_nm);
                if (err)
                        goto out_ro;
 
@@ -1227,21 +1340,21 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir,
        offs += aligned_dlen2;
        if (new_inode) {
                ino_key_init(c, &key, new_inode->i_ino);
-               err = ubifs_tnc_add(c, &key, lnum, offs, ilen);
+               err = ubifs_tnc_add(c, &key, lnum, offs, ilen, hash_new_inode);
                if (err)
                        goto out_ro;
                offs += ALIGN(ilen, 8);
        }
 
        ino_key_init(c, &key, old_dir->i_ino);
-       err = ubifs_tnc_add(c, &key, lnum, offs, plen);
+       err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_old_dir);
        if (err)
                goto out_ro;
 
        if (move) {
                offs += ALIGN(plen, 8);
                ino_key_init(c, &key, new_dir->i_ino);
-               err = ubifs_tnc_add(c, &key, lnum, offs, plen);
+               err = ubifs_tnc_add(c, &key, lnum, offs, plen, hash_new_dir);
                if (err)
                        goto out_ro;
        }
@@ -1360,6 +1473,8 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
        struct ubifs_inode *ui = ubifs_inode(inode);
        ino_t inum = inode->i_ino;
        unsigned int blk;
+       u8 hash_ino[UBIFS_HASH_ARR_SZ];
+       u8 hash_dn[UBIFS_HASH_ARR_SZ];
 
        dbg_jnl("ino %lu, size %lld -> %lld",
                (unsigned long)inum, old_size, new_size);
@@ -1369,6 +1484,9 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 
        sz = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ +
             UBIFS_MAX_DATA_NODE_SZ * WORST_COMPR_FACTOR;
+
+       sz += ubifs_auth_node_sz(c);
+
        ino = kmalloc(sz, GFP_NOFS);
        if (!ino)
                return -ENOMEM;
@@ -1414,16 +1532,28 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
 
        /* Must make reservation before allocating sequence numbers */
        len = UBIFS_TRUN_NODE_SZ + UBIFS_INO_NODE_SZ;
-       if (dlen)
+
+       if (ubifs_authenticated(c))
+               len += ALIGN(dlen, 8) + ubifs_auth_node_sz(c);
+       else
                len += dlen;
+
        err = make_reservation(c, BASEHD, len);
        if (err)
                goto out_free;
 
        pack_inode(c, ino, inode, 0);
+       err = ubifs_node_calc_hash(c, ino, hash_ino);
+       if (err)
+               goto out_release;
+
        ubifs_prep_grp_node(c, trun, UBIFS_TRUN_NODE_SZ, dlen ? 0 : 1);
-       if (dlen)
+       if (dlen) {
                ubifs_prep_grp_node(c, dn, dlen, 1);
+               err = ubifs_node_calc_hash(c, dn, hash_dn);
+               if (err)
+                       goto out_release;
+       }
 
        err = write_head(c, BASEHD, ino, len, &lnum, &offs, sync);
        if (err)
@@ -1432,15 +1562,17 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
                ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, inum);
        release_head(c, BASEHD);
 
+       ubifs_add_auth_dirt(c, lnum);
+
        if (dlen) {
                sz = offs + UBIFS_INO_NODE_SZ + UBIFS_TRUN_NODE_SZ;
-               err = ubifs_tnc_add(c, &key, lnum, sz, dlen);
+               err = ubifs_tnc_add(c, &key, lnum, sz, dlen, hash_dn);
                if (err)
                        goto out_ro;
        }
 
        ino_key_init(c, &key, inum);
-       err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ);
+       err = ubifs_tnc_add(c, &key, lnum, offs, UBIFS_INO_NODE_SZ, hash_ino);
        if (err)
                goto out_ro;
 
@@ -1495,12 +1627,13 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
                           const struct inode *inode,
                           const struct fscrypt_name *nm)
 {
-       int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen;
+       int err, xlen, hlen, len, lnum, xent_offs, aligned_xlen, write_len;
        struct ubifs_dent_node *xent;
        struct ubifs_ino_node *ino;
        union ubifs_key xent_key, key1, key2;
        int sync = IS_DIRSYNC(host);
        struct ubifs_inode *host_ui = ubifs_inode(host);
+       u8 hash[UBIFS_HASH_ARR_SZ];
 
        ubifs_assert(c, inode->i_nlink == 0);
        ubifs_assert(c, mutex_is_locked(&host_ui->ui_mutex));
@@ -1514,12 +1647,14 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
        hlen = host_ui->data_len + UBIFS_INO_NODE_SZ;
        len = aligned_xlen + UBIFS_INO_NODE_SZ + ALIGN(hlen, 8);
 
-       xent = kzalloc(len, GFP_NOFS);
+       write_len = len + ubifs_auth_node_sz(c);
+
+       xent = kzalloc(write_len, GFP_NOFS);
        if (!xent)
                return -ENOMEM;
 
        /* Make reservation before allocating sequence numbers */
-       err = make_reservation(c, BASEHD, len);
+       err = make_reservation(c, BASEHD, write_len);
        if (err) {
                kfree(xent);
                return err;
@@ -1540,11 +1675,16 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
        pack_inode(c, ino, inode, 0);
        ino = (void *)ino + UBIFS_INO_NODE_SZ;
        pack_inode(c, ino, host, 1);
+       err = ubifs_node_calc_hash(c, ino, hash);
+       if (err)
+               goto out_release;
 
-       err = write_head(c, BASEHD, xent, len, &lnum, &xent_offs, sync);
+       err = write_head(c, BASEHD, xent, write_len, &lnum, &xent_offs, sync);
        if (!sync && !err)
                ubifs_wbuf_add_ino_nolock(&c->jheads[BASEHD].wbuf, host->i_ino);
        release_head(c, BASEHD);
+
+       ubifs_add_auth_dirt(c, lnum);
        kfree(xent);
        if (err)
                goto out_ro;
@@ -1572,7 +1712,7 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
 
        /* And update TNC with the new host inode position */
        ino_key_init(c, &key1, host->i_ino);
-       err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen);
+       err = ubifs_tnc_add(c, &key1, lnum, xent_offs + len - hlen, hlen, hash);
        if (err)
                goto out_ro;
 
@@ -1583,6 +1723,9 @@ int ubifs_jnl_delete_xattr(struct ubifs_info *c, const struct inode *host,
        mark_inode_clean(c, host_ui);
        return 0;
 
+out_release:
+       kfree(xent);
+       release_head(c, BASEHD);
 out_ro:
        ubifs_ro_mode(c, err);
        finish_reservation(c);
@@ -1610,6 +1753,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
        struct ubifs_ino_node *ino;
        union ubifs_key key;
        int sync = IS_DIRSYNC(host);
+       u8 hash_host[UBIFS_HASH_ARR_SZ];
+       u8 hash[UBIFS_HASH_ARR_SZ];
 
        dbg_jnl("ino %lu, ino %lu", host->i_ino, inode->i_ino);
        ubifs_assert(c, host->i_nlink > 0);
@@ -1621,6 +1766,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
        aligned_len1 = ALIGN(len1, 8);
        aligned_len = aligned_len1 + ALIGN(len2, 8);
 
+       aligned_len += ubifs_auth_node_sz(c);
+
        ino = kzalloc(aligned_len, GFP_NOFS);
        if (!ino)
                return -ENOMEM;
@@ -1631,7 +1778,13 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
                goto out_free;
 
        pack_inode(c, ino, host, 0);
+       err = ubifs_node_calc_hash(c, ino, hash_host);
+       if (err)
+               goto out_release;
        pack_inode(c, (void *)ino + aligned_len1, inode, 1);
+       err = ubifs_node_calc_hash(c, (void *)ino + aligned_len1, hash);
+       if (err)
+               goto out_release;
 
        err = write_head(c, BASEHD, ino, aligned_len, &lnum, &offs, 0);
        if (!sync && !err) {
@@ -1644,13 +1797,15 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
        if (err)
                goto out_ro;
 
+       ubifs_add_auth_dirt(c, lnum);
+
        ino_key_init(c, &key, host->i_ino);
-       err = ubifs_tnc_add(c, &key, lnum, offs, len1);
+       err = ubifs_tnc_add(c, &key, lnum, offs, len1, hash_host);
        if (err)
                goto out_ro;
 
        ino_key_init(c, &key, inode->i_ino);
-       err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2);
+       err = ubifs_tnc_add(c, &key, lnum, offs + aligned_len1, len2, hash);
        if (err)
                goto out_ro;
 
@@ -1662,6 +1817,8 @@ int ubifs_jnl_change_xattr(struct ubifs_info *c, const struct inode *inode,
        kfree(ino);
        return 0;
 
+out_release:
+       release_head(c, BASEHD);
 out_ro:
        ubifs_ro_mode(c, err);
        finish_reservation(c);
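
All of the journal writers converge on the same authenticated pattern in this patch: the reservation grows by ubifs_auth_node_sz(c) so that write_head() can append an authentication node behind the payload, every written node is hashed with ubifs_node_calc_hash(), the authentication node is accounted as dirt via ubifs_add_auth_dirt(), and the hash reaches the index through the extra ubifs_tnc_add() argument. Condensed into one hypothetical helper (same calls as the hunks above, simplified error handling):

        /*
         * Hypothetical condensation of the hunks above; @node must be a
         * buffer of write_len bytes, payload first, auth node space after.
         */
        static int jnl_write_authenticated(struct ubifs_info *c, void *node,
                                           int len, union ubifs_key *key,
                                           int sync)
        {
                int err, lnum, offs;
                int write_len = len + ubifs_auth_node_sz(c); /* auth node tail */
                u8 hash[UBIFS_HASH_ARR_SZ];

                err = make_reservation(c, BASEHD, write_len);
                if (err)
                        return err;

                err = ubifs_node_calc_hash(c, node, hash); /* hash the payload */
                if (err)
                        goto out_release;

                err = write_head(c, BASEHD, node, write_len, &lnum, &offs, sync);
                release_head(c, BASEHD);
                if (err)
                        goto out_ro;

                ubifs_add_auth_dirt(c, lnum);   /* account the auth node */

                err = ubifs_tnc_add(c, key, lnum, offs, len, hash);
                if (err)
                        goto out_ro;

                finish_reservation(c);
                return 0;

        out_release:
                release_head(c, BASEHD);
        out_ro:
                ubifs_ro_mode(c, err);
                finish_reservation(c);
                return err;
        }
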
index 86b0828f54991d680bb9e2db72632165673ad3e0..15fd854149bbfafa4be60109c9e9b5bccf09a598 100644 (file)
@@ -236,6 +236,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
        bud->lnum = lnum;
        bud->start = offs;
        bud->jhead = jhead;
+       bud->log_hash = NULL;
 
        ref->ch.node_type = UBIFS_REF_NODE;
        ref->lnum = cpu_to_le32(bud->lnum);
@@ -275,6 +276,14 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
        if (err)
                goto out_unlock;
 
+       err = ubifs_shash_update(c, c->log_hash, ref, UBIFS_REF_NODE_SZ);
+       if (err)
+               goto out_unlock;
+
+       err = ubifs_shash_copy_state(c, c->log_hash, c->jheads[jhead].log_hash);
+       if (err)
+               goto out_unlock;
+
        c->lhead_offs += c->ref_node_alsz;
 
        ubifs_add_bud(c, bud);
@@ -377,6 +386,14 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
        cs->cmt_no = cpu_to_le64(c->cmt_no);
        ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);
 
+       err = ubifs_shash_init(c, c->log_hash);
+       if (err)
+               goto out;
+
+       err = ubifs_shash_update(c, c->log_hash, cs, UBIFS_CS_NODE_SZ);
+       if (err < 0)
+               goto out;
+
        /*
         * Note, we do not lock 'c->log_mutex' because this is the commit start
         * phase and we are exclusively using the log. And we do not lock
@@ -402,6 +419,12 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
 
                ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
                len += UBIFS_REF_NODE_SZ;
+
+               err = ubifs_shash_update(c, c->log_hash, ref,
+                                        UBIFS_REF_NODE_SZ);
+               if (err)
+                       goto out;
+               err = ubifs_shash_copy_state(c, c->log_hash,
+                                            c->jheads[i].log_hash);
+               if (err)
+                       goto out;
        }
 
        ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);
@@ -516,6 +539,7 @@ int ubifs_log_post_commit(struct ubifs_info *c, int old_ltail_lnum)
                if (err)
                        return err;
                list_del(&bud->list);
+               kfree(bud->log_hash);
                kfree(bud);
        }
        mutex_lock(&c->log_mutex);
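
Both log-side call sites fork the running log hash into a per-journal-head copy with ubifs_shash_copy_state(). The diff does not show that helper; assuming it is a thin wrapper around the crypto API's export/import pair (a plausible reading, not confirmed by this diff), it would look roughly like:

        static int ubifs_shash_copy_state(const struct ubifs_info *c,
                                          struct shash_desc *src,
                                          struct shash_desc *target)
        {
                u8 *state;
                int err;

                /* descsize is large enough to hold the exported hash state */
                state = kmalloc(crypto_shash_descsize(src->tfm), GFP_NOFS);
                if (!state)
                        return -ENOMEM;

                err = crypto_shash_export(src, state);  /* snapshot src */
                if (!err)
                        err = crypto_shash_import(target, state);

                kfree(state);
                return err;
        }
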
index 31393370e334826c182a2ea159df1caa435af158..d1d5e96350ddbd0ff549941655b0e4d8e089beef 100644 (file)
@@ -604,11 +604,12 @@ static int calc_pnode_num_from_parent(const struct ubifs_info *c,
  * @lpt_first: LEB number of first LPT LEB
  * @lpt_lebs: number of LEBs for LPT is passed and returned here
  * @big_lpt: use big LPT model is passed and returned here
+ * @hash: hash of the LPT is returned here
  *
  * This function returns %0 on success and a negative error code on failure.
  */
 int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
-                         int *lpt_lebs, int *big_lpt)
+                         int *lpt_lebs, int *big_lpt, u8 *hash)
 {
        int lnum, err = 0, node_sz, iopos, i, j, cnt, len, alen, row;
        int blnum, boffs, bsz, bcnt;
@@ -617,6 +618,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
        void *buf = NULL, *p;
        struct ubifs_lpt_lprops *ltab = NULL;
        int *lsave = NULL;
+       struct shash_desc *desc;
 
        err = calc_dflt_lpt_geom(c, main_lebs, big_lpt);
        if (err)
@@ -630,6 +632,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
        /* Needed by 'ubifs_pack_lsave()' */
        c->main_first = c->leb_cnt - *main_lebs;
 
+       desc = ubifs_hash_get_desc(c);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
        lsave = kmalloc_array(c->lsave_cnt, sizeof(int), GFP_KERNEL);
        pnode = kzalloc(sizeof(struct ubifs_pnode), GFP_KERNEL);
        nnode = kzalloc(sizeof(struct ubifs_nnode), GFP_KERNEL);
@@ -677,6 +683,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
 
        /* Add first pnode */
        ubifs_pack_pnode(c, p, pnode);
+       err = ubifs_shash_update(c, desc, p, c->pnode_sz);
+       if (err)
+               goto out;
+
        p += c->pnode_sz;
        len = c->pnode_sz;
        pnode->num += 1;
@@ -711,6 +721,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
                        len = 0;
                }
                ubifs_pack_pnode(c, p, pnode);
+               err = ubifs_shash_update(c, desc, p, c->pnode_sz);
+               if (err)
+                       goto out;
+
                p += c->pnode_sz;
                len += c->pnode_sz;
                /*
@@ -830,6 +844,10 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
        if (err)
                goto out;
 
+       err = ubifs_shash_final(c, desc, hash);
+       if (err)
+               goto out;
+
        c->nhead_lnum = lnum;
        c->nhead_offs = ALIGN(len, c->min_io_size);
 
@@ -853,6 +871,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
                dbg_lp("LPT lsave is at %d:%d", c->lsave_lnum, c->lsave_offs);
 out:
        c->ltab = NULL;
+       kfree(desc);
        kfree(lsave);
        vfree(ltab);
        vfree(buf);
@@ -1439,26 +1458,25 @@ struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c,
 }
 
 /**
- * ubifs_lpt_lookup - lookup LEB properties in the LPT.
+ * ubifs_pnode_lookup - lookup a pnode in the LPT.
  * @c: UBIFS file-system description object
- * @lnum: LEB number to lookup
+ * @i: pnode number (0 to (main_lebs - 1) / UBIFS_LPT_FANOUT)
  *
- * This function returns a pointer to the LEB properties on success or a
- * negative error code on failure.
+ * This function returns a pointer to the pnode on success or a negative
+ * error code on failure.
  */
-struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
+struct ubifs_pnode *ubifs_pnode_lookup(struct ubifs_info *c, int i)
 {
-       int err, i, h, iip, shft;
+       int err, h, iip, shft;
        struct ubifs_nnode *nnode;
-       struct ubifs_pnode *pnode;
 
        if (!c->nroot) {
                err = ubifs_read_nnode(c, NULL, 0);
                if (err)
                        return ERR_PTR(err);
        }
+       i <<= UBIFS_LPT_FANOUT_SHIFT;
        nnode = c->nroot;
-       i = lnum - c->main_first;
        shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
        for (h = 1; h < c->lpt_hght; h++) {
                iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
@@ -1468,7 +1486,24 @@ struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
                        return ERR_CAST(nnode);
        }
        iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
-       pnode = ubifs_get_pnode(c, nnode, iip);
+       return ubifs_get_pnode(c, nnode, iip);
+}
+
+/**
+ * ubifs_lpt_lookup - lookup LEB properties in the LPT.
+ * @c: UBIFS file-system description object
+ * @lnum: LEB number to lookup
+ *
+ * This function returns a pointer to the LEB properties on success or a
+ * negative error code on failure.
+ */
+struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum)
+{
+       int i, iip;
+       struct ubifs_pnode *pnode;
+
+       i = lnum - c->main_first;
+       pnode = ubifs_pnode_lookup(c, i >> UBIFS_LPT_FANOUT_SHIFT);
        if (IS_ERR(pnode))
                return ERR_CAST(pnode);
        iip = (i & (UBIFS_LPT_FANOUT - 1));
@@ -1619,6 +1654,131 @@ struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum)
        return &pnode->lprops[iip];
 }
 
+/**
+ * ubifs_lpt_calc_hash - Calculate hash of the LPT pnodes
+ * @c: UBIFS file-system description object
+ * @hash: the returned hash of the LPT pnodes
+ *
+ * This function iterates over the LPT pnodes and creates a hash over them.
+ * Returns %0 on success or a negative error code otherwise.
+ */
+int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash)
+{
+       struct ubifs_nnode *nnode, *nn;
+       struct ubifs_cnode *cnode;
+       struct shash_desc *desc;
+       int iip = 0, i;
+       int bufsiz = max_t(int, c->nnode_sz, c->pnode_sz);
+       void *buf;
+       int err;
+
+       if (!ubifs_authenticated(c))
+               return 0;
+
+       desc = ubifs_hash_get_desc(c);
+       if (IS_ERR(desc))
+               return PTR_ERR(desc);
+
+       buf = kmalloc(bufsiz, GFP_NOFS);
+       if (!buf) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       if (!c->nroot) {
+               err = ubifs_read_nnode(c, NULL, 0);
+               if (err)
+                       goto out;
+       }
+
+       cnode = (struct ubifs_cnode *)c->nroot;
+
+       while (cnode) {
+               nnode = cnode->parent;
+               nn = (struct ubifs_nnode *)cnode;
+               if (cnode->level > 1) {
+                       while (iip < UBIFS_LPT_FANOUT) {
+                               if (nn->nbranch[iip].lnum == 0) {
+                                       /* Go right */
+                                       iip++;
+                                       continue;
+                               }
+
+                               nnode = ubifs_get_nnode(c, nn, iip);
+                               if (IS_ERR(nnode)) {
+                                       err = PTR_ERR(nnode);
+                                       goto out;
+                               }
+
+                               /* Go down */
+                               iip = 0;
+                               cnode = (struct ubifs_cnode *)nnode;
+                               break;
+                       }
+                       if (iip < UBIFS_LPT_FANOUT)
+                               continue;
+               } else {
+                       struct ubifs_pnode *pnode;
+
+                       for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
+                               if (nn->nbranch[i].lnum == 0)
+                                       continue;
+                               pnode = ubifs_get_pnode(c, nn, i);
+                               if (IS_ERR(pnode)) {
+                                       err = PTR_ERR(pnode);
+                                       goto out;
+                               }
+
+                               ubifs_pack_pnode(c, buf, pnode);
+                               err = ubifs_shash_update(c, desc, buf,
+                                                        c->pnode_sz);
+                               if (err)
+                                       goto out;
+                       }
+               }
+               /* Go up and to the right */
+               iip = cnode->iip + 1;
+               cnode = (struct ubifs_cnode *)nnode;
+       }
+
+       err = ubifs_shash_final(c, desc, hash);
+out:
+       kfree(desc);
+       kfree(buf);
+
+       return err;
+}
+
+/**
+ * lpt_check_hash - check the hash of the LPT.
+ * @c: UBIFS file-system description object
+ *
+ * This function calculates a hash over all pnodes in the LPT and compares
+ * it with the hash stored in the master node. Returns %0 on success and a
+ * negative error code on failure.
+ */
+static int lpt_check_hash(struct ubifs_info *c)
+{
+       int err;
+       u8 hash[UBIFS_HASH_ARR_SZ];
+
+       if (!ubifs_authenticated(c))
+               return 0;
+
+       err = ubifs_lpt_calc_hash(c, hash);
+       if (err)
+               return err;
+
+       if (ubifs_check_hash(c, c->mst_node->hash_lpt, hash)) {
+               err = -EPERM;
+               ubifs_err(c, "Failed to authenticate LPT");
+       } else {
+               err = 0;
+       }
+
+       return err;
+}
+
 /**
  * lpt_init_rd - initialize the LPT for reading.
  * @c: UBIFS file-system description object
@@ -1660,6 +1820,10 @@ static int lpt_init_rd(struct ubifs_info *c)
        if (err)
                return err;
 
+       err = lpt_check_hash(c);
+       if (err)
+               return err;
+
        dbg_lp("space_bits %d", c->space_bits);
        dbg_lp("lpt_lnum_bits %d", c->lpt_lnum_bits);
        dbg_lp("lpt_offs_bits %d", c->lpt_offs_bits);
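
ubifs_lpt_calc_hash() walks the nnode tree iteratively (via cnode->parent and the iip cursor) to keep kernel stack usage flat while hashing every pnode in index order. For readability, the same traversal in recursive form — a hypothetical rewrite, not the code the patch adds:

        static int lpt_hash_subtree(struct ubifs_info *c, struct shash_desc *desc,
                                    struct ubifs_nnode *nn, void *buf)
        {
                int i, err;

                for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
                        if (nn->nbranch[i].lnum == 0)
                                continue;       /* empty branch, skip */

                        if (nn->level > 1) {
                                struct ubifs_nnode *child;

                                child = ubifs_get_nnode(c, nn, i);
                                if (IS_ERR(child))
                                        return PTR_ERR(child);
                                err = lpt_hash_subtree(c, desc, child, buf);
                        } else {
                                struct ubifs_pnode *pnode;

                                pnode = ubifs_get_pnode(c, nn, i);
                                if (IS_ERR(pnode))
                                        return PTR_ERR(pnode);
                                /* hash the packed on-flash form of the pnode */
                                ubifs_pack_pnode(c, buf, pnode);
                                err = ubifs_shash_update(c, desc, buf,
                                                         c->pnode_sz);
                        }
                        if (err)
                                return err;
                }
                return 0;
        }

Starting this as lpt_hash_subtree(c, desc, c->nroot, buf) visits the pnodes in the same order ubifs_create_dflt_lpt() hashed them, which is what allows lpt_check_hash() to compare the result against the hash in the master node.
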
index 7ce30994bbbac726c30a16262cbfd4411243026e..1f88caffdf2acfae85113a064c68f0e3fbefb9a5 100644 (file)
@@ -618,38 +618,6 @@ static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
        return ubifs_get_pnode(c, nnode, iip);
 }
 
-/**
- * pnode_lookup - lookup a pnode in the LPT.
- * @c: UBIFS file-system description object
- * @i: pnode number (0 to (main_lebs - 1) / UBIFS_LPT_FANOUT))
- *
- * This function returns a pointer to the pnode on success or a negative
- * error code on failure.
- */
-static struct ubifs_pnode *pnode_lookup(struct ubifs_info *c, int i)
-{
-       int err, h, iip, shft;
-       struct ubifs_nnode *nnode;
-
-       if (!c->nroot) {
-               err = ubifs_read_nnode(c, NULL, 0);
-               if (err)
-                       return ERR_PTR(err);
-       }
-       i <<= UBIFS_LPT_FANOUT_SHIFT;
-       nnode = c->nroot;
-       shft = c->lpt_hght * UBIFS_LPT_FANOUT_SHIFT;
-       for (h = 1; h < c->lpt_hght; h++) {
-               iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
-               shft -= UBIFS_LPT_FANOUT_SHIFT;
-               nnode = ubifs_get_nnode(c, nnode, iip);
-               if (IS_ERR(nnode))
-                       return ERR_CAST(nnode);
-       }
-       iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1));
-       return ubifs_get_pnode(c, nnode, iip);
-}
-
 /**
  * add_pnode_dirt - add dirty space to LPT LEB properties.
  * @c: UBIFS file-system description object
@@ -702,7 +670,7 @@ static int make_tree_dirty(struct ubifs_info *c)
 {
        struct ubifs_pnode *pnode;
 
-       pnode = pnode_lookup(c, 0);
+       pnode = ubifs_pnode_lookup(c, 0);
        if (IS_ERR(pnode))
                return PTR_ERR(pnode);
 
@@ -956,7 +924,7 @@ static int make_pnode_dirty(struct ubifs_info *c, int node_num, int lnum,
        struct ubifs_pnode *pnode;
        struct ubifs_nbranch *branch;
 
-       pnode = pnode_lookup(c, node_num);
+       pnode = ubifs_pnode_lookup(c, node_num);
        if (IS_ERR(pnode))
                return PTR_ERR(pnode);
        branch = &pnode->parent->nbranch[pnode->iip];
@@ -1279,6 +1247,10 @@ int ubifs_lpt_start_commit(struct ubifs_info *c)
        if (err)
                goto out;
 
+       err = ubifs_lpt_calc_hash(c, c->mst_node->hash_lpt);
+       if (err)
+               goto out;
+
        /* Copy the LPT's own lprops for end commit to write */
        memcpy(c->ltab_cmt, c->ltab,
               sizeof(struct ubifs_lpt_lprops) * c->lpt_lebs);
@@ -1558,7 +1530,7 @@ static int dbg_is_pnode_dirty(struct ubifs_info *c, int lnum, int offs)
                struct ubifs_nbranch *branch;
 
                cond_resched();
-               pnode = pnode_lookup(c, i);
+               pnode = ubifs_pnode_lookup(c, i);
                if (IS_ERR(pnode))
                        return PTR_ERR(pnode);
                branch = &pnode->parent->nbranch[pnode->iip];
@@ -1710,7 +1682,7 @@ int dbg_check_ltab(struct ubifs_info *c)
        for (i = 0; i < cnt; i++) {
                struct ubifs_pnode *pnode;
 
-               pnode = pnode_lookup(c, i);
+               pnode = ubifs_pnode_lookup(c, i);
                if (IS_ERR(pnode))
                        return PTR_ERR(pnode);
                cond_resched();
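
Taken together with the master.c hunks below, the commit path now seals the LPT: the hash is computed from the in-memory pnodes at the start of the LPT commit and parked in the master node, and it becomes trustworthy only once the master node itself is written with its HMAC. A hypothetical condensation of that ordering (len/lnum/offs stand in for the master-node write parameters):

        static int commit_seal_lpt(struct ubifs_info *c, int len, int lnum,
                                   int offs)
        {
                int err;

                /* commit start: hash the LPT as it is about to be written */
                err = ubifs_lpt_calc_hash(c, c->mst_node->hash_lpt);
                if (err)
                        return err;

                /* master write: the hash becomes covered by the node HMAC */
                ubifs_copy_hash(c, c->zroot.hash, c->mst_node->hash_root_idx);
                return ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs,
                                             offsetof(struct ubifs_mst_node,
                                                      hmac));
        }
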
index 9df4a41bba523d6a58fb3369cd90225a1ad8617a..5ea51bbd14c7f29cb4501d6ea3a9f5863e8517d2 100644 (file)
 
 #include "ubifs.h"
 
+/**
+ * ubifs_compare_master_node - compare two UBIFS master nodes
+ * @c: UBIFS file-system description object
+ * @m1: the first node
+ * @m2: the second node
+ *
+ * This function compares two UBIFS master nodes. Returns 0 if they are equal
+ * and nonzero if not.
+ */
+int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2)
+{
+       int ret;
+       int behind;
+       int hmac_offs = offsetof(struct ubifs_mst_node, hmac);
+
+       /*
+        * Do not compare the common node header since the sequence number and
+        * hence the CRC are different.
+        */
+       ret = memcmp(m1 + UBIFS_CH_SZ, m2 + UBIFS_CH_SZ,
+                    hmac_offs - UBIFS_CH_SZ);
+       if (ret)
+               return ret;
+
+       /*
+        * Also skip the embedded HMAC, which must likewise differ
+        * because of the different common node header.
+        */
+       behind = hmac_offs + UBIFS_MAX_HMAC_LEN;
+
+       if (UBIFS_MST_NODE_SZ > behind)
+               return memcmp(m1 + behind, m2 + behind, UBIFS_MST_NODE_SZ - behind);
+
+       return 0;
+}
+
 /**
  * scan_for_master - search the valid master node.
  * @c: UBIFS file-system description object
@@ -37,7 +73,7 @@ static int scan_for_master(struct ubifs_info *c)
 {
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;
-       int lnum, offs = 0, nodes_cnt;
+       int lnum, offs = 0, nodes_cnt, err;
 
        lnum = UBIFS_MST_LNUM;
 
@@ -69,12 +105,23 @@ static int scan_for_master(struct ubifs_info *c)
                goto out_dump;
        if (snod->offs != offs)
                goto out;
-       if (memcmp((void *)c->mst_node + UBIFS_CH_SZ,
-                  (void *)snod->node + UBIFS_CH_SZ,
-                  UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
+       if (ubifs_compare_master_node(c, c->mst_node, snod->node))
                goto out;
+
        c->mst_offs = offs;
        ubifs_scan_destroy(sleb);
+
+       if (!ubifs_authenticated(c))
+               return 0;
+
+       err = ubifs_node_verify_hmac(c, c->mst_node,
+                                    sizeof(struct ubifs_mst_node),
+                                    offsetof(struct ubifs_mst_node, hmac));
+       if (err) {
+               ubifs_err(c, "Failed to verify master node HMAC");
+               return -EPERM;
+       }
+
        return 0;
 
 out:
@@ -305,6 +352,8 @@ int ubifs_read_master(struct ubifs_info *c)
        c->lst.total_dead  = le64_to_cpu(c->mst_node->total_dead);
        c->lst.total_dark  = le64_to_cpu(c->mst_node->total_dark);
 
+       ubifs_copy_hash(c, c->mst_node->hash_root_idx, c->zroot.hash);
+
        c->calc_idx_sz = c->bi.old_idx_sz;
 
        if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS))
@@ -378,7 +427,9 @@ int ubifs_write_master(struct ubifs_info *c)
        c->mst_offs = offs;
        c->mst_node->highest_inum = cpu_to_le64(c->highest_inum);
 
-       err = ubifs_write_node(c, c->mst_node, len, lnum, offs);
+       ubifs_copy_hash(c, c->zroot.hash, c->mst_node->hash_root_idx);
+       err = ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs,
+                                   offsetof(struct ubifs_mst_node, hmac));
        if (err)
                return err;
 
@@ -389,7 +440,8 @@ int ubifs_write_master(struct ubifs_info *c)
                if (err)
                        return err;
        }
-       err = ubifs_write_node(c, c->mst_node, len, lnum, offs);
+       err = ubifs_write_node_hmac(c, c->mst_node, len, lnum, offs,
+                                   offsetof(struct ubifs_mst_node, hmac));
 
        return err;
 }
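
ubifs_node_verify_hmac() is called here with the node length and the offset of the embedded hmac field, a convention shared with the superblock code later in this patch, so one helper can serve every HMAC-bearing node type. A minimal sketch of a caller following that contract (hypothetical helper name):

        static int verify_mst(struct ubifs_info *c, struct ubifs_mst_node *mst)
        {
                if (!ubifs_authenticated(c))
                        return 0;       /* nothing to verify without a key */

                if (ubifs_node_verify_hmac(c, mst, sizeof(*mst),
                                           offsetof(struct ubifs_mst_node,
                                                    hmac)))
                        return -EPERM;  /* same policy as scan_for_master() */

                return 0;
        }

Returning -EPERM on mismatch matches the policy scan_for_master() adopts above: a master node that fails authentication is treated as an access violation, not as garbage to recover from.
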
index 21d35d7dd975ca32c6d638e448b2e1632db1aa35..6f87237fdbf43bddc1455df20edf91abfca60caa 100644 (file)
@@ -197,7 +197,8 @@ static inline int ubifs_return_leb(struct ubifs_info *c, int lnum)
  */
 static inline int ubifs_idx_node_sz(const struct ubifs_info *c, int child_cnt)
 {
-       return UBIFS_IDX_NODE_SZ + (UBIFS_BRANCH_SZ + c->key_len) * child_cnt;
+       return UBIFS_IDX_NODE_SZ + (UBIFS_BRANCH_SZ + c->key_len + c->hash_len)
+                                  * child_cnt;
 }
 
 /**
@@ -212,7 +213,7 @@ struct ubifs_branch *ubifs_idx_branch(const struct ubifs_info *c,
                                      int bnum)
 {
        return (struct ubifs_branch *)((void *)idx->branches +
-                                      (UBIFS_BRANCH_SZ + c->key_len) * bnum);
+                       (UBIFS_BRANCH_SZ + c->key_len + c->hash_len) * bnum);
 }
 
 /**
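
Both helpers must agree on the per-branch stride, which now includes the hash stored next to each key. Worked example: with 8-byte keys (c->key_len == 8), SHA-256 hashes (c->hash_len == 32) and the 12-byte struct ubifs_branch, each branch occupies 12 + 8 + 32 == 52 bytes, so ubifs_idx_node_sz(c, 8) is UBIFS_IDX_NODE_SZ + 416. A hypothetical helper making the shared stride explicit:

        /* Hypothetical refactor: a single definition of the branch stride. */
        static inline int ubifs_branch_stride(const struct ubifs_info *c)
        {
                /* 12-byte struct ubifs_branch + key + (possibly empty) hash */
                return UBIFS_BRANCH_SZ + c->key_len + c->hash_len;
        }

On an unauthenticated filesystem c->hash_len is expected to be zero, so both functions reduce to the pre-patch layout.
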
index 984e30e83c0b3eead78fdc61190026c78cd15d51..8526b7ec47077675fad73e1a74f48adeb250d212 100644 (file)
@@ -212,7 +212,10 @@ static int write_rcvrd_mst_node(struct ubifs_info *c,
        save_flags = mst->flags;
        mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
 
-       ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
+       err = ubifs_prepare_node_hmac(c, mst, UBIFS_MST_NODE_SZ,
+                                     offsetof(struct ubifs_mst_node, hmac), 1);
+       if (err)
+               goto out;
        err = ubifs_leb_change(c, lnum, mst, sz);
        if (err)
                goto out;
@@ -264,9 +267,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
                        offs2 = (void *)mst2 - buf2;
                        if (offs1 == offs2) {
                                /* Same offset, so must be the same */
-                               if (memcmp((void *)mst1 + UBIFS_CH_SZ,
-                                          (void *)mst2 + UBIFS_CH_SZ,
-                                          UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
+                               if (ubifs_compare_master_node(c, mst1, mst2))
                                        goto out_err;
                                mst = mst1;
                        } else if (offs2 + sz == offs1) {
@@ -1461,16 +1462,82 @@ out:
        return err;
 }
 
+/**
+ * inode_fix_size - fix inode size
+ * @c: UBIFS file-system description object
+ * @e: inode size information for recovery
+ *
+ * Returns %0 on success and a negative error code on failure.
+ */
+static int inode_fix_size(struct ubifs_info *c, struct size_entry *e)
+{
+       struct inode *inode;
+       struct ubifs_inode *ui;
+       int err;
+
+       if (c->ro_mount)
+               ubifs_assert(c, !e->inode);
+
+       if (e->inode) {
+               /* Remounting rw, pick up inode we stored earlier */
+               inode = e->inode;
+       } else {
+               inode = ubifs_iget(c->vfs_sb, e->inum);
+               if (IS_ERR(inode))
+                       return PTR_ERR(inode);
+
+               if (inode->i_size >= e->d_size) {
+                       /*
+                        * The original inode in the index already has a size
+                        * big enough, nothing to do
+                        */
+                       iput(inode);
+                       return 0;
+               }
+
+               dbg_rcvry("ino %lu size %lld -> %lld",
+                         (unsigned long)e->inum,
+                         inode->i_size, e->d_size);
+
+               ui = ubifs_inode(inode);
+
+               inode->i_size = e->d_size;
+               ui->ui_size = e->d_size;
+               ui->synced_i_size = e->d_size;
+
+               e->inode = inode;
+       }
+
+       /*
+        * In readonly mode just keep the inode pinned in memory until we go
+        * readwrite. In readwrite mode write the inode to the journal with the
+        * fixed size.
+        */
+       if (c->ro_mount)
+               return 0;
+
+       err = ubifs_jnl_write_inode(c, inode);
+
+       iput(inode);
+
+       if (err)
+               return err;
+
+       rb_erase(&e->rb, &c->size_tree);
+       kfree(e);
+
+       return 0;
+}
+
 /**
  * ubifs_recover_size - recover inode size.
  * @c: UBIFS file-system description object
+ * @in_place: if true, do an in-place size fixup
  *
  * This function attempts to fix inode size discrepancies identified by the
  * 'ubifs_recover_size_accum()' function.
  *
 * This function returns %0 on success and a negative error code on failure.
  */
-int ubifs_recover_size(struct ubifs_info *c)
+int ubifs_recover_size(struct ubifs_info *c, bool in_place)
 {
        struct rb_node *this = rb_first(&c->size_tree);
 
@@ -1479,6 +1546,9 @@ int ubifs_recover_size(struct ubifs_info *c)
                int err;
 
                e = rb_entry(this, struct size_entry, rb);
+
+               this = rb_next(this);
+
                if (!e->exists) {
                        union ubifs_key key;
 
@@ -1502,40 +1572,26 @@ int ubifs_recover_size(struct ubifs_info *c)
                }
 
                if (e->exists && e->i_size < e->d_size) {
-                       if (c->ro_mount) {
-                               /* Fix the inode size and pin it in memory */
-                               struct inode *inode;
-                               struct ubifs_inode *ui;
-
-                               ubifs_assert(c, !e->inode);
-
-                               inode = ubifs_iget(c->vfs_sb, e->inum);
-                               if (IS_ERR(inode))
-                                       return PTR_ERR(inode);
-
-                               ui = ubifs_inode(inode);
-                               if (inode->i_size < e->d_size) {
-                                       dbg_rcvry("ino %lu size %lld -> %lld",
-                                                 (unsigned long)e->inum,
-                                                 inode->i_size, e->d_size);
-                                       inode->i_size = e->d_size;
-                                       ui->ui_size = e->d_size;
-                                       ui->synced_i_size = e->d_size;
-                                       e->inode = inode;
-                                       this = rb_next(this);
-                                       continue;
-                               }
-                               iput(inode);
-                       } else {
-                               /* Fix the size in place */
+                       ubifs_assert(c, !(c->ro_mount && in_place));
+
+                       /*
+                        * We found data beyond the recorded inode size, so
+                        * fix up the inode size.
+                        */
+
+                       if (in_place) {
                                err = fix_size_in_place(c, e);
                                if (err)
                                        return err;
                                iput(e->inode);
+                       } else {
+                               err = inode_fix_size(c, e);
+                               if (err)
+                                       return err;
+                               continue;
                        }
                }
 
-               this = rb_next(this);
                rb_erase(&e->rb, &c->size_tree);
                kfree(e);
        }
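
The in_place split exists because of authentication: fix_size_in_place() rewrites an inode node directly on the flash, which cannot be covered by the journal's hash chain, so authenticated mounts must defer the fixup and push it through the journal (inode_fix_size() calling ubifs_jnl_write_inode()) once recovery has produced a working journal again. The mount sequence in super.c (later in this patch) therefore takes this shape, in sketch form:

        /* Sketch of the mount-time ordering (mirrors the super.c hunk below). */
        static int recover_sizes_two_phase(struct ubifs_info *c)
        {
                int err;

                if (!ubifs_authenticated(c)) {
                        /* old behaviour: patch node sizes in place on flash */
                        err = ubifs_recover_size(c, true);
                        if (err)
                                return err;
                }

                err = ubifs_rcvry_gc_commit(c);
                if (err)
                        return err;

                if (ubifs_authenticated(c)) {
                        /* the journal works again: route fixups through it */
                        err = ubifs_recover_size(c, false);
                        if (err)
                                return err;
                }

                return 0;
        }
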
index 4844538eb92626212394839e4734111866fbc085..75f961c4c0449505aaac3f085aa80b00acb0b8ab 100644 (file)
@@ -34,6 +34,8 @@
 
 #include "ubifs.h"
 #include <linux/list_sort.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
 
 /**
  * struct replay_entry - replay list entry.
@@ -56,6 +58,7 @@ struct replay_entry {
        int lnum;
        int offs;
        int len;
+       u8 hash[UBIFS_HASH_ARR_SZ];
        unsigned int deletion:1;
        unsigned long long sqnum;
        struct list_head list;
@@ -228,7 +231,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
                        err = ubifs_tnc_remove_nm(c, &r->key, &r->nm);
                else
                        err = ubifs_tnc_add_nm(c, &r->key, r->lnum, r->offs,
-                                              r->len, &r->nm);
+                                              r->len, r->hash, &r->nm);
        } else {
                if (r->deletion)
                        switch (key_type(c, &r->key)) {
@@ -248,7 +251,7 @@ static int apply_replay_entry(struct ubifs_info *c, struct replay_entry *r)
                        }
                else
                        err = ubifs_tnc_add(c, &r->key, r->lnum, r->offs,
-                                           r->len);
+                                           r->len, r->hash);
                if (err)
                        return err;
 
@@ -352,9 +355,9 @@ static void destroy_replay_list(struct ubifs_info *c)
  * in case of success and a negative error code in case of failure.
  */
 static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
-                      union ubifs_key *key, unsigned long long sqnum,
-                      int deletion, int *used, loff_t old_size,
-                      loff_t new_size)
+                      const u8 *hash, union ubifs_key *key,
+                      unsigned long long sqnum, int deletion, int *used,
+                      loff_t old_size, loff_t new_size)
 {
        struct replay_entry *r;
 
@@ -372,6 +375,7 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
        r->lnum = lnum;
        r->offs = offs;
        r->len = len;
+       ubifs_copy_hash(c, hash, r->hash);
        r->deletion = !!deletion;
        r->sqnum = sqnum;
        key_copy(c, key, &r->key);
@@ -400,8 +404,9 @@ static int insert_node(struct ubifs_info *c, int lnum, int offs, int len,
  * negative error code in case of failure.
  */
 static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
-                      union ubifs_key *key, const char *name, int nlen,
-                      unsigned long long sqnum, int deletion, int *used)
+                      const u8 *hash, union ubifs_key *key,
+                      const char *name, int nlen, unsigned long long sqnum,
+                      int deletion, int *used)
 {
        struct replay_entry *r;
        char *nbuf;
@@ -425,6 +430,7 @@ static int insert_dent(struct ubifs_info *c, int lnum, int offs, int len,
        r->lnum = lnum;
        r->offs = offs;
        r->len = len;
+       ubifs_copy_hash(c, hash, r->hash);
        r->deletion = !!deletion;
        r->sqnum = sqnum;
        key_copy(c, key, &r->key);
@@ -527,6 +533,105 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
        return data == 0xFFFFFFFF;
 }
 
+/**
+ * authenticate_sleb - authenticate one scan LEB
+ * @c: UBIFS file-system description object
+ * @sleb: the scan LEB to authenticate
+ * @log_hash: the running hash of the log up to this bud
+ * @is_last: if true, this is the last LEB
+ *
+ * This function iterates over the nodes of a single bud LEB, authenticating
+ * them against the authentication nodes in this LEB. Authentication nodes
+ * are written after some buds and contain an HMAC covering the
+ * authentication node itself and the buds between the last authentication
+ * node and the current authentication node. It can happen that the last
+ * buds cannot be authenticated because a power cut happened when some nodes
+ * were written but not the corresponding authentication node. This function
+ * returns the number of nodes that could be authenticated or a negative
+ * error code.
+ */
+static int authenticate_sleb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+                            struct shash_desc *log_hash, int is_last)
+{
+       int n_not_auth = 0;
+       struct ubifs_scan_node *snod;
+       int n_nodes = 0;
+       int err;
+       u8 *hash, *hmac;
+
+       if (!ubifs_authenticated(c))
+               return sleb->nodes_cnt;
+
+       hash = kmalloc(crypto_shash_descsize(c->hash_tfm), GFP_NOFS);
+       hmac = kmalloc(c->hmac_desc_len, GFP_NOFS);
+       if (!hash || !hmac) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       list_for_each_entry(snod, &sleb->nodes, list) {
+
+               n_nodes++;
+
+               if (snod->type == UBIFS_AUTH_NODE) {
+                       struct ubifs_auth_node *auth = snod->node;
+                       SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
+                       SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);
+
+                       hash_desc->tfm = c->hash_tfm;
+                       hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+                       ubifs_shash_copy_state(c, log_hash, hash_desc);
+                       err = crypto_shash_final(hash_desc, hash);
+                       if (err)
+                               goto out;
+
+                       hmac_desc->tfm = c->hmac_tfm;
+                       hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+                       err = crypto_shash_digest(hmac_desc, hash, c->hash_len,
+                                                 hmac);
+                       if (err)
+                               goto out;
+
+                       err = ubifs_check_hmac(c, auth->hmac, hmac);
+                       if (err) {
+                               err = -EPERM;
+                               goto out;
+                       }
+                       n_not_auth = 0;
+               } else {
+                       err = crypto_shash_update(log_hash, snod->node,
+                                                 snod->len);
+                       if (err)
+                               goto out;
+                       n_not_auth++;
+               }
+       }
+
+       /*
+        * A powercut can happen when some nodes were written, but not yet
+        * the corresponding authentication node. This may only happen on
+        * the last bud though.
+        */
+       if (n_not_auth) {
+               if (is_last) {
+                       dbg_mnt("%d unauthenticated nodes found on LEB %d, ignoring them",
+                               n_not_auth, sleb->lnum);
+                       err = 0;
+               } else {
+                       dbg_mnt("%d unauthenticated nodes found on non-last LEB %d",
+                               n_not_auth, sleb->lnum);
+                       err = -EPERM;
+               }
+       } else {
+               err = 0;
+       }
+out:
+       kfree(hash);
+       kfree(hmac);
+
+       return err ? err : n_nodes - n_not_auth;
+}
+
 /**
  * replay_bud - replay a bud logical eraseblock.
  * @c: UBIFS file-system description object
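
The verification performed per authentication node condenses to a two-stage MAC: snapshot the running hash, finalize it, then HMAC the digest; only the outer HMAC is stored on flash. As a freestanding sketch (same crypto calls as in authenticate_sleb() above, hypothetical helper name, error handling trimmed):

        static int check_auth_node(struct ubifs_info *c,
                                   struct shash_desc *log_hash,
                                   const struct ubifs_auth_node *auth)
        {
                u8 hash[UBIFS_HASH_ARR_SZ];
                u8 hmac[UBIFS_MAX_HMAC_LEN];
                int err;
                SHASH_DESC_ON_STACK(hash_desc, c->hash_tfm);
                SHASH_DESC_ON_STACK(hmac_desc, c->hmac_tfm);

                /* finalize a snapshot of the running hash, keep the original */
                hash_desc->tfm = c->hash_tfm;
                hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
                err = ubifs_shash_copy_state(c, log_hash, hash_desc);
                if (err)
                        return err;
                err = crypto_shash_final(hash_desc, hash);
                if (err)
                        return err;

                /* the node stores only the outer HMAC of that digest */
                hmac_desc->tfm = c->hmac_tfm;
                hmac_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
                err = crypto_shash_digest(hmac_desc, hash, c->hash_len, hmac);
                if (err)
                        return err;

                /* mismatch: the bud chain was tampered with or truncated */
                return ubifs_check_hmac(c, auth->hmac, hmac) ? -EPERM : 0;
        }
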
@@ -540,6 +645,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
 {
        int is_last = is_last_bud(c, b->bud);
        int err = 0, used = 0, lnum = b->bud->lnum, offs = b->bud->start;
+       int n_nodes, n = 0;
        struct ubifs_scan_leb *sleb;
        struct ubifs_scan_node *snod;
 
@@ -559,6 +665,15 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
        if (IS_ERR(sleb))
                return PTR_ERR(sleb);
 
+       n_nodes = authenticate_sleb(c, sleb, b->bud->log_hash, is_last);
+       if (n_nodes < 0) {
+               err = n_nodes;
+               goto out;
+       }
+
+       err = ubifs_shash_copy_state(c, b->bud->log_hash,
+                                    c->jheads[b->bud->jhead].log_hash);
+       if (err)
+               goto out;
+
        /*
         * The bud does not have to start from offset zero - the beginning of
         * the 'lnum' LEB may contain previously committed data. One of the
@@ -582,6 +697,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
         */
 
        list_for_each_entry(snod, &sleb->nodes, list) {
+               u8 hash[UBIFS_HASH_ARR_SZ];
                int deletion = 0;
 
                cond_resched();
@@ -591,6 +707,8 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                        goto out_dump;
                }
 
+               err = ubifs_node_calc_hash(c, snod->node, hash);
+               if (err)
+                       goto out;
+
                if (snod->sqnum > c->max_sqnum)
                        c->max_sqnum = snod->sqnum;
 
@@ -602,7 +720,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
 
                        if (le32_to_cpu(ino->nlink) == 0)
                                deletion = 1;
-                       err = insert_node(c, lnum, snod->offs, snod->len,
+                       err = insert_node(c, lnum, snod->offs, snod->len, hash,
                                          &snod->key, snod->sqnum, deletion,
                                          &used, 0, new_size);
                        break;
@@ -614,7 +732,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                                          key_block(c, &snod->key) *
                                          UBIFS_BLOCK_SIZE;
 
-                       err = insert_node(c, lnum, snod->offs, snod->len,
+                       err = insert_node(c, lnum, snod->offs, snod->len, hash,
                                          &snod->key, snod->sqnum, deletion,
                                          &used, 0, new_size);
                        break;
@@ -628,7 +746,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                        if (err)
                                goto out_dump;
 
-                       err = insert_dent(c, lnum, snod->offs, snod->len,
+                       err = insert_dent(c, lnum, snod->offs, snod->len, hash,
                                          &snod->key, dent->name,
                                          le16_to_cpu(dent->nlen), snod->sqnum,
                                          !le64_to_cpu(dent->inum), &used);
@@ -654,11 +772,13 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                         * functions which expect nodes to have keys.
                         */
                        trun_key_init(c, &key, le32_to_cpu(trun->inum));
-                       err = insert_node(c, lnum, snod->offs, snod->len,
+                       err = insert_node(c, lnum, snod->offs, snod->len, hash,
                                          &key, snod->sqnum, 1, &used,
                                          old_size, new_size);
                        break;
                }
+               case UBIFS_AUTH_NODE:
+                       break;
                default:
                        ubifs_err(c, "unexpected node type %d in bud LEB %d:%d",
                                  snod->type, lnum, snod->offs);
@@ -667,6 +787,10 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
                }
                if (err)
                        goto out;
+
+               n++;
+               if (n == n_nodes)
+                       break;
        }
 
        ubifs_assert(c, ubifs_search_bud(c, lnum));
@@ -745,6 +869,7 @@ static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
 {
        struct ubifs_bud *bud;
        struct bud_entry *b;
+       int err;
 
        dbg_mnt("add replay bud LEB %d:%d, head %d", lnum, offs, jhead);
 
@@ -754,13 +879,21 @@ static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
 
        b = kmalloc(sizeof(struct bud_entry), GFP_KERNEL);
        if (!b) {
-               kfree(bud);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto out;
        }
 
        bud->lnum = lnum;
        bud->start = offs;
        bud->jhead = jhead;
+       bud->log_hash = ubifs_hash_get_desc(c);
+       if (IS_ERR(bud->log_hash)) {
+               err = PTR_ERR(bud->log_hash);
+               goto out;
+       }
+
+       ubifs_shash_copy_state(c, c->log_hash, bud->log_hash);
+
        ubifs_add_bud(c, bud);
 
        b->bud = bud;
@@ -768,6 +901,11 @@ static int add_replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
        list_add_tail(&b->list, &c->replay_buds);
 
        return 0;
+out:
+       kfree(bud);
+       kfree(b);
+
+       return err;
 }
 
 /**
@@ -873,6 +1011,14 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
 
                c->cs_sqnum = le64_to_cpu(node->ch.sqnum);
                dbg_mnt("commit start sqnum %llu", c->cs_sqnum);
+
+               err = ubifs_shash_init(c, c->log_hash);
+               if (err)
+                       goto out;
+
+               err = ubifs_shash_update(c, c->log_hash, node, UBIFS_CS_NODE_SZ);
+               if (err < 0)
+                       goto out;
        }
 
        if (snod->sqnum < c->cs_sqnum) {
@@ -920,6 +1066,11 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
                        if (err)
                                goto out_dump;
 
+                       err = ubifs_shash_update(c, c->log_hash, ref,
+                                                UBIFS_REF_NODE_SZ);
+                       if (err)
+                               goto out;
+
                        err = add_replay_bud(c, le32_to_cpu(ref->lnum),
                                             le32_to_cpu(ref->offs),
                                             le32_to_cpu(ref->jhead),
index bf17f58908ff95bc63efbe48dd5ede204a28ac78..75a69dd26d6eafa609a01a2f9969c22c37512346 100644 (file)
@@ -82,10 +82,13 @@ static int create_default_filesystem(struct ubifs_info *c)
        int err, tmp, jnl_lebs, log_lebs, max_buds, main_lebs, main_first;
        int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0;
        int min_leb_cnt = UBIFS_MIN_LEB_CNT;
+       int idx_node_size;
        long long tmp64, main_bytes;
        __le64 tmp_le64;
        __le32 tmp_le32;
        struct timespec64 ts;
+       u8 hash[UBIFS_HASH_ARR_SZ];
+       u8 hash_lpt[UBIFS_HASH_ARR_SZ];
 
        /* Some functions called from here depend on the @c->key_len field */
        c->key_len = UBIFS_SK_LEN;
@@ -147,7 +150,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        c->lsave_cnt = DEFAULT_LSAVE_CNT;
        c->max_leb_cnt = c->leb_cnt;
        err = ubifs_create_dflt_lpt(c, &main_lebs, lpt_first, &lpt_lebs,
-                                   &big_lpt);
+                                   &big_lpt, hash_lpt);
        if (err)
                return err;
 
@@ -156,17 +159,35 @@ static int create_default_filesystem(struct ubifs_info *c)
 
        main_first = c->leb_cnt - main_lebs;
 
+       sup = kzalloc(ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size), GFP_KERNEL);
+       mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
+       idx_node_size = ubifs_idx_node_sz(c, 1);
+       idx = kzalloc(ALIGN(idx_node_size, c->min_io_size), GFP_KERNEL);
+       ino = kzalloc(ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size), GFP_KERNEL);
+       cs = kzalloc(ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size), GFP_KERNEL);
+
+       if (!sup || !mst || !idx || !ino || !cs) {
+               err = -ENOMEM;
+               goto out;
+       }
+
        /* Create default superblock */
-       tmp = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
-       sup = kzalloc(tmp, GFP_KERNEL);
-       if (!sup)
-               return -ENOMEM;
 
        tmp64 = (long long)max_buds * c->leb_size;
        if (big_lpt)
                sup_flags |= UBIFS_FLG_BIGLPT;
        sup_flags |= UBIFS_FLG_DOUBLE_HASH;
 
+       if (ubifs_authenticated(c)) {
+               sup_flags |= UBIFS_FLG_AUTHENTICATION;
+               sup->hash_algo = cpu_to_le16(c->auth_hash_algo);
+               err = ubifs_hmac_wkm(c, sup->hmac_wkm);
+               if (err)
+                       goto out;
+       } else {
+               sup->hash_algo = cpu_to_le16(0xffff);
+       }
+
        sup->ch.node_type  = UBIFS_SB_NODE;
        sup->key_hash      = UBIFS_KEY_HASH_R5;
        sup->flags         = cpu_to_le32(sup_flags);
@@ -197,17 +218,9 @@ static int create_default_filesystem(struct ubifs_info *c)
        sup->rp_size = cpu_to_le64(tmp64);
        sup->ro_compat_version = cpu_to_le32(UBIFS_RO_COMPAT_VERSION);
 
-       err = ubifs_write_node(c, sup, UBIFS_SB_NODE_SZ, 0, 0);
-       kfree(sup);
-       if (err)
-               return err;
-
        dbg_gen("default superblock created at LEB 0:0");
 
        /* Create default master node */
-       mst = kzalloc(c->mst_node_alsz, GFP_KERNEL);
-       if (!mst)
-               return -ENOMEM;
 
        mst->ch.node_type = UBIFS_MST_NODE;
        mst->log_lnum     = cpu_to_le32(UBIFS_LOG_LNUM);
@@ -233,6 +246,7 @@ static int create_default_filesystem(struct ubifs_info *c)
        mst->empty_lebs   = cpu_to_le32(main_lebs - 2);
        mst->idx_lebs     = cpu_to_le32(1);
        mst->leb_cnt      = cpu_to_le32(c->leb_cnt);
+       ubifs_copy_hash(c, hash_lpt, mst->hash_lpt);
 
        /* Calculate lprops statistics */
        tmp64 = main_bytes;
@@ -253,24 +267,9 @@ static int create_default_filesystem(struct ubifs_info *c)
 
        mst->total_used = cpu_to_le64(UBIFS_INO_NODE_SZ);
 
-       err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0);
-       if (err) {
-               kfree(mst);
-               return err;
-       }
-       err = ubifs_write_node(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1,
-                              0);
-       kfree(mst);
-       if (err)
-               return err;
-
        dbg_gen("default master node created at LEB %d:0", UBIFS_MST_LNUM);
 
        /* Create the root indexing node */
-       tmp = ubifs_idx_node_sz(c, 1);
-       idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL);
-       if (!idx)
-               return -ENOMEM;
 
        c->key_fmt = UBIFS_SIMPLE_KEY_FMT;
        c->key_hash = key_r5_hash;
@@ -282,19 +281,11 @@ static int create_default_filesystem(struct ubifs_info *c)
        key_write_idx(c, &key, &br->key);
        br->lnum = cpu_to_le32(main_first + DEFAULT_DATA_LEB);
        br->len  = cpu_to_le32(UBIFS_INO_NODE_SZ);
-       err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0);
-       kfree(idx);
-       if (err)
-               return err;
 
        dbg_gen("default root indexing node created LEB %d:0",
                main_first + DEFAULT_IDX_LEB);
 
        /* Create default root inode */
-       tmp = ALIGN(UBIFS_INO_NODE_SZ, c->min_io_size);
-       ino = kzalloc(tmp, GFP_KERNEL);
-       if (!ino)
-               return -ENOMEM;
 
        ino_key_init_flash(c, &ino->key, UBIFS_ROOT_INO);
        ino->ch.node_type = UBIFS_INO_NODE;
@@ -317,12 +308,6 @@ static int create_default_filesystem(struct ubifs_info *c)
        /* Set compression enabled by default */
        ino->flags = cpu_to_le32(UBIFS_COMPR_FL);
 
-       err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ,
-                              main_first + DEFAULT_DATA_LEB, 0);
-       kfree(ino);
-       if (err)
-               return err;
-
        dbg_gen("root inode created at LEB %d:0",
                main_first + DEFAULT_DATA_LEB);
 
@@ -331,19 +316,54 @@ static int create_default_filesystem(struct ubifs_info *c)
         * always the case during normal file-system operation. Write a fake
         * commit start node to the log.
         */
-       tmp = ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size);
-       cs = kzalloc(tmp, GFP_KERNEL);
-       if (!cs)
-               return -ENOMEM;
 
        cs->ch.node_type = UBIFS_CS_NODE;
+
+       err = ubifs_write_node_hmac(c, sup, UBIFS_SB_NODE_SZ, 0, 0,
+                                   offsetof(struct ubifs_sb_node, hmac));
+       if (err)
+               goto out;
+
+       err = ubifs_write_node(c, ino, UBIFS_INO_NODE_SZ,
+                              main_first + DEFAULT_DATA_LEB, 0);
+       if (err)
+               goto out;
+
+       err = ubifs_node_calc_hash(c, ino, hash);
+       if (err)
+               goto out;
+
+       ubifs_copy_hash(c, hash, ubifs_branch_hash(c, br));
+
+       err = ubifs_write_node(c, idx, idx_node_size,
+                              main_first + DEFAULT_IDX_LEB, 0);
+       if (err)
+               goto out;
+
+       err = ubifs_node_calc_hash(c, idx, hash);
+       if (err)
+               goto out;
+
+       ubifs_copy_hash(c, hash, mst->hash_root_idx);
+
+       err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM, 0,
+               offsetof(struct ubifs_mst_node, hmac));
+       if (err)
+               goto out;
+
+       err = ubifs_write_node_hmac(c, mst, UBIFS_MST_NODE_SZ, UBIFS_MST_LNUM + 1,
+                              0, offsetof(struct ubifs_mst_node, hmac));
+       if (err)
+               goto out;
+
        err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0);
-       kfree(cs);
        if (err)
-               return err;
+               goto out;
 
        ubifs_msg(c, "default file-system created");
-       return 0;
+
+       err = 0;
+out:
+       kfree(sup);
+       kfree(mst);
+       kfree(idx);
+       kfree(ino);
+       kfree(cs);
+
+       return err;
 }
 
 /**
@@ -498,7 +518,7 @@ failed:
  * code. Note, the user of this function is responsible of kfree()'ing the
  * returned superblock buffer.
  */
-struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
+static struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
 {
        struct ubifs_sb_node *sup;
        int err;
@@ -517,6 +537,65 @@ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
        return sup;
 }
 
+static int authenticate_sb_node(struct ubifs_info *c,
+                               const struct ubifs_sb_node *sup)
+{
+       unsigned int sup_flags = le32_to_cpu(sup->flags);
+       u8 hmac_wkm[UBIFS_HMAC_ARR_SZ];
+       int authenticated = !!(sup_flags & UBIFS_FLG_AUTHENTICATION);
+       int hash_algo;
+       int err;
+
+       if (c->authenticated && !authenticated) {
+               ubifs_err(c, "authenticated FS forced, but found FS without authentication");
+               return -EINVAL;
+       }
+
+       if (!c->authenticated && authenticated) {
+               ubifs_err(c, "authenticated FS found, but no key given");
+               return -EINVAL;
+       }
+
+       ubifs_msg(c, "Mounting in %sauthenticated mode",
+                 c->authenticated ? "" : "un");
+
+       if (!c->authenticated)
+               return 0;
+
+       if (!IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION))
+               return -EOPNOTSUPP;
+
+       hash_algo = le16_to_cpu(sup->hash_algo);
+       if (hash_algo >= HASH_ALGO__LAST) {
+               ubifs_err(c, "superblock uses unknown hash algo %d",
+                         hash_algo);
+               return -EINVAL;
+       }
+
+       if (strcmp(hash_algo_name[hash_algo], c->auth_hash_name)) {
+               ubifs_err(c, "This filesystem uses %s for hashing, but %s is specified",
+                         hash_algo_name[hash_algo], c->auth_hash_name);
+               return -EINVAL;
+       }
+
+       err = ubifs_hmac_wkm(c, hmac_wkm);
+       if (err)
+               return err;
+
+       if (ubifs_check_hmac(c, hmac_wkm, sup->hmac_wkm)) {
+               ubifs_err(c, "provided key does not fit");
+               return -ENOKEY;
+       }
+
+       err = ubifs_node_verify_hmac(c, sup, sizeof(*sup),
+                                    offsetof(struct ubifs_sb_node, hmac));
+       if (err)
+               ubifs_err(c, "Failed to authenticate superblock: %d", err);
+
+       return err;
+}
+
 /**
  * ubifs_write_sb_node - write superblock node.
  * @c: UBIFS file-system description object
@@ -527,8 +606,13 @@ struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c)
 int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup)
 {
        int len = ALIGN(UBIFS_SB_NODE_SZ, c->min_io_size);
+       int err;
+
+       err = ubifs_prepare_node_hmac(c, sup, UBIFS_SB_NODE_SZ,
+                                     offsetof(struct ubifs_sb_node, hmac), 1);
+       if (err)
+               return err;
 
-       ubifs_prepare_node(c, sup, UBIFS_SB_NODE_SZ, 1);
        return ubifs_leb_change(c, UBIFS_SB_LNUM, sup, len);
 }
 
@@ -555,6 +639,8 @@ int ubifs_read_superblock(struct ubifs_info *c)
        if (IS_ERR(sup))
                return PTR_ERR(sup);
 
+       c->sup_node = sup;
+
        c->fmt_version = le32_to_cpu(sup->fmt_version);
        c->ro_compat_version = le32_to_cpu(sup->ro_compat_version);
 
@@ -603,7 +689,7 @@ int ubifs_read_superblock(struct ubifs_info *c)
                c->key_hash = key_test_hash;
                c->key_hash_type = UBIFS_KEY_HASH_TEST;
                break;
-       };
+       }
 
        c->key_fmt = sup->key_fmt;
 
@@ -640,6 +726,10 @@ int ubifs_read_superblock(struct ubifs_info *c)
        c->double_hash = !!(sup_flags & UBIFS_FLG_DOUBLE_HASH);
        c->encrypted = !!(sup_flags & UBIFS_FLG_ENCRYPTION);
 
+       err = authenticate_sb_node(c, sup);
+       if (err)
+               goto out;
+
        if ((sup_flags & ~UBIFS_FLG_MASK) != 0) {
                ubifs_err(c, "Unknown feature flags found: %#x",
                          sup_flags & ~UBIFS_FLG_MASK);
@@ -686,7 +776,6 @@ int ubifs_read_superblock(struct ubifs_info *c)
 
        err = validate_sb(c, sup);
 out:
-       kfree(sup);
        return err;
 }
 
@@ -815,7 +904,7 @@ out:
 int ubifs_fixup_free_space(struct ubifs_info *c)
 {
        int err;
-       struct ubifs_sb_node *sup;
+       struct ubifs_sb_node *sup = c->sup_node;
 
        ubifs_assert(c, c->space_fixup);
        ubifs_assert(c, !c->ro_mount);
@@ -826,16 +915,11 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
        if (err)
                return err;
 
-       sup = ubifs_read_sb_node(c);
-       if (IS_ERR(sup))
-               return PTR_ERR(sup);
-
        /* Free-space fixup is no longer required */
        c->space_fixup = 0;
        sup->flags &= cpu_to_le32(~UBIFS_FLG_SPACE_FIXUP);
 
        err = ubifs_write_sb_node(c, sup);
-       kfree(sup);
        if (err)
                return err;
 
@@ -846,7 +930,7 @@ int ubifs_fixup_free_space(struct ubifs_info *c)
 int ubifs_enable_encryption(struct ubifs_info *c)
 {
        int err;
-       struct ubifs_sb_node *sup;
+       struct ubifs_sb_node *sup = c->sup_node;
 
        if (c->encrypted)
                return 0;
@@ -859,16 +943,11 @@ int ubifs_enable_encryption(struct ubifs_info *c)
                return -EINVAL;
        }
 
-       sup = ubifs_read_sb_node(c);
-       if (IS_ERR(sup))
-               return PTR_ERR(sup);
-
        sup->flags |= cpu_to_le32(UBIFS_FLG_ENCRYPTION);
 
        err = ubifs_write_sb_node(c, sup);
        if (!err)
                c->encrypted = 1;
-       kfree(sup);
 
        return err;
 }
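
The hmac_wkm ("well-known message") field gives mount a cheap wrong-key check before any real node is verified: the formatting tool and the kernel both HMAC a fixed message with their key, and the results must match, otherwise authenticate_sb_node() fails with -ENOKEY ("provided key does not fit"). A plausible sketch of ubifs_hmac_wkm(), assuming it simply MACs a constant string (the actual constant lives in fs/ubifs/auth.c and is not shown in this diff):

        /* hypothetical sketch; "UBIFS" as the well-known message is an
         * assumption, not taken from this patch */
        static const char wkm[] = "UBIFS";

        int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac)
        {
                int err;
                SHASH_DESC_ON_STACK(desc, c->hmac_tfm);

                if (!ubifs_authenticated(c))
                        return 0;

                desc->tfm = c->hmac_tfm;
                err = crypto_shash_init(desc);
                if (err)
                        return err;
                err = crypto_shash_update(desc, wkm, sizeof(wkm));
                if (err)
                        return err;
                return crypto_shash_final(desc, hmac);
        }
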
index fec62e9dfbe6a6c639d7f61879bf21ac84ef4a6c..1fac1133dadd291b7491527802e70782ab8544e8 100644 (file)
@@ -579,6 +579,9 @@ static int init_constants_early(struct ubifs_info *c)
        c->ranges[UBIFS_REF_NODE].len  = UBIFS_REF_NODE_SZ;
        c->ranges[UBIFS_TRUN_NODE].len = UBIFS_TRUN_NODE_SZ;
        c->ranges[UBIFS_CS_NODE].len   = UBIFS_CS_NODE_SZ;
+       c->ranges[UBIFS_AUTH_NODE].min_len = UBIFS_AUTH_NODE_SZ;
+       c->ranges[UBIFS_AUTH_NODE].max_len = UBIFS_AUTH_NODE_SZ +
+                               UBIFS_MAX_HMAC_LEN;
 
        c->ranges[UBIFS_INO_NODE].min_len  = UBIFS_INO_NODE_SZ;
        c->ranges[UBIFS_INO_NODE].max_len  = UBIFS_MAX_INO_NODE_SZ;
@@ -816,6 +819,9 @@ static int alloc_wbufs(struct ubifs_info *c)
                c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
                c->jheads[i].wbuf.jhead = i;
                c->jheads[i].grouped = 1;
+               c->jheads[i].log_hash = ubifs_hash_get_desc(c);
+               if (IS_ERR(c->jheads[i].log_hash)) {
+                       err = PTR_ERR(c->jheads[i].log_hash);
+                       goto out;
+               }
        }
 
        /*
@@ -826,6 +832,12 @@ static int alloc_wbufs(struct ubifs_info *c)
        c->jheads[GCHD].grouped = 0;
 
        return 0;
+
+out:
+       while (i--)
+               kfree(c->jheads[i].log_hash);
+
+       return err;
 }
 
 /**
@@ -840,6 +852,7 @@ static void free_wbufs(struct ubifs_info *c)
                for (i = 0; i < c->jhead_cnt; i++) {
                        kfree(c->jheads[i].wbuf.buf);
                        kfree(c->jheads[i].wbuf.inodes);
+                       kfree(c->jheads[i].log_hash);
                }
                kfree(c->jheads);
                c->jheads = NULL;
@@ -924,6 +937,8 @@ static int check_volume_empty(struct ubifs_info *c)
  * Opt_no_chk_data_crc: do not check CRCs when reading data nodes
  * Opt_override_compr: override default compressor
  * Opt_assert: set ubifs_assert() action
+ * Opt_auth_key: The key name used for authentication
+ * Opt_auth_hash_name: The hash type used for authentication
  * Opt_err: just end of array marker
  */
 enum {
@@ -935,6 +950,8 @@ enum {
        Opt_no_chk_data_crc,
        Opt_override_compr,
        Opt_assert,
+       Opt_auth_key,
+       Opt_auth_hash_name,
        Opt_ignore,
        Opt_err,
 };
@@ -947,6 +964,8 @@ static const match_table_t tokens = {
        {Opt_chk_data_crc, "chk_data_crc"},
        {Opt_no_chk_data_crc, "no_chk_data_crc"},
        {Opt_override_compr, "compr=%s"},
+       {Opt_auth_key, "auth_key=%s"},
+       {Opt_auth_hash_name, "auth_hash_name=%s"},
        {Opt_ignore, "ubi=%s"},
        {Opt_ignore, "vol=%s"},
        {Opt_assert, "assert=%s"},
@@ -1070,6 +1089,16 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
                        kfree(act);
                        break;
                }
+               case Opt_auth_key:
+                       c->auth_key_name = kstrdup(args[0].from, GFP_KERNEL);
+                       if (!c->auth_key_name)
+                               return -ENOMEM;
+                       break;
+               case Opt_auth_hash_name:
+                       c->auth_hash_name = kstrdup(args[0].from, GFP_KERNEL);
+                       if (!c->auth_hash_name)
+                               return -ENOMEM;
+                       break;
                case Opt_ignore:
                        break;
                default:
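
A minimal userspace sketch (not part of the patch; volume, mount point, and
key description are all hypothetical) of how the two new mount options might
be passed via mount(2):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		if (mount("ubi0:rootfs", "/mnt", "ubifs", 0,
			  "auth_key=ubifs:mykey,auth_hash_name=sha256")) {
			perror("mount");
			return 1;
		}
		return 0;
	}
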
@@ -1249,6 +1278,19 @@ static int mount_ubifs(struct ubifs_info *c)
 
        c->mounting = 1;
 
+       if (c->auth_key_name) {
+               if (IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION)) {
+                       err = ubifs_init_authentication(c);
+                       if (err)
+                               goto out_free;
+               } else {
+                       ubifs_err(c, "auth_key_name given, but UBIFS is built without authentication support");
+                       err = -EINVAL;
+                       goto out_free;
+               }
+       }
+
        err = ubifs_read_superblock(c);
        if (err)
                goto out_free;
@@ -1367,12 +1409,21 @@ static int mount_ubifs(struct ubifs_info *c)
                }
 
                if (c->need_recovery) {
-                       err = ubifs_recover_size(c);
-                       if (err)
-                               goto out_orphans;
+                       if (!ubifs_authenticated(c)) {
+                               err = ubifs_recover_size(c, true);
+                               if (err)
+                                       goto out_orphans;
+                       }
+
                        err = ubifs_rcvry_gc_commit(c);
                        if (err)
                                goto out_orphans;
+
+                       if (ubifs_authenticated(c)) {
+                               err = ubifs_recover_size(c, false);
+                               if (err)
+                                       goto out_orphans;
+                       }
                } else {
                        err = take_gc_lnum(c);
                        if (err)
@@ -1391,7 +1442,7 @@ static int mount_ubifs(struct ubifs_info *c)
                if (err)
                        goto out_orphans;
        } else if (c->need_recovery) {
-               err = ubifs_recover_size(c);
+               err = ubifs_recover_size(c, false);
                if (err)
                        goto out_orphans;
        } else {
@@ -1557,7 +1608,10 @@ static void ubifs_umount(struct ubifs_info *c)
        free_wbufs(c);
        free_orphans(c);
        ubifs_lpt_free(c, 0);
+       ubifs_exit_authentication(c);
 
+       kfree(c->auth_key_name);
+       kfree(c->auth_hash_name);
        kfree(c->cbuf);
        kfree(c->rcvrd_mst_node);
        kfree(c->mst_node);
@@ -1605,16 +1659,10 @@ static int ubifs_remount_rw(struct ubifs_info *c)
                goto out;
 
        if (c->old_leb_cnt != c->leb_cnt) {
-               struct ubifs_sb_node *sup;
+               struct ubifs_sb_node *sup = c->sup_node;
 
-               sup = ubifs_read_sb_node(c);
-               if (IS_ERR(sup)) {
-                       err = PTR_ERR(sup);
-                       goto out;
-               }
                sup->leb_cnt = cpu_to_le32(c->leb_cnt);
                err = ubifs_write_sb_node(c, sup);
-               kfree(sup);
                if (err)
                        goto out;
        }
@@ -1624,9 +1672,11 @@ static int ubifs_remount_rw(struct ubifs_info *c)
                err = ubifs_write_rcvrd_mst_node(c);
                if (err)
                        goto out;
-               err = ubifs_recover_size(c);
-               if (err)
-                       goto out;
+               if (!ubifs_authenticated(c)) {
+                       err = ubifs_recover_size(c, true);
+                       if (err)
+                               goto out;
+               }
                err = ubifs_clean_lebs(c, c->sbuf);
                if (err)
                        goto out;
@@ -1692,10 +1742,19 @@ static int ubifs_remount_rw(struct ubifs_info *c)
                        goto out;
        }
 
-       if (c->need_recovery)
+       if (c->need_recovery) {
                err = ubifs_rcvry_gc_commit(c);
-       else
+               if (err)
+                       goto out;
+
+               if (ubifs_authenticated(c)) {
+                       err = ubifs_recover_size(c, false);
+                       if (err)
+                               goto out;
+               }
+       } else {
                err = ubifs_leb_unmap(c, c->gc_lnum);
+       }
        if (err)
                goto out;
 
index bf416e5127431aae03a6211bbd89ab069bb7ceda..25572ffea1634964785aee3b05590cf439dddd6d 100644 (file)
@@ -35,7 +35,7 @@
 #include "ubifs.h"
 
 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
-                        int len, int lnum, int offs);
+                        struct ubifs_zbranch *zbr);
 static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
                              struct ubifs_zbranch *zbr, void *node);
 
@@ -433,9 +433,7 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
  * @c: UBIFS file-system description object
  * @buf: buffer to read to
  * @type: node type
- * @len: node length (not aligned)
- * @lnum: LEB number of node to read
- * @offs: offset of node to read
+ * @zbr: the zbranch describing the node to read
  *
  * This function tries to read a node of known type and length, checks it and
  * stores it in @buf. This function returns %1 if a node is present and %0 if
@@ -453,8 +451,11 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
  * journal nodes may potentially be corrupted, so checking is required.
  */
 static int try_read_node(const struct ubifs_info *c, void *buf, int type,
-                        int len, int lnum, int offs)
+                        struct ubifs_zbranch *zbr)
 {
+       int len = zbr->len;
+       int lnum = zbr->lnum;
+       int offs = zbr->offs;
        int err, node_len;
        struct ubifs_ch *ch = buf;
        uint32_t crc, node_crc;
@@ -487,6 +488,12 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
        if (crc != node_crc)
                return 0;
 
+       err = ubifs_node_check_hash(c, buf, zbr->hash);
+       if (err) {
+               ubifs_bad_hash(c, buf, zbr->hash, lnum, offs);
+               return 0;
+       }
+
        return 1;
 }
 
@@ -507,8 +514,7 @@ static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
 
        dbg_tnck(key, "LEB %d:%d, key ", zbr->lnum, zbr->offs);
 
-       ret = try_read_node(c, node, key_type(c, key), zbr->len, zbr->lnum,
-                           zbr->offs);
+       ret = try_read_node(c, node, key_type(c, key), zbr);
        if (ret == 1) {
                union ubifs_key node_key;
                struct ubifs_dent_node *dent = node;
@@ -1713,6 +1719,12 @@ static int validate_data_node(struct ubifs_info *c, void *buf,
                goto out;
        }
 
+       err = ubifs_node_check_hash(c, buf, zbr->hash);
+       if (err) {
+               ubifs_bad_hash(c, buf, zbr->hash, zbr->lnum, zbr->offs);
+               return err;
+       }
+
        len = le32_to_cpu(ch->len);
        if (len != zbr->len) {
                ubifs_err(c, "bad node length %d, expected %d", len, zbr->len);
@@ -2260,13 +2272,14 @@ do_split:
  * @lnum: LEB number of node
  * @offs: node offset
  * @len: node length
+ * @hash: The hash over the node
  *
  * This function adds a node with key @key to TNC. The node may be new or it may
  * obsolete some existing one. Returns %0 on success or negative error code on
  * failure.
  */
 int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
-                 int offs, int len)
+                 int offs, int len, const u8 *hash)
 {
        int found, n, err = 0;
        struct ubifs_znode *znode;
@@ -2281,6 +2294,7 @@ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
                zbr.lnum = lnum;
                zbr.offs = offs;
                zbr.len = len;
+               ubifs_copy_hash(c, hash, zbr.hash);
                key_copy(c, key, &zbr.key);
                err = tnc_insert(c, znode, &zbr, n + 1);
        } else if (found == 1) {
@@ -2291,6 +2305,7 @@ int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
                zbr->lnum = lnum;
                zbr->offs = offs;
                zbr->len = len;
+               ubifs_copy_hash(c, hash, zbr->hash);
        } else
                err = found;
        if (!err)
@@ -2392,13 +2407,14 @@ out_unlock:
  * @lnum: LEB number of node
  * @offs: node offset
  * @len: node length
+ * @hash: The hash over the node
  * @nm: node name
  *
  * This is the same as 'ubifs_tnc_add()' but it should be used with keys which
  * may have collisions, like directory entry keys.
  */
 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
-                    int lnum, int offs, int len,
+                    int lnum, int offs, int len, const u8 *hash,
                     const struct fscrypt_name *nm)
 {
        int found, n, err = 0;
@@ -2441,6 +2457,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
                        zbr->lnum = lnum;
                        zbr->offs = offs;
                        zbr->len = len;
+                       ubifs_copy_hash(c, hash, zbr->hash);
                        goto out_unlock;
                }
        }
@@ -2452,6 +2469,7 @@ int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
                zbr.lnum = lnum;
                zbr.offs = offs;
                zbr.len = len;
+               ubifs_copy_hash(c, hash, zbr.hash);
                key_copy(c, key, &zbr.key);
                err = tnc_insert(c, znode, &zbr, n + 1);
                if (err)
index dba87d09b989375cea13b168bd43977cd43e60ef..dbcd2c350b65223209b95deeb657ae991e8181ab 100644 (file)
@@ -38,6 +38,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
                         struct ubifs_znode *znode, int lnum, int offs, int len)
 {
        struct ubifs_znode *zp;
+       u8 hash[UBIFS_HASH_ARR_SZ];
        int i, err;
 
        /* Make index node */
@@ -52,6 +53,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
                br->lnum = cpu_to_le32(zbr->lnum);
                br->offs = cpu_to_le32(zbr->offs);
                br->len = cpu_to_le32(zbr->len);
+               ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
                if (!zbr->lnum || !zbr->len) {
                        ubifs_err(c, "bad ref in znode");
                        ubifs_dump_znode(c, znode);
@@ -62,6 +64,7 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
                }
        }
        ubifs_prepare_node(c, idx, len, 0);
+       ubifs_node_calc_hash(c, idx, hash);
 
        znode->lnum = lnum;
        znode->offs = offs;
@@ -78,10 +81,12 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
                zbr->lnum = lnum;
                zbr->offs = offs;
                zbr->len = len;
+               ubifs_copy_hash(c, hash, zbr->hash);
        } else {
                c->zroot.lnum = lnum;
                c->zroot.offs = offs;
                c->zroot.len = len;
+               ubifs_copy_hash(c, hash, c->zroot.hash);
        }
        c->calc_idx_sz += ALIGN(len, 8);
 
@@ -647,6 +652,8 @@ static int get_znodes_to_commit(struct ubifs_info *c)
                        znode->cnext = c->cnext;
                        break;
                }
+               znode->cparent = znode->parent;
+               znode->ciip = znode->iip;
                znode->cnext = cnext;
                znode = cnext;
                cnt += 1;
@@ -840,6 +847,8 @@ static int write_index(struct ubifs_info *c)
        }
 
        while (1) {
+               u8 hash[UBIFS_HASH_ARR_SZ];
+
                cond_resched();
 
                znode = cnext;
@@ -857,6 +866,7 @@ static int write_index(struct ubifs_info *c)
                        br->lnum = cpu_to_le32(zbr->lnum);
                        br->offs = cpu_to_le32(zbr->offs);
                        br->len = cpu_to_le32(zbr->len);
+                       ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
                        if (!zbr->lnum || !zbr->len) {
                                ubifs_err(c, "bad ref in znode");
                                ubifs_dump_znode(c, znode);
@@ -868,6 +878,23 @@ static int write_index(struct ubifs_info *c)
                }
                len = ubifs_idx_node_sz(c, znode->child_cnt);
                ubifs_prepare_node(c, idx, len, 0);
+               ubifs_node_calc_hash(c, idx, hash);
+
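+		/*
+		 * The TNC mutex protects @zroot and the zbranch arrays, so
+		 * hold it while publishing the fresh index node hash into
+		 * the commit-time parent and the live parent (or @zroot).
+		 */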
+               mutex_lock(&c->tnc_mutex);
+
+               if (znode->cparent)
+                       ubifs_copy_hash(c, hash,
+                                       znode->cparent->zbranch[znode->ciip].hash);
+
+               if (znode->parent) {
+                       if (!ubifs_zn_obsolete(znode))
+                               ubifs_copy_hash(c, hash,
+                                       znode->parent->zbranch[znode->iip].hash);
+               } else {
+                       ubifs_copy_hash(c, hash, c->zroot.hash);
+               }
+
+               mutex_unlock(&c->tnc_mutex);
 
                /* Determine the index node position */
                if (lnum == -1) {
index d90ee01076a9ea5559dd8afb26cffc4e61a5cb81..d1815e9590071940059186923bb4dc21e8bb9cce 100644 (file)
@@ -265,9 +265,7 @@ long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
 /**
  * read_znode - read an indexing node from flash and fill znode.
  * @c: UBIFS file-system description object
- * @lnum: LEB of the indexing node to read
- * @offs: node offset
- * @len: node length
+ * @zzbr: the zbranch describing the node to read
  * @znode: znode to read to
  *
  * This function reads an indexing node from the flash media and fills znode
@@ -276,9 +274,12 @@ long ubifs_destroy_tnc_subtree(const struct ubifs_info *c,
  * is wrong with it, this function prints complaint messages and returns
  * %-EINVAL.
  */
-static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
+static int read_znode(struct ubifs_info *c, struct ubifs_zbranch *zzbr,
                      struct ubifs_znode *znode)
 {
+       int lnum = zzbr->lnum;
+       int offs = zzbr->offs;
+       int len = zzbr->len;
        int i, err, type, cmp;
        struct ubifs_idx_node *idx;
 
@@ -292,6 +293,12 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
                return err;
        }
 
+       err = ubifs_node_check_hash(c, idx, zzbr->hash);
+       if (err) {
+               ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs);
+               return err;
+       }
+
        znode->child_cnt = le16_to_cpu(idx->child_cnt);
        znode->level = le16_to_cpu(idx->level);
 
@@ -308,13 +315,14 @@ static int read_znode(struct ubifs_info *c, int lnum, int offs, int len,
        }
 
        for (i = 0; i < znode->child_cnt; i++) {
-               const struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
+               struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
                struct ubifs_zbranch *zbr = &znode->zbranch[i];
 
                key_read(c, &br->key, &zbr->key);
                zbr->lnum = le32_to_cpu(br->lnum);
                zbr->offs = le32_to_cpu(br->offs);
                zbr->len  = le32_to_cpu(br->len);
+               ubifs_copy_hash(c, ubifs_branch_hash(c, br), zbr->hash);
                zbr->znode = NULL;
 
                /* Validate branch */
@@ -425,7 +433,7 @@ struct ubifs_znode *ubifs_load_znode(struct ubifs_info *c,
        if (!znode)
                return ERR_PTR(-ENOMEM);
 
-       err = read_znode(c, zbr->lnum, zbr->offs, zbr->len, znode);
+       err = read_znode(c, zbr, znode);
        if (err)
                goto out;
 
@@ -496,5 +504,11 @@ int ubifs_tnc_read_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                return -EINVAL;
        }
 
+       err = ubifs_node_check_hash(c, node, zbr->hash);
+       if (err) {
+               ubifs_bad_hash(c, node, zbr->hash, zbr->lnum, zbr->offs);
+               return err;
+       }
+
        return 0;
 }
index e8c23c9d4f4a7c67aef3f94092038e8fa58fbfb0..8b7c1844014ffbbeb1dcc93ce4be8dc1cbc65742 100644 (file)
@@ -286,6 +286,7 @@ enum {
 #define UBIFS_IDX_NODE_SZ  sizeof(struct ubifs_idx_node)
 #define UBIFS_CS_NODE_SZ   sizeof(struct ubifs_cs_node)
 #define UBIFS_ORPH_NODE_SZ sizeof(struct ubifs_orph_node)
+#define UBIFS_AUTH_NODE_SZ sizeof(struct ubifs_auth_node)
 /* Extended attribute entry nodes are identical to directory entry nodes */
 #define UBIFS_XENT_NODE_SZ UBIFS_DENT_NODE_SZ
 /* Only this does not have to be multiple of 8 bytes */
@@ -300,6 +301,12 @@ enum {
 /* The largest UBIFS node */
 #define UBIFS_MAX_NODE_SZ UBIFS_MAX_INO_NODE_SZ
 
+/* The maximum size of a hash, enough for sha512 */
+#define UBIFS_MAX_HASH_LEN 64
+
+/* The maximum size of an HMAC, enough for hmac(sha512) */
+#define UBIFS_MAX_HMAC_LEN 64
+
 /*
  * xattr name of UBIFS encryption context, we don't use a prefix
  * nor a long name to not waste space on the flash.
@@ -365,6 +372,7 @@ enum {
  * UBIFS_IDX_NODE: index node
  * UBIFS_CS_NODE: commit start node
  * UBIFS_ORPH_NODE: orphan node
+ * UBIFS_AUTH_NODE: authentication node
  * UBIFS_NODE_TYPES_CNT: count of supported node types
  *
  * Note, we index arrays by these numbers, so keep them low and contiguous.
@@ -384,6 +392,7 @@ enum {
        UBIFS_IDX_NODE,
        UBIFS_CS_NODE,
        UBIFS_ORPH_NODE,
+       UBIFS_AUTH_NODE,
        UBIFS_NODE_TYPES_CNT,
 };
 
@@ -421,15 +430,19 @@ enum {
  * UBIFS_FLG_DOUBLE_HASH: store a 32bit cookie in directory entry nodes to
  *                       support 64bit cookies for lookups by hash
  * UBIFS_FLG_ENCRYPTION: this filesystem contains encrypted files
+ * UBIFS_FLG_AUTHENTICATION: this filesystem contains hashes for authentication
  */
 enum {
        UBIFS_FLG_BIGLPT = 0x02,
        UBIFS_FLG_SPACE_FIXUP = 0x04,
        UBIFS_FLG_DOUBLE_HASH = 0x08,
        UBIFS_FLG_ENCRYPTION = 0x10,
+       UBIFS_FLG_AUTHENTICATION = 0x20,
 };
 
-#define UBIFS_FLG_MASK (UBIFS_FLG_BIGLPT|UBIFS_FLG_SPACE_FIXUP|UBIFS_FLG_DOUBLE_HASH|UBIFS_FLG_ENCRYPTION)
+#define UBIFS_FLG_MASK (UBIFS_FLG_BIGLPT | UBIFS_FLG_SPACE_FIXUP | \
+               UBIFS_FLG_DOUBLE_HASH | UBIFS_FLG_ENCRYPTION | \
+               UBIFS_FLG_AUTHENTICATION)
 
 /**
  * struct ubifs_ch - common header node.
@@ -633,6 +646,10 @@ struct ubifs_pad_node {
  * @time_gran: time granularity in nanoseconds
  * @uuid: UUID generated when the file system image was created
  * @ro_compat_version: UBIFS R/O compatibility version
+ * @hmac: HMAC to authenticate the superblock node
+ * @hmac_wkm: HMAC of a well known message (the string "UBIFS") as a convenience
+ *            to the user to check if the correct key is passed.
+ * @hash_algo: The hash algo used for this filesystem (one of enum hash_algo)
  */
 struct ubifs_sb_node {
        struct ubifs_ch ch;
@@ -660,7 +677,10 @@ struct ubifs_sb_node {
        __le32 time_gran;
        __u8 uuid[16];
        __le32 ro_compat_version;
-       __u8 padding2[3968];
+       __u8 hmac[UBIFS_MAX_HMAC_LEN];
+       __u8 hmac_wkm[UBIFS_MAX_HMAC_LEN];
+       __le16 hash_algo;
+       __u8 padding2[3838];
 } __packed;
 
 /**
@@ -695,6 +715,9 @@ struct ubifs_sb_node {
  * @empty_lebs: number of empty logical eraseblocks
  * @idx_lebs: number of indexing logical eraseblocks
  * @leb_cnt: count of LEBs used by file-system
+ * @hash_root_idx: the hash of the root index node
+ * @hash_lpt: the hash of the LPT
+ * @hmac: HMAC to authenticate the master node
  * @padding: reserved for future, zeroes
  */
 struct ubifs_mst_node {
@@ -727,7 +750,10 @@ struct ubifs_mst_node {
        __le32 empty_lebs;
        __le32 idx_lebs;
        __le32 leb_cnt;
-       __u8 padding[344];
+       __u8 hash_root_idx[UBIFS_MAX_HASH_LEN];
+       __u8 hash_lpt[UBIFS_MAX_HASH_LEN];
+       __u8 hmac[UBIFS_MAX_HMAC_LEN];
+       __u8 padding[152];
 } __packed;
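
A quick arithmetic check (a sketch, not part of the patch): in both structs the
new fields are carved out of the old padding, so the on-flash node sizes do not
change.

	/* superblock node: 64 (hmac) + 64 (hmac_wkm) + 2 (hash_algo) taken
	 * from the old 3968-byte padding2 leaves 3838 bytes */
	_Static_assert(64 + 64 + 2 + 3838 == 3968, "sb node layout");

	/* master node: 3 x 64 (hash_root_idx, hash_lpt, hmac) taken from
	 * the old 344-byte padding leaves 152 bytes */
	_Static_assert(64 + 64 + 64 + 152 == 344, "mst node layout");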
 
 /**
@@ -746,12 +772,26 @@ struct ubifs_ref_node {
        __u8 padding[28];
 } __packed;
 
+/**
+ * struct ubifs_auth_node - node for authenticating other nodes
+ * @ch: common header
+ * @hmac: The HMAC
+ */
+struct ubifs_auth_node {
+       struct ubifs_ch ch;
+       __u8 hmac[];
+} __packed;
+
 /**
  * struct ubifs_branch - key/reference/length branch
  * @lnum: LEB number of the target node
  * @offs: offset within @lnum
  * @len: target node length
  * @key: key
+ *
+ * In an authenticated UBIFS we have the hash of the referenced node after @key.
+ * This can't be added to the struct type definition because @key is a
+ * dynamically sized element already.
  */
 struct ubifs_branch {
        __le32 lnum;
index 4368cde476b0fa4360ab942790fb70ff62937530..38401adaa00d63a2f975e9900143d7a061336826 100644 (file)
@@ -39,6 +39,9 @@
 #include <linux/security.h>
 #include <linux/xattr.h>
 #include <linux/random.h>
+#include <crypto/hash_info.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
 
 #define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_UBIFS_FS_ENCRYPTION)
 #include <linux/fscrypt.h>
 /* Maximum number of data nodes to bulk-read */
 #define UBIFS_MAX_BULK_READ 32
 
+#ifdef CONFIG_UBIFS_FS_AUTHENTICATION
+#define UBIFS_HASH_ARR_SZ UBIFS_MAX_HASH_LEN
+#define UBIFS_HMAC_ARR_SZ UBIFS_MAX_HMAC_LEN
+#else
+#define UBIFS_HASH_ARR_SZ 0
+#define UBIFS_HMAC_ARR_SZ 0
+#endif
+
 /*
  * Lockdep classes for UBIFS inode @ui_mutex.
  */
@@ -706,6 +717,7 @@ struct ubifs_wbuf {
  * @jhead: journal head number this bud belongs to
  * @list: link in the list buds belonging to the same journal head
  * @rb: link in the tree of all buds
+ * @log_hash: the log hash from the commit start node up to this bud
  */
 struct ubifs_bud {
        int lnum;
@@ -713,6 +725,7 @@ struct ubifs_bud {
        int jhead;
        struct list_head list;
        struct rb_node rb;
+       struct shash_desc *log_hash;
 };
 
 /**
@@ -720,6 +733,7 @@ struct ubifs_bud {
  * @wbuf: head's write-buffer
  * @buds_list: list of bud LEBs belonging to this journal head
  * @grouped: non-zero if UBIFS groups nodes when writing to this journal head
+ * @log_hash: the log hash from the commit start node up to this journal head
  *
  * Note, the @buds list is protected by the @c->buds_lock.
  */
@@ -727,6 +741,7 @@ struct ubifs_jhead {
        struct ubifs_wbuf wbuf;
        struct list_head buds_list;
        unsigned int grouped:1;
+       struct shash_desc *log_hash;
 };
 
 /**
@@ -736,6 +751,7 @@ struct ubifs_jhead {
  * @lnum: LEB number of the target node (indexing node or data node)
  * @offs: target node offset within @lnum
  * @len: target node length
+ * @hash: the hash of the target node
  */
 struct ubifs_zbranch {
        union ubifs_key key;
@@ -746,12 +762,15 @@ struct ubifs_zbranch {
        int lnum;
        int offs;
        int len;
+       u8 hash[UBIFS_HASH_ARR_SZ];
 };
 
 /**
  * struct ubifs_znode - in-memory representation of an indexing node.
  * @parent: parent znode or NULL if it is the root
  * @cnext: next znode to commit
+ * @cparent: parent node for this commit
+ * @ciip: index in cparent's zbranch array
  * @flags: znode flags (%DIRTY_ZNODE, %COW_ZNODE or %OBSOLETE_ZNODE)
  * @time: last access time (seconds)
  * @level: level of the entry in the TNC tree
@@ -769,6 +788,8 @@ struct ubifs_zbranch {
 struct ubifs_znode {
        struct ubifs_znode *parent;
        struct ubifs_znode *cnext;
+       struct ubifs_znode *cparent;
+       int ciip;
        unsigned long flags;
        time64_t time;
        int level;
@@ -983,6 +1004,7 @@ struct ubifs_debug_info;
  * struct ubifs_info - UBIFS file-system description data structure
  * (per-superblock).
  * @vfs_sb: VFS @struct super_block object
+ * @sup_node: The super block node as read from the device
  *
  * @highest_inum: highest used inode number
  * @max_sqnum: current global sequence number
@@ -1028,6 +1050,7 @@ struct ubifs_debug_info;
  * @default_compr: default compression algorithm (%UBIFS_COMPR_LZO, etc)
  * @rw_incompat: the media is not R/W compatible
  * @assert_action: action to take when a ubifs_assert() fails
+ * @authenticated: flag indicating the FS is mounted in authenticated mode
  *
  * @tnc_mutex: protects the Tree Node Cache (TNC), @zroot, @cnext, @enext, and
  *             @calc_idx_sz
@@ -1075,6 +1098,7 @@ struct ubifs_debug_info;
  * @key_hash: direntry key hash function
  * @key_fmt: key format
  * @key_len: key length
+ * @hash_len: The length of the index node hashes
  * @fanout: fanout of the index tree (number of links per indexing node)
  *
  * @min_io_size: minimal input/output unit size
@@ -1210,6 +1234,15 @@ struct ubifs_debug_info;
  * @rp_uid: reserved pool user ID
  * @rp_gid: reserved pool group ID
  *
+ * @hash_tfm: the hash transformation used for hashing nodes
+ * @hmac_tfm: the HMAC transformation for this filesystem
+ * @hmac_desc_len: length of the HMAC used for authentication
+ * @auth_key_name: the authentication key name
+ * @auth_hash_name: the name of the hash algorithm used for authentication
+ * @auth_hash_algo: the authentication hash used for this fs
+ * @log_hash: the log hash from the commit start node up to the latest reference
+ *            node.
+ *
  * @empty: %1 if the UBI device is empty
  * @need_recovery: %1 if the file-system needs recovery
  * @replaying: %1 during journal replay
@@ -1230,6 +1263,7 @@ struct ubifs_debug_info;
  */
 struct ubifs_info {
        struct super_block *vfs_sb;
+       struct ubifs_sb_node *sup_node;
 
        ino_t highest_inum;
        unsigned long long max_sqnum;
@@ -1270,6 +1304,7 @@ struct ubifs_info {
        unsigned int default_compr:2;
        unsigned int rw_incompat:1;
        unsigned int assert_action:2;
+       unsigned int authenticated:1;
 
        struct mutex tnc_mutex;
        struct ubifs_zbranch zroot;
@@ -1314,6 +1349,7 @@ struct ubifs_info {
        uint32_t (*key_hash)(const char *str, int len);
        int key_fmt;
        int key_len;
+       int hash_len;
        int fanout;
 
        int min_io_size;
@@ -1441,6 +1477,15 @@ struct ubifs_info {
        kuid_t rp_uid;
        kgid_t rp_gid;
 
+       struct crypto_shash *hash_tfm;
+       struct crypto_shash *hmac_tfm;
+       int hmac_desc_len;
+       char *auth_key_name;
+       char *auth_hash_name;
+       enum hash_algo auth_hash_algo;
+
+       struct shash_desc *log_hash;
+
        /* The below fields are used only during mounting and re-mounting */
        unsigned int empty:1;
        unsigned int need_recovery:1;
@@ -1471,6 +1516,195 @@ extern const struct inode_operations ubifs_dir_inode_operations;
 extern const struct inode_operations ubifs_symlink_inode_operations;
 extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
 
+/* auth.c */
+static inline int ubifs_authenticated(const struct ubifs_info *c)
+{
+       return IS_ENABLED(CONFIG_UBIFS_FS_AUTHENTICATION) && c->authenticated;
+}
+
+struct shash_desc *__ubifs_hash_get_desc(const struct ubifs_info *c);
+static inline struct shash_desc *ubifs_hash_get_desc(const struct ubifs_info *c)
+{
+       return ubifs_authenticated(c) ? __ubifs_hash_get_desc(c) : NULL;
+}
+
+static inline int ubifs_shash_init(const struct ubifs_info *c,
+                                  struct shash_desc *desc)
+{
+       if (ubifs_authenticated(c))
+               return crypto_shash_init(desc);
+       else
+               return 0;
+}
+
+static inline int ubifs_shash_update(const struct ubifs_info *c,
+                                     struct shash_desc *desc, const void *buf,
+                                     unsigned int len)
+{
+       int err = 0;
+
+       if (ubifs_authenticated(c)) {
+               err = crypto_shash_update(desc, buf, len);
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
+
+static inline int ubifs_shash_final(const struct ubifs_info *c,
+                                   struct shash_desc *desc, u8 *out)
+{
+       return ubifs_authenticated(c) ? crypto_shash_final(desc, out) : 0;
+}
+
+int __ubifs_node_calc_hash(const struct ubifs_info *c, const void *buf,
+                         u8 *hash);
+static inline int ubifs_node_calc_hash(const struct ubifs_info *c,
+                                       const void *buf, u8 *hash)
+{
+       if (ubifs_authenticated(c))
+               return __ubifs_node_calc_hash(c, buf, hash);
+       else
+               return 0;
+}
+
+int ubifs_prepare_auth_node(struct ubifs_info *c, void *node,
+                            struct shash_desc *inhash);
+
+/**
+ * ubifs_check_hash - compare two hashes
+ * @c: UBIFS file-system description object
+ * @expected: first hash
+ * @got: second hash
+ *
+ * Compare two hashes @expected and @got. The comparison is done with
+ * crypto_memneq(), so it does not leak timing information. Returns %0 when
+ * they are equal and a non-zero value otherwise.
+ */
+static inline int ubifs_check_hash(const struct ubifs_info *c,
+                                  const u8 *expected, const u8 *got)
+{
+       return crypto_memneq(expected, got, c->hash_len);
+}
+
+/**
+ * ubifs_check_hmac - compare two HMACs
+ * @c: UBIFS file-system description object
+ * @expected: first HMAC
+ * @got: second HMAC
+ *
+ * Compare two HMACs @expected and @got. Returns %0 when they are equal and
+ * a non-zero value otherwise.
+ */
+static inline int ubifs_check_hmac(const struct ubifs_info *c,
+                                  const u8 *expected, const u8 *got)
+{
+       return crypto_memneq(expected, got, c->hmac_desc_len);
+}
+
+void ubifs_bad_hash(const struct ubifs_info *c, const void *node,
+                   const u8 *hash, int lnum, int offs);
+
+int __ubifs_node_check_hash(const struct ubifs_info *c, const void *buf,
+                         const u8 *expected);
+static inline int ubifs_node_check_hash(const struct ubifs_info *c,
+                                       const void *buf, const u8 *expected)
+{
+       if (ubifs_authenticated(c))
+               return __ubifs_node_check_hash(c, buf, expected);
+       else
+               return 0;
+}
+
+int ubifs_init_authentication(struct ubifs_info *c);
+void __ubifs_exit_authentication(struct ubifs_info *c);
+static inline void ubifs_exit_authentication(struct ubifs_info *c)
+{
+       if (ubifs_authenticated(c))
+               __ubifs_exit_authentication(c);
+}
+
+/**
+ * ubifs_branch_hash - returns a pointer to the hash of a branch
+ * @c: UBIFS file-system description object
+ * @br: branch to get the hash from
+ *
+ * This returns a pointer to the hash of a branch. Since the key is already a
+ * dynamically sized object, we cannot use a struct member here.
+ */
+static inline u8 *ubifs_branch_hash(struct ubifs_info *c,
+                                   struct ubifs_branch *br)
+{
+       return (void *)br + sizeof(*br) + c->key_len;
+}
+
+/**
+ * ubifs_copy_hash - copy a hash
+ * @c: UBIFS file-system description object
+ * @from: source hash
+ * @to: destination hash
+ *
+ * With authentication this copies a hash, otherwise does nothing.
+ */
+static inline void ubifs_copy_hash(const struct ubifs_info *c, const u8 *from,
+                                  u8 *to)
+{
+       if (ubifs_authenticated(c))
+               memcpy(to, from, c->hash_len);
+}
+
+int __ubifs_node_insert_hmac(const struct ubifs_info *c, void *buf,
+                             int len, int ofs_hmac);
+static inline int ubifs_node_insert_hmac(const struct ubifs_info *c, void *buf,
+                                         int len, int ofs_hmac)
+{
+       if (ubifs_authenticated(c))
+               return __ubifs_node_insert_hmac(c, buf, len, ofs_hmac);
+       else
+               return 0;
+}
+
+int __ubifs_node_verify_hmac(const struct ubifs_info *c, const void *buf,
+                            int len, int ofs_hmac);
+static inline int ubifs_node_verify_hmac(const struct ubifs_info *c,
+                                        const void *buf, int len, int ofs_hmac)
+{
+       if (ubifs_authenticated(c))
+               return __ubifs_node_verify_hmac(c, buf, len, ofs_hmac);
+       else
+               return 0;
+}
+
+/**
+ * ubifs_auth_node_sz - returns the size of an authentication node
+ * @c: UBIFS file-system description object
+ *
+ * This function returns the size of an authentication node which can
+ * be 0 for unauthenticated filesystems or the real size of an auth node
+ * when authentication is enabled.
+ */
+static inline int ubifs_auth_node_sz(const struct ubifs_info *c)
+{
+       if (ubifs_authenticated(c))
+               return sizeof(struct ubifs_auth_node) + c->hmac_desc_len;
+       else
+               return 0;
+}
+
+int ubifs_hmac_wkm(struct ubifs_info *c, u8 *hmac);
+
+int __ubifs_shash_copy_state(const struct ubifs_info *c, struct shash_desc *src,
+                            struct shash_desc *target);
+static inline int ubifs_shash_copy_state(const struct ubifs_info *c,
+                                          struct shash_desc *src,
+                                          struct shash_desc *target)
+{
+       if (ubifs_authenticated(c))
+               return __ubifs_shash_copy_state(c, src, target);
+       else
+               return 0;
+}
+
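
A minimal sketch (an assumption, not code from this patch) of what a node-hash
helper such as __ubifs_node_calc_hash() can look like on top of the kernel
shash API these wrappers drive; the function name and the one-shot digest are
illustrative only:

	#include <crypto/hash.h>

	static int example_calc_hash(struct crypto_shash *tfm,
				     const void *node, int len, u8 *hash)
	{
		SHASH_DESC_ON_STACK(desc, tfm);
		int err;

		desc->tfm = tfm;
		desc->flags = 0;

		/* One-shot init + update + final over the whole node. */
		err = crypto_shash_digest(desc, node, len, hash);
		shash_desc_zero(desc);
		return err;
	}
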
 /* io.c */
 void ubifs_ro_mode(struct ubifs_info *c, int err);
 int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
@@ -1490,9 +1724,15 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
                         int lnum, int offs);
 int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
                     int offs);
+int ubifs_write_node_hmac(struct ubifs_info *c, void *buf, int len, int lnum,
+                         int offs, int hmac_offs);
 int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
                     int offs, int quiet, int must_chk_crc);
+void ubifs_init_node(struct ubifs_info *c, void *buf, int len, int pad);
+void ubifs_crc_node(struct ubifs_info *c, void *buf, int len);
 void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad);
+int ubifs_prepare_node_hmac(struct ubifs_info *c, void *node, int len,
+                           int hmac_offs, int pad);
 void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last);
 int ubifs_io_init(struct ubifs_info *c);
 void ubifs_pad(const struct ubifs_info *c, void *buf, int pad);
@@ -1592,11 +1832,12 @@ int ubifs_tnc_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
 int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
                     void *node, int *lnum, int *offs);
 int ubifs_tnc_add(struct ubifs_info *c, const union ubifs_key *key, int lnum,
-                 int offs, int len);
+                 int offs, int len, const u8 *hash);
 int ubifs_tnc_replace(struct ubifs_info *c, const union ubifs_key *key,
                      int old_lnum, int old_offs, int lnum, int offs, int len);
 int ubifs_tnc_add_nm(struct ubifs_info *c, const union ubifs_key *key,
-                    int lnum, int offs, int len, const struct fscrypt_name *nm);
+                    int lnum, int offs, int len, const u8 *hash,
+                    const struct fscrypt_name *nm);
 int ubifs_tnc_remove(struct ubifs_info *c, const union ubifs_key *key);
 int ubifs_tnc_remove_nm(struct ubifs_info *c, const union ubifs_key *key,
                        const struct fscrypt_name *nm);
@@ -1659,12 +1900,12 @@ int ubifs_gc_should_commit(struct ubifs_info *c);
 void ubifs_wait_for_commit(struct ubifs_info *c);
 
 /* master.c */
+int ubifs_compare_master_node(struct ubifs_info *c, void *m1, void *m2);
 int ubifs_read_master(struct ubifs_info *c);
 int ubifs_write_master(struct ubifs_info *c);
 
 /* sb.c */
 int ubifs_read_superblock(struct ubifs_info *c);
-struct ubifs_sb_node *ubifs_read_sb_node(struct ubifs_info *c);
 int ubifs_write_sb_node(struct ubifs_info *c, struct ubifs_sb_node *sup);
 int ubifs_fixup_free_space(struct ubifs_info *c);
 int ubifs_enable_encryption(struct ubifs_info *c);
@@ -1693,7 +1934,7 @@ int ubifs_clear_orphans(struct ubifs_info *c);
 /* lpt.c */
 int ubifs_calc_lpt_geom(struct ubifs_info *c);
 int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
-                         int *lpt_lebs, int *big_lpt);
+                         int *lpt_lebs, int *big_lpt, u8 *hash);
 int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr);
 struct ubifs_lprops *ubifs_lpt_lookup(struct ubifs_info *c, int lnum);
 struct ubifs_lprops *ubifs_lpt_lookup_dirty(struct ubifs_info *c, int lnum);
@@ -1712,6 +1953,7 @@ struct ubifs_pnode *ubifs_get_pnode(struct ubifs_info *c,
                                    struct ubifs_nnode *parent, int iip);
 struct ubifs_nnode *ubifs_get_nnode(struct ubifs_info *c,
                                    struct ubifs_nnode *parent, int iip);
+struct ubifs_pnode *ubifs_pnode_lookup(struct ubifs_info *c, int i);
 int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip);
 void ubifs_add_lpt_dirt(struct ubifs_info *c, int lnum, int dirty);
 void ubifs_add_nnode_dirt(struct ubifs_info *c, struct ubifs_nnode *nnode);
@@ -1720,6 +1962,7 @@ struct ubifs_nnode *ubifs_first_nnode(struct ubifs_info *c, int *hght);
 /* Needed only in debugging code in lpt_commit.c */
 int ubifs_unpack_nnode(const struct ubifs_info *c, void *buf,
                       struct ubifs_nnode *nnode);
+int ubifs_lpt_calc_hash(struct ubifs_info *c, u8 *hash);
 
 /* lpt_commit.c */
 int ubifs_lpt_start_commit(struct ubifs_info *c);
@@ -1807,7 +2050,7 @@ int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf);
 int ubifs_rcvry_gc_commit(struct ubifs_info *c);
 int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
                             int deletion, loff_t new_size);
-int ubifs_recover_size(struct ubifs_info *c);
+int ubifs_recover_size(struct ubifs_info *c, bool in_place);
 void ubifs_destroy_size_tree(struct ubifs_info *c);
 
 /* ioctl.c */
index 8f2f56d9a1bbfb8b110812d45167ce7e7b456099..e3d684ea320303630f4a75135ed4e4bcc408484c 100644 (file)
@@ -827,16 +827,20 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 
 
        ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
-       if (ret < 0)
-               goto out_bh;
-
-       strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
+       if (ret < 0) {
+               strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
+               pr_warn("incorrect volume identification, setting to "
+                       "'InvalidName'\n");
+       } else {
+               strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
+       }
        udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
        ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
-       if (ret < 0)
+       if (ret < 0) {
+               ret = 0;
                goto out_bh;
-
+       }
        outstr[ret] = 0;
        udf_debug("volSetIdent[] = '%s'\n", outstr);
 
index 45234791fec281f027cc05b97d9364384d8bc7b8..5fcfa96463ebb820cef33b83460fc8d1a9bea8b2 100644 (file)
@@ -351,6 +351,11 @@ try_again:
        return u_len;
 }
 
+/*
+ * Convert CS0 dstring to output charset. Warning: This function may truncate
+ * input string if it is too long as it is used for informational strings only
+ * and it is better to truncate the string than to refuse mounting a media.
+ */
 int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
                      const uint8_t *ocu_i, int i_len)
 {
@@ -359,9 +364,12 @@ int udf_dstrCS0toChar(struct super_block *sb, uint8_t *utf_o, int o_len,
        if (i_len > 0) {
                s_len = ocu_i[i_len - 1];
                if (s_len >= i_len) {
-                       pr_err("incorrect dstring lengths (%d/%d)\n",
-                              s_len, i_len);
-                       return -EINVAL;
+                       pr_warn("incorrect dstring lengths (%d/%d), truncating\n",
+                               s_len, i_len);
+                       s_len = i_len - 1;
+                       /* 2-byte encoding? Need to keep (s_len - 1) even... */
+                       if (ocu_i[0] == 16)
+                               s_len -= (s_len - 1) & 1;
                }
        }
 
index 356d2b8568c1424d1d1935c451e65c7db13aab62..cd58939dc977e481930053f37e3137394265d75e 100644 (file)
@@ -1361,6 +1361,19 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                ret = -EINVAL;
                if (!vma_can_userfault(cur))
                        goto out_unlock;
+
+               /*
+                * UFFDIO_COPY will fill file holes even without
+                * PROT_WRITE. This check enforces that if this is a
+                * MAP_SHARED, the process has write permission to the backing
+                * file. If VM_MAYWRITE is set it also enforces that on a
+                * MAP_SHARED vma: there is no F_WRITE_SEAL and no further
+                * F_WRITE_SEAL can be taken until the vma is destroyed.
+                */
+               ret = -EPERM;
+               if (unlikely(!(cur->vm_flags & VM_MAYWRITE)))
+                       goto out_unlock;
+
                /*
                 * If this vma contains ending address, and huge pages
                 * check alignment.
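
A hypothetical userspace sketch (paths and sizes invented) of what the new
check rejects: registering a MAP_SHARED mapping of a file opened read-only,
where UFFDIO_COPY could otherwise fill holes without write permission:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <linux/userfaultfd.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		long uffd = syscall(__NR_userfaultfd, O_CLOEXEC);
		struct uffdio_api api = { .api = UFFD_API };
		int fd = open("/dev/shm/file", O_RDONLY);	/* hypothetical */
		void *p;

		if (uffd < 0 || fd < 0 || ioctl(uffd, UFFDIO_API, &api))
			return 1;

		p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		struct uffdio_register reg = {
			.range = { .start = (unsigned long)p, .len = 4096 },
			.mode  = UFFDIO_REGISTER_MODE_MISSING,
		};

		/* Expected to fail with EPERM after this change. */
		if (ioctl(uffd, UFFDIO_REGISTER, &reg))
			perror("UFFDIO_REGISTER");
		return 0;
	}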
@@ -1406,6 +1419,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                BUG_ON(!vma_can_userfault(vma));
                BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
                       vma->vm_userfaultfd_ctx.ctx != ctx);
+               WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
 
                /*
                 * Nothing to do: this vma is already registered into this
@@ -1552,6 +1566,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                cond_resched();
 
                BUG_ON(!vma_can_userfault(vma));
+               WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
 
                /*
                 * Nothing to do: this vma is already registered into this
index 6fc5425b1474a52694b4860aa79cefc52d3fd826..2652d00842d6ba8c6479f816765c87dfc622d1cb 100644 (file)
@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
        struct xfs_mount                *mp = bp->b_target->bt_mount;
        struct xfs_attr_leafblock       *leaf = bp->b_addr;
        struct xfs_attr_leaf_entry      *entries;
-       uint16_t                        end;
+       uint32_t                        end;    /* must be 32bit - see below */
        int                             i;
 
        xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
        /*
         * Quickly check the freemap information.  Attribute data has to be
         * aligned to 4-byte boundaries, and likewise for the free space.
+        *
+        * Note that for 64k block size filesystems, the freemap entries cannot
+        * overflow as they are only be16 fields. However, when checking the
+        * end pointer of the freemap, we have to be careful to detect
+        * overflows and so use uint32_t for those checks.
         */
        for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
                if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
                        return __this_address;
                if (ichdr.freemap[i].size & 0x3)
                        return __this_address;
-               end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+               /* be careful of 16-bit overflows here */
+               end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
                if (end < ichdr.freemap[i].base)
                        return __this_address;
                if (end > mp->m_attr_geo->blksize)
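
A standalone illustration (not from the patch) of the wrap-around being
guarded against: with 16-bit arithmetic a freemap base/size pair can sum past
65535, so the end-of-range check must be widened to 32 bits:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t base = 65000, size = 4096;
		uint16_t end16 = base + size;		/* wraps to 3560 */
		uint32_t end32 = (uint32_t)base + size;	/* 69096 */

		printf("16-bit end: %u (wrapped, passes a naive check)\n", end16);
		printf("32-bit end: %u (overflow is detectable)\n", end32);
		return 0;
	}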
index 74d7228e755b3ade097457c0ae68e07e252e67a4..19e921d1586f273f7f234d0c32a8982683821f69 100644 (file)
@@ -1694,10 +1694,13 @@ xfs_bmap_add_extent_delay_real(
        case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
                /*
                 * Filling in all of a previously delayed allocation extent.
-                * The right neighbor is contiguous, the left is not.
+                * The right neighbor is contiguous, the left is not. Take care
+                * with delay -> unwritten extent allocation here because the
+                * delalloc record we are overwriting is always written.
                 */
                PREV.br_startblock = new->br_startblock;
                PREV.br_blockcount += RIGHT.br_blockcount;
+               PREV.br_state = new->br_state;
 
                xfs_iext_next(ifp, &bma->icur);
                xfs_iext_remove(bma->ip, &bma->icur, state);
index 34c6d7bd4d180c736d8da7e6c33a413ee177ac5f..bbdae2b4559fc91d0e7f650fcfe6ed81868b5512 100644 (file)
@@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc(
 
        if (xfs_sb_version_hascrc(&mp->m_sb)) {
                if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
-                       return __this_address;
+                       return false;
                return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
        }
 
index 86c50208a14374e2a1588b5686df8d30dc677c57..7fbf8af0b15949fb1e329e270cf66ef9f519eb3f 100644 (file)
@@ -538,15 +538,18 @@ xfs_inobt_rec_check_count(
 
 static xfs_extlen_t
 xfs_inobt_max_size(
-       struct xfs_mount        *mp)
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno)
 {
+       xfs_agblock_t           agblocks = xfs_ag_block_count(mp, agno);
+
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_inobt_mxr[0] == 0)
                return 0;
 
        return xfs_btree_calc_size(mp->m_inobt_mnr,
-               (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
-                               XFS_INODES_PER_CHUNK);
+                               (uint64_t)agblocks * mp->m_sb.sb_inopblock /
+                                       XFS_INODES_PER_CHUNK);
 }
 
 static int
@@ -594,7 +597,7 @@ xfs_finobt_calc_reserves(
        if (error)
                return error;
 
-       *ask += xfs_inobt_max_size(mp);
+       *ask += xfs_inobt_max_size(mp, agno);
        *used += tree_len;
        return 0;
 }
index 5d263dfdb3bcc60ca30708622397e42de9fbaeac..1ee8c5539fa4f2e999808acc021d63be0b4963b0 100644 (file)
@@ -1042,7 +1042,7 @@ out_trans_cancel:
        goto out_unlock;
 }
 
-static int
+int
 xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
@@ -1126,9 +1126,9 @@ xfs_free_file_space(
         * page could be mmap'd and iomap_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
-       if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+       if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
-                               (offset + len) & ~PAGE_MASK, LLONG_MAX);
+                               round_down(offset + len, PAGE_SIZE), LLONG_MAX);
        }
 
        return error;
@@ -1195,13 +1195,7 @@ xfs_prepare_shift(
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
-       error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
-       if (error)
-               return error;
-       error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
-                                       offset >> PAGE_SHIFT, -1);
-       if (error)
-               return error;
+       error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
 
        /*
         * Clean out anything hanging around in the cow fork now that
index 87363d136bb618145c396223da5b4755bdd572c8..7a78229cf1a79807c7794e5f468eff6e4bf8ff7f 100644 (file)
@@ -80,4 +80,7 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
                          int whichfork, xfs_extnum_t *nextents,
                          xfs_filblks_t *count);
 
+int    xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset,
+                             xfs_off_t len);
+
 #endif /* __XFS_BMAP_UTIL_H__ */
index 12d8455bfbb29114887744046d52cb75428bc911..010db5f8fb00f81deb3524c6356b35f71a2fd7d3 100644 (file)
@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
 }
 
 /*
- * Requeue a failed buffer for writeback
+ * Requeue a failed buffer for writeback.
  *
- * Return true if the buffer has been re-queued properly, false otherwise
+ * We clear the log item failed state here as well, but we have to be careful
+ * about reference counts because the only active references to the buffer
+ * may be the ones held by the failed log items. Hence if we clear the log
+ * item failed state
+ * before queuing the buffer for IO we can release all active references to
+ * the buffer and free it, leading to use after free problems in
+ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
+ * order we process them in - the buffer is locked, and we own the buffer list
+ * so nothing on them is going to change while we are performing this action.
+ *
+ * Hence we can safely queue the buffer for IO before we clear the failed log
+ * item state, therefore always having an active reference to the buffer and
+ * avoiding the transient zero-reference state that leads to use-after-free.
+ *
+ * Return true if the buffer was added to the buffer list, false if it was
+ * already on the buffer list.
  */
 bool
 xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
        struct list_head        *buffer_list)
 {
        struct xfs_log_item     *lip;
+       bool                    ret;
+
+       ret = xfs_buf_delwri_queue(bp, buffer_list);
 
        /*
-        * Clear XFS_LI_FAILED flag from all items before resubmit
-        *
-        * XFS_LI_FAILED set/clear is protected by ail_lock, caller  this
+        * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
         * function already have it acquired
         */
        list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
                xfs_clear_li_failed(lip);
 
-       /* Add this buffer back to the delayed write list */
-       return xfs_buf_delwri_queue(bp, buffer_list);
+       return ret;
 }
index 61a5ad2600e865a6b11a8345f956eba10203abd2..e47425071e654473f4b34e7899015cecce19ef5e 100644 (file)
@@ -919,28 +919,67 @@ out_unlock:
        return error;
 }
 
-STATIC int
-xfs_file_clone_range(
-       struct file     *file_in,
-       loff_t          pos_in,
-       struct file     *file_out,
-       loff_t          pos_out,
-       u64             len)
-{
-       return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
-                                    len, false);
-}
 
-STATIC int
-xfs_file_dedupe_range(
-       struct file     *file_in,
-       loff_t          pos_in,
-       struct file     *file_out,
-       loff_t          pos_out,
-       u64             len)
+STATIC loff_t
+xfs_file_remap_range(
+       struct file             *file_in,
+       loff_t                  pos_in,
+       struct file             *file_out,
+       loff_t                  pos_out,
+       loff_t                  len,
+       unsigned int            remap_flags)
 {
-       return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
-                                    len, true);
+       struct inode            *inode_in = file_inode(file_in);
+       struct xfs_inode        *src = XFS_I(inode_in);
+       struct inode            *inode_out = file_inode(file_out);
+       struct xfs_inode        *dest = XFS_I(inode_out);
+       struct xfs_mount        *mp = src->i_mount;
+       loff_t                  remapped = 0;
+       xfs_extlen_t            cowextsize;
+       int                     ret;
+
+       if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
+               return -EINVAL;
+
+       if (!xfs_sb_version_hasreflink(&mp->m_sb))
+               return -EOPNOTSUPP;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       /* Prepare and then clone file data. */
+       ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
+                       &len, remap_flags);
+       if (ret < 0 || len == 0)
+               return ret;
+
+       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
+
+       ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
+                       &remapped);
+       if (ret)
+               goto out_unlock;
+
+       /*
+        * Carry the cowextsize hint from src to dest if we're sharing the
+        * entire source file to the entire destination file, the source file
+        * has a cowextsize hint, and the destination file does not.
+        */
+       cowextsize = 0;
+       if (pos_in == 0 && len == i_size_read(inode_in) &&
+           (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
+           pos_out == 0 && len >= i_size_read(inode_out) &&
+           !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
+               cowextsize = src->i_d.di_cowextsize;
+
+       ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
+                       remap_flags);
+
+out_unlock:
+       xfs_reflink_remap_unlock(file_in, file_out);
+       if (ret)
+               trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
+       return remapped > 0 ? remapped : ret;
 }
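
A hypothetical userspace sketch (paths invented) of the interface this
consolidation serves: FICLONE/FICLONERANGE and FIDEDUPERANGE now all reach the
filesystem through the single ->remap_file_range() hook, distinguished by the
REMAP_FILE_* flags:

	#include <fcntl.h>
	#include <linux/fs.h>
	#include <stdio.h>
	#include <sys/ioctl.h>

	int main(void)
	{
		int src = open("/mnt/src", O_RDONLY);
		int dst = open("/mnt/dst", O_WRONLY | O_CREAT, 0644);

		if (src < 0 || dst < 0)
			return 1;

		/* Reflink the whole of src into dst. */
		if (ioctl(dst, FICLONE, src))
			perror("FICLONE");
		return 0;
	}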
 
 STATIC int
@@ -1175,8 +1214,7 @@ const struct file_operations xfs_file_operations = {
        .fsync          = xfs_file_fsync,
        .get_unmapped_area = thp_get_unmapped_area,
        .fallocate      = xfs_file_fallocate,
-       .clone_file_range = xfs_file_clone_range,
-       .dedupe_file_range = xfs_file_dedupe_range,
+       .remap_file_range = xfs_file_remap_range,
 };
 
 const struct file_operations xfs_dir_file_operations = {
index 6e2c08f30f602deb360e737003cc3ae1abf4bfc7..6ecdbb3af7de5c02c86a25d41ed7086ae2f845fc 100644 (file)
@@ -1608,7 +1608,7 @@ xfs_ioc_getbmap(
        error = 0;
 out_free_buf:
        kmem_free(buf);
-       return 0;
+       return error;
 }
 
 struct getfsmap_info {
index 576c375ce12a8f411a49f75cf6bd72f69c96279c..6b736ea58d35402eb7e7975067a4303131cf3d83 100644 (file)
@@ -107,5 +107,5 @@ assfail(char *expr, char *file, int line)
 void
 xfs_hex_dump(void *p, int length)
 {
-       print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
+       print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
 }
index 73a1d77ec187c8958d09cc6fb13f4af3e0e61605..3091e4bc04efe1e6f4d9aa88ed7987a221f9bd78 100644 (file)
@@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot(
                statp->f_files = limit;
                statp->f_ffree =
                        (statp->f_files > dqp->q_res_icount) ?
-                        (statp->f_ffree - dqp->q_res_icount) : 0;
+                        (statp->f_files - dqp->q_res_icount) : 0;
        }
 }
 
index 8eaeec9d58ed6799898753f49f0ad895b5db4cb5..322a852ce284a017382ceb9e75bb74762dfbad78 100644 (file)
@@ -296,6 +296,7 @@ xfs_reflink_reserve_cow(
        if (error)
                return error;
 
+       xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
        trace_xfs_reflink_cow_alloc(ip, &got);
        return 0;
 }
@@ -913,18 +914,18 @@ out_error:
 /*
  * Update destination inode size & cowextsize hint, if necessary.
  */
-STATIC int
+int
 xfs_reflink_update_dest(
        struct xfs_inode        *dest,
        xfs_off_t               newlen,
        xfs_extlen_t            cowextsize,
-       bool                    is_dedupe)
+       unsigned int            remap_flags)
 {
        struct xfs_mount        *mp = dest->i_mount;
        struct xfs_trans        *tp;
        int                     error;
 
-       if (is_dedupe && newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
+       if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
                return 0;
 
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
@@ -945,10 +946,6 @@ xfs_reflink_update_dest(
                dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
        }
 
-       if (!is_dedupe) {
-               xfs_trans_ichgtime(tp, dest,
-                                  XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
-       }
        xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
 
        error = xfs_trans_commit(tp);
@@ -1112,19 +1109,28 @@ out:
 /*
  * Iteratively remap one file's extents (and holes) to another's.
  */
-STATIC int
+int
 xfs_reflink_remap_blocks(
        struct xfs_inode        *src,
-       xfs_fileoff_t           srcoff,
+       loff_t                  pos_in,
        struct xfs_inode        *dest,
-       xfs_fileoff_t           destoff,
-       xfs_filblks_t           len,
-       xfs_off_t               new_isize)
+       loff_t                  pos_out,
+       loff_t                  remap_len,
+       loff_t                  *remapped)
 {
        struct xfs_bmbt_irec    imap;
+       xfs_fileoff_t           srcoff;
+       xfs_fileoff_t           destoff;
+       xfs_filblks_t           len;
+       xfs_filblks_t           range_len;
+       xfs_filblks_t           remapped_len = 0;
+       xfs_off_t               new_isize = pos_out + remap_len;
        int                     nimaps;
        int                     error = 0;
-       xfs_filblks_t           range_len;
+
+       destoff = XFS_B_TO_FSBT(src->i_mount, pos_out);
+       srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in);
+       len = XFS_B_TO_FSB(src->i_mount, remap_len);
 
        /* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */
        while (len) {
@@ -1139,7 +1145,7 @@ xfs_reflink_remap_blocks(
                error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
                xfs_iunlock(src, lock_mode);
                if (error)
-                       goto err;
+                       break;
                ASSERT(nimaps == 1);
 
                trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_IO_OVERWRITE,
@@ -1153,23 +1159,24 @@ xfs_reflink_remap_blocks(
                error = xfs_reflink_remap_extent(dest, &imap, destoff,
                                new_isize);
                if (error)
-                       goto err;
+                       break;
 
                if (fatal_signal_pending(current)) {
                        error = -EINTR;
-                       goto err;
+                       break;
                }
 
                /* Advance drange/srange */
                srcoff += range_len;
                destoff += range_len;
                len -= range_len;
+               remapped_len += range_len;
        }
 
-       return 0;
-
-err:
-       trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
+       if (error)
+               trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
+       *remapped = min_t(loff_t, remap_len,
+                         XFS_FSB_TO_B(src->i_mount, remapped_len));
        return error;
 }
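
The conversions at the top of the rewritten function round the offsets down (XFS_B_TO_FSBT) and the length up (XFS_B_TO_FSB). A self-contained sketch of that arithmetic, assuming 4096-byte filesystem blocks; the real macros shift by the superblock's blocklog:

#include <stdio.h>

#define BLOCKLOG 12	/* assumed: 4096-byte filesystem blocks */
#define B_TO_FSBT(b) ((long long)(b) >> BLOCKLOG)	/* round down */
#define B_TO_FSB(b)  (((long long)(b) + (1 << BLOCKLOG) - 1) >> BLOCKLOG) /* round up */

int main(void)
{
	/* pos_in 6144 -> srcoff 1; remap_len 10000 -> len 3 blocks */
	printf("srcoff=%lld len=%lld\n", B_TO_FSBT(6144), B_TO_FSB(10000));
	return 0;
}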
 
@@ -1218,7 +1225,7 @@ retry:
 }
 
 /* Unlock both inodes after they've been prepped for a range clone. */
-STATIC void
+void
 xfs_reflink_remap_unlock(
        struct file             *file_in,
        struct file             *file_out)
@@ -1286,21 +1293,20 @@ xfs_reflink_zero_posteof(
  * stale data in the destination file. Hence we reject these clone attempts with
  * -EINVAL in this case.
  */
-STATIC int
+int
 xfs_reflink_remap_prep(
        struct file             *file_in,
        loff_t                  pos_in,
        struct file             *file_out,
        loff_t                  pos_out,
-       u64                     *len,
-       bool                    is_dedupe)
+       loff_t                  *len,
+       unsigned int            remap_flags)
 {
        struct inode            *inode_in = file_inode(file_in);
        struct xfs_inode        *src = XFS_I(inode_in);
        struct inode            *inode_out = file_inode(file_out);
        struct xfs_inode        *dest = XFS_I(inode_out);
        bool                    same_inode = (inode_in == inode_out);
-       u64                     blkmask = i_blocksize(inode_in) - 1;
        ssize_t                 ret;
 
        /* Lock both files against IO */
@@ -1323,29 +1329,11 @@ xfs_reflink_remap_prep(
        if (IS_DAX(inode_in) || IS_DAX(inode_out))
                goto out_unlock;
 
-       ret = vfs_clone_file_prep_inodes(inode_in, pos_in, inode_out, pos_out,
-                       len, is_dedupe);
-       if (ret <= 0)
+       ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
+                       len, remap_flags);
+       if (ret < 0 || *len == 0)
                goto out_unlock;
 
-       /*
-        * If the dedupe data matches, chop off the partial EOF block
-        * from the source file so we don't try to dedupe the partial
-        * EOF block.
-        */
-       if (is_dedupe) {
-               *len &= ~blkmask;
-       } else if (*len & blkmask) {
-               /*
-                * The user is attempting to share a partial EOF block,
-                * if it's inside the destination EOF then reject it.
-                */
-               if (pos_out + *len < i_size_read(inode_out)) {
-                       ret = -EINVAL;
-                       goto out_unlock;
-               }
-       }
-
        /* Attach dquots to dest inode before changing block map */
        ret = xfs_qm_dqattach(dest);
        if (ret)
@@ -1364,102 +1352,23 @@ xfs_reflink_remap_prep(
        if (ret)
                goto out_unlock;
 
-       /* Zap any page cache for the destination file's range. */
-       truncate_inode_pages_range(&inode_out->i_data, pos_out,
-                                  PAGE_ALIGN(pos_out + *len) - 1);
-
-       /* If we're altering the file contents... */
-       if (!is_dedupe) {
-               /*
-                * ...update the timestamps (which will grab the ilock again
-                * from xfs_fs_dirty_inode, so we have to call it before we
-                * take the ilock).
-                */
-               if (!(file_out->f_mode & FMODE_NOCMTIME)) {
-                       ret = file_update_time(file_out);
-                       if (ret)
-                               goto out_unlock;
-               }
-
-               /*
-                * ...clear the security bits if the process is not being run
-                * by root.  This keeps people from modifying setuid and setgid
-                * binaries.
-                */
-               ret = file_remove_privs(file_out);
-               if (ret)
-                       goto out_unlock;
+       /*
+        * If pos_out > EOF, we may have dirtied blocks between EOF and
+        * pos_out. In that case, we need to extend the flush and unmap to cover
+        * from EOF to the end of the copy length.
+        */
+       if (pos_out > XFS_ISIZE(dest)) {
+               loff_t  flen = *len + (pos_out - XFS_ISIZE(dest));
+               ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
+       } else {
+               ret = xfs_flush_unmap_range(dest, pos_out, *len);
        }
-
-       return 1;
-out_unlock:
-       xfs_reflink_remap_unlock(file_in, file_out);
-       return ret;
-}
-
-/*
- * Link a range of blocks from one file to another.
- */
-int
-xfs_reflink_remap_range(
-       struct file             *file_in,
-       loff_t                  pos_in,
-       struct file             *file_out,
-       loff_t                  pos_out,
-       u64                     len,
-       bool                    is_dedupe)
-{
-       struct inode            *inode_in = file_inode(file_in);
-       struct xfs_inode        *src = XFS_I(inode_in);
-       struct inode            *inode_out = file_inode(file_out);
-       struct xfs_inode        *dest = XFS_I(inode_out);
-       struct xfs_mount        *mp = src->i_mount;
-       xfs_fileoff_t           sfsbno, dfsbno;
-       xfs_filblks_t           fsblen;
-       xfs_extlen_t            cowextsize;
-       ssize_t                 ret;
-
-       if (!xfs_sb_version_hasreflink(&mp->m_sb))
-               return -EOPNOTSUPP;
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
-
-       /* Prepare and then clone file data. */
-       ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
-                       &len, is_dedupe);
-       if (ret <= 0)
-               return ret;
-
-       trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);
-
-       dfsbno = XFS_B_TO_FSBT(mp, pos_out);
-       sfsbno = XFS_B_TO_FSBT(mp, pos_in);
-       fsblen = XFS_B_TO_FSB(mp, len);
-       ret = xfs_reflink_remap_blocks(src, sfsbno, dest, dfsbno, fsblen,
-                       pos_out + len);
        if (ret)
                goto out_unlock;
 
-       /*
-        * Carry the cowextsize hint from src to dest if we're sharing the
-        * entire source file to the entire destination file, the source file
-        * has a cowextsize hint, and the destination file does not.
-        */
-       cowextsize = 0;
-       if (pos_in == 0 && len == i_size_read(inode_in) &&
-           (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
-           pos_out == 0 && len >= i_size_read(inode_out) &&
-           !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
-               cowextsize = src->i_d.di_cowextsize;
-
-       ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
-                       is_dedupe);
-
+       return 1;
 out_unlock:
        xfs_reflink_remap_unlock(file_in, file_out);
-       if (ret)
-               trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
        return ret;
 }
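
A worked case for the flush sizing added to the prep path above, with hypothetical numbers: if the destination EOF is 8192 while pos_out = 16384 and *len = 4096, then flen = 4096 + (16384 - 8192) = 12288, so the flush/unmap covers bytes [8192, 20480) rather than just [16384, 20480), catching any dirty page cache between the old EOF and pos_out.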
 
index 7f47202b5639142054420b2fb2384e9c44cfbb0d..6d73daef1f132398d0b2ee02ed319c067bd02b6f 100644 (file)
@@ -27,13 +27,24 @@ extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
                xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
-extern int xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
-               struct file *file_out, loff_t pos_out, u64 len, bool is_dedupe);
+extern loff_t xfs_reflink_remap_range(struct file *file_in, loff_t pos_in,
+               struct file *file_out, loff_t pos_out, loff_t len,
+               unsigned int remap_flags);
 extern int xfs_reflink_inode_has_shared_extents(struct xfs_trans *tp,
                struct xfs_inode *ip, bool *has_shared);
 extern int xfs_reflink_clear_inode_flag(struct xfs_inode *ip,
                struct xfs_trans **tpp);
 extern int xfs_reflink_unshare(struct xfs_inode *ip, xfs_off_t offset,
                xfs_off_t len);
+extern int xfs_reflink_remap_prep(struct file *file_in, loff_t pos_in,
+               struct file *file_out, loff_t pos_out, loff_t *len,
+               unsigned int remap_flags);
+extern int xfs_reflink_remap_blocks(struct xfs_inode *src, loff_t pos_in,
+               struct xfs_inode *dest, loff_t pos_out, loff_t remap_len,
+               loff_t *remapped);
+extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen,
+               xfs_extlen_t cowextsize, unsigned int remap_flags);
+extern void xfs_reflink_remap_unlock(struct file *file_in,
+               struct file *file_out);
 
 #endif /* __XFS_REFLINK_H */
index 3043e5ed6495580de11de6932117addca0e85aec..8a6532aae779b49299e8c99b55b6d770d1866be1 100644 (file)
@@ -280,7 +280,10 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
        ),
        TP_fast_assign(
                __entry->dev = bp->b_target->bt_dev;
-               __entry->bno = bp->b_bn;
+               if (bp->b_bn == XFS_BUF_DADDR_NULL)
+                       __entry->bno = bp->b_maps[0].bm_bn;
+               else
+                       __entry->bno = bp->b_bn;
                __entry->nblks = bp->b_length;
                __entry->hold = atomic_read(&bp->b_hold);
                __entry->pincount = atomic_read(&bp->b_pin_count);
index 89f3b03b14451af9f4a9707172a97143f2ae9b5e..e3667c9a33a5deea5ef1f849b97cb0c3fe83a404 100644 (file)
@@ -3,7 +3,7 @@
 #define _4LEVEL_FIXUP_H
 
 #define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 #define PUD_SHIFT                      PGDIR_SHIFT
 #define PUD_SIZE                       PGDIR_SIZE
index 9c2e0708eb82f4aeb8c009f51f78fc43ae511036..73474bb52344d982abaee00ffcbad322308e06f4 100644 (file)
@@ -3,7 +3,7 @@
 #define _5LEVEL_FIXUP_H
 
 #define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 #define P4D_SHIFT                      PGDIR_SHIFT
 #define P4D_SIZE                       PGDIR_SIZE
index 0c34215263b8aec624451b3f04575a1ea6328cf7..1d6dd38c0e5ea8a2155c370cf27bb808f252031e 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef __ASSEMBLY__
 #include <asm-generic/5level-fixup.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a pgd gets the size right, and allows
index 1a29b2a0282bf20a8541b79096b474d16c5ae50c..04cb913797bc0d534032364c05d53c50d8d7d73f 100644 (file)
@@ -4,7 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
 
 typedef struct { pgd_t pgd; } p4d_t;
 
index f35f6e8149e47dca34e7cded26574b0786322fc1..b85b8271a73debc1dc58f661ba9073c399fab175 100644 (file)
@@ -8,7 +8,7 @@
 
 struct mm_struct;
 
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
 
 /*
  * Having the pmd type consist of a pud gets the size right, and allows
index e950b9c50f34f218284ff0785366c9ef07a6bdf9..9bef475db6fefe1e3b79c04cff754efd1b383de0 100644 (file)
@@ -9,7 +9,7 @@
 #else
 #include <asm-generic/pgtable-nop4d.h>
 
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
 
 /*
  * Having the pud type consist of a p4d gets the size right, and allows
index 5657a20e0c599449d9851e08f672b2c2cb7f6d64..359fb935ded6ab0cc418659f6f119d26886db129 100644 (file)
@@ -1127,4 +1127,20 @@ static inline bool arch_has_pfn_modify_check(void)
 #endif
 #endif
 
+/*
+ * On some architectures, whether the p4d/pud or pmd layer of the page
+ * table hierarchy is folded depends on the mm.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm)      __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm)      __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm)      __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
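
A hedged sketch of how a caller can use the new per-mm predicates; the helper mirrors what the mm_inc_nr_puds()-style page-table counters can do with them, and is not part of this hunk (kernel context assumed):

/* Skip PUD accounting entirely when the PUD level is folded for this mm. */
static inline void sketch_inc_nr_puds(struct mm_struct *mm)
{
	if (mm_pud_folded(mm))
		return;		/* level folded away: no pud pages exist */
	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}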
diff --git a/include/crypto/asym_tpm_subtype.h b/include/crypto/asym_tpm_subtype.h
new file mode 100644 (file)
index 0000000..48198c3
--- /dev/null
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _LINUX_ASYM_TPM_SUBTYPE_H
+#define _LINUX_ASYM_TPM_SUBTYPE_H
+
+#include <linux/keyctl.h>
+
+struct tpm_key {
+       void *blob;
+       u32 blob_len;
+       uint16_t key_len; /* Size in bits of the key */
+       const void *pub_key; /* pointer inside blob to the public key bytes */
+       uint16_t pub_key_len; /* length of the public key */
+};
+
+struct tpm_key *tpm_key_create(const void *blob, uint32_t blob_len);
+
+extern struct asymmetric_key_subtype asym_tpm_subtype;
+
+#endif /* _LINUX_ASYM_TPM_SUBTYPE_H */
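
A hedged sketch of consuming this header from a key parser (kernel context assumed; that tpm_key_create() reports failure via ERR_PTR is an assumption here):

/* Wrap a TPM key blob; hand the result over as the key payload. */
static int sketch_load_tpm_blob(const void *blob, uint32_t blob_len)
{
	struct tpm_key *tk = tpm_key_create(blob, blob_len);

	if (IS_ERR(tk))			/* assumed ERR_PTR on failure */
		return PTR_ERR(tk);
	/* ... attach tk as the asymmetric key's payload ... */
	return 0;
}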
index e0b681a717bac93ce3323b1a353fceba8310f811..be626eac911338cc12c5783de8d7181d8cb47949 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef _LINUX_PUBLIC_KEY_H
 #define _LINUX_PUBLIC_KEY_H
 
+#include <linux/keyctl.h>
+
 /*
  * Cryptographic data for the public-key subtype of the asymmetric key type.
  *
@@ -23,6 +25,7 @@
 struct public_key {
        void *key;
        u32 keylen;
+       bool key_is_private;
        const char *id_type;
        const char *pkey_algo;
 };
@@ -40,6 +43,7 @@ struct public_key_signature {
        u8 digest_size;         /* Number of bytes in digest */
        const char *pkey_algo;
        const char *hash_algo;
+       const char *encoding;
 };
 
 extern void public_key_signature_free(struct public_key_signature *sig);
@@ -65,8 +69,14 @@ extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
                                                 const union key_payload *payload,
                                                 struct key *trusted);
 
-extern int verify_signature(const struct key *key,
-                           const struct public_key_signature *sig);
+extern int query_asymmetric_key(const struct kernel_pkey_params *,
+                               struct kernel_pkey_query *);
+
+extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *);
+extern int create_signature(struct kernel_pkey_params *, const void *, void *);
+extern int verify_signature(const struct key *,
+                           const struct public_key_signature *);
 
 int public_key_verify_signature(const struct public_key *pkey,
                                const struct public_key_signature *sig);
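
A hedged sketch of the new query entry point declared above; field names follow the kernel_pkey_* structures this series adds to <linux/keyctl.h>, and the "pkcs1" encoding is an assumption:

/* Ask an asymmetric key what it can do before calling encrypt_blob()
 * or create_signature(). */
static int sketch_query_key(struct key *key)
{
	struct kernel_pkey_params params = {
		.key      = key,
		.encoding = "pkcs1",	/* assumed encoding */
	};
	struct kernel_pkey_query info;
	int ret = query_asymmetric_key(&params, &info);

	if (ret < 0)
		return ret;
	pr_info("key size %u bits, max sig %u bytes\n",
		info.key_size, info.max_sig_size);
	return 0;
}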
index 91a877fa00cb59161125a711515931e49b040e53..9ccad6b062f2bb62c54434288e7fc9a25b194b53 100644 (file)
@@ -82,6 +82,53 @@ enum drm_connector_status {
        connector_status_unknown = 3,
 };
 
+/**
+ * enum drm_connector_registration_state - userspace registration status for
+ * a &drm_connector
+ *
+ * This enum is used to track the status of initializing a connector and
+ * registering it with userspace, so that DRM can prevent bogus modesets on
+ * connectors that no longer exist.
+ */
+enum drm_connector_registration_state {
+       /**
+        * @DRM_CONNECTOR_INITIALIZING: The connector has just been created,
+        * but has yet to be exposed to userspace. There should be no
+        * additional restrictions to how the state of this connector may be
+        * modified.
+        */
+       DRM_CONNECTOR_INITIALIZING = 0,
+
+       /**
+        * @DRM_CONNECTOR_REGISTERED: The connector has been fully initialized
+        * and registered with sysfs, as such it has been exposed to
+        * userspace. There should be no additional restrictions to how the
+        * state of this connector may be modified.
+        */
+       DRM_CONNECTOR_REGISTERED = 1,
+
+       /**
+        * @DRM_CONNECTOR_UNREGISTERED: The connector has either been exposed
+        * to userspace and has since been unregistered and removed from
+        * userspace, or the connector was unregistered before it had a chance
+        * to be exposed to userspace (e.g. still in the
+        * @DRM_CONNECTOR_INITIALIZING state). When a connector is
+        * unregistered, there are additional restrictions to how its state
+        * may be modified:
+        *
+        * - An unregistered connector may only have its DPMS changed from
+        *   On->Off. Once DPMS is changed to Off, it may not be switched back
+        *   to On.
+        * - Modesets are not allowed on unregistered connectors, unless they
+        *   would result in disabling their assigned CRTCs. This means
+        *   disabling a CRTC on an unregistered connector is OK, but enabling
+        *   one is not.
+        * - Removing a CRTC from an unregistered connector is OK, but new
+        *   CRTCs may never be assigned to an unregistered connector.
+        */
+       DRM_CONNECTOR_UNREGISTERED = 2,
+};
+
 enum subpixel_order {
        SubPixelUnknown = 0,
        SubPixelHorizontalRGB,
@@ -853,10 +900,12 @@ struct drm_connector {
        bool ycbcr_420_allowed;
 
        /**
-        * @registered: Is this connector exposed (registered) with userspace?
+        * @registration_state: Is this connector initializing, exposed
+        * (registered) with userspace, or unregistered?
+        *
         * Protected by @mutex.
         */
-       bool registered;
+       enum drm_connector_registration_state registration_state;
 
        /**
         * @modes:
@@ -1166,6 +1215,24 @@ static inline void drm_connector_unreference(struct drm_connector *connector)
        drm_connector_put(connector);
 }
 
+/**
+ * drm_connector_is_unregistered - has the connector been unregistered from
+ * userspace?
+ * @connector: DRM connector
+ *
+ * Checks whether or not @connector has been unregistered from userspace.
+ *
+ * Returns:
+ * True if the connector was unregistered, false if the connector is
+ * registered or has not yet been registered with userspace.
+ */
+static inline bool
+drm_connector_is_unregistered(struct drm_connector *connector)
+{
+       return READ_ONCE(connector->registration_state) ==
+               DRM_CONNECTOR_UNREGISTERED;
+}
+
 const char *drm_get_connector_status_name(enum drm_connector_status status);
 const char *drm_get_subpixel_order_name(enum subpixel_order order);
 const char *drm_get_dpms_name(int val);
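
A hedged driver-side sketch of the new helper; the hotplug handler itself is hypothetical:

/* Refuse new work once the connector is gone from userspace. */
static void sketch_handle_hotplug(struct drm_connector *connector)
{
	if (drm_connector_is_unregistered(connector))
		return;		/* only DPMS On->Off remains legal */
	/* ... probe modes, send the hotplug uevent, etc. ... */
}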
index e0a9c236887281d793acff2ea457d2290c56cb4e..9ce2f0fae57e39da7dd7471248402a8b1f2a2a35 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/seq_file.h>
 #include <keys/asymmetric-type.h>
 
+struct kernel_pkey_query;
+struct kernel_pkey_params;
 struct public_key_signature;
 
 /*
@@ -34,6 +36,13 @@ struct asymmetric_key_subtype {
        /* Destroy a key of this subtype */
        void (*destroy)(void *payload_crypto, void *payload_auth);
 
+       int (*query)(const struct kernel_pkey_params *params,
+                    struct kernel_pkey_query *info);
+
+       /* Encrypt/decrypt/sign data */
+       int (*eds_op)(struct kernel_pkey_params *params,
+                     const void *in, void *out);
+
        /* Verify the signature on a key of this subtype (optional) */
        int (*verify_signature)(const struct key *key,
                                const struct public_key_signature *sig);
diff --git a/include/keys/trusted.h b/include/keys/trusted.h
new file mode 100644 (file)
index 0000000..adbcb68
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TRUSTED_KEY_H
+#define __TRUSTED_KEY_H
+
+/* implementation specific TPM constants */
+#define MAX_BUF_SIZE                   1024
+#define TPM_GETRANDOM_SIZE             14
+#define TPM_OSAP_SIZE                  36
+#define TPM_OIAP_SIZE                  10
+#define TPM_SEAL_SIZE                  87
+#define TPM_UNSEAL_SIZE                        104
+#define TPM_SIZE_OFFSET                        2
+#define TPM_RETURN_OFFSET              6
+#define TPM_DATA_OFFSET                        10
+
+#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset]))
+#define LOAD32N(buffer, offset)        (*(uint32_t *)&buffer[offset])
+#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
+
+struct tpm_buf {
+       int len;
+       unsigned char data[MAX_BUF_SIZE];
+};
+
+#define INIT_BUF(tb) (tb->len = 0)
+
+struct osapsess {
+       uint32_t handle;
+       unsigned char secret[SHA1_DIGEST_SIZE];
+       unsigned char enonce[TPM_NONCE_SIZE];
+};
+
+/* discrete values, but have to store in uint16_t for TPM use */
+enum {
+       SEAL_keytype = 1,
+       SRK_keytype = 4
+};
+
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+                       unsigned int keylen, unsigned char *h1,
+                       unsigned char *h2, unsigned char h3, ...);
+int TSS_checkhmac1(unsigned char *buffer,
+                         const uint32_t command,
+                         const unsigned char *ononce,
+                         const unsigned char *key,
+                         unsigned int keylen, ...);
+
+int trusted_tpm_send(unsigned char *cmd, size_t buflen);
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
+
+#define TPM_DEBUG 0
+
+#if TPM_DEBUG
+static inline void dump_options(struct trusted_key_options *o)
+{
+       pr_info("trusted_key: sealing key type %d\n", o->keytype);
+       pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
+       pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
+       pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+       print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
+                      16, 1, o->pcrinfo, o->pcrinfo_len, 0);
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+       pr_info("trusted_key: key_len %d\n", p->key_len);
+       print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+                      16, 1, p->key, p->key_len, 0);
+       pr_info("trusted_key: bloblen %d\n", p->blob_len);
+       print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+                      16, 1, p->blob, p->blob_len, 0);
+       pr_info("trusted_key: migratable %d\n", p->migratable);
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+       print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
+                      16, 1, &s->handle, 4, 0);
+       pr_info("trusted-key: secret:\n");
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+                      16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
+       pr_info("trusted-key: enonce:\n");
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+                      16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+       int len;
+
+       pr_info("\ntrusted-key: tpm buffer\n");
+       len = LOAD32(buf, TPM_SIZE_OFFSET);
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
+}
+#else
+static inline void dump_options(struct trusted_key_options *o)
+{
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+}
+#endif
+
+static inline void store8(struct tpm_buf *buf, const unsigned char value)
+{
+       buf->data[buf->len++] = value;
+}
+
+static inline void store16(struct tpm_buf *buf, const uint16_t value)
+{
+       *(uint16_t *) & buf->data[buf->len] = htons(value);
+       buf->len += sizeof value;
+}
+
+static inline void store32(struct tpm_buf *buf, const uint32_t value)
+{
+       *(uint32_t *) & buf->data[buf->len] = htonl(value);
+       buf->len += sizeof value;
+}
+
+static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
+                             const int len)
+{
+       memcpy(buf->data + buf->len, in, len);
+       buf->len += len;
+}
+#endif
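
A hedged sketch of marshalling with the store helpers above; the tag is the standard TPM_TAG_RQU_COMMAND, while the ordinal shown is only illustrative:

/* Build the fixed 10-byte header of a TPM 1.2 command, then append the
 * payload: 2-byte tag, 4-byte total size, 4-byte ordinal. */
static void sketch_build_cmd(struct tpm_buf *tb,
			     const unsigned char *payload, int len)
{
	INIT_BUF(tb);
	store16(tb, 0x00C1);		/* TPM_TAG_RQU_COMMAND */
	store32(tb, 10 + len);		/* paramSize: header + payload */
	store32(tb, 0x00000046);	/* example ordinal only */
	storebytes(tb, payload, len);
}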
index 2a629acb4c3f467221c8db81ded322780f31b99a..2d29f55923e3ad2e79c70cd0832a8c8523f96ef8 100644 (file)
@@ -7,7 +7,12 @@
 #ifndef _LINUX_ADXL_H
 #define _LINUX_ADXL_H
 
+#ifdef CONFIG_ACPI_ADXL
 const char * const *adxl_get_component_names(void);
 int adxl_decode(u64 addr, u64 component_values[]);
+#else
+static inline const char * const *adxl_get_component_names(void)  { return NULL; }
+static inline int adxl_decode(u64 addr, u64 component_values[])   { return -EOPNOTSUPP; }
+#endif
 
 #endif /* _LINUX_ADXL_H */
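
With the stubs above, callers no longer need their own #ifdef; a hedged sketch follows (the array must be sized for the platform's component count, and eight here is an assumption):

static int sketch_translate(u64 addr)
{
	u64 values[8];	/* assumed component count */
	int ret = adxl_decode(addr, values);

	if (ret)
		return ret;	/* -EOPNOTSUPP when ADXL is compiled out */
	/* ... pair values[] with adxl_get_component_names() ... */
	return 0;
}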
index 2c9756bd9c4cdc4b07ac9e8f6158480b0e58d88f..b2488055fd1d18a5e2986fe9fb27ec4b4bf572df 100644 (file)
 /* Error Codes */
 enum virtchnl_status_code {
        VIRTCHNL_STATUS_SUCCESS                         = 0,
-       VIRTCHNL_ERR_PARAM                              = -5,
+       VIRTCHNL_STATUS_ERR_PARAM                       = -5,
+       VIRTCHNL_STATUS_ERR_NO_MEMORY                   = -18,
        VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH             = -38,
        VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR             = -39,
        VIRTCHNL_STATUS_ERR_INVALID_VF_ID               = -40,
-       VIRTCHNL_STATUS_NOT_SUPPORTED                   = -64,
+       VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR           = -53,
+       VIRTCHNL_STATUS_ERR_NOT_SUPPORTED               = -64,
 };
 
+/* Backward compatibility */
+#define VIRTCHNL_ERR_PARAM VIRTCHNL_STATUS_ERR_PARAM
+#define VIRTCHNL_STATUS_NOT_SUPPORTED VIRTCHNL_STATUS_ERR_NOT_SUPPORTED
+
 #define VIRTCHNL_LINK_SPEED_100MB_SHIFT                0x1
 #define VIRTCHNL_LINK_SPEED_1000MB_SHIFT       0x2
 #define VIRTCHNL_LINK_SPEED_10GB_SHIFT         0x3
@@ -831,7 +837,7 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode,
        case VIRTCHNL_OP_EVENT:
        case VIRTCHNL_OP_UNKNOWN:
        default:
-               return VIRTCHNL_ERR_PARAM;
+               return VIRTCHNL_STATUS_ERR_PARAM;
        }
        /* few more checks */
        if (err_msg_format || valid_len != msglen)
index b47c7f716731fc5ebcdf28f03db3b5eadada59e0..056fb627edb3e82779d0d4f92b27be43a423ce15 100644 (file)
@@ -503,31 +503,23 @@ do {                                              \
        disk_devt((bio)->bi_disk)
 
 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-int bio_associate_blkg_from_page(struct bio *bio, struct page *page);
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
 #else
-static inline int bio_associate_blkg_from_page(struct bio *bio,
-                                              struct page *page) { return 0; }
+static inline int bio_associate_blkcg_from_page(struct bio *bio,
+                                               struct page *page) { return 0; }
 #endif
 
 #ifdef CONFIG_BLK_CGROUP
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
 int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
-int bio_associate_blkg_from_css(struct bio *bio,
-                               struct cgroup_subsys_state *css);
-int bio_associate_create_blkg(struct request_queue *q, struct bio *bio);
-int bio_reassociate_blkg(struct request_queue *q, struct bio *bio);
 void bio_disassociate_task(struct bio *bio);
-void bio_clone_blkg_association(struct bio *dst, struct bio *src);
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
 #else  /* CONFIG_BLK_CGROUP */
-static inline int bio_associate_blkg_from_css(struct bio *bio,
-                                             struct cgroup_subsys_state *css)
-{ return 0; }
-static inline int bio_associate_create_blkg(struct request_queue *q,
-                                           struct bio *bio) { return 0; }
-static inline int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
-{ return 0; }
+static inline int bio_associate_blkcg(struct bio *bio,
+                       struct cgroup_subsys_state *blkcg_css) { return 0; }
 static inline void bio_disassociate_task(struct bio *bio) { }
-static inline void bio_clone_blkg_association(struct bio *dst,
-                                             struct bio *src) { }
+static inline void bio_clone_blkcg_association(struct bio *dst,
+                       struct bio *src) { }
 #endif /* CONFIG_BLK_CGROUP */
 
 #ifdef CONFIG_HIGHMEM
index 1e76ceebeb5dc58c7f98e9f1d18d65fbe62477ef..6d766a19f2bbb2b62facc79ff3871aa81be68534 100644 (file)
@@ -126,7 +126,7 @@ struct blkcg_gq {
        struct request_list             rl;
 
        /* reference count */
-       struct percpu_ref               refcnt;
+       atomic_t                        refcnt;
 
        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;
@@ -184,8 +184,6 @@ extern struct cgroup_subsys_state * const blkcg_root_css;
 
 struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
-struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-                                     struct request_queue *q);
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
 int blkcg_init_queue(struct request_queue *q);
@@ -232,59 +230,22 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
 
-/**
- * blkcg_css - find the current css
- *
- * Find the css associated with either the kthread or the current task.
- * This may return a dying css, so it is up to the caller to use tryget logic
- * to confirm it is alive and well.
- */
-static inline struct cgroup_subsys_state *blkcg_css(void)
-{
-       struct cgroup_subsys_state *css;
-
-       css = kthread_blkcg();
-       if (css)
-               return css;
-       return task_css(current, io_cgrp_id);
-}
 
 static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
 {
        return css ? container_of(css, struct blkcg, css) : NULL;
 }
 
-/**
- * __bio_blkcg - internal version of bio_blkcg for bfq and cfq
- *
- * DO NOT USE.
- * There is a flaw using this version of the function.  In particular, this was
- * used in a broken paradigm where association was called on the given css.  It
- * is possible though that the returned css from task_css() is in the process
- * of dying due to migration of the current task.  So it is improper to assume
- * *_get() is going to succeed.  Both BFQ and CFQ rely on this logic and will
- * take additional work to handle more gracefully.
- */
-static inline struct blkcg *__bio_blkcg(struct bio *bio)
-{
-       if (bio && bio->bi_blkg)
-               return bio->bi_blkg->blkcg;
-       return css_to_blkcg(blkcg_css());
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, NULL if not associated.
- * Callers are expected to either handle NULL or know association has been
- * done prior to calling this.
- */
 static inline struct blkcg *bio_blkcg(struct bio *bio)
 {
-       if (bio && bio->bi_blkg)
-               return bio->bi_blkg->blkcg;
-       return NULL;
+       struct cgroup_subsys_state *css;
+
+       if (bio && bio->bi_css)
+               return css_to_blkcg(bio->bi_css);
+       css = kthread_blkcg();
+       if (css)
+               return css_to_blkcg(css);
+       return css_to_blkcg(task_css(current, io_cgrp_id));
 }
 
 static inline bool blk_cgroup_congested(void)
@@ -490,35 +451,26 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-       percpu_ref_get(&blkg->refcnt);
+       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+       atomic_inc(&blkg->refcnt);
 }
 
 /**
- * blkg_tryget - try and get a blkg reference
+ * blkg_try_get - try and get a blkg reference
  * @blkg: blkg to get
  *
  * This is for use when doing an RCU lookup of the blkg.  We may be in the midst
  * of freeing this blkg, so we can only use it if the refcnt is not zero.
  */
-static inline bool blkg_tryget(struct blkcg_gq *blkg)
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
 {
-       return percpu_ref_tryget(&blkg->refcnt);
+       if (atomic_inc_not_zero(&blkg->refcnt))
+               return blkg;
+       return NULL;
 }
 
-/**
- * blkg_tryget_closest - try and get a blkg ref on the closest blkg
- * @blkg: blkg to get
- *
- * This walks up the blkg tree to find the closest non-dying blkg and returns
- * the blkg that it did association with as it may not be the passed in blkg.
- */
-static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
-{
-       while (!percpu_ref_tryget(&blkg->refcnt))
-               blkg = blkg->parent;
 
-       return blkg;
-}
+void __blkg_release_rcu(struct rcu_head *rcu);
 
 /**
  * blkg_put - put a blkg reference
@@ -526,7 +478,9 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-       percpu_ref_put(&blkg->refcnt);
+       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+       if (atomic_dec_and_test(&blkg->refcnt))
+               call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
 /**
@@ -579,36 +533,25 @@ static inline struct request_list *blk_get_rl(struct request_queue *q,
 
        rcu_read_lock();
 
-       if (bio && bio->bi_blkg) {
-               blkcg = bio->bi_blkg->blkcg;
-               if (blkcg == &blkcg_root)
-                       goto rl_use_root;
-
-               blkg_get(bio->bi_blkg);
-               rcu_read_unlock();
-               return &bio->bi_blkg->rl;
-       }
+       blkcg = bio_blkcg(bio);
 
-       blkcg = css_to_blkcg(blkcg_css());
+       /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
-               goto rl_use_root;
+               goto root_rl;
 
+       /*
+        * Try to use blkg->rl.  blkg lookup may fail under memory pressure
+        * or if either the blkcg or queue is going away.  Fall back to
+        * root_rl in such cases.
+        */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
-               blkg = __blkg_lookup_create(blkcg, q);
-
-       if (blkg->blkcg == &blkcg_root || !blkg_tryget(blkg))
-               goto rl_use_root;
+               goto root_rl;
 
+       blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
-
-       /*
-        * Each blkg has its own request_list, however, the root blkcg
-        * uses the request_queue's root_rl.  This is to avoid most
-        * overhead for the root blkcg.
-        */
-rl_use_root:
+root_rl:
        rcu_read_unlock();
        return &q->root_rl;
 }
@@ -854,26 +797,32 @@ static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg
                                  struct bio *bio) { return false; }
 #endif
 
-
-static inline void blkcg_bio_issue_init(struct bio *bio)
-{
-       bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-}
-
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
 {
+       struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;
 
        rcu_read_lock();
+       blkcg = bio_blkcg(bio);
+
+       /* associate blkcg if bio hasn't attached one */
+       bio_associate_blkcg(bio, &blkcg->css);
 
-       bio_associate_create_blkg(q, bio);
-       blkg = bio->bi_blkg;
+       blkg = blkg_lookup(blkcg, q);
+       if (unlikely(!blkg)) {
+               spin_lock_irq(q->queue_lock);
+               blkg = blkg_lookup_create(blkcg, q);
+               if (IS_ERR(blkg))
+                       blkg = NULL;
+               spin_unlock_irq(q->queue_lock);
+       }
 
        throtl = blk_throtl_bio(q, blkg, bio);
 
        if (!throtl) {
+               blkg = blkg ?: q->root_blkg;
                /*
                 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
                 * is a split bio and we would have already accounted for the
@@ -885,8 +834,6 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
        }
 
-       blkcg_bio_issue_init(bio);
-
        rcu_read_unlock();
        return !throtl;
 }
@@ -983,7 +930,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
 static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }
 
-static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
 static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
 
 static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
@@ -999,7 +945,6 @@ static inline void blk_put_rl(struct request_list *rl) { }
 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }
 
-static inline void blkcg_bio_issue_init(struct bio *bio) { }
 static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }
 
index 093a818c5b684754dbe8877721389947d74674ac..1dcf652ba0aa3e989a7b93e78e4303aa920a1dab 100644 (file)
@@ -178,6 +178,7 @@ struct bio {
         * release.  Read comment on top of bio_associate_current().
         */
        struct io_context       *bi_ioc;
+       struct cgroup_subsys_state *bi_css;
        struct blkcg_gq         *bi_blkg;
        struct bio_issue        bi_issue;
 #endif
index 9e8056ec20faab8c3ffd4e99843a166ace95a0a1..d93e89761a8b429c2b5568688b7bf388e5b977d7 100644 (file)
@@ -51,6 +51,9 @@ struct bpf_reg_state {
                 *   PTR_TO_MAP_VALUE_OR_NULL
                 */
                struct bpf_map *map_ptr;
+
+               /* Max size from any of the above. */
+               unsigned long raw;
        };
        /* Fixed part of pointer offset, pointer types only */
        s32 off;
index a83e1f632eb70eeda82581403f2916e7c7346f79..f01623aef2f77945514d4b2ef0045f0735e11851 100644 (file)
@@ -169,6 +169,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
 
 void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
                      unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
 unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
 void can_free_echo_skb(struct net_device *dev, unsigned int idx);
 
index cb31683bbe154ab1f5b58319369aa7fe32da1c44..8268811a697e25398b6785b6d8904fc9dca46adf 100644 (file)
@@ -41,7 +41,12 @@ int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *
 int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight);
 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 reg);
 int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
-int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb);
+int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
+                               struct sk_buff *skb, u32 timestamp);
+unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
+                                        unsigned int idx, u32 timestamp);
+int can_rx_offload_queue_tail(struct can_rx_offload *offload,
+                             struct sk_buff *skb);
 void can_rx_offload_reset(struct can_rx_offload *offload);
 void can_rx_offload_del(struct can_rx_offload *offload);
 void can_rx_offload_enable(struct can_rx_offload *offload);
index 6b92b3395fa9954ec140df3ed5b2b6d7b4f527f1..65a38c4a02a18d59ff5837447264f7f00998e4fb 100644 (file)
@@ -213,12 +213,6 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
         CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
         CEPH_FEATURE_CEPHX_V2)
 
-#define CEPH_FEATURES_REQUIRED_DEFAULT   \
-       (CEPH_FEATURE_NOSRCADDR |        \
-        CEPH_FEATURE_SUBSCRIBE2 |       \
-        CEPH_FEATURE_RECONNECT_SEQ |    \
-        CEPH_FEATURE_PGID64 |           \
-        CEPH_FEATURE_PGPOOL3 |          \
-        CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
 
 #endif
index 9968332cceed0e64e5fc9bdb814507b0bf67451b..9d12757a65b01846486341c5d31d398ee51d89b4 100644 (file)
@@ -93,8 +93,6 @@ extern struct css_set init_css_set;
 
 bool css_has_online_children(struct cgroup_subsys_state *css);
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
-struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
-                                        struct cgroup_subsys *ss);
 struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
                                             struct cgroup_subsys *ss);
 struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
index 06e77473f17593dc1fac4ad01af652776735b6ef..88720b443cd646f588fa183d60a0f5c549fc030e 100644 (file)
@@ -1032,9 +1032,9 @@ int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
 #else /* !CONFIG_COMPAT */
 
 #define is_compat_task() (0)
-#ifndef in_compat_syscall
+/* Ensure no one redefines in_compat_syscall() under !CONFIG_COMPAT */
+#define in_compat_syscall in_compat_syscall
 static inline bool in_compat_syscall(void) { return false; }
-#endif
 
 #endif /* CONFIG_COMPAT */
 
index b1ce500fe8b3df06a8fca84d6f022b02813000f8..3e7dafb3ea8099285d4185df113f2c89a0426e06 100644 (file)
@@ -21,8 +21,6 @@
 #define __SANITIZE_ADDRESS__
 #endif
 
-#define __no_sanitize_address __attribute__((no_sanitize("address")))
-
 /*
 * Not all versions of clang implement the type-generic versions
  * of the builtin overflow checkers. Fortunately, clang implements
@@ -41,6 +39,3 @@
  * compilers, like ICC.
  */
 #define barrier() __asm__ __volatile__("" : : : "memory")
-#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#define __assume_aligned(a, ...)       \
-       __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
index 90ddfefb6c2b12886acba122a6d2e79e99951f82..2010493e1040846c999804e2e157233c27ccef60 100644 (file)
  */
 #define uninitialized_var(x) x = x
 
-#ifdef __CHECKER__
-#define __must_be_array(a)     0
-#else
-/* &a[0] degrades to a pointer: a different type from an array */
-#define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
-#endif
-
 #ifdef RETPOLINE
-#define __noretpoline __attribute__((indirect_branch("keep")))
+#define __noretpoline __attribute__((__indirect_branch__("keep")))
 #endif
 
 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
-#define __optimize(level)      __attribute__((__optimize__(level)))
-
 #define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
 
-#ifndef __CHECKER__
-#define __compiletime_warning(message) __attribute__((warning(message)))
-#define __compiletime_error(message) __attribute__((error(message)))
+#define __compiletime_warning(message) __attribute__((__warning__(message)))
+#define __compiletime_error(message) __attribute__((__error__(message)))
 
-#ifdef LATENT_ENTROPY_PLUGIN
+#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
 #define __latent_entropy __attribute__((latent_entropy))
 #endif
-#endif /* __CHECKER__ */
 
 /*
  * calling noreturn functions, __builtin_unreachable() and __builtin_trap()
  * Mark a position in code as unreachable.  This can be used to
  * suppress control flow warnings after asm blocks that transfer
  * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased.  Really, we need to have autoconf for the kernel.
  */
 #define unreachable() \
        do {                                    \
                __builtin_unreachable();        \
        } while (0)
 
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone      __attribute__((__noclone__, __optimize__("no-tracer")))
-
 #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
 #define __randomize_layout __attribute__((randomize_layout))
 #define __no_randomize_layout __attribute__((no_randomize_layout))
 #define randomized_struct_fields_end   } __randomize_layout;
 #endif
 
-/*
- * When used with Link Time Optimization, gcc can optimize away C functions or
- * variables which are referenced only from assembly code.  __visible tells the
- * optimizer that something else uses this function or variable, thus preventing
- * this.
- */
-#define __visible      __attribute__((externally_visible))
-
-/* gcc version specific checks */
-
-#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
-/*
- * __assume_aligned(n, k): Tell the optimizer that the returned
- * pointer can be assumed to be k modulo n. The second argument is
- * optional (default 0), so we use a variadic macro to make the
- * shorthand.
- *
- * Beware: Do not apply this to functions which may return
- * ERR_PTRs. Also, it is probably unwise to apply it to functions
- * returning extra information in the low bits (but in that case the
- * compiler should see some alignment anyway, when the return value is
- * massaged by 'flags = ptr & 3; ptr &= ~3;').
- */
-#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
-#endif
-
 /*
  * GCC 'asm goto' miscompiles certain code sequences:
  *
 #define KASAN_ABI_VERSION 3
 #endif
 
-#if GCC_VERSION >= 40902
-/*
- * Tell the compiler that address safety instrumentation (KASAN)
- * should not be applied to that function.
- * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- */
-#define __no_sanitize_address __attribute__((no_sanitize_address))
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline                                        \
-       __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-#endif
-
 #if GCC_VERSION >= 50100
-/*
- * Mark structures as requiring designated initializers.
- * https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
- */
-#define __designated_init __attribute__((designated_init))
 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
 #endif
 
-#if !defined(__noclone)
-#define __noclone      /* not needed */
-#endif
-
-#if !defined(__no_sanitize_address)
-#define __no_sanitize_address
-#define __no_sanitize_address_or_inline inline
-#endif
-
 /*
  * Turn individual warnings and errors on and off locally, depending
  * on version.
index 4c7f9befa9f6c66694c8146aafe4139614733d18..517bd14e122248f029d27e6dd192b492fe3be93b 100644 (file)
  */
 #define OPTIMIZER_HIDE_VAR(var) barrier()
 
-/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
-#define __must_be_array(a) 0
-
 #endif
 
 /* icc has this, but it's called _bswap16 */
 #define __HAVE_BUILTIN_BSWAP16__
 #define __builtin_bswap16 _bswap16
-
-/* The following are for compatibility with GCC, from compiler-gcc.h,
- * and may be redefined here because they should not be shared with other
- * compilers, like clang.
- */
-#define __visible      __attribute__((externally_visible))
index 4170fcee5adb30780dae6c06a5b34bcd57855d02..06396c1cf127f75bb357326883f1dcb69161ccf1 100644 (file)
@@ -23,8 +23,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #define __branch_check__(x, expect, is_constant) ({                    \
                        long ______r;                                   \
                        static struct ftrace_likely_data                \
-                               __attribute__((__aligned__(4)))         \
-                               __attribute__((section("_ftrace_annotated_branch"))) \
+                               __aligned(4)                            \
+                               __section("_ftrace_annotated_branch")   \
                                ______f = {                             \
                                .data.func = __func__,                  \
                                .data.file = __FILE__,                  \
@@ -59,8 +59,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        ({                                                              \
                int ______r;                                            \
                static struct ftrace_branch_data                        \
-                       __attribute__((__aligned__(4)))                 \
-                       __attribute__((section("_ftrace_branch")))      \
+                       __aligned(4)                                    \
+                       __section("_ftrace_branch")                     \
                        ______f = {                                     \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
@@ -115,7 +115,10 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 # define ASM_UNREACHABLE
 #endif
 #ifndef unreachable
-# define unreachable() do { annotate_reachable(); do { } while (1); } while (0)
+# define unreachable() do {            \
+       annotate_unreachable();         \
+       __builtin_unreachable();        \
+} while (0)
 #endif
 
 /*
@@ -137,7 +140,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
        extern typeof(sym) sym;                                 \
        static const unsigned long __kentry_##sym               \
        __used                                                  \
-       __attribute__((section("___kentry" "+" #sym ), used))   \
+       __section("___kentry" "+" #sym )                        \
        = (unsigned long)&sym;
 #endif
 
@@ -186,7 +189,7 @@ void __read_once_size(const volatile void *p, void *res, int size)
  *     https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  */
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
 #else
 # define __no_kasan_or_inline __always_inline
 #endif
@@ -278,7 +281,7 @@ unsigned long read_word_at_a_time(const void *addr)
  * visible to the compiler.
  */
 #define __ADDRESSABLE(sym) \
-       static void * __attribute__((section(".discard.addressable"), used)) \
+       static void * __section(".discard.addressable") __used \
                __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
 
 /**
@@ -331,10 +334,6 @@ static inline void *offset_to_ptr(const int *off)
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 
-#ifndef __optimize
-# define __optimize(level)
-#endif
-
 /* Compile time object size, -1 for unknown */
 #ifndef __compiletime_object_size
 # define __compiletime_object_size(obj) -1
@@ -376,4 +375,7 @@ static inline void *offset_to_ptr(const int *off)
        compiletime_assert(__native_word(t),                            \
                "Need native word sized stores/loads for atomicity.")
 
+/* &a[0] degrades to a pointer: a different type from an array */
+#define __must_be_array(a)     BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+
 #endif /* __LINUX_COMPILER_H */
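
The __must_be_array() definition moved here is what lets ARRAY_SIZE() reject pointers at build time; a minimal sketch of the trap it closes (kernel context assumed):

static void sketch_array_size(void)
{
	int demo[16];
	size_t n = ARRAY_SIZE(demo);	/* 16: demo really is an array */

	(void)n;
	/* int *p = demo;
	 * ARRAY_SIZE(p) would trip BUILD_BUG_ON_ZERO(), because p and
	 * &p[0] share a type, whereas demo and &demo[0] do not. */
}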
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h
new file mode 100644 (file)
index 0000000..f8c400b
--- /dev/null
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_ATTRIBUTES_H
+#define __LINUX_COMPILER_ATTRIBUTES_H
+
+/*
+ * The attributes in this file are unconditionally defined and they directly
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
+ *
+ * Any other "attributes" (i.e. those that depend on a configuration option,
+ * on a compiler, on an architecture, on plugins, on other attributes...)
+ * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
+ *
+ * This file is meant to be sorted (by actual attribute name,
+ * not by #define identifier). Use the __attribute__((__name__)) syntax
+ * (i.e. with underscores) to avoid future collisions with other macros.
+ * Provide links to the documentation of each supported compiler, if it exists.
+ */
+
+/*
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
+ *
+ * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
+ * depending on the compiler used to build it; however, these attributes have
+ * no semantic effects for sparse, so it does not matter. Also note that,
+ * in order to avoid sparse's warnings, even the unsupported ones must be
+ * defined to 0.
+ */
+#ifndef __has_attribute
+# define __has_attribute(x) __GCC4_has_attribute_##x
+# define __GCC4_has_attribute___assume_aligned__      (__GNUC_MINOR__ >= 9)
+# define __GCC4_has_attribute___designated_init__     0
+# define __GCC4_has_attribute___externally_visible__  1
+# define __GCC4_has_attribute___noclone__             1
+# define __GCC4_has_attribute___optimize__            1
+# define __GCC4_has_attribute___nonstring__           0
+# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+#endif
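
An illustration of how the hand-rolled fallback above expands on gcc 4.x: the argument is token-pasted onto __GCC4_has_attribute_, then the per-attribute macro resolves it.

/*
 *   __has_attribute(__noclone__)
 *     -> __GCC4_has_attribute___noclone__
 *     -> 1					(all gcc 4.x support it)
 *
 *   __has_attribute(__assume_aligned__)
 *     -> __GCC4_has_attribute___assume_aligned__
 *     -> (__GNUC_MINOR__ >= 9)			(gcc >= 4.9 only)
 */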
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute
+ */
+#define __alias(symbol)                 __attribute__((__alias__(#symbol)))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute
+ */
+#define __aligned(x)                    __attribute__((__aligned__(x)))
+#define __aligned_largest               __attribute__((__aligned__))
+
+/*
+ * Note: users of __always_inline currently do not write "inline" themselves,
+ * which gcc seems to require in order to apply the attribute according
+ * to its docs (otherwise "warning: always_inline function might not be
+ * inlinable [-Wattributes]" is emitted).
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute
+ * clang: mentioned
+ */
+#define __always_inline                 inline __attribute__((__always_inline__))
+
+/*
+ * The second argument is optional (default 0), so we use a variadic macro
+ * to make the shorthand.
+ *
+ * Beware: Do not apply this to functions which may return
+ * ERR_PTRs. Also, it is probably unwise to apply it to functions
+ * returning extra information in the low bits (but in that case the
+ * compiler should see some alignment anyway, when the return value is
+ * massaged by 'flags = ptr & 3; ptr &= ~3;').
+ *
+ * Optional: only supported since gcc >= 4.9
+ * Optional: not supported by icc
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned
+ */
+#if __has_attribute(__assume_aligned__)
+# define __assume_aligned(a, ...)       __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
+#else
+# define __assume_aligned(a, ...)
+#endif
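
A hedged usage sketch (the allocator name and alignment are hypothetical):
declaring the return alignment lets the compiler assume it at every call
site, e.g. when vectorizing loops over the returned buffer.

	/* foo_alloc() promises 64-byte aligned returns. */
	void *foo_alloc(size_t size) __assume_aligned(64);
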
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute
+ */
+#define __cold                          __attribute__((__cold__))
+
+/*
+ * Note the long name.
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute
+ */
+#define __attribute_const__             __attribute__((__const__))
+
+/*
+ * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated'
+ * attribute warnings entirely and for good") for more information.
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated
+ */
+#define __deprecated
+
+/*
+ * Optional: only supported since gcc >= 5.1
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute
+ */
+#if __has_attribute(__designated_init__)
+# define __designated_init              __attribute__((__designated_init__))
+#else
+# define __designated_init
+#endif
+
+/*
+ * Optional: not supported by clang
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute
+ */
+#if __has_attribute(__externally_visible__)
+# define __visible                      __attribute__((__externally_visible__))
+#else
+# define __visible
+#endif
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#format
+ */
+#define __printf(a, b)                  __attribute__((__format__(printf, a, b)))
+#define __scanf(a, b)                   __attribute__((__format__(scanf, a, b)))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline
+ */
+#define __gnu_inline                    __attribute__((__gnu_inline__))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute
+ */
+#define __malloc                        __attribute__((__malloc__))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute
+ */
+#define __mode(x)                       __attribute__((__mode__(x)))
+
+/*
+ * Optional: not supported by clang
+ * Note: icc does not recognize gcc's no-tracer
+ *
+ *  gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute
+ *  gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-optimize-function-attribute
+ */
+#if __has_attribute(__noclone__)
+# if __has_attribute(__optimize__)
+#  define __noclone                     __attribute__((__noclone__, __optimize__("no-tracer")))
+# else
+#  define __noclone                     __attribute__((__noclone__))
+# endif
+#else
+# define __noclone
+#endif
+
+/*
+ * Note the missing underscores.
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute
+ * clang: mentioned
+ */
+#define   noinline                      __attribute__((__noinline__))
+
+/*
+ * Optional: only supported since gcc >= 8
+ * Optional: not supported by clang
+ * Optional: not supported by icc
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute
+ */
+#if __has_attribute(__nonstring__)
+# define __nonstring                    __attribute__((__nonstring__))
+#else
+# define __nonstring
+#endif
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#id1
+ */
+#define __noreturn                      __attribute__((__noreturn__))
+
+/*
+ * Optional: only supported since gcc >= 4.8
+ * Optional: not supported by icc
+ *
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-no_005fsanitize_005faddress-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#no-sanitize-address-no-address-safety-analysis
+ */
+#if __has_attribute(__no_sanitize_address__)
+# define __no_sanitize_address          __attribute__((__no_sanitize_address__))
+#else
+# define __no_sanitize_address
+#endif
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute
+ */
+#define __packed                        __attribute__((__packed__))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute
+ */
+#define __pure                          __attribute__((__pure__))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate
+ */
+#define __section(S)                    __attribute__((__section__(#S)))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused
+ */
+#define __always_unused                 __attribute__((__unused__))
+#define __maybe_unused                  __attribute__((__unused__))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute
+ */
+#define __used                          __attribute__((__used__))
+
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
+ */
+#define __weak                          __attribute__((__weak__))
+
+#endif /* __LINUX_COMPILER_ATTRIBUTES_H */
index 97cfe29b3f0adf6da5537d68bc7d85ad9769dcd5..4a3f9c09c92d04583f9a0d4fa335a1fb9ce44ab8 100644 (file)
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __LINUX_COMPILER_TYPES_H
 #define __LINUX_COMPILER_TYPES_H
 
@@ -54,6 +55,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 
 #ifdef __KERNEL__
 
+/* Attributes */
+#include <linux/compiler_attributes.h>
+
 /* Compiler specific macros. */
 #ifdef __clang__
 #include <linux/compiler-clang.h>
@@ -78,12 +82,6 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 #include <asm/compiler.h>
 #endif
 
-/*
- * Generic compiler-independent macros required for kernel
- * build go below this comment. Actual compiler/compiler version
- * specific implementations come from the above header files
- */
-
 struct ftrace_branch_data {
        const char *func;
        const char *file;
@@ -106,10 +104,6 @@ struct ftrace_likely_data {
        unsigned long                   constant;
 };
 
-/* Don't. Just don't. */
-#define __deprecated
-#define __deprecated_for_modules
-
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
@@ -119,10 +113,6 @@ struct ftrace_likely_data {
  * compilers. We don't consider that to be an error, so set them to nothing.
  * For example, some of them are for compiler specific plugins.
  */
-#ifndef __designated_init
-# define __designated_init
-#endif
-
 #ifndef __latent_entropy
 # define __latent_entropy
 #endif
@@ -140,15 +130,8 @@ struct ftrace_likely_data {
 # define randomized_struct_fields_end
 #endif
 
-#ifndef __visible
-#define __visible
-#endif
-
-/*
- * Assume alignment of return value.
- */
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
 #endif
 
 /* Are two types/vars the same type (ignoring qualifiers)? */
@@ -159,14 +142,6 @@ struct ftrace_likely_data {
        (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
         sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 
-#ifndef __attribute_const__
-#define __attribute_const__    __attribute__((__const__))
-#endif
-
-#ifndef __noclone
-#define __noclone
-#endif
-
 /* Helpers for emitting diagnostics in pragmas. */
 #ifndef __diag
 #define __diag(string)
@@ -186,43 +161,16 @@ struct ftrace_likely_data {
 #define __diag_error(compiler, version, option, comment) \
        __diag_ ## compiler(version, error, option)
 
-/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables.  Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
- */
-#define __pure                 __attribute__((pure))
-#define __aligned(x)           __attribute__((aligned(x)))
-#define __printf(a, b)         __attribute__((format(printf, a, b)))
-#define __scanf(a, b)          __attribute__((format(scanf, a, b)))
-#define __maybe_unused         __attribute__((unused))
-#define __always_unused                __attribute__((unused))
-#define __mode(x)              __attribute__((mode(x)))
-#define __malloc               __attribute__((__malloc__))
-#define __used                 __attribute__((__used__))
-#define __noreturn             __attribute__((noreturn))
-#define __packed               __attribute__((packed))
-#define __weak                 __attribute__((weak))
-#define __alias(symbol)                __attribute__((alias(#symbol)))
-#define __cold                 __attribute__((cold))
-#define __section(S)           __attribute__((__section__(#S)))
-
-
 #ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check           __attribute__((warn_unused_result))
+#define __must_check           __attribute__((__warn_unused_result__))
 #else
 #define __must_check
 #endif
 
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#if defined(CC_USING_HOTPATCH)
 #define notrace                        __attribute__((hotpatch(0, 0)))
 #else
-#define notrace                        __attribute__((no_instrument_function))
+#define notrace                        __attribute__((__no_instrument_function__))
 #endif
 
 /*
@@ -231,22 +179,10 @@ struct ftrace_likely_data {
  * stack and frame pointer being set up and there is no chance to
  * restore the lr register to the value before mcount was called.
  */
-#define __naked                        __attribute__((naked)) notrace
+#define __naked                        __attribute__((__naked__)) notrace
 
 #define __compiler_offsetof(a, b)      __builtin_offsetof(a, b)
 
-/*
- * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
- * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
- * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
- * defined so the gnu89 semantics are the default.
- */
-#ifdef __GNUC_STDC_INLINE__
-# define __gnu_inline  __attribute__((gnu_inline))
-#else
-# define __gnu_inline
-#endif
-
 /*
  * Force always-inline if the user requests it so via the .config.
  * GCC does not warn about unused static inline functions for
@@ -258,22 +194,20 @@ struct ftrace_likely_data {
  * semantics rather than c99. This prevents multiple symbol definition errors
  * of extern inline functions at link time.
  * A lot of inline functions can cause havoc with function tracing.
+ * Do not use __always_inline here, since currently it expands to inline again
+ * (which would break users of __always_inline).
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
        !defined(CONFIG_OPTIMIZE_INLINING)
-#define inline \
-       inline __attribute__((always_inline, unused)) notrace __gnu_inline
+#define inline inline __attribute__((__always_inline__)) __gnu_inline \
+       __maybe_unused notrace
 #else
-#define inline inline  __attribute__((unused)) notrace __gnu_inline
+#define inline inline                                    __gnu_inline \
+       __maybe_unused notrace
 #endif
 
 #define __inline__ inline
-#define __inline inline
-#define noinline       __attribute__((noinline))
-
-#ifndef __always_inline
-#define __always_inline inline __attribute__((always_inline))
-#endif
+#define __inline   inline
 
 /*
  * Rather than using noinline to prevent stack consumption, use
index caf40ad0bbc6e0f42027aa1c68f3fa5d07477c9a..e0cd2baa83809a8cff419d0a65562c3a80870232 100644 (file)
@@ -126,6 +126,7 @@ enum cpuhp_state {
        CPUHP_AP_MIPS_GIC_TIMER_STARTING,
        CPUHP_AP_ARC_TIMER_STARTING,
        CPUHP_AP_RISCV_TIMER_STARTING,
+       CPUHP_AP_CSKY_TIMER_STARTING,
        CPUHP_AP_KVM_STARTING,
        CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
        CPUHP_AP_KVM_ARM_VGIC_STARTING,
index 450b28db95331ffbe19963c804391d7249cfb2c3..0dd316a74a295132ea6b6c04f914356c5c4064d6 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/radix-tree.h>
 #include <asm/pgtable.h>
 
+typedef unsigned long dax_entry_t;
+
 struct iomap_ops;
 struct dax_device;
 struct dax_operations {
@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
                struct block_device *bdev, struct writeback_control *wbc);
 
 struct page *dax_layout_busy_page(struct address_space *mapping);
-bool dax_lock_mapping_entry(struct page *page);
-void dax_unlock_mapping_entry(struct page *page);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
 #else
 static inline bool bdev_dax_supported(struct block_device *bdev,
                int blocksize)
@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
        return -EOPNOTSUPP;
 }
 
-static inline bool dax_lock_mapping_entry(struct page *page)
+static inline dax_entry_t dax_lock_page(struct page *page)
 {
        if (IS_DAX(page->mapping->host))
-               return true;
-       return false;
+               return ~0UL;
+       return 0;
 }
 
-static inline void dax_unlock_mapping_entry(struct page *page)
+static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
 {
 }
 #endif
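
A hedged sketch of the new cookie-based locking protocol (the surrounding
error handling is hypothetical): the cookie returned by dax_lock_page()
must be handed back to dax_unlock_page(), and a zero cookie means the
entry could not be locked.

	dax_entry_t cookie;

	cookie = dax_lock_page(page);
	if (!cookie)
		return -EBUSY;	/* e.g. the mapping went away under us */
	/* ... inspect or poison the DAX-backed page ... */
	dax_unlock_page(page, cookie);
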
index bd73e7a9141076389ad638cc8fe4f48515331711..9e66bfe369aa057ac9d285d1f3cede42e54387cf 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>
 
-#define DIRECT_MAPPING_ERROR           0
+#define DIRECT_MAPPING_ERROR           (~(dma_addr_t)0)
 
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
index 845174e113ce9b360e899553b7e97f837a5abff7..100ce4a4aff6ce1808a0993a7bc24ebeb3612468 100644 (file)
@@ -1167,6 +1167,8 @@ static inline bool efi_enabled(int feature)
 extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
 
 extern bool efi_is_table_address(unsigned long phys_addr);
+
+extern int efi_apply_persistent_mem_reservations(void);
 #else
 static inline bool efi_enabled(int feature)
 {
@@ -1185,6 +1187,11 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
 {
        return false;
 }
+
+static inline int efi_apply_persistent_mem_reservations(void)
+{
+       return 0;
+}
 #endif
 
 extern int efi_status_to_err(efi_status_t status);
index de629b706d1d7dda246819ed37109f098c05785e..795ff0b869bbf6403c0e89b87fdc7855e3bbb5cd 100644 (file)
@@ -449,6 +449,13 @@ struct sock_reuseport;
        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
 #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)                             \
        offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
+#if BITS_PER_LONG == 64
+# define bpf_ctx_range_ptr(TYPE, MEMBER)                                       \
+       offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
+#else
+# define bpf_ctx_range_ptr(TYPE, MEMBER)                                       \
+       offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
+#endif /* BITS_PER_LONG == 64 */
 
 #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                           \
        ({                                                                      \
@@ -866,6 +873,10 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr);
 
 void bpf_jit_free(struct bpf_prog *fp);
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+                         const struct bpf_insn *insn, bool extra_pass,
+                         u64 *func_addr, bool *func_addr_fixed);
+
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
 void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);
 
index 8252df30b9a16afe53c269b912b47e17edac7b41..c95c0807471fd178c5d943c315fda2aab0f993b8 100644 (file)
@@ -1752,6 +1752,25 @@ struct block_device_operations;
 #define NOMMU_VMFLAGS \
        (NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC)
 
+/*
+ * These flags control the behavior of the remap_file_range function pointer.
+ * If it is called with len == 0, that means "remap to end of source file".
+ * See Documentation/filesystems/vfs.txt for more details about this call.
+ *
+ * REMAP_FILE_DEDUP: only remap if contents identical (i.e. deduplicate)
+ * REMAP_FILE_CAN_SHORTEN: caller can handle a shortened request
+ */
+#define REMAP_FILE_DEDUP               (1 << 0)
+#define REMAP_FILE_CAN_SHORTEN         (1 << 1)
+
+/*
+ * These flags signal that the caller is ok with altering various aspects of
+ * the behavior of the remap operation.  The changes must be made by the
+ * implementation; the vfs remap helper functions can take advantage of them.
+ * Flags in this category exist to preserve the quirky behavior of the hoisted
+ * btrfs clone/dedupe ioctls.
+ */
+#define REMAP_FILE_ADVISORY            (REMAP_FILE_CAN_SHORTEN)
 
 struct iov_iter;
 
@@ -1790,10 +1809,9 @@ struct file_operations {
 #endif
        ssize_t (*copy_file_range)(struct file *, loff_t, struct file *,
                        loff_t, size_t, unsigned int);
-       int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
-                       u64);
-       int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
-                       u64);
+       loff_t (*remap_file_range)(struct file *file_in, loff_t pos_in,
+                                  struct file *file_out, loff_t pos_out,
+                                  loff_t len, unsigned int remap_flags);
        int (*fadvise)(struct file *, loff_t, loff_t, int);
 } __randomize_layout;
 
@@ -1856,21 +1874,21 @@ extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
                unsigned long, loff_t *, rwf_t);
 extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *,
                                   loff_t, size_t, unsigned int);
-extern int vfs_clone_file_prep_inodes(struct inode *inode_in, loff_t pos_in,
-                                     struct inode *inode_out, loff_t pos_out,
-                                     u64 *len, bool is_dedupe);
-extern int do_clone_file_range(struct file *file_in, loff_t pos_in,
-                              struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_clone_file_range(struct file *file_in, loff_t pos_in,
-                               struct file *file_out, loff_t pos_out, u64 len);
-extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
-                                        struct inode *dest, loff_t destoff,
-                                        loff_t len, bool *is_same);
+extern int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
+                                        struct file *file_out, loff_t pos_out,
+                                        loff_t *count,
+                                        unsigned int remap_flags);
+extern loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
+                                 struct file *file_out, loff_t pos_out,
+                                 loff_t len, unsigned int remap_flags);
+extern loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
+                                  struct file *file_out, loff_t pos_out,
+                                  loff_t len, unsigned int remap_flags);
 extern int vfs_dedupe_file_range(struct file *file,
                                 struct file_dedupe_range *same);
-extern int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
-                                    struct file *dst_file, loff_t dst_pos,
-                                    u64 len);
+extern loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+                                       struct file *dst_file, loff_t dst_pos,
+                                       loff_t len, unsigned int remap_flags);
 
 
 struct super_operations {
@@ -2998,6 +3016,9 @@ extern int sb_min_blocksize(struct super_block *, int);
 extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
 extern ssize_t generic_write_checks(struct kiocb *, struct iov_iter *);
+extern int generic_remap_checks(struct file *file_in, loff_t pos_in,
+                               struct file *file_out, loff_t pos_out,
+                               loff_t *count, unsigned int remap_flags);
 extern ssize_t generic_file_read_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *);
 extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *);
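
A hedged sketch (hypothetical filesystem) of the consolidated hook that
replaces the separate clone/dedupe methods: one entry point, selected by
remap_flags, returning bytes remapped or a negative error:

	static loff_t foo_remap_file_range(struct file *file_in, loff_t pos_in,
					   struct file *file_out, loff_t pos_out,
					   loff_t len, unsigned int remap_flags)
	{
		int ret;

		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_CAN_SHORTEN))
			return -EINVAL;

		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
						    pos_out, &len, remap_flags);
		if (ret < 0 || len == 0)
			return ret;

		/* ... filesystem-specific extent sharing goes here ... */
		return len;
	}
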
index 34cf0fdd7dc76ac0ac754ad9af36d8a505aa9795..610815e3f1aaeb02c0a713128fd687a8adc78b5b 100644 (file)
@@ -196,8 +196,7 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
 static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
                                              int n_pages)
 {
-       atomic_sub(n_pages, &op->n_pages);
-       if (atomic_read(&op->n_pages) <= 0)
+       if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
                fscache_op_complete(&op->op, false);
 }
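
Why the rewrite matters: with the old subtract-then-read pair, two CPUs
completing retrievals concurrently could both subtract, then both observe
n_pages <= 0 and both call fscache_op_complete(). A userspace sketch of
the fixed pattern with C11 atomics (illustration only, not kernel code):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int n_pages;

	static bool complete(int pages)
	{
		/* atomic_fetch_sub() returns the old value, so old - pages
		 * is what atomic_sub_return() yields in the kernel; exactly
		 * one caller sees the count cross zero. */
		return atomic_fetch_sub(&n_pages, pages) - pages <= 0;
	}
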
 
index a397907e8d727481cfb933e7a7c5e80ca194b0d7..dd16e8218db3a7a9cb3988b973ae500f683cd076 100644 (file)
@@ -777,8 +777,8 @@ struct ftrace_ret_stack {
 extern void return_to_handler(void);
 
 extern int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
-                        unsigned long frame_pointer, unsigned long *retp);
+function_graph_enter(unsigned long ret, unsigned long func,
+                    unsigned long frame_pointer, unsigned long *retp);
 
 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                    unsigned long ret, unsigned long *retp);
index 24bcc5eec6b409ec379602156a94d74373ec2633..0705164f928c949720d9a0927f1564119146c3fa 100644 (file)
@@ -511,14 +511,14 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
 extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
                        struct vm_area_struct *vma, unsigned long addr,
                        int node, bool hugepage);
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
 #else
 #define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
 #define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
        alloc_pages(gfp_mask, order)
-#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
+#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
        alloc_pages(gfp_mask, order)
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
index 331dc377c2758cd0f39460c4184ce4cecca56bfb..dc12f5c4b076cb94d1341eb58f57a282f8da69a9 100644 (file)
@@ -177,6 +177,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
 * @attr_usage_id:      Attribute usage id as per spec
 * @report_id:  Report id to look for
 * @flag:      Synchronous or asynchronous read
+* @is_signed:   If true then fields < 32 bits will be sign-extended
 *
 * Issues a synchronous or asynchronous read request for an input attribute.
  * Returns data up to 32 bits.
@@ -190,7 +191,8 @@ enum sensor_hub_read_flags {
 int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                                        u32 usage_id,
                                        u32 attr_usage_id, u32 report_id,
-                                       enum sensor_hub_read_flags flag
+                                       enum sensor_hub_read_flags flag,
+                                       bool is_signed
 );
 
 /**
index a355d61940f28957ec61b4424d49732a85118ffd..d99287327ef23f630e321d4705da2ef0248a257a 100644 (file)
@@ -219,6 +219,7 @@ struct hid_item {
 #define HID_GD_VBRZ            0x00010045
 #define HID_GD_VNO             0x00010046
 #define HID_GD_FEATURE         0x00010047
+#define HID_GD_RESOLUTION_MULTIPLIER   0x00010048
 #define HID_GD_SYSTEM_CONTROL  0x00010080
 #define HID_GD_UP              0x00010090
 #define HID_GD_DOWN            0x00010091
@@ -232,12 +233,14 @@ struct hid_item {
 #define HID_DC_BATTERYSTRENGTH 0x00060020
 
 #define HID_CP_CONSUMER_CONTROL        0x000c0001
+#define HID_CP_AC_PAN          0x000c0238
 
 #define HID_DG_DIGITIZER       0x000d0001
 #define HID_DG_PEN             0x000d0002
 #define HID_DG_LIGHTPEN                0x000d0003
 #define HID_DG_TOUCHSCREEN     0x000d0004
 #define HID_DG_TOUCHPAD                0x000d0005
+#define HID_DG_WHITEBOARD      0x000d0006
 #define HID_DG_STYLUS          0x000d0020
 #define HID_DG_PUCK            0x000d0021
 #define HID_DG_FINGER          0x000d0022
@@ -427,6 +430,7 @@ struct hid_local {
  */
 
 struct hid_collection {
+       struct hid_collection *parent;
        unsigned type;
        unsigned usage;
        unsigned level;
@@ -436,12 +440,16 @@ struct hid_usage {
        unsigned  hid;                  /* hid usage code */
        unsigned  collection_index;     /* index into collection array */
        unsigned  usage_index;          /* index into usage array */
+       __s8      resolution_multiplier;/* Effective Resolution Multiplier
+                                          (HUT v1.12, 4.3.1), default: 1 */
        /* hidinput data */
+       __s8      wheel_factor;         /* 120/resolution_multiplier */
        __u16     code;                 /* input driver code */
        __u8      type;                 /* input driver type */
        __s8      hat_min;              /* hat switch fun */
        __s8      hat_max;              /* ditto */
        __s8      hat_dir;              /* ditto */
+       __s16     wheel_accumulated;    /* hi-res wheel */
 };
 
 struct hid_input;
@@ -650,6 +658,7 @@ struct hid_parser {
        unsigned int         *collection_stack;
        unsigned int          collection_stack_ptr;
        unsigned int          collection_stack_size;
+       struct hid_collection *active_collection;
        struct hid_device    *device;
        unsigned int          scan_flags;
 };
@@ -836,7 +845,11 @@ static inline bool hid_is_using_ll_driver(struct hid_device *hdev,
 
 /* Applications from HID Usage Tables 4/8/99 Version 1.1 */
 /* We ignore a few input applications that are not widely used */
-#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || ((a >= 0x000d0002) && (a <= 0x000d0006)))
+#define IS_INPUT_APPLICATION(a) \
+               (((a >= HID_UP_GENDESK) && (a <= HID_GD_MULTIAXIS)) \
+               || ((a >= HID_DG_PEN) && (a <= HID_DG_WHITEBOARD)) \
+               || (a == HID_GD_SYSTEM_CONTROL) || (a == HID_CP_CONSUMER_CONTROL) \
+               || (a == HID_GD_WIRELESS_RADIO_CTLS))
 
 /* HID core API */
 
@@ -892,6 +905,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
                                       unsigned int type, unsigned int id,
                                       unsigned int field_index,
                                       unsigned int report_counts);
+
+void hid_setup_resolution_multiplier(struct hid_device *hid);
 int hid_open_report(struct hid_device *device);
 int hid_check_keys_pressed(struct hid_device *hid);
 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
index b3e24368930a2246edd2223c8431a448c7f36776..14131b6fae68dda342187a35312c1eee43f4d5a2 100644 (file)
@@ -905,6 +905,13 @@ struct vmbus_channel {
 
        bool probe_done;
 
+       /*
+        * We must offload the handling of the primary/sub channels
+        * from the single-threaded vmbus_connection.work_queue to
+        * two different workqueues, otherwise we can block
+        * vmbus_connection.work_queue and hang: see vmbus_process_offer().
+        */
+       struct work_struct add_channel_work;
 };
 
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
index e6bb36a97519b00f1aa5e438dc2f72501f93c682..8336b2f6f834627c462a0add39f4502aecfb5ed6 100644 (file)
@@ -21,6 +21,7 @@
 #define PIT_LATCH      ((PIT_TICK_RATE + HZ/2) / HZ)
 
 extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
 extern struct clock_event_device i8253_clockevent;
 extern void clockevent_i8253_init(bool oneshot);
 
index c759d1cbcedd8d7f19f835457641dee9761a0fa0..a64f21a97369a5a7f8eb35c5a5040d359ffd6681 100644 (file)
@@ -37,7 +37,9 @@ struct in_device {
        unsigned long           mr_v1_seen;
        unsigned long           mr_v2_seen;
        unsigned long           mr_maxdelay;
-       unsigned char           mr_qrv;
+       unsigned long           mr_qi;          /* Query Interval */
+       unsigned long           mr_qri;         /* Query Response Interval */
+       unsigned char           mr_qrv;         /* Query Robustness Variable */
        unsigned char           mr_gq_running;
        unsigned char           mr_ifc_count;
        struct timer_list       mr_gq_timer;    /* general query timer */
index 05d8fb5a06c491076889f57872a1388690e40438..bc9af551fc83821e5bec98e5cbc582b2fe0be07a 100644 (file)
@@ -17,6 +17,9 @@
 
 #ifdef CONFIG_KEYS
 
+struct kernel_pkey_query;
+struct kernel_pkey_params;
+
 /*
  * key under-construction record
  * - passed to the request_key actor if supplied
@@ -155,6 +158,14 @@ struct key_type {
         */
        struct key_restriction *(*lookup_restriction)(const char *params);
 
+       /* Asymmetric key accessor functions. */
+       int (*asym_query)(const struct kernel_pkey_params *params,
+                         struct kernel_pkey_query *info);
+       int (*asym_eds_op)(struct kernel_pkey_params *params,
+                          const void *in, void *out);
+       int (*asym_verify_signature)(struct kernel_pkey_params *params,
+                                    const void *in, const void *in2);
+
        /* internal fields */
        struct list_head        link;           /* link in types list */
        struct lock_class_key   lock_class;     /* key->sem lock class */
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
new file mode 100644 (file)
index 0000000..c7c48c7
--- /dev/null
@@ -0,0 +1,46 @@
+/* keyctl kernel bits
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef __LINUX_KEYCTL_H
+#define __LINUX_KEYCTL_H
+
+#include <uapi/linux/keyctl.h>
+
+struct kernel_pkey_query {
+       __u32           supported_ops;  /* Which ops are supported */
+       __u32           key_size;       /* Size of the key in bits */
+       __u16           max_data_size;  /* Maximum size of raw data to sign in bytes */
+       __u16           max_sig_size;   /* Maximum size of signature in bytes */
+       __u16           max_enc_size;   /* Maximum size of encrypted blob in bytes */
+       __u16           max_dec_size;   /* Maximum size of decrypted blob in bytes */
+};
+
+enum kernel_pkey_operation {
+       kernel_pkey_encrypt,
+       kernel_pkey_decrypt,
+       kernel_pkey_sign,
+       kernel_pkey_verify,
+};
+
+struct kernel_pkey_params {
+       struct key      *key;
+       const char      *encoding;      /* Encoding (eg. "oaep" or "raw" for none) */
+       const char      *hash_algo;     /* Digest algorithm used (eg. "sha1") or NULL if N/A */
+       char            *info;          /* Modified info string to be released later */
+       __u32           in_len;         /* Input data size */
+       union {
+               __u32   out_len;        /* Output buffer size (enc/dec/sign) */
+               __u32   in2_len;        /* 2nd input data size (verify) */
+       };
+       enum kernel_pkey_operation op : 8;
+};
+
+#endif /* __LINUX_KEYCTL_H */
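
A hedged sketch (hypothetical key type) of wiring the new accessor hooks
into struct key_type; the KEYCTL_SUPPORTS_* flags come from
uapi/linux/keyctl.h:

	static int foo_asym_query(const struct kernel_pkey_params *params,
				  struct kernel_pkey_query *info)
	{
		info->supported_ops = KEYCTL_SUPPORTS_SIGN | KEYCTL_SUPPORTS_VERIFY;
		info->key_size = 2048;		/* bits */
		info->max_sig_size = 256;	/* bytes */
		return 0;
	}

	static struct key_type key_type_foo = {
		.name		= "foo",
		.asym_query	= foo_asym_query,
		/* .asym_eds_op / .asym_verify_signature as appropriate */
	};
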
index dbff9ff28f2c4596dfb08baed65d3655cfbc9866..34e17e6f894290f161d5e734e6fb2f9dfb0c148e 100644 (file)
@@ -2473,14 +2473,15 @@ struct mlx5_ifc_xrc_srqc_bits {
 
        u8         wq_signature[0x1];
        u8         cont_srq[0x1];
-       u8         dbr_umem_valid[0x1];
+       u8         reserved_at_22[0x1];
        u8         rlky[0x1];
        u8         basic_cyclic_rcv_wqe[0x1];
        u8         log_rq_stride[0x3];
        u8         xrcd[0x18];
 
        u8         page_offset[0x6];
-       u8         reserved_at_46[0x2];
+       u8         reserved_at_46[0x1];
+       u8         dbr_umem_valid[0x1];
        u8         cqn[0x18];
 
        u8         reserved_at_60[0x20];
@@ -6689,9 +6690,12 @@ struct mlx5_ifc_create_xrc_srq_in_bits {
 
        struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
 
-       u8         reserved_at_280[0x40];
+       u8         reserved_at_280[0x60];
+
        u8         xrc_srq_umem_valid[0x1];
-       u8         reserved_at_2c1[0x5bf];
+       u8         reserved_at_2e1[0x1f];
+
+       u8         reserved_at_300[0x580];
 
        u8         pas[0][0x40];
 };
index fcf9cc9d535faf54c6b0fa463b6cf09643d0e5c4..5411de93a363e8a14bb980a30c8e5af67f25907e 100644 (file)
@@ -1744,11 +1744,15 @@ int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
 
 static inline void mm_inc_nr_puds(struct mm_struct *mm)
 {
+       if (mm_pud_folded(mm))
+               return;
        atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_puds(struct mm_struct *mm)
 {
+       if (mm_pud_folded(mm))
+               return;
        atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
 }
 #endif
@@ -1768,11 +1772,15 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
 
 static inline void mm_inc_nr_pmds(struct mm_struct *mm)
 {
+       if (mm_pmd_folded(mm))
+               return;
        atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 
 static inline void mm_dec_nr_pmds(struct mm_struct *mm)
 {
+       if (mm_pmd_folded(mm))
+               return;
        atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
 }
 #endif
index abe975c87b9003a7301f0e879f6bdad733e6c583..7f53ece2c039aeb849ca929b2b13cb29bd172292 100644 (file)
@@ -324,9 +324,8 @@ static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
  */
 static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
 {
-       return (u64)nand->memorg.luns_per_target *
-              nand->memorg.eraseblocks_per_lun *
-              nand->memorg.pages_per_eraseblock;
+       return nand->memorg.ntargets * nand->memorg.luns_per_target *
+              nand->memorg.eraseblocks_per_lun;
 }
 
 /**
@@ -569,7 +568,7 @@ static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
 }
 
 /**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
  * @nand: NAND device
  * @pos: the position to update
  *
index c79e859408e62d7cd7306407bbaf4c6489103905..fd458389f7d19291ed6a13c22ff60eff5e34b7b3 100644 (file)
@@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim,
                }
                /* fall through */
        case NET_DIM_START_MEASURE:
+               net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr,
+                              &dim->start_sample);
                dim->state = NET_DIM_MEASURE_IN_PROGRESS;
                break;
        case NET_DIM_APPLY_NEW_PROFILE:
index dc1d9ed33b3192e9406b17c3107b3235b28ff1b9..857f8abf7b91bc79731873fc8f68e31f6bff4d03 100644 (file)
@@ -3190,6 +3190,26 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 #endif
 }
 
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+                                         unsigned int bytes,
+                                         bool xmit_more)
+{
+       if (xmit_more) {
+#ifdef CONFIG_BQL
+               dql_queued(&dev_queue->dql, bytes);
+#endif
+               return netif_tx_queue_stopped(dev_queue);
+       }
+       netdev_tx_sent_queue(dev_queue, bytes);
+       return true;
+}
+
 /**
  *     netdev_sent_queue - report the number of bytes queued to hardware
  *     @dev: network device
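
A hedged sketch of a driver transmit path using the new helper (the driver
names are hypothetical; skb->xmit_more is the batching hint of this era):

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* ... post skb to the hardware ring ... */
		if (__netdev_tx_sent_queue(txq, skb->len, skb->xmit_more))
			foo_ring_doorbell(dev);	/* hypothetical MMIO kick */
		return NETDEV_TX_OK;
	}
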
index 34fc80f3eb900deb8e4c21b10edf8909c469e7b4..1d100efe74ec76861084a4272327b662cf1de478 100644 (file)
@@ -314,7 +314,7 @@ enum {
 extern ip_set_id_t ip_set_get_byname(struct net *net,
                                     const char *name, struct ip_set **set);
 extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
 extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
 extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
 
index 8e2bab1e8e90930f954ec7dc3a1b7a8179eecd13..70877f8de7e919d30716f0483610dfb12eeec433 100644 (file)
@@ -43,11 +43,11 @@ ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
        rcu_assign_pointer(comment->c, c);
 }
 
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
 static inline int
 ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
 {
-       struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+       struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
 
        if (!c)
                return 0;
index b8d95564bd53481acf0bd78b15a140e8ac761776..14edb795ab43045939f9976105cd9060603ea566 100644 (file)
@@ -21,6 +21,19 @@ struct nf_ct_gre_keymap {
        struct nf_conntrack_tuple tuple;
 };
 
+enum grep_conntrack {
+       GRE_CT_UNREPLIED,
+       GRE_CT_REPLIED,
+       GRE_CT_MAX
+};
+
+struct netns_proto_gre {
+       struct nf_proto_net     nf;
+       rwlock_t                keymap_lock;
+       struct list_head        keymap_list;
+       unsigned int            gre_timeouts[GRE_CT_MAX];
+};
+
 /* add new tuple->key_reply pair to keymap */
 int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir,
                         struct nf_conntrack_tuple *t);
index 08f9247e9827e0056eb4d82c5c83a73a19cebd11..9003e29cde4615eb9a9e7785c675d36e1f24b8df 100644 (file)
@@ -119,6 +119,8 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }
 void watchdog_nmi_stop(void);
 void watchdog_nmi_start(void);
 int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
 
 /**
  * touch_nmi_watchdog - restart NMI watchdog timeout.
index f35c7bf7614302ee51f0896258a3dfc08d5224f8..0096a05395e380a35fe25a6329e05953eb66ab66 100644 (file)
@@ -122,8 +122,7 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
 
 #ifdef CONFIG_TREE_SRCU
 #define _SRCU_NOTIFIER_HEAD(name, mod)                         \
-       static DEFINE_PER_CPU(struct srcu_data,                 \
-                       name##_head_srcu_data);                 \
+       static DEFINE_PER_CPU(struct srcu_data, name##_head_srcu_data); \
        mod struct srcu_notifier_head name =                    \
                        SRCU_NOTIFIER_INIT(name, name##_head_srcu_data)
 
index f92a47e180341c2f6e138ae576e6581cdbda52dd..a93841bfb9f7a3db10c977455f48efed83bd575d 100644 (file)
@@ -17,6 +17,8 @@
 #define __DAVINCI_GPIO_PLATFORM_H
 
 struct davinci_gpio_platform_data {
+       bool    no_auto_base;
+       u32     base;
        u32     ngpio;
        u32     gpio_unbanked;
 };
index 8e0725aac0aa82fc45997ca21bddfb40a431a9ab..7006008d5b72fbb4bde94a98fde89202bfd706a0 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_PSI_H
 #define _LINUX_PSI_H
 
+#include <linux/jump_label.h>
 #include <linux/psi_types.h>
 #include <linux/sched.h>
 
@@ -9,7 +10,7 @@ struct css_set;
 
 #ifdef CONFIG_PSI
 
-extern bool psi_disabled;
+extern struct static_key_false psi_disabled;
 
 void psi_init(void);
 
index a15bc4d487528f4f4301fdb9f30d465799637652..30fcec375a3af2d8b0ee305b7c540a15080847d2 100644 (file)
@@ -90,7 +90,10 @@ struct pstore_record {
  *
  * @buf_lock:  spinlock to serialize access to @buf
  * @buf:       preallocated crash dump buffer
- * @bufsize:   size of @buf available for crash dump writes
+ * @bufsize:   size of @buf available for crash dump bytes (must match
+ *             smallest number of bytes available for writing to a
+ *             backend entry, since compressed bytes don't take kindly
+ *             to being truncated)
  *
  * @read_mutex:        serializes @open, @read, @close, and @erase callbacks
  * @flags:     bitfield of frontends the backend can accept writes for
index 6c2ffed907f5fc3a0d0a721f4201b4ce82dc85ad..de20ede2c5c81fe9c0f78676fb8a7a9a63b3cd3a 100644 (file)
@@ -64,15 +64,12 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_NOAUDIT    0x04
 #define PTRACE_MODE_FSCREDS    0x08
 #define PTRACE_MODE_REALCREDS  0x10
-#define PTRACE_MODE_SCHED      0x20
-#define PTRACE_MODE_IBPB       0x40
 
 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
 #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
 #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
 #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
 #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
-#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
 
 /**
  * ptrace_may_access - check whether the caller is permitted to access
@@ -90,20 +87,6 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
  */
 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
-/**
- * ptrace_may_access - check whether the caller is permitted to access
- * a target task.
- * @task: target task
- * @mode: selects type of access and caller credentials
- *
- * Returns true on success, false on denial.
- *
- * Similar to ptrace_may_access(). Only to be called from context switch
- * code. Does not call into audit and the regular LSM hooks due to locking
- * constraints.
- */
-extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
-
 static inline int ptrace_reparented(struct task_struct *child)
 {
        return !same_thread_group(child->real_parent, child->parent);
index 8f8a5418b627a2db2377add2da367796e572cd2d..291a9bd5b97fe6ad8d67409b05d1ec94c1300f44 100644 (file)
@@ -1116,6 +1116,7 @@ struct task_struct {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int                             curr_ret_stack;
+       int                             curr_ret_depth;
 
        /* Stack of return addresses for return function tracing: */
        struct ftrace_ret_stack         *ret_stack;
@@ -1200,6 +1201,11 @@ struct task_struct {
        void                            *security;
 #endif
 
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+       unsigned long                   lowest_stack;
+       unsigned long                   prev_lowest_stack;
+#endif
+
        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
@@ -1448,6 +1454,8 @@ static inline bool is_percpu_thread(void)
 #define PFA_SPREAD_SLAB                        2       /* Spread some slab caches over cpuset */
 #define PFA_SPEC_SSB_DISABLE           3       /* Speculative Store Bypass disabled */
 #define PFA_SPEC_SSB_FORCE_DISABLE     4       /* Speculative Store Bypass force disabled*/
+#define PFA_SPEC_IB_DISABLE            5       /* Indirect branch speculation restricted */
+#define PFA_SPEC_IB_FORCE_DISABLE      6       /* Indirect branch speculation permanently restricted */
 
 #define TASK_PFA_TEST(name, func)                                      \
        static inline bool task_##func(struct task_struct *p)           \
@@ -1479,6 +1487,13 @@ TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
 
+TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
+TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
+
+TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
+
 static inline void
 current_restore_flags(unsigned long orig_flags, unsigned long flags)
 {
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
new file mode 100644 (file)
index 0000000..59d3736
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/static_key.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern struct static_key_false sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+       return static_branch_likely(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+void arch_smt_update(void);
+
+#endif
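
A hedged caller sketch (the mitigation hook is hypothetical): because
sched_smt_active() compiles down to a static branch, it is cheap enough
to test even in hot paths such as context switch:

	if (sched_smt_active())
		foo_apply_smt_mitigation();	/* hypothetical */
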
index d37518e89db2ddea762bb483cc74c13040a0f803..d9d9de3fcf8e297a49f86a0e8103c815102fefbb 100644 (file)
@@ -224,7 +224,7 @@ struct sfp_eeprom_ext {
  *
  * See the SFF-8472 specification and related documents for the definition
  * of these structure members. This can be obtained from
- * ftp://ftp.seagate.com/sff
+ * https://www.snia.org/technology-communities/sff/specifications
  */
 struct sfp_eeprom_id {
        struct sfp_eeprom_base base;
index 0ba687454267fdac6f69d8fea69ffaa94c1bb205..0d1b2c3f127b3a4f316dca1ed6ffb7d65aac0cc1 100644 (file)
@@ -1326,6 +1326,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg)
        }
 }
 
+static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val)
+{
+       skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL);
+       skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG;
+}
+
+static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb)
+{
+       return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL;
+}
+
+static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb)
+{
+       return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL);
+}
+
 /* Release a reference on a zerocopy structure */
 static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
 {
@@ -1335,7 +1351,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
                if (uarg->callback == sock_zerocopy_callback) {
                        uarg->zerocopy = uarg->zerocopy && zerocopy;
                        sock_zerocopy_put(uarg);
-               } else {
+               } else if (!skb_zcopy_is_nouarg(skb)) {
                        uarg->callback(uarg, zerocopy);
                }
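
The nouarg helpers above rely on pointer tagging: real ubuf_info pointers
are at least word aligned, so bit 0 is free to mark "no uarg stored here".
A userspace illustration of the same trick (not kernel code):

	#include <stdint.h>
	#include <stdbool.h>

	static inline void *tag_nouarg(void *val)
	{
		return (void *)((uintptr_t)val | 0x1UL);
	}

	static inline bool is_nouarg(void *slot)
	{
		return (uintptr_t)slot & 0x1UL;
	}

	static inline void *untag_nouarg(void *slot)
	{
		return (void *)((uintptr_t)slot & ~0x1UL);
	}
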
 
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
new file mode 100644 (file)
index 0000000..3d5c327
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_STACKLEAK_H
+#define _LINUX_STACKLEAK_H
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/*
+ * Check that the poison value points to the unused hole in the
+ * virtual memory map for your platform.
+ */
+#define STACKLEAK_POISON -0xBEEF
+#define STACKLEAK_SEARCH_DEPTH 128
+
+#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+#include <asm/stacktrace.h>
+
+static inline void stackleak_task_init(struct task_struct *t)
+{
+       t->lowest_stack = (unsigned long)end_of_stack(t) + sizeof(unsigned long);
+# ifdef CONFIG_STACKLEAK_METRICS
+       t->prev_lowest_stack = t->lowest_stack;
+# endif
+}
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
+#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
+static inline void stackleak_task_init(struct task_struct *t) { }
+#endif
+
+#endif
index 131424cefc6a92381036c099acb3a9833c846506..02c0412e368cc1040212436c424edaa0777f723d 100644 (file)
@@ -107,8 +107,8 @@ struct krb5_ctx {
        u8                      Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
        u8                      cksum[GSS_KRB5_MAX_KEYLEN];
        s32                     endtime;
-       u32                     seq_send;
-       u64                     seq_send64;
+       atomic_t                seq_send;
+       atomic64_t              seq_send64;
        struct xdr_netobj       mech_used;
        u8                      initiator_sign[GSS_KRB5_MAX_KEYLEN];
        u8                      acceptor_sign[GSS_KRB5_MAX_KEYLEN];
@@ -118,9 +118,6 @@ struct krb5_ctx {
        u8                      acceptor_integ[GSS_KRB5_MAX_KEYLEN];
 };
 
-extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
-extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
-
 /* The length of the Kerberos GSS token header */
 #define GSS_KRB5_TOK_HDR_LEN   (16)
 
index 43106ffa6788a40101840008d0c702f4c3586c45..2ec1280602390efe6e5c71413a7a731f6e98f398 100644 (file)
@@ -72,7 +72,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
        buf->head[0].iov_base = start;
        buf->head[0].iov_len = len;
        buf->tail[0].iov_len = 0;
-       buf->bvec = NULL;
        buf->pages = NULL;
        buf->page_len = 0;
        buf->flags = 0;
index 8ed77bb4ed8636e9294389a011529fd9a667dce4..a9b0280687d52797972506a8bac13ed0747e2182 100644 (file)
@@ -196,6 +196,7 @@ struct tcp_sock {
        u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
        u32     lsndtime;       /* timestamp of last sent data packet (for restart window) */
        u32     last_oow_ack_time;  /* timestamp of last out-of-window ACK */
+       u32     compressed_ack_rcv_nxt;
 
        u32     tsoffset;       /* timestamp offset */
 
index 40b0b4c1bf7b89db225d94a539b025aa4bd2f9de..df20f8bdbfa30950f6c1dc2f0addf35f67b4eb2a 100644 (file)
@@ -83,8 +83,8 @@ static inline int ptrace_report_syscall(struct pt_regs *regs)
  * tracehook_report_syscall_entry - task is about to attempt a system call
  * @regs:              user register state of current task
  *
- * This will be called if %TIF_SYSCALL_TRACE has been set, when the
- * current task has just entered the kernel for a system call.
+ * This will be called if %TIF_SYSCALL_TRACE or %TIF_SYSCALL_EMU has been set,
+ * when the current task has just entered the kernel for a system call.
  * Full user register state is available here.  Changing the values
  * in @regs can affect the system call number and arguments to be tried.
  * It is safe to block here, preventing the system call from beginning.
index 538ba1a58f5b25c13a51d96da996a058e9511dde..e9de8ad0bad748e9d74e0af0d851568f6378be5c 100644 (file)
@@ -166,7 +166,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                struct tracepoint_func *it_func_ptr;                    \
                void *it_func;                                          \
                void *__data;                                           \
-               int __maybe_unused idx = 0;                             \
+               int __maybe_unused __idx = 0;                           \
                                                                        \
                if (!(cond))                                            \
                        return;                                         \
@@ -182,7 +182,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                 * doesn't work from the idle path.                     \
                 */                                                     \
                if (rcuidle) {                                          \
-                       idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+                       __idx = srcu_read_lock_notrace(&tracepoint_srcu);\
                        rcu_irq_enter_irqson();                         \
                }                                                       \
                                                                        \
@@ -198,7 +198,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p)
                                                                        \
                if (rcuidle) {                                          \
                        rcu_irq_exit_irqson();                          \
-                       srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+                       srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\
                }                                                       \
                                                                        \
                preempt_enable_notrace();                               \
index 414db2bce7150cc94c8b24cc4106161511e4ead1..392138fe59b6929ded24b6dabf317962c5fb3574 100644 (file)
@@ -556,6 +556,7 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
 extern void tty_release_struct(struct tty_struct *tty, int idx);
 extern int tty_release(struct inode *inode, struct file *filp);
 extern void tty_init_termios(struct tty_struct *tty);
+extern void tty_save_termios(struct tty_struct *tty);
 extern int tty_standard_install(struct tty_driver *driver,
                struct tty_struct *tty);
 
index 422b1c01ee0de0d679d7f6cb4276bb7d45e82186..55ce99ddb912f9bd603e5031a328eba96e37fc78 100644 (file)
@@ -21,15 +21,16 @@ struct kvec {
        size_t iov_len;
 };
 
-enum {
+enum iter_type {
        ITER_IOVEC = 0,
        ITER_KVEC = 2,
        ITER_BVEC = 4,
        ITER_PIPE = 8,
+       ITER_DISCARD = 16,
 };
 
 struct iov_iter {
-       int type;
+       unsigned int type;
        size_t iov_offset;
        size_t count;
        union {
@@ -47,6 +48,41 @@ struct iov_iter {
        };
 };
 
+static inline enum iter_type iov_iter_type(const struct iov_iter *i)
+{
+       return i->type & ~(READ | WRITE);
+}
+
+static inline bool iter_is_iovec(const struct iov_iter *i)
+{
+       return iov_iter_type(i) == ITER_IOVEC;
+}
+
+static inline bool iov_iter_is_kvec(const struct iov_iter *i)
+{
+       return iov_iter_type(i) == ITER_KVEC;
+}
+
+static inline bool iov_iter_is_bvec(const struct iov_iter *i)
+{
+       return iov_iter_type(i) == ITER_BVEC;
+}
+
+static inline bool iov_iter_is_pipe(const struct iov_iter *i)
+{
+       return iov_iter_type(i) == ITER_PIPE;
+}
+
+static inline bool iov_iter_is_discard(const struct iov_iter *i)
+{
+       return iov_iter_type(i) == ITER_DISCARD;
+}
+
+static inline unsigned char iov_iter_rw(const struct iov_iter *i)
+{
+       return i->type & (READ | WRITE);
+}
+
 /*
  * Total number of bytes covered by an iovec.
  *
@@ -74,7 +110,8 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
 }
 
 #define iov_for_each(iov, iter, start)                         \
-       if (!((start).type & (ITER_BVEC | ITER_PIPE)))          \
+       if (iov_iter_type(start) == ITER_IOVEC ||               \
+           iov_iter_type(start) == ITER_KVEC)                  \
        for (iter = (start);                                    \
             (iter).count &&                                    \
             ((iov = iov_iter_iovec(&(iter))), 1);              \
@@ -181,14 +218,15 @@ size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
-void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
+void iov_iter_init(struct iov_iter *i, unsigned int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
-void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec *kvec,
                        unsigned long nr_segs, size_t count);
-void iov_iter_bvec(struct iov_iter *i, int direction, const struct bio_vec *bvec,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec,
                        unsigned long nr_segs, size_t count);
-void iov_iter_pipe(struct iov_iter *i, int direction, struct pipe_inode_info *pipe,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode_info *pipe,
                        size_t count);
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
 ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
                        size_t maxsize, unsigned maxpages, size_t *start);
 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -202,19 +240,6 @@ static inline size_t iov_iter_count(const struct iov_iter *i)
        return i->count;
 }
 
-static inline bool iter_is_iovec(const struct iov_iter *i)
-{
-       return !(i->type & (ITER_BVEC | ITER_KVEC | ITER_PIPE));
-}
-
-/*
- * Get one of READ or WRITE out of iter->type without any other flags OR'd in
- * with it.
- *
- * The ?: is just for type safety.
- */
-#define iov_iter_rw(i) ((0 ? (struct iov_iter *)0 : (i))->type & (READ | WRITE))
-
 /*
  * Cap the iov_iter by given limit; note that the second argument is
  * *not* the new size - it's upper limit for such.  Passing it a value
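The iov_iter rework above folds the READ/WRITE direction into the type field and replaces open-coded flag tests with typed accessors. A minimal sketch of the resulting call style, using only the signatures shown in this hunk (the buffer names are illustrative):

    /* Sketch: wrap a kernel buffer in an iterator and inspect it. */
    struct kvec vec = { .iov_base = buf, .iov_len = len };
    struct iov_iter iter;

    iov_iter_kvec(&iter, READ, &vec, 1, len);

    if (iov_iter_is_kvec(&iter) && iov_iter_rw(&iter) == READ)
            pr_debug("kvec iter, %zu bytes remaining\n",
                     iov_iter_count(&iter));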
index 4cdd515a4385f1a99d95a7bb7b5b29b09a5c1c1a..5e49e82c43684854c379e18a1d698d79ac4ef347 100644 (file)
@@ -407,11 +407,11 @@ struct usb_host_bos {
 };
 
 int __usb_get_extra_descriptor(char *buffer, unsigned size,
-       unsigned char type, void **ptr);
+       unsigned char type, void **ptr, size_t min);
 #define usb_get_extra_descriptor(ifpoint, type, ptr) \
                                __usb_get_extra_descriptor((ifpoint)->extra, \
                                (ifpoint)->extralen, \
-                               type, (void **)ptr)
+                               type, (void **)ptr, sizeof(**(ptr)))
 
 /* ----------------------------------------------------------------------- */
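The added @min argument lets __usb_get_extra_descriptor() reject descriptors shorter than the structure the caller is about to dereference, since the macro now passes sizeof(**(ptr)) automatically. A hedged sketch of a caller (the endpoint and descriptor type are illustrative; only the macro shape comes from this hunk):

    /* Sketch: a truncated companion descriptor now fails the lookup
     * instead of handing back an undersized buffer. */
    struct usb_ss_ep_comp_descriptor *comp;

    if (usb_get_extra_descriptor(ep, USB_DT_SS_ENDPOINT_COMP, &comp) < 0)
            return -ENODEV;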
 
index b7a99ce56bc9ad6cd4c8ff2c5681d6fbb781b875..a1be64c9940fb4ad4e2365f226419f050d6addba 100644 (file)
@@ -66,4 +66,7 @@
 /* Device needs a pause after every control message. */
 #define USB_QUIRK_DELAY_CTRL_MSG               BIT(13)
 
+/* Hub needs extra delay after resetting its port. */
+#define USB_QUIRK_HUB_SLOW_RESET               BIT(14)
+
 #endif /* __LINUX_USB_QUIRKS_H */
index 738a0c24874f0bfd308fad36057cae71401f71f0..fdfd04e348f698b3d108228868866072164d31b7 100644 (file)
@@ -246,8 +246,7 @@ static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
  *
  * @bio is a part of the writeback in progress controlled by @wbc.  Perform
  * writeback specific initialization.  This is used to apply the cgroup
- * writeback context.  Must be called after the bio has been associated with
- * a device.
+ * writeback context.
  */
 static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
 {
@@ -258,7 +257,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
         * regular writeback instead of writing things out itself.
         */
        if (wbc->wb)
-               bio_associate_blkg_from_css(bio, wbc->wb->blkcg_css);
+               bio_associate_blkcg(bio, wbc->wb->blkcg_css);
 }
 
 #else  /* CONFIG_CGROUP_WRITEBACK */
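With the revert above, wbc_init_bio() again calls bio_associate_blkcg() and no longer needs the bio bound to a device first. A minimal filesystem-side sketch of the documented pattern (the allocation around it is illustrative):

    /* Sketch: tag a freshly built writeback bio with the cgroup
     * context carried in @wbc before submission. */
    struct bio *bio = bio_alloc(GFP_NOFS, nr_pages);

    wbc_init_bio(wbc, bio);
    /* ... add pages, set the target device and sector ... */
    submit_bio(bio);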
index d9514928ddacb0b873b2824100599d5daa05ad50..564892e19f8caac321780119747b0f18435f3af3 100644 (file)
@@ -289,9 +289,7 @@ struct xarray {
 void xa_init_flags(struct xarray *, gfp_t flags);
 void *xa_load(struct xarray *, unsigned long index);
 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
-void *xa_cmpxchg(struct xarray *, unsigned long index,
-                       void *old, void *entry, gfp_t);
-int xa_reserve(struct xarray *, unsigned long index, gfp_t);
+void *xa_erase(struct xarray *, unsigned long index);
 void *xa_store_range(struct xarray *, unsigned long first, unsigned long last,
                        void *entry, gfp_t);
 bool xa_get_mark(struct xarray *, unsigned long index, xa_mark_t);
@@ -343,65 +341,6 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
        return xa->xa_flags & XA_FLAGS_MARK(mark);
 }
 
-/**
- * xa_erase() - Erase this entry from the XArray.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * This function is the equivalent of calling xa_store() with %NULL as
- * the third argument.  The XArray does not need to allocate memory, so
- * the user does not need to provide GFP flags.
- *
- * Context: Process context.  Takes and releases the xa_lock.
- * Return: The entry which used to be at this index.
- */
-static inline void *xa_erase(struct xarray *xa, unsigned long index)
-{
-       return xa_store(xa, index, NULL, 0);
-}
-
-/**
- * xa_insert() - Store this entry in the XArray unless another entry is
- *                     already present.
- * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
- *
- * If you would rather see the existing entry in the array, use xa_cmpxchg().
- * This function is for users who don't care what the entry is, only that
- * one is present.
- *
- * Context: Process context.  Takes and releases the xa_lock.
- *         May sleep if the @gfp flags permit.
- * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
- * -ENOMEM if memory could not be allocated.
- */
-static inline int xa_insert(struct xarray *xa, unsigned long index,
-               void *entry, gfp_t gfp)
-{
-       void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
-       if (!curr)
-               return 0;
-       if (xa_is_err(curr))
-               return xa_err(curr);
-       return -EEXIST;
-}
-
-/**
- * xa_release() - Release a reserved entry.
- * @xa: XArray.
- * @index: Index of entry.
- *
- * After calling xa_reserve(), you can call this function to release the
- * reservation.  If the entry at @index has been stored to, this function
- * will do nothing.
- */
-static inline void xa_release(struct xarray *xa, unsigned long index)
-{
-       xa_cmpxchg(xa, index, NULL, NULL, 0);
-}
-
 /**
  * xa_for_each() - Iterate over a portion of an XArray.
  * @xa: XArray.
@@ -455,6 +394,7 @@ void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
 void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old,
                void *entry, gfp_t);
 int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t);
+int __xa_reserve(struct xarray *, unsigned long index, gfp_t);
 void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t);
 void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t);
 
@@ -486,6 +426,58 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
        return -EEXIST;
 }
 
+/**
+ * xa_store_bh() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables softirqs
+ * while holding the array lock.
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_bh(struct xarray *xa, unsigned long index,
+               void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock_bh(xa);
+       curr = __xa_store(xa, index, entry, gfp);
+       xa_unlock_bh(xa);
+
+       return curr;
+}
+
+/**
+ * xa_store_irq() - Store this entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * This function is like calling xa_store() except it disables interrupts
+ * while holding the array lock.
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: The entry which used to be at this index.
+ */
+static inline void *xa_store_irq(struct xarray *xa, unsigned long index,
+               void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock_irq(xa);
+       curr = __xa_store(xa, index, entry, gfp);
+       xa_unlock_irq(xa);
+
+       return curr;
+}
+
 /**
  * xa_erase_bh() - Erase this entry from the XArray.
  * @xa: XArray.
@@ -495,7 +487,7 @@ static inline int __xa_insert(struct xarray *xa, unsigned long index,
  * the third argument.  The XArray does not need to allocate memory, so
  * the user does not need to provide GFP flags.
  *
- * Context: Process context.  Takes and releases the xa_lock while
+ * Context: Any context.  Takes and releases the xa_lock while
  * disabling softirqs.
  * Return: The entry which used to be at this index.
  */
@@ -534,6 +526,61 @@ static inline void *xa_erase_irq(struct xarray *xa, unsigned long index)
        return entry;
 }
 
+/**
+ * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @old: Old value to test against.
+ * @entry: New value to place in array.
+ * @gfp: Memory allocation flags.
+ *
+ * If the entry at @index is the same as @old, replace it with @entry.
+ * If the return value is equal to @old, then the exchange was successful.
+ *
+ * Context: Any context.  Takes and releases the xa_lock.  May sleep
+ * if the @gfp flags permit.
+ * Return: The old value at this index or xa_err() if an error happened.
+ */
+static inline void *xa_cmpxchg(struct xarray *xa, unsigned long index,
+                       void *old, void *entry, gfp_t gfp)
+{
+       void *curr;
+
+       xa_lock(xa);
+       curr = __xa_cmpxchg(xa, index, old, entry, gfp);
+       xa_unlock(xa);
+
+       return curr;
+}
+
+/**
+ * xa_insert() - Store this entry in the XArray unless another entry is
+ *                     already present.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @entry: New entry.
+ * @gfp: Memory allocation flags.
+ *
+ * If you would rather see the existing entry in the array, use xa_cmpxchg().
+ * This function is for users who don't care what the entry is, only that
+ * one is present.
+ *
+ * Context: Process context.  Takes and releases the xa_lock.
+ *         May sleep if the @gfp flags permit.
+ * Return: 0 if the store succeeded.  -EEXIST if another entry was present.
+ * -ENOMEM if memory could not be allocated.
+ */
+static inline int xa_insert(struct xarray *xa, unsigned long index,
+               void *entry, gfp_t gfp)
+{
+       void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp);
+       if (!curr)
+               return 0;
+       if (xa_is_err(curr))
+               return xa_err(curr);
+       return -EEXIST;
+}
+
 /**
  * xa_alloc() - Find somewhere to store this entry in the XArray.
  * @xa: XArray.
@@ -575,7 +622,7 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry,
  * Updates the @id pointer with the index, then stores the entry at that
  * index.  A concurrent lookup will not see an uninitialised @id.
  *
- * Context: Process context.  Takes and releases the xa_lock while
+ * Context: Any context.  Takes and releases the xa_lock while
  * disabling softirqs.  May sleep if the @gfp flags permit.
  * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if
  * there is no more space in the XArray.
@@ -621,6 +668,98 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry,
        return err;
 }
 
+/**
+ * xa_reserve() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * Ensures there is somewhere to store an entry at @index in the array.
+ * If there is already something stored at @index, this function does
+ * nothing.  If there was nothing there, the entry is marked as reserved.
+ * Loading from a reserved entry returns a %NULL pointer.
+ *
+ * If you do not use the entry that you have reserved, call xa_release()
+ * or xa_erase() to free any unnecessary memory.
+ *
+ * Context: Any context.  Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+       int ret;
+
+       xa_lock(xa);
+       ret = __xa_reserve(xa, index, gfp);
+       xa_unlock(xa);
+
+       return ret;
+}
+
+/**
+ * xa_reserve_bh() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * A softirq-disabling version of xa_reserve().
+ *
+ * Context: Any context.  Takes and releases the xa_lock while
+ * disabling softirqs.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+       int ret;
+
+       xa_lock_bh(xa);
+       ret = __xa_reserve(xa, index, gfp);
+       xa_unlock_bh(xa);
+
+       return ret;
+}
+
+/**
+ * xa_reserve_irq() - Reserve this index in the XArray.
+ * @xa: XArray.
+ * @index: Index into array.
+ * @gfp: Memory allocation flags.
+ *
+ * An interrupt-disabling version of xa_reserve().
+ *
+ * Context: Process context.  Takes and releases the xa_lock while
+ * disabling interrupts.
+ * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
+ */
+static inline
+int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp)
+{
+       int ret;
+
+       xa_lock_irq(xa);
+       ret = __xa_reserve(xa, index, gfp);
+       xa_unlock_irq(xa);
+
+       return ret;
+}
+
+/**
+ * xa_release() - Release a reserved entry.
+ * @xa: XArray.
+ * @index: Index of entry.
+ *
+ * After calling xa_reserve(), you can call this function to release the
+ * reservation.  If the entry at @index has been stored to, this function
+ * will do nothing.
+ */
+static inline void xa_release(struct xarray *xa, unsigned long index)
+{
+       xa_cmpxchg(xa, index, NULL, NULL, 0);
+}
+
 /* Everything below here is the Advanced API.  Proceed with caution. */
 
 /*
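Taken together, the hunks above move xa_cmpxchg(), xa_insert() and the reservation API from standalone entry points onto the __xa_*() primitives, with _bh/_irq variants per lock flavour. A hedged sketch of the documented calling pattern (the array and item names are illustrative):

    /* Sketch: publish, pre-reserve, and back out of an XArray slot. */
    static DEFINE_XARRAY(obj_cache);

    int obj_publish(unsigned long id, struct obj *obj)
    {
            /* -EEXIST if @id is occupied, -ENOMEM if allocation fails. */
            return xa_insert(&obj_cache, id, obj, GFP_KERNEL);
    }

    int obj_prepare(unsigned long id)
    {
            /* Guarantee a later store at @id cannot fail for memory. */
            return xa_reserve(&obj_cache, id, GFP_KERNEL);
    }

    void obj_abort(unsigned long id)
    {
            /* Drop the reservation; a no-op if @id was stored to. */
            xa_release(&obj_cache, id);
    }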
index 0ce75c35131f114f24998c6d4332cba5eb20d843..bd36d74316984161bd7e1828b60c189affdf8af3 100644 (file)
@@ -68,7 +68,7 @@ struct media_request {
        unsigned int access_count;
        struct list_head objects;
        unsigned int num_incomplete_objects;
-       struct wait_queue_head poll_wait;
+       wait_queue_head_t poll_wait;
        spinlock_t lock;
 };
 
index 58c1ecf3d6489cda2cfd77a5f7de47abc7d90d3a..5467264771ec136f1b2f9b4ee538f847221f90d9 100644 (file)
@@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx)
 
 /* v4l2 request helper */
 
-void vb2_m2m_request_queue(struct media_request *req);
+void v4l2_m2m_request_queue(struct media_request *req);
 
 /* v4l2 ioctl helpers */
 
index 14b789a123e7d9240cea72fc01bf0d4d7acdba9b..1656c59784987bd486ace6be1f10705fb47ac5c6 100644 (file)
@@ -317,6 +317,8 @@ bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr);
 bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
                             const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
 
 /* Device notifier */
 int register_inet6addr_notifier(struct notifier_block *nb);
index de587948042a4ab6ce9e00dac021ce492a026621..1adefe42c0a689b839492d500327ef891c4c395c 100644 (file)
@@ -77,7 +77,8 @@ int rxrpc_kernel_retry_call(struct socket *, struct rxrpc_call *,
                            struct sockaddr_rxrpc *, struct key *);
 int rxrpc_kernel_check_call(struct socket *, struct rxrpc_call *,
                            enum rxrpc_call_completion *, u32 *);
-u32 rxrpc_kernel_check_life(struct socket *, struct rxrpc_call *);
+u32 rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
+void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
                                 ktime_t *);
index e2695c4bf3580323d9df2edd100388cf7db7e256..ddbba838d048df8f7d2d080442b77fa89130b561 100644 (file)
@@ -13,7 +13,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
-struct sock *unix_peer_get(struct sock *);
+struct sock *unix_peer_get(struct sock *sk);
 
 #define UNIX_HASH_SIZE 256
 #define UNIX_HASH_BITS 8
@@ -40,7 +40,7 @@ struct unix_skb_parms {
        u32                     consumed;
 } __randomize_layout;
 
-#define UNIXCB(skb)    (*(struct unix_skb_parms *)&((skb)->cb))
+#define UNIXCB(skb)    (*(struct unix_skb_parms *)&((skb)->cb))
 
 #define unix_state_lock(s)     spin_lock(&unix_sk(s)->lock)
 #define unix_state_unlock(s)   spin_unlock(&unix_sk(s)->lock)
index d7578cf49c3af85f2cd164a0b242d064b25ed23b..c9c78c15bce04eea71172ecad8693eb363bc2d60 100644 (file)
@@ -146,10 +146,12 @@ struct ifacaddr6 {
        struct in6_addr         aca_addr;
        struct fib6_info        *aca_rt;
        struct ifacaddr6        *aca_next;
+       struct hlist_node       aca_addr_lst;
        int                     aca_users;
        refcount_t              aca_refcnt;
        unsigned long           aca_cstamp;
        unsigned long           aca_tstamp;
+       struct rcu_head         rcu;
 };
 
 #define        IFA_HOST        IPV6_ADDR_LOOPBACK
index f58b384aa6c9e0fd67b05f59bd921ab38bb67923..665990c7dec8c127e2eb8321b80512760f0e824e 100644 (file)
@@ -454,6 +454,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 
 static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
 {
+       unsigned int hh_alen = 0;
        unsigned int seq;
        unsigned int hh_len;
 
@@ -461,16 +462,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
                seq = read_seqbegin(&hh->hh_lock);
                hh_len = hh->hh_len;
                if (likely(hh_len <= HH_DATA_MOD)) {
-                       /* this is inlined by gcc */
-                       memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+                       hh_alen = HH_DATA_MOD;
+
+                       /* skb_push() would proceed silently if we have room for
+                        * the unaligned size but not for the aligned size:
+                        * check headroom explicitly.
+                        */
+                       if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+                               /* this is inlined by gcc */
+                               memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+                                      HH_DATA_MOD);
+                       }
                } else {
-                       unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
+                       hh_alen = HH_DATA_ALIGN(hh_len);
 
-                       memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+                       if (likely(skb_headroom(skb) >= hh_alen)) {
+                               memcpy(skb->data - hh_alen, hh->hh_data,
+                                      hh_alen);
+                       }
                }
        } while (read_seqretry(&hh->hh_lock, seq));
 
-       skb_push(skb, hh_len);
+       if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+               kfree_skb(skb);
+               return NET_XMIT_DROP;
+       }
+
+       __skb_push(skb, hh_len);
        return dev_queue_xmit(skb);
 }
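The explicit headroom tests above matter because the copy length is the aligned size, not the raw header length. A worked instance with illustrative Ethernet numbers:

    /* Worked example (assumes ETH_HLEN == 14 and HH_DATA_MOD == 16):
     * a cached Ethernet header has hh_len == 14, but the memcpy uses
     * hh_alen == HH_DATA_MOD == 16.  An skb with exactly 14 bytes of
     * headroom satisfies skb_push(skb, hh_len), yet the 16-byte copy
     * at skb->data - 16 would write before the buffer; the
     * skb_headroom(skb) >= hh_alen check now drops such packets with
     * NET_XMIT_DROP instead of corrupting memory. */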
 
index cd24be4c4a99bd633f5718c822920f1aa44b7f12..13d55206bb9fccee2d669aec6a8c09ff92beaf8d 100644 (file)
@@ -9,7 +9,7 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
                       const struct nf_nat_range2 *range,
                       const struct net_device *out);
 
-void nf_nat_masquerade_ipv4_register_notifier(void);
+int nf_nat_masquerade_ipv4_register_notifier(void);
 void nf_nat_masquerade_ipv4_unregister_notifier(void);
 
 #endif /*_NF_NAT_MASQUERADE_IPV4_H_ */
index 0c3b5ebf0bb8d4832322ed16c1c3a106712d73d3..2917bf95c4370db57fcc64a6bb76fd64ad1ef902 100644 (file)
@@ -5,7 +5,7 @@
 unsigned int
 nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
                       const struct net_device *out);
-void nf_nat_masquerade_ipv6_register_notifier(void);
+int nf_nat_masquerade_ipv6_register_notifier(void);
 void nf_nat_masquerade_ipv6_unregister_notifier(void);
 
 #endif /* _NF_NAT_MASQUERADE_IPV6_H_ */
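Both masquerade notifier registration helpers now return an int, so callers must propagate failure instead of assuming success. A hedged sketch of the adjusted init path (the function name is illustrative):

    /* Sketch: registration can now fail and must be checked. */
    static int __init masq_init(void)
    {
            return nf_nat_masquerade_ipv4_register_notifier();
    }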
index eed04af9b75e56b6c33d0887cdefa4c8f827251e..ae7b86f587f2c77c5e2e05972d67b070a39b8711 100644 (file)
@@ -153,4 +153,43 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
                               const char *fmt, ...) { }
 #endif /* CONFIG_SYSCTL */
 
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+       return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
 #endif /*_NF_CONNTRACK_PROTOCOL_H*/
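The pernet accessors above centralize the net->ct.nf_ct_proto dereferences. A one-line sketch of the intended call style (the conntrack variable is illustrative):

    /* Sketch: reach per-netns TCP conntrack state via the new helper. */
    struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));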
index 8c2caa370e0f683ea764bc0d72da6dfa93699673..2abbc15824af953589d8fb4eb5c15e6d2e4a4c3d 100644 (file)
@@ -608,4 +608,21 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst)
                                 SCTP_DEFAULT_MINSEGMENT));
 }
 
+static inline bool sctp_transport_pmtu_check(struct sctp_transport *t)
+{
+       __u32 pmtu = sctp_dst_mtu(t->dst);
+
+       if (t->pathmtu == pmtu)
+               return true;
+
+       t->pathmtu = pmtu;
+
+       return false;
+}
+
+static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+{
+       return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+}
+
 #endif /* __net_sctp_h__ */
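sctp_transport_pmtu_check() above refreshes the cached path MTU as a side effect and returns true only when it was already current. A hedged sketch of a caller (the resync step is an assumption about typical use, not taken from this hunk):

    /* Sketch: resynchronize only when the cached PMTU changed. */
    if (!sctp_transport_pmtu_check(transport))
            sctp_assoc_sync_pmtu(transport->asoc);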
index a11f9379047698886713a0f56a950bf93dd367b3..feada358d872f3bc5622f5787078fde4cbfa6a97 100644 (file)
@@ -2075,6 +2075,8 @@ struct sctp_association {
 
        __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
        __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
+
+       struct rcu_head rcu;
 };
 
 
index 2dd37cada7c088d7bd6c8b4bd35f18c28c63cc86..888a833d3b003f81f475ef5b695a74739a68c6fe 100644 (file)
@@ -254,11 +254,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
 static inline int snd_interval_single(const struct snd_interval *i)
 {
        return (i->min == i->max || 
-               (i->min + 1 == i->max && i->openmax));
+               (i->min + 1 == i->max && (i->openmin || i->openmax)));
 }
 
 static inline int snd_interval_value(const struct snd_interval *i)
 {
+       if (i->openmin && !i->openmax)
+               return i->max;
        return i->min;
 }
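The change above makes half-open singleton intervals behave consistently; a worked instance with illustrative values:

    /* Worked example: the interval (43, 44] has min == 43, max == 44
     * and openmin set.  Its only admissible value is 44, so
     * snd_interval_single() must report it as a singleton and
     * snd_interval_value() must return i->max (44) rather than the
     * excluded i->min (43). */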
 
index f1dab1f4b194d46423bd168e9027eab8fe09891a..70c10a8f3e90a7a828d9df930e16fed5915f1686 100644 (file)
@@ -1192,7 +1192,7 @@ struct snd_soc_pcm_runtime {
             ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
             (i)++)
 #define for_each_rtd_codec_dai_rollback(rtd, i, dai)           \
-       for (; ((i--) >= 0) && ((dai) = rtd->codec_dais[i]);)
+       for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
 
 
 /* mixer control */
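The switch to pre-decrement above fixes an out-of-bounds read in the rollback loop; a worked trace with an illustrative entry value of i == 1:

    /* Worked trace (illustrative, two codec DAIs):
     *   old: (i-- >= 0) tests 1, then reads codec_dais[0]   - ok
     *        (i-- >= 0) tests 0, then reads codec_dais[-1]  - out of bounds
     *   new: (--i >= 0) leaves i == 0, reads codec_dais[0]  - ok
     *        (--i >= 0) leaves i == -1 and terminates safely */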
index d0a341bc45404b06c08f434d26030dd8be3138db..33d291888ba9c1ceb6edc3cbcd9db4467633939e 100644 (file)
@@ -54,6 +54,35 @@ enum afs_fs_operation {
        afs_FS_StoreData64              = 65538, /* AFS Store file data */
        afs_FS_GiveUpAllCallBacks       = 65539, /* AFS Give up all our callbacks on a server */
        afs_FS_GetCapabilities          = 65540, /* AFS Get FS server capabilities */
+
+       yfs_FS_FetchData                = 130,   /* YFS Fetch file data */
+       yfs_FS_FetchACL                 = 64131, /* YFS Fetch file ACL */
+       yfs_FS_FetchStatus              = 64132, /* YFS Fetch file status */
+       yfs_FS_StoreACL                 = 64134, /* YFS Store file ACL */
+       yfs_FS_StoreStatus              = 64135, /* YFS Store file status */
+       yfs_FS_RemoveFile               = 64136, /* YFS Remove a file */
+       yfs_FS_CreateFile               = 64137, /* YFS Create a file */
+       yfs_FS_Rename                   = 64138, /* YFS Rename or move a file or directory */
+       yfs_FS_Symlink                  = 64139, /* YFS Create a symbolic link */
+       yfs_FS_Link                     = 64140, /* YFS Create a hard link */
+       yfs_FS_MakeDir                  = 64141, /* YFS Create a directory */
+       yfs_FS_RemoveDir                = 64142, /* YFS Remove a directory */
+       yfs_FS_GetVolumeStatus          = 64149, /* YFS Get volume status information */
+       yfs_FS_SetVolumeStatus          = 64150, /* YFS Set volume status information */
+       yfs_FS_SetLock                  = 64156, /* YFS Request a file lock */
+       yfs_FS_ExtendLock               = 64157, /* YFS Extend a file lock */
+       yfs_FS_ReleaseLock              = 64158, /* YFS Release a file lock */
+       yfs_FS_Lookup                   = 64161, /* YFS lookup file in directory */
+       yfs_FS_FlushCPS                 = 64165,
+       yfs_FS_FetchOpaqueACL           = 64168,
+       yfs_FS_WhoAmI                   = 64170,
+       yfs_FS_RemoveACL                = 64171,
+       yfs_FS_RemoveFile2              = 64173,
+       yfs_FS_StoreOpaqueACL2          = 64174,
+       yfs_FS_InlineBulkStatus         = 64536, /* YFS Fetch multiple file statuses with errors */
+       yfs_FS_FetchData64              = 64537, /* YFS Fetch file data */
+       yfs_FS_StoreData64              = 64538, /* YFS Store file data */
+       yfs_FS_UpdateSymlink            = 64540,
 };
 
 enum afs_vl_operation {
@@ -84,6 +113,44 @@ enum afs_edit_dir_reason {
        afs_edit_dir_for_unlink,
 };
 
+enum afs_eproto_cause {
+       afs_eproto_bad_status,
+       afs_eproto_cb_count,
+       afs_eproto_cb_fid_count,
+       afs_eproto_file_type,
+       afs_eproto_ibulkst_cb_count,
+       afs_eproto_ibulkst_count,
+       afs_eproto_motd_len,
+       afs_eproto_offline_msg_len,
+       afs_eproto_volname_len,
+       afs_eproto_yvl_fsendpt4_len,
+       afs_eproto_yvl_fsendpt6_len,
+       afs_eproto_yvl_fsendpt_num,
+       afs_eproto_yvl_fsendpt_type,
+       afs_eproto_yvl_vlendpt4_len,
+       afs_eproto_yvl_vlendpt6_len,
+       afs_eproto_yvl_vlendpt_type,
+};
+
+enum afs_io_error {
+       afs_io_error_cm_reply,
+       afs_io_error_extract,
+       afs_io_error_fs_probe_fail,
+       afs_io_error_vl_lookup_fail,
+       afs_io_error_vl_probe_fail,
+};
+
+enum afs_file_error {
+       afs_file_error_dir_bad_magic,
+       afs_file_error_dir_big,
+       afs_file_error_dir_missing_page,
+       afs_file_error_dir_over_end,
+       afs_file_error_dir_small,
+       afs_file_error_dir_unmarked_ext,
+       afs_file_error_mntpt,
+       afs_file_error_writeback_fail,
+};
+
 #endif /* end __AFS_DECLARE_TRACE_ENUMS_ONCE_ONLY */
 
 /*
@@ -119,7 +186,34 @@ enum afs_edit_dir_reason {
        EM(afs_FS_FetchData64,                  "FS.FetchData64") \
        EM(afs_FS_StoreData64,                  "FS.StoreData64") \
        EM(afs_FS_GiveUpAllCallBacks,           "FS.GiveUpAllCallBacks") \
-       E_(afs_FS_GetCapabilities,              "FS.GetCapabilities")
+       EM(afs_FS_GetCapabilities,              "FS.GetCapabilities") \
+       EM(yfs_FS_FetchACL,                     "YFS.FetchACL") \
+       EM(yfs_FS_FetchStatus,                  "YFS.FetchStatus") \
+       EM(yfs_FS_StoreACL,                     "YFS.StoreACL") \
+       EM(yfs_FS_StoreStatus,                  "YFS.StoreStatus") \
+       EM(yfs_FS_RemoveFile,                   "YFS.RemoveFile") \
+       EM(yfs_FS_CreateFile,                   "YFS.CreateFile") \
+       EM(yfs_FS_Rename,                       "YFS.Rename") \
+       EM(yfs_FS_Symlink,                      "YFS.Symlink") \
+       EM(yfs_FS_Link,                         "YFS.Link") \
+       EM(yfs_FS_MakeDir,                      "YFS.MakeDir") \
+       EM(yfs_FS_RemoveDir,                    "YFS.RemoveDir") \
+       EM(yfs_FS_GetVolumeStatus,              "YFS.GetVolumeStatus") \
+       EM(yfs_FS_SetVolumeStatus,              "YFS.SetVolumeStatus") \
+       EM(yfs_FS_SetLock,                      "YFS.SetLock") \
+       EM(yfs_FS_ExtendLock,                   "YFS.ExtendLock") \
+       EM(yfs_FS_ReleaseLock,                  "YFS.ReleaseLock") \
+       EM(yfs_FS_Lookup,                       "YFS.Lookup") \
+       EM(yfs_FS_FlushCPS,                     "YFS.FlushCPS") \
+       EM(yfs_FS_FetchOpaqueACL,               "YFS.FetchOpaqueACL") \
+       EM(yfs_FS_WhoAmI,                       "YFS.WhoAmI") \
+       EM(yfs_FS_RemoveACL,                    "YFS.RemoveACL") \
+       EM(yfs_FS_RemoveFile2,                  "YFS.RemoveFile2") \
+       EM(yfs_FS_StoreOpaqueACL2,              "YFS.StoreOpaqueACL2") \
+       EM(yfs_FS_InlineBulkStatus,             "YFS.InlineBulkStatus") \
+       EM(yfs_FS_FetchData64,                  "YFS.FetchData64") \
+       EM(yfs_FS_StoreData64,                  "YFS.StoreData64") \
+       E_(yfs_FS_UpdateSymlink,                "YFS.UpdateSymlink")
 
 #define afs_vl_operations \
        EM(afs_VL_GetEntryByNameU,              "VL.GetEntryByNameU") \
@@ -146,6 +240,40 @@ enum afs_edit_dir_reason {
        EM(afs_edit_dir_for_symlink,            "Symlnk") \
        E_(afs_edit_dir_for_unlink,             "Unlink")
 
+#define afs_eproto_causes                      \
+       EM(afs_eproto_bad_status,       "BadStatus") \
+       EM(afs_eproto_cb_count,         "CbCount") \
+       EM(afs_eproto_cb_fid_count,     "CbFidCount") \
+       EM(afs_eproto_file_type,        "FileType") \
+       EM(afs_eproto_ibulkst_cb_count, "IBS.CbCount") \
+       EM(afs_eproto_ibulkst_count,    "IBS.FidCount") \
+       EM(afs_eproto_motd_len,         "MotdLen") \
+       EM(afs_eproto_offline_msg_len,  "OfflineMsgLen") \
+       EM(afs_eproto_volname_len,      "VolNameLen") \
+       EM(afs_eproto_yvl_fsendpt4_len, "YVL.FsEnd4Len") \
+       EM(afs_eproto_yvl_fsendpt6_len, "YVL.FsEnd6Len") \
+       EM(afs_eproto_yvl_fsendpt_num,  "YVL.FsEndCount") \
+       EM(afs_eproto_yvl_fsendpt_type, "YVL.FsEndType") \
+       EM(afs_eproto_yvl_vlendpt4_len, "YVL.VlEnd4Len") \
+       EM(afs_eproto_yvl_vlendpt6_len, "YVL.VlEnd6Len") \
+       E_(afs_eproto_yvl_vlendpt_type, "YVL.VlEndType")
+
+#define afs_io_errors                                                  \
+       EM(afs_io_error_cm_reply,               "CM_REPLY")             \
+       EM(afs_io_error_extract,                "EXTRACT")              \
+       EM(afs_io_error_fs_probe_fail,          "FS_PROBE_FAIL")        \
+       EM(afs_io_error_vl_lookup_fail,         "VL_LOOKUP_FAIL")       \
+       E_(afs_io_error_vl_probe_fail,          "VL_PROBE_FAIL")
+
+#define afs_file_errors                                                        \
+       EM(afs_file_error_dir_bad_magic,        "DIR_BAD_MAGIC")        \
+       EM(afs_file_error_dir_big,              "DIR_BIG")              \
+       EM(afs_file_error_dir_missing_page,     "DIR_MISSING_PAGE")     \
+       EM(afs_file_error_dir_over_end,         "DIR_ENT_OVER_END")     \
+       EM(afs_file_error_dir_small,            "DIR_SMALL")            \
+       EM(afs_file_error_dir_unmarked_ext,     "DIR_UNMARKED_EXT")     \
+       EM(afs_file_error_mntpt,                "MNTPT_READ_FAILED")    \
+       E_(afs_file_error_writeback_fail,       "WRITEBACK_FAILED")
 
 /*
  * Export enum symbols via userspace.
@@ -160,6 +288,9 @@ afs_fs_operations;
 afs_vl_operations;
 afs_edit_dir_ops;
 afs_edit_dir_reasons;
+afs_eproto_causes;
+afs_io_errors;
+afs_file_errors;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -170,17 +301,16 @@ afs_edit_dir_reasons;
 #define EM(a, b)       { a, b },
 #define E_(a, b)       { a, b }
 
-TRACE_EVENT(afs_recv_data,
-           TP_PROTO(struct afs_call *call, unsigned count, unsigned offset,
+TRACE_EVENT(afs_receive_data,
+           TP_PROTO(struct afs_call *call, struct iov_iter *iter,
                     bool want_more, int ret),
 
-           TP_ARGS(call, count, offset, want_more, ret),
+           TP_ARGS(call, iter, want_more, ret),
 
            TP_STRUCT__entry(
+                   __field(loff_t,                     remain          )
                    __field(unsigned int,               call            )
                    __field(enum afs_call_state,        state           )
-                   __field(unsigned int,               count           )
-                   __field(unsigned int,               offset          )
                    __field(unsigned short,             unmarshall      )
                    __field(bool,                       want_more       )
                    __field(int,                        ret             )
@@ -190,17 +320,18 @@ TRACE_EVENT(afs_recv_data,
                    __entry->call       = call->debug_id;
                    __entry->state      = call->state;
                    __entry->unmarshall = call->unmarshall;
-                   __entry->count      = count;
-                   __entry->offset     = offset;
+                   __entry->remain     = iov_iter_count(iter);
                    __entry->want_more  = want_more;
                    __entry->ret        = ret;
                           ),
 
-           TP_printk("c=%08x s=%u u=%u %u/%u wm=%u ret=%d",
+           TP_printk("c=%08x r=%llu u=%u w=%u s=%u ret=%d",
                      __entry->call,
-                     __entry->state, __entry->unmarshall,
-                     __entry->offset, __entry->count,
-                     __entry->want_more, __entry->ret)
+                     __entry->remain,
+                     __entry->unmarshall,
+                     __entry->want_more,
+                     __entry->state,
+                     __entry->ret)
            );
 
 TRACE_EVENT(afs_notify_call,
@@ -301,7 +432,7 @@ TRACE_EVENT(afs_make_fs_call,
                    }
                           ),
 
-           TP_printk("c=%08x %06x:%06x:%06x %s",
+           TP_printk("c=%08x %06llx:%06llx:%06x %s",
                      __entry->call,
                      __entry->fid.vid,
                      __entry->fid.vnode,
@@ -555,24 +686,70 @@ TRACE_EVENT(afs_edit_dir,
            );
 
 TRACE_EVENT(afs_protocol_error,
-           TP_PROTO(struct afs_call *call, int error, const void *where),
+           TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause),
+
+           TP_ARGS(call, error, cause),
+
+           TP_STRUCT__entry(
+                   __field(unsigned int,               call            )
+                   __field(int,                        error           )
+                   __field(enum afs_eproto_cause,      cause           )
+                            ),
+
+           TP_fast_assign(
+                   __entry->call = call ? call->debug_id : 0;
+                   __entry->error = error;
+                   __entry->cause = cause;
+                          ),
+
+           TP_printk("c=%08x r=%d %s",
+                     __entry->call, __entry->error,
+                     __print_symbolic(__entry->cause, afs_eproto_causes))
+           );
+
+TRACE_EVENT(afs_io_error,
+           TP_PROTO(unsigned int call, int error, enum afs_io_error where),
 
            TP_ARGS(call, error, where),
 
            TP_STRUCT__entry(
                    __field(unsigned int,       call            )
                    __field(int,                error           )
-                   __field(const void *,       where           )
+                   __field(enum afs_io_error,  where           )
                             ),
 
            TP_fast_assign(
-                   __entry->call = call ? call->debug_id : 0;
+                   __entry->call = call;
+                   __entry->error = error;
+                   __entry->where = where;
+                          ),
+
+           TP_printk("c=%08x r=%d %s",
+                     __entry->call, __entry->error,
+                     __print_symbolic(__entry->where, afs_io_errors))
+           );
+
+TRACE_EVENT(afs_file_error,
+           TP_PROTO(struct afs_vnode *vnode, int error, enum afs_file_error where),
+
+           TP_ARGS(vnode, error, where),
+
+           TP_STRUCT__entry(
+                   __field_struct(struct afs_fid,      fid             )
+                   __field(int,                        error           )
+                   __field(enum afs_file_error,        where           )
+                            ),
+
+           TP_fast_assign(
+                   __entry->fid = vnode->fid;
                    __entry->error = error;
                    __entry->where = where;
                           ),
 
-           TP_printk("c=%08x r=%d sp=%pSR",
-                     __entry->call, __entry->error, __entry->where)
+           TP_printk("%llx:%llx:%x r=%d %s",
+                     __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+                     __entry->error,
+                     __print_symbolic(__entry->where, afs_file_errors))
            );
 
 TRACE_EVENT(afs_cm_no_server,
index a9834c37ac40061d0e18209756c1d3be2c0497d7..c0e7d24ca25682acf384d56d7b0c87e71e6c456b 100644 (file)
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
 
        TP_fast_assign(
                __entry->dev            = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-               strlcpy(__entry->domain, domain, DOMAIN_LEN);
-               strlcpy(__entry->type, type, DOMAIN_LEN);
+               strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+               strlcpy(__entry->type, type, sizeof(__entry->type));
                __entry->percentile     = percentile;
                __entry->numerator      = numerator;
                __entry->denominator    = denominator;
@@ -60,7 +60,7 @@ TRACE_EVENT(kyber_adjust,
 
        TP_fast_assign(
                __entry->dev            = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-               strlcpy(__entry->domain, domain, DOMAIN_LEN);
+               strlcpy(__entry->domain, domain, sizeof(__entry->domain));
                __entry->depth          = depth;
        ),
 
@@ -82,7 +82,7 @@ TRACE_EVENT(kyber_throttled,
 
        TP_fast_assign(
                __entry->dev            = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent)));
-               strlcpy(__entry->domain, domain, DOMAIN_LEN);
+               strlcpy(__entry->domain, domain, sizeof(__entry->domain));
        ),
 
        TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
index 573d5b901fb11886590a2672cf396af64476efeb..5b50fe4906d2ef95d5121a80109fdb1f130ead2b 100644 (file)
@@ -181,6 +181,7 @@ enum rxrpc_timer_trace {
 enum rxrpc_propose_ack_trace {
        rxrpc_propose_ack_client_tx_end,
        rxrpc_propose_ack_input_data,
+       rxrpc_propose_ack_ping_for_check_life,
        rxrpc_propose_ack_ping_for_keepalive,
        rxrpc_propose_ack_ping_for_lost_ack,
        rxrpc_propose_ack_ping_for_lost_reply,
@@ -380,6 +381,7 @@ enum rxrpc_tx_point {
 #define rxrpc_propose_ack_traces \
        EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
        EM(rxrpc_propose_ack_input_data,        "DataIn ") \
+       EM(rxrpc_propose_ack_ping_for_check_life, "ChkLife") \
        EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
        EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
        EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
index f07b270d4fc4febeb1d3c4d0413ba4bf14e05daf..9a4bdfadab0770d62a3b46510441cf00b5397f16 100644 (file)
@@ -107,6 +107,8 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
 #ifdef CREATE_TRACE_POINTS
 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
+       unsigned int state;
+
 #ifdef CONFIG_SCHED_DEBUG
        BUG_ON(p != current);
 #endif /* CONFIG_SCHED_DEBUG */
@@ -118,7 +120,15 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct *
        if (preempt)
                return TASK_REPORT_MAX;
 
-       return 1 << task_state_index(p);
+       /*
+        * task_state_index() uses fls() and returns a value in the 0-8 range.
+        * Decrement it by 1 (except for the TASK_RUNNING state, i.e. 0)
+        * before using it as a left-shift count to get the correct
+        * task->state mapping.
+        */
+       state = task_state_index(p);
+
+       return state ? (1 << (state - 1)) : state;
 }
 #endif /* CREATE_TRACE_POINTS */
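A worked instance of the mapping described in the new comment, assuming TASK_INTERRUPTIBLE is bit 0 (0x0001):

    /* Worked example (assumes TASK_INTERRUPTIBLE == 0x0001):
     *   task_state_index(p) == fls(0x0001) == 1
     *   old: 1 << 1       == 0x0002, decoded as TASK_UNINTERRUPTIBLE ('D')
     *   new: 1 << (1 - 1) == 0x0001, decoded as TASK_INTERRUPTIBLE ('S')
     *   TASK_RUNNING keeps index 0 and is returned unshifted. */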
 
index 538546edbfbd2bd1cfca431aa95864f018fcc7ee..c7f3321fbe4384260da20a6a1fe7cbf48ecfec72 100644 (file)
@@ -760,8 +760,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_ftruncate __NR3264_ftruncate
 #define __NR_lseek __NR3264_lseek
 #define __NR_sendfile __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
 #define __NR_newfstatat __NR3264_fstatat
 #define __NR_fstat __NR3264_fstat
+#endif
 #define __NR_mmap __NR3264_mmap
 #define __NR_fadvise64 __NR3264_fadvise64
 #ifdef __NR3264_stat
@@ -776,8 +778,10 @@ __SYSCALL(__NR_rseq, sys_rseq)
 #define __NR_ftruncate64 __NR3264_ftruncate
 #define __NR_llseek __NR3264_lseek
 #define __NR_sendfile64 __NR3264_sendfile
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
 #define __NR_fstatat64 __NR3264_fstatat
 #define __NR_fstat64 __NR3264_fstat
+#endif
 #define __NR_mmap2 __NR3264_mmap
 #define __NR_fadvise64_64 __NR3264_fadvise64
 #ifdef __NR3264_stat
index 852dc17ab47a07f2580ade5f9e4a1130ee779c26..72c453a8bf50ed5cd4a0383997f5727048ce8d60 100644 (file)
@@ -2170,7 +2170,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx*
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for UDP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx*
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
  * int bpf_sk_release(struct bpf_sock *sk)
  *     Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
 #define BPF_F_CTXLEN_MASK              (0xfffffULL << 32)
 
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS            (-1L)
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
        BPF_LWT_ENCAP_SEG6_INLINE
 };
 
+#define __bpf_md_ptr(type, name)       \
+union {                                        \
+       type name;                      \
+       __u64 :64;                      \
+} __attribute__((aligned(8)))
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -2456,7 +2473,7 @@ struct __sk_buff {
        /* ... here. */
 
        __u32 data_meta;
-       struct bpf_flow_keys *flow_keys;
+       __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
 };
 
 struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
  * be added to the end of this structure
  */
 struct sk_msg_md {
-       void *data;
-       void *data_end;
+       __bpf_md_ptr(void *, data);
+       __bpf_md_ptr(void *, data_end);
 
        __u32 family;
        __u32 remote_ip4;       /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
         * Start of directly accessible data. It begins from
         * the tcp/udp header.
         */
-       void *data;
-       void *data_end;         /* End of directly accessible data */
+       __bpf_md_ptr(void *, data);
+       /* End of directly accessible data */
+       __bpf_md_ptr(void *, data_end);
        /*
         * Total length of packet (starting from the tcp/udp header).
         * Note that the directly accessible bytes (data_end - data)
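The reworked helper doc pairs the widened u64 netns parameter with the new BPF_F_CURRENT_NETNS constant. A hedged sketch of a TC-hook lookup using only names defined in this hunk (tuple population is elided):

    /* Sketch: look up a TCP socket in the program's own netns. */
    struct bpf_sock_tuple tuple = {};
    struct bpf_sock *sk;

    /* ... fill tuple.ipv4 from the packet ... */
    sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                           BPF_F_CURRENT_NETNS, 0);
    if (sk)
            bpf_sk_release(sk);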
index ae366b87426accef1c49b4fdeee478717e0d60ea..7f14d4a66c28c1c13d1388c6dacfcff30711edab 100644 (file)
  * the situation described above.
  */
 #define REL_RESERVED           0x0a
+#define REL_WHEEL_HI_RES       0x0b
+#define REL_HWHEEL_HI_RES      0x0c
 #define REL_MAX                        0x0f
 #define REL_CNT                        (REL_MAX+1)
 
index 0f3cb13db8e93efe9d6319395f21849e896f7a02..f45ee0f69c0c25afaf82c670c82d3fcb26bc8cc2 100644 (file)
 #define KEYCTL_INVALIDATE              21      /* invalidate a key */
 #define KEYCTL_GET_PERSISTENT          22      /* get a user's persistent keyring */
 #define KEYCTL_DH_COMPUTE              23      /* Compute Diffie-Hellman values */
+#define KEYCTL_PKEY_QUERY              24      /* Query public key parameters */
+#define KEYCTL_PKEY_ENCRYPT            25      /* Encrypt a blob using a public key */
+#define KEYCTL_PKEY_DECRYPT            26      /* Decrypt a blob using a public key */
+#define KEYCTL_PKEY_SIGN               27      /* Create a public key signature */
+#define KEYCTL_PKEY_VERIFY             28      /* Verify a public key signature */
 #define KEYCTL_RESTRICT_KEYRING                29      /* Restrict keys allowed to link to a keyring */
 
 /* keyctl structures */
@@ -82,4 +87,29 @@ struct keyctl_kdf_params {
        __u32 __spare[8];
 };
 
+#define KEYCTL_SUPPORTS_ENCRYPT                0x01
+#define KEYCTL_SUPPORTS_DECRYPT                0x02
+#define KEYCTL_SUPPORTS_SIGN           0x04
+#define KEYCTL_SUPPORTS_VERIFY         0x08
+
+struct keyctl_pkey_query {
+       __u32           supported_ops;  /* Which ops are supported */
+       __u32           key_size;       /* Size of the key in bits */
+       __u16           max_data_size;  /* Maximum size of raw data to sign in bytes */
+       __u16           max_sig_size;   /* Maximum size of signature in bytes */
+       __u16           max_enc_size;   /* Maximum size of encrypted blob in bytes */
+       __u16           max_dec_size;   /* Maximum size of decrypted blob in bytes */
+       __u32           __spare[10];
+};
+
+struct keyctl_pkey_params {
+       __s32           key_id;         /* Serial no. of public key to use */
+       __u32           in_len;         /* Input data size */
+       union {
+               __u32           out_len;        /* Output buffer size (encrypt/decrypt/sign) */
+               __u32           in2_len;        /* 2nd input data size (verify) */
+       };
+       __u32           __spare[7];
+};
+
 #endif /*  _LINUX_KEYCTL_H */
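The KEYCTL_PKEY_* operations above are driven through the keyctl() syscall. A hedged userspace sketch of a capability query, assuming the (operation, key, reserved, info, result) argument order; the key serial and info string are illustrative:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/keyctl.h>

    struct keyctl_pkey_query q;
    long key = 0;   /* serial of an asymmetric key, illustrative */

    if (syscall(__NR_keyctl, KEYCTL_PKEY_QUERY, key, 0UL,
                "enc=pkcs1 hash=sha256", &q) == 0 &&
        (q.supported_ops & KEYCTL_SUPPORTS_SIGN))
            printf("signatures up to %u bytes\n",
                   (unsigned int)q.max_sig_size);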
index f5ff8a76e208fc45584bb76503860c6fdf6650fd..b01eb502d49c55d04f33cace28a410171239eaf5 100644 (file)
@@ -83,11 +83,11 @@ struct kfd_ioctl_set_cu_mask_args {
 };
 
 struct kfd_ioctl_get_queue_wave_state_args {
-       uint64_t ctl_stack_address;     /* to KFD */
-       uint32_t ctl_stack_used_size;   /* from KFD */
-       uint32_t save_area_used_size;   /* from KFD */
-       uint32_t queue_id;              /* to KFD */
-       uint32_t pad;
+       __u64 ctl_stack_address;        /* to KFD */
+       __u32 ctl_stack_used_size;      /* from KFD */
+       __u32 save_area_used_size;      /* from KFD */
+       __u32 queue_id;                 /* to KFD */
+       __u32 pad;
 };
 
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -255,10 +255,10 @@ struct kfd_hsa_memory_exception_data {
 
 /* hw exception data */
 struct kfd_hsa_hw_exception_data {
-       uint32_t reset_type;
-       uint32_t reset_cause;
-       uint32_t memory_lost;
-       uint32_t gpu_id;
+       __u32 reset_type;
+       __u32 reset_cause;
+       __u32 memory_lost;
+       __u32 gpu_id;
 };
 
 /* Event data */
index 579974b0bf0d8140882ff2a48f96184f0f829456..7de4f1bdaf06a28a7e64fb9d72fba3d42d0032b8 100644 (file)
@@ -1635,8 +1635,8 @@ enum nft_ng_attributes {
        NFTA_NG_MODULUS,
        NFTA_NG_TYPE,
        NFTA_NG_OFFSET,
-       NFTA_NG_SET_NAME,
-       NFTA_NG_SET_ID,
+       NFTA_NG_SET_NAME,       /* deprecated */
+       NFTA_NG_SET_ID,         /* deprecated */
        __NFTA_NG_MAX
 };
 #define NFTA_NG_MAX    (__NFTA_NG_MAX - 1)
index 156ccd089df184853c180a240bfaaaa27774a4fe..1610fdbab98dfc89212ee653a573da8c39bdefe8 100644 (file)
 #include <linux/if_vlan.h>
 #include <linux/if_pppox.h>
 
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
 /* Bridge Hooks */
 /* After promisc drops, checksum checks. */
 #define NF_BR_PRE_ROUTING      0
index f35eb72739c09e3ad0bd22e279fa4a33119c15f6..9de8780ac8d97568932d3857de3dc2c8e5de2806 100644 (file)
@@ -646,10 +646,12 @@ struct perf_event_mmap_page {
  *
  *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
  *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
+ *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
  *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
  */
 #define PERF_RECORD_MISC_MMAP_DATA             (1 << 13)
 #define PERF_RECORD_MISC_COMM_EXEC             (1 << 13)
+#define PERF_RECORD_MISC_FORK_EXEC             (1 << 13)
 #define PERF_RECORD_MISC_SWITCH_OUT            (1 << 13)
 /*
  * These PERF_RECORD_MISC_* flags below are safely reused
index c0d7ea0bf5b62438ca8184551b64d5d29ad7951b..b17201edfa09a4d00b01b4b0665b67825f6078b7 100644 (file)
@@ -212,6 +212,7 @@ struct prctl_mm_map {
 #define PR_SET_SPECULATION_CTRL                53
 /* Speculation control variants */
 # define PR_SPEC_STORE_BYPASS          0
+# define PR_SPEC_INDIRECT_BRANCH       1
 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
 # define PR_SPEC_NOT_AFFECTED          0
 # define PR_SPEC_PRCTL                 (1UL << 0)
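PR_SPEC_INDIRECT_BRANCH extends the existing speculation-control prctl() to indirect branch speculation. A short sketch of the usage pattern described in the spec_ctrl documentation updated in this merge; the get-before-set dance is convention, and PR_SPEC_DISABLE comes from the earlier store-bypass series rather than this hunk:

    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Disable indirect branch speculation for this task, but only
     * if the kernel reports that per-task control is available.
     */
    static int disable_ibspec(void)
    {
            long state = prctl(PR_GET_SPECULATION_CTRL,
                               PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

            if (state < 0 || !(state & PR_SPEC_PRCTL))
                    return -1;      /* unsupported or not controllable */
            return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                         PR_SPEC_DISABLE, 0, 0);
    }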
index 34dd3d497f2cc52b6742d5bf89fa1e88aa947d57..c81feb373d3ea597a7d2c66ad203ad18ed821189 100644 (file)
@@ -568,6 +568,8 @@ struct sctp_assoc_reset_event {
 
 #define SCTP_ASSOC_CHANGE_DENIED       0x0004
 #define SCTP_ASSOC_CHANGE_FAILED       0x0008
+#define SCTP_STREAM_CHANGE_DENIED      SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED      SCTP_ASSOC_CHANGE_FAILED
 struct sctp_stream_change_event {
        __u16 strchange_type;
        __u16 strchange_flags;
@@ -1151,6 +1153,7 @@ struct sctp_add_streams {
 /* SCTP Stream schedulers */
 enum sctp_sched_type {
        SCTP_SS_FCFS,
+       SCTP_SS_DEFAULT = SCTP_SS_FCFS,
        SCTP_SS_PRIO,
        SCTP_SS_RR,
        SCTP_SS_MAX = SCTP_SS_RR
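SCTP_SS_DEFAULT lets callers name the default scheduler instead of hard-coding SCTP_SS_FCFS. A hedged sketch of selecting a scheduler through the SCTP_STREAM_SCHEDULER socket option; that option and struct sctp_assoc_value live elsewhere in this header, so their exact use here is an assumption:

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <linux/sctp.h>

    /* Ask for the priority scheduler on the whole socket; assoc_id 0
     * addresses future associations on a one-to-many socket.
     */
    static int use_prio_scheduler(int sd)
    {
            struct sctp_assoc_value av = {
                    .assoc_id    = 0,
                    .assoc_value = SCTP_SS_PRIO,    /* or SCTP_SS_DEFAULT */
            };

            return setsockopt(sd, IPPROTO_SCTP, SCTP_STREAM_SCHEDULER,
                              &av, sizeof(av));
    }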
index 51b095898f4b5952178205b5c517c7e383066164..998983a6e6b712f76b503fc8b2950554d582659f 100644 (file)
@@ -50,6 +50,8 @@
 #ifndef __LINUX_V4L2_CONTROLS_H
 #define __LINUX_V4L2_CONTROLS_H
 
+#include <linux/types.h>
+
 /* Control classes */
 #define V4L2_CTRL_CLASS_USER           0x00980000      /* Old-style 'user' controls */
 #define V4L2_CTRL_CLASS_MPEG           0x00990000      /* MPEG-compression controls */
@@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence {
        __u8    profile_and_level_indication;
        __u8    progressive_sequence;
        __u8    chroma_format;
+       __u8    pad;
 };
 
 struct v4l2_mpeg2_picture {
@@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture {
        __u8    alternate_scan;
        __u8    repeat_first_field;
        __u8    progressive_frame;
+       __u8    pad;
 };
 
 struct v4l2_ctrl_mpeg2_slice_params {
@@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params {
 
        __u8    backward_ref_index;
        __u8    forward_ref_index;
+       __u8    pad;
 };
 
 struct v4l2_ctrl_mpeg2_quantization {
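The new pad members make the compiler's implicit tail padding explicit, so these control payloads keep the same size and byte layout for every compiler and architecture that consumes the UAPI header. A generic illustration with a hypothetical struct, not one of the structs above:

    #include <linux/types.h>

    struct example_params {
            __u16   width;
            __u8    depth;
            __u8    pad;    /* explicit, instead of implicit padding */
    };

    /* Build-time guard: fails if the ABI-visible size ever drifts. */
    _Static_assert(sizeof(struct example_params) == 4,
                   "example_params must stay 4 bytes");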
index 13b8cb563892b7ca66a6268738b452c8428f006c..a1966cd7b6774944a1d818bd823de0253c4a2908 100644 (file)
 #define VIRTIO_BALLOON_F_MUST_TELL_HOST        0 /* Tell before reclaiming pages */
 #define VIRTIO_BALLOON_F_STATS_VQ      1 /* Memory Stats virtqueue */
 #define VIRTIO_BALLOON_F_DEFLATE_ON_OOM        2 /* Deflate balloon on OOM */
+#define VIRTIO_BALLOON_F_FREE_PAGE_HINT        3 /* VQ to report free pages */
+#define VIRTIO_BALLOON_F_PAGE_POISON   4 /* Guest is using page poisoning */
 
 /* Size of a PFN in the balloon interface. */
 #define VIRTIO_BALLOON_PFN_SHIFT 12
 
+#define VIRTIO_BALLOON_CMD_ID_STOP     0
+#define VIRTIO_BALLOON_CMD_ID_DONE     1
 struct virtio_balloon_config {
        /* Number of pages host wants Guest to give up. */
        __u32 num_pages;
        /* Number of pages we've actually got in balloon. */
        __u32 actual;
+       /* Free page report command id, readonly by guest */
+       __u32 free_page_report_cmd_id;
+       /* Stores PAGE_POISON if page poisoning is in use */
+       __u32 poison_val;
 };
 
 #define VIRTIO_BALLOON_S_SWAP_IN  0   /* Amount of memory swapped in */
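On the guest side, the new config fields are only meaningful once VIRTIO_BALLOON_F_FREE_PAGE_HINT has been negotiated. A sketch of that pattern with a hypothetical helper; balloon_hint_cmd_id() is invented for illustration, while virtio_has_feature() and virtio_cread() are the standard accessors:

    #include <linux/virtio_config.h>
    #include <linux/virtio_balloon.h>

    /* Read the host's current free-page-hinting command id, or
     * report "stopped" when the feature was not negotiated.
     */
    static u32 balloon_hint_cmd_id(struct virtio_device *vdev)
    {
            u32 cmd_id = VIRTIO_BALLOON_CMD_ID_STOP;

            if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
                    virtio_cread(vdev, struct virtio_balloon_config,
                                 free_page_report_cmd_id, &cmd_id);
            return cmd_id;
    }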
index 61f410fd74e4cf4180f7ad5ffa1d996cc1528c91..4914b93a23f2bdeb066a4048d7ab749456ae3fe3 100644 (file)
@@ -44,8 +44,3 @@ static inline void xen_balloon_init(void)
 {
 }
 #endif
-
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
-struct resource;
-void arch_xen_balloon_init(struct resource *hostmem_resource);
-#endif
index 18803ff76e27808bc8263284bbd089c32877ba05..4969817124a8d7c6b462aeb18f54105d1d49e2e3 100644 (file)
@@ -42,16 +42,12 @@ int xen_setup_shutdown_event(void);
 
 extern unsigned long *xen_contiguous_bitmap;
 
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                unsigned int address_bits,
                                dma_addr_t *dma_handle);
 
 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
-                 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
-                 unsigned int domid, bool no_translate, struct page **pages);
 #else
 static inline int xen_create_contiguous_region(phys_addr_t pstart,
                                               unsigned int order,
@@ -63,7 +59,13 @@ static inline int xen_create_contiguous_region(phys_addr_t pstart,
 
 static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
                                                 unsigned int order) { }
+#endif
 
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+                 xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+                 unsigned int domid, bool no_translate, struct page **pages);
+#else
 static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
                                xen_pfn_t *pfn, int nr, int *err_ptr,
                                pgprot_t prot,  unsigned int domid,
index a4112e95724a05e85cc5a1d5e7c651f02ecfafeb..cf5b5a0dcbc2f04a5a9812f6272a1b54d7fd06b0 100644 (file)
@@ -509,6 +509,15 @@ config PSI
 
          Say N if unsure.
 
+config PSI_DEFAULT_DISABLED
+       bool "Require boot parameter to enable pressure stall information tracking"
+       default n
+       depends on PSI
+       help
+         If set, pressure stall information tracking will be disabled
+         by default, but can be enabled by passing psi=1 on the
+         kernel command line during boot.
+
 endmenu # "CPU/Task time and stats accounting"
 
 config CPU_ISOLATION
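Whether PSI ends up enabled is visible to userspace through /proc/pressure/. A small check, assuming the procfs layout introduced with PSI; absence of the file means PSI is compiled out or disabled by this option:

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/pressure/cpu", "r");

            if (!f)
                    return 1;       /* PSI off: compiled out or psi=0 */
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);    /* e.g. "some avg10=0.00 ..." */
            fclose(f);
            return 0;
    }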
index 640557788026d8d947841c8da384056ec94ec4a0..f6f4a1e4cd54753d81d7dbfd515f5f4bda683d4a 100644 (file)
@@ -291,16 +291,6 @@ static int __init do_reset(void)
        return 1;
 }
 
-static int __init maybe_link(void)
-{
-       if (nlink >= 2) {
-               char *old = find_link(major, minor, ino, mode, collected);
-               if (old)
-                       return (ksys_link(old, collected) < 0) ? -1 : 1;
-       }
-       return 0;
-}
-
 static void __init clean_path(char *path, umode_t fmode)
 {
        struct kstat st;
@@ -313,6 +303,18 @@ static void __init clean_path(char *path, umode_t fmode)
        }
 }
 
+static int __init maybe_link(void)
+{
+       if (nlink >= 2) {
+               char *old = find_link(major, minor, ino, mode, collected);
+               if (old) {
+                       clean_path(collected, 0);
+                       return (ksys_link(old, collected) < 0) ? -1 : 1;
+               }
+       }
+       return 0;
+}
+
 static __initdata int wfd;
 
 static int __init do_name(void)
index 7a63d567fdb571f357910a83cf3398df65876016..7343b3a9bff07d0155fad5ba137daef6db39135f 100644 (file)
@@ -117,6 +117,10 @@ obj-$(CONFIG_HAS_IOMEM) += iomem.o
 obj-$(CONFIG_ZONE_DEVICE) += memremap.o
 obj-$(CONFIG_RSEQ) += rseq.o
 
+obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
+KASAN_SANITIZE_stackleak.o := n
+KCOV_INSTRUMENT_stackleak.o := n
+
 $(obj)/configs.o: $(obj)/config_data.h
 
 targets += config_data.gz
index ee4c82667d659019022f42cfc3c4a13cf9fc9124..4da543d6bea29c315b896848269e873f78ac09dd 100644 (file)
@@ -5,6 +5,7 @@
 #include <uapi/linux/types.h>
 #include <linux/seq_file.h>
 #include <linux/compiler.h>
+#include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/anon_inodes.h>
@@ -426,6 +427,30 @@ static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
                offset < btf->hdr.str_len;
 }
 
+/* Only a C-style identifier is permitted. This can be relaxed if
+ * necessary.
+ */
+static bool btf_name_valid_identifier(const struct btf *btf, u32 offset)
+{
+       /* offset must be valid */
+       const char *src = &btf->strings[offset];
+       const char *src_limit;
+
+       if (!isalpha(*src) && *src != '_')
+               return false;
+
+       /* set a limit on identifier length */
+       src_limit = src + KSYM_NAME_LEN;
+       src++;
+       while (*src && src < src_limit) {
+               if (!isalnum(*src) && *src != '_')
+                       return false;
+               src++;
+       }
+
+       return !*src;
+}
+
 static const char *btf_name_by_offset(const struct btf *btf, u32 offset)
 {
        if (!offset)
@@ -1143,6 +1168,22 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* A typedef type must have a valid name, while the other ref
+        * types (volatile, const, restrict) must have a null name.
+        */
+       if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
+               if (!t->name_off ||
+                   !btf_name_valid_identifier(env->btf, t->name_off)) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+       } else {
+               if (t->name_off) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        return 0;
@@ -1300,6 +1341,13 @@ static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* fwd type must have a valid name */
+       if (!t->name_off ||
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        return 0;
@@ -1356,6 +1404,12 @@ static s32 btf_array_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* array type should not have a name */
+       if (t->name_off) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        if (btf_type_vlen(t)) {
                btf_verifier_log_type(env, t, "vlen != 0");
                return -EINVAL;
@@ -1532,6 +1586,13 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* struct type either no name or a valid one */
+       if (t->name_off &&
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        last_offset = 0;
@@ -1543,6 +1604,12 @@ static s32 btf_struct_check_meta(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               /* struct member either no name or a valid one */
+               if (member->name_off &&
+                   !btf_name_valid_identifier(btf, member->name_off)) {
+                       btf_verifier_log_member(env, t, member, "Invalid name");
+                       return -EINVAL;
+               }
                /* A member cannot be in type void */
                if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
                        btf_verifier_log_member(env, t, member,
@@ -1730,6 +1797,13 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
                return -EINVAL;
        }
 
+       /* enum type either no name or a valid one */
+       if (t->name_off &&
+           !btf_name_valid_identifier(env->btf, t->name_off)) {
+               btf_verifier_log_type(env, t, "Invalid name");
+               return -EINVAL;
+       }
+
        btf_verifier_log_type(env, t, NULL);
 
        for (i = 0; i < nr_enums; i++) {
@@ -1739,6 +1813,14 @@ static s32 btf_enum_check_meta(struct btf_verifier_env *env,
                        return -EINVAL;
                }
 
+               /* enum member must have a valid name */
+               if (!enums[i].name_off ||
+                   !btf_name_valid_identifier(btf, enums[i].name_off)) {
+                       btf_verifier_log_type(env, t, "Invalid name");
+                       return -EINVAL;
+               }
+
+
                btf_verifier_log(env, "\t%s val=%d\n",
                                 btf_name_by_offset(btf, enums[i].name_off),
                                 enums[i].val);
index 6377225b208204c1c2d8829a778f50ebaa7d816d..b1a3545d0ec89f747d1cd51b7140fd64ad2fe6fd 100644 (file)
@@ -553,7 +553,6 @@ bool is_bpf_text_address(unsigned long addr)
 int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                    char *sym)
 {
-       unsigned long symbol_start, symbol_end;
        struct bpf_prog_aux *aux;
        unsigned int it = 0;
        int ret = -ERANGE;
@@ -566,10 +565,9 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
                if (it++ != symnum)
                        continue;
 
-               bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
                bpf_get_prog_name(aux->prog, sym);
 
-               *value = symbol_start;
+               *value = (unsigned long)aux->prog->bpf_func;
                *type  = BPF_SYM_ELF_TYPE;
 
                ret = 0;
@@ -674,6 +672,40 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
        bpf_prog_unlock_free(fp);
 }
 
+int bpf_jit_get_func_addr(const struct bpf_prog *prog,
+                         const struct bpf_insn *insn, bool extra_pass,
+                         u64 *func_addr, bool *func_addr_fixed)
+{
+       s16 off = insn->off;
+       s32 imm = insn->imm;
+       u8 *addr;
+
+       *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL;
+       if (!*func_addr_fixed) {
+               /* Use a placeholder address until the last pass has
+                * collected all addresses for JITed subprograms, at
+                * which point we can pick them up from prog->aux.
+                */
+               if (!extra_pass)
+                       addr = NULL;
+               else if (prog->aux->func &&
+                        off >= 0 && off < prog->aux->func_cnt)
+                       addr = (u8 *)prog->aux->func[off]->bpf_func;
+               else
+                       return -EINVAL;
+       } else {
+               /* Address of a BPF helper call. Since it is part of the
+                * core kernel, it is always at a fixed location; both
+                * __bpf_call_base and the helper at offset imm from it
+                * live in the core kernel.
+                */
+               addr = (u8 *)__bpf_call_base + imm;
+       }
+
+       *func_addr = (unsigned long)addr;
+       return 0;
+}
+
 static int bpf_jit_blind_insn(const struct bpf_insn *from,
                              const struct bpf_insn *aux,
                              struct bpf_insn *to_buff)
index c97a8f968638c6da0c2ec32c591753f69af59e15..bed9d48a7ae9582928c0ab4c59dd931fea61f1bd 100644 (file)
@@ -139,7 +139,8 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *_key,
                return -ENOENT;
 
        new = kmalloc_node(sizeof(struct bpf_storage_buffer) +
-                          map->value_size, __GFP_ZERO | GFP_USER,
+                          map->value_size,
+                          __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN,
                           map->numa_node);
        if (!new)
                return -ENOMEM;
index 8bbd72d3a121f4e1cb75effd9c5fe5b96728a6b5..b384ea9f3254987f1caa16dff0780900720d93ca 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/bpf.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/capability.h>
 #include "percpu_freelist.h"
 
 #define QUEUE_STACK_CREATE_FLAG_MASK \
@@ -45,8 +46,12 @@ static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
 /* Called from syscall */
 static int queue_stack_map_alloc_check(union bpf_attr *attr)
 {
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 0 ||
+           attr->value_size == 0 ||
            attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
                return -EINVAL;
 
@@ -63,15 +68,10 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 {
        int ret, numa_node = bpf_map_attr_numa_node(attr);
        struct bpf_queue_stack *qs;
-       u32 size, value_size;
-       u64 queue_size, cost;
-
-       size = attr->max_entries + 1;
-       value_size = attr->value_size;
-
-       queue_size = sizeof(*qs) + (u64) value_size * size;
+       u64 size, queue_size, cost;
 
-       cost = queue_size;
+       size = (u64) attr->max_entries + 1;
+       cost = queue_size = sizeof(*qs) + size * attr->value_size;
        if (cost >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-E2BIG);
 
index ccb93277aae2c607e7b6ef079e5432d89ef4a1f6..cf5040fd54344dd798f73464eadb5b9684300f1c 100644 (file)
@@ -2078,6 +2078,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                info.jited_prog_len = 0;
                info.xlated_prog_len = 0;
                info.nr_jited_ksyms = 0;
+               info.nr_jited_func_lens = 0;
                goto done;
        }
 
@@ -2158,11 +2159,11 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        ulen = info.nr_jited_ksyms;
-       info.nr_jited_ksyms = prog->aux->func_cnt;
+       info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
        if (info.nr_jited_ksyms && ulen) {
                if (bpf_dump_raw_ok()) {
+                       unsigned long ksym_addr;
                        u64 __user *user_ksyms;
-                       ulong ksym_addr;
                        u32 i;
 
                        /* copy the address of the kernel symbol
@@ -2170,10 +2171,17 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                         */
                        ulen = min_t(u32, info.nr_jited_ksyms, ulen);
                        user_ksyms = u64_to_user_ptr(info.jited_ksyms);
-                       for (i = 0; i < ulen; i++) {
-                               ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
-                               ksym_addr &= PAGE_MASK;
-                               if (put_user((u64) ksym_addr, &user_ksyms[i]))
+                       if (prog->aux->func_cnt) {
+                               for (i = 0; i < ulen; i++) {
+                                       ksym_addr = (unsigned long)
+                                               prog->aux->func[i]->bpf_func;
+                                       if (put_user((u64) ksym_addr,
+                                                    &user_ksyms[i]))
+                                               return -EFAULT;
+                               }
+                       } else {
+                               ksym_addr = (unsigned long) prog->bpf_func;
+                               if (put_user((u64) ksym_addr, &user_ksyms[0]))
                                        return -EFAULT;
                        }
                } else {
@@ -2182,7 +2190,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
        }
 
        ulen = info.nr_jited_func_lens;
-       info.nr_jited_func_lens = prog->aux->func_cnt;
+       info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
        if (info.nr_jited_func_lens && ulen) {
                if (bpf_dump_raw_ok()) {
                        u32 __user *user_lens;
@@ -2191,9 +2199,16 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
                        /* copy the JITed image lengths for each function */
                        ulen = min_t(u32, info.nr_jited_func_lens, ulen);
                        user_lens = u64_to_user_ptr(info.jited_func_lens);
-                       for (i = 0; i < ulen; i++) {
-                               func_len = prog->aux->func[i]->jited_len;
-                               if (put_user(func_len, &user_lens[i]))
+                       if (prog->aux->func_cnt) {
+                               for (i = 0; i < ulen; i++) {
+                                       func_len =
+                                               prog->aux->func[i]->jited_len;
+                                       if (put_user(func_len, &user_lens[i]))
+                                               return -EFAULT;
+                               }
+                       } else {
+                               func_len = prog->jited_len;
+                               if (put_user(func_len, &user_lens[0]))
                                        return -EFAULT;
                        }
                } else {
index 171a2c88e77ddd28ea5f9d5ba135d424cd09714e..fc760d00a38c497502c28b56aad1a8d426565560 100644 (file)
@@ -175,6 +175,7 @@ struct bpf_verifier_stack_elem {
 
 #define BPF_COMPLEXITY_LIMIT_INSNS     131072
 #define BPF_COMPLEXITY_LIMIT_STACK     1024
+#define BPF_COMPLEXITY_LIMIT_STATES    64
 
 #define BPF_MAP_PTR_UNPRIV     1UL
 #define BPF_MAP_PTR_POISON     ((void *)((0xeB9FUL << 1) +     \
@@ -2852,10 +2853,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                regs[BPF_REG_0].type = NOT_INIT;
        } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL ||
                   fn->ret_type == RET_PTR_TO_MAP_VALUE) {
-               if (fn->ret_type == RET_PTR_TO_MAP_VALUE)
-                       regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
-               else
-                       regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
                /* There is no offset yet applied, variable or fixed */
                mark_reg_known_zero(env, regs, BPF_REG_0);
                /* remember map_ptr, so that check_map_access()
@@ -2868,7 +2865,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                        return -EINVAL;
                }
                regs[BPF_REG_0].map_ptr = meta.map_ptr;
-               regs[BPF_REG_0].id = ++env->id_gen;
+               if (fn->ret_type == RET_PTR_TO_MAP_VALUE) {
+                       regs[BPF_REG_0].type = PTR_TO_MAP_VALUE;
+               } else {
+                       regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL;
+                       regs[BPF_REG_0].id = ++env->id_gen;
+               }
        } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
                int id = acquire_reference_state(env, insn_idx);
                if (id < 0)
@@ -3046,7 +3048,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                        dst_reg->umax_value = umax_ptr;
                        dst_reg->var_off = ptr_reg->var_off;
                        dst_reg->off = ptr_reg->off + smin_val;
-                       dst_reg->range = ptr_reg->range;
+                       dst_reg->raw = ptr_reg->raw;
                        break;
                }
                /* A new variable offset is created.  Note that off_reg->off
@@ -3076,10 +3078,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                }
                dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
                dst_reg->off = ptr_reg->off;
+               dst_reg->raw = ptr_reg->raw;
                if (reg_is_pkt_pointer(ptr_reg)) {
                        dst_reg->id = ++env->id_gen;
                        /* something was added to pkt_ptr, set range to zero */
-                       dst_reg->range = 0;
+                       dst_reg->raw = 0;
                }
                break;
        case BPF_SUB:
@@ -3108,7 +3111,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                        dst_reg->var_off = ptr_reg->var_off;
                        dst_reg->id = ptr_reg->id;
                        dst_reg->off = ptr_reg->off - smin_val;
-                       dst_reg->range = ptr_reg->range;
+                       dst_reg->raw = ptr_reg->raw;
                        break;
                }
                /* A new variable offset is created.  If the subtrahend is known
@@ -3134,11 +3137,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                }
                dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off);
                dst_reg->off = ptr_reg->off;
+               dst_reg->raw = ptr_reg->raw;
                if (reg_is_pkt_pointer(ptr_reg)) {
                        dst_reg->id = ++env->id_gen;
                        /* something was added to pkt_ptr, set range to zero */
                        if (smin_val < 0)
-                               dst_reg->range = 0;
+                               dst_reg->raw = 0;
                }
                break;
        case BPF_AND:
@@ -3748,6 +3752,79 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
        }
 }
 
+/* compute branch direction of the expression "if (reg opcode val) goto target;"
+ * and return:
+ *  1 - branch will be taken and "goto target" will be executed
+ *  0 - branch will not be taken and execution falls through to the next insn
+ * -1 - unknown. Example: "if (reg < 5)" is unknown when the register's value range is [0,10]
+ */
+static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
+{
+       if (__is_pointer_value(false, reg))
+               return -1;
+
+       switch (opcode) {
+       case BPF_JEQ:
+               if (tnum_is_const(reg->var_off))
+                       return !!tnum_equals_const(reg->var_off, val);
+               break;
+       case BPF_JNE:
+               if (tnum_is_const(reg->var_off))
+                       return !tnum_equals_const(reg->var_off, val);
+               break;
+       case BPF_JGT:
+               if (reg->umin_value > val)
+                       return 1;
+               else if (reg->umax_value <= val)
+                       return 0;
+               break;
+       case BPF_JSGT:
+               if (reg->smin_value > (s64)val)
+                       return 1;
+               else if (reg->smax_value < (s64)val)
+                       return 0;
+               break;
+       case BPF_JLT:
+               if (reg->umax_value < val)
+                       return 1;
+               else if (reg->umin_value >= val)
+                       return 0;
+               break;
+       case BPF_JSLT:
+               if (reg->smax_value < (s64)val)
+                       return 1;
+               else if (reg->smin_value >= (s64)val)
+                       return 0;
+               break;
+       case BPF_JGE:
+               if (reg->umin_value >= val)
+                       return 1;
+               else if (reg->umax_value < val)
+                       return 0;
+               break;
+       case BPF_JSGE:
+               if (reg->smin_value >= (s64)val)
+                       return 1;
+               else if (reg->smax_value < (s64)val)
+                       return 0;
+               break;
+       case BPF_JLE:
+               if (reg->umax_value <= val)
+                       return 1;
+               else if (reg->umin_value > val)
+                       return 0;
+               break;
+       case BPF_JSLE:
+               if (reg->smax_value <= (s64)val)
+                       return 1;
+               else if (reg->smin_value > (s64)val)
+                       return 0;
+               break;
+       }
+
+       return -1;
+}
+
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -4149,21 +4226,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
 
        dst_reg = &regs[insn->dst_reg];
 
-       /* detect if R == 0 where R was initialized to zero earlier */
-       if (BPF_SRC(insn->code) == BPF_K &&
-           (opcode == BPF_JEQ || opcode == BPF_JNE) &&
-           dst_reg->type == SCALAR_VALUE &&
-           tnum_is_const(dst_reg->var_off)) {
-               if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
-                   (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
-                       /* if (imm == imm) goto pc+off;
-                        * only follow the goto, ignore fall-through
-                        */
+       if (BPF_SRC(insn->code) == BPF_K) {
+               int pred = is_branch_taken(dst_reg, insn->imm, opcode);
+
+               if (pred == 1) {
+                        /* only follow the goto, ignore fall-through */
                        *insn_idx += insn->off;
                        return 0;
-               } else {
-                       /* if (imm != imm) goto pc+off;
-                        * only follow fall-through branch, since
+               } else if (pred == 0) {
+                       /* only follow fall-through branch, since
                         * that's where the program will go
                         */
                        return 0;
@@ -4977,7 +5048,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
        struct bpf_verifier_state_list *new_sl;
        struct bpf_verifier_state_list *sl;
        struct bpf_verifier_state *cur = env->cur_state, *new;
-       int i, j, err;
+       int i, j, err, states_cnt = 0;
 
        sl = env->explored_states[insn_idx];
        if (!sl)
@@ -5004,8 +5075,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
                        return 1;
                }
                sl = sl->next;
+               states_cnt++;
        }
 
+       if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
+               return 0;
+
        /* there were no equivalent states, remember current one.
         * technically the current state is not proven to be safe yet,
         * but it will either reach outer most bpf_exit (which means it's safe)
@@ -5145,6 +5220,9 @@ static int do_check(struct bpf_verifier_env *env)
                        goto process_bpf_exit;
                }
 
+               if (signal_pending(current))
+                       return -EAGAIN;
+
                if (need_resched())
                        cond_resched();
 
@@ -5647,7 +5725,7 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
                return;
        /* NOTE: fake 'exit' subprog should be updated as well. */
        for (i = 0; i <= env->subprog_cnt; i++) {
-               if (env->subprog_info[i].start < off)
+               if (env->subprog_info[i].start <= off)
                        continue;
                env->subprog_info[i].start += len - 1;
        }
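With is_branch_taken() covering all BPF_K comparison opcodes, the verifier can now prune any conditional whose outcome is provable from the tracked value ranges, not just the JEQ/JNE special case it replaces. A toy program whose dead branch the verifier no longer explores; restricted C built with clang -O2 -target bpf, section name per libbpf convention, and note the compiler may fold the branch itself, so this only illustrates the verifier-side reasoning:

    #include <linux/bpf.h>

    /* After "mark = 0", the verifier knows mark == 0, so the JNE
     * below has pred == 0 and only the fall-through is analyzed.
     */
    __attribute__((section("socket"), used))
    int prog(struct __sk_buff *skb)
    {
            __u32 mark = 0;

            if (mark != 0)          /* provably never taken */
                    return 0;
            return 1;
    }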
index 8b79318810ad5c63d9e70cd634f6d6bc928659ef..6aaf5dd5383bba294719772bc76b7c1664d54ea9 100644 (file)
@@ -493,7 +493,7 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
 }
 
 /**
- * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss
+ * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
  * @cgrp: the cgroup of interest
  * @ss: the subsystem of interest (%NULL returns @cgrp->self)
  *
@@ -502,8 +502,8 @@ static struct cgroup_subsys_state *cgroup_tryget_css(struct cgroup *cgrp,
  * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
  * function is guaranteed to return non-NULL css.
  */
-static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
-                                                       struct cgroup_subsys *ss)
+static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
+                                               struct cgroup_subsys *ss)
 {
        lockdep_assert_held(&cgroup_mutex);
 
@@ -523,35 +523,6 @@ static struct cgroup_subsys_state *cgroup_e_css_by_mask(struct cgroup *cgrp,
        return cgroup_css(cgrp, ss);
 }
 
-/**
- * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
- * @cgrp: the cgroup of interest
- * @ss: the subsystem of interest
- *
- * Find and get the effective css of @cgrp for @ss.  The effective css is
- * defined as the matching css of the nearest ancestor including self which
- * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
- * the root css is returned, so this function always returns a valid css.
- *
- * The returned css is not guaranteed to be online, and therefore it is the
- * callers responsiblity to tryget a reference for it.
- */
-struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
-                                        struct cgroup_subsys *ss)
-{
-       struct cgroup_subsys_state *css;
-
-       do {
-               css = cgroup_css(cgrp, ss);
-
-               if (css)
-                       return css;
-               cgrp = cgroup_parent(cgrp);
-       } while (cgrp);
-
-       return init_css_set.subsys[ss->id];
-}
-
 /**
  * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
  * @cgrp: the cgroup of interest
@@ -634,11 +605,10 @@ EXPORT_SYMBOL_GPL(of_css);
  *
  * Should be called under cgroup_[tree_]mutex.
  */
-#define for_each_e_css(css, ssid, cgrp)                                            \
-       for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)            \
-               if (!((css) = cgroup_e_css_by_mask(cgrp,                    \
-                                                  cgroup_subsys[(ssid)]))) \
-                       ;                                                   \
+#define for_each_e_css(css, ssid, cgrp)                                        \
+       for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)        \
+               if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
+                       ;                                               \
                else
 
 /**
@@ -1037,7 +1007,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
                         * @ss is in this hierarchy, so we want the
                         * effective css from @cgrp.
                         */
-                       template[i] = cgroup_e_css_by_mask(cgrp, ss);
+                       template[i] = cgroup_e_css(cgrp, ss);
                } else {
                        /*
                         * @ss is not in this hierarchy, so we don't want
@@ -3054,7 +3024,7 @@ static int cgroup_apply_control(struct cgroup *cgrp)
                return ret;
 
        /*
-        * At this point, cgroup_e_css_by_mask() results reflect the new csses
+        * At this point, cgroup_e_css() results reflect the new csses
         * making the following cgroup_update_dfl_csses() properly update
         * css associations of all tasks in the subtree.
         */
index 108fecc20fc148e66f78be657bc80c22590ff5ca..208481d9109030403357b36ada51074f14f37293 100644 (file)
@@ -20,6 +20,7 @@ CONFIG_PARAVIRT=y
 CONFIG_KVM_GUEST=y
 CONFIG_S390_GUEST=y
 CONFIG_VIRTIO=y
+CONFIG_VIRTIO_MENU=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_VIRTIO_CONSOLE=y
index 3c7f3b4c453cf57c8e37dd5fadc9f5941f074f0d..91d5c38eb7e5b91a5d2cf821414f7cbbaa854c7a 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/hotplug.h>
 #include <linux/sched/task.h>
+#include <linux/sched/smt.h>
 #include <linux/unistd.h>
 #include <linux/cpu.h>
 #include <linux/oom.h>
@@ -367,6 +368,12 @@ static void lockdep_release_cpus_lock(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
+/*
+ * Architectures that need SMT-specific errata handling during SMT hotplug
+ * should override this.
+ */
+void __weak arch_smt_update(void) { }
+
 #ifdef CONFIG_HOTPLUG_SMT
 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
 EXPORT_SYMBOL_GPL(cpu_smt_control);
@@ -1011,6 +1018,7 @@ out:
         * concurrent CPU hotplug via cpu_add_remove_lock.
         */
        lockup_detector_cleanup();
+       arch_smt_update();
        return ret;
 }
 
@@ -1139,6 +1147,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
        ret = cpuhp_up_callbacks(cpu, st, target);
 out:
        cpus_write_unlock();
+       arch_smt_update();
        return ret;
 }
 
@@ -2055,12 +2064,6 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
 }
 
-/*
- * Architectures that need SMT-specific errata handling during SMT hotplug
- * should override this.
- */
-void __weak arch_smt_update(void) { };
-
 static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
 {
        int cpu, ret = 0;
index 6ad4a9fcbd6f7012ca577070455bec9a1cd03e57..7921ae4fca8de92513fb5b71cbc36fd96ed0e04f 100644 (file)
@@ -179,14 +179,14 @@ kdb_bt(int argc, const char **argv)
                                kdb_printf("no process for cpu %ld\n", cpu);
                                return 0;
                        }
-                       sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+                       sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
                        kdb_parse(buf);
                        return 0;
                }
                kdb_printf("btc: cpu status: ");
                kdb_parse("cpu\n");
                for_each_online_cpu(cpu) {
-                       sprintf(buf, "btt 0x%p\n", KDB_TSK(cpu));
+                       sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
                        kdb_parse(buf);
                        touch_nmi_watchdog();
                }
index ed5d34925ad0617a40aeed3774b0e393aec03e99..6a4b41484afe654572f3b4f37186046c4541c8e5 100644 (file)
@@ -216,7 +216,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
        int count;
        int i;
        int diag, dtab_count;
-       int key;
+       int key, buf_size, ret;
 
 
        diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -336,9 +336,8 @@ poll_again:
                else
                        p_tmp = tmpbuffer;
                len = strlen(p_tmp);
-               count = kallsyms_symbol_complete(p_tmp,
-                                                sizeof(tmpbuffer) -
-                                                (p_tmp - tmpbuffer));
+               buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
+               count = kallsyms_symbol_complete(p_tmp, buf_size);
                if (tab == 2 && count > 0) {
                        kdb_printf("\n%d symbols are found.", count);
                        if (count > dtab_count) {
@@ -350,9 +349,13 @@ poll_again:
                        }
                        kdb_printf("\n");
                        for (i = 0; i < count; i++) {
-                               if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
+                               ret = kallsyms_symbol_next(p_tmp, i, buf_size);
+                               if (WARN_ON(!ret))
                                        break;
-                               kdb_printf("%s ", p_tmp);
+                               if (ret != -E2BIG)
+                                       kdb_printf("%s ", p_tmp);
+                               else
+                                       kdb_printf("%s... ", p_tmp);
                                *(p_tmp + len) = '\0';
                        }
                        if (i >= dtab_count)
index 118527aa60eae183f6d3b882ff2102a0c9a6012a..750497b0003a6decd80a97051aec97347079fd93 100644 (file)
@@ -173,11 +173,11 @@ int kdb_get_kbd_char(void)
        case KT_LATIN:
                if (isprint(keychar))
                        break;          /* printable characters */
-               /* drop through */
+               /* fall through */
        case KT_SPEC:
                if (keychar == K_ENTER)
                        break;
-               /* drop through */
+               /* fall through */
        default:
                return -1;      /* ignore unprintables */
        }
index bb4fe4e1a601b5252197f20babd6da394aaa0bc9..d72b32c66f7dd3ba3f5cf254b4d8a66da21d259d 100644 (file)
@@ -1192,7 +1192,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
        if (reason == KDB_REASON_DEBUG) {
                /* special case below */
        } else {
-               kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+               kdb_printf("\nEntering kdb (current=0x%px, pid %d) ",
                           kdb_current, kdb_current ? kdb_current->pid : 0);
 #if defined(CONFIG_SMP)
                kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1208,7 +1208,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                 */
                switch (db_result) {
                case KDB_DB_BPT:
-                       kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+                       kdb_printf("\nEntering kdb (0x%px, pid %d) ",
                                   kdb_current, kdb_current->pid);
 #if defined(CONFIG_SMP)
                        kdb_printf("on processor %d ", raw_smp_processor_id());
@@ -1493,6 +1493,7 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
        char cbuf[32];
        char *c = cbuf;
        int i;
+       int j;
        unsigned long word;
 
        memset(cbuf, '\0', sizeof(cbuf));
@@ -1538,25 +1539,9 @@ static void kdb_md_line(const char *fmtstr, unsigned long addr,
                        wc.word = word;
 #define printable_char(c) \
        ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
-                       switch (bytesperword) {
-                       case 8:
+                       for (j = 0; j < bytesperword; j++)
                                *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               addr += 4;
-                       case 4:
-                               *c++ = printable_char(*cp++);
-                               *c++ = printable_char(*cp++);
-                               addr += 2;
-                       case 2:
-                               *c++ = printable_char(*cp++);
-                               addr++;
-                       case 1:
-                               *c++ = printable_char(*cp++);
-                               addr++;
-                               break;
-                       }
+                       addr += bytesperword;
 #undef printable_char
                }
        }
@@ -2048,7 +2033,7 @@ static int kdb_lsmod(int argc, const char **argv)
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
 
-               kdb_printf("%-20s%8u  0x%p ", mod->name,
+               kdb_printf("%-20s%8u  0x%px ", mod->name,
                           mod->core_layout.size, (void *)mod);
 #ifdef CONFIG_MODULE_UNLOAD
                kdb_printf("%4d ", module_refcount(mod));
@@ -2059,7 +2044,7 @@ static int kdb_lsmod(int argc, const char **argv)
                        kdb_printf(" (Loading)");
                else
                        kdb_printf(" (Live)");
-               kdb_printf(" 0x%p", mod->core_layout.base);
+               kdb_printf(" 0x%px", mod->core_layout.base);
 
 #ifdef CONFIG_MODULE_UNLOAD
                {
@@ -2341,7 +2326,7 @@ void kdb_ps1(const struct task_struct *p)
                return;
 
        cpu = kdb_process_cpu(p);
-       kdb_printf("0x%p %8d %8d  %d %4d   %c  0x%p %c%s\n",
+       kdb_printf("0x%px %8d %8d  %d %4d   %c  0x%px %c%s\n",
                   (void *)p, p->pid, p->parent->pid,
                   kdb_task_has_cpu(p), kdb_process_cpu(p),
                   kdb_task_state_char(p),
@@ -2354,7 +2339,7 @@ void kdb_ps1(const struct task_struct *p)
                } else {
                        if (KDB_TSK(cpu) != p)
                                kdb_printf("  Error: does not match running "
-                                  "process table (0x%p)\n", KDB_TSK(cpu));
+                                  "process table (0x%px)\n", KDB_TSK(cpu));
                }
        }
 }
@@ -2687,7 +2672,7 @@ int kdb_register_flags(char *cmd,
        for_each_kdbcmd(kp, i) {
                if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
                        kdb_printf("Duplicate kdb command registered: "
-                               "%s, func %p help %s\n", cmd, func, help);
+                               "%s, func %px help %s\n", cmd, func, help);
                        return 1;
                }
        }
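The %p to %px conversions here are deliberate: since the hashed-pointer change, %p prints an obfuscated value, which is useless inside a debugger whose whole job is to expose real addresses. The distinction in one line, as a kernel-context sketch for a privileged log:

    #include <linux/printk.h>

    /* %p prints a hashed value; %px prints the raw pointer and is
     * reserved for contexts, like kdb, where exposing it is the point.
     */
    static void show_ptr(const void *p)
    {
            pr_info("hashed: %p  raw: %px\n", p, p);
    }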
index 1e5a502ba4a7b44787a097540cc21787cce41f5d..2118d8258b7c9a3d66b917f10575f7b109b5d665 100644 (file)
@@ -83,7 +83,7 @@ typedef struct __ksymtab {
                unsigned long sym_start;
                unsigned long sym_end;
                } kdb_symtab_t;
-extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
 extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
 
 /* Exported Symbols for kernel loadable modules to use. */
index 990b3cc526c80d2162d79f0524dbd83932418cdf..50bf9b119bad04952c767451a4e29315b0234ab0 100644 (file)
@@ -40,7 +40,7 @@
 int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
 {
        if (KDB_DEBUG(AR))
-               kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+               kdb_printf("kdbgetsymval: symname=%s, symtab=%px\n", symname,
                           symtab);
        memset(symtab, 0, sizeof(*symtab));
        symtab->sym_start = kallsyms_lookup_name(symname);
@@ -88,7 +88,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
        char *knt1 = NULL;
 
        if (KDB_DEBUG(AR))
-               kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+               kdb_printf("kdbnearsym: addr=0x%lx, symtab=%px\n", addr, symtab);
        memset(symtab, 0, sizeof(*symtab));
 
        if (addr < 4096)
@@ -149,7 +149,7 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
                symtab->mod_name = "kernel";
        if (KDB_DEBUG(AR))
                kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
-                  "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+                  "symtab->mod_name=%px, symtab->sym_name=%px (%s)\n", ret,
                   symtab->sym_start, symtab->mod_name, symtab->sym_name,
                   symtab->sym_name);
 
@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
  * Parameters:
  *     prefix_name     prefix of a symbol name to lookup
  *     flag    0 means search from the head, 1 means continue search.
+ *     buf_size        maximum length that can be written to prefix_name
+ *                     buffer
  * Returns:
  *     1 if a symbol matches the given prefix.
  *     0 if no string found
  */
-int kallsyms_symbol_next(char *prefix_name, int flag)
+int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
 {
        int prefix_len = strlen(prefix_name);
        static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
                pos = 0;
 
        while ((name = kdb_walk_kallsyms(&pos))) {
-               if (strncmp(name, prefix_name, prefix_len) == 0) {
-                       strncpy(prefix_name, name, strlen(name)+1);
-                       return 1;
-               }
+               if (!strncmp(name, prefix_name, prefix_len))
+                       return strscpy(prefix_name, name, buf_size);
        }
        return 0;
 }
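The strncpy() call this replaces could overrun prefix_name for long symbols; strscpy() is bounded by the destination, always NUL-terminates, and reports truncation, which is what the "..." handling in kdb_read() above keys off. A minimal sketch of that contract:

    #include <linux/string.h>
    #include <linux/errno.h>
    #include <linux/printk.h>

    static void demo_strscpy(void)
    {
            char buf[8];

            /* Copies "very_lo", NUL-terminates, returns -E2BIG. */
            if (strscpy(buf, "very_long_symbol", sizeof(buf)) == -E2BIG)
                    pr_info("%s...\n", buf);
    }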
@@ -432,7 +432,7 @@ int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
                                *word = w8;
                        break;
                }
-               /* drop through */
+               /* fall through */
        default:
                diag = KDB_BADWIDTH;
                kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
@@ -481,7 +481,7 @@ int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
                                *word = w8;
                        break;
                }
-               /* drop through */
+               /* fall through */
        default:
                diag = KDB_BADWIDTH;
                kdb_printf("kdb_getword: bad width %ld\n", (long) size);
@@ -525,7 +525,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
                        diag = kdb_putarea(addr, w8);
                        break;
                }
-               /* drop through */
+               /* fall through */
        default:
                diag = KDB_BADWIDTH;
                kdb_printf("kdb_putword: bad width %ld\n", (long) size);
@@ -887,13 +887,13 @@ void debug_kusage(void)
                   __func__, dah_first);
        if (dah_first) {
                h_used = (struct debug_alloc_header *)debug_alloc_pool;
-               kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+               kdb_printf("%s: h_used %px size %d\n", __func__, h_used,
                           h_used->size);
        }
        do {
                h_used = (struct debug_alloc_header *)
                          ((char *)h_free + dah_overhead + h_free->size);
-               kdb_printf("%s: h_used %p size %d caller %p\n",
+               kdb_printf("%s: h_used %px size %d caller %px\n",
                           __func__, h_used, h_used->size, h_used->caller);
                h_free = (struct debug_alloc_header *)
                          (debug_alloc_pool + h_free->next);
@@ -902,7 +902,7 @@ void debug_kusage(void)
                  ((char *)h_free + dah_overhead + h_free->size);
        if ((char *)h_used - debug_alloc_pool !=
            sizeof(debug_alloc_pool_aligned))
-               kdb_printf("%s: h_used %p size %d caller %p\n",
+               kdb_printf("%s: h_used %px size %d caller %px\n",
                           __func__, h_used, h_used->size, h_used->caller);
 out:
        spin_unlock(&dap_lock);
index 5731daa09a32edc2175657384aee68b60a365a2a..045930e32c0e93d91fab9b28a4329f5b2dc1c356 100644 (file)
@@ -679,7 +679,8 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
        }
 
        if (!dev_is_dma_coherent(dev) &&
-           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+           (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0 &&
+           dev_addr != DIRECT_MAPPING_ERROR)
                arch_sync_dma_for_device(dev, phys, size, dir);
 
        return dev_addr;
index 8c490130c4fb0072838801534948d9ec6b9a285f..84530ab358c37ad876744d5ebda5d277dfb6d065 100644 (file)
@@ -750,7 +750,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
        /*
         * Do not update time when cgroup is not active
         */
-       if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+       if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
                __update_cgrp_time(event->cgrp);
 }
 
index 96d4bee83489b113a1f37452aeffb8e13461b230..abbd8da9ac21613d6520e1a8533d3fe0b28dccb0 100644 (file)
@@ -572,7 +572,9 @@ static void put_uprobe(struct uprobe *uprobe)
                 * gets called, we don't get a chance to remove uprobe from
                 * delayed_uprobe_list from remove_breakpoint(). Do it here.
                 */
+               mutex_lock(&delayed_uprobe_lock);
                delayed_uprobe_remove(uprobe, NULL);
+               mutex_unlock(&delayed_uprobe_lock);
                kfree(uprobe);
        }
 }
@@ -829,7 +831,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
        BUG_ON((uprobe->offset & ~PAGE_MASK) +
                        UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
 
-       smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
+       smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
        set_bit(UPROBE_COPY_INSN, &uprobe->flags);
 
  out:
@@ -2178,10 +2180,18 @@ static void handle_swbp(struct pt_regs *regs)
         * After we hit the bp, _unregister + _register can install the
         * new and not-yet-analyzed uprobe at the same address, restart.
         */
-       smp_rmb(); /* pairs with wmb() in install_breakpoint() */
        if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
                goto out;
 
+       /*
+        * Pairs with the smp_wmb() in prepare_uprobe().
+        *
+        * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
+        * we must also see the stores to &uprobe->arch performed by the
+        * prepare_uprobe() call.
+        */
+       smp_rmb();
+
        /* Tracing handlers use ->utask to communicate with fetch methods */
        if (!get_utask())
                goto out;
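The rewritten comments document a classic publish/consume barrier pairing: the writer orders its payload stores before the flag store with smp_wmb(), and the reader orders the flag load before the payload loads with smp_rmb(). Reduced to its skeleton as a sketch, not the uprobes code itself:

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    static int payload;
    static int ready;

    static void publish(int v)
    {
            payload = v;
            smp_wmb();              /* order payload before the flag */
            WRITE_ONCE(ready, 1);
    }

    static int consume(void)
    {
            if (!READ_ONCE(ready))
                    return -1;
            smp_rmb();              /* pairs with publish()'s smp_wmb() */
            return payload;
    }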
index 8f82a3bdcb8feff10a8ce4c8d608a406890b6673..07cddff89c7b6bac3658c8cb41dd32dc64a3cfa4 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/kcov.h>
 #include <linux/livepatch.h>
 #include <linux/thread_info.h>
+#include <linux/stackleak.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -1926,6 +1927,8 @@ static __latent_entropy struct task_struct *copy_process(
        if (retval)
                goto bad_fork_cleanup_io;
 
+       stackleak_task_init(p);
+
        if (pid != &init_struct_pid) {
                pid = alloc_pid(p->nsproxy->pid_ns_for_children);
                if (IS_ERR(pid)) {
index 6e6d467f3dec57717ffb7cfae9098dedafb02b22..1f0985adf19340fdb9ec02cf9118c035d8f8e346 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/cpu.h>
 #include <linux/irq.h>
 
-#define IRQ_MATRIX_SIZE        (BITS_TO_LONGS(IRQ_MATRIX_BITS) * sizeof(unsigned long))
+#define IRQ_MATRIX_SIZE        (BITS_TO_LONGS(IRQ_MATRIX_BITS))
 
 struct cpumap {
        unsigned int            available;
index 3ebd09efe72a67fcc1e88761e91e306880c5ae0a..97959d7b77e2a9f535af0eda0c9e41fac1fbe798 100644 (file)
@@ -56,7 +56,7 @@ struct kcov {
        struct task_struct      *t;
 };
 
-static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
+static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 {
        unsigned int mode;
 
@@ -78,7 +78,7 @@ static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
        return mode == needed_mode;
 }
 
-static unsigned long canonicalize_ip(unsigned long ip)
+static notrace unsigned long canonicalize_ip(unsigned long ip)
 {
 #ifdef CONFIG_RANDOMIZE_BASE
        ip -= kaslr_offset();
index c6a3b6851372c480005d4f053757ba02ad101d8f..35cf0ad29718ffdb0a35dd9c8b2313645c91c6fd 100644 (file)
@@ -25,8 +25,6 @@
 #include <linux/elf.h>
 #include <linux/elfcore.h>
 #include <linux/kernel.h>
-#include <linux/kexec.h>
-#include <linux/slab.h>
 #include <linux/syscalls.h>
 #include <linux/vmalloc.h>
 #include "kexec_internal.h"
index 80b34dffdfb9bffc481de5b624a5be6622a33da5..c2cee9db52040a069188fa9949d37785b0539f7b 100644 (file)
@@ -261,9 +261,6 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 
 static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 {
-       if (mode & PTRACE_MODE_SCHED)
-               return false;
-
        if (mode & PTRACE_MODE_NOAUDIT)
                return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
        else
@@ -331,16 +328,9 @@ ok:
             !ptrace_has_cap(mm->user_ns, mode)))
            return -EPERM;
 
-       if (mode & PTRACE_MODE_SCHED)
-               return 0;
        return security_ptrace_access_check(task, mode);
 }
 
-bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
-{
-       return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
-}
-
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
        int err;
index b3a3a1fc499eaf386b3b3e1c51f2100b7226ff37..b0fbf685c77a52ba45dc4c3e2044782080a74953 100644 (file)
@@ -319,16 +319,23 @@ int release_resource(struct resource *old)
 EXPORT_SYMBOL(release_resource);
 
 /**
- * Finds the lowest iomem resource that covers part of [start..end].  The
- * caller must specify start, end, flags, and desc (which may be
+ * Finds the lowest iomem resource that covers part of [@start..@end].  The
+ * caller must specify @start, @end, @flags, and @desc (which may be
  * IORES_DESC_NONE).
  *
- * If a resource is found, returns 0 and *res is overwritten with the part
- * of the resource that's within [start..end]; if none is found, returns
- * -1.
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]; if none is found, returns
+ * -1, or -EINVAL when the parameters are invalid.
  *
  * This function walks the whole tree and not just first level children
  * unless @first_lvl is true.
+ *
+ * @start:     start address of the resource searched for
+ * @end:       end address of same resource
+ * @flags:     flags which the resource must have
+ * @desc:      descriptor the resource must have
+ * @first_lvl: walk only the first level children, if set
+ * @res:       return ptr, if resource found
  */
 static int find_next_iomem_res(resource_size_t start, resource_size_t end,
                               unsigned long flags, unsigned long desc,
@@ -399,6 +406,8 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
  * @flags: I/O resource flags
  * @start: start addr
  * @end: end addr
+ * @arg: function argument for the callback @func
+ * @func: callback function that is called for each qualifying resource area
  *
  * NOTE: For a new descriptor search, define a new IORES_DESC in
  * <linux/ioport.h> and set it in 'desc' of a target resource entry.
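find_next_iomem_res() stays internal; callers reach it through wrappers such as walk_iomem_res_desc(). A hedged usage sketch (the callback name and counter are illustrative):

    static int count_res(struct resource *res, void *arg)
    {
            (*(unsigned int *)arg)++;
            return 0;       /* returning non-zero stops the walk */
    }

    unsigned int n = 0;
    walk_iomem_res_desc(IORES_DESC_NONE, IORESOURCE_MEM | IORESOURCE_BUSY,
                        0, (u64)-1, &n, count_res);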
index f12225f26b70a630ac185cfccba2b140f191c47e..6fedf3a98581b34b388a014d9ddb671ab8281a1f 100644 (file)
@@ -5738,15 +5738,10 @@ int sched_cpu_activate(unsigned int cpu)
 
 #ifdef CONFIG_SCHED_SMT
        /*
-        * The sched_smt_present static key needs to be evaluated on every
-        * hotplug event because at boot time SMT might be disabled when
-        * the number of booted CPUs is limited.
-        *
-        * If then later a sibling gets hotplugged, then the key would stay
-        * off and SMT scheduling would never be functional.
+        * When going up, increment the number of cores with SMT present.
         */
-       if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
-               static_branch_enable_cpuslocked(&sched_smt_present);
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               static_branch_inc_cpuslocked(&sched_smt_present);
 #endif
        set_cpu_active(cpu, true);
 
@@ -5790,6 +5785,14 @@ int sched_cpu_deactivate(unsigned int cpu)
         */
        synchronize_rcu_mult(call_rcu, call_rcu_sched);
 
+#ifdef CONFIG_SCHED_SMT
+       /*
+        * When going down, decrement the number of cores with SMT present.
+        */
+       if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+               static_branch_dec_cpuslocked(&sched_smt_present);
+#endif
+
        if (!sched_smp_initialized)
                return 0;
 
@@ -5851,11 +5854,14 @@ void __init sched_init_smp(void)
        /*
         * There's no userspace yet to cause hotplug operations; hence all the
         * CPU masks are stable and all blatant races in the below code cannot
-        * happen.
+        * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+        * but there won't be any contention on it.
         */
+       cpus_read_lock();
        mutex_lock(&sched_domains_mutex);
        sched_init_domains(cpu_active_mask);
        mutex_unlock(&sched_domains_mutex);
+       cpus_read_unlock();
 
        /* Move init over to a non-isolated CPU */
        if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
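Switching from static_branch_enable() to the inc/dec variants turns sched_smt_present into a reference count: it stays true while at least one core has a second sibling online and is patched back out when the last such core goes away. In miniature:

    DEFINE_STATIC_KEY_FALSE(key);

    static_branch_inc(&key);   /* 0 -> 1: branch code patched in  */
    static_branch_inc(&key);   /* 1 -> 2: no code change          */
    static_branch_dec(&key);   /* 2 -> 1: still enabled           */
    static_branch_dec(&key);   /* 1 -> 0: branch code patched out */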
index ee271bb661cc923dfa67ae5d5c45a18c71df7cb1..ac855b2f47746efa80ed91081442626d15169568 100644 (file)
@@ -2400,8 +2400,8 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
                local = 1;
 
        /*
-        * Retry task to preferred node migration periodically, in case it
-        * case it previously failed, or the scheduler moved us.
+        * Retry to migrate task to preferred node periodically, in case it
+        * previously failed, or the scheduler moved us.
         */
        if (time_after(jiffies, p->numa_migrate_retry)) {
                task_numa_placement(p);
@@ -5674,11 +5674,11 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
        return target;
 }
 
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p);
+static unsigned long cpu_util_without(int cpu, struct task_struct *p);
 
-static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
+static unsigned long capacity_spare_without(int cpu, struct task_struct *p)
 {
-       return max_t(long, capacity_of(cpu) - cpu_util_wake(cpu, p), 0);
+       return max_t(long, capacity_of(cpu) - cpu_util_without(cpu, p), 0);
 }
 
 /*
@@ -5738,7 +5738,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                        avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
 
-                       spare_cap = capacity_spare_wake(i, p);
+                       spare_cap = capacity_spare_without(i, p);
 
                        if (spare_cap > max_spare_cap)
                                max_spare_cap = spare_cap;
@@ -5889,8 +5889,8 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
                return prev_cpu;
 
        /*
-        * We need task's util for capacity_spare_wake, sync it up to prev_cpu's
-        * last_update_time.
+        * We need task's util for capacity_spare_without, sync it up to
+        * prev_cpu's last_update_time.
         */
        if (!(sd_flag & SD_BALANCE_FORK))
                sync_entity_load_avg(&p->se);
@@ -6216,10 +6216,19 @@ static inline unsigned long cpu_util(int cpu)
 }
 
 /*
- * cpu_util_wake: Compute CPU utilization with any contributions from
- * the waking task p removed.
+ * cpu_util_without: compute CPU utilization without any contributions from *p
+ * @cpu: the CPU whose utilization is requested
+ * @p: the task whose utilization should be discounted
+ *
+ * The utilization of a CPU is defined by the utilization of tasks currently
+ * enqueued on that CPU as well as tasks which are currently sleeping after an
+ * execution on that CPU.
+ *
+ * This method returns the utilization of the specified CPU by discounting the
+ * utilization of the specified task, whenever the task is currently
+ * contributing to the CPU utilization.
  */
-static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
+static unsigned long cpu_util_without(int cpu, struct task_struct *p)
 {
        struct cfs_rq *cfs_rq;
        unsigned int util;
@@ -6231,7 +6240,7 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
        cfs_rq = &cpu_rq(cpu)->cfs;
        util = READ_ONCE(cfs_rq->avg.util_avg);
 
-       /* Discount task's blocked util from CPU's util */
+       /* Discount task's util from CPU's util */
        util -= min_t(unsigned int, util, task_util(p));
 
        /*
@@ -6240,14 +6249,14 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
         * a) if *p is the only task sleeping on this CPU, then:
         *      cpu_util (== task_util) > util_est (== 0)
         *    and thus we return:
-        *      cpu_util_wake = (cpu_util - task_util) = 0
+        *      cpu_util_without = (cpu_util - task_util) = 0
         *
         * b) if other tasks are SLEEPING on this CPU, which is now exiting
         *    IDLE, then:
         *      cpu_util >= task_util
         *      cpu_util > util_est (== 0)
         *    and thus we discount *p's blocked utilization to return:
-        *      cpu_util_wake = (cpu_util - task_util) >= 0
+        *      cpu_util_without = (cpu_util - task_util) >= 0
         *
         * c) if other tasks are RUNNABLE on that CPU and
         *      util_est > cpu_util
@@ -6260,8 +6269,33 @@ static unsigned long cpu_util_wake(int cpu, struct task_struct *p)
         * covered by the following code when estimated utilization is
         * enabled.
         */
-       if (sched_feat(UTIL_EST))
-               util = max(util, READ_ONCE(cfs_rq->avg.util_est.enqueued));
+       if (sched_feat(UTIL_EST)) {
+               unsigned int estimated =
+                       READ_ONCE(cfs_rq->avg.util_est.enqueued);
+
+               /*
+                * Despite the following checks we still have a small window
+                * for a possible race, when an execl's select_task_rq_fair()
+                * races with LB's detach_task():
+                *
+                *   detach_task()
+                *     p->on_rq = TASK_ON_RQ_MIGRATING;
+                *     ---------------------------------- A
+                *     deactivate_task()                   \
+                *       dequeue_task()                     + RaceTime
+                *         util_est_dequeue()              /
+                *     ---------------------------------- B
+                *
+                * The additional check on "current == p" is required to
+                * properly fix the execl regression, and it helps further
+                * reduce the chances of the above race.
+                */
+               if (unlikely(task_on_rq_queued(p) || current == p)) {
+                       estimated -= min_t(unsigned int, estimated,
+                                          (_task_util_est(p) | UTIL_AVG_UNCHANGED));
+               }
+               util = max(util, estimated);
+       }
 
        /*
         * Utilization (estimated) can exceed the CPU capacity, thus let's
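The estimated-utilization path thus becomes a saturating subtraction followed by a max. With hypothetical numbers:

    unsigned int util = 300;      /* cfs_rq->avg.util_avg              */
    unsigned int estimated = 450; /* cfs_rq->avg.util_est.enqueued     */
    unsigned int task_est = 200;  /* _task_util_est(p), made-up value  */

    estimated -= min(estimated, task_est); /* saturating: 450 -> 250   */
    util = max(util, estimated);           /* max(300, 250) -> 300     */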
index 7cdecfc010af83f1f5d8679536433f288aa847d7..fe24de3fbc93805f0c1e913a85657a15d141ad2f 100644 (file)
 
 static int psi_bug __read_mostly;
 
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+       return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
 
 /* Running averages - we need to be higher-res than loadavg */
 #define PSI_FREQ       (2*HZ+1)        /* 2 sec intervals */
@@ -169,8 +179,10 @@ static void group_init(struct psi_group *group)
 
 void __init psi_init(void)
 {
-       if (psi_disabled)
+       if (!psi_enable) {
+               static_branch_enable(&psi_disabled);
                return;
+       }
 
        psi_period = jiffies_to_nsecs(PSI_FREQ);
        group_init(&psi_system);
@@ -549,7 +561,7 @@ void psi_memstall_enter(unsigned long *flags)
        struct rq_flags rf;
        struct rq *rq;
 
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
 
        *flags = current->flags & PF_MEMSTALL;
@@ -579,7 +591,7 @@ void psi_memstall_leave(unsigned long *flags)
        struct rq_flags rf;
        struct rq *rq;
 
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
 
        if (*flags)
@@ -600,7 +612,7 @@ void psi_memstall_leave(unsigned long *flags)
 #ifdef CONFIG_CGROUPS
 int psi_cgroup_alloc(struct cgroup *cgroup)
 {
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return 0;
 
        cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
@@ -612,7 +624,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 
 void psi_cgroup_free(struct cgroup *cgroup)
 {
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
 
        cancel_delayed_work_sync(&cgroup->psi.clock_work);
@@ -633,38 +645,39 @@ void psi_cgroup_free(struct cgroup *cgroup)
  */
 void cgroup_move_task(struct task_struct *task, struct css_set *to)
 {
-       bool move_psi = !psi_disabled;
        unsigned int task_flags = 0;
        struct rq_flags rf;
        struct rq *rq;
 
-       if (move_psi) {
-               rq = task_rq_lock(task, &rf);
+       if (static_branch_likely(&psi_disabled)) {
+               /*
+                * Lame to do this here, but the scheduler cannot be locked
+                * from the outside, so we move cgroups from inside sched/.
+                */
+               rcu_assign_pointer(task->cgroups, to);
+               return;
+       }
 
-               if (task_on_rq_queued(task))
-                       task_flags = TSK_RUNNING;
-               else if (task->in_iowait)
-                       task_flags = TSK_IOWAIT;
+       rq = task_rq_lock(task, &rf);
 
-               if (task->flags & PF_MEMSTALL)
-                       task_flags |= TSK_MEMSTALL;
+       if (task_on_rq_queued(task))
+               task_flags = TSK_RUNNING;
+       else if (task->in_iowait)
+               task_flags = TSK_IOWAIT;
 
-               if (task_flags)
-                       psi_task_change(task, task_flags, 0);
-       }
+       if (task->flags & PF_MEMSTALL)
+               task_flags |= TSK_MEMSTALL;
 
-       /*
-        * Lame to do this here, but the scheduler cannot be locked
-        * from the outside, so we move cgroups from inside sched/.
-        */
+       if (task_flags)
+               psi_task_change(task, task_flags, 0);
+
+       /* See comment above */
        rcu_assign_pointer(task->cgroups, to);
 
-       if (move_psi) {
-               if (task_flags)
-                       psi_task_change(task, 0, task_flags);
+       if (task_flags)
+               psi_task_change(task, 0, task_flags);
 
-               task_rq_unlock(rq, task, &rf);
-       }
+       task_rq_unlock(rq, task, &rf);
 }
 #endif /* CONFIG_CGROUPS */
 
@@ -672,7 +685,7 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
 {
        int full;
 
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return -EOPNOTSUPP;
 
        update_stats(group);
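The policy is now fixed at boot: CONFIG_PSI_DEFAULT_DISABLED picks the default and the psi= parameter (parsed by setup_psi() above via kstrtobool()) overrides it, after which the disabled case costs only a patched-out static branch. On the kernel command line:

    psi=1   # enable accounting even with CONFIG_PSI_DEFAULT_DISABLED=y
    psi=0   # disable it; psi_init() then enables the psi_disabled key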
index 2e2955a8cf8fe3648a007036dde85320f5834a45..a21ea60219293a0be6cc65ee63918f650b2606e1 100644 (file)
@@ -1561,7 +1561,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 
        /*
         * We may dequeue prev's rt_rq in put_prev_task().
-        * So, we update time before rt_nr_running check.
+        * So, we update time before rt_queued check.
         */
        if (prev->sched_class == &rt_sched_class)
                update_curr_rt(rq);
index 618577fc9aa873d20425c3c4ac590bb91a8af003..4e524ab589c9b406d0f3bffa43e8da2793e20259 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/sched/prio.h>
 #include <linux/sched/rt.h>
 #include <linux/sched/signal.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/stat.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/task.h>
@@ -936,9 +937,6 @@ static inline int cpu_of(struct rq *rq)
 
 
 #ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
 extern void __update_idle_core(struct rq *rq);
 
 static inline void update_idle_core(struct rq *rq)
index 4904c46770007f4bd0ef2c8cb6e8b567ce300864..aa0de240fb419c966281f7fa5972f4d76b435615 100644 (file)
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
 {
        int clear = 0, set = TSK_RUNNING;
 
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
 
        if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 {
        int clear = TSK_RUNNING, set = 0;
 
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
 
        if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
 
 static inline void psi_ttwu_dequeue(struct task_struct *p)
 {
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
        /*
         * Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
 
 static inline void psi_task_tick(struct rq *rq)
 {
-       if (psi_disabled)
+       if (static_branch_likely(&psi_disabled))
                return;
 
        if (unlikely(rq->curr->flags & PF_MEMSTALL))
index 9d74371e4aad86a436c549045be08c2a1ed30853..8d7f15ba59163c3a496f0284c2473a0575d57958 100644 (file)
@@ -1337,7 +1337,7 @@ void sched_init_numa(void)
        int level = 0;
        int i, j, k;
 
-       sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
+       sched_domains_numa_distance = kzalloc(sizeof(int) * (nr_node_ids + 1), GFP_KERNEL);
        if (!sched_domains_numa_distance)
                return;
 
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
new file mode 100644 (file)
index 0000000..b193a59
--- /dev/null
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This code fills the used part of the kernel stack with a poison value
+ * before returning to userspace. It's part of the STACKLEAK feature
+ * ported from grsecurity/PaX.
+ *
+ * Author: Alexander Popov <alex.popov@linux.com>
+ *
+ * STACKLEAK reduces the information which kernel stack leak bugs can
+ * reveal and blocks some uninitialized stack variable attacks.
+ */
+
+#include <linux/stackleak.h>
+#include <linux/kprobes.h>
+
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+#include <linux/jump_label.h>
+#include <linux/sysctl.h>
+
+static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
+
+int stack_erasing_sysctl(struct ctl_table *table, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int ret = 0;
+       int state = !static_branch_unlikely(&stack_erasing_bypass);
+       int prev_state = state;
+
+       table->data = &state;
+       table->maxlen = sizeof(int);
+       ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+       state = !!state;
+       if (ret || !write || state == prev_state)
+               return ret;
+
+       if (state)
+               static_branch_disable(&stack_erasing_bypass);
+       else
+               static_branch_enable(&stack_erasing_bypass);
+
+       pr_warn("stackleak: kernel stack erasing is %s\n",
+                                       state ? "enabled" : "disabled");
+       return ret;
+}
+
+#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
+#else
+#define skip_erasing() false
+#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
+
+asmlinkage void notrace stackleak_erase(void)
+{
+       /* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
+       unsigned long kstack_ptr = current->lowest_stack;
+       unsigned long boundary = (unsigned long)end_of_stack(current);
+       unsigned int poison_count = 0;
+       const unsigned int depth = STACKLEAK_SEARCH_DEPTH / sizeof(unsigned long);
+
+       if (skip_erasing())
+               return;
+
+       /* Check that 'lowest_stack' value is sane */
+       if (unlikely(kstack_ptr - boundary >= THREAD_SIZE))
+               kstack_ptr = boundary;
+
+       /* Search for the poison value in the kernel stack */
+       while (kstack_ptr > boundary && poison_count <= depth) {
+               if (*(unsigned long *)kstack_ptr == STACKLEAK_POISON)
+                       poison_count++;
+               else
+                       poison_count = 0;
+
+               kstack_ptr -= sizeof(unsigned long);
+       }
+
+       /*
+        * One 'long int' at the bottom of the thread stack is reserved and
+        * should not be poisoned (see CONFIG_SCHED_STACK_END_CHECK=y).
+        */
+       if (kstack_ptr == boundary)
+               kstack_ptr += sizeof(unsigned long);
+
+#ifdef CONFIG_STACKLEAK_METRICS
+       current->prev_lowest_stack = kstack_ptr;
+#endif
+
+       /*
+        * Now write the poison value to the kernel stack. Start from
+        * 'kstack_ptr' and move up till the new 'boundary'. We assume that
+        * the stack pointer doesn't change when we write poison.
+        */
+       if (on_thread_stack())
+               boundary = current_stack_pointer;
+       else
+               boundary = current_top_of_stack();
+
+       while (kstack_ptr < boundary) {
+               *(unsigned long *)kstack_ptr = STACKLEAK_POISON;
+               kstack_ptr += sizeof(unsigned long);
+       }
+
+       /* Reset the 'lowest_stack' value for the next syscall */
+       current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
+}
+NOKPROBE_SYMBOL(stackleak_erase);
+
+void __used notrace stackleak_track_stack(void)
+{
+       /*
+        * N.B. stackleak_erase() fills the kernel stack with the poison value,
+        * which has the register width. That code assumes that the value
+        * of 'lowest_stack' is aligned on the register width boundary.
+        *
+        * That is true for x86 and x86_64 because of the kernel stack
+        * alignment on these platforms (for details, see 'cc_stack_align' in
+        * arch/x86/Makefile). Take care of that when you port STACKLEAK to
+        * new platforms.
+        */
+       unsigned long sp = (unsigned long)&sp;
+
+       /*
+        * Having CONFIG_STACKLEAK_TRACK_MIN_SIZE larger than
+        * STACKLEAK_SEARCH_DEPTH makes the poison search in
+        * stackleak_erase() unreliable. Let's prevent that.
+        */
+       BUILD_BUG_ON(CONFIG_STACKLEAK_TRACK_MIN_SIZE > STACKLEAK_SEARCH_DEPTH);
+
+       if (sp < current->lowest_stack &&
+           sp >= (unsigned long)task_stack_page(current) +
+                                               sizeof(unsigned long)) {
+               current->lowest_stack = sp;
+       }
+}
+EXPORT_SYMBOL(stackleak_track_stack);
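stackleak_track_stack() is never called by hand; the STACKLEAK gcc plugin inserts the call into every function whose stack frame exceeds CONFIG_STACKLEAK_TRACK_MIN_SIZE. Conceptually the instrumented code looks like this (illustrative sketch, not actual plugin output):

    void some_deep_function(void)
    {
            char buf[512];           /* frame > CONFIG_STACKLEAK_TRACK_MIN_SIZE */

            stackleak_track_stack(); /* call inserted by the compiler plugin */
            /* ... normal body; lowest_stack now reflects this frame ... */
    }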
index cc02050fd0c493378228eb6960e15449754f1387..5fc724e4e454c3304ecaebe7c868eb622f784eb8 100644 (file)
@@ -66,7 +66,6 @@
 #include <linux/kexec.h>
 #include <linux/bpf.h>
 #include <linux/mount.h>
-#include <linux/pipe_fs_i.h>
 
 #include <linux/uaccess.h>
 #include <asm/processor.h>
@@ -91,7 +90,9 @@
 #ifdef CONFIG_CHR_DEV_SG
 #include <scsi/sg.h>
 #endif
-
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+#include <linux/stackleak.h>
+#endif
 #ifdef CONFIG_LOCKUP_DETECTOR
 #include <linux/nmi.h>
 #endif
@@ -1232,6 +1233,17 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#endif
+#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
+       {
+               .procname       = "stack_erasing",
+               .data           = NULL,
+               .maxlen         = sizeof(int),
+               .mode           = 0600,
+               .proc_handler   = stack_erasing_sysctl,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
 #endif
        { }
 };
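This wires stack_erasing_sysctl() to a root-only (mode 0600) boolean at /proc/sys/kernel/stack_erasing, clamped to 0..1 by extra1/extra2. A minimal userspace sketch for toggling it:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/stack_erasing", "w");

            if (!f)         /* needs root and CONFIG_STACKLEAK_RUNTIME_DISABLE */
                    return 1;
            fputs("0", f);  /* bypass erasing; writing "1" re-enables it */
            return fclose(f) ? 1 : 0;
    }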
index ce32cf741b250939de562ab05bd598fb30f10986..8f0644af40be7e5869f8f664775183bdb07a0f56 100644 (file)
@@ -917,9 +917,6 @@ static void check_process_timers(struct task_struct *tsk,
        struct task_cputime cputime;
        unsigned long soft;
 
-       if (dl_task(tsk))
-               check_dl_overrun(tsk);
-
        /*
         * If cputimer is not running, then there are no active
         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
index e3a7f7fd3abc1aaf337b18a1eb268126f4308bd2..ad204cf6d0018ef99c00d4b30e942209b77d890c 100644 (file)
@@ -842,7 +842,7 @@ int get_timespec64(struct timespec64 *ts,
        ts->tv_sec = kts.tv_sec;
 
        /* Zero out the padding for 32 bit systems or in compat mode */
-       if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall()))
+       if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall())
                kts.tv_nsec &= 0xFFFFFFFFUL;
 
        ts->tv_nsec = kts.tv_nsec;
index fac0ddf8a8e22505749be3064e6b964ba12d4930..2868d85f1fb1d3286984c4727f0519957ac069a9 100644 (file)
@@ -764,9 +764,9 @@ blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
        if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
                return NULL;
 
-       if (!bio->bi_blkg)
+       if (!bio->bi_css)
                return NULL;
-       return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
+       return cgroup_get_kernfs_id(bio->bi_css->cgroup);
 }
 #else
 static union kernfs_node_id *
index 08fcfe440c6374e336b02c1dde33a478fc58615a..9864a35c8bb576e30655bccf62e5174cb1591072 100644 (file)
@@ -196,11 +196,13 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
                        i++;
                } else if (fmt[i] == 'p' || fmt[i] == 's') {
                        mod[fmt_cnt]++;
-                       i++;
-                       if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+                       /* disallow any further format extensions */
+                       if (fmt[i + 1] != 0 &&
+                           !isspace(fmt[i + 1]) &&
+                           !ispunct(fmt[i + 1]))
                                return -EINVAL;
                        fmt_cnt++;
-                       if (fmt[i - 1] == 's') {
+                       if (fmt[i] == 's') {
                                if (str_seen)
                                        /* allow only one '%s' per fmt string */
                                        return -EINVAL;
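Because the check now peeks at fmt[i + 1] before advancing, a '%s' or '%p' must be followed by whitespace, punctuation, or the end of the string, which rejects extended conversions outright. Illustrative format strings:

    "comm: %s\n"   /* accepted: '%s' followed by whitespace      */
    "ptr: %p\n"    /* accepted                                   */
    "%pK\n"        /* -EINVAL: extension character after '%p'    */
    "%s %s\n"      /* -EINVAL: only one '%s' per format string   */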
index f536f601bd46e321a5ae77b2fbb0f3d642edb13e..77734451cb05d3876a123a6c620c33287cc8cc33 100644 (file)
@@ -817,7 +817,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-       int index = trace->depth;
+       int index = current->curr_ret_stack;
 
        function_profile_call(trace->func, 0, NULL, NULL);
 
@@ -852,7 +852,7 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
        if (!fgraph_graph_time) {
                int index;
 
-               index = trace->depth;
+               index = current->curr_ret_stack;
 
                /* Append this call time to the parent time to subtract */
                if (index)
@@ -6814,6 +6814,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                        atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
                        t->curr_ret_stack = -1;
+                       t->curr_ret_depth = -1;
                        /* Make sure the tasks see the -1 first: */
                        smp_wmb();
                        t->ret_stack = ret_stack_list[start++];
@@ -7038,6 +7039,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
 {
        t->curr_ret_stack = -1;
+       t->curr_ret_depth = -1;
        /*
         * The idle task has no parent, it either has its own
         * stack or no stack at all.
@@ -7068,6 +7070,7 @@ void ftrace_graph_init_task(struct task_struct *t)
        /* Make sure we do not use the parent ret_stack */
        t->ret_stack = NULL;
        t->curr_ret_stack = -1;
+       t->curr_ret_depth = -1;
 
        if (ftrace_graph_active) {
                struct ftrace_ret_stack *ret_stack;
index 3b8c0e24ab306f5f1ca1d6194749a266baa14fc5..447bd96ee658aacc085875cd014a6bcb782e6a96 100644 (file)
@@ -512,12 +512,44 @@ enum {
  * can only be modified by current, we can reuse trace_recursion.
  */
        TRACE_IRQ_BIT,
+
+       /* Set if the function is in the set_graph_function file */
+       TRACE_GRAPH_BIT,
+
+       /*
+        * In the very unlikely case that an interrupt came in
+        * at a start of graph tracing, and we want to trace
+        * the function in that interrupt, the depth can be greater
+        * than zero, because of the preempted start of a previous
+        * trace. In an even more unlikely case, depth could be 2
+        * if a softirq interrupted the start of graph tracing,
+        * followed by an interrupt preempting a start of graph
+        * tracing in the softirq, and depth can even be 3
+        * if an NMI came in at the start of an interrupt function
+        * that preempted a softirq start of a function that
+        * preempted normal context!!!! Luckily, it can't be
+        * greater than 3, so the next two bits are a mask
+        * of what the depth is when we set TRACE_GRAPH_BIT
+        */
+
+       TRACE_GRAPH_DEPTH_START_BIT,
+       TRACE_GRAPH_DEPTH_END_BIT,
 };
 
 #define trace_recursion_set(bit)       do { (current)->trace_recursion |= (1<<(bit)); } while (0)
 #define trace_recursion_clear(bit)     do { (current)->trace_recursion &= ~(1<<(bit)); } while (0)
 #define trace_recursion_test(bit)      ((current)->trace_recursion & (1<<(bit)))
 
+#define trace_recursion_depth() \
+       (((current)->trace_recursion >> TRACE_GRAPH_DEPTH_START_BIT) & 3)
+#define trace_recursion_set_depth(depth) \
+       do {                                                            \
+               current->trace_recursion &=                             \
+                       ~(3 << TRACE_GRAPH_DEPTH_START_BIT);            \
+               current->trace_recursion |=                             \
+                       ((depth) & 3) << TRACE_GRAPH_DEPTH_START_BIT;   \
+       } while (0)
+
 #define TRACE_CONTEXT_BITS     4
 
 #define TRACE_FTRACE_START     TRACE_FTRACE_BIT
@@ -843,8 +875,9 @@ extern void __trace_graph_return(struct trace_array *tr,
 extern struct ftrace_hash *ftrace_graph_hash;
 extern struct ftrace_hash *ftrace_graph_notrace_hash;
 
-static inline int ftrace_graph_addr(unsigned long addr)
+static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 {
+       unsigned long addr = trace->func;
        int ret = 0;
 
        preempt_disable_notrace();
@@ -855,6 +888,14 @@ static inline int ftrace_graph_addr(unsigned long addr)
        }
 
        if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+
+               /*
+                * This needs to be cleared on the return functions
+                * when the depth is zero.
+                */
+               trace_recursion_set(TRACE_GRAPH_BIT);
+               trace_recursion_set_depth(trace->depth);
+
                /*
                 * If no irqs are to be traced, but a set_graph_function
                 * is set, and called by an interrupt handler, we still
@@ -872,6 +913,13 @@ out:
        return ret;
 }
 
+static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+{
+       if (trace_recursion_test(TRACE_GRAPH_BIT) &&
+           trace->depth == trace_recursion_depth())
+               trace_recursion_clear(TRACE_GRAPH_BIT);
+}
+
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
        int ret = 0;
@@ -885,7 +933,7 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
        return ret;
 }
 #else
-static inline int ftrace_graph_addr(unsigned long addr)
+static inline int ftrace_graph_addr(struct ftrace_graph_ent *trace)
 {
        return 1;
 }
@@ -894,6 +942,8 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
        return 0;
 }
+static inline void ftrace_graph_addr_finish(struct ftrace_graph_ret *trace)
+{ }
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 extern unsigned int fgraph_max_depth;
@@ -901,7 +951,8 @@ extern unsigned int fgraph_max_depth;
 static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
 {
        /* trace it when it is-nested-in or is a function enabled. */
-       return !(trace->depth || ftrace_graph_addr(trace->func)) ||
+       return !(trace_recursion_test(TRACE_GRAPH_BIT) ||
+                ftrace_graph_addr(trace)) ||
                (trace->depth < 0) ||
                (fgraph_max_depth && trace->depth >= fgraph_max_depth);
 }
index 169b3c44ee97f3cf00bc574b185f16fa572a12d5..086af4f5c3e846755f7c0da269bf6cda70b4893c 100644 (file)
@@ -118,8 +118,8 @@ print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);
 
 /* Add a function return address to the trace stack on thread info.*/
-int
-ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
+static int
+ftrace_push_return_trace(unsigned long ret, unsigned long func,
                         unsigned long frame_pointer, unsigned long *retp)
 {
        unsigned long long calltime;
@@ -177,9 +177,31 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 #ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
        current->ret_stack[index].retp = retp;
 #endif
-       *depth = current->curr_ret_stack;
+       return 0;
+}
+
+int function_graph_enter(unsigned long ret, unsigned long func,
+                        unsigned long frame_pointer, unsigned long *retp)
+{
+       struct ftrace_graph_ent trace;
+
+       trace.func = func;
+       trace.depth = ++current->curr_ret_depth;
+
+       if (ftrace_push_return_trace(ret, func,
+                                    frame_pointer, retp))
+               goto out;
+
+       /* Only trace if the calling function expects to */
+       if (!ftrace_graph_entry(&trace))
+               goto out_ret;
 
        return 0;
+ out_ret:
+       current->curr_ret_stack--;
+ out:
+       current->curr_ret_depth--;
+       return -EBUSY;
 }
 
 /* Retrieve a function return address to the trace stack on thread info.*/
@@ -241,7 +263,13 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
-       trace->depth = index;
+       trace->depth = current->curr_ret_depth--;
+       /*
+        * We still want to trace interrupts coming in if
+        * max_depth is set to 1. Make sure the decrement is
+        * seen before ftrace_graph_return.
+        */
+       barrier();
 }
 
 /*
@@ -255,6 +283,12 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 
        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
+       ftrace_graph_return(&trace);
+       /*
+        * The ftrace_graph_return() may still access the current
+        * ret_stack structure; we need to make sure the update of
+        * curr_ret_stack is after that.
+        */
        barrier();
        current->curr_ret_stack--;
        /*
@@ -267,13 +301,6 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
                return ret;
        }
 
-       /*
-        * The trace should run after decrementing the ret counter
-        * in case an interrupt were to come in. We don't want to
-        * lose the interrupt if max_depth is set.
-        */
-       ftrace_graph_return(&trace);
-
        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
@@ -482,6 +509,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
        int cpu;
        int pc;
 
+       ftrace_graph_addr_finish(trace);
+
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
@@ -505,6 +534,8 @@ void set_graph_array(struct trace_array *tr)
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
+       ftrace_graph_addr_finish(trace);
+
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;
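function_graph_enter() centralizes what each architecture used to open-code: push the return-stack entry, consult the entry handler, and unwind both counters on failure. Arch hooks can then reduce to something like this (a sketch, not any particular architecture's code):

    void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
                               unsigned long frame_pointer)
    {
            unsigned long old = *parent;

            if (function_graph_enter(old, self_addr, frame_pointer, parent))
                    return;         /* -EBUSY: leave the return address alone */
            *parent = (unsigned long)&return_to_handler;
    }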
index b7357f9f82a35456e4a51acf443b2b2019ea3d83..98ea6d28df15d4093ad9b9744b07ec1522191c7c 100644 (file)
@@ -208,6 +208,8 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
        unsigned long flags;
        int pc;
 
+       ftrace_graph_addr_finish(trace);
+
        if (!func_prolog_dec(tr, &data, &flags))
                return;
 
index 3ef15a6683c002bc2c5402b5be8ad07c903021bc..bd30e9398d2a8b2afbc49839c969efd277ae2728 100644 (file)
@@ -535,7 +535,7 @@ int traceprobe_update_arg(struct probe_arg *arg)
                        if (code[1].op != FETCH_OP_IMM)
                                return -EINVAL;
 
-                       tmp = strpbrk("+-", code->data);
+                       tmp = strpbrk(code->data, "+-");
                        if (tmp)
                                c = *tmp;
                        ret = traceprobe_split_symbol_offset(code->data,
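strpbrk(cs, ct) scans its first argument for the first character that appears in its second; with the arguments reversed, the old call scanned the literal "+-" instead of the probe argument, so the sign it found did not come from the right position in code->data:

    strpbrk("sym-name+0x10", "+-");  /* fixed: points at '-', first sign in data */
    strpbrk("+-", "sym-name+0x10");  /* old:   points at '+' inside the literal  */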
index a86b303e6c67dc7e1561636ddf52a9c8136fe65f..7d04b989075516f327c22a2b3dae77f227b75f1a 100644 (file)
@@ -270,6 +270,8 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
        unsigned long flags;
        int pc;
 
+       ftrace_graph_addr_finish(trace);
+
        if (!func_prolog_preempt_disable(tr, &data, &pc))
                return;
 
index e5222b5fb4fe6c3868c78ee3602518ce835c85e3..923414a246e9e4eb4bd422e8146133cad50db45f 100644 (file)
@@ -974,10 +974,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
                goto out;
 
-       ret = sort_idmaps(&new_map);
-       if (ret < 0)
-               goto out;
-
        ret = -EPERM;
        /* Map the lower ids from the parent user namespace to the
         * kernel global id space.
@@ -1004,6 +1000,14 @@ static ssize_t map_write(struct file *file, const char __user *buf,
                e->lower_first = lower_first;
        }
 
+       /*
+        * If we want to use binary search for lookup, this clones the extent
+        * array and sorts both copies.
+        */
+       ret = sort_idmaps(&new_map);
+       if (ret < 0)
+               goto out;
+
        /* Install the map */
        if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
                memcpy(map->extent, new_map.extent,
index 70935ed9112599c3517829c0f6dea0de6d2435e4..14afeeb7d6ef5b91929702af25f96ef4eeb711ec 100644 (file)
@@ -135,7 +135,6 @@ static void fill_pool(void)
                if (!new)
                        return;
 
-               kmemleak_ignore(new);
                raw_spin_lock_irqsave(&pool_lock, flags);
                hlist_add_head(&new->node, &obj_pool);
                debug_objects_allocated++;
@@ -1128,7 +1127,6 @@ static int __init debug_objects_replace_static_objects(void)
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
-               kmemleak_ignore(obj);
                hlist_add_head(&obj->node, &objects);
        }
 
@@ -1184,7 +1182,8 @@ void __init debug_objects_mem_init(void)
 
        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
-                                     SLAB_DEBUG_OBJECTS, NULL);
+                                     SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+                                     NULL);
 
        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
index 8be175df30753c95692007a5d41503838344d9a5..54c248526b55fc498c996f2a5ea651262fcc7f61 100644 (file)
@@ -83,6 +83,7 @@
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
+               } else if (unlikely(i->type & ITER_DISCARD)) {  \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
+               } else if (unlikely(i->type & ITER_DISCARD)) {  \
+                       skip += n;                              \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
@@ -428,17 +431,19 @@ int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 }
 EXPORT_SYMBOL(iov_iter_fault_in_readable);
 
-void iov_iter_init(struct iov_iter *i, int direction,
+void iov_iter_init(struct iov_iter *i, unsigned int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
 {
+       WARN_ON(direction & ~(READ | WRITE));
+       direction &= READ | WRITE;
+
        /* It will get better.  Eventually... */
        if (uaccess_kernel()) {
-               direction |= ITER_KVEC;
-               i->type = direction;
+               i->type = ITER_KVEC | direction;
                i->kvec = (struct kvec *)iov;
        } else {
-               i->type = direction;
+               i->type = ITER_IOVEC | direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
@@ -555,10 +560,42 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
        return bytes;
 }
 
+static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
+                               __wsum *csum, struct iov_iter *i)
+{
+       struct pipe_inode_info *pipe = i->pipe;
+       size_t n, r;
+       size_t off = 0;
+       __wsum sum = *csum, next;
+       int idx;
+
+       if (!sanity(i))
+               return 0;
+
+       bytes = n = push_pipe(i, bytes, &idx, &r);
+       if (unlikely(!n))
+               return 0;
+       for ( ; n; idx = next_idx(idx, pipe), r = 0) {
+               size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
+               char *p = kmap_atomic(pipe->bufs[idx].page);
+               next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
+               sum = csum_block_add(sum, next, off);
+               kunmap_atomic(p);
+               i->idx = idx;
+               i->iov_offset = r + chunk;
+               n -= chunk;
+               off += chunk;
+               addr += chunk;
+       }
+       i->count -= bytes;
+       *csum = sum;
+       return bytes;
+}
+
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
        const char *from = addr;
-       if (unlikely(i->type & ITER_PIPE))
+       if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
@@ -658,7 +695,7 @@ size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
        const char *from = addr;
        unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
 
-       if (unlikely(i->type & ITER_PIPE))
+       if (unlikely(iov_iter_is_pipe(i)))
                return copy_pipe_to_iter_mcsafe(addr, bytes, i);
        if (iter_is_iovec(i))
                might_fault();
@@ -692,7 +729,7 @@ EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
@@ -712,7 +749,7 @@ EXPORT_SYMBOL(_copy_from_iter);
 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
@@ -739,7 +776,7 @@ EXPORT_SYMBOL(_copy_from_iter_full);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
@@ -773,7 +810,7 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return 0;
        }
@@ -794,7 +831,7 @@ EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                WARN_ON(1);
                return false;
        }
@@ -836,7 +873,9 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
-       } else if (likely(!(i->type & ITER_PIPE)))
+       } else if (unlikely(iov_iter_is_discard(i)))
+               return bytes;
+       else if (likely(!iov_iter_is_pipe(i)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
@@ -848,7 +887,7 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 {
        if (unlikely(!page_copy_sane(page, offset, bytes)))
                return 0;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return 0;
        }
@@ -888,7 +927,7 @@ static size_t pipe_zero(size_t bytes, struct iov_iter *i)
 
 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
-       if (unlikely(i->type & ITER_PIPE))
+       if (unlikely(iov_iter_is_pipe(i)))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                clear_user(v.iov_base, v.iov_len),
@@ -908,7 +947,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                kunmap_atomic(kaddr);
                return 0;
        }
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
@@ -972,10 +1011,14 @@ static void pipe_advance(struct iov_iter *i, size_t size)
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                pipe_advance(i, size);
                return;
        }
+       if (unlikely(iov_iter_is_discard(i))) {
+               i->count -= size;
+               return;
+       }
        iterate_and_advance(i, size, v, 0, 0, 0)
 }
 EXPORT_SYMBOL(iov_iter_advance);
@@ -987,7 +1030,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
        if (WARN_ON(unroll > MAX_RW_COUNT))
                return;
        i->count += unroll;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                int idx = i->idx;
                size_t off = i->iov_offset;
@@ -1011,12 +1054,14 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
                pipe_truncate(i);
                return;
        }
+       if (unlikely(iov_iter_is_discard(i)))
+               return;
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
-       if (i->type & ITER_BVEC) {
+       if (iov_iter_is_bvec(i)) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
@@ -1049,23 +1094,25 @@ EXPORT_SYMBOL(iov_iter_revert);
  */
 size_t iov_iter_single_seg_count(const struct iov_iter *i)
 {
-       if (unlikely(i->type & ITER_PIPE))
+       if (unlikely(iov_iter_is_pipe(i)))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
-       else if (i->type & ITER_BVEC)
+       if (unlikely(iov_iter_is_discard(i)))
+               return i->count;
+       else if (iov_iter_is_bvec(i))
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
 }
 EXPORT_SYMBOL(iov_iter_single_seg_count);
 
-void iov_iter_kvec(struct iov_iter *i, int direction,
+void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
 {
-       BUG_ON(!(direction & ITER_KVEC));
-       i->type = direction;
+       WARN_ON(direction & ~(READ | WRITE));
+       i->type = ITER_KVEC | (direction & (READ | WRITE));
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
@@ -1073,12 +1120,12 @@ void iov_iter_kvec(struct iov_iter *i, int direction,
 }
 EXPORT_SYMBOL(iov_iter_kvec);
 
-void iov_iter_bvec(struct iov_iter *i, int direction,
+void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
 {
-       BUG_ON(!(direction & ITER_BVEC));
-       i->type = direction;
+       WARN_ON(direction & ~(READ | WRITE));
+       i->type = ITER_BVEC | (direction & (READ | WRITE));
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
@@ -1086,13 +1133,13 @@ void iov_iter_bvec(struct iov_iter *i, int direction,
 }
 EXPORT_SYMBOL(iov_iter_bvec);
 
-void iov_iter_pipe(struct iov_iter *i, int direction,
+void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
 {
-       BUG_ON(direction != ITER_PIPE);
+       BUG_ON(direction != READ);
        WARN_ON(pipe->nrbufs == pipe->buffers);
-       i->type = direction;
+       i->type = ITER_PIPE | READ;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
@@ -1101,12 +1148,30 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
 }
 EXPORT_SYMBOL(iov_iter_pipe);
 
+/**
+ * iov_iter_discard - Initialise an I/O iterator that discards data
+ * @i: The iterator to initialise.
+ * @direction: The direction of the transfer.
+ * @count: The size of the I/O buffer in bytes.
+ *
+ * Set up an I/O iterator that just discards everything that's written to it.
+ * It's only available as a READ iterator.
+ */
+void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
+{
+       BUG_ON(direction != READ);
+       i->type = ITER_DISCARD | READ;
+       i->count = count;
+       i->iov_offset = 0;
+}
+EXPORT_SYMBOL(iov_iter_discard);
+
 unsigned long iov_iter_alignment(const struct iov_iter *i)
 {
        unsigned long res = 0;
        size_t size = i->count;
 
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
@@ -1125,7 +1190,7 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
        unsigned long res = 0;
        size_t size = i->count;
 
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return ~0U;
        }
@@ -1193,8 +1258,11 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
        if (maxsize > i->count)
                maxsize = i->count;
 
-       if (unlikely(i->type & ITER_PIPE))
+       if (unlikely(iov_iter_is_pipe(i)))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
+       if (unlikely(iov_iter_is_discard(i)))
+               return -EFAULT;
+
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1205,7 +1273,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
-               res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
+               res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
@@ -1270,8 +1338,11 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
        if (maxsize > i->count)
                maxsize = i->count;
 
-       if (unlikely(i->type & ITER_PIPE))
+       if (unlikely(iov_iter_is_pipe(i)))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
+       if (unlikely(iov_iter_is_discard(i)))
+               return -EFAULT;
+
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
@@ -1283,7 +1354,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
-               res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
+               res = get_user_pages_fast(addr, n, iov_iter_rw(i) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
@@ -1313,7 +1384,7 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return 0;
        }
@@ -1355,7 +1426,7 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
                WARN_ON(1);
                return false;
        }
@@ -1399,8 +1470,12 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
+
+       if (unlikely(iov_iter_is_pipe(i)))
+               return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+
        sum = *csum;
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_discard(i))) {
                WARN_ON(1);     /* for now */
                return 0;
        }
@@ -1442,8 +1517,10 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
 
        if (!size)
                return 0;
+       if (unlikely(iov_iter_is_discard(i)))
+               return 0;
 
-       if (unlikely(i->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(i))) {
                struct pipe_inode_info *pipe = i->pipe;
                size_t off;
                int idx;
@@ -1481,11 +1558,13 @@ EXPORT_SYMBOL(iov_iter_npages);
 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
 {
        *new = *old;
-       if (unlikely(new->type & ITER_PIPE)) {
+       if (unlikely(iov_iter_is_pipe(new))) {
                WARN_ON(1);
                return NULL;
        }
-       if (new->type & ITER_BVEC)
+       if (unlikely(iov_iter_is_discard(new)))
+               return NULL;
+       if (iov_iter_is_bvec(new))
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
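A discard iterator acts as a sink: copy_to_iter()-style operations report success and consume count without touching memory, so code that must drain unwanted bytes (from a socket or pipe, say) can reuse the normal iterator paths. A minimal usage sketch (buf is assumed to exist):

    struct iov_iter iter;
    size_t n;

    iov_iter_discard(&iter, READ, 4096);  /* sink for up to 4096 bytes   */
    n = copy_to_iter(buf, 512, &iter);    /* n == 512, data just dropped */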
index 5d73f5cb4d8a78f0887cc6cfbf10a29ec5d5f51c..79777645cac9c1243518f4f4bf403cdc567aa9ea 100644 (file)
@@ -27,7 +27,7 @@ ifeq ($(ARCH),arm)
         CFLAGS += -I../../../arch/arm/include -mfpu=neon
         HAS_NEON = yes
 endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
         CFLAGS += -I../../../arch/arm64/include
         HAS_NEON = yes
 endif
@@ -41,7 +41,7 @@ ifeq ($(IS_X86),yes)
                    gcc -c -x assembler - >&/dev/null &&        \
                    rm ./-.o && echo -DCONFIG_AS_AVX512=1)
 else ifeq ($(HAS_NEON),yes)
-        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o
+        OBJS   += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
         CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
 else
         HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
index b984806d7d7bb1f4665ffe6cecfa5849aecc8c22..7cab9a9869ace92900ec58aabffa175d2249296d 100644 (file)
@@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev,
        if (req->fw->size > PAGE_SIZE) {
                pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
                rc = -EINVAL;
+               goto out;
        }
        memcpy(buf, req->fw->data, req->fw->size);
 
index 626f580b4ff7b0c52fd3aeb7cffdf19429c3f457..5144899d3c6b8b235d33c8a988f3a413fa34aff8 100644 (file)
@@ -99,7 +99,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
                const char *q = *result++;
                size_t amount = strlen(q);
 
-               strncpy(p, q, amount);
+               memcpy(p, q, amount);
                p += amount;
 
                *p++ = ' ';
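Since amount is strlen(q), the copy deliberately omits the terminator; strncpy() with an exact length would not add one either, but it trips gcc's -Wstringop-truncation, whereas memcpy() states the intent:

    size_t amount = strlen(q);
    memcpy(p, q, amount);   /* copy exactly amount bytes, no NUL wanted here */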
index e3ddd836491faef2a21c1b1218d4fd1c4b219f0f..d82d022111e0e5321ef17cff105e1e87bc90ba0e 100644 (file)
@@ -1214,7 +1214,6 @@ void unregister_test_dev_kmod(struct kmod_test_device *test_dev)
 
        dev_info(test_dev->dev, "removing interface\n");
        misc_deregister(&test_dev->misc_dev);
-       kfree(&test_dev->misc_dev.name);
 
        mutex_unlock(&test_dev->config_mutex);
        mutex_unlock(&test_dev->trigger_mutex);
index aa47754150cee9a0374ca16195f17c061225a538..0598e86af8fc327266988a273f5826730d951e0d 100644 (file)
@@ -208,15 +208,19 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index)
                        XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_2));
 
                        /* We should see two elements in the array */
+                       rcu_read_lock();
                        xas_for_each(&xas, entry, ULONG_MAX)
                                seen++;
+                       rcu_read_unlock();
                        XA_BUG_ON(xa, seen != 2);
 
                        /* One of which is marked */
                        xas_set(&xas, 0);
                        seen = 0;
+                       rcu_read_lock();
                        xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_0)
                                seen++;
+                       rcu_read_unlock();
                        XA_BUG_ON(xa, seen != 1);
                }
                XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0));
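The added rcu_read_lock()/rcu_read_unlock() pairs reflect the XArray's locking rules: advancing an xa_state with xas_for_each() walks tree nodes under RCU, so a pure reader must hold the RCU read lock. A minimal sketch of the idiom, assuming the 4.20-era <linux/xarray.h>:

#include <linux/xarray.h>
#include <linux/rcupdate.h>

/* Count present entries; the iteration itself is the RCU-protected part. */
static unsigned long count_present(struct xarray *xa)
{
	XA_STATE(xas, xa, 0);
	void *entry;
	unsigned long seen = 0;

	rcu_read_lock();
	xas_for_each(&xas, entry, ULONG_MAX)
		seen++;
	rcu_read_unlock();
	return seen;
}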
@@ -373,6 +377,12 @@ static noinline void check_reserve(struct xarray *xa)
        xa_erase_index(xa, 12345678);
        XA_BUG_ON(xa, !xa_empty(xa));
 
+       /* And so does xa_insert */
+       xa_reserve(xa, 12345678, GFP_KERNEL);
+       XA_BUG_ON(xa, xa_insert(xa, 12345678, xa_mk_value(12345678), 0) != 0);
+       xa_erase_index(xa, 12345678);
+       XA_BUG_ON(xa, !xa_empty(xa));
+
        /* Can iterate through a reserved entry */
        xa_store_index(xa, 5, GFP_KERNEL);
        xa_reserve(xa, 6, GFP_KERNEL);
@@ -436,7 +446,9 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index,
        XA_BUG_ON(xa, xa_load(xa, max) != NULL);
        XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL);
 
+       xas_lock(&xas);
        XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(min)) != xa_mk_value(index));
+       xas_unlock(&xas);
        XA_BUG_ON(xa, xa_load(xa, min) != xa_mk_value(min));
        XA_BUG_ON(xa, xa_load(xa, max - 1) != xa_mk_value(min));
        XA_BUG_ON(xa, xa_load(xa, max) != NULL);
@@ -452,9 +464,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index,
        XA_STATE(xas, xa, index);
        xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL);
 
+       xas_lock(&xas);
        XA_BUG_ON(xa, xas_store(&xas, xa_mk_value(1)) != xa_mk_value(0));
        XA_BUG_ON(xa, xas.xa_index != index);
        XA_BUG_ON(xa, xas_store(&xas, NULL) != xa_mk_value(1));
+       xas_unlock(&xas);
        XA_BUG_ON(xa, !xa_empty(xa));
 }
 #endif
@@ -498,7 +512,7 @@ static noinline void check_multi_store(struct xarray *xa)
        rcu_read_unlock();
 
        /* We can erase multiple values with a single store */
-       xa_store_order(xa, 0, 63, NULL, GFP_KERNEL);
+       xa_store_order(xa, 0, BITS_PER_LONG - 1, NULL, GFP_KERNEL);
        XA_BUG_ON(xa, !xa_empty(xa));
 
        /* Even when the first slot is empty but the others aren't */
@@ -702,7 +716,7 @@ static noinline void check_multi_find_2(struct xarray *xa)
        }
 }
 
-static noinline void check_find(struct xarray *xa)
+static noinline void check_find_1(struct xarray *xa)
 {
        unsigned long i, j, k;
 
@@ -748,6 +762,34 @@ static noinline void check_find(struct xarray *xa)
                XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0));
        }
        XA_BUG_ON(xa, !xa_empty(xa));
+}
+
+static noinline void check_find_2(struct xarray *xa)
+{
+       void *entry;
+       unsigned long i, j, index = 0;
+
+       xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+               XA_BUG_ON(xa, true);
+       }
+
+       for (i = 0; i < 1024; i++) {
+               xa_store_index(xa, index, GFP_KERNEL);
+               j = 0;
+               index = 0;
+               xa_for_each(xa, entry, index, ULONG_MAX, XA_PRESENT) {
+                       XA_BUG_ON(xa, xa_mk_value(index) != entry);
+                       XA_BUG_ON(xa, index != j++);
+               }
+       }
+
+       xa_destroy(xa);
+}
+
+static noinline void check_find(struct xarray *xa)
+{
+       check_find_1(xa);
+       check_find_2(xa);
        check_multi_find(xa);
        check_multi_find_2(xa);
 }
@@ -1067,7 +1109,7 @@ static noinline void check_store_range(struct xarray *xa)
                        __check_store_range(xa, 4095 + i, 4095 + j);
                        __check_store_range(xa, 4096 + i, 4096 + j);
                        __check_store_range(xa, 123456 + i, 123456 + j);
-                       __check_store_range(xa, UINT_MAX + i, UINT_MAX + j);
+                       __check_store_range(xa, (1 << 24) + i, (1 << 24) + j);
                }
        }
 }
@@ -1146,10 +1188,12 @@ static noinline void check_account(struct xarray *xa)
                XA_STATE(xas, xa, 1 << order);
 
                xa_store_order(xa, 0, order, xa, GFP_KERNEL);
+               rcu_read_lock();
                xas_load(&xas);
                XA_BUG_ON(xa, xas.xa_node->count == 0);
                XA_BUG_ON(xa, xas.xa_node->count > (1 << order));
                XA_BUG_ON(xa, xas.xa_node->nr_values != 0);
+               rcu_read_unlock();
 
                xa_store_order(xa, 1 << order, order, xa_mk_value(1 << order),
                                GFP_KERNEL);
index 59fee96c29a0f1fb83fe67282482bc5d941bd6f3..e4162f59a81ccacda275cd218193fb2ad34d71d3 100644 (file)
@@ -427,8 +427,7 @@ void __ubsan_handle_shift_out_of_bounds(struct shift_out_of_bounds_data *data,
 EXPORT_SYMBOL(__ubsan_handle_shift_out_of_bounds);
 
 
-void __noreturn
-__ubsan_handle_builtin_unreachable(struct unreachable_data *data)
+void __ubsan_handle_builtin_unreachable(struct unreachable_data *data)
 {
        unsigned long flags;
 
index 8b176f009c0877853087772b18cd53ef15b2c099..bbacca576593613f25cf107485f3cd8a89896d0b 100644 (file)
@@ -610,8 +610,8 @@ static int xas_expand(struct xa_state *xas, void *head)
  * (see the xa_cmpxchg() implementation for an example).
  *
  * Return: If the slot already existed, returns the contents of this slot.
- * If the slot was newly created, returns NULL.  If it failed to create the
- * slot, returns NULL and indicates the error in @xas.
+ * If the slot was newly created, returns %NULL.  If it failed to create the
+ * slot, returns %NULL and indicates the error in @xas.
  */
 static void *xas_create(struct xa_state *xas)
 {
@@ -1334,44 +1334,31 @@ void *__xa_erase(struct xarray *xa, unsigned long index)
        XA_STATE(xas, xa, index);
        return xas_result(&xas, xas_store(&xas, NULL));
 }
-EXPORT_SYMBOL_GPL(__xa_erase);
+EXPORT_SYMBOL(__xa_erase);
 
 /**
- * xa_store() - Store this entry in the XArray.
+ * xa_erase() - Erase this entry from the XArray.
  * @xa: XArray.
- * @index: Index into array.
- * @entry: New entry.
- * @gfp: Memory allocation flags.
+ * @index: Index of entry.
  *
- * After this function returns, loads from this index will return @entry.
- * Storing into an existing multislot entry updates the entry of every index.
- * The marks associated with @index are unaffected unless @entry is %NULL.
+ * This function is the equivalent of calling xa_store() with %NULL as
+ * the third argument.  The XArray does not need to allocate memory, so
+ * the user does not need to provide GFP flags.
  *
- * Context: Process context.  Takes and releases the xa_lock.  May sleep
- * if the @gfp flags permit.
- * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
- * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
- * failed.
+ * Context: Any context.  Takes and releases the xa_lock.
+ * Return: The entry which used to be at this index.
  */
-void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
+void *xa_erase(struct xarray *xa, unsigned long index)
 {
-       XA_STATE(xas, xa, index);
-       void *curr;
-
-       if (WARN_ON_ONCE(xa_is_internal(entry)))
-               return XA_ERROR(-EINVAL);
+       void *entry;
 
-       do {
-               xas_lock(&xas);
-               curr = xas_store(&xas, entry);
-               if (xa_track_free(xa) && entry)
-                       xas_clear_mark(&xas, XA_FREE_MARK);
-               xas_unlock(&xas);
-       } while (xas_nomem(&xas, gfp));
+       xa_lock(xa);
+       entry = __xa_erase(xa, index);
+       xa_unlock(xa);
 
-       return xas_result(&xas, curr);
+       return entry;
 }
-EXPORT_SYMBOL(xa_store);
+EXPORT_SYMBOL(xa_erase);
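Per the relocated kernel-doc, xa_erase() is xa_store() of %NULL without the allocation concerns, so it is callable from any context. A minimal usage sketch; the array and helper are hypothetical:

#include <linux/xarray.h>

static DEFINE_XARRAY(my_xa);	/* illustrative array */

/* Removal never allocates, so no GFP flags and no sleeping. */
static void drop_entry(unsigned long index)
{
	void *old = xa_erase(&my_xa, index);

	if (old)
		pr_debug("removed entry at %lu\n", index);
}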
 
 /**
  * __xa_store() - Store this entry in the XArray.
@@ -1395,10 +1382,12 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 
        if (WARN_ON_ONCE(xa_is_internal(entry)))
                return XA_ERROR(-EINVAL);
+       if (xa_track_free(xa) && !entry)
+               entry = XA_ZERO_ENTRY;
 
        do {
                curr = xas_store(&xas, entry);
-               if (xa_track_free(xa) && entry)
+               if (xa_track_free(xa))
                        xas_clear_mark(&xas, XA_FREE_MARK);
        } while (__xas_nomem(&xas, gfp));
 
@@ -1407,45 +1396,33 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 EXPORT_SYMBOL(__xa_store);
 
 /**
- * xa_cmpxchg() - Conditionally replace an entry in the XArray.
+ * xa_store() - Store this entry in the XArray.
  * @xa: XArray.
  * @index: Index into array.
- * @old: Old value to test against.
- * @entry: New value to place in array.
+ * @entry: New entry.
  * @gfp: Memory allocation flags.
  *
- * If the entry at @index is the same as @old, replace it with @entry.
- * If the return value is equal to @old, then the exchange was successful.
+ * After this function returns, loads from this index will return @entry.
+ * Storing into an existing multislot entry updates the entry of every index.
+ * The marks associated with @index are unaffected unless @entry is %NULL.
  *
- * Context: Process context.  Takes and releases the xa_lock.  May sleep
- * if the @gfp flags permit.
- * Return: The old value at this index or xa_err() if an error happened.
+ * Context: Any context.  Takes and releases the xa_lock.
+ * May sleep if the @gfp flags permit.
+ * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
+ * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
+ * failed.
  */
-void *xa_cmpxchg(struct xarray *xa, unsigned long index,
-                       void *old, void *entry, gfp_t gfp)
+void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
 {
-       XA_STATE(xas, xa, index);
        void *curr;
 
-       if (WARN_ON_ONCE(xa_is_internal(entry)))
-               return XA_ERROR(-EINVAL);
-
-       do {
-               xas_lock(&xas);
-               curr = xas_load(&xas);
-               if (curr == XA_ZERO_ENTRY)
-                       curr = NULL;
-               if (curr == old) {
-                       xas_store(&xas, entry);
-                       if (xa_track_free(xa) && entry)
-                               xas_clear_mark(&xas, XA_FREE_MARK);
-               }
-               xas_unlock(&xas);
-       } while (xas_nomem(&xas, gfp));
+       xa_lock(xa);
+       curr = __xa_store(xa, index, entry, gfp);
+       xa_unlock(xa);
 
-       return xas_result(&xas, curr);
+       return curr;
 }
-EXPORT_SYMBOL(xa_cmpxchg);
+EXPORT_SYMBOL(xa_store);
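xa_store() now wraps __xa_store() under the lock and may sleep if @gfp allows. A hedged sketch of the documented error convention, where failures come back encoded in the returned entry:

#include <linux/xarray.h>

/* Store an object under an id; xa_err() decodes the returned entry into
 * 0 on success, or -ENOMEM/-EINVAL on failure. */
static int cache_put(struct xarray *xa, unsigned long id, void *obj)
{
	void *old = xa_store(xa, id, obj, GFP_KERNEL);

	return xa_err(old);
}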
 
 /**
  * __xa_cmpxchg() - Store this entry in the XArray.
@@ -1471,6 +1448,8 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 
        if (WARN_ON_ONCE(xa_is_internal(entry)))
                return XA_ERROR(-EINVAL);
+       if (xa_track_free(xa) && !entry)
+               entry = XA_ZERO_ENTRY;
 
        do {
                curr = xas_load(&xas);
@@ -1478,7 +1457,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
                        curr = NULL;
                if (curr == old) {
                        xas_store(&xas, entry);
-                       if (xa_track_free(xa) && entry)
+                       if (xa_track_free(xa))
                                xas_clear_mark(&xas, XA_FREE_MARK);
                }
        } while (__xas_nomem(&xas, gfp));
@@ -1488,7 +1467,7 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
 EXPORT_SYMBOL(__xa_cmpxchg);
 
 /**
- * xa_reserve() - Reserve this index in the XArray.
+ * __xa_reserve() - Reserve this index in the XArray.
  * @xa: XArray.
  * @index: Index into array.
  * @gfp: Memory allocation flags.
@@ -1496,33 +1475,32 @@ EXPORT_SYMBOL(__xa_cmpxchg);
  * Ensures there is somewhere to store an entry at @index in the array.
  * If there is already something stored at @index, this function does
  * nothing.  If there was nothing there, the entry is marked as reserved.
- * Loads from @index will continue to see a %NULL pointer until a
- * subsequent store to @index.
+ * Loading from a reserved entry returns a %NULL pointer.
  *
  * If you do not use the entry that you have reserved, call xa_release()
  * or xa_erase() to free any unnecessary memory.
  *
- * Context: Process context.  Takes and releases the xa_lock, IRQ or BH safe
- * if specified in XArray flags.  May sleep if the @gfp flags permit.
+ * Context: Any context.  Expects the xa_lock to be held on entry.  May
+ * release the lock, sleep and reacquire the lock if the @gfp flags permit.
  * Return: 0 if the reservation succeeded or -ENOMEM if it failed.
  */
-int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
+int __xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp)
 {
        XA_STATE(xas, xa, index);
-       unsigned int lock_type = xa_lock_type(xa);
        void *curr;
 
        do {
-               xas_lock_type(&xas, lock_type);
                curr = xas_load(&xas);
-               if (!curr)
+               if (!curr) {
                        xas_store(&xas, XA_ZERO_ENTRY);
-               xas_unlock_type(&xas, lock_type);
-       } while (xas_nomem(&xas, gfp));
+                       if (xa_track_free(xa))
+                               xas_clear_mark(&xas, XA_FREE_MARK);
+               }
+       } while (__xas_nomem(&xas, gfp));
 
        return xas_error(&xas);
 }
-EXPORT_SYMBOL(xa_reserve);
+EXPORT_SYMBOL(__xa_reserve);
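With the reservation logic moved into __xa_reserve(), the locked xa_reserve() wrapper keeps the documented behaviour: a reserved slot loads as %NULL until a real store. A minimal sketch of the reserve-then-use-or-release pattern:

#include <linux/xarray.h>

/* Reserve a slot up front, then either fill it or give it back. */
static int claim_slot(struct xarray *xa, unsigned long index, void *obj)
{
	int err = xa_reserve(xa, index, GFP_KERNEL);

	if (err)
		return err;
	if (!obj) {
		xa_release(xa, index);	/* free the unused reservation */
		return 0;
	}
	xa_store(xa, index, obj, GFP_KERNEL);
	return 0;
}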
 
 #ifdef CONFIG_XARRAY_MULTI
 static void xas_set_range(struct xa_state *xas, unsigned long first,
@@ -1587,8 +1565,9 @@ void *xa_store_range(struct xarray *xa, unsigned long first,
        do {
                xas_lock(&xas);
                if (entry) {
-                       unsigned int order = (last == ~0UL) ? 64 :
-                                               ilog2(last + 1);
+                       unsigned int order = BITS_PER_LONG;
+                       if (last + 1)
+                               order = __ffs(last + 1);
                        xas_set_order(&xas, last, order);
                        xas_create(&xas);
                        if (xas_error(&xas))
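The rewritten order computation avoids the 64-bit-only special case: for a properly aligned range, last + 1 is a power of two, so __ffs() yields its log2, and the wrap of ~0UL + 1 to zero selects BITS_PER_LONG. A worked restatement, illustration only:

#include <linux/bitops.h>

/* e.g. last = 0xffff -> order 16; last = ~0UL -> order BITS_PER_LONG,
 * because last + 1 wraps to 0 and the if-branch is skipped. */
static unsigned int range_order(unsigned long last)
{
	unsigned int order = BITS_PER_LONG;

	if (last + 1)
		order = __ffs(last + 1);
	return order;
}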
@@ -1662,7 +1641,7 @@ EXPORT_SYMBOL(__xa_alloc);
  * @index: Index of entry.
  * @mark: Mark number.
  *
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
  *
  * Context: Any context.  Expects xa_lock to be held on entry.
  */
@@ -1674,7 +1653,7 @@ void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
        if (entry)
                xas_set_mark(&xas, mark);
 }
-EXPORT_SYMBOL_GPL(__xa_set_mark);
+EXPORT_SYMBOL(__xa_set_mark);
 
 /**
  * __xa_clear_mark() - Clear this mark on this entry while locked.
@@ -1692,7 +1671,7 @@ void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
        if (entry)
                xas_clear_mark(&xas, mark);
 }
-EXPORT_SYMBOL_GPL(__xa_clear_mark);
+EXPORT_SYMBOL(__xa_clear_mark);
 
 /**
  * xa_get_mark() - Inquire whether this mark is set on this entry.
@@ -1732,7 +1711,7 @@ EXPORT_SYMBOL(xa_get_mark);
  * @index: Index of entry.
  * @mark: Mark number.
  *
- * Attempting to set a mark on a NULL entry does not succeed.
+ * Attempting to set a mark on a %NULL entry does not succeed.
  *
  * Context: Process context.  Takes and releases the xa_lock.
  */
@@ -1829,6 +1808,8 @@ void *xa_find_after(struct xarray *xa, unsigned long *indexp,
                        entry = xas_find_marked(&xas, max, filter);
                else
                        entry = xas_find(&xas, max);
+               if (xas.xa_node == XAS_BOUNDS)
+                       break;
                if (xas.xa_shift) {
                        if (xas.xa_index & ((1UL << xas.xa_shift) - 1))
                                continue;
@@ -1899,7 +1880,7 @@ static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
  *
  * The @filter may be an XArray mark value, in which case entries which are
  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
- * which case all entries which are not NULL will be copied.
+ * which case all entries which are not %NULL will be copied.
  *
  * The entries returned may not represent a snapshot of the XArray at a
  * moment in time.  For example, if another thread stores to index 5, then
index 218d0b2ec82d1534dcb66b4744f886d7d0262d55..81adec8ee02cc3bdb765625e28c3d765f203e512 100644 (file)
@@ -2049,7 +2049,7 @@ find_page:
                                        !mapping->a_ops->is_partially_uptodate)
                                goto page_not_up_to_date;
                        /* pipes can't handle partially uptodate pages */
-                       if (unlikely(iter->type & ITER_PIPE))
+                       if (unlikely(iov_iter_is_pipe(iter)))
                                goto page_not_up_to_date;
                        if (!trylock_page(page))
                                goto page_not_up_to_date;
@@ -2824,6 +2824,42 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page_gfp);
 
+/*
+ * Don't operate on ranges the page cache doesn't support, and don't exceed the
+ * LFS limits.  If pos is under the limit it becomes a short access.  If it
+ * exceeds the limit we return -EFBIG.
+ */
+static int generic_access_check_limits(struct file *file, loff_t pos,
+                                      loff_t *count)
+{
+       struct inode *inode = file->f_mapping->host;
+       loff_t max_size = inode->i_sb->s_maxbytes;
+
+       if (!(file->f_flags & O_LARGEFILE))
+               max_size = MAX_NON_LFS;
+
+       if (unlikely(pos >= max_size))
+               return -EFBIG;
+       *count = min(*count, max_size - pos);
+       return 0;
+}
+
+static int generic_write_check_limits(struct file *file, loff_t pos,
+                                     loff_t *count)
+{
+       loff_t limit = rlimit(RLIMIT_FSIZE);
+
+       if (limit != RLIM_INFINITY) {
+               if (pos >= limit) {
+                       send_sig(SIGXFSZ, current, 0);
+                       return -EFBIG;
+               }
+               *count = min(*count, limit - pos);
+       }
+
+       return generic_access_check_limits(file, pos, count);
+}
+
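The two new helpers centralise the limit handling generic_write_checks() used to open-code: a write that starts under a limit is shortened, one that starts at or past it fails with -EFBIG. A hedged numeric illustration of that clamp:

#include <linux/kernel.h>
#include <linux/fs.h>

/* Illustration only: a 4096-byte write at pos = limit - 512 is shortened
 * to 512 bytes; a write at pos >= limit fails outright (the real code
 * also raises SIGXFSZ for the rlimit case). */
static loff_t clamp_write(loff_t pos, loff_t want, loff_t limit)
{
	if (pos >= limit)
		return -EFBIG;
	return min(want, limit - pos);
}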
 /*
  * Performs necessary checks before doing a write
  *
@@ -2835,8 +2871,8 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
-       unsigned long limit = rlimit(RLIMIT_FSIZE);
-       loff_t pos;
+       loff_t count;
+       int ret;
 
        if (!iov_iter_count(from))
                return 0;
@@ -2845,43 +2881,99 @@ inline ssize_t generic_write_checks(struct kiocb *iocb, struct iov_iter *from)
        if (iocb->ki_flags & IOCB_APPEND)
                iocb->ki_pos = i_size_read(inode);
 
-       pos = iocb->ki_pos;
-
        if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
                return -EINVAL;
 
-       if (limit != RLIM_INFINITY) {
-               if (iocb->ki_pos >= limit) {
-                       send_sig(SIGXFSZ, current, 0);
-                       return -EFBIG;
-               }
-               iov_iter_truncate(from, limit - (unsigned long)pos);
-       }
+       count = iov_iter_count(from);
+       ret = generic_write_check_limits(file, iocb->ki_pos, &count);
+       if (ret)
+               return ret;
+
+       iov_iter_truncate(from, count);
+       return iov_iter_count(from);
+}
+EXPORT_SYMBOL(generic_write_checks);
+
+/*
+ * Performs necessary checks before doing a clone.
+ *
+ * Can adjust amount of bytes to clone.
+ * Returns appropriate error code that caller should return or
+ * zero in case the clone should be allowed.
+ */
+int generic_remap_checks(struct file *file_in, loff_t pos_in,
+                        struct file *file_out, loff_t pos_out,
+                        loff_t *req_count, unsigned int remap_flags)
+{
+       struct inode *inode_in = file_in->f_mapping->host;
+       struct inode *inode_out = file_out->f_mapping->host;
+       uint64_t count = *req_count;
+       uint64_t bcount;
+       loff_t size_in, size_out;
+       loff_t bs = inode_out->i_sb->s_blocksize;
+       int ret;
+
+       /* The start of both ranges must be aligned to an fs block. */
+       if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
+               return -EINVAL;
+
+       /* Ensure offsets don't wrap. */
+       if (pos_in + count < pos_in || pos_out + count < pos_out)
+               return -EINVAL;
+
+       size_in = i_size_read(inode_in);
+       size_out = i_size_read(inode_out);
+
+       /* Dedupe requires both ranges to be within EOF. */
+       if ((remap_flags & REMAP_FILE_DEDUP) &&
+           (pos_in >= size_in || pos_in + count > size_in ||
+            pos_out >= size_out || pos_out + count > size_out))
+               return -EINVAL;
+
+       /* Ensure the infile range is within the infile. */
+       if (pos_in >= size_in)
+               return -EINVAL;
+       count = min(count, size_in - (uint64_t)pos_in);
+
+       ret = generic_access_check_limits(file_in, pos_in, &count);
+       if (ret)
+               return ret;
+
+       ret = generic_write_check_limits(file_out, pos_out, &count);
+       if (ret)
+               return ret;
 
        /*
-        * LFS rule
+        * If the user wanted us to link to the infile's EOF, round up to the
+        * next block boundary for this check.
+        *
+        * Otherwise, make sure the count is also block-aligned, having
+        * already confirmed the starting offsets' block alignment.
         */
-       if (unlikely(pos + iov_iter_count(from) > MAX_NON_LFS &&
-                               !(file->f_flags & O_LARGEFILE))) {
-               if (pos >= MAX_NON_LFS)
-                       return -EFBIG;
-               iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos);
+       if (pos_in + count == size_in) {
+               bcount = ALIGN(size_in, bs) - pos_in;
+       } else {
+               if (!IS_ALIGNED(count, bs))
+                       count = ALIGN_DOWN(count, bs);
+               bcount = count;
        }
 
+       /* Don't allow overlapped cloning within the same file. */
+       if (inode_in == inode_out &&
+           pos_out + bcount > pos_in &&
+           pos_out < pos_in + bcount)
+               return -EINVAL;
+
        /*
-        * Are we about to exceed the fs block limit ?
-        *
-        * If we have written data it becomes a short write.  If we have
-        * exceeded without writing data we send a signal and return EFBIG.
-        * Linus frestrict idea will clean these up nicely..
+        * We shortened the request but the caller can't deal with that, so
+        * bounce the request back to userspace.
         */
-       if (unlikely(pos >= inode->i_sb->s_maxbytes))
-               return -EFBIG;
+       if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
+               return -EINVAL;
 
-       iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos);
-       return iov_iter_count(from);
+       *req_count = count;
+       return 0;
 }
-EXPORT_SYMBOL(generic_write_checks);
 
 int pagecache_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
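generic_remap_checks() above gathers the clone/dedupe validation in one place: block-aligned starts, no offset wraparound, in-EOF ranges for dedupe, and a count that is rounded down to the block size unless it ends exactly at the source EOF. A worked sketch of that count arithmetic, assuming a 4 KiB block size:

#include <linux/kernel.h>

/* Illustration only: a 6000-byte request at an aligned offset is trimmed
 * to 4096 unless it reaches EOF, where it is rounded up to cover the
 * final partial block instead. */
static loff_t remap_count_example(loff_t count, loff_t size_in, loff_t pos_in)
{
	const loff_t bs = 4096;

	if (pos_in + count == size_in)		/* linking to the infile's EOF */
		return ALIGN(size_in, bs) - pos_in;
	return ALIGN_DOWN(count, bs);		/* 6000 -> 4096 */
}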
index f76e77a2d34b79afec5f3032366a6bd954d1aead..8cb68a50dbdf28a27dfaef6dce27e6cdb1f87881 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -385,11 +385,17 @@ static struct page *follow_p4d_mask(struct vm_area_struct *vma,
  * @vma: vm_area_struct mapping @address
  * @address: virtual address to look up
  * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
+ * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
+ *       pointer to output page_mask
  *
  * @flags can have FOLL_ flags set, defined in <linux/mm.h>
  *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
+ * the device's dev_pagemap metadata to avoid repeating expensive lookups.
+ *
+ * On output, the @ctx->page_mask is set according to the size of the page.
+ *
+ * Return: the mapped (struct page *), %NULL if no mapping exists, or
  * an error pointer if there is a mapping to something not represented
  * by a page descriptor (see also vm_normal_page()).
  */
@@ -696,12 +702,11 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                if (!vma || start >= vma->vm_end) {
                        vma = find_extend_vma(mm, start);
                        if (!vma && in_gate_area(mm, start)) {
-                               int ret;
                                ret = get_gate_page(mm, start & PAGE_MASK,
                                                gup_flags, &vma,
                                                pages ? &pages[i] : NULL);
                                if (ret)
-                                       return i ? : ret;
+                                       goto out;
                                ctx.page_mask = 0;
                                goto next_page;
                        }
index 4e4ef8fa479d53b7ee7c4c8fcb86985acb790c8a..5da55b38b1b7fd2878a20b41c180b839096fd872 100644 (file)
@@ -633,16 +633,25 @@ static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma)
 {
        const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE);
 
+       /* Always do synchronous compaction */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
+
+       /* Kick kcompactd and fail quickly */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags))
                return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
+
+       /* Synchronous compaction if madvised, otherwise kick kcompactd */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                                            __GFP_KSWAPD_RECLAIM);
+               return GFP_TRANSHUGE_LIGHT |
+                       (vma_madvised ? __GFP_DIRECT_RECLAIM :
+                                       __GFP_KSWAPD_RECLAIM);
+
+       /* Only do synchronous compaction if madvised */
        if (test_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags))
-               return GFP_TRANSHUGE_LIGHT | (vma_madvised ? __GFP_DIRECT_RECLAIM :
-                                                            0);
+               return GFP_TRANSHUGE_LIGHT |
+                      (vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
+
        return GFP_TRANSHUGE_LIGHT;
 }
 
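The new comments make the defrag-policy-to-GFP mapping explicit. A condensed restatement, assuming the sysfs setting names behind each TRANSPARENT_HUGEPAGE_DEFRAG_* flag; the enum is illustrative only:

#include <linux/gfp.h>

enum thp_defrag { THP_ALWAYS, THP_DEFER, THP_DEFER_MADVISE, THP_MADVISE };

/* 'vma_madvised' means MADV_HUGEPAGE was set on the VMA. */
static gfp_t thp_gfp(enum thp_defrag defrag, bool vma_madvised)
{
	switch (defrag) {
	case THP_ALWAYS:	/* always: synchronous compaction */
		return GFP_TRANSHUGE | (vma_madvised ? 0 : __GFP_NORETRY);
	case THP_DEFER:		/* defer: kick kcompactd, fail quickly */
		return GFP_TRANSHUGE_LIGHT | __GFP_KSWAPD_RECLAIM;
	case THP_DEFER_MADVISE:	/* sync if madvised, else kcompactd */
		return GFP_TRANSHUGE_LIGHT | (vma_madvised ?
			__GFP_DIRECT_RECLAIM : __GFP_KSWAPD_RECLAIM);
	case THP_MADVISE:	/* sync compaction only if madvised */
	default:
		return GFP_TRANSHUGE_LIGHT |
			(vma_madvised ? __GFP_DIRECT_RECLAIM : 0);
	}
}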
@@ -2330,7 +2339,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
        }
 }
 
-static void freeze_page(struct page *page)
+static void unmap_page(struct page *page)
 {
        enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
                TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
@@ -2345,7 +2354,7 @@ static void freeze_page(struct page *page)
        VM_BUG_ON_PAGE(!unmap_success, page);
 }
 
-static void unfreeze_page(struct page *page)
+static void remap_page(struct page *page)
 {
        int i;
        if (PageTransHuge(page)) {
@@ -2382,6 +2391,12 @@ static void __split_huge_page_tail(struct page *head, int tail,
                         (1L << PG_unevictable) |
                         (1L << PG_dirty)));
 
+       /* ->mapping in first tail page is compound_mapcount */
+       VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
+                       page_tail);
+       page_tail->mapping = head->mapping;
+       page_tail->index = head->index + tail;
+
        /* Page flags must be visible before we make the page non-compound. */
        smp_wmb();
 
@@ -2402,12 +2417,6 @@ static void __split_huge_page_tail(struct page *head, int tail,
        if (page_is_idle(head))
                set_page_idle(page_tail);
 
-       /* ->mapping in first tail page is compound_mapcount */
-       VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
-                       page_tail);
-       page_tail->mapping = head->mapping;
-
-       page_tail->index = head->index + tail;
        page_cpupid_xchg_last(page_tail, page_cpupid_last(head));
 
        /*
@@ -2419,12 +2428,11 @@ static void __split_huge_page_tail(struct page *head, int tail,
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
-               unsigned long flags)
+               pgoff_t end, unsigned long flags)
 {
        struct page *head = compound_head(page);
        struct zone *zone = page_zone(head);
        struct lruvec *lruvec;
-       pgoff_t end = -1;
        int i;
 
        lruvec = mem_cgroup_page_lruvec(head, zone->zone_pgdat);
@@ -2432,9 +2440,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
        /* complete memcg works before add pages to LRU */
        mem_cgroup_split_huge_fixup(head);
 
-       if (!PageAnon(page))
-               end = DIV_ROUND_UP(i_size_read(head->mapping->host), PAGE_SIZE);
-
        for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
                __split_huge_page_tail(head, i, lruvec, list);
                /* Some pages can be beyond i_size: drop them from page cache */
@@ -2463,7 +2468,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
        spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
 
-       unfreeze_page(head);
+       remap_page(head);
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                struct page *subpage = head + i;
@@ -2606,6 +2611,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        int count, mapcount, extra_pins, ret;
        bool mlocked;
        unsigned long flags;
+       pgoff_t end;
 
        VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
@@ -2628,6 +2634,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        ret = -EBUSY;
                        goto out;
                }
+               end = -1;
                mapping = NULL;
                anon_vma_lock_write(anon_vma);
        } else {
@@ -2641,10 +2648,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 
                anon_vma = NULL;
                i_mmap_lock_read(mapping);
+
+               /*
+                * __split_huge_page() may need to trim off pages beyond EOF:
+                * but on 32-bit, i_size_read() takes an irq-unsafe seqlock,
+                * which cannot be nested inside the page tree lock. So note
+                * end now: i_size itself may be changed at any moment, but
+                * head page lock is good enough to serialize the trimming.
+                */
+               end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
        }
 
        /*
-        * Racy check if we can split the page, before freeze_page() will
+        * Racy check if we can split the page, before unmap_page() will
         * split PMDs
         */
        if (!can_split_huge_page(head, &extra_pins)) {
@@ -2653,7 +2669,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
        }
 
        mlocked = PageMlocked(page);
-       freeze_page(head);
+       unmap_page(head);
        VM_BUG_ON_PAGE(compound_mapcount(head), head);
 
        /* Make sure the page is not on per-CPU pagevec as it takes pin */
@@ -2687,7 +2703,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                if (mapping)
                        __dec_node_page_state(page, NR_SHMEM_THPS);
                spin_unlock(&pgdata->split_queue_lock);
-               __split_huge_page(page, list, flags);
+               __split_huge_page(page, list, end, flags);
                if (PageSwapCache(head)) {
                        swp_entry_t entry = { .val = page_private(head) };
 
@@ -2707,7 +2723,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 fail:          if (mapping)
                        xa_unlock(&mapping->i_pages);
                spin_unlock_irqrestore(zone_lru_lock(page_zone(head)), flags);
-               unfreeze_page(head);
+               remap_page(head);
                ret = -EBUSY;
        }
 
index c007fb5fb8d5f6547dc916e5ab468753028a862b..705a3e9cc910e16472159a2d20f9585e1ff7c13d 100644 (file)
@@ -3233,7 +3233,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
 {
-       pte_t *src_pte, *dst_pte, entry;
+       pte_t *src_pte, *dst_pte, entry, dst_entry;
        struct page *ptepage;
        unsigned long addr;
        int cow;
@@ -3261,15 +3261,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                        break;
                }
 
-               /* If the pagetables are shared don't copy or take references */
-               if (dst_pte == src_pte)
+               /*
+                * If the pagetables are shared don't copy or take references.
+                * dst_pte == src_pte is the common case of src/dest sharing.
+                *
+                * However, src could have 'unshared' and dst shares with
+                * another vma.  If dst_pte !none, this implies sharing.
+                * Check here before taking page table lock, and once again
+                * after taking the lock below.
+                */
+               dst_entry = huge_ptep_get(dst_pte);
+               if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
                        continue;
 
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
                entry = huge_ptep_get(src_pte);
-               if (huge_pte_none(entry)) { /* skip none entry */
+               dst_entry = huge_ptep_get(dst_pte);
+               if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
+                       /*
+                        * Skip if src entry none.  Also, skip in the
+                        * unlikely case dst entry !none as this implies
+                        * sharing with another vma.
+                        */
                        ;
                } else if (unlikely(is_hugetlb_entry_migration(entry) ||
                                    is_hugetlb_entry_hwpoisoned(entry))) {
@@ -4065,7 +4080,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                /* fallback to copy_from_user outside mmap_sem */
                if (unlikely(ret)) {
-                       ret = -EFAULT;
+                       ret = -ENOENT;
                        *pagep = page;
                        /* don't free the page */
                        goto out;
index c13625c1ad5e5e91abf1d45bdbcf388f9b1f2690..8e2ff195ecb30a6e3314664d60110bfa7781fd53 100644 (file)
@@ -1287,7 +1287,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
  *
  * Basic scheme is simple, details are more complex:
- *  - allocate and freeze a new huge page;
+ *  - allocate and lock a new huge page;
  *  - scan page cache replacing old pages with the new one
  *    + swap in pages if necessary;
  *    + fill in gaps;
@@ -1295,11 +1295,11 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
  *  - if replacing succeeds:
  *    + copy data over;
  *    + free old pages;
- *    + unfreeze huge page;
+ *    + unlock huge page;
  *  - if replacing failed;
  *    + put all pages back and unfreeze them;
  *    + restore gaps in the page cache;
- *    + free huge page;
+ *    + unlock and free huge page;
  */
 static void collapse_shmem(struct mm_struct *mm,
                struct address_space *mapping, pgoff_t start,
@@ -1329,19 +1329,6 @@ static void collapse_shmem(struct mm_struct *mm,
                goto out;
        }
 
-       new_page->index = start;
-       new_page->mapping = mapping;
-       __SetPageSwapBacked(new_page);
-       __SetPageLocked(new_page);
-       BUG_ON(!page_ref_freeze(new_page, 1));
-
-       /*
-        * At this point the new_page is 'frozen' (page_count() is zero),
-        * locked and not up-to-date. It's safe to insert it into the page
-        * cache, because nobody would be able to map it or use it in other
-        * way until we unfreeze it.
-        */
-
        /* This will be less messy when we use multi-index entries */
        do {
                xas_lock_irq(&xas);
@@ -1349,19 +1336,44 @@ static void collapse_shmem(struct mm_struct *mm,
                if (!xas_error(&xas))
                        break;
                xas_unlock_irq(&xas);
-               if (!xas_nomem(&xas, GFP_KERNEL))
+               if (!xas_nomem(&xas, GFP_KERNEL)) {
+                       mem_cgroup_cancel_charge(new_page, memcg, true);
+                       result = SCAN_FAIL;
                        goto out;
+               }
        } while (1);
 
+       __SetPageLocked(new_page);
+       __SetPageSwapBacked(new_page);
+       new_page->index = start;
+       new_page->mapping = mapping;
+
+       /*
+        * At this point the new_page is locked and not up-to-date.
+        * It's safe to insert it into the page cache, because nobody would
+        * be able to map it or use it in another way until we unlock it.
+        */
+
        xas_set(&xas, start);
        for (index = start; index < end; index++) {
                struct page *page = xas_next(&xas);
 
                VM_BUG_ON(index != xas.xa_index);
                if (!page) {
+                       /*
+                        * Stop if extent has been truncated or hole-punched,
+                        * and is now completely empty.
+                        */
+                       if (index == start) {
+                               if (!xas_next_entry(&xas, end - 1)) {
+                                       result = SCAN_TRUNCATED;
+                                       goto xa_locked;
+                               }
+                               xas_set(&xas, index);
+                       }
                        if (!shmem_charge(mapping->host, 1)) {
                                result = SCAN_FAIL;
-                               break;
+                               goto xa_locked;
                        }
                        xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
                        nr_none++;
@@ -1376,13 +1388,12 @@ static void collapse_shmem(struct mm_struct *mm,
                                result = SCAN_FAIL;
                                goto xa_unlocked;
                        }
-                       xas_lock_irq(&xas);
-                       xas_set(&xas, index);
                } else if (trylock_page(page)) {
                        get_page(page);
+                       xas_unlock_irq(&xas);
                } else {
                        result = SCAN_PAGE_LOCK;
-                       break;
+                       goto xa_locked;
                }
 
                /*
@@ -1391,17 +1402,24 @@ static void collapse_shmem(struct mm_struct *mm,
                 */
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(!PageUptodate(page), page);
-               VM_BUG_ON_PAGE(PageTransCompound(page), page);
+
+               /*
+                * If file was truncated then extended, or hole-punched, before
+                * we locked the first page, then a THP might be there already.
+                */
+               if (PageTransCompound(page)) {
+                       result = SCAN_PAGE_COMPOUND;
+                       goto out_unlock;
+               }
 
                if (page_mapping(page) != mapping) {
                        result = SCAN_TRUNCATED;
                        goto out_unlock;
                }
-               xas_unlock_irq(&xas);
 
                if (isolate_lru_page(page)) {
                        result = SCAN_DEL_PAGE_LRU;
-                       goto out_isolate_failed;
+                       goto out_unlock;
                }
 
                if (page_mapped(page))
@@ -1421,7 +1439,9 @@ static void collapse_shmem(struct mm_struct *mm,
                 */
                if (!page_ref_freeze(page, 3)) {
                        result = SCAN_PAGE_COUNT;
-                       goto out_lru;
+                       xas_unlock_irq(&xas);
+                       putback_lru_page(page);
+                       goto out_unlock;
                }
 
                /*
@@ -1433,71 +1453,74 @@ static void collapse_shmem(struct mm_struct *mm,
                /* Finally, replace with the new page. */
                xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
                continue;
-out_lru:
-               xas_unlock_irq(&xas);
-               putback_lru_page(page);
-out_isolate_failed:
-               unlock_page(page);
-               put_page(page);
-               goto xa_unlocked;
 out_unlock:
                unlock_page(page);
                put_page(page);
-               break;
+               goto xa_unlocked;
        }
-       xas_unlock_irq(&xas);
 
+       __inc_node_page_state(new_page, NR_SHMEM_THPS);
+       if (nr_none) {
+               struct zone *zone = page_zone(new_page);
+
+               __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
+               __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+       }
+
+xa_locked:
+       xas_unlock_irq(&xas);
 xa_unlocked:
+
        if (result == SCAN_SUCCEED) {
                struct page *page, *tmp;
-               struct zone *zone = page_zone(new_page);
 
                /*
                 * Replacing old pages with new one has succeeded, now we
                 * need to copy the content and free the old pages.
                 */
+               index = start;
                list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+                       while (index < page->index) {
+                               clear_highpage(new_page + (index % HPAGE_PMD_NR));
+                               index++;
+                       }
                        copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
                                        page);
                        list_del(&page->lru);
-                       unlock_page(page);
-                       page_ref_unfreeze(page, 1);
                        page->mapping = NULL;
+                       page_ref_unfreeze(page, 1);
                        ClearPageActive(page);
                        ClearPageUnevictable(page);
+                       unlock_page(page);
                        put_page(page);
+                       index++;
                }
-
-               local_irq_disable();
-               __inc_node_page_state(new_page, NR_SHMEM_THPS);
-               if (nr_none) {
-                       __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
-                       __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
+               while (index < end) {
+                       clear_highpage(new_page + (index % HPAGE_PMD_NR));
+                       index++;
                }
-               local_irq_enable();
 
-               /*
-                * Remove pte page tables, so we can re-fault
-                * the page as huge.
-                */
-               retract_page_tables(mapping, start);
-
-               /* Everything is ready, let's unfreeze the new_page */
-               set_page_dirty(new_page);
                SetPageUptodate(new_page);
-               page_ref_unfreeze(new_page, HPAGE_PMD_NR);
+               page_ref_add(new_page, HPAGE_PMD_NR - 1);
+               set_page_dirty(new_page);
                mem_cgroup_commit_charge(new_page, memcg, false, true);
                lru_cache_add_anon(new_page);
-               unlock_page(new_page);
 
+               /*
+                * Remove pte page tables, so we can re-fault the page as huge.
+                */
+               retract_page_tables(mapping, start);
                *hpage = NULL;
 
                khugepaged_pages_collapsed++;
        } else {
                struct page *page;
+
                /* Something went wrong: roll back page cache changes */
-               shmem_uncharge(mapping->host, nr_none);
                xas_lock_irq(&xas);
+               mapping->nrpages -= nr_none;
+               shmem_uncharge(mapping->host, nr_none);
+
                xas_set(&xas, start);
                xas_for_each(&xas, page, end - 1) {
                        page = list_first_entry_or_null(&pagelist,
@@ -1519,19 +1542,18 @@ xa_unlocked:
                        xas_store(&xas, page);
                        xas_pause(&xas);
                        xas_unlock_irq(&xas);
-                       putback_lru_page(page);
                        unlock_page(page);
+                       putback_lru_page(page);
                        xas_lock_irq(&xas);
                }
                VM_BUG_ON(nr_none);
                xas_unlock_irq(&xas);
 
-               /* Unfreeze new_page, caller would take care about freeing it */
-               page_ref_unfreeze(new_page, 1);
                mem_cgroup_cancel_charge(new_page, memcg, true);
-               unlock_page(new_page);
                new_page->mapping = NULL;
        }
+
+       unlock_page(new_page);
 out:
        VM_BUG_ON(!list_empty(&pagelist));
        /* TODO: tracepoints */
index 7df468c8ebc8c0ada5b560ed70c9c821e5127c05..9a2d5ae81ae1cf4217ed3174d72667be276769da 100644 (file)
@@ -1179,7 +1179,7 @@ void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 /*
- * Common iterator interface used to define for_each_mem_range().
+ * Common iterator interface used to define for_each_mem_pfn_range().
  */
 void __init_memblock __next_mem_pfn_range(int *idx, int nid,
                                unsigned long *out_start_pfn,
index 54920cbc46bfdcb87b0a4ae3e6b4538596b6bd58..6e1469b80cb7d57d3cdf01ef9c917a21ece0b7fa 100644 (file)
@@ -2593,7 +2593,7 @@ int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
        struct mem_cgroup *memcg;
        int ret = 0;
 
-       if (memcg_kmem_bypass())
+       if (mem_cgroup_disabled() || memcg_kmem_bypass())
                return 0;
 
        memcg = get_mem_cgroup_from_current();
index 0cd3de3550f0830f507d286b0499789d7961171e..7c72f2a95785e0d3d5df615ea33477b0bdcc5278 100644 (file)
@@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
        LIST_HEAD(tokill);
        int rc = -EBUSY;
        loff_t start;
+       dax_entry_t cookie;
 
        /*
         * Prevent the inode from being freed while we are interrogating
@@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
         * also prevents changes to the mapping of this pfn until
         * poison signaling is complete.
         */
-       if (!dax_lock_mapping_entry(page))
+       cookie = dax_lock_page(page);
+       if (!cookie)
                goto out;
 
        if (hwpoison_filter(page)) {
@@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
        kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
        rc = 0;
 unlock:
-       dax_unlock_mapping_entry(page);
+       dax_unlock_page(page, cookie);
 out:
        /* drop pgmap ref acquired in caller */
        put_dev_pagemap(pgmap);
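This hunk tracks the DAX API rework: dax_lock_mapping_entry() becomes dax_lock_page(), which returns an opaque dax_entry_t cookie that must be handed back to dax_unlock_page(). A minimal sketch of the new calling convention:

#include <linux/dax.h>

/* A zero cookie means the lock was not taken; the cookie pairs the
 * unlock with the exact entry that was locked. */
static int poke_dax_page(struct page *page)
{
	dax_entry_t cookie = dax_lock_page(page);

	if (!cookie)
		return -EBUSY;
	/* ... interrogate page->mapping safely here ... */
	dax_unlock_page(page, cookie);
	return 0;
}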
index 61972da38d93cb54d6f1088df186ed20bb0f98bb..2b2b3ccbbfb5768a3d6b530799ebf5c4c3129688 100644 (file)
@@ -586,6 +586,7 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
        for (i = 0; i < sections_to_remove; i++) {
                unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
 
+               cond_resched();
                ret = __remove_section(zone, __pfn_to_section(pfn), map_offset,
                                altmap);
                map_offset = 0;
index cfd26d7e61a17f9c5fd260b85778058aa04b83e2..d4496d9d34f533dcd66accb7d92a69a03feae65c 100644 (file)
@@ -2053,8 +2053,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
                 * If the policy is interleave, or does not allow the current
                 * node in its nodemask, we allocate the standard way.
                 */
-               if (pol->mode == MPOL_PREFERRED &&
-                                               !(pol->flags & MPOL_F_LOCAL))
+               if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
                        hpage_node = pol->v.preferred_node;
 
                nmask = policy_nodemask(gfp, pol);
index a919ba5cb3c845e03e4a070eff354acf19ec7c4a..2ec9cc407216565f849b752484e731433fba146c 100644 (file)
@@ -4060,17 +4060,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        unsigned int cpuset_mems_cookie;
        int reserve_flags;
 
-       /*
-        * In the slowpath, we sanity check order to avoid ever trying to
-        * reclaim >= MAX_ORDER areas which will never succeed. Callers may
-        * be using allocators in order of preference for an area that is
-        * too large.
-        */
-       if (order >= MAX_ORDER) {
-               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
-               return NULL;
-       }
-
        /*
         * We also sanity check to catch abuse of atomic reserves being used by
         * callers that are not in atomic context.
@@ -4364,6 +4353,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
        gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = { };
 
+       /*
+        * There are several places where we assume that the order value is sane
+        * so bail out early if the request is out of bound.
+        */
+       if (unlikely(order >= MAX_ORDER)) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+               return NULL;
+       }
+
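Moving the order sanity check from the slowpath into __alloc_pages_nodemask() screens every caller, fast path included. A hedged illustration of the caller-side behaviour the guard assumes:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustration: a size large enough that get_order() reaches MAX_ORDER
 * now fails fast with NULL instead of attempting reclaim; __GFP_NOWARN
 * suppresses the WARN_ON_ONCE. */
static struct page *alloc_big(size_t size)
{
	return alloc_pages(GFP_KERNEL | __GFP_NOWARN, get_order(size));
}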
        gfp_mask &= gfp_allowed_mask;
        alloc_mask = gfp_mask;
        if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
@@ -5815,8 +5813,10 @@ void __meminit init_currently_empty_zone(struct zone *zone,
                                        unsigned long size)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
+       int zone_idx = zone_idx(zone) + 1;
 
-       pgdat->nr_zones = zone_idx(zone) + 1;
+       if (zone_idx > pgdat->nr_zones)
+               pgdat->nr_zones = zone_idx;
 
        zone->zone_start_pfn = zone_start_pfn;
 
@@ -7788,6 +7788,14 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                if (PageReserved(page))
                        goto unmovable;
 
+               /*
+                * If the zone is movable and we have ruled out all reserved
+                * pages then it should be reasonably safe to assume the rest
+                * is movable.
+                */
+               if (zone_idx(zone) == ZONE_MOVABLE)
+                       continue;
+
                /*
                 * Hugepages are not in LRU lists, but they're movable.
                 * We need not scan over tail pages because we don't
index a451ffa9491ca0c84d3932fb5af1d02ca18f8f62..d4d1c89bcdddcef43dfa04fa02926f21fba228fb 100644 (file)
@@ -294,7 +294,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                };
                struct iov_iter from;
 
-               iov_iter_bvec(&from, ITER_BVEC | WRITE, &bv, 1, PAGE_SIZE);
+               iov_iter_bvec(&from, WRITE, &bv, 1, PAGE_SIZE);
                init_sync_kiocb(&kiocb, swap_file);
                kiocb.ki_pos = page_file_offset(page);
 
@@ -339,7 +339,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
                goto out;
        }
        bio->bi_opf = REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc);
-       bio_associate_blkg_from_page(bio, page);
+       bio_associate_blkcg_from_page(bio, page);
        count_swpout_vm_event(page);
        set_page_writeback(page);
        unlock_page(page);
index f7e2a676365a10256368d3fe9e5dfccfbd5cc17e..f0c15e9017c02236e56cb71948d992c584226d0c 100644 (file)
@@ -17,6 +17,11 @@ static int __init early_page_poison_param(char *buf)
 }
 early_param("page_poison", early_page_poison_param);
 
+/**
+ * page_poisoning_enabled - check if page poisoning is enabled
+ *
+ * Return true if page poisoning is enabled, or false if not.
+ */
 bool page_poisoning_enabled(void)
 {
        /*
@@ -29,6 +34,7 @@ bool page_poisoning_enabled(void)
                (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
                debug_pagealloc_enabled()));
 }
+EXPORT_SYMBOL_GPL(page_poisoning_enabled);
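Alongside the new kernel-doc, the EXPORT_SYMBOL_GPL makes the predicate usable from modules. A trivial sketch of a modular caller:

#include <linux/mm.h>

/* Sketch only: a module asking whether freed pages are scrubbed with the
 * poison pattern before reuse. */
static bool freed_pages_are_poisoned(void)
{
	return page_poisoning_enabled();
}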
 
 static void poison_page(struct page *page)
 {
index a6b74c6fe0becd3ef42284aa643933ae4fb7f0cf..db86282fd024580cbf5c41f01cb6d5447a9e1791 100644 (file)
@@ -2591,7 +2591,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
        BUG_ON(ai->nr_groups != 1);
        upa = ai->alloc_size/ai->unit_size;
        nr_g0_units = roundup(num_possible_cpus(), upa);
-       if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
+       if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
                pcpu_free_alloc_info(ai);
                return -EINVAL;
        }
index 1e79fac3186b63208cbe37a8c05597c44d2234c9..85b7f94233526acc1863539caae49b6e640b3434 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1627,16 +1627,9 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                                      address + PAGE_SIZE);
                } else {
                        /*
-                        * We should not need to notify here as we reach this
-                        * case only from freeze_page() itself only call from
-                        * split_huge_page_to_list() so everything below must
-                        * be true:
-                        *   - page is not anonymous
-                        *   - page is locked
-                        *
-                        * So as it is a locked file back page thus it can not
-                        * be remove from the page cache and replace by a new
-                        * page before mmu_notifier_invalidate_range_end so no
+                        * This is a locked file-backed page, thus it cannot
+                        * be removed from the page cache and replaced by a new
+                        * page before mmu_notifier_invalidate_range_end, so no
                         * concurrent thread might update its page table to
                         * point at new page while a device still is using this
                         * page.
index 56bf122e0bb4ddf7b57548e7e4b4a33bbdf9a9ab..921f80488bb3fdd03cc7ce64a5a5a6d0f015b794 100644 (file)
@@ -297,12 +297,14 @@ bool shmem_charge(struct inode *inode, long pages)
        if (!shmem_inode_acct_block(inode, pages))
                return false;
 
+       /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
+       inode->i_mapping->nrpages += pages;
+
        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);
-       inode->i_mapping->nrpages += pages;
 
        return true;
 }
@@ -312,6 +314,8 @@ void shmem_uncharge(struct inode *inode, long pages)
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;
 
+       /* nrpages adjustment done by __delete_from_page_cache() or caller */
+
        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
@@ -1509,11 +1513,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 {
        struct page *oldpage, *newpage;
        struct address_space *swap_mapping;
+       swp_entry_t entry;
        pgoff_t swap_index;
        int error;
 
        oldpage = *pagep;
-       swap_index = page_private(oldpage);
+       entry.val = page_private(oldpage);
+       swap_index = swp_offset(entry);
        swap_mapping = page_mapping(oldpage);
 
        /*
@@ -1532,7 +1538,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
        __SetPageLocked(newpage);
        __SetPageSwapBacked(newpage);
        SetPageUptodate(newpage);
-       set_page_private(newpage, swap_index);
+       set_page_private(newpage, entry.val);
        SetPageSwapCache(newpage);
 
        /*
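
The shmem_replace_page() change reflects that page_private() of a swapcache page stores the whole swp_entry_t value, with the swap type and offset packed into one word, so the offset must be pulled out with swp_offset() and the full value copied to the replacement page. A self-contained sketch of that packing; the bit split is illustrative, not the kernel's exact layout:

#include <assert.h>

typedef struct { unsigned long val; } swp_entry_t;

#define SWP_TYPE_SHIFT 26       /* illustrative split; the kernel derives it */

static swp_entry_t swp_entry(unsigned long type, unsigned long offset)
{
        swp_entry_t e = { (type << SWP_TYPE_SHIFT) | offset };
        return e;
}

static unsigned long swp_type(swp_entry_t e)
{
        return e.val >> SWP_TYPE_SHIFT;
}

static unsigned long swp_offset(swp_entry_t e)
{
        return e.val & ((1UL << SWP_TYPE_SHIFT) - 1);
}

int main(void)
{
        swp_entry_t e = swp_entry(3, 12345);    /* what page_private() holds */

        assert(swp_type(e) == 3);
        assert(swp_offset(e) == 12345);         /* what the old code misread */
        return 0;
}
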
@@ -2214,6 +2220,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        struct page *page;
        pte_t _dst_pte, *dst_pte;
        int ret;
+       pgoff_t offset, max_off;
 
        ret = -ENOMEM;
        if (!shmem_inode_acct_block(inode, 1))
@@ -2236,7 +2243,7 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
                                *pagep = page;
                                shmem_inode_unacct_blocks(inode, 1);
                                /* don't free the page */
-                               return -EFAULT;
+                               return -ENOENT;
                        }
                } else {                /* mfill_zeropage_atomic */
                        clear_highpage(page);
@@ -2251,6 +2258,12 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        __SetPageSwapBacked(page);
        __SetPageUptodate(page);
 
+       ret = -EFAULT;
+       offset = linear_page_index(dst_vma, dst_addr);
+       max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+       if (unlikely(offset >= max_off))
+               goto out_release;
+
        ret = mem_cgroup_try_charge_delay(page, dst_mm, gfp, &memcg, false);
        if (ret)
                goto out_release;
@@ -2265,9 +2278,25 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        if (dst_vma->vm_flags & VM_WRITE)
                _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
+       else {
+               /*
+                * We don't set the pte dirty if the vma has no
+                * VM_WRITE permission, so mark the page dirty or it
+                * could be freed from under us. We could do it
+                * unconditionally before unlock_page(), but doing it
+                * only if VM_WRITE is not set is faster.
+                */
+               set_page_dirty(page);
+       }
 
-       ret = -EEXIST;
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+
+       ret = -EFAULT;
+       max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+       if (unlikely(offset >= max_off))
+               goto out_release_uncharge_unlock;
+
+       ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_release_uncharge_unlock;
 
@@ -2285,13 +2314,15 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
 
        /* No need to invalidate - it was non-present before */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);
-       unlock_page(page);
        pte_unmap_unlock(dst_pte, ptl);
+       unlock_page(page);
        ret = 0;
 out:
        return ret;
 out_release_uncharge_unlock:
        pte_unmap_unlock(dst_pte, ptl);
+       ClearPageDirty(page);
+       delete_from_page_cache(page);
 out_release_uncharge:
        mem_cgroup_cancel_charge(page, memcg, false);
 out_release:
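
Both new guards in shmem_mfill_atomic_pte() compare the faulting page index against the file size rounded up to whole pages, and the comparison is repeated after pte_offset_map_lock() because a concurrent truncate can shrink the file in between. The arithmetic in isolation (PAGE_SIZE and the test sizes are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* returns 1 when the page at 'offset' lies entirely past EOF,
 * mirroring the offset >= max_off test that yields -EFAULT above */
static int past_eof(unsigned long offset, unsigned long long i_size)
{
        unsigned long max_off = DIV_ROUND_UP(i_size, PAGE_SIZE);

        return offset >= max_off;
}

int main(void)
{
        printf("%d\n", past_eof(0, 1));         /* 0: page 0 covers byte 0 */
        printf("%d\n", past_eof(1, PAGE_SIZE)); /* 1: a 4096-byte file has only page 0 */
        return 0;
}
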
@@ -2563,9 +2594,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
        inode_lock(inode);
        /* We're holding i_mutex so we can access i_size directly */
 
-       if (offset < 0)
-               offset = -EINVAL;
-       else if (offset >= inode->i_size)
+       if (offset < 0 || offset >= inode->i_size)
                offset = -ENXIO;
        else {
                start = offset >> PAGE_SHIFT;
index 644f746e167acd65e8244088f483e50c71afbf20..8688ae65ef58ac639b0b2202039fa22577309350 100644 (file)
@@ -2813,7 +2813,7 @@ static struct swap_info_struct *alloc_swap_info(void)
        unsigned int type;
        int i;
 
-       p = kzalloc(sizeof(*p), GFP_KERNEL);
+       p = kvzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);
 
@@ -2824,7 +2824,7 @@ static struct swap_info_struct *alloc_swap_info(void)
        }
        if (type >= MAX_SWAPFILES) {
                spin_unlock(&swap_lock);
-               kfree(p);
+               kvfree(p);
                return ERR_PTR(-EPERM);
        }
        if (type >= nr_swapfiles) {
@@ -2838,7 +2838,7 @@ static struct swap_info_struct *alloc_swap_info(void)
                smp_wmb();
                nr_swapfiles++;
        } else {
-               kfree(p);
+               kvfree(p);
                p = swap_info[type];
                /*
                 * Do not memset this entry: a racing procfs swap_next()
index 45d68e90b7037669fce5b50849c4d5964451c2d4..798e7ccfb030be40fded9eb6fed45960e2956938 100644 (file)
@@ -517,9 +517,13 @@ void truncate_inode_pages_final(struct address_space *mapping)
                 */
                xa_lock_irq(&mapping->i_pages);
                xa_unlock_irq(&mapping->i_pages);
-
-               truncate_inode_pages(mapping, 0);
        }
+
+       /*
+        * Cleancache needs notification even if there are no pages or shadow
+        * entries.
+        */
+       truncate_inode_pages(mapping, 0);
 }
 EXPORT_SYMBOL(truncate_inode_pages_final);
 
index 5029f241908f48b50ad268169baab496cdbc976e..458acda96f2075472c3afcd6bc8a024684845019 100644 (file)
@@ -33,6 +33,8 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
        void *page_kaddr;
        int ret;
        struct page *page;
+       pgoff_t offset, max_off;
+       struct inode *inode;
 
        if (!*pagep) {
                ret = -ENOMEM;
@@ -48,7 +50,7 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
 
                /* fallback to copy_from_user outside mmap_sem */
                if (unlikely(ret)) {
-                       ret = -EFAULT;
+                       ret = -ENOENT;
                        *pagep = page;
                        /* don't free the page */
                        goto out;
@@ -73,8 +75,17 @@ static int mcopy_atomic_pte(struct mm_struct *dst_mm,
        if (dst_vma->vm_flags & VM_WRITE)
                _dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));
 
-       ret = -EEXIST;
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+       if (dst_vma->vm_file) {
+               /* the shmem MAP_PRIVATE case requires checking the i_size */
+               inode = dst_vma->vm_file->f_inode;
+               offset = linear_page_index(dst_vma, dst_addr);
+               max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+               ret = -EFAULT;
+               if (unlikely(offset >= max_off))
+                       goto out_release_uncharge_unlock;
+       }
+       ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_release_uncharge_unlock;
 
@@ -108,11 +119,22 @@ static int mfill_zeropage_pte(struct mm_struct *dst_mm,
        pte_t _dst_pte, *dst_pte;
        spinlock_t *ptl;
        int ret;
+       pgoff_t offset, max_off;
+       struct inode *inode;
 
        _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
                                         dst_vma->vm_page_prot));
-       ret = -EEXIST;
        dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
+       if (dst_vma->vm_file) {
+               /* the shmem MAP_PRIVATE case requires checking the i_size */
+               inode = dst_vma->vm_file->f_inode;
+               offset = linear_page_index(dst_vma, dst_addr);
+               max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
+               ret = -EFAULT;
+               if (unlikely(offset >= max_off))
+                       goto out_unlock;
+       }
+       ret = -EEXIST;
        if (!pte_none(*dst_pte))
                goto out_unlock;
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
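
mfill_zeropage_pte() gains the same i_size guard, placed after taking the page-table lock, since only the lock makes the answer stable against a racing truncate. A hedged userspace analog of the check-then-recheck-under-lock shape (all names invented for the sketch):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long max_off = 16;      /* shrinks under 'lock', like i_size under truncate */

/* returns 0 on success, -1 if 'offset' is (or became) out of bounds */
static int install(unsigned long offset)
{
        if (offset >= max_off)          /* optimistic check, lock not held */
                return -1;

        pthread_mutex_lock(&lock);
        if (offset >= max_off) {        /* recheck: the limit may have moved */
                pthread_mutex_unlock(&lock);
                return -1;
        }
        /* ... safe to install while the lock pins the limit ... */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        return install(3) == 0 ? 0 : 1;
}
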
@@ -205,8 +227,9 @@ retry:
                if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
                        goto out_unlock;
                /*
-                * Only allow __mcopy_atomic_hugetlb on userfaultfd
-                * registered ranges.
+                * Check that the vma is registered in uffd; this is
+                * required to enforce the VM_MAYWRITE check done at
+                * uffd registration time.
                 */
                if (!dst_vma->vm_userfaultfd_ctx.ctx)
                        goto out_unlock;
@@ -274,7 +297,7 @@ retry:
 
                cond_resched();
 
-               if (unlikely(err == -EFAULT)) {
+               if (unlikely(err == -ENOENT)) {
                        up_read(&dst_mm->mmap_sem);
                        BUG_ON(!page);
 
@@ -380,7 +403,17 @@ static __always_inline ssize_t mfill_atomic_pte(struct mm_struct *dst_mm,
 {
        ssize_t err;
 
-       if (vma_is_anonymous(dst_vma)) {
+       /*
+        * The normal page fault path for a shmem mapping will invoke
+        * the fault, fill the hole in the file and COW it right away.
+        * The result is plain anonymous memory. So when we are asked
+        * to fill a hole in a MAP_PRIVATE shmem mapping, we'll
+        * generate anonymous memory directly without actually filling
+        * the hole. For the MAP_PRIVATE case the robustness check
+        * only happens in the pagetable (to verify it's still none)
+        * and not in the radix tree.
+        */
+       if (!(dst_vma->vm_flags & VM_SHARED)) {
                if (!zeropage)
                        err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                               dst_addr, src_addr, page);
@@ -449,13 +482,9 @@ retry:
        if (!dst_vma)
                goto out_unlock;
        /*
-        * Be strict and only allow __mcopy_atomic on userfaultfd
-        * registered ranges to prevent userland errors going
-        * unnoticed. As far as the VM consistency is concerned, it
-        * would be perfectly safe to remove this check, but there's
-        * no useful usage for __mcopy_atomic ouside of userfaultfd
-        * registered ranges. This is after all why these are ioctls
-        * belonging to the userfaultfd and not syscalls.
+        * Check that the vma is registered in uffd; this is required
+        * to enforce the VM_MAYWRITE check done at uffd registration
+        * time.
         */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
                goto out_unlock;
@@ -489,7 +518,8 @@ retry:
         * dst_vma.
         */
        err = -ENOMEM;
-       if (vma_is_anonymous(dst_vma) && unlikely(anon_vma_prepare(dst_vma)))
+       if (!(dst_vma->vm_flags & VM_SHARED) &&
+           unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;
 
        while (src_addr < src_start + len) {
@@ -530,7 +560,7 @@ retry:
                                       src_addr, &page, zeropage);
                cond_resched();
 
-               if (unlikely(err == -EFAULT)) {
+               if (unlikely(err == -ENOENT)) {
                        void *page_kaddr;
 
                        up_read(&dst_mm->mmap_sem);
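
The -EFAULT to -ENOENT switch is an internal protocol between the copy helpers and this retry loop: -ENOENT now means "drop mmap_sem, copy the user data without the lock, then retry", which frees -EFAULT to report the new i_size failures to the caller. The control flow in miniature (try_fill() is a stand-in):

#include <errno.h>
#include <stdio.h>

/* first attempt fails in a retryable way; second succeeds */
static int try_fill(int attempt)
{
        return attempt == 0 ? -ENOENT : 0;
}

int main(void)
{
        int err, attempt = 0;

        do {
                err = try_fill(attempt++);
                if (err == -ENOENT) {
                        /* drop the lock, do the slow copy, retry */
                        continue;
                }
                break;                  /* 0 or a real error such as -EFAULT */
        } while (1);

        printf("err=%d after %d attempts\n", err, attempt);
        return err ? 1 : 0;
}
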
index 6038ce593ce3e1ca4cce34a02bc0bbd4c1d3a296..9c624595e90416bc9114fc11721e14d56183a1b6 100644 (file)
@@ -1827,12 +1827,13 @@ static bool need_update(int cpu)
 
                /*
                 * The fast way of checking if there are any vmstat diffs.
-                * This works because the diffs are byte sized items.
                 */
-               if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS))
+               if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
+                              sizeof(p->vm_stat_diff[0])))
                        return true;
 #ifdef CONFIG_NUMA
-               if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS))
+               if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
+                              sizeof(p->vm_numa_stat_diff[0])))
                        return true;
 #endif
        }
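
The vmstat fix is a byte-count bug: memchr_inv() scans bytes, and the NUMA counters are wider than a byte, so an array of nr items must be scanned for nr * sizeof(item) bytes. A demonstration with a local stand-in for the kernel-only memchr_inv():

#include <stdio.h>
#include <stddef.h>

/* minimal stand-in for the kernel's memchr_inv(): find the first byte
 * that differs from 'c', or return NULL if all 'n' bytes match */
static const void *memchr_inv(const void *s, int c, size_t n)
{
        const unsigned char *p = s;

        for (; n; n--, p++)
                if (*p != (unsigned char)c)
                        return p;
        return NULL;
}

int main(void)
{
        short diff[4] = { 0, 0, 0, 0x100 };     /* nonzero only in a high byte */

        /* buggy scan: 4 bytes only, never reaches diff[3] */
        printf("short scan: %s\n", memchr_inv(diff, 0, 4) ? "dirty" : "clean");
        /* fixed scan: the whole array */
        printf("full scan:  %s\n",
               memchr_inv(diff, 0, 4 * sizeof(diff[0])) ? "dirty" : "clean");
        return 0;
}
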
index 4b366d181f35d12f9a1e600bb7083bbf4dfe7fff..aee9b0b8d9078a0bbf59a06509c6f7f7aa1360f3 100644 (file)
@@ -99,6 +99,7 @@ struct z3fold_header {
 #define NCHUNKS                ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
 
 #define BUDDY_MASK     (0x3)
+#define BUDDY_SHIFT    2
 
 /**
  * struct z3fold_pool - stores metadata for each z3fold pool
@@ -145,7 +146,7 @@ enum z3fold_page_flags {
        MIDDLE_CHUNK_MAPPED,
        NEEDS_COMPACTING,
        PAGE_STALE,
-       UNDER_RECLAIM
+       PAGE_CLAIMED, /* by either reclaim or free */
 };
 
 /*****************
@@ -174,7 +175,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
        clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
        clear_bit(NEEDS_COMPACTING, &page->private);
        clear_bit(PAGE_STALE, &page->private);
-       clear_bit(UNDER_RECLAIM, &page->private);
+       clear_bit(PAGE_CLAIMED, &page->private);
 
        spin_lock_init(&zhdr->page_lock);
        kref_init(&zhdr->refcount);
@@ -223,8 +224,11 @@ static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
        unsigned long handle;
 
        handle = (unsigned long)zhdr;
-       if (bud != HEADLESS)
-               handle += (bud + zhdr->first_num) & BUDDY_MASK;
+       if (bud != HEADLESS) {
+               handle |= (bud + zhdr->first_num) & BUDDY_MASK;
+               if (bud == LAST)
+                       handle |= (zhdr->last_chunks << BUDDY_SHIFT);
+       }
        return handle;
 }
 
@@ -234,6 +238,12 @@ static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
        return (struct z3fold_header *)(handle & PAGE_MASK);
 }
 
+/* only for LAST bud, returns zero otherwise */
+static unsigned short handle_to_chunks(unsigned long handle)
+{
+       return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
+}
+
 /*
  * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
  *  but that doesn't matter. because the masking will result in the
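
encode_handle() now ORs the buddy index into the low BUDDY_MASK bits and, for LAST buddies, stores the chunk count in the bits above BUDDY_SHIFT, so z3fold_map() further down can recover the size from the handle itself instead of trusting zhdr->last_chunks, which a racing free may have cleared. A compilable sketch of the layout, assuming 4 KiB pages and omitting the first_num twist of the real code:

#include <assert.h>

#define PAGE_MASK   (~4095UL)   /* 4 KiB pages, for the sketch */
#define BUDDY_MASK  0x3UL
#define BUDDY_SHIFT 2

static unsigned long encode(unsigned long page_addr, unsigned bud,
                            unsigned short last_chunks)
{
        unsigned long handle = page_addr;       /* page-aligned header address */

        handle |= bud & BUDDY_MASK;
        handle |= (unsigned long)last_chunks << BUDDY_SHIFT;
        return handle;
}

static unsigned short handle_to_chunks(unsigned long handle)
{
        return (handle & ~PAGE_MASK) >> BUDDY_SHIFT;
}

int main(void)
{
        unsigned long h = encode(0x1000, 2, 37);

        assert((h & PAGE_MASK) == 0x1000);      /* header address survives */
        assert((h & BUDDY_MASK) == 2);          /* buddy id in the low bits */
        assert(handle_to_chunks(h) == 37);      /* chunk count above them */
        return 0;
}
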
@@ -720,37 +730,39 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
        page = virt_to_page(zhdr);
 
        if (test_bit(PAGE_HEADLESS, &page->private)) {
-               /* HEADLESS page stored */
-               bud = HEADLESS;
-       } else {
-               z3fold_page_lock(zhdr);
-               bud = handle_to_buddy(handle);
-
-               switch (bud) {
-               case FIRST:
-                       zhdr->first_chunks = 0;
-                       break;
-               case MIDDLE:
-                       zhdr->middle_chunks = 0;
-                       zhdr->start_middle = 0;
-                       break;
-               case LAST:
-                       zhdr->last_chunks = 0;
-                       break;
-               default:
-                       pr_err("%s: unknown bud %d\n", __func__, bud);
-                       WARN_ON(1);
-                       z3fold_page_unlock(zhdr);
-                       return;
+               /* if a headless page is under reclaim, just leave.
+                * NB: we use test_and_set_bit for a reason: if the bit
+                * has not been set before, we release this page
+                * immediately so we don't care about its value any more.
+                */
+               if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+                       spin_lock(&pool->lock);
+                       list_del(&page->lru);
+                       spin_unlock(&pool->lock);
+                       free_z3fold_page(page);
+                       atomic64_dec(&pool->pages_nr);
                }
+               return;
        }
 
-       if (bud == HEADLESS) {
-               spin_lock(&pool->lock);
-               list_del(&page->lru);
-               spin_unlock(&pool->lock);
-               free_z3fold_page(page);
-               atomic64_dec(&pool->pages_nr);
+       /* Non-headless case */
+       z3fold_page_lock(zhdr);
+       bud = handle_to_buddy(handle);
+
+       switch (bud) {
+       case FIRST:
+               zhdr->first_chunks = 0;
+               break;
+       case MIDDLE:
+               zhdr->middle_chunks = 0;
+               break;
+       case LAST:
+               zhdr->last_chunks = 0;
+               break;
+       default:
+               pr_err("%s: unknown bud %d\n", __func__, bud);
+               WARN_ON(1);
+               z3fold_page_unlock(zhdr);
                return;
        }
 
@@ -758,7 +770,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                atomic64_dec(&pool->pages_nr);
                return;
        }
-       if (test_bit(UNDER_RECLAIM, &page->private)) {
+       if (test_bit(PAGE_CLAIMED, &page->private)) {
                z3fold_page_unlock(zhdr);
                return;
        }
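
PAGE_CLAIMED collapses the free-versus-reclaim race into one atomic claim: whichever path wins test_and_set_bit() owns releasing the page, and the loser backs off. The same idiom with C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag claimed = ATOMIC_FLAG_INIT;

/* returns 1 if this caller won the claim and must release the page */
static int try_claim(void)
{
        return !atomic_flag_test_and_set(&claimed);
}

int main(void)
{
        /* free and reclaim both race here; exactly one wins */
        printf("first claimant:  %d\n", try_claim());   /* 1: owns the free */
        printf("second claimant: %d\n", try_claim());   /* 0: backs off */
        return 0;
}
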
@@ -836,20 +848,30 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                }
                list_for_each_prev(pos, &pool->lru) {
                        page = list_entry(pos, struct page, lru);
+
+                       /* this bit could have been set by free, in which case
+                        * we pass over to the next page in the pool.
+                        */
+                       if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+                               continue;
+
+                       zhdr = page_address(page);
                        if (test_bit(PAGE_HEADLESS, &page->private))
-                               /* candidate found */
                                break;
 
-                       zhdr = page_address(page);
-                       if (!z3fold_page_trylock(zhdr))
+                       if (!z3fold_page_trylock(zhdr)) {
+                               zhdr = NULL;
                                continue; /* can't evict at this point */
+                       }
                        kref_get(&zhdr->refcount);
                        list_del_init(&zhdr->buddy);
                        zhdr->cpu = -1;
-                       set_bit(UNDER_RECLAIM, &page->private);
                        break;
                }
 
+               if (!zhdr)
+                       break;
+
                list_del_init(&page->lru);
                spin_unlock(&pool->lock);
 
@@ -898,6 +920,7 @@ next:
                if (test_bit(PAGE_HEADLESS, &page->private)) {
                        if (ret == 0) {
                                free_z3fold_page(page);
+                               atomic64_dec(&pool->pages_nr);
                                return 0;
                        }
                        spin_lock(&pool->lock);
@@ -905,7 +928,7 @@ next:
                        spin_unlock(&pool->lock);
                } else {
                        z3fold_page_lock(zhdr);
-                       clear_bit(UNDER_RECLAIM, &page->private);
+                       clear_bit(PAGE_CLAIMED, &page->private);
                        if (kref_put(&zhdr->refcount,
                                        release_z3fold_page_locked)) {
                                atomic64_dec(&pool->pages_nr);
@@ -964,7 +987,7 @@ static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
                set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
                break;
        case LAST:
-               addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
+               addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
                break;
        default:
                pr_err("unknown buddy id %d\n", buddy);
index 5f23e18eecc02f32ac80ddd85658006f9ad15430..2c9a17b9b46bb344897691d5f53c94a90e25c3c8 100644 (file)
@@ -2066,7 +2066,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
        struct kvec kv = {.iov_base = data, .iov_len = count};
        struct iov_iter to;
 
-       iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count);
+       iov_iter_kvec(&to, READ, &kv, 1, count);
 
        p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
                                fid->fid, (unsigned long long) offset, count);
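
This hunk, and the matching ones in the bluetooth and ceph sections below, track a VFS-wide API change: iov_iter_kvec() and iov_iter_bvec() now take only the data direction, the iterator type being implied by the constructor, and callers ask iov_iter_is_kvec() instead of testing flag bits. A sketch of the shape of that interface (simplified types, not the real struct iov_iter):

/* before: callers passed direction | ITER_KVEC and could mis-set either;
 * after:  the constructor fixes the type and takes only the direction */

enum iter_type { ITER_IOVEC, ITER_KVEC, ITER_BVEC };
enum direction { READ_DIR, WRITE_DIR };

struct iter {
        enum iter_type type;
        enum direction dir;
};

static void iter_kvec_init(struct iter *it, enum direction dir)
{
        it->type = ITER_KVEC;   /* implied, no flag to get wrong */
        it->dir = dir;
}

static int iter_is_kvec(const struct iter *it)
{
        return it->type == ITER_KVEC;   /* accessor instead of bit tests */
}

int main(void)
{
        struct iter it;

        iter_kvec_init(&it, READ_DIR);
        return iter_is_kvec(&it) ? 0 : 1;
}
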
index eb596c2ed546ca5555a2100426eebb67205d99b1..b1d39cabf125a7f90b7029f4355a0a7025af526d 100644 (file)
@@ -329,7 +329,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
        if (!iov_iter_count(data))
                return 0;
 
-       if (!(data->type & ITER_KVEC)) {
+       if (!iov_iter_is_kvec(data)) {
                int n;
                /*
                 * We allow only p9_max_pages pinned. We wait for the
index 9f481cfdf77dace2fa594585fde5079d46474c66..e8090f099eb805626ffaa1445d3d0f8254f6c9a7 100644 (file)
@@ -352,19 +352,21 @@ out:
  */
 int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
 {
+       static const size_t tvlv_padding = sizeof(__be32);
        struct batadv_elp_packet *elp_packet;
        unsigned char *elp_buff;
        u32 random_seqno;
        size_t size;
        int res = -ENOMEM;
 
-       size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
+       size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
        hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
        if (!hard_iface->bat_v.elp_skb)
                goto out;
 
        skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
-       elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
+       elp_buff = skb_put_zero(hard_iface->bat_v.elp_skb,
+                               BATADV_ELP_HLEN + tvlv_padding);
        elp_packet = (struct batadv_elp_packet *)elp_buff;
 
        elp_packet->packet_type = BATADV_ELP;
index 0fddc17106bd8a0e3f064fee9adba7c226f34682..5b71a289d04fc80de6c20e7a24d621727c77825a 100644 (file)
@@ -275,7 +275,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
        kfree(entry);
 
        packet = (struct batadv_frag_packet *)skb_out->data;
-       size = ntohs(packet->total_size);
+       size = ntohs(packet->total_size) + hdr_size;
 
        /* Make room for the rest of the fragments. */
        if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
index 4e2576fc0c59932cbb1f3c98c150764b91bb52c1..828e87fe802788d13f13f39efc68ce7558dfb245 100644 (file)
@@ -467,7 +467,7 @@ static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb,
        iv.iov_len = skb->len;
 
        memset(&msg, 0, sizeof(msg));
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, skb->len);
+       iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, skb->len);
 
        err = l2cap_chan_send(chan, &msg, skb->len);
        if (err > 0) {
index 51c2cf2d8923ae8dcb174355f26b0b08634a3892..58fc6333d41225dcb967052eec798af839013e0b 100644 (file)
@@ -63,7 +63,7 @@ static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *dat
 
        memset(&msg, 0, sizeof(msg));
 
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iv, 1, total_len);
+       iov_iter_kvec(&msg.msg_iter, WRITE, &iv, 1, total_len);
 
        l2cap_chan_send(chan, &msg, total_len);
 
index a1c1b7e8a45ca6d6c44de507d7a5ff232d776edc..c822e626761bd0ecb51e1b1de1f8dfa5b98b3568 100644 (file)
@@ -622,7 +622,7 @@ static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
 
        memset(&msg, 0, sizeof(msg));
 
-       iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, iv, 2, 1 + len);
+       iov_iter_kvec(&msg.msg_iter, WRITE, iv, 2, 1 + len);
 
        l2cap_chan_send(chan, &msg, 1 + len);
 
index c89c22c49015ff070f228bece397937d7cdce8b5..25001913d03b599dde50e85a20de61156465359b 100644 (file)
@@ -28,12 +28,13 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
        return ret;
 }
 
-static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
+static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
+                       u32 *time)
 {
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
-       u32 ret = 0, i;
+       u32 i;
 
        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@ -49,7 +50,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
                repeat = 1;
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
-               ret = bpf_test_run_one(prog, ctx, storage);
+               *ret = bpf_test_run_one(prog, ctx, storage);
                if (need_resched()) {
                        if (signal_pending(current))
                                break;
@@ -65,7 +66,7 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);
 
-       return ret;
+       return 0;
 }
 
 static int bpf_test_finish(const union bpf_attr *kattr,
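
bpf_test_run() used to return the program's verdict directly, leaving no clean channel for failures inside the runner itself, such as the cgroup storage allocation a few lines up. The refactor sends the verdict through an out-parameter and reserves the return value for 0 or -errno, which the callers below now check before touching any results. In miniature:

#include <errno.h>
#include <stdio.h>

/* the verdict travels through *ret; the return value reports only
 * whether the test infrastructure itself worked */
static int test_run(unsigned int (*prog)(void), unsigned int *ret)
{
        if (!prog)
                return -ENOMEM;         /* infrastructure failure */
        *ret = prog();                  /* program verdict */
        return 0;
}

static unsigned int always_two(void) { return 2; }

int main(void)
{
        unsigned int verdict;
        int err = test_run(always_two, &verdict);

        if (err) {
                fprintf(stderr, "run failed: %d\n", err);
                return 1;
        }
        printf("verdict=%u\n", verdict);
        return 0;
}
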
@@ -165,7 +166,12 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
-       retval = bpf_test_run(prog, skb, repeat, &duration);
+       ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
+       if (ret) {
+               kfree_skb(skb);
+               kfree(sk);
+               return ret;
+       }
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@ -212,11 +218,14 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;
 
-       retval = bpf_test_run(prog, &xdp, repeat, &duration);
+       ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
+       if (ret)
+               goto out;
        if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
            xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
+out:
        kfree(data);
        return ret;
 }
index 2920e06a540329ffb8f1286c65bc5acc6f53afad..04c19a37e500e872d564497c9410509c0df61fce 100644 (file)
@@ -102,12 +102,18 @@ struct br_tunnel_info {
        struct metadata_dst     *tunnel_dst;
 };
 
+/* private vlan flags */
+enum {
+       BR_VLFLAG_PER_PORT_STATS = BIT(0),
+};
+
 /**
  * struct net_bridge_vlan - per-vlan entry
  *
  * @vnode: rhashtable member
  * @vid: VLAN id
  * @flags: bridge vlan flags
+ * @priv_flags: private (in-kernel) bridge vlan flags
  * @stats: per-cpu VLAN statistics
  * @br: if MASTER flag set, this points to a bridge struct
  * @port: if MASTER flag unset, this points to a port struct
@@ -127,6 +133,7 @@ struct net_bridge_vlan {
        struct rhash_head               tnode;
        u16                             vid;
        u16                             flags;
+       u16                             priv_flags;
        struct br_vlan_stats __percpu   *stats;
        union {
                struct net_bridge       *br;
index 8c9297a019475fe68ee110a85cda1647deaf9075..e84be08b82854916d91bc0195ad2a9ab7dae8564 100644 (file)
@@ -197,7 +197,7 @@ static void nbp_vlan_rcu_free(struct rcu_head *rcu)
        v = container_of(rcu, struct net_bridge_vlan, rcu);
        WARN_ON(br_vlan_is_master(v));
        /* if we had per-port stats configured then free them here */
-       if (v->brvlan->stats != v->stats)
+       if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
                free_percpu(v->stats);
        v->stats = NULL;
        kfree(v);
@@ -264,6 +264,7 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
                                err = -ENOMEM;
                                goto out_filt;
                        }
+                       v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
                } else {
                        v->stats = masterv->stats;
                }
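
The bridge fix swaps a pointer comparison (v->brvlan->stats != v->stats) for an ownership bit recorded at allocation time, which stays correct even if the master's stats pointer is changed later. The idiom in isolation, with invented names:

#include <stdlib.h>

#define VLFLAG_PER_PORT_STATS 0x1       /* illustrative private flag */

struct vlan {
        unsigned short priv_flags;
        long *stats;                    /* owned only if the flag is set */
};

static int vlan_init(struct vlan *v, long *shared_stats)
{
        if (!shared_stats) {
                v->stats = calloc(1, sizeof(*v->stats));
                if (!v->stats)
                        return -1;
                v->priv_flags |= VLFLAG_PER_PORT_STATS; /* we own it */
        } else {
                v->stats = shared_stats;                /* borrowed */
        }
        return 0;
}

static void vlan_free(struct vlan *v)
{
        if (v->priv_flags & VLFLAG_PER_PORT_STATS)
                free(v->stats);         /* free only what we allocated */
        v->stats = NULL;
}

int main(void)
{
        struct vlan v = { 0 };

        if (vlan_init(&v, NULL))
                return 1;
        vlan_free(&v);
        return 0;
}
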
index 1051eee8258184f33d15a6142ee8b387839c9adc..3aab7664933fdc67770cdbc8b80a231b41dc2555 100644 (file)
@@ -745,18 +745,19 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        } else
                ifindex = ro->ifindex;
 
-       if (ro->fd_frames) {
+       dev = dev_get_by_index(sock_net(sk), ifindex);
+       if (!dev)
+               return -ENXIO;
+
+       err = -EINVAL;
+       if (ro->fd_frames && dev->mtu == CANFD_MTU) {
                if (unlikely(size != CANFD_MTU && size != CAN_MTU))
-                       return -EINVAL;
+                       goto put_dev;
        } else {
                if (unlikely(size != CAN_MTU))
-                       return -EINVAL;
+                       goto put_dev;
        }
 
-       dev = dev_get_by_index(sock_net(sk), ifindex);
-       if (!dev)
-               return -ENXIO;
-
        skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
                                  msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
index 88e35830198cd3ed47b6655c6e31b783bddaf030..2f126eff275d58417d2397b15e7fcef2351cdde5 100644 (file)
@@ -513,7 +513,7 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
        if (!buf)
                msg.msg_flags |= MSG_TRUNC;
 
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
+       iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, len);
        r = sock_recvmsg(sock, &msg, msg.msg_flags);
        if (r == -EAGAIN)
                r = 0;
@@ -532,7 +532,7 @@ static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
        int r;
 
        BUG_ON(page_offset + length > PAGE_SIZE);
-       iov_iter_bvec(&msg.msg_iter, READ | ITER_BVEC, &bvec, 1, length);
+       iov_iter_bvec(&msg.msg_iter, READ, &bvec, 1, length);
        r = sock_recvmsg(sock, &msg, msg.msg_flags);
        if (r == -EAGAIN)
                r = 0;
@@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
        struct bio_vec bvec;
        int ret;
 
-       /* sendpage cannot properly handle pages with page_count == 0,
-        * we need to fallback to sendmsg if that's the case */
-       if (page_count(page) >= 1)
+       /*
+        * sendpage cannot properly handle pages with page_count == 0;
+        * we need to fall back to sendmsg if that's the case.
+        *
+        * The same goes for slab pages: skb_can_coalesce() allows
+        * coalescing neighboring slab objects into a single frag, which
+        * triggers one of the hardened usercopy checks.
+        */
+       if (page_count(page) >= 1 && !PageSlab(page))
                return __ceph_tcp_sendpage(sock, page, offset, size, more);
 
        bvec.bv_page = page;
@@ -594,7 +600,7 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */
 
-       iov_iter_bvec(&msg.msg_iter, WRITE | ITER_BVEC, &bvec, 1, size);
+       iov_iter_bvec(&msg.msg_iter, WRITE, &bvec, 1, size);
        ret = sock_sendmsg(sock, &msg);
        if (ret == -EAGAIN)
                ret = 0;
index 77d43ae2a7bbe1267f8430d5c35637d1984f463c..722d50dbf8a459f412e8c20982600a28edf695a0 100644 (file)
@@ -2175,6 +2175,20 @@ static bool remove_xps_queue_cpu(struct net_device *dev,
        return active;
 }
 
+static void reset_xps_maps(struct net_device *dev,
+                          struct xps_dev_maps *dev_maps,
+                          bool is_rxqs_map)
+{
+       if (is_rxqs_map) {
+               static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
+               RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
+       } else {
+               RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+       }
+       static_key_slow_dec_cpuslocked(&xps_needed);
+       kfree_rcu(dev_maps, rcu);
+}
+
 static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
                           struct xps_dev_maps *dev_maps, unsigned int nr_ids,
                           u16 offset, u16 count, bool is_rxqs_map)
@@ -2186,18 +2200,15 @@ static void clean_xps_maps(struct net_device *dev, const unsigned long *mask,
             j < nr_ids;)
                active |= remove_xps_queue_cpu(dev, dev_maps, j, offset,
                                               count);
-       if (!active) {
-               if (is_rxqs_map) {
-                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-               } else {
-                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
+       if (!active)
+               reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
-                       for (i = offset + (count - 1); count--; i--)
-                               netdev_queue_numa_node_write(
-                                       netdev_get_tx_queue(dev, i),
-                                                       NUMA_NO_NODE);
+       if (!is_rxqs_map) {
+               for (i = offset + (count - 1); count--; i--) {
+                       netdev_queue_numa_node_write(
+                               netdev_get_tx_queue(dev, i),
+                               NUMA_NO_NODE);
                }
-               kfree_rcu(dev_maps, rcu);
        }
 }
 
@@ -2234,10 +2245,6 @@ static void netif_reset_xps_queues(struct net_device *dev, u16 offset,
                       false);
 
 out_no_maps:
-       if (static_key_enabled(&xps_rxqs_needed))
-               static_key_slow_dec_cpuslocked(&xps_rxqs_needed);
-
-       static_key_slow_dec_cpuslocked(&xps_needed);
        mutex_unlock(&xps_map_mutex);
        cpus_read_unlock();
 }
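
The XPS accounting bug was an unbalanced reference count: the setter bumped the static keys on every update while teardown dropped them once. The fix increments only when a map first comes into existence and pairs the decrement with the map's destruction inside reset_xps_maps(). The invariant, reduced to a plain counter:

#include <assert.h>

static int key_count;                   /* stands in for the static key */
static void *cpus_map;

static void map_update(void *new_map)
{
        if (!cpus_map)
                key_count++;            /* first map only, not every update */
        cpus_map = new_map;
}

static void map_reset(void)
{
        cpus_map = 0;
        key_count--;                    /* paired with the single increment */
}

int main(void)
{
        int a, b;

        map_update(&a);
        map_update(&b);                 /* no second increment */
        map_reset();
        assert(key_count == 0);         /* balanced */
        return 0;
}
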
@@ -2355,9 +2362,12 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
        if (!new_dev_maps)
                goto out_no_new_maps;
 
-       static_key_slow_inc_cpuslocked(&xps_needed);
-       if (is_rxqs_map)
-               static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+       if (!dev_maps) {
+               /* Increment static keys at most once per type */
+               static_key_slow_inc_cpuslocked(&xps_needed);
+               if (is_rxqs_map)
+                       static_key_slow_inc_cpuslocked(&xps_rxqs_needed);
+       }
 
        for (j = -1; j = netif_attrmask_next(j, possible_mask, nr_ids),
             j < nr_ids;) {
@@ -2455,13 +2465,8 @@ out_no_new_maps:
        }
 
        /* free map if not active */
-       if (!active) {
-               if (is_rxqs_map)
-                       RCU_INIT_POINTER(dev->xps_rxqs_map, NULL);
-               else
-                       RCU_INIT_POINTER(dev->xps_cpus_map, NULL);
-               kfree_rcu(dev_maps, rcu);
-       }
+       if (!active)
+               reset_xps_maps(dev, dev_maps, is_rxqs_map);
 
 out_no_maps:
        mutex_unlock(&xps_map_mutex);
@@ -3272,7 +3277,7 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *de
                }
 
                skb = next;
-               if (netif_xmit_stopped(txq) && skb) {
+               if (netif_tx_queue_stopped(txq) && skb) {
                        rc = NETDEV_TX_BUSY;
                        break;
                }
@@ -5009,7 +5014,7 @@ static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemallo
                struct net_device *orig_dev = skb->dev;
                struct packet_type *pt_prev = NULL;
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                __netif_receive_skb_core(skb, pfmemalloc, &pt_prev);
                if (!pt_prev)
                        continue;
@@ -5165,7 +5170,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                net_timestamp_check(netdev_tstamp_prequeue, skb);
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                if (!skb_defer_rx_timestamp(skb))
                        list_add_tail(&skb->list, &sublist);
        }
@@ -5176,7 +5181,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
                rcu_read_lock();
                list_for_each_entry_safe(skb, next, head, list) {
                        xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-                       list_del(&skb->list);
+                       skb_list_del_init(skb);
                        if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
                                list_add_tail(&skb->list, &sublist);
                }
@@ -5195,7 +5200,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
 
                        if (cpu >= 0) {
                                /* Will be handled, remove from list */
-                               list_del(&skb->list);
+                               skb_list_del_init(skb);
                                enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
                        }
                }
@@ -5655,6 +5660,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+
+       /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+       skb->pkt_type = PACKET_HOST;
+
        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -5966,11 +5975,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                if (work_done)
                        timeout = n->dev->gro_flush_timeout;
 
+               /* When the NAPI instance uses a timeout and keeps postponing
+                * it, we need to somehow bound the time packets are kept in
+                * the GRO layer.
+                */
+               napi_gro_flush(n, !!timeout);
                if (timeout)
                        hrtimer_start(&n->timer, ns_to_ktime(timeout),
                                      HRTIMER_MODE_REL_PINNED);
-               else
-                       napi_gro_flush(n, false);
        }
        if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
@@ -6197,8 +6209,8 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
        napi->skb = NULL;
        napi->poll = poll;
        if (weight > NAPI_POLL_WEIGHT)
-               pr_err_once("netif_napi_add() called with weight %d on device %s\n",
-                           weight, dev->name);
+               netdev_err_once(dev, "%s() called with weight %d\n", __func__,
+                               weight);
        napi->weight = weight;
        list_add(&napi->dev_list, &dev->napi_list);
        napi->dev = dev;
index e521c5ebc7d11cdfdcc10307ad973bcac2d1602a..8d2c629501e2df10b0b4113986cac71369863789 100644 (file)
@@ -4852,18 +4852,17 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
        } else {
                struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
                struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
-               u16 hnum = ntohs(tuple->ipv6.dport);
                int sdif = inet6_sdif(skb);
 
                if (proto == IPPROTO_TCP)
                        sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
                                            src6, tuple->ipv6.sport,
-                                           dst6, hnum,
+                                           dst6, ntohs(tuple->ipv6.dport),
                                            dif, sdif, &refcounted);
                else if (likely(ipv6_bpf_stub))
                        sk = ipv6_bpf_stub->udp6_lib_lookup(net,
                                                            src6, tuple->ipv6.sport,
-                                                           dst6, hnum,
+                                                           dst6, tuple->ipv6.dport,
                                                            dif, sdif,
                                                            &udp_table, skb);
 #endif
@@ -4891,22 +4890,23 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
        struct net *net;
 
        family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
-       if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
+       if (unlikely(family == AF_UNSPEC || flags ||
+                    !((s32)netns_id < 0 || netns_id <= S32_MAX)))
                goto out;
 
        if (skb->dev)
                caller_net = dev_net(skb->dev);
        else
                caller_net = sock_net(skb->sk);
-       if (netns_id) {
+       if ((s32)netns_id < 0) {
+               net = caller_net;
+               sk = sk_lookup(net, tuple, skb, family, proto);
+       } else {
                net = get_net_ns_by_id(caller_net, netns_id);
                if (unlikely(!net))
                        goto out;
                sk = sk_lookup(net, tuple, skb, family, proto);
                put_net(net);
-       } else {
-               net = caller_net;
-               sk = sk_lookup(net, tuple, skb, family, proto);
        }
 
        if (sk)
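
The widened netns_id handling treats any value with the sign bit set as "use the caller's own netns", leaving 0 through S32_MAX as real namespace ids, so namespace 0 is no longer overloaded as a sentinel. The sign-bit test in isolation:

#include <stdio.h>

/* netns_id arrives as a u32; reinterpreting it as s32 turns the whole
 * upper half of the range into a "caller's namespace" sentinel */
static int use_caller_netns(unsigned int netns_id)
{
        return (int)netns_id < 0;
}

int main(void)
{
        printf("%d\n", use_caller_netns(0));            /* 0: namespace id 0 */
        printf("%d\n", use_caller_netns(0xffffffffu));  /* 1: caller's netns */
        return 0;
}
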
@@ -5436,8 +5436,8 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
                if (size != size_default)
                        return false;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
-               if (size != sizeof(struct bpf_flow_keys *))
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+               if (size != sizeof(__u64))
                        return false;
                break;
        default:
@@ -5465,7 +5465,7 @@ static bool sk_filter_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_meta):
        case bpf_ctx_range(struct __sk_buff, data_end):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
                return false;
        }
@@ -5490,7 +5490,7 @@ static bool cg_skb_is_valid_access(int off, int size,
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5531,7 +5531,7 @@ static bool lwt_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                return false;
        }
 
@@ -5757,7 +5757,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
                return false;
        }
@@ -5959,7 +5959,7 @@ static bool sk_skb_is_valid_access(int off, int size,
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                return false;
        }
 
@@ -6040,7 +6040,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
        case bpf_ctx_range(struct __sk_buff, tc_classid):
index 676f3ad629f95625422aa55f0f54157001ac477c..588f475019d47c9d6bae8883acebab48aaf63b48 100644 (file)
@@ -1166,8 +1166,8 @@ ip_proto_again:
                break;
        }
 
-       if (dissector_uses_key(flow_dissector,
-                              FLOW_DISSECTOR_KEY_PORTS)) {
+       if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+           !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
                key_ports = skb_flow_dissector_target(flow_dissector,
                                                      FLOW_DISSECTOR_KEY_PORTS,
                                                      target_container);
index 5da9552b186bc853904f7c85bbf872925463896c..2b9fdbc43205f3d8cf826b2074493aa5e72401fb 100644 (file)
@@ -717,7 +717,8 @@ int netpoll_setup(struct netpoll *np)
 
                                read_lock_bh(&idev->lock);
                                list_for_each_entry(ifp, &idev->addr_list, if_list) {
-                                       if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+                                       if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+                                           !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
                                                continue;
                                        np->local_ip.in6 = ifp->addr;
                                        err = 0;
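
The netpoll fix picks a local IPv6 address whose scope matches the remote's: the !!(...) double negation collapses each flag test to 0 or 1 so the two tests can be compared for equality, meaning both addresses are link-local or neither is. Reduced to the idiom, with an invented flag value:

#include <assert.h>

#define ADDR_LINKLOCAL 0x0020   /* illustrative flag bit */

/* 1 when both or neither address type is link-local */
static int same_scope(unsigned a_type, unsigned b_type)
{
        return !!(a_type & ADDR_LINKLOCAL) == !!(b_type & ADDR_LINKLOCAL);
}

int main(void)
{
        assert(same_scope(0x0020, 0x0120));     /* both link-local */
        assert(same_scope(0x0001, 0x0002));     /* neither */
        assert(!same_scope(0x0020, 0x0002));    /* mixed: skip this address */
        return 0;
}
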
index f679c7a7d761a60b22f733a443e77b54cb51595f..7819f7804eeb80fcac76f32ee1aacdc0a180d6ca 100644 (file)
@@ -3367,7 +3367,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
                        cb->seq = 0;
                }
                ret = dumpit(skb, cb);
-               if (ret < 0)
+               if (ret)
                        break;
        }
        cb->family = idx;
@@ -3600,6 +3600,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
                return -EINVAL;
        }
 
+       if (dev->type != ARPHRD_ETHER) {
+               NL_SET_ERR_MSG(extack, "FDB add only supported for Ethernet devices");
+               return -EINVAL;
+       }
+
        addr = nla_data(tb[NDA_LLADDR]);
 
        err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
@@ -3704,6 +3709,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
                return -EINVAL;
        }
 
+       if (dev->type != ARPHRD_ETHER) {
+               NL_SET_ERR_MSG(extack, "FDB delete only supported for Ethernet devices");
+               return -EINVAL;
+       }
+
        addr = nla_data(tb[NDA_LLADDR]);
 
        err = fdb_vid_parse(tb[NDA_VLAN], &vid, extack);
@@ -3790,6 +3800,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
 {
        int err;
 
+       if (dev->type != ARPHRD_ETHER)
+               return -EINVAL;
+
        netif_addr_lock_bh(dev);
        err = nlmsg_populate_fdb(skb, cb, dev, idx, &dev->uc);
        if (err)
index 946de0e24c876bbbe63de71b5c7cef91cb967708..a8217e221e1954871f15d6ba160538939d9b53ff 100644 (file)
@@ -4854,6 +4854,11 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        nf_reset(skb);
        nf_reset_trace(skb);
 
+#ifdef CONFIG_NET_SWITCHDEV
+       skb->offload_fwd_mark = 0;
+       skb->offload_mr_fwd_mark = 0;
+#endif
+
        if (!xnet)
                return;
 
@@ -4944,6 +4949,8 @@ static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
  *
  * This is a helper to do that correctly considering GSO_BY_FRAGS.
  *
+ * @skb: GSO skb
+ *
  * @seg_len: The segmented length (from skb_gso_*_seglen). In the
  *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
  *
index 6fcc4bc07d19bd929648f03b136225a69f2eddfc..080a880a1761b8e0efafaddf0ddac5bb87c64f88 100644 (file)
@@ -3279,6 +3279,7 @@ int sock_load_diag_module(int family, int protocol)
 
 #ifdef CONFIG_INET
        if (family == AF_INET &&
+           protocol != IPPROTO_RAW &&
            !rcu_access_pointer(inet_protos[protocol]))
                return -ENOENT;
 #endif
index c90ee3227deab6281c3df301d14bb6e81a6e1011..5e8c9bef78bd2ec26b405d1de6e9a74845492e8b 100644 (file)
@@ -158,8 +158,31 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
        cpu_dp->orig_ethtool_ops = NULL;
 }
 
+static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
+                           char *buf)
+{
+       struct net_device *dev = to_net_dev(d);
+       struct dsa_port *cpu_dp = dev->dsa_ptr;
+
+       return sprintf(buf, "%s\n",
+                      dsa_tag_protocol_to_str(cpu_dp->tag_ops));
+}
+static DEVICE_ATTR_RO(tagging);
+
+static struct attribute *dsa_slave_attrs[] = {
+       &dev_attr_tagging.attr,
+       NULL
+};
+
+static const struct attribute_group dsa_group = {
+       .name   = "dsa",
+       .attrs  = dsa_slave_attrs,
+};
+
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
+       int ret;
+
        /* If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
         * sent to the tag format's receive function.
@@ -168,11 +191,20 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 
        dev->dsa_ptr = cpu_dp;
 
-       return dsa_master_ethtool_setup(dev);
+       ret = dsa_master_ethtool_setup(dev);
+       if (ret)
+               return ret;
+
+       ret = sysfs_create_group(&dev->dev.kobj, &dsa_group);
+       if (ret)
+               dsa_master_ethtool_teardown(dev);
+
+       return ret;
 }
 
 void dsa_master_teardown(struct net_device *dev)
 {
+       sysfs_remove_group(&dev->dev.kobj, &dsa_group);
        dsa_master_ethtool_teardown(dev);
 
        dev->dsa_ptr = NULL;
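
With the tagging attribute moving from the slave to the master netdev, dsa_master_setup() now owns the sysfs group and must unwind the ethtool setup when group creation fails, while dsa_master_teardown() releases in reverse order; the slave hunks below delete the old copies. The setup/unwind shape with stubbed steps (one of which deliberately fails):

#include <stdio.h>

/* stubs standing in for dsa_master_ethtool_setup() and
 * sysfs_create_group(); each returns 0 on success */
static int  ethtool_setup(void)    { return 0; }
static void ethtool_teardown(void) { puts("ethtool torn down"); }
static int  sysfs_create(void)     { return -1; /* simulate failure */ }
static void sysfs_remove(void)     { puts("sysfs removed"); }

static int master_setup(void)
{
        int ret = ethtool_setup();

        if (ret)
                return ret;
        ret = sysfs_create();
        if (ret)
                ethtool_teardown();     /* unwind the earlier step */
        return ret;
}

static void master_teardown(void)
{
        sysfs_remove();                 /* reverse order of setup */
        ethtool_teardown();
}

int main(void)
{
        if (master_setup())
                return 1;               /* nothing left half-initialized */
        master_teardown();
        return 0;
}
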
index 7d0c19e7edcf00c556fca25dc8fcdc121d82a79a..aec78f5aca72d197038246fe5462b05c7394a004 100644 (file)
@@ -1058,27 +1058,6 @@ static struct device_type dsa_type = {
        .name   = "dsa",
 };
 
-static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
-                           char *buf)
-{
-       struct net_device *dev = to_net_dev(d);
-       struct dsa_port *dp = dsa_slave_to_port(dev);
-
-       return sprintf(buf, "%s\n",
-                      dsa_tag_protocol_to_str(dp->cpu_dp->tag_ops));
-}
-static DEVICE_ATTR_RO(tagging);
-
-static struct attribute *dsa_slave_attrs[] = {
-       &dev_attr_tagging.attr,
-       NULL
-};
-
-static const struct attribute_group dsa_group = {
-       .name   = "dsa",
-       .attrs  = dsa_slave_attrs,
-};
-
 static void dsa_slave_phylink_validate(struct net_device *dev,
                                       unsigned long *supported,
                                       struct phylink_link_state *state)
@@ -1374,14 +1353,8 @@ int dsa_slave_create(struct dsa_port *port)
                goto out_phy;
        }
 
-       ret = sysfs_create_group(&slave_dev->dev.kobj, &dsa_group);
-       if (ret)
-               goto out_unreg;
-
        return 0;
 
-out_unreg:
-       unregister_netdev(slave_dev);
 out_phy:
        rtnl_lock();
        phylink_disconnect_phy(p->dp->pl);
@@ -1405,7 +1378,6 @@ void dsa_slave_destroy(struct net_device *slave_dev)
        rtnl_unlock();
 
        dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER);
-       sysfs_remove_group(&slave_dev->dev.kobj, &dsa_group);
        unregister_netdev(slave_dev);
        phylink_destroy(dp->pl);
        free_percpu(p->stats64);
index 4da39446da2d89b529973eb33902577a0e6cbb54..765b2b32c4a4263640563f34b4dd93b5bdf471de 100644 (file)
 #ifdef CONFIG_IP_MULTICAST
 /* Parameter names and values are taken from igmp-v2-06 draft */
 
-#define IGMP_V1_ROUTER_PRESENT_TIMEOUT         (400*HZ)
-#define IGMP_V2_ROUTER_PRESENT_TIMEOUT         (400*HZ)
 #define IGMP_V2_UNSOLICITED_REPORT_INTERVAL    (10*HZ)
 #define IGMP_V3_UNSOLICITED_REPORT_INTERVAL    (1*HZ)
+#define IGMP_QUERY_INTERVAL                    (125*HZ)
 #define IGMP_QUERY_RESPONSE_INTERVAL           (10*HZ)
-#define IGMP_QUERY_ROBUSTNESS_VARIABLE         2
-
 
 #define IGMP_INITIAL_REPORT_DELAY              (1)
 
@@ -935,13 +932,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 
                        max_delay = IGMP_QUERY_RESPONSE_INTERVAL;
                        in_dev->mr_v1_seen = jiffies +
-                               IGMP_V1_ROUTER_PRESENT_TIMEOUT;
+                               (in_dev->mr_qrv * in_dev->mr_qi) +
+                               in_dev->mr_qri;
                        group = 0;
                } else {
                        /* v2 router present */
                        max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
                        in_dev->mr_v2_seen = jiffies +
-                               IGMP_V2_ROUTER_PRESENT_TIMEOUT;
+                               (in_dev->mr_qrv * in_dev->mr_qi) +
+                               in_dev->mr_qri;
                }
                /* cancel the interface change timer */
                in_dev->mr_ifc_count = 0;
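
The fixed 400-second "router present" constants give way to RFC 3376's formula, Robustness Variable * Query Interval + Query Response Interval, computed from per-interface state. With the defaults ip_mc_reset() installs below (qrv defaulting to 2, qi of 125 s, qri of 10 s), the timeout works out to 2 * 125 + 10 = 260 s:

#include <stdio.h>

#define HZ 100UL                /* illustrative tick rate */

int main(void)
{
        unsigned long mr_qrv = 2;               /* robustness variable */
        unsigned long mr_qi  = 125 * HZ;        /* query interval */
        unsigned long mr_qri = 10 * HZ;         /* query response interval */

        /* Older Version Querier Present Timeout, as in the hunk above */
        unsigned long timeout = mr_qrv * mr_qi + mr_qri;

        printf("%lu s (was a fixed 400 s)\n", timeout / HZ);    /* 260 s */
        return 0;
}
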
@@ -981,8 +980,21 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
                if (!max_delay)
                        max_delay = 1;  /* can't mod w/ 0 */
                in_dev->mr_maxdelay = max_delay;
-               if (ih3->qrv)
-                       in_dev->mr_qrv = ih3->qrv;
+
+               /* Per RFC 3376, 4.1.6 (QRV) and 4.1.7 (QQIC): when the most
+                * recently received value was zero, use the default or the
+                * statically configured value.
+                */
+               in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
+               in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;
+
+               /* RFC3376, 8.3. Query Response Interval:
+                * The number of seconds represented by the [Query Response
+                * Interval] must be less than the [Query Interval].
+                */
+               if (in_dev->mr_qri >= in_dev->mr_qi)
+                       in_dev->mr_qri = (in_dev->mr_qi/HZ - 1)*HZ;
+
                if (!group) { /* general query */
                        if (ih3->nsrcs)
                                return true;    /* no sources allowed */
@@ -1723,18 +1735,30 @@ void ip_mc_down(struct in_device *in_dev)
        ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
 }
 
-void ip_mc_init_dev(struct in_device *in_dev)
-{
 #ifdef CONFIG_IP_MULTICAST
+static void ip_mc_reset(struct in_device *in_dev)
+{
        struct net *net = dev_net(in_dev->dev);
+
+       in_dev->mr_qi = IGMP_QUERY_INTERVAL;
+       in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
+       in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
+}
+#else
+static void ip_mc_reset(struct in_device *in_dev)
+{
+}
 #endif
+
+void ip_mc_init_dev(struct in_device *in_dev)
+{
        ASSERT_RTNL();
 
 #ifdef CONFIG_IP_MULTICAST
        timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
        timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
-       in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
 #endif
+       ip_mc_reset(in_dev);
 
        spin_lock_init(&in_dev->mc_tomb_lock);
 }
@@ -1744,15 +1768,10 @@ void ip_mc_init_dev(struct in_device *in_dev)
 void ip_mc_up(struct in_device *in_dev)
 {
        struct ip_mc_list *pmc;
-#ifdef CONFIG_IP_MULTICAST
-       struct net *net = dev_net(in_dev->dev);
-#endif
 
        ASSERT_RTNL();
 
-#ifdef CONFIG_IP_MULTICAST
-       in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
-#endif
+       ip_mc_reset(in_dev);
        ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
        for_each_pmc_rtnl(in_dev, pmc) {
index bcb11f3a27c0c34115af05034a5a20f57842eb0a..760a9e52e02b91b36af323c92f7027e150858f88 100644 (file)
@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-                                               void *arg)
+                                               void *arg,
+                                               struct inet_frag_queue **prev)
 {
        struct inet_frags *f = nf->f;
        struct inet_frag_queue *q;
-       int err;
 
        q = inet_frag_alloc(nf, f, arg);
-       if (!q)
+       if (!q) {
+               *prev = ERR_PTR(-ENOMEM);
                return NULL;
-
+       }
        mod_timer(&q->timer, jiffies + nf->timeout);
 
-       err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
-                                    f->rhash_params);
-       if (err < 0) {
+       *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+                                                &q->node, f->rhash_params);
+       if (*prev) {
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
-       struct inet_frag_queue *fq;
+       struct inet_frag_queue *fq = NULL, *prev;
 
        if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
                return NULL;
 
        rcu_read_lock();
 
-       fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
-       if (fq) {
+       prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+       if (!prev)
+               fq = inet_frag_create(nf, key, &prev);
+       if (prev && !IS_ERR(prev)) {
+               fq = prev;
                if (!refcount_inc_not_zero(&fq->refcnt))
                        fq = NULL;
-               rcu_read_unlock();
-               return fq;
        }
        rcu_read_unlock();
-
-       return inet_frag_create(nf, key);
+       return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);
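
The inet_frag_find() rework closes a window where two CPUs could each miss
in the rhashtable lookup and each insert a fresh queue for the same
fragment key; rhashtable_lookup_get_insert_key() makes the insert itself
return whichever entry won, so the loser destroys its copy and adopts the
winner's. A single-threaded, single-bucket sketch of that lookup-or-insert
contract (locking and refcounting elided, all names illustrative):

#include <stdio.h>
#include <stdlib.h>

struct q { int key; struct q *next; };
static struct q *bucket;

/* Insert nq unless an entry with the same key already exists; if one
 * does, return it so the caller can discard nq (mirrors the
 * "get_insert_key" contract). Returns NULL when nq was inserted. */
static struct q *insert_or_get(struct q *nq)
{
	for (struct q *p = bucket; p; p = p->next)
		if (p->key == nq->key)
			return p;
	nq->next = bucket;
	bucket = nq;
	return NULL;
}

static struct q *frag_find(int key)
{
	for (struct q *p = bucket; p; p = p->next)
		if (p->key == key)
			return p;

	struct q *nq = malloc(sizeof(*nq));
	if (!nq)
		return NULL;
	nq->key = key;

	struct q *prev = insert_or_get(nq);
	if (prev) {            /* lost the race: drop ours, use the winner */
		free(nq);
		return prev;
	}
	return nq;
}

int main(void)
{
	printf("same queue: %s\n", frag_find(42) == frag_find(42) ? "yes" : "no");
	return 0;
}
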
index 9b0158fa431f2245c0fa7e21d62e3ac01296dc20..aa0b22697998ab60f0013bf65cea9cef2913f61f 100644 (file)
@@ -515,6 +515,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
        struct rb_node *rbn;
        int len;
        int ihlen;
+       int delta;
        int err;
        u8 ecn;
 
@@ -556,10 +557,16 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
        if (len > 65535)
                goto out_oversize;
 
+       delta = - head->truesize;
+
        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_nomem;
 
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(qp->q.net, delta);
+
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
@@ -722,10 +729,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
        if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
-                       if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
-                               return skb;
-                       if (pskb_trim_rcsum(skb, netoff + len))
-                               return skb;
+                       if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
+                               kfree_skb(skb);
+                               return NULL;
+                       }
+                       if (pskb_trim_rcsum(skb, netoff + len)) {
+                               kfree_skb(skb);
+                               return NULL;
+                       }
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                        if (ip_defrag(net, skb, user))
                                return NULL;
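
Two independent fixes above: skb_unclone() on the head fragment can
reallocate it and grow head->truesize, so ip_frag_reasm() now charges the
difference to the per-netns fragment memory counter instead of letting it
drift; and ip_check_defrag() frees the skb and returns NULL when
pskb_may_pull()/pskb_trim_rcsum() fail, rather than handing a
half-validated packet back to the caller. The accounting idiom,
schematically (sizes invented for illustration):

#include <stdio.h>

static long frag_mem;                 /* stands in for the netns counter */

static void add_frag_mem_limit(long delta) { frag_mem += delta; }

int main(void)
{
	long truesize = 768;          /* head->truesize before unclone */
	long delta = -truesize;

	truesize = 1024;              /* skb_unclone() reallocated the head */
	delta += truesize;
	if (delta)
		add_frag_mem_limit(delta);

	printf("counter adjusted by %ld bytes\n", frag_mem);
	return 0;
}
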
index 35a786c0aaa064888540774863cd08e1f21f12a8..e609b08c9df4f562f01c1a4aba97fc54f6f97694 100644 (file)
@@ -547,7 +547,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk,
        list_for_each_entry_safe(skb, next, head, list) {
                struct dst_entry *dst;
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                /* if ingress device is enslaved to an L3 master device pass the
                 * skb to its handler for processing
                 */
@@ -594,7 +594,7 @@ void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                struct net_device *dev = skb->dev;
                struct net *net = dev_net(dev);
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                skb = ip_rcv_core(skb, net);
                if (skb == NULL)
                        continue;
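
Both RX list loops switch from list_del() to skb_list_del_init() before
handing the skb onward. A bare list_del() leaves the skb's list pointers
stale (poisoned in kernel builds), and code further down the stack that
walks or re-tests those pointers can dereference garbage; the _init
variant leaves the node cleanly self-linked. A toy doubly-linked list
making the difference visible (these list_* helpers are simplified
stand-ins, not the kernel's):

#include <stdio.h>

struct node { struct node *prev, *next; };

static void list_init(struct node *n) { n->prev = n->next = n; }

static void list_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void list_del(struct node *n)       /* unlink; n keeps stale links */
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void list_del_init(struct node *n)  /* unlink and re-initialise */
{
	list_del(n);
	list_init(n);
}

int main(void)
{
	struct node head, a;

	list_init(&head);
	list_add(&a, &head);
	list_del_init(&a);

	printf("a still looks listed? %s\n",
	       a.next == &a ? "no, safely self-linked" : "yes, dangling");
	return 0;
}
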
index c09219e7f23048636836d52daf748bf7e81369e1..5dbec21856f4ce458af136bfe5ef1c0c84a4f8a5 100644 (file)
@@ -939,7 +939,7 @@ static int __ip_append_data(struct sock *sk,
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
-                       unsigned int pagedlen = 0;
+                       unsigned int pagedlen;
                        struct sk_buff *skb_prev;
 alloc_new_skb:
                        skb_prev = skb;
@@ -956,6 +956,7 @@ alloc_new_skb:
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;
+                       pagedlen = 0;
 
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
index 26c36cccabdc2c8cc95cfd609672d412c493fc42..fffcc130900e518874027562272b1052cf0bdd16 100644 (file)
@@ -1246,7 +1246,7 @@ int ip_setsockopt(struct sock *sk, int level,
                return -ENOPROTOOPT;
 
        err = do_ip_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
            optname < BPFILTER_IPT_SET_MAX)
                err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
@@ -1559,7 +1559,7 @@ int ip_getsockopt(struct sock *sk, int level,
        int err;
 
        err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_GET_INFO &&
            optname < BPFILTER_IPT_GET_MAX)
                err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
@@ -1596,7 +1596,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
        err = do_ip_getsockopt(sk, level, optname, optval, optlen,
                MSG_CMSG_COMPAT);
 
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
        if (optname >= BPFILTER_IPT_SO_GET_INFO &&
            optname < BPFILTER_IPT_GET_MAX)
                err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
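
The three #ifdef CONFIG_BPFILTER checks become
#if IS_ENABLED(CONFIG_BPFILTER_UMH) because the bpfilter user-mode helper
can be built as a module: a plain #ifdef only fires for =y, while
IS_ENABLED() also covers =m. A compilable sketch of the mechanism; the
kconfig macros are a simplified from-memory copy of <linux/kconfig.h>,
and the CONFIG_ define below merely simulates an =m build:

#include <stdio.h>

/* Build convention: CONFIG_FOO=y defines CONFIG_FOO 1,
 * CONFIG_FOO=m defines CONFIG_FOO_MODULE 1. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_BUILTIN(option) __is_defined(option)
#define IS_MODULE(option)  __is_defined(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_BPFILTER_UMH_MODULE 1     /* pretend bpfilter UMH is =m */

int main(void)
{
	printf("IS_ENABLED(CONFIG_BPFILTER_UMH) = %d\n",
	       IS_ENABLED(CONFIG_BPFILTER_UMH));
#ifdef CONFIG_BPFILTER_UMH
	printf("#ifdef CONFIG_BPFILTER_UMH: taken\n");
#else
	printf("#ifdef CONFIG_BPFILTER_UMH: not taken (misses =m)\n");
#endif
	return 0;
}
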
index dde671e978298b5b0239f975fe134b232db9049b..c248e0dccbe17afa397910b0d68260daf2a9eb3f 100644 (file)
@@ -80,7 +80,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
 
        iph->version    =       4;
        iph->ihl        =       sizeof(struct iphdr) >> 2;
-       iph->frag_off   =       df;
+       iph->frag_off   =       ip_mtu_locked(&rt->dst) ? 0 : df;
        iph->protocol   =       proto;
        iph->tos        =       tos;
        iph->daddr      =       dst;
index ce1512b02cb203a549529967eb602b467644a2d5..fd3f9e8a74daf4954d675eaf2cc381196facc4f4 100644 (file)
@@ -81,9 +81,12 @@ static int __init masquerade_tg_init(void)
        int ret;
 
        ret = xt_register_target(&masquerade_tg_reg);
+       if (ret)
+               return ret;
 
-       if (ret == 0)
-               nf_nat_masquerade_ipv4_register_notifier();
+       ret = nf_nat_masquerade_ipv4_register_notifier();
+       if (ret)
+               xt_unregister_target(&masquerade_tg_reg);
 
        return ret;
 }
index a9d5e013e5556a5bace7afcb61cabeb0849261d1..41327bb990932bd8f95fe5b357fac4c0134866e9 100644 (file)
@@ -147,28 +147,50 @@ static struct notifier_block masq_inet_notifier = {
        .notifier_call  = masq_inet_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv4_register_notifier(void)
+int nf_nat_masquerade_ipv4_register_notifier(void)
 {
+       int ret = 0;
+
+       mutex_lock(&masq_mutex);
        /* check if the notifier was already set */
-       if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-               return;
+       if (++masq_refcnt > 1)
+               goto out_unlock;
 
        /* Register for device down reports */
-       register_netdevice_notifier(&masq_dev_notifier);
+       ret = register_netdevice_notifier(&masq_dev_notifier);
+       if (ret)
+               goto err_dec;
        /* Register IP address change reports */
-       register_inetaddr_notifier(&masq_inet_notifier);
+       ret = register_inetaddr_notifier(&masq_inet_notifier);
+       if (ret)
+               goto err_unregister;
+
+       mutex_unlock(&masq_mutex);
+       return ret;
+
+err_unregister:
+       unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+       masq_refcnt--;
+out_unlock:
+       mutex_unlock(&masq_mutex);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_register_notifier);
 
 void nf_nat_masquerade_ipv4_unregister_notifier(void)
 {
+       mutex_lock(&masq_mutex);
        /* check if the notifier still has clients */
-       if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-               return;
+       if (--masq_refcnt > 0)
+               goto out_unlock;
 
        unregister_netdevice_notifier(&masq_dev_notifier);
        unregister_inetaddr_notifier(&masq_inet_notifier);
+out_unlock:
+       mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4_unregister_notifier);
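
This registration rework (and its IPv6 twin further down) trades a bare
atomic refcount for a mutex-guarded count with full unwind: the old code
could neither propagate a registration failure nor back out the netdevice
notifier when the inetaddr one failed, and the callers in
ipt_MASQUERADE/nft_masq above and below now check the return value and
unregister their targets on failure. The generic shape of the pattern,
runnable; register_a()/register_b() stand in for the two notifier
registrations:

#include <pthread.h>
#include <stdio.h>

static int refcnt;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int register_a(void) { return 0; }
static int register_b(void) { return -1; }   /* simulate a failure */
static void unregister_a(void) { }

static int masq_register(void)
{
	int ret = 0;

	pthread_mutex_lock(&lock);
	if (++refcnt > 1)                /* someone already registered */
		goto out;
	ret = register_a();
	if (ret)
		goto err_dec;
	ret = register_b();
	if (ret)
		goto err_unreg;
	goto out;
err_unreg:
	unregister_a();                  /* undo the first registration */
err_dec:
	refcnt--;                        /* ... and the refcount bump */
out:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("register: %d, refcnt after unwind: %d\n",
	       masq_register(), refcnt);
	return 0;
}
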
index f1193e1e928aa14aaa88371d78558a46ed9c9d59..6847de1d1db8a7e4c86dd27f5c4310df4627c296 100644 (file)
@@ -69,7 +69,9 @@ static int __init nft_masq_ipv4_module_init(void)
        if (ret < 0)
                return ret;
 
-       nf_nat_masquerade_ipv4_register_notifier();
+       ret = nf_nat_masquerade_ipv4_register_notifier();
+       if (ret)
+               nft_unregister_expr(&nft_masq_ipv4_type);
 
        return ret;
 }
index b7918d4caa300a15bec2858065b8f73d71cf6eb0..3b45fe530f91e2e1aa697888e11a78cf7e9d211e 100644 (file)
@@ -145,6 +145,7 @@ msg_bytes_ready:
                        ret = err;
                        goto out;
                }
+               copied = -EAGAIN;
        }
        ret = copied;
 out:
index 2868ef28ce52179b3c5874e749b680ffbdc0521a..a9d9555a973fed4e3562a57d1a2cdadfef40dae4 100644 (file)
@@ -579,10 +579,12 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
                u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
                u32 delta_us;
 
-               if (!delta)
-                       delta = 1;
-               delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
-               tcp_rcv_rtt_update(tp, delta_us, 0);
+               if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+                       if (!delta)
+                               delta = 1;
+                       delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+                       tcp_rcv_rtt_update(tp, delta_us, 0);
+               }
        }
 }
 
@@ -2910,9 +2912,11 @@ static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
        if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
            flag & FLAG_ACKED) {
                u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
-               u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
 
-               seq_rtt_us = ca_rtt_us = delta_us;
+               if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
+                       seq_rtt_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+                       ca_rtt_us = seq_rtt_us;
+               }
        }
        rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
        if (seq_rtt_us < 0)
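
Both RTT paths above now discard the timestamp sample when
delta * (USEC_PER_SEC / TCP_TS_HZ) would overflow 32 bits: with a bogus or
very stale rcv_tsecr the multiplication wraps, and an absurd RTT quietly
becomes a small, plausible one. Assuming the usual 1 ms timestamp
granularity (so the multiplier is 1000), a quick demonstration:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint32_t delta = 5000000;           /* ~83 minutes of 1 ms ticks */
	uint32_t delta_us = delta * 1000;   /* 5e9 wraps modulo 2^32 */

	printf("naive delta_us = %u (wrapped)\n", delta_us);
	printf("guard rejects sample: %s\n",
	       delta < INT_MAX / 1000 ? "no" : "yes");
	return 0;
}
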
@@ -4268,7 +4272,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
         * If the sack array is full, forget about the last one.
         */
        if (this_sack >= TCP_NUM_SACKS) {
-               if (tp->compressed_ack)
+               if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
                        tcp_send_ack(sk);
                this_sack--;
                tp->rx_opt.num_sacks--;
@@ -4363,6 +4367,7 @@ static bool tcp_try_coalesce(struct sock *sk,
        if (TCP_SKB_CB(from)->has_rxtstamp) {
                TCP_SKB_CB(to)->has_rxtstamp = true;
                to->tstamp = from->tstamp;
+               skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
        }
 
        return true;
@@ -5188,7 +5193,17 @@ send_now:
        if (!tcp_is_sack(tp) ||
            tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)
                goto send_now;
-       tp->compressed_ack++;
+
+       if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
+               tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
+               if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
+                       NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
+                                     tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+               tp->compressed_ack = 0;
+       }
+
+       if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH)
+               goto send_now;
 
        if (hrtimer_is_queued(&tp->compressed_ack_timer))
                return;
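
The compressed-ACK change reuses TCP_FASTRETRANS_THRESH (the classic
duplicate-ACK threshold, 3) as a floor: the first few ACKs for a given
rcv_nxt are still sent immediately so the peer's fast retransmit is not
starved of dupacks, and only ACKs beyond that are held and compressed; the
matching hunks in tcp_output.c and tcp_timer.c below keep the counter and
the SNMP accounting relative to the same floor. The counting logic in
isolation:

#include <stdio.h>

#define TCP_FASTRETRANS_THRESH 3

int main(void)
{
	int compressed_ack = 0;

	for (int i = 1; i <= 6; i++) {
		if (++compressed_ack <= TCP_FASTRETRANS_THRESH)
			printf("ack %d: send immediately\n", i);
		else
			printf("ack %d: hold for compression\n", i);
	}
	return 0;
}
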
index 9c34b97d365d719ff76250bc9fe7fa20495a3ed2..d1676d8a6ed70fbe050709a16a650df35a1f4d87 100644 (file)
@@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (unlikely(tp->compressed_ack)) {
+       if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) {
                NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
-                             tp->compressed_ack);
-               tp->compressed_ack = 0;
+                             tp->compressed_ack - TCP_FASTRETRANS_THRESH);
+               tp->compressed_ack = TCP_FASTRETRANS_THRESH;
                if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
                        __sock_put(sk);
        }
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-                                bool *is_cwnd_limited, u32 max_segs)
+                                bool *is_cwnd_limited,
+                                bool *is_rwnd_limited,
+                                u32 max_segs)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 age, send_win, cong_win, limit, in_flight;
@@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        struct sk_buff *head;
        int win_divisor;
 
-       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-               goto send_now;
-
        if (icsk->icsk_ca_state >= TCP_CA_Recovery)
                goto send_now;
 
@@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
        if (age < (tp->srtt_us >> 4))
                goto send_now;
 
-       /* Ok, it looks like it is advisable to defer. */
+       /* Ok, it looks like it is advisable to defer.
+        * Three cases are tracked :
+        * 1) We are cwnd-limited
+        * 2) We are rwnd-limited
+        * 3) We are application limited.
+        */
+       if (cong_win < send_win) {
+               if (cong_win <= skb->len) {
+                       *is_cwnd_limited = true;
+                       return true;
+               }
+       } else {
+               if (send_win <= skb->len) {
+                       *is_rwnd_limited = true;
+                       return true;
+               }
+       }
 
-       if (cong_win < send_win && cong_win <= skb->len)
-               *is_cwnd_limited = true;
+       /* If this packet won't get more data, do not wait. */
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               goto send_now;
 
        return true;
 
@@ -2356,7 +2372,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                } else {
                        if (!push_one &&
                            tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-                                                max_segs))
+                                                &is_rwnd_limited, max_segs))
                                break;
                }
 
@@ -2494,15 +2510,18 @@ void tcp_send_loss_probe(struct sock *sk)
                goto rearm_timer;
        }
        skb = skb_rb_last(&sk->tcp_rtx_queue);
+       if (unlikely(!skb)) {
+               WARN_ONCE(tp->packets_out,
+                         "invalid inflight: %u state %u cwnd %u mss %d\n",
+                         tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+               inet_csk(sk)->icsk_pending = 0;
+               return;
+       }
 
        /* At most one outstanding TLP retransmission. */
        if (tp->tlp_high_seq)
                goto rearm_timer;
 
-       /* Retransmit last segment. */
-       if (WARN_ON(!skb))
-               goto rearm_timer;
-
        if (skb_still_in_host_queue(sk, skb))
                goto rearm_timer;
 
@@ -2920,7 +2939,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
                trace_tcp_retransmit_skb(sk, skb);
        } else if (err != -EBUSY) {
-               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+               NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
        }
        return err;
 }
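
In tcp_tso_should_defer() above, deferral now also records when the
receiver window rather than the congestion window is the binding
constraint (the caller passes &is_rwnd_limited, apparently so rwnd-limited
time is accounted instead of being mistaken for application-limited time),
and the FIN shortcut moves below that bookkeeping. The decision table,
reduced to illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int cong_win = 20000, send_win = 12000, skb_len = 13000;
	bool cwnd_limited = false, rwnd_limited = false;

	if (cong_win < send_win) {
		if (cong_win <= skb_len)
			cwnd_limited = true;   /* cwnd is what stops us */
	} else {
		if (send_win <= skb_len)
			rwnd_limited = true;   /* receiver window stops us */
	}
	printf("cwnd_limited=%d rwnd_limited=%d\n", cwnd_limited, rwnd_limited);
	return 0;
}
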
index 676020663ce80a79341ad1a05352742cc8dd5850..f87dbc78b6bcb85e12b72bdf57679a36440bb5bf 100644 (file)
@@ -40,15 +40,17 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 elapsed, start_ts;
+       s32 remaining;
 
        start_ts = tcp_retransmit_stamp(sk);
        if (!icsk->icsk_user_timeout || !start_ts)
                return icsk->icsk_rto;
        elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
-       if (elapsed >= icsk->icsk_user_timeout)
+       remaining = icsk->icsk_user_timeout - elapsed;
+       if (remaining <= 0)
                return 1; /* user timeout has passed; fire ASAP */
-       else
-               return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed));
+
+       return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
 }
 
 /**
@@ -209,7 +211,7 @@ static bool retransmits_timed_out(struct sock *sk,
                                (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
                timeout = jiffies_to_msecs(timeout);
        }
-       return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout;
+       return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
 }
 
 /* A write timeout has occurred. Process the after effects. */
@@ -376,7 +378,7 @@ static void tcp_probe_timer(struct sock *sk)
                        return;
        }
 
-       if (icsk->icsk_probes_out > max_probes) {
+       if (icsk->icsk_probes_out >= max_probes) {
 abort:         tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
@@ -482,11 +484,12 @@ void tcp_retransmit_timer(struct sock *sk)
                goto out_reset_timer;
        }
 
+       __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
        if (tcp_write_timeout(sk))
                goto out;
 
        if (icsk->icsk_retransmits == 0) {
-               int mib_idx;
+               int mib_idx = 0;
 
                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
@@ -501,10 +504,9 @@ void tcp_retransmit_timer(struct sock *sk)
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
-               } else {
-                       mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
-               __NET_INC_STATS(sock_net(sk), mib_idx);
+               if (mib_idx)
+                       __NET_INC_STATS(sock_net(sk), mib_idx);
        }
 
        tcp_enter_loss(sk);
@@ -740,7 +742,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
 
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
-               if (tp->compressed_ack)
+               if (tp->compressed_ack > TCP_FASTRETRANS_THRESH)
                        tcp_send_ack(sk);
        } else {
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
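
retransmits_timed_out() and tcp_clamp_rto_to_user_timeout() above both
switch to signed, subtract-first comparisons. If start_ts ends up
marginally ahead of the current timestamp (reportedly possible when
retransmits get stamped early under pacing), the old unsigned test saw an
enormous elapsed time and declared a timeout instantly; the (s32) form
treats a small negative elapsed time as "not yet". Demonstrated:

#include <stdio.h>
#include <stdint.h>

static int timed_out(uint32_t now, uint32_t start, uint32_t timeout)
{
	return (int32_t)(now - start - timeout) >= 0;
}

int main(void)
{
	uint32_t start = 100, now = 90, timeout = 1000; /* start "in the future" */

	printf("unsigned compare: %d (spurious timeout)\n",
	       now - start >= timeout);
	printf("signed compare:   %d\n", timed_out(now, start, timeout));
	return 0;
}
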
index 63a808d5af1575255ae9ef16aeb9a2a9549571d5..045597b9a7c05bd5fdb796358af64f4da6bb22dc 100644 (file)
@@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp);
 static void addrconf_dad_work(struct work_struct *w);
 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
                                   bool send_na);
-static void addrconf_dad_run(struct inet6_dev *idev);
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
 static void addrconf_rs_timer(struct timer_list *t);
 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
@@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                           void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct netdev_notifier_change_info *change_info;
        struct netdev_notifier_changeupper_info *info;
        struct inet6_dev *idev = __in6_dev_get(dev);
        struct net *net = dev_net(dev);
@@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                                break;
                        }
 
-                       if (idev) {
+                       if (!IS_ERR_OR_NULL(idev)) {
                                if (idev->if_flags & IF_READY) {
                                        /* device is already configured -
                                         * but resend MLD reports, we might
@@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                                         * multicast snooping switches
                                         */
                                        ipv6_mc_up(idev);
+                                       change_info = ptr;
+                                       if (change_info->flags_changed & IFF_NOARP)
+                                               addrconf_dad_run(idev, true);
                                        rt6_sync_up(dev, RTNH_F_LINKDOWN);
                                        break;
                                }
@@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 
                if (!IS_ERR_OR_NULL(idev)) {
                        if (run_pending)
-                               addrconf_dad_run(idev);
+                               addrconf_dad_run(idev, false);
 
                        /* Device has an address by now */
                        rt6_sync_up(dev, RTNH_F_DEAD);
@@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
                addrconf_verify_rtnl();
 }
 
-static void addrconf_dad_run(struct inet6_dev *idev)
+static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
 {
        struct inet6_ifaddr *ifp;
 
        read_lock_bh(&idev->lock);
        list_for_each_entry(ifp, &idev->addr_list, if_list) {
                spin_lock(&ifp->lock);
-               if (ifp->flags & IFA_F_TENTATIVE &&
-                   ifp->state == INET6_IFADDR_STATE_DAD)
+               if ((ifp->flags & IFA_F_TENTATIVE &&
+                    ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
+                       if (restart)
+                               ifp->state = INET6_IFADDR_STATE_PREDAD;
                        addrconf_dad_kick(ifp);
+               }
                spin_unlock(&ifp->lock);
        }
        read_unlock_bh(&idev->lock);
index 3f4d61017a6947c9dfb5cd1a38e5a25f1665928f..f0cd291034f0fa8ece55acd0fccf79e02629c98a 100644 (file)
@@ -1001,6 +1001,9 @@ static int __init inet6_init(void)
        err = ip6_flowlabel_init();
        if (err)
                goto ip6_flowlabel_fail;
+       err = ipv6_anycast_init();
+       if (err)
+               goto ipv6_anycast_fail;
        err = addrconf_init();
        if (err)
                goto addrconf_fail;
@@ -1091,6 +1094,8 @@ ipv6_frag_fail:
 ipv6_exthdrs_fail:
        addrconf_cleanup();
 addrconf_fail:
+       ipv6_anycast_cleanup();
+ipv6_anycast_fail:
        ip6_flowlabel_cleanup();
 ip6_flowlabel_fail:
        ndisc_late_cleanup();
index 4e0ff7031edd55ce6dbb3f2c62e22b9040cc7fec..94999058e11029b637b6ab8201f8706599e49284 100644 (file)
 
 #include <net/checksum.h>
 
+#define IN6_ADDR_HSIZE_SHIFT   8
+#define IN6_ADDR_HSIZE         BIT(IN6_ADDR_HSIZE_SHIFT)
+/*     anycast address hash table
+ */
+static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(acaddr_hash_lock);
+
 static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
 
+static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
+{
+       u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+
+       return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
+}
+
 /*
  *     socket join an anycast group
  */
@@ -204,16 +218,39 @@ void ipv6_sock_ac_close(struct sock *sk)
        rtnl_unlock();
 }
 
+static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
+{
+       unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
+
+       spin_lock(&acaddr_hash_lock);
+       hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
+       spin_unlock(&acaddr_hash_lock);
+}
+
+static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
+{
+       spin_lock(&acaddr_hash_lock);
+       hlist_del_init_rcu(&aca->aca_addr_lst);
+       spin_unlock(&acaddr_hash_lock);
+}
+
 static void aca_get(struct ifacaddr6 *aca)
 {
        refcount_inc(&aca->aca_refcnt);
 }
 
+static void aca_free_rcu(struct rcu_head *h)
+{
+       struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
+
+       fib6_info_release(aca->aca_rt);
+       kfree(aca);
+}
+
 static void aca_put(struct ifacaddr6 *ac)
 {
        if (refcount_dec_and_test(&ac->aca_refcnt)) {
-               fib6_info_release(ac->aca_rt);
-               kfree(ac);
+               call_rcu(&ac->rcu, aca_free_rcu);
        }
 }
 
@@ -229,6 +266,7 @@ static struct ifacaddr6 *aca_alloc(struct fib6_info *f6i,
        aca->aca_addr = *addr;
        fib6_info_hold(f6i);
        aca->aca_rt = f6i;
+       INIT_HLIST_NODE(&aca->aca_addr_lst);
        aca->aca_users = 1;
        /* aca_tstamp should be updated upon changes */
        aca->aca_cstamp = aca->aca_tstamp = jiffies;
@@ -285,6 +323,8 @@ int __ipv6_dev_ac_inc(struct inet6_dev *idev, const struct in6_addr *addr)
        aca_get(aca);
        write_unlock_bh(&idev->lock);
 
+       ipv6_add_acaddr_hash(net, aca);
+
        ip6_ins_rt(net, f6i);
 
        addrconf_join_solict(idev->dev, &aca->aca_addr);
@@ -325,6 +365,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
        else
                idev->ac_list = aca->aca_next;
        write_unlock_bh(&idev->lock);
+       ipv6_del_acaddr_hash(aca);
        addrconf_leave_solict(idev, &aca->aca_addr);
 
        ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -352,6 +393,8 @@ void ipv6_ac_destroy_dev(struct inet6_dev *idev)
                idev->ac_list = aca->aca_next;
                write_unlock_bh(&idev->lock);
 
+               ipv6_del_acaddr_hash(aca);
+
                addrconf_leave_solict(idev, &aca->aca_addr);
 
                ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
@@ -390,17 +433,25 @@ static bool ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *ad
 bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                         const struct in6_addr *addr)
 {
+       unsigned int hash = inet6_acaddr_hash(net, addr);
+       struct net_device *nh_dev;
+       struct ifacaddr6 *aca;
        bool found = false;
 
        rcu_read_lock();
        if (dev)
                found = ipv6_chk_acast_dev(dev, addr);
        else
-               for_each_netdev_rcu(net, dev)
-                       if (ipv6_chk_acast_dev(dev, addr)) {
+               hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
+                                        aca_addr_lst) {
+                       nh_dev = fib6_info_nh_dev(aca->aca_rt);
+                       if (!nh_dev || !net_eq(dev_net(nh_dev), net))
+                               continue;
+                       if (ipv6_addr_equal(&aca->aca_addr, addr)) {
                                found = true;
                                break;
                        }
+               }
        rcu_read_unlock();
        return found;
 }
@@ -540,3 +591,24 @@ void ac6_proc_exit(struct net *net)
        remove_proc_entry("anycast6", net->proc_net);
 }
 #endif
+
+/*     Init / cleanup code
+ */
+int __init ipv6_anycast_init(void)
+{
+       int i;
+
+       for (i = 0; i < IN6_ADDR_HSIZE; i++)
+               INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
+       return 0;
+}
+
+void ipv6_anycast_cleanup(void)
+{
+       int i;
+
+       spin_lock(&acaddr_hash_lock);
+       for (i = 0; i < IN6_ADDR_HSIZE; i++)
+               WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
+       spin_unlock(&acaddr_hash_lock);
+}
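
ipv6_chk_acast_addr() used to walk every net_device in the namespace when
no device was given; the patch introduces a 256-bucket global hash of
anycast addresses, keyed on the address mixed with a per-netns salt, with
entries freed via call_rcu() so lockless readers stay safe, and the lookup
now scans a single chain. A reduced model of the structure
(single-threaded; namespaces, RCU and the kernel's exact hash mix
omitted):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define HSIZE_SHIFT 8
#define HSIZE (1u << HSIZE_SHIFT)

struct aca { uint8_t addr[16]; struct aca *next; };
static struct aca *buckets[HSIZE];

/* hash_32()-flavoured fold of a toy address hash; the kernel also mixes
 * in a per-netns value (net_hash_mix()) before folding. */
static uint32_t hash_addr(const uint8_t *a)
{
	uint32_t h = 0;

	for (int i = 0; i < 16; i++)
		h = h * 31 + a[i];
	return (h * 0x61C88647u) >> (32 - HSIZE_SHIFT);
}

static int chk_acast_addr(const uint8_t *a)
{
	for (struct aca *p = buckets[hash_addr(a)]; p; p = p->next)
		if (!memcmp(p->addr, a, 16))
			return 1;
	return 0;
}

int main(void)
{
	static struct aca one = { .addr = { 0x20, 0x01, [15] = 1 } };

	buckets[hash_addr(one.addr)] = &one;
	printf("found: %d\n", chk_acast_addr(one.addr));
	return 0;
}
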
index 1b8bc008b53b642adef3ba9335563d430a99c1a9..ae3786132c236b2bcde4f8f3008fceb2d6bc1cdd 100644 (file)
@@ -591,7 +591,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
 
        /* fib entries are never clones */
        if (arg.filter.flags & RTM_F_CLONED)
-               return skb->len;
+               goto out;
 
        w = (void *)cb->args[2];
        if (!w) {
@@ -621,7 +621,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
                tb = fib6_get_table(net, arg.filter.table_id);
                if (!tb) {
                        if (arg.filter.dump_all_families)
-                               return skb->len;
+                               goto out;
 
                        NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
                        return -ENOENT;
index 96577e742afd496eaed410395620be5a8e24430f..c1d85830c906f68bdd2310e411b44dca98e3db72 100644 (file)
@@ -95,7 +95,7 @@ static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
        list_for_each_entry_safe(skb, next, head, list) {
                struct dst_entry *dst;
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                /* if ingress device is enslaved to an L3 master device pass the
                 * skb to its handler for processing
                 */
@@ -296,7 +296,7 @@ void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
                struct net_device *dev = skb->dev;
                struct net *net = dev_net(dev);
 
-               list_del(&skb->list);
+               skb_list_del_init(skb);
                skb = ip6_rcv_core(skb, dev, net);
                if (skb == NULL)
                        continue;
index 89e0d5118afe69a94c93e1f584047240fc0d1a51..fcd3c66ded1620d0d320fd7b1753f2734d1dfa46 100644 (file)
@@ -195,37 +195,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        const struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *first_hop = &fl6->daddr;
        struct dst_entry *dst = skb_dst(skb);
+       unsigned int head_room;
        struct ipv6hdr *hdr;
        u8  proto = fl6->flowi6_proto;
        int seg_len = skb->len;
        int hlimit = -1;
        u32 mtu;
 
-       if (opt) {
-               unsigned int head_room;
+       head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+       if (opt)
+               head_room += opt->opt_nflen + opt->opt_flen;
 
-               /* First: exthdrs may take lots of space (~8K for now)
-                  MAX_HEADER is not enough.
-                */
-               head_room = opt->opt_nflen + opt->opt_flen;
-               seg_len += head_room;
-               head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
-
-               if (skb_headroom(skb) < head_room) {
-                       struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
-                       if (!skb2) {
-                               IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                                             IPSTATS_MIB_OUTDISCARDS);
-                               kfree_skb(skb);
-                               return -ENOBUFS;
-                       }
-                       if (skb->sk)
-                               skb_set_owner_w(skb2, skb->sk);
-                       consume_skb(skb);
-                       skb = skb2;
+       if (unlikely(skb_headroom(skb) < head_room)) {
+               struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+               if (!skb2) {
+                       IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+                                     IPSTATS_MIB_OUTDISCARDS);
+                       kfree_skb(skb);
+                       return -ENOBUFS;
                }
+               if (skb->sk)
+                       skb_set_owner_w(skb2, skb->sk);
+               consume_skb(skb);
+               skb = skb2;
+       }
+
+       if (opt) {
+               seg_len += opt->opt_nflen + opt->opt_flen;
+
                if (opt->opt_flen)
                        ipv6_push_frag_opts(skb, opt, &proto);
+
                if (opt->opt_nflen)
                        ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
                                             &fl6->saddr);
@@ -1354,7 +1354,7 @@ emsgsize:
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
-                       unsigned int pagedlen = 0;
+                       unsigned int pagedlen;
 alloc_new_skb:
                        /* There's no room in the current skb */
                        if (skb)
@@ -1378,6 +1378,7 @@ alloc_new_skb:
                        if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
                        fraglen = datalen + fragheaderlen;
+                       pagedlen = 0;
 
                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
index 5ae8e1c51079cb2cb36324af300762c5463134ba..8b075f0bc35169b4098bda738950d631b62ec415 100644 (file)
@@ -24,7 +24,8 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
        unsigned int hh_len;
        struct dst_entry *dst;
        struct flowi6 fl6 = {
-               .flowi6_oif = sk ? sk->sk_bound_dev_if : 0,
+               .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
+                       rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
                .flowi6_mark = skb->mark,
                .flowi6_uid = sock_net_uid(net, sk),
                .daddr = iph->daddr,
index 491f808e356a68046f553785836d220e287c318a..29c7f1915a96cba89c357eeaa9de0b8c29aa9f28 100644 (file)
@@ -58,8 +58,12 @@ static int __init masquerade_tg6_init(void)
        int err;
 
        err = xt_register_target(&masquerade_tg6_reg);
-       if (err == 0)
-               nf_nat_masquerade_ipv6_register_notifier();
+       if (err)
+               return err;
+
+       err = nf_nat_masquerade_ipv6_register_notifier();
+       if (err)
+               xt_unregister_target(&masquerade_tg6_reg);
 
        return err;
 }
index b8ac369f98ad877f6cf9114b1dbcfcb6c4c95ec5..181da2c40f9a98f99b0491dfcc589aec73ad977e 100644 (file)
@@ -341,7 +341,7 @@ static bool
 nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_device *dev)
 {
        struct sk_buff *fp, *head = fq->q.fragments;
-       int    payload_len;
+       int    payload_len, delta;
        u8 ecn;
 
        inet_frag_kill(&fq->q);
@@ -363,10 +363,16 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev,  struct net_devic
                return false;
        }
 
+       delta = - head->truesize;
+
        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                return false;
 
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(fq->q.net, delta);
+
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
@@ -587,11 +593,16 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
         */
        ret = -EINPROGRESS;
        if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
-           fq->q.meat == fq->q.len &&
-           nf_ct_frag6_reasm(fq, skb, dev))
-               ret = 0;
-       else
+           fq->q.meat == fq->q.len) {
+               unsigned long orefdst = skb->_skb_refdst;
+
+               skb->_skb_refdst = 0UL;
+               if (nf_ct_frag6_reasm(fq, skb, dev))
+                       ret = 0;
+               skb->_skb_refdst = orefdst;
+       } else {
                skb_dst_drop(skb);
+       }
 
 out_unlock:
        spin_unlock_bh(&fq->q.lock);
index 3e4bf2286abea96617f8df1ecac74d91667ef59f..0ad0da5a260026ccddd96a4becbec9fa3a975e67 100644 (file)
@@ -132,8 +132,8 @@ static void iterate_cleanup_work(struct work_struct *work)
  * of ipv6 addresses being deleted), we also need to add an upper
  * limit to the number of queued work items.
  */
-static int masq_inet_event(struct notifier_block *this,
-                          unsigned long event, void *ptr)
+static int masq_inet6_event(struct notifier_block *this,
+                           unsigned long event, void *ptr)
 {
        struct inet6_ifaddr *ifa = ptr;
        const struct net_device *dev;
@@ -171,30 +171,53 @@ static int masq_inet_event(struct notifier_block *this,
        return NOTIFY_DONE;
 }
 
-static struct notifier_block masq_inet_notifier = {
-       .notifier_call  = masq_inet_event,
+static struct notifier_block masq_inet6_notifier = {
+       .notifier_call  = masq_inet6_event,
 };
 
-static atomic_t masquerade_notifier_refcount = ATOMIC_INIT(0);
+static int masq_refcnt;
+static DEFINE_MUTEX(masq_mutex);
 
-void nf_nat_masquerade_ipv6_register_notifier(void)
+int nf_nat_masquerade_ipv6_register_notifier(void)
 {
+       int ret = 0;
+
+       mutex_lock(&masq_mutex);
        /* check if the notifier is already set */
-       if (atomic_inc_return(&masquerade_notifier_refcount) > 1)
-               return;
+       if (++masq_refcnt > 1)
+               goto out_unlock;
+
+       ret = register_netdevice_notifier(&masq_dev_notifier);
+       if (ret)
+               goto err_dec;
+
+       ret = register_inet6addr_notifier(&masq_inet6_notifier);
+       if (ret)
+               goto err_unregister;
 
-       register_netdevice_notifier(&masq_dev_notifier);
-       register_inet6addr_notifier(&masq_inet_notifier);
+       mutex_unlock(&masq_mutex);
+       return ret;
+
+err_unregister:
+       unregister_netdevice_notifier(&masq_dev_notifier);
+err_dec:
+       masq_refcnt--;
+out_unlock:
+       mutex_unlock(&masq_mutex);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_register_notifier);
 
 void nf_nat_masquerade_ipv6_unregister_notifier(void)
 {
+       mutex_lock(&masq_mutex);
        /* check if the notifier still has clients */
-       if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
-               return;
+       if (--masq_refcnt > 0)
+               goto out_unlock;
 
-       unregister_inet6addr_notifier(&masq_inet_notifier);
+       unregister_inet6addr_notifier(&masq_inet6_notifier);
        unregister_netdevice_notifier(&masq_dev_notifier);
+out_unlock:
+       mutex_unlock(&masq_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6_unregister_notifier);
index dd0122f3cffea92f587f0c8a598281e77aa5c98b..e06c82e9dfcdf86c7f41ec8dc1693013b3cc4fc1 100644 (file)
@@ -70,7 +70,9 @@ static int __init nft_masq_ipv6_module_init(void)
        if (ret < 0)
                return ret;
 
-       nf_nat_masquerade_ipv6_register_notifier();
+       ret = nf_nat_masquerade_ipv6_register_notifier();
+       if (ret)
+               nft_unregister_expr(&nft_masq_ipv6_type);
 
        return ret;
 }
index 5c3c9271309620b6ca8bf0a7d7e10459bea8dc40..aa26c45486d94ab2f2f9f443b837642d8b582f83 100644 (file)
@@ -281,7 +281,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 {
        struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
        struct sk_buff *fp, *head = fq->q.fragments;
-       int    payload_len;
+       int    payload_len, delta;
        unsigned int nhoff;
        int sum_truesize;
        u8 ecn;
@@ -322,10 +322,16 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;
 
+       delta = - head->truesize;
+
        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;
 
+       delta += head->truesize;
+       if (delta)
+               add_frag_mem_limit(fq->q.net, delta);
+
        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments. */
index 2a7423c394560c0bc70d6f0398781a0b35fa9fa0..059f0531f7c1c86133afcab0f8e7388c60eeaf58 100644 (file)
@@ -2232,8 +2232,7 @@ static void ip6_link_failure(struct sk_buff *skb)
        if (rt) {
                rcu_read_lock();
                if (rt->rt6i_flags & RTF_CACHE) {
-                       if (dst_hold_safe(&rt->dst))
-                               rt6_remove_exception_rt(rt);
+                       rt6_remove_exception_rt(rt);
                } else {
                        struct fib6_info *from;
                        struct fib6_node *fn;
@@ -2360,10 +2359,13 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
 
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
 {
+       int oif = sk->sk_bound_dev_if;
        struct dst_entry *dst;
 
-       ip6_update_pmtu(skb, sock_net(sk), mtu,
-                       sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+       if (!oif && skb->dev)
+               oif = l3mdev_master_ifindex(skb->dev);
+
+       ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
 
        dst = __sk_dst_get(sk);
        if (!dst || !dst->obsolete ||
@@ -3214,8 +3216,8 @@ static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
        if (cfg->fc_flags & RTF_GATEWAY &&
            !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
                goto out;
-       if (dst_hold_safe(&rt->dst))
-               rc = rt6_remove_exception_rt(rt);
+
+       rc = rt6_remove_exception_rt(rt);
 out:
        return rc;
 }
index a8854dd3e9c5ef64a7a480bb6ff891fac0e6d1ea..8181ee7e1e27051040bd3bbce9d6a228632a5297 100644 (file)
@@ -347,6 +347,7 @@ static int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                struct ipv6hdr *hdr = ipv6_hdr(skb);
                struct flowi6 fl6;
 
+               memset(&fl6, 0, sizeof(fl6));
                fl6.daddr = hdr->daddr;
                fl6.saddr = hdr->saddr;
                fl6.flowlabel = ip6_flowinfo(hdr);
index 82cdf9020b53921c1ed86cd6122ebe6007ea263c..26f1d435696a628aca844edb2a88f8b793839d91 100644 (file)
@@ -1490,12 +1490,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
                        goto err_sock;
        }
 
-       sk = sock->sk;
-
-       sock_hold(sk);
-       tunnel->sock = sk;
        tunnel->l2tp_net = net;
-
        pn = l2tp_pernet(net);
 
        spin_lock_bh(&pn->l2tp_tunnel_list_lock);
@@ -1510,6 +1505,10 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
        list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
        spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
+       sk = sock->sk;
+       sock_hold(sk);
+       tunnel->sock = sk;
+
        if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
                struct udp_tunnel_sock_cfg udp_cfg = {
                        .sk_user_data = tunnel,
index 51622333d4602a60fa39a877d7fbb721ada64bf1..818aa006034950785768d80a3f5ba8ad5f2f7f0f 100644 (file)
@@ -2891,7 +2891,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
 
        len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
              beacon->proberesp_ies_len + beacon->assocresp_ies_len +
-             beacon->probe_resp_len;
+             beacon->probe_resp_len + beacon->lci_len + beacon->civicloc_len;
 
        new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
        if (!new_beacon)
@@ -2934,8 +2934,9 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
                memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
                pos += beacon->probe_resp_len;
        }
-       if (beacon->ftm_responder)
-               new_beacon->ftm_responder = beacon->ftm_responder;
+
+       /* might copy -1, meaning no changes requested */
+       new_beacon->ftm_responder = beacon->ftm_responder;
        if (beacon->lci) {
                new_beacon->lci_len = beacon->lci_len;
                new_beacon->lci = pos;
index 5836ddeac9e34ecd2aa6e51363679d2cd11f266d..5f3c81e705c7df9ea7ff7c69eb3b6aff00df17ee 100644 (file)
@@ -1015,6 +1015,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        if (local->open_count == 0)
                ieee80211_clear_tx_pending(local);
 
+       sdata->vif.bss_conf.beacon_int = 0;
+
        /*
         * If the interface goes down while suspended, presumably because
         * the device was unplugged and that happens before our resume,
index d2bc8d57c87eb40b943873732e5705514fcd2fc2..bcf5ffc1567a4ff1c6054f7aecc0aa28dfd2c3fa 100644 (file)
@@ -2766,6 +2766,7 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
        struct sta_info *sta;
+       bool result = true;
 
        sdata_info(sdata, "authenticated\n");
        ifmgd->auth_data->done = true;
@@ -2778,15 +2779,18 @@ static bool ieee80211_mark_sta_auth(struct ieee80211_sub_if_data *sdata,
        sta = sta_info_get(sdata, bssid);
        if (!sta) {
                WARN_ONCE(1, "%s: STA %pM not found", sdata->name, bssid);
-               return false;
+               result = false;
+               goto out;
        }
        if (sta_info_move_state(sta, IEEE80211_STA_AUTH)) {
                sdata_info(sdata, "failed moving %pM to auth\n", bssid);
-               return false;
+               result = false;
+               goto out;
        }
-       mutex_unlock(&sdata->local->sta_mtx);
 
-       return true;
+out:
+       mutex_unlock(&sdata->local->sta_mtx);
+       return result;
 }
 
 static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
index 3bd3b57697970f8ad2c756d1ab69a360b465ff38..428f7ad5f9b59f7964405c1c534d2b1a130fe25e 100644 (file)
@@ -1403,6 +1403,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
                return RX_CONTINUE;
 
        if (ieee80211_is_ctl(hdr->frame_control) ||
+           ieee80211_is_nullfunc(hdr->frame_control) ||
            ieee80211_is_qos_nullfunc(hdr->frame_control) ||
            is_multicast_ether_addr(hdr->addr1))
                return RX_CONTINUE;
@@ -3063,7 +3064,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                        cfg80211_sta_opmode_change_notify(sdata->dev,
                                                          rx->sta->addr,
                                                          &sta_opmode,
-                                                         GFP_KERNEL);
+                                                         GFP_ATOMIC);
                        goto handled;
                }
                case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
@@ -3100,7 +3101,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                        cfg80211_sta_opmode_change_notify(sdata->dev,
                                                          rx->sta->addr,
                                                          &sta_opmode,
-                                                         GFP_KERNEL);
+                                                         GFP_ATOMIC);
                        goto handled;
                }
                default:
index aa4afbf0abaf1727b62fe1e6a9ce9605d549b856..a794ca7290001a778d7ef5fde62a7c32b7c110b1 100644 (file)
@@ -964,6 +964,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
                        /* Track when last TDLS packet was ACKed */
                        if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
                                sta->status_stats.last_tdls_pkt_time = jiffies;
+               } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+                       return;
                } else {
                        ieee80211_lost_packet(sta, info);
                }
index e0ccee23fbcdb209a6f7f5704c2aea2d2ef74782..1f536ba573b4852ef18f1a80129e56c4961be520 100644 (file)
@@ -439,8 +439,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
                info->hw_queue = tx->sdata->vif.cab_queue;
 
-       /* no stations in PS mode */
-       if (!atomic_read(&ps->num_sta_ps))
+       /* no stations in PS mode and no buffered packets */
+       if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
                return TX_CONTINUE;
 
        info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
index bc4bd247bb7d42767eb860c05fb4b0b40408304b..1577f2f76060dcd816f94078412f52943568ce40 100644 (file)
@@ -55,11 +55,15 @@ MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
 MODULE_DESCRIPTION("core IP set support");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
 
-/* When the nfnl mutex is held: */
+/* When the nfnl mutex or ip_set_ref_lock is held: */
 #define ip_set_dereference(p)          \
-       rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+       rcu_dereference_protected(p,    \
+               lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+               lockdep_is_held(&ip_set_ref_lock))
 #define ip_set(inst, id)               \
        ip_set_dereference((inst)->ip_set_list)[id]
+#define ip_set_ref_netlink(inst,id)    \
+       rcu_dereference_raw((inst)->ip_set_list)[id]
 
 /* The set types are implemented in modules and registered set types
  * can be found in ip_set_type_list. Adding/deleting types is
@@ -693,21 +697,20 @@ ip_set_put_byindex(struct net *net, ip_set_id_t index)
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
 /* Get the name of a set behind a set index.
- * We assume the set is referenced, so it does exist and
- * can't be destroyed. The set cannot be renamed due to
- * the referencing either.
- *
+ * Set itself is protected by RCU, but its name isn't: to protect against
+ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
+ * name.
  */
-const char *
-ip_set_name_byindex(struct net *net, ip_set_id_t index)
+void
+ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
 {
-       const struct ip_set *set = ip_set_rcu_get(net, index);
+       struct ip_set *set = ip_set_rcu_get(net, index);
 
        BUG_ON(!set);
-       BUG_ON(set->ref == 0);
 
-       /* Referenced, so it's safe */
-       return set->name;
+       read_lock_bh(&ip_set_ref_lock);
+       strncpy(name, set->name, IPSET_MAXNAMELEN);
+       read_unlock_bh(&ip_set_ref_lock);
 }
 EXPORT_SYMBOL_GPL(ip_set_name_byindex);
 
@@ -961,7 +964,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
                        /* Wraparound */
                        goto cleanup;
 
-               list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
+               list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
                if (!list)
                        goto cleanup;
                /* nfnl mutex is held, both lists are valid */
@@ -973,7 +976,7 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
                /* Use new list */
                index = inst->ip_set_max;
                inst->ip_set_max = i;
-               kfree(tmp);
+               kvfree(tmp);
                ret = 0;
        } else if (ret) {
                goto cleanup;
@@ -1153,7 +1156,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
        if (!set)
                return -ENOENT;
 
-       read_lock_bh(&ip_set_ref_lock);
+       write_lock_bh(&ip_set_ref_lock);
        if (set->ref != 0) {
                ret = -IPSET_ERR_REFERENCED;
                goto out;
@@ -1170,7 +1173,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
        strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
 out:
-       read_unlock_bh(&ip_set_ref_lock);
+       write_unlock_bh(&ip_set_ref_lock);
        return ret;
 }
 
@@ -1252,7 +1255,7 @@ ip_set_dump_done(struct netlink_callback *cb)
                struct ip_set_net *inst =
                        (struct ip_set_net *)cb->args[IPSET_CB_NET];
                ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
-               struct ip_set *set = ip_set(inst, index);
+               struct ip_set *set = ip_set_ref_netlink(inst, index);
 
                if (set->variant->uref)
                        set->variant->uref(set, cb, false);
@@ -1441,7 +1444,7 @@ next_set:
 release_refcount:
        /* If there was an error or set is done, release set */
        if (ret || !cb->args[IPSET_CB_ARG0]) {
-               set = ip_set(inst, index);
+               set = ip_set_ref_netlink(inst, index);
                if (set->variant->uref)
                        set->variant->uref(set, cb, false);
                pr_debug("release set %s\n", set->name);
@@ -2059,7 +2062,7 @@ ip_set_net_init(struct net *net)
        if (inst->ip_set_max >= IPSET_INVALID_ID)
                inst->ip_set_max = IPSET_INVALID_ID - 1;
 
-       list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
+       list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
        if (!list)
                return -ENOMEM;
        inst->is_deleted = false;
@@ -2087,7 +2090,7 @@ ip_set_net_exit(struct net *net)
                }
        }
        nfnl_unlock(NFNL_SUBSYS_IPSET);
-       kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+       kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
 }
 
 static struct pernet_operations ip_set_net_ops = {
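
Several fixes land together in this ip_set_core.c section: kcalloc()/
kfree() become kvcalloc()/kvfree() so a large ip_set_list can fall back to
vmalloc; the dump teardown paths switch to a raw-dereference helper (the
set there is pinned by the dump's own reference, so the lockdep conditions
on ip_set_dereference() do not apply); and, the main race, ip_set_rename()
previously took ip_set_ref_lock only for reading while
ip_set_name_byindex() handed out a pointer into set->name that a
concurrent rename could rewrite mid-use. Rename is now a writer and the
name query copies out under the read lock (list_set below is adapted to
the new signature). The locking pattern in pthread terms, names
illustrative:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAXNAMELEN 32

static char set_name[MAXNAMELEN] = "old-name";
static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;

static void name_byindex(char *out)          /* was: return set->name */
{
	pthread_rwlock_rdlock(&ref_lock);
	strncpy(out, set_name, MAXNAMELEN);  /* copy while rename is excluded */
	pthread_rwlock_unlock(&ref_lock);
}

static void rename_set(const char *newname)  /* was: read lock (!), now write */
{
	pthread_rwlock_wrlock(&ref_lock);
	strncpy(set_name, newname, MAXNAMELEN);
	pthread_rwlock_unlock(&ref_lock);
}

int main(void)
{
	char name[MAXNAMELEN];

	rename_set("new-name");
	name_byindex(name);
	printf("%s\n", name);
	return 0;
}
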
index d391485a6acdc2ff3523d5b7d39c20ab4a8add80..613e18e720a44777754428666b9f021de952de9a 100644 (file)
@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR]) {
                e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-               if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+               if (e.cidr[0] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
        if (tb[IPSET_ATTR_CIDR2]) {
                e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-               if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+               if (e.cidr[1] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
@@ -493,13 +493,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 
        if (tb[IPSET_ATTR_CIDR]) {
                e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-               if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+               if (e.cidr[0] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
        if (tb[IPSET_ATTR_CIDR2]) {
                e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-               if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+               if (e.cidr[1] > HOST_MASK)
                        return -IPSET_ERR_INVALID_CIDR;
        }
 
index 072a658fde047c5d9d59ac08e796b25759cc68a1..4eef55da0878e299d0bb912fa7ea69d3d4e91441 100644 (file)
@@ -148,9 +148,7 @@ __list_set_del_rcu(struct rcu_head * rcu)
 {
        struct set_elem *e = container_of(rcu, struct set_elem, rcu);
        struct ip_set *set = e->set;
-       struct list_set *map = set->data;
 
-       ip_set_put_byindex(map->net, e->id);
        ip_set_ext_destroy(set, e);
        kfree(e);
 }
@@ -158,15 +156,21 @@ __list_set_del_rcu(struct rcu_head * rcu)
 static inline void
 list_set_del(struct ip_set *set, struct set_elem *e)
 {
+       struct list_set *map = set->data;
+
        set->elements--;
        list_del_rcu(&e->list);
+       ip_set_put_byindex(map->net, e->id);
        call_rcu(&e->rcu, __list_set_del_rcu);
 }
 
 static inline void
-list_set_replace(struct set_elem *e, struct set_elem *old)
+list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
 {
+       struct list_set *map = set->data;
+
        list_replace_rcu(&old->list, &e->list);
+       ip_set_put_byindex(map->net, old->id);
        call_rcu(&old->rcu, __list_set_del_rcu);
 }
 
@@ -298,7 +302,7 @@ list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
        INIT_LIST_HEAD(&e->list);
        list_set_init_extensions(set, ext, e);
        if (n)
-               list_set_replace(e, n);
+               list_set_replace(set, e, n);
        else if (next)
                list_add_tail_rcu(&e->list, &next->list);
        else if (prev)
@@ -486,6 +490,7 @@ list_set_list(const struct ip_set *set,
        const struct list_set *map = set->data;
        struct nlattr *atd, *nested;
        u32 i = 0, first = cb->args[IPSET_CB_ARG0];
+       char name[IPSET_MAXNAMELEN];
        struct set_elem *e;
        int ret = 0;
 
@@ -504,8 +509,8 @@ list_set_list(const struct ip_set *set,
                nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
                if (!nested)
                        goto nla_put_failure;
-               if (nla_put_string(skb, IPSET_ATTR_NAME,
-                                  ip_set_name_byindex(map->net, e->id)))
+               ip_set_name_byindex(map->net, e->id, name);
+               if (nla_put_string(skb, IPSET_ATTR_NAME, name))
                        goto nla_put_failure;
                if (ip_set_put_extensions(skb, set, e, true))
                        goto nla_put_failure;
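
Two related lifetime fixes in the list-set hunks above: the ip_set_put_byindex() reference drop moves out of the RCU callback (where it only ran after a grace period, leaving a window in which the referenced set could be destroyed and its index slot reused) into list_set_del()/list_set_replace(), which run synchronously; and list_set_list() now copies the set name into an on-stack buffer instead of publishing a pointer that a concurrent rename could invalidate mid-dump. The reworked ip_set_name_byindex() is outside this excerpt, but presumably copies under the ref lock, roughly:

/* Presumed shape of the new helper (ip_set_rcu_get() and the ref lock
 * live in ip_set_core.c, shown earlier): */
void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
        struct ip_set *set = ip_set_rcu_get(net, index);

        BUG_ON(!set);

        /* Copy under the lock so a concurrent rename cannot hand the
         * caller a stale or half-updated name. */
        read_lock_bh(&ip_set_ref_lock);
        strncpy(name, set->name, IPSET_MAXNAMELEN);
        read_unlock_bh(&ip_set_ref_lock);
}
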
index 83395bf6dc35e2a3ea486246e98de99b6e1094da..432141f04af3d98b356332ac9723b7a286f93538 100644 (file)
@@ -3980,6 +3980,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct netns_ipvs *ipvs)
 
 static struct notifier_block ip_vs_dst_notifier = {
        .notifier_call = ip_vs_dst_event,
+#ifdef CONFIG_IP_VS_IPV6
+       .priority = ADDRCONF_NOTIFY_PRIORITY + 5,
+#endif
 };
 
 int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
index d4020c5e831d3020a6e412ead6d1895f81b5a124..2526be6b3d9095abea1ee68bda4e4f3bf608be55 100644 (file)
@@ -1616,7 +1616,7 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
        EnterFunction(7);
 
        /* Receive a packet */
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, buflen);
+       iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, buflen);
        len = sock_recvmsg(sock, &msg, MSG_DONTWAIT);
        if (len < 0)
                return len;
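
The iov_iter_kvec() hunk tracks an API change rather than a bug fix: iterator constructors no longer take the iterator type OR'ed into the direction argument, because the type (ITER_KVEC here) is implied by which constructor is called. Callers now pass only READ or WRITE. A sketch of the new call shape, with hypothetical names:

#include <linux/socket.h>
#include <linux/uio.h>

/* Sketch: build a kernel-space receive iterator; iov_iter_kvec()
 * itself establishes ITER_KVEC, so only the direction is passed. */
static void prep_recv_iter(struct msghdr *msg, struct kvec *iov,
                           void *buf, size_t buflen)
{
        iov->iov_base = buf;
        iov->iov_len = buflen;
        iov_iter_kvec(&msg->msg_iter, READ, iov, 1, buflen);
}
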
index 02ca7df793f5c07233924c051bbbd9faf60854d4..b6d0f6deea86c34437f997533df3f4f7f4f6bd50 100644 (file)
@@ -49,6 +49,7 @@ struct nf_conncount_tuple {
        struct nf_conntrack_zone        zone;
        int                             cpu;
        u32                             jiffies32;
+       bool                            dead;
        struct rcu_head                 rcu_head;
 };
 
@@ -106,15 +107,16 @@ nf_conncount_add(struct nf_conncount_list *list,
        conn->zone = *zone;
        conn->cpu = raw_smp_processor_id();
        conn->jiffies32 = (u32)jiffies;
-       spin_lock(&list->list_lock);
+       conn->dead = false;
+       spin_lock_bh(&list->list_lock);
        if (list->dead == true) {
                kmem_cache_free(conncount_conn_cachep, conn);
-               spin_unlock(&list->list_lock);
+               spin_unlock_bh(&list->list_lock);
                return NF_CONNCOUNT_SKIP;
        }
        list_add_tail(&conn->node, &list->head);
        list->count++;
-       spin_unlock(&list->list_lock);
+       spin_unlock_bh(&list->list_lock);
        return NF_CONNCOUNT_ADDED;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
@@ -132,19 +134,22 @@ static bool conn_free(struct nf_conncount_list *list,
 {
        bool free_entry = false;
 
-       spin_lock(&list->list_lock);
+       spin_lock_bh(&list->list_lock);
 
-       if (list->count == 0) {
-               spin_unlock(&list->list_lock);
-                return free_entry;
+       if (conn->dead) {
+               spin_unlock_bh(&list->list_lock);
+               return free_entry;
        }
 
        list->count--;
+       conn->dead = true;
        list_del_rcu(&conn->node);
-       if (list->count == 0)
+       if (list->count == 0) {
+               list->dead = true;
                free_entry = true;
+       }
 
-       spin_unlock(&list->list_lock);
+       spin_unlock_bh(&list->list_lock);
        call_rcu(&conn->rcu_head, __conn_free);
        return free_entry;
 }
@@ -245,7 +250,7 @@ void nf_conncount_list_init(struct nf_conncount_list *list)
 {
        spin_lock_init(&list->list_lock);
        INIT_LIST_HEAD(&list->head);
-       list->count = 1;
+       list->count = 0;
        list->dead = false;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_list_init);
@@ -259,6 +264,7 @@ bool nf_conncount_gc_list(struct net *net,
        struct nf_conn *found_ct;
        unsigned int collected = 0;
        bool free_entry = false;
+       bool ret = false;
 
        list_for_each_entry_safe(conn, conn_n, &list->head, node) {
                found = find_or_evict(net, list, conn, &free_entry);
@@ -288,7 +294,15 @@ bool nf_conncount_gc_list(struct net *net,
                if (collected > CONNCOUNT_GC_MAX_NODES)
                        return false;
        }
-       return false;
+
+       spin_lock_bh(&list->list_lock);
+       if (!list->count) {
+               list->dead = true;
+               ret = true;
+       }
+       spin_unlock_bh(&list->list_lock);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_gc_list);
 
@@ -309,11 +323,8 @@ static void tree_nodes_free(struct rb_root *root,
        while (gc_count) {
                rbconn = gc_nodes[--gc_count];
                spin_lock(&rbconn->list.list_lock);
-               if (rbconn->list.count == 0 && rbconn->list.dead == false) {
-                       rbconn->list.dead = true;
-                       rb_erase(&rbconn->node, root);
-                       call_rcu(&rbconn->rcu_head, __tree_nodes_free);
-               }
+               rb_erase(&rbconn->node, root);
+               call_rcu(&rbconn->rcu_head, __tree_nodes_free);
                spin_unlock(&rbconn->list.list_lock);
        }
 }
@@ -414,6 +425,7 @@ insert_tree(struct net *net,
        nf_conncount_list_init(&rbconn->list);
        list_add(&conn->node, &rbconn->list.head);
        count = 1;
+       rbconn->list.count = count;
 
        rb_link_node(&rbconn->node, parent, rbnode);
        rb_insert_color(&rbconn->node, root);
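
The nf_conncount hunks above close several races between the add, free and garbage-collection paths: a per-node dead flag ensures only one CPU unlinks a given entry, list->count now starts at 0 and the list is marked dead (under the lock) only once it actually drains, and the plain spin_lock() calls become _bh variants because these paths run from both process and softirq context. Condensed into one place, the delete-side ordering the fixes establish looks roughly like this (types and the __conn_free() callback as in the file above; this is a sketch, not a verbatim copy):

static bool conn_unlink(struct nf_conncount_list *list,
                        struct nf_conncount_tuple *conn)
{
        bool last = false;

        spin_lock_bh(&list->list_lock);
        if (conn->dead) {               /* another CPU got here first */
                spin_unlock_bh(&list->list_lock);
                return false;
        }
        conn->dead = true;
        list->count--;
        list_del_rcu(&conn->node);
        if (list->count == 0) {
                list->dead = true;      /* nf_conncount_add() must now bail */
                last = true;
        }
        spin_unlock_bh(&list->list_lock);
        call_rcu(&conn->rcu_head, __conn_free);
        return last;
}
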
index ca1168d67fac6c0fc1eaef5dfeb1db8428e51db3..e92e749aff53e46c60718b55593e72d70838e9be 100644 (file)
@@ -1073,19 +1073,22 @@ static unsigned int early_drop_list(struct net *net,
        return drops;
 }
 
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
 {
-       unsigned int i;
+       unsigned int i, bucket;
 
        for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
                struct hlist_nulls_head *ct_hash;
-               unsigned int hash, hsize, drops;
+               unsigned int hsize, drops;
 
                rcu_read_lock();
                nf_conntrack_get_ht(&ct_hash, &hsize);
-               hash = reciprocal_scale(_hash++, hsize);
+               if (!i)
+                       bucket = reciprocal_scale(hash, hsize);
+               else
+                       bucket = (bucket + 1) % hsize;
 
-               drops = early_drop_list(net, &ct_hash[hash]);
+               drops = early_drop_list(net, &ct_hash[bucket]);
                rcu_read_unlock();
 
                if (drops) {
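
The early_drop() rework above fixes bucket selection: the old code ran reciprocal_scale() on an incremented raw hash each round, and since reciprocal_scale() maps many consecutive inputs to the same output it did not actually enumerate distinct buckets. The new code scales once to pick a starting bucket, then walks neighbours with wrap-around. The pattern in isolation, as a sketch:

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch: visit 'range' distinct hash buckets starting from the one
 * the packet hash maps to, wrapping at the table size. */
static void visit_buckets(u32 hash, unsigned int hsize, unsigned int range)
{
        unsigned int i, bucket = 0;

        for (i = 0; i < range; i++) {
                bucket = i ? (bucket + 1) % hsize
                           : reciprocal_scale(hash, hsize);
                /* ... try to evict an entry from bucket ... */
        }
}
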
index 171e9e122e5f1e8b8840e41013d86246ba8025b9..023c1445bc3960de8c3d2350d9fb5c8d743e920f 100644 (file)
@@ -384,11 +384,6 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
        },
 };
 
-static inline struct nf_dccp_net *dccp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.dccp;
-}
-
 static noinline bool
 dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
         const struct dccp_hdr *dh)
@@ -401,7 +396,7 @@ dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
        state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
        switch (state) {
        default:
-               dn = dccp_pernet(net);
+               dn = nf_dccp_pernet(net);
                if (dn->dccp_loose == 0) {
                        msg = "not picking up existing connection ";
                        goto out_invalid;
@@ -568,7 +563,7 @@ static int dccp_packet(struct nf_conn *ct, struct sk_buff *skb,
 
        timeouts = nf_ct_timeout_lookup(ct);
        if (!timeouts)
-               timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
+               timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
        nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
        return NF_ACCEPT;
@@ -681,7 +676,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
 static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                      struct net *net, void *data)
 {
-       struct nf_dccp_net *dn = dccp_pernet(net);
+       struct nf_dccp_net *dn = nf_dccp_pernet(net);
        unsigned int *timeouts = data;
        int i;
 
@@ -814,7 +809,7 @@ static int dccp_kmemdup_sysctl_table(struct net *net, struct nf_proto_net *pn,
 
 static int dccp_init_net(struct net *net)
 {
-       struct nf_dccp_net *dn = dccp_pernet(net);
+       struct nf_dccp_net *dn = nf_dccp_pernet(net);
        struct nf_proto_net *pn = &dn->pn;
 
        if (!pn->users) {
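
This file and the per-protocol trackers that follow (generic, GRE, ICMP, ICMPv6, SCTP, TCP, UDP) all make the same mechanical change: the file-local foo_pernet() accessors are deleted in favour of shared nf_foo_pernet() helpers, so that code outside these files -- the nfnetlink_cttimeout hunks further down need exactly this -- can reach the per-netns timeout defaults. The shared helper presumably lives as a one-line inline in a conntrack header not shown in this excerpt, along the lines of:

/* Presumed shape of the shared accessor; it matches the local helper
 * being deleted above. */
static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
{
        return &net->ct.nf_ct_proto.dccp;
}
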
index e10e867e0b55f3203e8a50d4ac7c884201ac1186..5da19d5fbc767f2ca8f22ac4eba09aebb6c59fda 100644 (file)
@@ -27,11 +27,6 @@ static bool nf_generic_should_process(u8 proto)
        }
 }
 
-static inline struct nf_generic_net *generic_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.generic;
-}
-
 static bool generic_pkt_to_tuple(const struct sk_buff *skb,
                                 unsigned int dataoff,
                                 struct net *net, struct nf_conntrack_tuple *tuple)
@@ -58,7 +53,7 @@ static int generic_packet(struct nf_conn *ct,
        }
 
        if (!timeout)
-               timeout = &generic_pernet(nf_ct_net(ct))->timeout;
+               timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
 
        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
@@ -72,7 +67,7 @@ static int generic_packet(struct nf_conn *ct,
 static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
                                         struct net *net, void *data)
 {
-       struct nf_generic_net *gn = generic_pernet(net);
+       struct nf_generic_net *gn = nf_generic_pernet(net);
        unsigned int *timeout = data;
 
        if (!timeout)
@@ -138,7 +133,7 @@ static int generic_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int generic_init_net(struct net *net)
 {
-       struct nf_generic_net *gn = generic_pernet(net);
+       struct nf_generic_net *gn = nf_generic_pernet(net);
        struct nf_proto_net *pn = &gn->pn;
 
        gn->timeout = nf_ct_generic_timeout;
index 9b48dc8b4b885a00d8806038fc5fd0948e60cbca..2a5e56c6d8d9f966be97de8ae9153cf3be260c5b 100644 (file)
 #include <linux/netfilter/nf_conntrack_proto_gre.h>
 #include <linux/netfilter/nf_conntrack_pptp.h>
 
-enum grep_conntrack {
-       GRE_CT_UNREPLIED,
-       GRE_CT_REPLIED,
-       GRE_CT_MAX
-};
-
 static const unsigned int gre_timeouts[GRE_CT_MAX] = {
        [GRE_CT_UNREPLIED]      = 30*HZ,
        [GRE_CT_REPLIED]        = 180*HZ,
 };
 
 static unsigned int proto_gre_net_id __read_mostly;
-struct netns_proto_gre {
-       struct nf_proto_net     nf;
-       rwlock_t                keymap_lock;
-       struct list_head        keymap_list;
-       unsigned int            gre_timeouts[GRE_CT_MAX];
-};
 
 static inline struct netns_proto_gre *gre_pernet(struct net *net)
 {
@@ -402,6 +390,8 @@ static int __init nf_ct_proto_gre_init(void)
 {
        int ret;
 
+       BUILD_BUG_ON(offsetof(struct netns_proto_gre, nf) != 0);
+
        ret = register_pernet_subsys(&proto_gre_net_ops);
        if (ret < 0)
                goto out_pernet;
index 3598520bd19b7b76dbd91bb42e4b8b91713abf2c..de64d8a5fdfd137aca48a9e62a143a01f63bec07 100644 (file)
 
 static const unsigned int nf_ct_icmp_timeout = 30*HZ;
 
-static inline struct nf_icmp_net *icmp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.icmp;
-}
-
 static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
                              struct net *net, struct nf_conntrack_tuple *tuple)
 {
@@ -103,7 +98,7 @@ static int icmp_packet(struct nf_conn *ct,
        }
 
        if (!timeout)
-               timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+               timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
 
        nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
        return NF_ACCEPT;
@@ -275,7 +270,7 @@ static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                      struct net *net, void *data)
 {
        unsigned int *timeout = data;
-       struct nf_icmp_net *in = icmp_pernet(net);
+       struct nf_icmp_net *in = nf_icmp_pernet(net);
 
        if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
                if (!timeout)
@@ -337,7 +332,7 @@ static int icmp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int icmp_init_net(struct net *net)
 {
-       struct nf_icmp_net *in = icmp_pernet(net);
+       struct nf_icmp_net *in = nf_icmp_pernet(net);
        struct nf_proto_net *pn = &in->pn;
 
        in->timeout = nf_ct_icmp_timeout;
index 378618feed5da7df50e09c8ec4f72618953306b0..a15eefb8e3173c5d89268bd7f2a6c076ff787b1c 100644 (file)
 
 static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
 
-static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.icmpv6;
-}
-
 static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
                                unsigned int dataoff,
                                struct net *net,
@@ -87,7 +82,7 @@ static bool icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
 
 static unsigned int *icmpv6_get_timeouts(struct net *net)
 {
-       return &icmpv6_pernet(net)->timeout;
+       return &nf_icmpv6_pernet(net)->timeout;
 }
 
 /* Returns verdict for packet, or -1 for invalid. */
@@ -286,7 +281,7 @@ static int icmpv6_timeout_nlattr_to_obj(struct nlattr *tb[],
                                        struct net *net, void *data)
 {
        unsigned int *timeout = data;
-       struct nf_icmp_net *in = icmpv6_pernet(net);
+       struct nf_icmp_net *in = nf_icmpv6_pernet(net);
 
        if (!timeout)
                timeout = icmpv6_get_timeouts(net);
@@ -348,7 +343,7 @@ static int icmpv6_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int icmpv6_init_net(struct net *net)
 {
-       struct nf_icmp_net *in = icmpv6_pernet(net);
+       struct nf_icmp_net *in = nf_icmpv6_pernet(net);
        struct nf_proto_net *pn = &in->pn;
 
        in->timeout = nf_ct_icmpv6_timeout;
index 3d719d3eb9a38c7709b8d224facdad8820ebded4..d53e3e78f6052a1f8d8fde973ee03b0763470b30 100644 (file)
@@ -146,11 +146,6 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
        }
 };
 
-static inline struct nf_sctp_net *sctp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.sctp;
-}
-
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
 /* Print out the private part of the conntrack. */
 static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -480,7 +475,7 @@ static int sctp_packet(struct nf_conn *ct,
 
        timeouts = nf_ct_timeout_lookup(ct);
        if (!timeouts)
-               timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
+               timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
 
        nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
@@ -599,7 +594,7 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                      struct net *net, void *data)
 {
        unsigned int *timeouts = data;
-       struct nf_sctp_net *sn = sctp_pernet(net);
+       struct nf_sctp_net *sn = nf_sctp_pernet(net);
        int i;
 
        /* set default SCTP timeouts. */
@@ -736,7 +731,7 @@ static int sctp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int sctp_init_net(struct net *net)
 {
-       struct nf_sctp_net *sn = sctp_pernet(net);
+       struct nf_sctp_net *sn = nf_sctp_pernet(net);
        struct nf_proto_net *pn = &sn->pn;
 
        if (!pn->users) {
index 1bcf9984d45e8601646cb2b99dc5f3113a5c8b0a..4dcbd51a8e97f04ad8056374ed892887d2f0798e 100644 (file)
@@ -272,11 +272,6 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
        }
 };
 
-static inline struct nf_tcp_net *tcp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.tcp;
-}
-
 #ifdef CONFIG_NF_CONNTRACK_PROCFS
 /* Print out the private part of the conntrack. */
 static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
@@ -475,7 +470,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
                          const struct tcphdr *tcph)
 {
        struct net *net = nf_ct_net(ct);
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct ip_ct_tcp_state *sender = &state->seen[dir];
        struct ip_ct_tcp_state *receiver = &state->seen[!dir];
        const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
@@ -767,7 +762,7 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
 {
        enum tcp_conntrack new_state;
        struct net *net = nf_ct_net(ct);
-       const struct nf_tcp_net *tn = tcp_pernet(net);
+       const struct nf_tcp_net *tn = nf_tcp_pernet(net);
        const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
        const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
 
@@ -841,7 +836,7 @@ static int tcp_packet(struct nf_conn *ct,
                      const struct nf_hook_state *state)
 {
        struct net *net = nf_ct_net(ct);
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct nf_conntrack_tuple *tuple;
        enum tcp_conntrack new_state, old_state;
        unsigned int index, *timeouts;
@@ -1283,7 +1278,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
 static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                     struct net *net, void *data)
 {
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        unsigned int *timeouts = data;
        int i;
 
@@ -1508,7 +1503,7 @@ static int tcp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int tcp_init_net(struct net *net)
 {
-       struct nf_tcp_net *tn = tcp_pernet(net);
+       struct nf_tcp_net *tn = nf_tcp_pernet(net);
        struct nf_proto_net *pn = &tn->pn;
 
        if (!pn->users) {
index a7aa70370913ce7e8914343270152fb009eb2a63..c879d8d78cfde88a223b961bb203bf7bb48ef1b2 100644 (file)
@@ -32,14 +32,9 @@ static const unsigned int udp_timeouts[UDP_CT_MAX] = {
        [UDP_CT_REPLIED]        = 180*HZ,
 };
 
-static inline struct nf_udp_net *udp_pernet(struct net *net)
-{
-       return &net->ct.nf_ct_proto.udp;
-}
-
 static unsigned int *udp_get_timeouts(struct net *net)
 {
-       return udp_pernet(net)->timeouts;
+       return nf_udp_pernet(net)->timeouts;
 }
 
 static void udp_error_log(const struct sk_buff *skb,
@@ -212,7 +207,7 @@ static int udp_timeout_nlattr_to_obj(struct nlattr *tb[],
                                     struct net *net, void *data)
 {
        unsigned int *timeouts = data;
-       struct nf_udp_net *un = udp_pernet(net);
+       struct nf_udp_net *un = nf_udp_pernet(net);
 
        if (!timeouts)
                timeouts = un->timeouts;
@@ -292,7 +287,7 @@ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn,
 
 static int udp_init_net(struct net *net)
 {
-       struct nf_udp_net *un = udp_pernet(net);
+       struct nf_udp_net *un = nf_udp_pernet(net);
        struct nf_proto_net *pn = &un->pn;
 
        if (!pn->users) {
index 42487d01a3eda2306f78fde4b45919e2e06e941a..2e61aab6ed731356e34df28a6c1c8d41659ad749 100644 (file)
@@ -2457,7 +2457,7 @@ err:
 static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
                                   struct nft_rule *rule)
 {
-       struct nft_expr *expr;
+       struct nft_expr *expr, *next;
 
        /*
         * Careful: some expressions might not be initialized in case this
@@ -2465,8 +2465,9 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
         */
        expr = nft_expr_first(rule);
        while (expr != nft_expr_last(rule) && expr->ops) {
+               next = nft_expr_next(expr);
                nf_tables_expr_destroy(ctx, expr);
-               expr = nft_expr_next(expr);
+               expr = next;
        }
        kfree(rule);
 }
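
The nf_tables_rule_destroy() hunk above fixes a use-after-free: nf_tables_expr_destroy() releases the expression, after which nft_expr_next(expr) read freed memory. The fix caches the successor first, the same save-then-free shape as list_for_each_entry_safe(). The generic idiom, as a self-contained sketch:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
        struct list_head node;
};

/* Sketch: fetch the next element before destroying the current one so
 * the iterator never dereferences freed memory. */
static void destroy_all(struct list_head *head)
{
        struct item *it, *next;

        list_for_each_entry_safe(it, next, head, node) {
                list_del(&it->node);
                kfree(it);      /* 'it' is gone, but 'next' was saved first */
        }
}
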
@@ -2589,17 +2590,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
 
                if (chain->use == UINT_MAX)
                        return -EOVERFLOW;
-       }
-
-       if (nla[NFTA_RULE_POSITION]) {
-               if (!(nlh->nlmsg_flags & NLM_F_CREATE))
-                       return -EOPNOTSUPP;
 
-               pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
-               old_rule = __nft_rule_lookup(chain, pos_handle);
-               if (IS_ERR(old_rule)) {
-                       NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
-                       return PTR_ERR(old_rule);
+               if (nla[NFTA_RULE_POSITION]) {
+                       pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION]));
+                       old_rule = __nft_rule_lookup(chain, pos_handle);
+                       if (IS_ERR(old_rule)) {
+                               NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_POSITION]);
+                               return PTR_ERR(old_rule);
+                       }
                }
        }
 
@@ -2669,21 +2667,14 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
        }
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
-               if (!nft_is_active_next(net, old_rule)) {
-                       err = -ENOENT;
-                       goto err2;
-               }
-               trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
-                                          old_rule);
+               trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule);
                if (trans == NULL) {
                        err = -ENOMEM;
                        goto err2;
                }
-               nft_deactivate_next(net, old_rule);
-               chain->use--;
-
-               if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) {
-                       err = -ENOMEM;
+               err = nft_delrule(&ctx, old_rule);
+               if (err < 0) {
+                       nft_trans_destroy(trans);
                        goto err2;
                }
 
@@ -6324,7 +6315,7 @@ static void nf_tables_commit_chain_free_rules_old(struct nft_rule **rules)
        call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old);
 }
 
-static void nf_tables_commit_chain_active(struct net *net, struct nft_chain *chain)
+static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain)
 {
        struct nft_rule **g0, **g1;
        bool next_genbit;
@@ -6441,11 +6432,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
 
        /* step 2.  Make rules_gen_X visible to packet path */
        list_for_each_entry(table, &net->nft.tables, list) {
-               list_for_each_entry(chain, &table->chains, list) {
-                       if (!nft_is_active_next(net, chain))
-                               continue;
-                       nf_tables_commit_chain_active(net, chain);
-               }
+               list_for_each_entry(chain, &table->chains, list)
+                       nf_tables_commit_chain(net, chain);
        }
 
        /*
index e7a50af1b3d61a6e12fb74eaa9a9ba02f0a8d22b..109b0d27345acc1afac24524c107043d4d10958a 100644 (file)
@@ -382,7 +382,8 @@ err:
 static int
 cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
                            u32 seq, u32 type, int event, u16 l3num,
-                           const struct nf_conntrack_l4proto *l4proto)
+                           const struct nf_conntrack_l4proto *l4proto,
+                           const unsigned int *timeouts)
 {
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
@@ -408,7 +409,7 @@ cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
        if (!nest_parms)
                goto nla_put_failure;
 
-       ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+       ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
        if (ret < 0)
                goto nla_put_failure;
 
@@ -430,6 +431,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
                                 struct netlink_ext_ack *extack)
 {
        const struct nf_conntrack_l4proto *l4proto;
+       unsigned int *timeouts = NULL;
        struct sk_buff *skb2;
        int ret, err;
        __u16 l3num;
@@ -442,12 +444,55 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
        l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
        l4proto = nf_ct_l4proto_find_get(l4num);
 
-       /* This protocol is not supported, skip. */
-       if (l4proto->l4proto != l4num) {
-               err = -EOPNOTSUPP;
+       err = -EOPNOTSUPP;
+       if (l4proto->l4proto != l4num)
                goto err;
+
+       switch (l4proto->l4proto) {
+       case IPPROTO_ICMP:
+               timeouts = &nf_icmp_pernet(net)->timeout;
+               break;
+       case IPPROTO_TCP:
+               timeouts = nf_tcp_pernet(net)->timeouts;
+               break;
+       case IPPROTO_UDP: /* fallthrough */
+       case IPPROTO_UDPLITE:
+               timeouts = nf_udp_pernet(net)->timeouts;
+               break;
+       case IPPROTO_DCCP:
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+               timeouts = nf_dccp_pernet(net)->dccp_timeout;
+#endif
+               break;
+       case IPPROTO_ICMPV6:
+               timeouts = &nf_icmpv6_pernet(net)->timeout;
+               break;
+       case IPPROTO_SCTP:
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+               timeouts = nf_sctp_pernet(net)->timeouts;
+#endif
+               break;
+       case IPPROTO_GRE:
+#ifdef CONFIG_NF_CT_PROTO_GRE
+               if (l4proto->net_id) {
+                       struct netns_proto_gre *net_gre;
+
+                       net_gre = net_generic(net, *l4proto->net_id);
+                       timeouts = net_gre->gre_timeouts;
+               }
+#endif
+               break;
+       case 255:
+               timeouts = &nf_generic_pernet(net)->timeout;
+               break;
+       default:
+               WARN_ONCE(1, "Missing timeouts for proto %d", l4proto->l4proto);
+               break;
        }
 
+       if (!timeouts)
+               goto err;
+
        skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (skb2 == NULL) {
                err = -ENOMEM;
@@ -458,8 +503,7 @@ static int cttimeout_default_get(struct net *net, struct sock *ctnl,
                                          nlh->nlmsg_seq,
                                          NFNL_MSG_TYPE(nlh->nlmsg_type),
                                          IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
-                                         l3num,
-                                         l4proto);
+                                         l3num, l4proto, timeouts);
        if (ret <= 0) {
                kfree_skb(skb2);
                err = -ENOMEM;
index 768292eac2a46afe84df3b8a949a70bf77baf478..7334e0b80a5effe521bb807dda2ab259a62df8d8 100644 (file)
@@ -54,9 +54,11 @@ static bool nft_xt_put(struct nft_xt *xt)
        return false;
 }
 
-static int nft_compat_chain_validate_dependency(const char *tablename,
-                                               const struct nft_chain *chain)
+static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
+                                               const char *tablename)
 {
+       enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
+       const struct nft_chain *chain = ctx->chain;
        const struct nft_base_chain *basechain;
 
        if (!tablename ||
@@ -64,9 +66,12 @@ static int nft_compat_chain_validate_dependency(const char *tablename,
                return 0;
 
        basechain = nft_base_chain(chain);
-       if (strcmp(tablename, "nat") == 0 &&
-           basechain->type->type != NFT_CHAIN_T_NAT)
-               return -EINVAL;
+       if (strcmp(tablename, "nat") == 0) {
+               if (ctx->family != NFPROTO_BRIDGE)
+                       type = NFT_CHAIN_T_NAT;
+               if (basechain->type->type != type)
+                       return -EINVAL;
+       }
 
        return 0;
 }
@@ -342,8 +347,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
                if (target->hooks && !(hook_mask & target->hooks))
                        return -EINVAL;
 
-               ret = nft_compat_chain_validate_dependency(target->table,
-                                                          ctx->chain);
+               ret = nft_compat_chain_validate_dependency(ctx, target->table);
                if (ret < 0)
                        return ret;
        }
@@ -516,6 +520,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
                    void *info)
 {
        struct xt_match *match = expr->ops->data;
+       struct module *me = match->me;
        struct xt_mtdtor_param par;
 
        par.net = ctx->net;
@@ -526,7 +531,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
                par.match->destroy(&par);
 
        if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
-               module_put(match->me);
+               module_put(me);
 }
 
 static void
@@ -590,8 +595,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
                if (match->hooks && !(hook_mask & match->hooks))
                        return -EINVAL;
 
-               ret = nft_compat_chain_validate_dependency(match->table,
-                                                          ctx->chain);
+               ret = nft_compat_chain_validate_dependency(ctx, match->table);
                if (ret < 0)
                        return ret;
        }
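
Two independent fixes in the nft_compat hunks above: the nat-table dependency check is relaxed for the bridge family (bridge base chains use their own chain types, so insisting on NFT_CHAIN_T_NAT there rejected valid rulesets), and __nft_match_destroy() now copies match->me into a local before teardown, so nothing is read through the expression's pointers once nft_xt_put() may have released them. The rule the second fix applies, sketched with hypothetical 'obj' names:

#include <linux/module.h>
#include <linux/slab.h>

struct obj {
        struct module *owner;
};

static void obj_teardown(struct obj *o)
{
        kfree(o);                       /* stands in for the real teardown */
}

/* Sketch: copy out anything still needed before the call that may free
 * its container. */
static void obj_release(struct obj *o)
{
        struct module *owner = o->owner;        /* cache first */

        obj_teardown(o);                        /* may free 'o' */
        module_put(owner);                      /* no access to 'o' here */
}
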
index e82d9a966c45a5fc026b4ac939af294f4f57730b..974525eb92df7246c2e3acbc4cec495dcfd31862 100644 (file)
@@ -214,7 +214,9 @@ static int __init nft_flow_offload_module_init(void)
 {
        int err;
 
-       register_netdevice_notifier(&flow_offload_netdev_notifier);
+       err = register_netdevice_notifier(&flow_offload_netdev_notifier);
+       if (err)
+               goto err;
 
        err = nft_register_expr(&nft_flow_offload_type);
        if (err < 0)
@@ -224,6 +226,7 @@ static int __init nft_flow_offload_module_init(void)
 
 register_expr:
        unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+err:
        return err;
 }
 
index 649d1700ec5ba026307c46596112b6b3fb667255..3cc1b3dc3c3cdb2508cef7825f3bd9c485679fdb 100644 (file)
@@ -24,7 +24,6 @@ struct nft_ng_inc {
        u32                     modulus;
        atomic_t                counter;
        u32                     offset;
-       struct nft_set          *map;
 };
 
 static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
@@ -48,34 +47,11 @@ static void nft_ng_inc_eval(const struct nft_expr *expr,
        regs->data[priv->dreg] = nft_ng_inc_gen(priv);
 }
 
-static void nft_ng_inc_map_eval(const struct nft_expr *expr,
-                               struct nft_regs *regs,
-                               const struct nft_pktinfo *pkt)
-{
-       struct nft_ng_inc *priv = nft_expr_priv(expr);
-       const struct nft_set *map = priv->map;
-       const struct nft_set_ext *ext;
-       u32 result;
-       bool found;
-
-       result = nft_ng_inc_gen(priv);
-       found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-
-       if (!found)
-               return;
-
-       nft_data_copy(&regs->data[priv->dreg],
-                     nft_set_ext_data(ext), map->dlen);
-}
-
 static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
        [NFTA_NG_DREG]          = { .type = NLA_U32 },
        [NFTA_NG_MODULUS]       = { .type = NLA_U32 },
        [NFTA_NG_TYPE]          = { .type = NLA_U32 },
        [NFTA_NG_OFFSET]        = { .type = NLA_U32 },
-       [NFTA_NG_SET_NAME]      = { .type = NLA_STRING,
-                                   .len = NFT_SET_MAXNAMELEN - 1 },
-       [NFTA_NG_SET_ID]        = { .type = NLA_U32 },
 };
 
 static int nft_ng_inc_init(const struct nft_ctx *ctx,
@@ -101,22 +77,6 @@ static int nft_ng_inc_init(const struct nft_ctx *ctx,
                                           NFT_DATA_VALUE, sizeof(u32));
 }
 
-static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
-                              const struct nft_expr *expr,
-                              const struct nlattr * const tb[])
-{
-       struct nft_ng_inc *priv = nft_expr_priv(expr);
-       u8 genmask = nft_genmask_next(ctx->net);
-
-       nft_ng_inc_init(ctx, expr, tb);
-
-       priv->map = nft_set_lookup_global(ctx->net, ctx->table,
-                                         tb[NFTA_NG_SET_NAME],
-                                         tb[NFTA_NG_SET_ID], genmask);
-
-       return PTR_ERR_OR_ZERO(priv->map);
-}
-
 static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
                       u32 modulus, enum nft_ng_types type, u32 offset)
 {
@@ -143,27 +103,10 @@ static int nft_ng_inc_dump(struct sk_buff *skb, const struct nft_expr *expr)
                           priv->offset);
 }
 
-static int nft_ng_inc_map_dump(struct sk_buff *skb,
-                              const struct nft_expr *expr)
-{
-       const struct nft_ng_inc *priv = nft_expr_priv(expr);
-
-       if (nft_ng_dump(skb, priv->dreg, priv->modulus,
-                       NFT_NG_INCREMENTAL, priv->offset) ||
-           nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
-               goto nla_put_failure;
-
-       return 0;
-
-nla_put_failure:
-       return -1;
-}
-
 struct nft_ng_random {
        enum nft_registers      dreg:8;
        u32                     modulus;
        u32                     offset;
-       struct nft_set          *map;
 };
 
 static u32 nft_ng_random_gen(struct nft_ng_random *priv)
@@ -183,25 +126,6 @@ static void nft_ng_random_eval(const struct nft_expr *expr,
        regs->data[priv->dreg] = nft_ng_random_gen(priv);
 }
 
-static void nft_ng_random_map_eval(const struct nft_expr *expr,
-                                  struct nft_regs *regs,
-                                  const struct nft_pktinfo *pkt)
-{
-       struct nft_ng_random *priv = nft_expr_priv(expr);
-       const struct nft_set *map = priv->map;
-       const struct nft_set_ext *ext;
-       u32 result;
-       bool found;
-
-       result = nft_ng_random_gen(priv);
-       found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-       if (!found)
-               return;
-
-       nft_data_copy(&regs->data[priv->dreg],
-                     nft_set_ext_data(ext), map->dlen);
-}
-
 static int nft_ng_random_init(const struct nft_ctx *ctx,
                              const struct nft_expr *expr,
                              const struct nlattr * const tb[])
@@ -226,21 +150,6 @@ static int nft_ng_random_init(const struct nft_ctx *ctx,
                                           NFT_DATA_VALUE, sizeof(u32));
 }
 
-static int nft_ng_random_map_init(const struct nft_ctx *ctx,
-                                 const struct nft_expr *expr,
-                                 const struct nlattr * const tb[])
-{
-       struct nft_ng_random *priv = nft_expr_priv(expr);
-       u8 genmask = nft_genmask_next(ctx->net);
-
-       nft_ng_random_init(ctx, expr, tb);
-       priv->map = nft_set_lookup_global(ctx->net, ctx->table,
-                                         tb[NFTA_NG_SET_NAME],
-                                         tb[NFTA_NG_SET_ID], genmask);
-
-       return PTR_ERR_OR_ZERO(priv->map);
-}
-
 static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
 {
        const struct nft_ng_random *priv = nft_expr_priv(expr);
@@ -249,22 +158,6 @@ static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
                           priv->offset);
 }
 
-static int nft_ng_random_map_dump(struct sk_buff *skb,
-                                 const struct nft_expr *expr)
-{
-       const struct nft_ng_random *priv = nft_expr_priv(expr);
-
-       if (nft_ng_dump(skb, priv->dreg, priv->modulus,
-                       NFT_NG_RANDOM, priv->offset) ||
-           nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
-               goto nla_put_failure;
-
-       return 0;
-
-nla_put_failure:
-       return -1;
-}
-
 static struct nft_expr_type nft_ng_type;
 static const struct nft_expr_ops nft_ng_inc_ops = {
        .type           = &nft_ng_type,
@@ -274,14 +167,6 @@ static const struct nft_expr_ops nft_ng_inc_ops = {
        .dump           = nft_ng_inc_dump,
 };
 
-static const struct nft_expr_ops nft_ng_inc_map_ops = {
-       .type           = &nft_ng_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
-       .eval           = nft_ng_inc_map_eval,
-       .init           = nft_ng_inc_map_init,
-       .dump           = nft_ng_inc_map_dump,
-};
-
 static const struct nft_expr_ops nft_ng_random_ops = {
        .type           = &nft_ng_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
@@ -290,14 +175,6 @@ static const struct nft_expr_ops nft_ng_random_ops = {
        .dump           = nft_ng_random_dump,
 };
 
-static const struct nft_expr_ops nft_ng_random_map_ops = {
-       .type           = &nft_ng_type,
-       .size           = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
-       .eval           = nft_ng_random_map_eval,
-       .init           = nft_ng_random_map_init,
-       .dump           = nft_ng_random_map_dump,
-};
-
 static const struct nft_expr_ops *
 nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
 {
@@ -312,12 +189,8 @@ nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
 
        switch (type) {
        case NFT_NG_INCREMENTAL:
-               if (tb[NFTA_NG_SET_NAME])
-                       return &nft_ng_inc_map_ops;
                return &nft_ng_inc_ops;
        case NFT_NG_RANDOM:
-               if (tb[NFTA_NG_SET_NAME])
-                       return &nft_ng_random_map_ops;
                return &nft_ng_random_ops;
        }
 
index ca5e5d8c5ef8b91cd61cb039d652f4549c343948..b13618c764ec296377778ee405b9067515ada25a 100644 (file)
@@ -50,7 +50,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
        int err;
        u8 ttl;
 
-       if (nla_get_u8(tb[NFTA_OSF_TTL])) {
+       if (tb[NFTA_OSF_TTL]) {
                ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
                if (ttl > 2)
                        return -EINVAL;
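
The nft_osf fix above is a straight NULL dereference: nla_get_u8() was being used to test for the attribute's presence, but tb[NFTA_OSF_TTL] is NULL whenever userspace omitted the attribute. The netlink idiom is to test the table slot first and read only afterwards; a sketch with a generic attribute index:

#include <linux/errno.h>
#include <net/netlink.h>

/* Sketch: tb[attr] is NULL when the attribute was not supplied, so
 * presence must be checked before any nla_get_*() read. */
static int parse_optional_ttl(const struct nlattr * const tb[],
                              int attr, u8 *ttl)
{
        if (!tb[attr])
                return 0;               /* optional: keep the default */

        *ttl = nla_get_u8(tb[attr]);
        return *ttl > 2 ? -EINVAL : 0;
}
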
index c6acfc2d9c8414d36173e3cf09f94ea64f0d7515..eb4cbd244c3d311e2630a4c4cae868f0343c30f0 100644 (file)
@@ -114,6 +114,22 @@ static void idletimer_tg_expired(struct timer_list *t)
        schedule_work(&timer->work);
 }
 
+static int idletimer_check_sysfs_name(const char *name, unsigned int size)
+{
+       int ret;
+
+       ret = xt_check_proc_name(name, size);
+       if (ret < 0)
+               return ret;
+
+       if (!strcmp(name, "power") ||
+           !strcmp(name, "subsystem") ||
+           !strcmp(name, "uevent"))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int idletimer_tg_create(struct idletimer_tg_info *info)
 {
        int ret;
@@ -124,6 +140,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
                goto out;
        }
 
+       ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+       if (ret < 0)
+               goto out_free_timer;
+
        sysfs_attr_init(&info->timer->attr.attr);
        info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
        if (!info->timer->attr.attr.name) {
index dec843cadf462667ae015c146d0c606808d57e65..9e05c86ba5c452f201cf1b1f7d7d59808b994b8a 100644 (file)
@@ -201,18 +201,8 @@ static __net_init int xt_rateest_net_init(struct net *net)
        return 0;
 }
 
-static void __net_exit xt_rateest_net_exit(struct net *net)
-{
-       struct xt_rateest_net *xn = net_generic(net, xt_rateest_id);
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(xn->hash); i++)
-               WARN_ON_ONCE(!hlist_empty(&xn->hash[i]));
-}
-
 static struct pernet_operations xt_rateest_net_ops = {
        .init = xt_rateest_net_init,
-       .exit = xt_rateest_net_exit,
        .id   = &xt_rateest_id,
        .size = sizeof(struct xt_rateest_net),
 };
index 3e7d259e5d8de01148729022f55c44310f7d6ed7..1ad4017f9b7349849d845c9e5bb9a5592533a291 100644 (file)
@@ -295,9 +295,10 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
 
        /* copy match config into hashtable config */
        ret = cfg_copy(&hinfo->cfg, (void *)cfg, 3);
-
-       if (ret)
+       if (ret) {
+               vfree(hinfo);
                return ret;
+       }
 
        hinfo->cfg.size = size;
        if (hinfo->cfg.max == 0)
@@ -814,7 +815,6 @@ hashlimit_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
        int ret;
 
        ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
        if (ret)
                return ret;
 
@@ -830,7 +830,6 @@ hashlimit_mt_v2(const struct sk_buff *skb, struct xt_action_param *par)
        int ret;
 
        ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
        if (ret)
                return ret;
 
@@ -921,7 +920,6 @@ static int hashlimit_mt_check_v1(const struct xt_mtchk_param *par)
                return ret;
 
        ret = cfg_copy(&cfg, (void *)&info->cfg, 1);
-
        if (ret)
                return ret;
 
@@ -940,7 +938,6 @@ static int hashlimit_mt_check_v2(const struct xt_mtchk_param *par)
                return ret;
 
        ret = cfg_copy(&cfg, (void *)&info->cfg, 2);
-
        if (ret)
                return ret;
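
In the xt_hashlimit hunks, htable_create() vmalloc()s hinfo just before the cfg_copy() call, so returning on cfg_copy() failure without vfree() leaked the table; the first hunk adds the missing release (the remaining hunks only drop stray blank lines after cfg_copy() calls). The shape of the fix as a sketch, with a hypothetical setup() step:

#include <linux/vmalloc.h>

static int setup(void *t)
{
        return 0;       /* stands in for cfg_copy() and friends */
}

/* Sketch: once the allocation has succeeded, every later error exit
 * must release it. */
static void *create_table(size_t size)
{
        void *t = vmalloc(size);

        if (!t)
                return NULL;

        if (setup(t) < 0) {
                vfree(t);
                return NULL;
        }
        return t;
}
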
 
index 6bec37ab4472796ecd1f453966b27bb911bf8fa8..cd94f925495a5ef6b6d2192961a0c97d6ac1764f 100644 (file)
@@ -1166,7 +1166,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                                &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                        if (err) {
                                net_warn_ratelimited("openvswitch: zone: %u "
-                                       "execeeds conntrack limit\n",
+                                       "exceeds conntrack limit\n",
                                        info->zone.id);
                                return err;
                        }
@@ -1203,7 +1203,8 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                                         &info->labels.mask);
                if (err)
                        return err;
-       } else if (labels_nonzero(&info->labels.mask)) {
+       } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+                  labels_nonzero(&info->labels.mask)) {
                err = ovs_ct_set_labels(ct, key, &info->labels.value,
                                        &info->labels.mask);
                if (err)
index a70097ecf33c2bf9e9df7b92c2359ab679ae6d7e..865ecef68196900157b29c59b6bd57aff53e9e07 100644 (file)
@@ -3030,7 +3030,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                         * is already present */
                        if (mac_proto != MAC_PROTO_NONE)
                                return -EINVAL;
-                       mac_proto = MAC_PROTO_NONE;
+                       mac_proto = MAC_PROTO_ETHERNET;
                        break;
 
                case OVS_ACTION_ATTR_POP_ETH:
@@ -3038,7 +3038,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
                                return -EINVAL;
                        if (vlan_tci & htons(VLAN_TAG_PRESENT))
                                return -EINVAL;
-                       mac_proto = MAC_PROTO_ETHERNET;
+                       mac_proto = MAC_PROTO_NONE;
                        break;
 
                case OVS_ACTION_ATTR_PUSH_NSH:
index ec3095f13aaee114476a8f06706bfeb7f7739002..a74650e98f423d752e3c49df1388e9c86cb5ee44 100644 (file)
@@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
                void *ph;
                __u32 ts;
 
-               ph = skb_shinfo(skb)->destructor_arg;
+               ph = skb_zcopy_get_nouarg(skb);
                packet_dec_pending(&po->tx_ring);
 
                ts = __packet_set_timestamp(po, ph, skb);
@@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        skb->mark = po->sk.sk_mark;
        skb->tstamp = sockc->transmit_time;
        sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
-       skb_shinfo(skb)->destructor_arg = ph.raw;
+       skb_zcopy_set_nouarg(skb, ph.raw);
 
        skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
index 64362d078da846daead402d0f0a5b88bae04b223..a2522f9d71e266d338f1c0d223ce5bcb670fd00f 100644 (file)
@@ -375,16 +375,35 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * getting ACKs from the server.  Returns a number representing the life state
  * which can be compared to that returned by a previous call.
  *
- * If this is a client call, ping ACKs will be sent to the server to find out
- * whether it's still responsive and whether the call is still alive on the
- * server.
+ * If the life state stalls, rxrpc_kernel_probe_life() should be called and
+ * then 2*RTT waited before checking again.
  */
-u32 rxrpc_kernel_check_life(struct socket *sock, struct rxrpc_call *call)
+u32 rxrpc_kernel_check_life(const struct socket *sock,
+                           const struct rxrpc_call *call)
 {
        return call->acks_latest;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
+/**
+ * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
+ * @sock: The socket the call is on
+ * @call: The call to check
+ *
+ * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
+ * find out whether a call is still alive by pinging it.  This should cause the
+ * life state to be bumped in about 2*RTT.
+ *
+ * This must be called in TASK_RUNNING state on pain of might_sleep() objecting.
+ */
+void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
+{
+       rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+                         rxrpc_propose_ack_ping_for_check_life);
+       rxrpc_send_ack_packet(call, true, NULL);
+}
+EXPORT_SYMBOL(rxrpc_kernel_probe_life);
+
 /**
  * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
  * @sock: The socket the call is on
index 382196e57a26c137f03fea19cb2ff5d8d69c728b..bc628acf4f4ffe7172e1be6591811e056ccb4f1b 100644 (file)
@@ -611,6 +611,7 @@ struct rxrpc_call {
                                                 * not hard-ACK'd packet follows this.
                                                 */
        rxrpc_seq_t             tx_top;         /* Highest Tx slot allocated. */
+       u16                     tx_backoff;     /* Delay to insert due to Tx failure */
 
        /* TCP-style slow-start congestion control [RFC5681].  Since the SMSS
         * is fixed, we keep these numbers in terms of segments (ie. DATA
index 8e7434e92097e8f0a2676bcf87df090daf43ee2e..468efc3660c03805608d5e4f2f146f007e03f9b1 100644 (file)
@@ -123,6 +123,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                else
                        ack_at = expiry;
 
+               ack_at += READ_ONCE(call->tx_backoff);
                ack_at += now;
                if (time_before(ack_at, call->ack_at)) {
                        WRITE_ONCE(call->ack_at, ack_at);
@@ -311,6 +312,7 @@ void rxrpc_process_call(struct work_struct *work)
                container_of(work, struct rxrpc_call, processor);
        rxrpc_serial_t *send_ack;
        unsigned long now, next, t;
+       unsigned int iterations = 0;
 
        rxrpc_see_call(call);
 
@@ -319,6 +321,11 @@ void rxrpc_process_call(struct work_struct *work)
               call->debug_id, rxrpc_call_states[call->state], call->events);
 
 recheck_state:
+       /* Limit the number of times we do this before returning to the manager */
+       iterations++;
+       if (iterations > 5)
+               goto requeue;
+
        if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
                rxrpc_send_abort_packet(call);
                goto recheck_state;
@@ -447,13 +454,16 @@ recheck_state:
        rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
 
        /* other events may have been raised since we started checking */
-       if (call->events && call->state < RXRPC_CALL_COMPLETE) {
-               __rxrpc_queue_call(call);
-               goto out;
-       }
+       if (call->events && call->state < RXRPC_CALL_COMPLETE)
+               goto requeue;
 
 out_put:
        rxrpc_put_call(call, rxrpc_call_put);
 out:
        _leave("");
+       return;
+
+requeue:
+       __rxrpc_queue_call(call);
+       goto out;
 }
index 1894188888391fca2ef98a5324de7bc99c4b381f..736aa92811004cfe5d157abd4827710783f8d57c 100644 (file)
@@ -34,6 +34,21 @@ struct rxrpc_abort_buffer {
 
 static const char rxrpc_keepalive_string[] = "";
 
+/*
+ * Increase Tx backoff on transmission failure and clear it on success.
+ */
+static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
+{
+       if (ret < 0) {
+               u16 tx_backoff = READ_ONCE(call->tx_backoff);
+
+               if (tx_backoff < HZ)
+                       WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+       } else {
+               WRITE_ONCE(call->tx_backoff, 0);
+       }
+}
+
 /*
  * Arrange for a keepalive ping a certain time after we last transmitted.  This
  * lets the far side know we're still interested in this call and helps keep
@@ -210,6 +225,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
        else
                trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
                                      rxrpc_tx_point_call_ack);
+       rxrpc_tx_backoff(call, ret);
 
        if (call->state < RXRPC_CALL_COMPLETE) {
                if (ret < 0) {
@@ -218,7 +234,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
                        rxrpc_propose_ACK(call, pkt->ack.reason,
                                          ntohs(pkt->ack.maxSkew),
                                          ntohl(pkt->ack.serial),
-                                         true, true,
+                                         false, true,
                                          rxrpc_propose_ack_retry_tx);
                } else {
                        spin_lock_bh(&call->lock);
@@ -300,7 +316,7 @@ int rxrpc_send_abort_packet(struct rxrpc_call *call)
        else
                trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
                                      rxrpc_tx_point_call_abort);
-
+       rxrpc_tx_backoff(call, ret);
 
        rxrpc_put_connection(conn);
        return ret;
@@ -413,6 +429,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        else
                trace_rxrpc_tx_packet(call->debug_id, &whdr,
                                      rxrpc_tx_point_call_data_nofrag);
+       rxrpc_tx_backoff(call, ret);
        if (ret == -EMSGSIZE)
                goto send_fragmentable;
 
@@ -445,9 +462,18 @@ done:
                        rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
                                                rxrpc_timer_set_for_normal);
                }
-       }
 
-       rxrpc_set_keepalive(call);
+               rxrpc_set_keepalive(call);
+       } else {
+               /* Cancel the call if the initial transmission fails,
+                * particularly if that's due to network routing issues that
+                * aren't going away anytime soon.  The layer above can arrange
+                * the retransmission.
+                */
+               if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+                       rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+                                                 RX_USER_ABORT, ret);
+       }
 
        _leave(" = %d [%u]", ret, call->peer->maxdata);
        return ret;
@@ -506,6 +532,7 @@ send_fragmentable:
        else
                trace_rxrpc_tx_packet(call->debug_id, &whdr,
                                      rxrpc_tx_point_call_data_frag);
+       rxrpc_tx_backoff(call, ret);
 
        up_write(&conn->params.local->defrag_sem);
        goto done;
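
The rxrpc_tx_backoff() calls threaded through this file implement a linear backoff against persistent transmission failure: every failed send adds one jiffy to call->tx_backoff (saturating at HZ, i.e. one second), the event timers fold that delay in, and any successful send resets it to zero. The data-path hunk also completes the call with an error when the initial transmission fails, on the theory that routing-level failures will not clear quickly. The backoff curve in isolation:

#include <linux/jiffies.h>
#include <linux/kernel.h>

/* Sketch: delay (in jiffies) contributed after n consecutive failed
 * transmissions; one jiffy per failure, saturating at one second. */
static inline unsigned int tx_backoff_after(unsigned int n)
{
        return min_t(unsigned int, n, HZ);
}

With CONFIG_HZ=250, for example, one jiffy is 4ms and the cap is reached after 250 consecutive failures.
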
index 1dae5f2b358fcf3dac2bbc0ef80f53b0dba16c91..c8cf4d10c4355f934c02d407ec725670e36433b8 100644 (file)
@@ -258,7 +258,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
        if (is_redirect) {
                skb2->tc_redirected = 1;
                skb2->tc_from_ingress = skb2->tc_at_ingress;
-
+               if (skb2->tc_from_ingress)
+                       skb2->tstamp = 0;
                /* let's the caller reinsert the packet, if possible */
                if (use_reinsert) {
                        res->ingress = want_ingress;
index da3dd0f68cc24d2c2b1828b43afc323a5786a3f8..2b372a06b432aec5b48209a3ac0e9424c9ae0b10 100644 (file)
@@ -201,7 +201,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                        goto out_release;
                }
        } else {
-               return err;
+               ret = err;
+               goto out_free;
        }
 
        p = to_pedit(*a);
index 052855d47354232f1c5d4762763367d638d96574..ec8ec55e0fe879a35ff17f6cc6542f5efa451886 100644 (file)
@@ -27,10 +27,7 @@ struct tcf_police_params {
        u32                     tcfp_ewma_rate;
        s64                     tcfp_burst;
        u32                     tcfp_mtu;
-       s64                     tcfp_toks;
-       s64                     tcfp_ptoks;
        s64                     tcfp_mtu_ptoks;
-       s64                     tcfp_t_c;
        struct psched_ratecfg   rate;
        bool                    rate_present;
        struct psched_ratecfg   peak;
@@ -41,6 +38,11 @@ struct tcf_police_params {
 struct tcf_police {
        struct tc_action        common;
        struct tcf_police_params __rcu *params;
+
+       spinlock_t              tcfp_lock ____cacheline_aligned_in_smp;
+       s64                     tcfp_toks;
+       s64                     tcfp_ptoks;
+       s64                     tcfp_t_c;
 };
 
 #define to_police(pc) ((struct tcf_police *)pc)
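
The tcf_police restructuring above moves the mutable token-bucket state (tcfp_toks, tcfp_ptoks, tcfp_t_c) out of tcf_police_params and into the action itself. The params structure is RCU-swapped on configuration changes precisely so the datapath can read it locklessly, which only works for read-mostly data; the token counters are written on every packet, so they get their own cacheline-aligned spinlock in struct tcf_police instead. The resulting datapath split, sketched (field names as in the hunks; the token arithmetic is elided):

static int police_act_sketch(struct tcf_police *police)
{
        struct tcf_police_params *p;
        s64 toks;

        rcu_read_lock();
        p = rcu_dereference(police->params);    /* read-mostly config */

        spin_lock_bh(&police->tcfp_lock);       /* mutable bucket state */
        toks = min_t(s64, ktime_get_ns() - police->tcfp_t_c, p->tcfp_burst);
        /* ... consume tokens, write back tcfp_toks and tcfp_t_c ... */
        spin_unlock_bh(&police->tcfp_lock);

        rcu_read_unlock();
        return TC_ACT_OK;
}
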
@@ -83,7 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                               int ovr, int bind, bool rtnl_held,
                               struct netlink_ext_ack *extack)
 {
-       int ret = 0, err;
+       int ret = 0, tcfp_result = TC_ACT_OK, err, size;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
        struct tc_police *parm;
        struct tcf_police *police;
@@ -91,7 +93,6 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        struct tc_action_net *tn = net_generic(net, police_net_id);
        struct tcf_police_params *new;
        bool exists = false;
-       int size;
 
        if (nla == NULL)
                return -EINVAL;
@@ -122,6 +123,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                        return ret;
                }
                ret = ACT_P_CREATED;
+               spin_lock_init(&(to_police(*a)->tcfp_lock));
        } else if (!ovr) {
                tcf_idr_release(*a, bind);
                return -EEXIST;
@@ -157,6 +159,16 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                goto failure;
        }
 
+       if (tb[TCA_POLICE_RESULT]) {
+               tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
+               if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
+                       NL_SET_ERR_MSG(extack,
+                                      "goto chain not allowed on fallback");
+                       err = -EINVAL;
+                       goto failure;
+               }
+       }
+
        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (unlikely(!new)) {
                err = -ENOMEM;
@@ -164,6 +176,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        }
 
        /* No failure allowed after this point */
+       new->tcfp_result = tcfp_result;
        new->tcfp_mtu = parm->mtu;
        if (!new->tcfp_mtu) {
                new->tcfp_mtu = ~0;
@@ -186,28 +199,20 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        }
 
        new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
-       new->tcfp_toks = new->tcfp_burst;
-       if (new->peak_present) {
+       if (new->peak_present)
                new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
                                                         new->tcfp_mtu);
-               new->tcfp_ptoks = new->tcfp_mtu_ptoks;
-       }
 
        if (tb[TCA_POLICE_AVRATE])
                new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);
 
-       if (tb[TCA_POLICE_RESULT]) {
-               new->tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
-               if (TC_ACT_EXT_CMP(new->tcfp_result, TC_ACT_GOTO_CHAIN)) {
-                       NL_SET_ERR_MSG(extack,
-                                      "goto chain not allowed on fallback");
-                       err = -EINVAL;
-                       goto failure;
-               }
-       }
-
        spin_lock_bh(&police->tcf_lock);
-       new->tcfp_t_c = ktime_get_ns();
+       spin_lock_bh(&police->tcfp_lock);
+       police->tcfp_t_c = ktime_get_ns();
+       police->tcfp_toks = new->tcfp_burst;
+       if (new->peak_present)
+               police->tcfp_ptoks = new->tcfp_mtu_ptoks;
+       spin_unlock_bh(&police->tcfp_lock);
        police->tcf_action = parm->action;
        rcu_swap_protected(police->params,
                           new,
@@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
                }
 
                now = ktime_get_ns();
-               toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst);
+               spin_lock_bh(&police->tcfp_lock);
+               toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
                if (p->peak_present) {
-                       ptoks = toks + p->tcfp_ptoks;
+                       ptoks = toks + police->tcfp_ptoks;
                        if (ptoks > p->tcfp_mtu_ptoks)
                                ptoks = p->tcfp_mtu_ptoks;
                        ptoks -= (s64)psched_l2t_ns(&p->peak,
                                                    qdisc_pkt_len(skb));
                }
-               toks += p->tcfp_toks;
+               toks += police->tcfp_toks;
                if (toks > p->tcfp_burst)
                        toks = p->tcfp_burst;
                toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
                if ((toks|ptoks) >= 0) {
-                       p->tcfp_t_c = now;
-                       p->tcfp_toks = toks;
-                       p->tcfp_ptoks = ptoks;
+                       police->tcfp_t_c = now;
+                       police->tcfp_toks = toks;
+                       police->tcfp_ptoks = ptoks;
+                       spin_unlock_bh(&police->tcfp_lock);
                        ret = p->tcfp_result;
                        goto inc_drops;
                }
+               spin_unlock_bh(&police->tcfp_lock);
        }
 
 inc_overlimits:
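
The two hunks above move the mutable token-bucket counters (tcfp_toks, tcfp_ptoks, tcfp_t_c) out of the RCU-managed parameter block and behind a dedicated tcfp_lock, so concurrent classifiers update one shared bucket instead of scribbling on RCU-swapped copies. A minimal userspace sketch of the conform/exceed arithmetic itself (names, units and the clock are illustrative):

#include <stdint.h>
#include <stdio.h>

struct bucket {
	int64_t burst;	/* max token balance, in ns of line time */
	int64_t toks;	/* current balance */
	int64_t t_c;	/* timestamp of last update, ns */
};

static int64_t len_to_ns(uint32_t len, uint64_t rate_Bps)
{
	return (int64_t)((len * 1000000000ULL) / rate_Bps);
}

/* Tokens accrue with elapsed time, are capped at the burst, and a
 * packet conforms only if the balance stays non-negative after being
 * charged the packet's transmission time. */
static int police(struct bucket *b, uint32_t pkt_len, uint64_t rate_Bps,
		  int64_t now)
{
	int64_t toks = now - b->t_c;

	if (toks > b->burst)
		toks = b->burst;
	toks += b->toks;
	if (toks > b->burst)
		toks = b->burst;
	toks -= len_to_ns(pkt_len, rate_Bps);
	if (toks >= 0) {		/* conforming: commit new state */
		b->t_c = now;
		b->toks = toks;
		return 1;
	}
	return 0;			/* exceeding: state unchanged */
}

int main(void)
{
	struct bucket b = { .burst = 20000, .toks = 20000, .t_c = 0 };

	printf("%d\n", police(&b, 1500, 125000000ULL, 0));	/* 1: conforms */
	printf("%d\n", police(&b, 9000, 125000000ULL, 100));	/* 0: bucket drained */
	return 0;
}
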
index 9aada2d0ef06567a962f5bb2e929370278e5a2af..71312d7bd8f490c9b8200ccaac59ea0cd0031da6 100644 (file)
@@ -709,11 +709,23 @@ static int fl_set_enc_opt(struct nlattr **tb, struct fl_flow_key *key,
                          struct netlink_ext_ack *extack)
 {
        const struct nlattr *nla_enc_key, *nla_opt_key, *nla_opt_msk = NULL;
-       int option_len, key_depth, msk_depth = 0;
+       int err, option_len, key_depth, msk_depth = 0;
+
+       err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS],
+                                 TCA_FLOWER_KEY_ENC_OPTS_MAX,
+                                 enc_opts_policy, extack);
+       if (err)
+               return err;
 
        nla_enc_key = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS]);
 
        if (tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]) {
+               err = nla_validate_nested(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK],
+                                         TCA_FLOWER_KEY_ENC_OPTS_MAX,
+                                         enc_opts_policy, extack);
+               if (err)
+                       return err;
+
                nla_opt_msk = nla_data(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
                msk_depth = nla_len(tb[TCA_FLOWER_KEY_ENC_OPTS_MASK]);
        }
@@ -1226,18 +1238,16 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
        if (err)
                goto errout_idr;
 
-       if (!tc_skip_sw(fnew->flags)) {
-               if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
-                       err = -EEXIST;
-                       goto errout_mask;
-               }
-
-               err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
-                                            fnew->mask->filter_ht_params);
-               if (err)
-                       goto errout_mask;
+       if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
+               err = -EEXIST;
+               goto errout_mask;
        }
 
+       err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
+                                    fnew->mask->filter_ht_params);
+       if (err)
+               goto errout_mask;
+
        if (!tc_skip_hw(fnew->flags)) {
                err = fl_hw_replace_filter(tp, fnew, extack);
                if (err)
@@ -1291,9 +1301,8 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
        struct cls_fl_head *head = rtnl_dereference(tp->root);
        struct cls_fl_filter *f = arg;
 
-       if (!tc_skip_sw(f->flags))
-               rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
-                                      f->mask->filter_ht_params);
+       rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
+                              f->mask->filter_ht_params);
        __fl_delete(tp, f, extack);
        *last = list_empty(&head->masks);
        return 0;
index 4b1af706896c07e5a0fe6d542dfcd530acdcf8f5..25a7cf6d380fd1ef5610a43a06dea488121b8206 100644 (file)
@@ -469,22 +469,29 @@ begin:
                goto begin;
        }
        prefetch(&skb->end);
-       f->credit -= qdisc_pkt_len(skb);
+       plen = qdisc_pkt_len(skb);
+       f->credit -= plen;
 
-       if (ktime_to_ns(skb->tstamp) || !q->rate_enable)
+       if (!q->rate_enable)
                goto out;
 
        rate = q->flow_max_rate;
-       if (skb->sk)
-               rate = min(skb->sk->sk_pacing_rate, rate);
-
-       if (rate <= q->low_rate_threshold) {
-               f->credit = 0;
-               plen = qdisc_pkt_len(skb);
-       } else {
-               plen = max(qdisc_pkt_len(skb), q->quantum);
-               if (f->credit > 0)
-                       goto out;
+
+       /* If EDT time was provided for this skb, we need to
+        * update f->time_next_packet only if this qdisc enforces
+        * a flow max rate.
+        */
+       if (!skb->tstamp) {
+               if (skb->sk)
+                       rate = min(skb->sk->sk_pacing_rate, rate);
+
+               if (rate <= q->low_rate_threshold) {
+                       f->credit = 0;
+               } else {
+                       plen = max(plen, q->quantum);
+                       if (f->credit > 0)
+                               goto out;
+               }
        }
        if (rate != ~0UL) {
                u64 len = (u64)plen * NSEC_PER_SEC;
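
This rework teaches fq to honour an skb timestamp as an earliest departure time: the flow's pacing-rate bookkeeping now runs only for unstamped packets. A simplified userspace sketch of the scheduling decision (credit/quantum handling is folded away; names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* A packet carrying an EDT (tstamp != 0) keeps the schedule the stack
 * chose for it; only an unstamped packet is paced from the flow rate. */
static uint64_t flow_time_next_packet(uint64_t now, uint64_t tstamp,
				      uint32_t plen, uint64_t rate_Bps)
{
	if (tstamp)
		return tstamp;
	return now + plen * NSEC_PER_SEC / rate_Bps;
}

int main(void)
{
	/* 1500 bytes at 125 MB/s (1 Gbit/s) occupy 12 us of line time */
	printf("%llu\n", (unsigned long long)
	       flow_time_next_packet(1000, 0, 1500, 125000000ULL));	/* paced */
	printf("%llu\n", (unsigned long long)
	       flow_time_next_packet(1000, 5000, 1500, 125000000ULL));	/* EDT wins */
	return 0;
}
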
index 57b3ad9394ad7a9f42c48d7645a61a6a03b2efe0..22cd46a600576f286803536d45875cd9d537cdca 100644 (file)
@@ -431,6 +431,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        int count = 1;
        int rc = NET_XMIT_SUCCESS;
 
+       /* Do not fool qdisc_drop_all() */
+       skb->prev = NULL;
+
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
                ++count;
@@ -648,15 +651,6 @@ deliver:
                         */
                        skb->dev = qdisc_dev(sch);
 
-#ifdef CONFIG_NET_CLS_ACT
-                       /*
-                        * If it's at ingress let's pretend the delay is
-                        * from the network (tstamp will be updated).
-                        */
-                       if (skb->tc_redirected && skb->tc_from_ingress)
-                               skb->tstamp = 0;
-#endif
-
                        if (q->slot.slot_next) {
                                q->slot.packets_left--;
                                q->slot.bytes_left -= qdisc_pkt_len(skb);
index a827a1f562bf323d03cd5e70ffce67da53401a61..914750b819b2661986a1dca9d0b049a68d020e67 100644 (file)
@@ -118,9 +118,6 @@ static struct sctp_association *sctp_association_init(
        asoc->flowlabel = sp->flowlabel;
        asoc->dscp = sp->dscp;
 
-       /* Initialize default path MTU. */
-       asoc->pathmtu = sp->pathmtu;
-
        /* Set association default SACK delay */
        asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
        asoc->sackfreq = sp->sackfreq;
@@ -252,6 +249,10 @@ static struct sctp_association *sctp_association_init(
                             0, gfp))
                goto fail_init;
 
+       /* Initialize default path MTU. */
+       asoc->pathmtu = sp->pathmtu;
+       sctp_assoc_update_frag_point(asoc);
+
        /* Assume that peer would support both address types unless we are
         * told otherwise.
         */
@@ -434,7 +435,7 @@ static void sctp_association_destroy(struct sctp_association *asoc)
 
        WARN_ON(atomic_read(&asoc->rmem_alloc));
 
-       kfree(asoc);
+       kfree_rcu(asoc, rcu);
        SCTP_DBG_OBJCNT_DEC(assoc);
 }
 
@@ -499,8 +500,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
 void sctp_assoc_rm_peer(struct sctp_association *asoc,
                        struct sctp_transport *peer)
 {
-       struct list_head        *pos;
-       struct sctp_transport   *transport;
+       struct sctp_transport *transport;
+       struct list_head *pos;
+       struct sctp_chunk *ch;
 
        pr_debug("%s: association:%p addr:%pISpc\n",
                 __func__, asoc, &peer->ipaddr.sa);
@@ -564,7 +566,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
         */
        if (!list_empty(&peer->transmitted)) {
                struct sctp_transport *active = asoc->peer.active_path;
-               struct sctp_chunk *ch;
 
                /* Reset the transport of each chunk on this list */
                list_for_each_entry(ch, &peer->transmitted,
@@ -586,6 +587,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
                                sctp_transport_hold(active);
        }
 
+       list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
+               if (ch->transport == peer)
+                       ch->transport = NULL;
+
        asoc->peer.transport_count--;
 
        sctp_transport_free(peer);
index ce8087846f05947d2990f6b6deebcadc7c255ac1..d2048de86e7c267d11b6fadd16535ddd7d8fc1b4 100644 (file)
@@ -191,6 +191,12 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
         * the packet
         */
        max_data = asoc->frag_point;
+       if (unlikely(!max_data)) {
+               max_data = sctp_min_frag_point(sctp_sk(asoc->base.sk),
+                                              sctp_datachk_len(&asoc->stream));
+               pr_warn_ratelimited("%s: asoc:%p frag_point is zero, forcing max_data to default minimum (%zu)",
+                                   __func__, asoc, max_data);
+       }
 
        /* If the peer requested that we authenticate DATA chunks
         * we need to account for bundling of the AUTH chunks along with
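
The guard above keeps a pathological frag_point of zero from reaching the fragmentation loop by falling back to the smallest payload that fits one DATA chunk at the minimum segment size. A rough userspace sketch with placeholder overhead values (the real accounting lives in sctp_min_frag_point()/sctp_mtu_payload()):

#include <stdio.h>

#define SCTP_DEFAULT_MINSEGMENT 512	/* smallest PMTU SCTP plans for */

/* Placeholder overheads, NOT the kernel's exact calculation. */
static size_t min_frag_point(size_t hdr_overhead, size_t datachk_len)
{
	return SCTP_DEFAULT_MINSEGMENT - hdr_overhead - datachk_len;
}

int main(void)
{
	size_t max_data = 0;	/* the pathological value being guarded */

	if (!max_data)
		max_data = min_frag_point(48, 16);
	printf("max_data forced to %zu\n", max_data);
	return 0;
}
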
index 67939ad99c01335267c3cb21ade4b7dc3259f86f..025f48e14a91f19facf557acb27ee45c0dee46c8 100644 (file)
@@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
                sctp_transport_route(tp, NULL, sp);
                if (asoc->param_flags & SPP_PMTUD_ENABLE)
                        sctp_assoc_sync_pmtu(asoc);
+       } else if (!sctp_transport_pmtu_check(tp)) {
+               if (asoc->param_flags & SPP_PMTUD_ENABLE)
+                       sctp_assoc_sync_pmtu(asoc);
        }
 
        if (asoc->pmtu_pending) {
@@ -396,25 +399,6 @@ finish:
        return retval;
 }
 
-static void sctp_packet_release_owner(struct sk_buff *skb)
-{
-       sk_free(skb->sk);
-}
-
-static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
-{
-       skb_orphan(skb);
-       skb->sk = sk;
-       skb->destructor = sctp_packet_release_owner;
-
-       /*
-        * The data chunks have already been accounted for in sctp_sendmsg(),
-        * therefore only reserve a single byte to keep socket around until
-        * the packet has been transmitted.
-        */
-       refcount_inc(&sk->sk_wmem_alloc);
-}
-
 static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
 {
        if (SCTP_OUTPUT_CB(head)->last == head)
@@ -426,6 +410,7 @@ static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
        head->truesize += skb->truesize;
        head->data_len += skb->len;
        head->len += skb->len;
+       refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
 
        __skb_header_release(skb);
 }
@@ -601,7 +586,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
        if (!head)
                goto out;
        skb_reserve(head, packet->overhead + MAX_HEADER);
-       sctp_packet_set_owner_w(head, sk);
+       skb_set_owner_w(head, sk);
 
        /* set sctp header */
        sh = skb_push(head, sizeof(struct sctphdr));
index 9cb854b05342e57a6743ee1fd7e91cab7c09bbd2..c37e1c2dec9d451f5bfc8ffd8a0f8b9d00358316 100644 (file)
@@ -212,7 +212,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
        INIT_LIST_HEAD(&q->retransmit);
        INIT_LIST_HEAD(&q->sacked);
        INIT_LIST_HEAD(&q->abandoned);
-       sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
+       sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
 }
 
 /* Free the outqueue structure and any related pending chunks.
index 4a4fd19712552b9ac3429897cf9f78e65db6214d..f4ac6c592e1396e136311defe312be22ece411d8 100644 (file)
@@ -2462,6 +2462,9 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
                             asoc->c.sinit_max_instreams, gfp))
                goto clean_up;
 
+       /* Update frag_point when stream_interleave may get changed. */
+       sctp_assoc_update_frag_point(asoc);
+
        if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
                goto clean_up;
 
index fc0386e8ff23933a0e5eae47661d39d8799b6a66..b8cebd5a87e5c3571cbc184324ed483d3e6eb9bd 100644 (file)
@@ -3324,8 +3324,7 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
                __u16 datasize = asoc ? sctp_datachk_len(&asoc->stream) :
                                 sizeof(struct sctp_data_chunk);
 
-               min_len = sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT,
-                                          datasize);
+               min_len = sctp_min_frag_point(sp, datasize);
                max_len = SCTP_MAX_CHUNK_LEN - datasize;
 
                if (val < min_len || val > max_len)
@@ -3940,32 +3939,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk,
                                        unsigned int optlen)
 {
        struct sctp_assoc_value params;
-       struct sctp_association *asoc;
-       int retval = -EINVAL;
 
        if (optlen != sizeof(params))
-               goto out;
-
-       if (copy_from_user(&params, optval, optlen)) {
-               retval = -EFAULT;
-               goto out;
-       }
-
-       asoc = sctp_id2assoc(sk, params.assoc_id);
-       if (asoc) {
-               asoc->prsctp_enable = !!params.assoc_value;
-       } else if (!params.assoc_id) {
-               struct sctp_sock *sp = sctp_sk(sk);
+               return -EINVAL;
 
-               sp->ep->prsctp_enable = !!params.assoc_value;
-       } else {
-               goto out;
-       }
+       if (copy_from_user(&params, optval, optlen))
+               return -EFAULT;
 
-       retval = 0;
+       sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value;
 
-out:
-       return retval;
+       return 0;
 }
 
 static int sctp_setsockopt_default_prinfo(struct sock *sk,
@@ -7083,14 +7066,15 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len,
        }
 
        policy = params.sprstat_policy;
-       if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
+       if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
+           ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
                goto out;
 
        asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
        if (!asoc)
                goto out;
 
-       if (policy & SCTP_PR_SCTP_ALL) {
+       if (policy == SCTP_PR_SCTP_ALL) {
                params.sprstat_abandoned_unsent = 0;
                params.sprstat_abandoned_sent = 0;
                for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) {
@@ -7142,7 +7126,8 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len,
        }
 
        policy = params.sprstat_policy;
-       if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)))
+       if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL)) ||
+           ((policy & SCTP_PR_SCTP_ALL) && (policy & SCTP_PR_SCTP_MASK)))
                goto out;
 
        asoc = sctp_id2assoc(sk, params.sprstat_assoc_id);
index ffb940d3b57c1ca90dec8861b33da6d40f01824f..3892e7630f3adf6d42b57244b761c9e1aa558e95 100644 (file)
@@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc,
                goto out;
        }
 
-       stream->incnt = incnt;
        stream->outcnt = outcnt;
 
        asoc->strreset_outstanding = !!out + !!in;
index 80e2119f1c7010ead3c493c60c511c125d6c7149..5fbaf1901571cd2e41a65773ac2f3dc7fa80525d 100644 (file)
@@ -127,6 +127,8 @@ static int smc_release(struct socket *sock)
        smc = smc_sk(sk);
 
        /* cleanup for a dangling non-blocking connect */
+       if (smc->connect_info && sk->sk_state == SMC_INIT)
+               tcp_abort(smc->clcsock->sk, ECONNABORTED);
        flush_work(&smc->connect_work);
        kfree(smc->connect_info);
        smc->connect_info = NULL;
@@ -547,7 +549,8 @@ static int smc_connect_rdma(struct smc_sock *smc,
 
        mutex_lock(&smc_create_lgr_pending);
        local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev,
-                                       ibport, &aclc->lcl, NULL, 0);
+                                       ibport, ntoh24(aclc->qpn), &aclc->lcl,
+                                       NULL, 0);
        if (local_contact < 0) {
                if (local_contact == -ENOMEM)
                        reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
@@ -618,7 +621,7 @@ static int smc_connect_ism(struct smc_sock *smc,
        int rc = 0;
 
        mutex_lock(&smc_create_lgr_pending);
-       local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0,
+       local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0,
                                        NULL, ismdev, aclc->gid);
        if (local_contact < 0)
                return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0);
@@ -1083,7 +1086,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
                                int *local_contact)
 {
        /* allocate connection / link group */
-       *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport,
+       *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0,
                                         &pclc->lcl, NULL, 0);
        if (*local_contact < 0) {
                if (*local_contact == -ENOMEM)
@@ -1107,7 +1110,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
        struct smc_clc_msg_smcd *pclc_smcd;
 
        pclc_smcd = smc_get_clc_msg_smcd(pclc);
-       *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL,
+       *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL,
                                         ismdev, pclc_smcd->gid);
        if (*local_contact < 0) {
                if (*local_contact == -ENOMEM)
index ed5dcf03fe0b6ded9d363c1e5891be76f86fd49c..db83332ac1c8ce285f29e2bcc69e22f203b8afc3 100644 (file)
@@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
                sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
                "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
        BUILD_BUG_ON_MSG(
-               sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
+               offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
                "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
        BUILD_BUG_ON_MSG(
                sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
@@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
 int smcd_cdc_msg_send(struct smc_connection *conn)
 {
        struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+       union smc_host_cursor curs;
        struct smcd_cdc_msg cdc;
        int rc, diff;
 
        memset(&cdc, 0, sizeof(cdc));
        cdc.common.type = SMC_CDC_MSG_TYPE;
-       cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
-       cdc.prod_count = conn->local_tx_ctrl.prod.count;
-
-       cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
-       cdc.cons_count = conn->local_tx_ctrl.cons.count;
-       cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
-       cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
+       curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
+       cdc.prod.wrap = curs.wrap;
+       cdc.prod.count = curs.count;
+       curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
+       cdc.cons.wrap = curs.wrap;
+       cdc.cons.count = curs.count;
+       cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
+       cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
        rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
        if (rc)
                return rc;
-       smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons,
-                     conn);
+       smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
        /* Calculate transmitted data and increment free send buffer space */
        diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
                             &conn->tx_curs_sent);
@@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
 static void smcd_cdc_rx_tsklet(unsigned long data)
 {
        struct smc_connection *conn = (struct smc_connection *)data;
+       struct smcd_cdc_msg *data_cdc;
        struct smcd_cdc_msg cdc;
        struct smc_sock *smc;
 
        if (!conn)
                return;
 
-       memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
+       data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
+       smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
+       smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
        smc = container_of(conn, struct smc_sock, conn);
        smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
 }
index 934df4473a7cebc4f2cb4fa654d7189201745ac2..b5bfe38c7f9b6a87258adc0aecce58c31c2a164a 100644 (file)
@@ -48,21 +48,31 @@ struct smc_cdc_msg {
        struct smc_cdc_producer_flags   prod_flags;
        struct smc_cdc_conn_state_flags conn_state_flags;
        u8                              reserved[18];
-} __packed;                                    /* format defined in RFC7609 */
+};
+
+/* SMC-D cursor format */
+union smcd_cdc_cursor {
+       struct {
+               u16     wrap;
+               u32     count;
+               struct smc_cdc_producer_flags   prod_flags;
+               struct smc_cdc_conn_state_flags conn_state_flags;
+       } __packed;
+#ifdef KERNEL_HAS_ATOMIC64
+       atomic64_t              acurs;          /* for atomic processing */
+#else
+       u64                     acurs;          /* for atomic processing */
+#endif
+} __aligned(8);
 
 /* CDC message for SMC-D */
 struct smcd_cdc_msg {
        struct smc_wr_rx_hdr common;    /* Type = 0xFE */
        u8 res1[7];
-       u16 prod_wrap;
-       u32 prod_count;
-       u8 res2[2];
-       u16 cons_wrap;
-       u32 cons_count;
-       struct smc_cdc_producer_flags   prod_flags;
-       struct smc_cdc_conn_state_flags conn_state_flags;
+       union smcd_cdc_cursor   prod;
+       union smcd_cdc_cursor   cons;
        u8 res3[8];
-} __packed;
+} __aligned(8);
 
 static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn)
 {
@@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt,
 #endif
 }
 
+static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt,
+                                 union smcd_cdc_cursor *src,
+                                 struct smc_connection *conn)
+{
+#ifndef KERNEL_HAS_ATOMIC64
+       unsigned long flags;
+
+       spin_lock_irqsave(&conn->acurs_lock, flags);
+       tgt->acurs = src->acurs;
+       spin_unlock_irqrestore(&conn->acurs_lock, flags);
+#else
+       atomic64_set(&tgt->acurs, atomic64_read(&src->acurs));
+#endif
+}
+
 /* calculate cursor difference between old and new, where old <= new */
 static inline int smc_curs_diff(unsigned int size,
                                union smc_host_cursor *old,
@@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local,
 static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local,
                                        struct smcd_cdc_msg *peer)
 {
-       local->prod.wrap = peer->prod_wrap;
-       local->prod.count = peer->prod_count;
-       local->cons.wrap = peer->cons_wrap;
-       local->cons.count = peer->cons_count;
-       local->prod_flags = peer->prod_flags;
-       local->conn_state_flags = peer->conn_state_flags;
+       union smc_host_cursor temp;
+
+       temp.wrap = peer->prod.wrap;
+       temp.count = peer->prod.count;
+       atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs));
+
+       temp.wrap = peer->cons.wrap;
+       temp.count = peer->cons.count;
+       atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs));
+       local->prod_flags = peer->cons.prod_flags;
+       local->conn_state_flags = peer->cons.conn_state_flags;
 }
 
 static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local,
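
The repacked SMC-D cursor lets wrap/count/flags share storage with one 64-bit word, so producer and consumer exchange a whole cursor with a single atomic64 access instead of field-by-field copies that can tear. A userspace sketch of the union trick using C11 atomics (it leans on GCC-style union punning and drops the flag bytes for brevity):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative cursor: the logical fields and an atomic 64-bit "view"
 * overlay the same 8 bytes, so publishing or snapshotting the cursor
 * is one atomic operation and wrap/count can never be seen torn. */
union cursor {
	struct {
		uint16_t wrap;
		uint32_t count;
	} __attribute__((packed));
	_Atomic uint64_t acurs;
} __attribute__((aligned(8)));

static void cursor_copy(union cursor *tgt, union cursor *src)
{
	atomic_store(&tgt->acurs, atomic_load(&src->acurs));
}

int main(void)
{
	union cursor prod, snap;

	memset(&prod, 0, sizeof(prod));
	memset(&snap, 0, sizeof(snap));
	prod.wrap = 3;
	prod.count = 4096;
	cursor_copy(&snap, &prod);
	printf("wrap=%u count=%u\n", snap.wrap, snap.count);
	return 0;
}
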
index 52241d679cc91cd3b6ec59a4388d33b68c76f1f1..89c3a8c7859a3ae11a63d8db5e8b42e0ca26a897 100644 (file)
@@ -286,7 +286,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
         */
        krflags = MSG_PEEK | MSG_WAITALL;
        smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
+       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1,
                        sizeof(struct smc_clc_msg_hdr));
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
        if (signal_pending(current)) {
@@ -325,7 +325,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
 
        /* receive the complete CLC message */
        memset(&msg, 0, sizeof(struct msghdr));
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
+       iov_iter_kvec(&msg.msg_iter, READ, &vec, 1, datlen);
        krflags = MSG_WAITALL;
        len = sock_recvmsg(smc->clcsock, &msg, krflags);
        if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
index 18daebcef1813eadd35f287eaa9c8e922ad98005..1c9fa7f0261a3c723c47bde5a3430de72a3278bf 100644 (file)
@@ -184,6 +184,8 @@ free:
 
                if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
                        smc_llc_link_inactive(lnk);
+               if (lgr->is_smcd)
+                       smc_ism_signal_shutdown(lgr);
                smc_lgr_free(lgr);
        }
 }
@@ -485,7 +487,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport)
 }
 
 /* Called when SMC-D device is terminated or peer is lost */
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan)
 {
        struct smc_link_group *lgr, *l;
        LIST_HEAD(lgr_free_list);
@@ -495,7 +497,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
        list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) {
                if (lgr->is_smcd && lgr->smcd == dev &&
                    (!peer_gid || lgr->peer_gid == peer_gid) &&
-                   !list_empty(&lgr->list)) {
+                   (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) {
                        __smc_lgr_terminate(lgr);
                        list_move(&lgr->list, &lgr_free_list);
                }
@@ -506,6 +508,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid)
        list_for_each_entry_safe(lgr, l, &lgr_free_list, list) {
                list_del_init(&lgr->list);
                cancel_delayed_work_sync(&lgr->free_work);
+               if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */
+                       smc_ism_signal_shutdown(lgr);
                smc_lgr_free(lgr);
        }
 }
@@ -559,7 +563,7 @@ out:
 
 static bool smcr_lgr_match(struct smc_link_group *lgr,
                           struct smc_clc_msg_local *lcl,
-                          enum smc_lgr_role role)
+                          enum smc_lgr_role role, u32 clcqpn)
 {
        return !memcmp(lgr->peer_systemid, lcl->id_for_peer,
                       SMC_SYSTEMID_LEN) &&
@@ -567,7 +571,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr,
                        SMC_GID_SIZE) &&
                !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
                        sizeof(lcl->mac)) &&
-               lgr->role == role;
+               lgr->role == role &&
+               (lgr->role == SMC_SERV ||
+                lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn);
 }
 
 static bool smcd_lgr_match(struct smc_link_group *lgr,
@@ -578,7 +584,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr,
 
 /* create a new SMC connection (and a new link group if necessary) */
 int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-                   struct smc_ib_device *smcibdev, u8 ibport,
+                   struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
                    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
                    u64 peer_gid)
 {
@@ -603,7 +609,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
        list_for_each_entry(lgr, &smc_lgr_list.list, list) {
                write_lock_bh(&lgr->conns_lock);
                if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) :
-                    smcr_lgr_match(lgr, lcl, role)) &&
+                    smcr_lgr_match(lgr, lcl, role, clcqpn)) &&
                    !lgr->sync_err &&
                    lgr->vlan_id == vlan_id &&
                    (role == SMC_CLNT ||
@@ -1024,6 +1030,8 @@ void smc_core_exit(void)
                        smc_llc_link_inactive(lnk);
                }
                cancel_delayed_work_sync(&lgr->free_work);
+               if (lgr->is_smcd)
+                       smc_ism_signal_shutdown(lgr);
                smc_lgr_free(lgr); /* free link group */
        }
 }
index c156674733c9dcb37af68e47a660785d140231b3..cf98f4d6093e940ad8c5b184439a2e9b3545f203 100644 (file)
@@ -247,7 +247,8 @@ void smc_lgr_free(struct smc_link_group *lgr);
 void smc_lgr_forget(struct smc_link_group *lgr);
 void smc_lgr_terminate(struct smc_link_group *lgr);
 void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
-void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid);
+void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
+                       unsigned short vlan);
 int smc_buf_create(struct smc_sock *smc, bool is_smcd);
 int smc_uncompress_bufsize(u8 compressed);
 int smc_rmb_rtoken_handling(struct smc_connection *conn,
@@ -262,7 +263,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id);
 
 void smc_conn_free(struct smc_connection *conn);
 int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact,
-                   struct smc_ib_device *smcibdev, u8 ibport,
+                   struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn,
                    struct smc_clc_msg_local *lcl, struct smcd_dev *smcd,
                    u64 peer_gid);
 void smcd_conn_free(struct smc_connection *conn);
index e36f21ce725208c62164847602e99df91d2c40ba..2fff79db1a59ce3d2908722941dd9355810c65a0 100644 (file)
@@ -187,22 +187,28 @@ struct smc_ism_event_work {
 #define ISM_EVENT_REQUEST              0x0001
 #define ISM_EVENT_RESPONSE             0x0002
 #define ISM_EVENT_REQUEST_IR           0x00000001
+#define ISM_EVENT_CODE_SHUTDOWN                0x80
 #define ISM_EVENT_CODE_TESTLINK                0x83
 
+union smcd_sw_event_info {
+       u64     info;
+       struct {
+               u8              uid[SMC_LGR_ID_SIZE];
+               unsigned short  vlan_id;
+               u16             code;
+       };
+};
+
 static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
 {
-       union {
-               u64     info;
-               struct {
-                       u32             uid;
-                       unsigned short  vlanid;
-                       u16             code;
-               };
-       } ev_info;
+       union smcd_sw_event_info ev_info;
 
+       ev_info.info = wrk->event.info;
        switch (wrk->event.code) {
+       case ISM_EVENT_CODE_SHUTDOWN:   /* Peer shut down DMBs */
+               smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
+               break;
        case ISM_EVENT_CODE_TESTLINK:   /* Activity timer */
-               ev_info.info = wrk->event.info;
                if (ev_info.code == ISM_EVENT_REQUEST) {
                        ev_info.code = ISM_EVENT_RESPONSE;
                        wrk->smcd->ops->signal_event(wrk->smcd,
@@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
        }
 }
 
+int smc_ism_signal_shutdown(struct smc_link_group *lgr)
+{
+       int rc;
+       union smcd_sw_event_info ev_info;
+
+       memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
+       ev_info.vlan_id = lgr->vlan_id;
+       ev_info.code = ISM_EVENT_REQUEST;
+       rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
+                                         ISM_EVENT_REQUEST_IR,
+                                         ISM_EVENT_CODE_SHUTDOWN,
+                                         ev_info.info);
+       return rc;
+}
+
 /* worker for SMC-D events */
 static void smc_ism_event_work(struct work_struct *work)
 {
@@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work)
 
        switch (wrk->event.type) {
        case ISM_EVENT_GID:     /* GID event, token is peer GID */
-               smc_smcd_terminate(wrk->smcd, wrk->event.tok);
+               smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
                break;
        case ISM_EVENT_DMB:
                break;
@@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd)
        spin_unlock(&smcd_dev_list.lock);
        flush_workqueue(smcd->event_wq);
        destroy_workqueue(smcd->event_wq);
-       smc_smcd_terminate(smcd, 0);
+       smc_smcd_terminate(smcd, 0, VLAN_VID_MASK);
 
        device_del(&smcd->dev);
 }
index aee45b860b799856c80862bc36160618af17610b..4da946cbfa29c2d7c233f16c983eaf4b738bd6bb 100644 (file)
@@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size,
 int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
 int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
                  void *data, size_t len);
+int smc_ism_signal_shutdown(struct smc_link_group *lgr);
 #endif
index 3c458d27985574efc929c21565fd0aaae58f2c20..c2694750a6a8abe2f1353dae02e95420e5a01b48 100644 (file)
@@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link,
 
        pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
        if (pend->idx < link->wr_tx_cnt) {
+               u32 idx = pend->idx;
+
                /* clear the full struct smc_wr_tx_pend including .priv */
                memset(&link->wr_tx_pends[pend->idx], 0,
                       sizeof(link->wr_tx_pends[pend->idx]));
                memset(&link->wr_tx_bufs[pend->idx], 0,
                       sizeof(link->wr_tx_bufs[pend->idx]));
-               test_and_clear_bit(pend->idx, link->wr_tx_mask);
+               test_and_clear_bit(idx, link->wr_tx_mask);
                return 1;
        }
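
The new local is the whole fix: pend->idx must be read before the memset() that wipes the containing slot, otherwise the test_and_clear_bit() above would release slot 0 instead of the slot actually finished. A small demonstration of the ordering (illustrative types):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pend { uint32_t idx; /* ...other per-slot bookkeeping... */ };

/* Reading a field of a struct after memset()ing that struct yields 0;
 * caching the index first keeps the bit-clear aimed at the right slot. */
static uint32_t put_slot(struct pend *p, uint64_t *mask)
{
	uint32_t idx = p->idx;		/* cache BEFORE clearing */

	memset(p, 0, sizeof(*p));	/* this wipes p->idx */
	*mask &= ~(1ULL << idx);	/* still releases the right slot */
	return idx;
}

int main(void)
{
	struct pend p = { .idx = 5 };
	uint64_t mask = 1ULL << 5;

	printf("released slot %u, mask=%llu\n", put_slot(&p, &mask),
	       (unsigned long long)mask);
	return 0;
}
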
 
index 99c96851469f4cfa1aa7e8bd9606e5e487b6bee3..334fcc617ef2737acd0a84f7921e28a88771756f 100644 (file)
@@ -635,7 +635,7 @@ EXPORT_SYMBOL(sock_sendmsg);
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size)
 {
-       iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+       iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
        return sock_sendmsg(sock, msg);
 }
 EXPORT_SYMBOL(kernel_sendmsg);
@@ -648,7 +648,7 @@ int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
        if (!sock->ops->sendmsg_locked)
                return sock_no_sendmsg_locked(sk, msg, size);
 
-       iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, vec, num, size);
+       iov_iter_kvec(&msg->msg_iter, WRITE, vec, num, size);
 
        return sock->ops->sendmsg_locked(sk, msg, msg_data_left(msg));
 }
@@ -823,7 +823,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
        mm_segment_t oldfs = get_fs();
        int result;
 
-       iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, vec, num, size);
+       iov_iter_kvec(&msg->msg_iter, READ, vec, num, size);
        set_fs(KERNEL_DS);
        result = sock_recvmsg(sock, msg, flags);
        set_fs(oldfs);
@@ -853,7 +853,7 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
        struct socket *sock = file->private_data;
 
        if (unlikely(!sock->ops->splice_read))
-               return -EINVAL;
+               return generic_file_splice_read(file, ppos, pipe, len, flags);
 
        return sock->ops->splice_read(sock, ppos, pipe, len, flags);
 }
index d8831b988b1e7a3273c73ee2457615b26c24b529..ab4a3be1542a0a6fcfb35153da1a62b3f852c2b6 100644 (file)
@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
 {
        struct auth_cred *acred = &container_of(cred, struct generic_cred,
                                                gc_base)->acred;
-       bool ret;
-
-       get_rpccred(cred);
-       ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
-       put_rpccred(cred);
-
-       return ret;
+       return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
 }
 
 static const struct rpc_credops generic_credops = {
index 30f970cdc7f66375d45e5363dfa07cab233f4978..ba765473d1f0662ef79f6be24b499bd4c8a29509 100644 (file)
@@ -1239,36 +1239,59 @@ gss_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
        return &gss_auth->rpc_auth;
 }
 
+static struct gss_cred *
+gss_dup_cred(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
+{
+       struct gss_cred *new;
+
+       /* Make a copy of the cred so that we can reference count it */
+       new = kzalloc(sizeof(*gss_cred), GFP_NOIO);
+       if (new) {
+               struct auth_cred acred = {
+                       .uid = gss_cred->gc_base.cr_uid,
+               };
+               struct gss_cl_ctx *ctx =
+                       rcu_dereference_protected(gss_cred->gc_ctx, 1);
+
+               rpcauth_init_cred(&new->gc_base, &acred,
+                               &gss_auth->rpc_auth,
+                               &gss_nullops);
+               new->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
+               new->gc_service = gss_cred->gc_service;
+               new->gc_principal = gss_cred->gc_principal;
+               kref_get(&gss_auth->kref);
+               rcu_assign_pointer(new->gc_ctx, ctx);
+               gss_get_ctx(ctx);
+       }
+       return new;
+}
+
 /*
- * gss_destroying_context will cause the RPCSEC_GSS to send a NULL RPC call
+ * gss_send_destroy_context will cause the RPCSEC_GSS to send a NULL RPC call
  * to the server with the GSS control procedure field set to
  * RPC_GSS_PROC_DESTROY. This should normally cause the server to release
  * all RPCSEC_GSS state associated with that context.
  */
-static int
-gss_destroying_context(struct rpc_cred *cred)
+static void
+gss_send_destroy_context(struct rpc_cred *cred)
 {
        struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
        struct gss_auth *gss_auth = container_of(cred->cr_auth, struct gss_auth, rpc_auth);
        struct gss_cl_ctx *ctx = rcu_dereference_protected(gss_cred->gc_ctx, 1);
+       struct gss_cred *new;
        struct rpc_task *task;
 
-       if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
-               return 0;
+       new = gss_dup_cred(gss_auth, gss_cred);
+       if (new) {
+               ctx->gc_proc = RPC_GSS_PROC_DESTROY;
 
-       ctx->gc_proc = RPC_GSS_PROC_DESTROY;
-       cred->cr_ops = &gss_nullops;
+               task = rpc_call_null(gss_auth->client, &new->gc_base,
+                               RPC_TASK_ASYNC|RPC_TASK_SOFT);
+               if (!IS_ERR(task))
+                       rpc_put_task(task);
 
-       /* Take a reference to ensure the cred will be destroyed either
-        * by the RPC call or by the put_rpccred() below */
-       get_rpccred(cred);
-
-       task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
-       if (!IS_ERR(task))
-               rpc_put_task(task);
-
-       put_rpccred(cred);
-       return 1;
+               put_rpccred(&new->gc_base);
+       }
 }
 
 /* gss_destroy_cred (and gss_free_ctx) are used to clean up after failure
@@ -1330,8 +1353,8 @@ static void
 gss_destroy_cred(struct rpc_cred *cred)
 {
 
-       if (gss_destroying_context(cred))
-               return;
+       if (test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
+               gss_send_destroy_context(cred);
        gss_destroy_nullcred(cred);
 }
 
@@ -1768,6 +1791,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
        for (i=0; i < rqstp->rq_enc_pages_num; i++)
                __free_page(rqstp->rq_enc_pages[i]);
        kfree(rqstp->rq_enc_pages);
+       rqstp->rq_release_snd_buf = NULL;
 }
 
 static int
@@ -1776,6 +1800,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
        struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
        int first, last, i;
 
+       if (rqstp->rq_release_snd_buf)
+               rqstp->rq_release_snd_buf(rqstp);
+
        if (snd_buf->page_len == 0) {
                rqstp->rq_enc_pages_num = 0;
                return 0;
index 7f0424dfa8f6df7c5d5d2ce7c2c20b12ab366e30..eab71fc7af3e00ff72a339c1f3d7f1e910358e89 100644 (file)
@@ -274,6 +274,7 @@ out_err:
 static int
 gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
 {
+       u32 seq_send;
        int tmp;
 
        p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
@@ -315,9 +316,10 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
        p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
        if (IS_ERR(p))
                goto out_err;
-       p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
+       p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send));
        if (IS_ERR(p))
                goto out_err;
+       atomic_set(&ctx->seq_send, seq_send);
        p = simple_get_netobj(p, end, &ctx->mech_used);
        if (IS_ERR(p))
                goto out_err;
@@ -607,6 +609,7 @@ static int
 gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
                gfp_t gfp_mask)
 {
+       u64 seq_send64;
        int keylen;
 
        p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags));
@@ -617,14 +620,15 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
        p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
        if (IS_ERR(p))
                goto out_err;
-       p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64));
+       p = simple_get_bytes(p, end, &seq_send64, sizeof(seq_send64));
        if (IS_ERR(p))
                goto out_err;
+       atomic64_set(&ctx->seq_send64, seq_send64);
        /* set seq_send for use by "older" enctypes */
-       ctx->seq_send = ctx->seq_send64;
-       if (ctx->seq_send64 != ctx->seq_send) {
-               dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
-                       (unsigned long)ctx->seq_send64, ctx->seq_send);
+       atomic_set(&ctx->seq_send, seq_send64);
+       if (seq_send64 != atomic_read(&ctx->seq_send)) {
+               dprintk("%s: seq_send64 %llx, seq_send %x overflow?\n", __func__,
+                       seq_send64, atomic_read(&ctx->seq_send));
                p = ERR_PTR(-EINVAL);
                goto out_err;
        }
index b4adeb06660b15f6ffad21e5e0d79a88b61b1fd5..48fe4a591b543bb5f29969e1713ac0c5d51f7a5a 100644 (file)
@@ -123,30 +123,6 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
        return krb5_hdr;
 }
 
-u32
-gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
-{
-       u32 old, seq_send = READ_ONCE(ctx->seq_send);
-
-       do {
-               old = seq_send;
-               seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
-       } while (old != seq_send);
-       return seq_send;
-}
-
-u64
-gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
-{
-       u64 old, seq_send = READ_ONCE(ctx->seq_send);
-
-       do {
-               old = seq_send;
-               seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
-       } while (old != seq_send);
-       return seq_send;
-}
-
 static u32
 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
                struct xdr_netobj *token)
@@ -177,7 +153,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
 
        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-       seq_send = gss_seq_send_fetch_and_inc(ctx);
+       seq_send = atomic_fetch_inc(&ctx->seq_send);
 
        if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
                              seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
@@ -205,7 +181,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
 
        /* Set up the sequence number. Now 64-bits in clear
         * text and w/o direction indicator */
-       seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
+       seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));
        memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
 
        if (ctx->initiate) {
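
Both sealing paths now take their sequence number with atomic{,64}_fetch_inc() instead of the removed hand-rolled cmpxchg loops; the two forms are equivalent fetch-and-increment operations. A userspace sketch of each, using C11 atomics:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t seq_send64;

/* What the removed helper did by hand with a cmpxchg loop... */
static uint64_t seq_fetch_inc_cmpxchg(_Atomic uint64_t *seq)
{
	uint64_t old = atomic_load(seq);

	while (!atomic_compare_exchange_weak(seq, &old, old + 1))
		;		/* 'old' is refreshed on each failure */
	return old;
}

/* ...and the one-liner that replaces it. */
static uint64_t seq_fetch_inc(_Atomic uint64_t *seq)
{
	return atomic_fetch_add(seq, 1);
}

int main(void)
{
	printf("%llu\n", (unsigned long long)seq_fetch_inc_cmpxchg(&seq_send64));
	printf("%llu\n", (unsigned long long)seq_fetch_inc(&seq_send64));
	return 0;
}
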
index 962fa84e6db114f95790f8d6bba485fe226ed43e..5cdde6cb703a423ff48682f86e5275e331bbe242 100644 (file)
@@ -228,7 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
        memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
 
-       seq_send = gss_seq_send_fetch_and_inc(kctx);
+       seq_send = atomic_fetch_inc(&kctx->seq_send);
 
        /* XXX would probably be more efficient to compute checksum
         * and encrypt at the same time: */
@@ -475,7 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
        *be16ptr++ = 0;
 
        be64ptr = (__be64 *)be16ptr;
-       *be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
+       *be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));
 
        err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
        if (err)
index ae3b8145da35a236cb24a7aff544b3f99d67547d..c6782aa475257bb510402a2172c8d4f55706a79a 100644 (file)
@@ -1915,6 +1915,13 @@ call_connect_status(struct rpc_task *task)
        struct rpc_clnt *clnt = task->tk_client;
        int status = task->tk_status;
 
+       /* Check if the task was already transmitted */
+       if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+               xprt_end_transmit(task);
+               task->tk_action = call_transmit_status;
+               return;
+       }
+
        dprint_status(task);
 
        trace_rpc_connect_status(task);
@@ -2302,6 +2309,7 @@ out_retry:
        task->tk_status = 0;
        /* Note: rpc_verify_header() may have freed the RPC slot */
        if (task->tk_rqstp == req) {
+               xdr_free_bvec(&req->rq_rcv_buf);
                req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0;
                if (task->tk_client->cl_discrtry)
                        xprt_conditional_disconnect(req->rq_xprt,
index 3b525accaa6857bc76cda9a8b9f131791fcd1f26..986f3ed7d1a24800d31713143aebffc45e32fc16 100644 (file)
@@ -336,7 +336,7 @@ static ssize_t svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov,
        rqstp->rq_xprt_hlen = 0;
 
        clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, iov, nr, buflen);
+       iov_iter_kvec(&msg.msg_iter, READ, iov, nr, buflen);
        if (base != 0) {
                iov_iter_advance(&msg.msg_iter, base);
                buflen -= base;
index 2bbb8d38d2bf5f6eeb87a5771aeb92683d25543f..f302c6eb8779063a71b9a590325a96b8026ab3e6 100644 (file)
@@ -546,7 +546,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
 static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
                size_t nbytes)
 {
-       static __be32 *p;
+       __be32 *p;
        int space_left;
        int frag1bytes, frag2bytes;
 
@@ -673,11 +673,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
                WARN_ON_ONCE(xdr->iov);
                return;
        }
-       if (fraglen) {
+       if (fraglen)
                xdr->end = head->iov_base + head->iov_len;
-               xdr->page_ptr--;
-       }
        /* (otherwise assume xdr->end is already set) */
+       xdr->page_ptr--;
        head->iov_len = len;
        buf->len = len;
        xdr->p = head->iov_base + head->iov_len;
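
Dropping the stray `static` is the bug fix in the first hunk: a static local pointer is a single program-wide object, so every concurrent caller of the encoder shared, and could clobber, the same scratch pointer. A contrived userspace illustration of why an automatic variable is required:

#include <stdio.h>

/* Illustration only: a 'static' local exists once per program, not once
 * per call, so two callers (e.g. two threads encoding different XDR
 * streams) share it and can redirect each other's stores. */
static int *next_slot_broken(int *buf, int n)
{
	static int *p;		/* one pointer shared by ALL callers: the bug */

	p = buf + n;		/* another thread's store here would overwrite */
	return p;		/* ...the pointer this caller is about to use */
}

static int *next_slot_fixed(int *buf, int n)
{
	int *p = buf + n;	/* automatic: private to this call */

	return p;
}

int main(void)
{
	int a[4], b[4];

	printf("%d\n", next_slot_broken(a, 1) == a + 1);	/* 1 only while single-threaded */
	printf("%d\n", next_slot_fixed(b, 2) == b + 2);		/* 1, always */
	return 0;
}
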
index 86bea4520c4d1fb3db7249bbd69f87721d02e6b5..ce927002862a675a9f1169d12fbeb6999984a1c6 100644 (file)
@@ -826,8 +826,15 @@ void xprt_connect(struct rpc_task *task)
                        return;
                if (xprt_test_and_set_connecting(xprt))
                        return;
-               xprt->stat.connect_start = jiffies;
-               xprt->ops->connect(xprt, task);
+               /* Race breaker */
+               if (!xprt_connected(xprt)) {
+                       xprt->stat.connect_start = jiffies;
+                       xprt->ops->connect(xprt, task);
+               } else {
+                       xprt_clear_connecting(xprt);
+                       task->tk_status = 0;
+                       rpc_wake_up_queued_task(&xprt->pending, task);
+               }
        }
        xprt_release_write(xprt, task);
 }
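
The race breaker re-checks the connection state after winning the connecting flag: another task may have completed the connect between the first xprt_connected() test and xprt_test_and_set_connecting(). A minimal sketch of the double-check pattern with C11 atomics (illustrative state only, standing in for the real transport):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool connected;
static atomic_flag connecting = ATOMIC_FLAG_INIT;

static void maybe_connect(void)
{
	if (atomic_load(&connected))
		return;
	if (atomic_flag_test_and_set(&connecting))
		return;				/* someone else is connecting */
	if (!atomic_load(&connected))		/* race breaker: re-check */
		atomic_store(&connected, true);	/* stands in for ->connect() */
	atomic_flag_clear(&connecting);
}

int main(void)
{
	maybe_connect();
	printf("connected=%d\n", atomic_load(&connected));
	return 0;
}
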
@@ -1623,6 +1630,8 @@ xprt_request_init(struct rpc_task *task)
        req->rq_snd_buf.buflen = 0;
        req->rq_rcv_buf.len = 0;
        req->rq_rcv_buf.buflen = 0;
+       req->rq_snd_buf.bvec = NULL;
+       req->rq_rcv_buf.bvec = NULL;
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
index 1b51e04d356609f37b9e8042768a083d31391600..8a5e823e0b339b1998ff21b9cac814de213a2b23 100644 (file)
@@ -330,18 +330,16 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
 {
        size_t i,n;
 
-       if (!(buf->flags & XDRBUF_SPARSE_PAGES))
+       if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
                return want;
-       if (want > buf->page_len)
-               want = buf->page_len;
        n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
        for (i = 0; i < n; i++) {
                if (buf->pages[i])
                        continue;
                buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
                if (!buf->pages[i]) {
-                       buf->page_len = (i * PAGE_SIZE) - buf->page_base;
-                       return buf->page_len;
+                       i *= PAGE_SIZE;
+                       return i > buf->page_base ? i - buf->page_base : 0;
                }
        }
        return want;
@@ -361,7 +359,7 @@ static ssize_t
 xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
                struct kvec *kvec, size_t count, size_t seek)
 {
-       iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, kvec, 1, count);
+       iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
        return xs_sock_recvmsg(sock, msg, flags, seek);
 }
 
@@ -370,7 +368,7 @@ xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
                struct bio_vec *bvec, unsigned long nr, size_t count,
                size_t seek)
 {
-       iov_iter_bvec(&msg->msg_iter, READ | ITER_BVEC, bvec, nr, count);
+       iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
        return xs_sock_recvmsg(sock, msg, flags, seek);
 }
 
@@ -378,8 +376,8 @@ static ssize_t
 xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
                size_t count)
 {
-       struct kvec kvec = { 0 };
-       return xs_read_kvec(sock, msg, flags | MSG_TRUNC, &kvec, count, 0);
+       iov_iter_discard(&msg->msg_iter, READ, count);
+       return sock_recvmsg(sock, msg, flags);
 }
 
 static ssize_t
@@ -398,16 +396,17 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
-                       goto eagain;
+                       goto out;
                seek = 0;
        } else {
                seek -= buf->head[0].iov_len;
                offset += buf->head[0].iov_len;
        }
-       if (seek < buf->page_len) {
-               want = xs_alloc_sparse_pages(buf,
-                               min_t(size_t, count - offset, buf->page_len),
-                               GFP_NOWAIT);
+
+       want = xs_alloc_sparse_pages(buf,
+                       min_t(size_t, count - offset, buf->page_len),
+                       GFP_NOWAIT);
+       if (seek < want) {
                ret = xs_read_bvec(sock, msg, flags, buf->bvec,
                                xdr_buf_pagecount(buf),
                                want + buf->page_base,
@@ -418,12 +417,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
-                       goto eagain;
+                       goto out;
                seek = 0;
        } else {
-               seek -= buf->page_len;
-               offset += buf->page_len;
+               seek -= want;
+               offset += want;
        }
+
        if (seek < buf->tail[0].iov_len) {
                want = min_t(size_t, count - offset, buf->tail[0].iov_len);
                ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
@@ -433,17 +433,13 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
                if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
                        goto out;
                if (ret != want)
-                       goto eagain;
+                       goto out;
        } else
                offset += buf->tail[0].iov_len;
        ret = -EMSGSIZE;
-       msg->msg_flags |= MSG_TRUNC;
 out:
        *read = offset - seek_init;
        return ret;
-eagain:
-       ret = -EAGAIN;
-       goto out;
 sock_err:
        offset += seek;
        goto out;
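
A worked trace of the reshuffled accounting above, with invented numbers; the point is that the page branch is now gated on want, the number of bytes the allocator actually backed, rather than on the nominal buf->page_len, and that short reads fall through to out with the partial length instead of being flattened to -EAGAIN:

/* One pass through xs_read_xdr_buf(), hypothetical values:
 *
 *   buf->page_len = 16384    page payload the XDR buffer asks for
 *   want          =  8192    what xs_alloc_sparse_pages() could back
 *   seek          =  8192    resuming after an earlier partial read
 *
 * Old: seek < page_len (8192 < 16384) takes the page branch even
 *      though nothing readable remains there, and the zero-byte
 *      recvmsg that follows is treated as a socket error.
 * New: seek < want (8192 < 8192) is false, so the code advances
 *      seek -= want, offset += want and moves on to the tail kvec.
 */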
@@ -486,19 +482,20 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
        if (transport->recv.offset == transport->recv.len) {
                if (xs_read_stream_request_done(transport))
                        msg->msg_flags |= MSG_EOR;
-               return transport->recv.copied;
+               return read;
        }
 
        switch (ret) {
+       default:
+               break;
+       case -EFAULT:
        case -EMSGSIZE:
-               return transport->recv.copied;
+               msg->msg_flags |= MSG_TRUNC;
+               return read;
        case 0:
                return -ESHUTDOWN;
-       default:
-               if (ret < 0)
-                       return ret;
        }
-       return -EAGAIN;
+       return ret < 0 ? ret : read;
 }
 
 static size_t
@@ -537,7 +534,7 @@ xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 
        ret = xs_read_stream_request(transport, msg, flags, req);
        if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
-               xprt_complete_bc_request(req, ret);
+               xprt_complete_bc_request(req, transport->recv.copied);
 
        return ret;
 }
@@ -570,7 +567,7 @@ xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
 
        spin_lock(&xprt->queue_lock);
        if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
-               xprt_complete_rqst(req->rq_task, ret);
+               xprt_complete_rqst(req->rq_task, transport->recv.copied);
        xprt_unpin_rqst(req);
 out:
        spin_unlock(&xprt->queue_lock);
@@ -591,10 +588,8 @@ xs_read_stream(struct sock_xprt *transport, int flags)
                if (ret <= 0)
                        goto out_err;
                transport->recv.offset = ret;
-               if (ret != want) {
-                       ret = -EAGAIN;
-                       goto out_err;
-               }
+               if (transport->recv.offset != want)
+                       return transport->recv.offset;
                transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
                        RPC_FRAGMENT_SIZE_MASK;
                transport->recv.offset -= sizeof(transport->recv.fraghdr);
@@ -602,6 +597,9 @@ xs_read_stream(struct sock_xprt *transport, int flags)
        }
 
        switch (be32_to_cpu(transport->recv.calldir)) {
+       default:
+               msg.msg_flags |= MSG_TRUNC;
+               break;
        case RPC_CALL:
                ret = xs_read_stream_call(transport, &msg, flags);
                break;
@@ -616,6 +614,9 @@ xs_read_stream(struct sock_xprt *transport, int flags)
                goto out_err;
        read += ret;
        if (transport->recv.offset < transport->recv.len) {
+               if (!(msg.msg_flags & MSG_TRUNC))
+                       return read;
+               msg.msg_flags = 0;
                ret = xs_read_discard(transport->sock, &msg, flags,
                                transport->recv.len - transport->recv.offset);
                if (ret <= 0)
@@ -623,7 +624,7 @@ xs_read_stream(struct sock_xprt *transport, int flags)
                transport->recv.offset += ret;
                read += ret;
                if (transport->recv.offset != transport->recv.len)
-                       return -EAGAIN;
+                       return read;
        }
        if (xs_read_stream_request_done(transport)) {
                trace_xs_stream_read_request(transport);
@@ -633,13 +634,7 @@ xs_read_stream(struct sock_xprt *transport, int flags)
        transport->recv.len = 0;
        return read;
 out_err:
-       switch (ret) {
-       case 0:
-       case -ESHUTDOWN:
-               xprt_force_disconnect(&transport->xprt);
-               return -ESHUTDOWN;
-       }
-       return ret;
+       return ret != 0 ? ret : -ESHUTDOWN;
 }
 
 static void xs_stream_data_receive(struct sock_xprt *transport)
@@ -648,12 +643,12 @@ static void xs_stream_data_receive(struct sock_xprt *transport)
        ssize_t ret = 0;
 
        mutex_lock(&transport->recv_mutex);
+       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        if (transport->sock == NULL)
                goto out;
-       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        for (;;) {
                ret = xs_read_stream(transport, MSG_DONTWAIT);
-               if (ret <= 0)
+               if (ret < 0)
                        break;
                read += ret;
                cond_resched();
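
The reordering above is the classic lost-wakeup pattern: acknowledge the event bit before draining, so a data_ready callback that fires mid-drain re-sets the bit and re-queues the worker instead of being absorbed. The ret < 0 change in the same hunk keeps the loop going when a pass consumes zero payload bytes (now a partial-progress return, not an error) and exits only on a real error such as -EAGAIN. A generic sketch; drain_some() and the surrounding names are invented:

clear_bit(DATA_READY, &state);			/* 1: ack the event first */
for (;;) {
	ssize_t n = drain_some(sock);		/* 2: then drain          */
	if (n < 0)				/* e.g. -EAGAIN: empty    */
		break;
	total += n;				/* n == 0: partial frame, retry */
	cond_resched();
}
/* Any wakeup after step 1 re-sets DATA_READY and re-queues this
 * worker, so no event is lost between the flag test and the read. */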
@@ -1345,10 +1340,10 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
        int err;
 
        mutex_lock(&transport->recv_mutex);
+       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        sk = transport->inet;
        if (sk == NULL)
                goto out;
-       clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        for (;;) {
                skb = skb_recv_udp(sk, 0, 1, &err);
                if (skb == NULL)
index 2830709957bddeb13adf0f352abb9aaacba3ec55..c138d68e8a695fde8fb1464f03a539cef5a03bd4 100644 (file)
@@ -166,7 +166,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
 
        /* Apply trial address if we just left trial period */
        if (!trial && !self) {
-               tipc_net_finalize(net, tn->trial_addr);
+               tipc_sched_net_finalize(net, tn->trial_addr);
+               msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
                msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
        }
 
@@ -300,14 +301,12 @@ static void tipc_disc_timeout(struct timer_list *t)
                goto exit;
        }
 
-       /* Trial period over ? */
-       if (!time_before(jiffies, tn->addr_trial_end)) {
-               /* Did we just leave it ? */
-               if (!tipc_own_addr(net))
-                       tipc_net_finalize(net, tn->trial_addr);
-
-               msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
-               msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
+       /* Did we just leave trial period ? */
+       if (!time_before(jiffies, tn->addr_trial_end) && !tipc_own_addr(net)) {
+               mod_timer(&d->timer, jiffies + TIPC_DISC_INIT);
+               spin_unlock_bh(&d->lock);
+               tipc_sched_net_finalize(net, tn->trial_addr);
+               return;
        }
 
        /* Adjust timeout interval according to discovery phase */
@@ -319,6 +318,8 @@ static void tipc_disc_timeout(struct timer_list *t)
                        d->timer_intv = TIPC_DISC_SLOW;
                else if (!d->num_nodes && d->timer_intv > TIPC_DISC_FAST)
                        d->timer_intv = TIPC_DISC_FAST;
+               msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+               msg_set_prevnode(buf_msg(d->skb), tn->trial_addr);
        }
 
        mod_timer(&d->timer, jiffies + d->timer_intv);
index 201c3b5bc96be9fb412dbc60522b1513d2494a8f..836727e363c46290ab8ef55e9d7b630f1dfac293 100644 (file)
@@ -1594,14 +1594,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
                        l->priority = peers_prio;
 
-               /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
-               if (msg_peer_stopping(hdr))
+               /* If peer is going down we want full re-establish cycle */
+               if (msg_peer_stopping(hdr)) {
                        rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
-               else if ((mtyp == RESET_MSG) || !link_is_up(l))
+                       break;
+               }
+               /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
+               if (mtyp == RESET_MSG || !link_is_up(l))
                        rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
 
                /* ACTIVATE_MSG takes up link if it was already locally reset */
-               if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
+               if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
                        rc = TIPC_LINK_UP_EVT;
 
                l->peer_session = msg_session(hdr);
index 62199cf5a56c04db99af54dad9fc2564df8d6b05..f076edb74338247f0bad99cfaa5d23e5b14730ab 100644 (file)
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
+struct tipc_net_work {
+       struct work_struct work;
+       struct net *net;
+       u32 addr;
+};
+
+static void tipc_net_finalize(struct net *net, u32 addr);
+
 int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
 {
        if (tipc_own_id(net)) {
@@ -119,17 +127,38 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
        return 0;
 }
 
-void tipc_net_finalize(struct net *net, u32 addr)
+static void tipc_net_finalize(struct net *net, u32 addr)
 {
        struct tipc_net *tn = tipc_net(net);
 
-       if (!cmpxchg(&tn->node_addr, 0, addr)) {
-               tipc_set_node_addr(net, addr);
-               tipc_named_reinit(net);
-               tipc_sk_reinit(net);
-               tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
-                                    TIPC_CLUSTER_SCOPE, 0, addr);
-       }
+       if (cmpxchg(&tn->node_addr, 0, addr))
+               return;
+       tipc_set_node_addr(net, addr);
+       tipc_named_reinit(net);
+       tipc_sk_reinit(net);
+       tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+                            TIPC_CLUSTER_SCOPE, 0, addr);
+}
+
+static void tipc_net_finalize_work(struct work_struct *work)
+{
+       struct tipc_net_work *fwork;
+
+       fwork = container_of(work, struct tipc_net_work, work);
+       tipc_net_finalize(fwork->net, fwork->addr);
+       kfree(fwork);
+}
+
+void tipc_sched_net_finalize(struct net *net, u32 addr)
+{
+       struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
+
+       if (!fwork)
+               return;
+       INIT_WORK(&fwork->work, tipc_net_finalize_work);
+       fwork->net = net;
+       fwork->addr = addr;
+       schedule_work(&fwork->work);
 }
 
 void tipc_net_stop(struct net *net)
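
A design note on the hunks above, as read from the patch: the finalize step re-initializes sockets and name tables, work that can sleep, while its callers (the discovery timer and the trial-address receive path) run in atomic context, hence the GFP_ATOMIC allocation and the punt to the system workqueue. The work item carries its own arguments and frees itself; the generic fire-and-forget shape, with invented names, is:

struct deferred_job {
	struct work_struct work;
	u32 arg;			/* arguments copied at schedule time */
};

static void deferred_job_fn(struct work_struct *work)
{
	struct deferred_job *job = container_of(work, struct deferred_job, work);

	/* ... sleeping work using job->arg ... */
	kfree(job);			/* the item owns its own memory */
}

static void schedule_deferred_job(u32 arg)
{
	struct deferred_job *job = kzalloc(sizeof(*job), GFP_ATOMIC);

	if (!job)
		return;			/* best effort, as in the patch */
	INIT_WORK(&job->work, deferred_job_fn);
	job->arg = arg;
	schedule_work(&job->work);
}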
index 09ad02b50bb1ba1e9798c41810c843d74b5e0185..b7f2e364eb99e774854ea22837107991619ef00a 100644 (file)
@@ -42,7 +42,7 @@
 extern const struct nla_policy tipc_nl_net_policy[];
 
 int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
-void tipc_net_finalize(struct net *net, u32 addr);
+void tipc_sched_net_finalize(struct net *net, u32 addr);
 void tipc_net_stop(struct net *net);
 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
index 2afc4f8c37a74db4896508283f434909a0151732..48801976643358efd72623e8c5de56cc77202afd 100644 (file)
@@ -584,12 +584,15 @@ static void  tipc_node_clear_links(struct tipc_node *node)
 /* tipc_node_cleanup - delete nodes that does not
  * have active links for NODE_CLEANUP_AFTER time
  */
-static int tipc_node_cleanup(struct tipc_node *peer)
+static bool tipc_node_cleanup(struct tipc_node *peer)
 {
        struct tipc_net *tn = tipc_net(peer->net);
        bool deleted = false;
 
-       spin_lock_bh(&tn->node_list_lock);
+       /* If lock held by tipc_node_stop() the node will be deleted anyway */
+       if (!spin_trylock_bh(&tn->node_list_lock))
+               return false;
+
        tipc_node_write_lock(peer);
 
        if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
index 636e6131769d83f9b5f7013a32210ec809835fb2..b57b1be7252baef2f6410710e225864877c08ac1 100644 (file)
@@ -1555,16 +1555,17 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
 /**
  * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
  * @m: descriptor for message info
- * @msg: received message header
+ * @skb: received message buffer
  * @tsk: TIPC port associated with message
  *
  * Note: Ancillary data is not captured if not requested by receiver.
  *
  * Returns 0 if successful, otherwise errno
  */
-static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
+static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
                                 struct tipc_sock *tsk)
 {
+       struct tipc_msg *msg;
        u32 anc_data[3];
        u32 err;
        u32 dest_type;
@@ -1573,6 +1574,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 
        if (likely(m->msg_controllen == 0))
                return 0;
+       msg = buf_msg(skb);
 
        /* Optionally capture errored message object(s) */
        err = msg ? msg_errcode(msg) : 0;
@@ -1583,6 +1585,9 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
                if (res)
                        return res;
                if (anc_data[1]) {
+                       if (skb_linearize(skb))
+                               return -ENOMEM;
+                       msg = buf_msg(skb);
                        res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
                                       msg_data(msg));
                        if (res)
@@ -1744,9 +1749,10 @@ static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
 
        /* Collect msg meta data, including error code and rejected data */
        tipc_sk_set_orig_addr(m, skb);
-       rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+       rc = tipc_sk_anc_data_recv(m, skb, tsk);
        if (unlikely(rc))
                goto exit;
+       hdr = buf_msg(skb);
 
        /* Capture data if non-error msg, otherwise just set return value */
        if (likely(!err)) {
@@ -1856,9 +1862,10 @@ static int tipc_recvstream(struct socket *sock, struct msghdr *m,
                /* Collect msg meta data, incl. error code and rejected data */
                if (!copied) {
                        tipc_sk_set_orig_addr(m, skb);
-                       rc = tipc_sk_anc_data_recv(m, hdr, tsk);
+                       rc = tipc_sk_anc_data_recv(m, skb, tsk);
                        if (rc)
                                break;
+                       hdr = buf_msg(skb);
                }
 
                /* Copy data if msg ok, otherwise return error/partial data */
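
The recurring hdr = buf_msg(skb) re-fetch added above is the heart of these hunks: skb_linearize() may reallocate the buffer's data area, so a header pointer cached before the call can dangle afterwards. Passing the skb down instead of a pre-computed header lets the callee linearize first and derive the header second. A minimal fragment of the invariant (kernel context assumed):

struct tipc_msg *hdr = buf_msg(skb);	/* hdr aliases skb->data */

if (skb_linearize(skb))			/* may reallocate skb->data */
	return -ENOMEM;
hdr = buf_msg(skb);			/* re-derive: the old hdr is stale */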
index 4bdea00571711d16894e991bebee8d878955e97d..efb16f69bd2c4c0bc012d5f18c3ad42e71260e81 100644 (file)
@@ -394,7 +394,7 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
        iov.iov_base = &s;
        iov.iov_len = sizeof(s);
        msg.msg_name = NULL;
-       iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, iov.iov_len);
+       iov_iter_kvec(&msg.msg_iter, READ, &iov, 1, iov.iov_len);
        ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT);
        if (ret == -EWOULDBLOCK)
                return -EWOULDBLOCK;
index 276edbc04f3859efe96540e8e4705e730bcba56f..d753e362d2d9e625b9d9c4c476cc06cdcbc5430a 100644 (file)
@@ -489,7 +489,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
 
        iov.iov_base = kaddr + offset;
        iov.iov_len = size;
-       iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
+       iov_iter_kvec(&msg_iter, WRITE, &iov, 1, size);
        rc = tls_push_data(sk, &msg_iter, size,
                           flags, TLS_RECORD_TYPE_DATA);
        kunmap(page);
@@ -538,7 +538,7 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
 {
        struct iov_iter msg_iter;
 
-       iov_iter_kvec(&msg_iter, WRITE | ITER_KVEC, NULL, 0, 0);
+       iov_iter_kvec(&msg_iter, WRITE, NULL, 0, 0);
        return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
index 5cd88ba8acd175bc013cc95917120d8eb6899b99..7b1af8b59cd20e56841c437aa97d981fa89be864 100644 (file)
@@ -799,7 +799,7 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
        struct crypto_tfm *tfm = crypto_aead_tfm(ctx->aead_send);
        bool async_capable = tfm->__crt_alg->cra_flags & CRYPTO_ALG_ASYNC;
        unsigned char record_type = TLS_RECORD_TYPE_DATA;
-       bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+       bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        bool eor = !(msg->msg_flags & MSG_MORE);
        size_t try_to_copy, copied = 0;
        struct sk_msg *msg_pl, *msg_en;
@@ -1457,7 +1457,7 @@ int tls_sw_recvmsg(struct sock *sk,
        bool cmsg = false;
        int target, err = 0;
        long timeo;
-       bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+       bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
        int num_async = 0;
 
        flags |= nonblock;
index 12b3edf70a7b91966d4ee524fe10aa9a31bcc26b..1615e503f8e3919ac1eb33cab98445acc00957f9 100644 (file)
@@ -272,11 +272,11 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
 
        p1 = (u8*)(ht_capa);
        p2 = (u8*)(ht_capa_mask);
-       for (i = 0; i<sizeof(*ht_capa); i++)
+       for (i = 0; i < sizeof(*ht_capa); i++)
                p1[i] &= p2[i];
 }
 
-/*  Do a logical ht_capa &= ht_capa_mask.  */
+/*  Do a logical vht_capa &= vht_capa_mask.  */
 void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
                                const struct ieee80211_vht_cap *vht_capa_mask)
 {
index 744b5851bbf9010ea2c2cac867fcbda487c4184b..8d763725498c15fc7474f5ca78802233800ee4c5 100644 (file)
@@ -7870,6 +7870,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
        }
 
        memset(&params, 0, sizeof(params));
+       params.beacon_csa.ftm_responder = -1;
 
        if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
            !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
index d536b07582f8c90e9dce435c5e569dfb8d406fe9..f741d8376a463b588231550c4cca661e55d2e6b7 100644 (file)
@@ -642,11 +642,15 @@ static bool cfg80211_is_all_idle(void)
         * All devices must be idle as otherwise if you are actively
         * scanning some new beacon hints could be learned and would
         * count as new regulatory hints.
+        * Also if there is any other active beaconing interface we
+        * need not issue a disconnect hint and reset any info such
+        * as chan dfs state, etc.
         */
        list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
                list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
                        wdev_lock(wdev);
-                       if (wdev->conn || wdev->current_bss)
+                       if (wdev->conn || wdev->current_bss ||
+                           cfg80211_beaconing_iface_active(wdev))
                                is_all_idle = false;
                        wdev_unlock(wdev);
                }
@@ -1171,6 +1175,8 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
 
        cfg80211_oper_and_ht_capa(&connect->ht_capa_mask,
                                  rdev->wiphy.ht_capa_mod_mask);
+       cfg80211_oper_and_vht_capa(&connect->vht_capa_mask,
+                                  rdev->wiphy.vht_capa_mod_mask);
 
        if (connkeys && connkeys->def >= 0) {
                int idx;
index ef14d80ca03ee22c5568933e1ff3d970557e59ea..d473bd135da8babc52329982bafc518ffeaf52fb 100644 (file)
@@ -1421,6 +1421,8 @@ size_t ieee80211_ie_split_ric(const u8 *ies, size_t ielen,
                                                          ies[pos + ext],
                                                          ext == 2))
                                        pos = skip_ie(ies, ielen, pos);
+                               else
+                                       break;
                        }
                } else {
                        pos = skip_ie(ies, ielen, pos);
index d49aa79b79970d403b5c165d4000b2aa1d493442..5121729b8b631f45d81d3e0332b0d2e4346d3c5d 100644 (file)
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb,
        }
 
        len = *skb->data;
-       needed = 1 + (len >> 4) + (len & 0x0f);
+       needed = 1 + ((len >> 4) + (len & 0x0f) + 1) / 2;
 
        if (!pskb_may_pull(skb, needed)) {
                /* packet is too short to hold the addresses it claims
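
The corrected arithmetic is easiest to check with a worked example: the leading octet packs two 4-bit digit counts, and the address digits themselves are BCD-packed two per octet, so the block occupies the rounded-up half of the digit total, not one octet per digit. A standalone sketch with a hypothetical length octet:

#include <stdio.h>

int main(void)
{
	unsigned char len = 0x35;	/* called: 3 digits, calling: 5 */
	unsigned int digits = (len >> 4) + (len & 0x0f);	/* 3 + 5 = 8 */
	unsigned int needed = 1 + (digits + 1) / 2;	/* BCD: 2 digits/octet */

	printf("needed = %u octets\n", needed);	/* prints 5 */
	/* the old "1 + digits" demanded 9 octets here, so pskb_may_pull()
	 * could reject a frame whose address block is a valid 5 octets */
	return 0;
}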
@@ -288,7 +288,7 @@ static struct sock *x25_find_listener(struct x25_address *addr,
        sk_for_each(s, &x25_list)
                if ((!strcmp(addr->x25_addr,
                        x25_sk(s)->source_addr.x25_addr) ||
-                               !strcmp(addr->x25_addr,
+                               !strcmp(x25_sk(s)->source_addr.x25_addr,
                                        null_x25_address.x25_addr)) &&
                                        s->sk_state == TCP_LISTEN) {
                        /*
@@ -688,11 +688,15 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       len = strlen(addr->sx25_addr.x25_addr);
-       for (i = 0; i < len; i++) {
-               if (!isdigit(addr->sx25_addr.x25_addr[i])) {
-                       rc = -EINVAL;
-                       goto out;
+       /* check for the null_x25_address */
+       if (strcmp(addr->sx25_addr.x25_addr, null_x25_address.x25_addr)) {
+
+               len = strlen(addr->sx25_addr.x25_addr);
+               for (i = 0; i < len; i++) {
+                       if (!isdigit(addr->sx25_addr.x25_addr[i])) {
+                               rc = -EINVAL;
+                               goto out;
+                       }
                }
        }
 
index 3c12cae32001da306a97cae51979118931360056..afb26221d8a8f26f96ec257abf8822bc36e6351a 100644 (file)
@@ -142,6 +142,15 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
                        sk->sk_state_change(sk);
                break;
        }
+       case X25_CALL_REQUEST:
+               /* call collision */
+               x25->causediag.cause      = 0x01;
+               x25->causediag.diagnostic = 0x48;
+
+               x25_write_internal(sk, X25_CLEAR_REQUEST);
+               x25_disconnect(sk, EISCONN, 0x01, 0x48);
+               break;
+
        case X25_CLEAR_REQUEST:
                if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2))
                        goto out_clear;
index 4a9ee2d83158ba87a4da985af1020faae8c440b7..140270a13d54f7c69584fa6aefbf6b1be0941ec6 100644 (file)
@@ -8,7 +8,6 @@ config XFRM
 
 config XFRM_OFFLOAD
        bool
-       depends on XFRM
 
 config XFRM_ALGO
        tristate
index b669262682c9763e7c863d6bb77f44ed34402cce..dc4a9f1fb941a8eef7f1a3b68563c17abcf4d919 100644 (file)
@@ -2077,10 +2077,8 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;
 
-#ifdef CONFIG_COMPAT
        if (in_compat_syscall())
                return -EOPNOTSUPP;
-#endif
 
        if (!optval && !optlen) {
                xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
index ca7a207b81a9587c942dd8763e4444cc5675f1ee..c9a84e22f5d578216cd59687e293ed3a078cd565 100644 (file)
@@ -2621,10 +2621,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        const struct xfrm_link *link;
        int type, err;
 
-#ifdef CONFIG_COMPAT
        if (in_compat_syscall())
                return -EOPNOTSUPP;
-#endif
 
        type = nlh->nlmsg_type;
        if (type > XFRM_MSG_MAX)
index 9bfd8ff6de82e654571bc74ec2b6f2c6c5559675..37a0ffcb4d63f11b6f3b10561e4a06da04c26ab9 100644 (file)
@@ -119,7 +119,7 @@ int main(int argc, char **argv)
        if (res < 0)
                perror("HIDIOCSFEATURE");
        else
-               printf("ioctl HIDIOCGFEATURE returned: %d\n", res);
+               printf("ioctl HIDIOCSFEATURE returned: %d\n", res);
 
        /* Get Feature */
        buf[0] = 0x9; /* Report Number */
index ca21a35fa244e77fd3c503d28f2f537c6fb8a7d4..bb015551c2d9ae11c67f276cc64d7f84ef5521fc 100644 (file)
@@ -140,17 +140,9 @@ cc-option-yn = $(call try-run,\
 cc-disable-warning = $(call try-run,\
        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
-# cc-name
-# Expands to either gcc or clang
-cc-name = $(shell $(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
-
 # cc-version
 cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
 
-# cc-fullversion
-cc-fullversion = $(shell $(CONFIG_SHELL) \
-       $(srctree)/scripts/gcc-version.sh -p $(CC))
-
 # cc-ifversion
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
 cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
index a8e7ba9f73e875857a730606c2c4af89ad688f36..6a6be9f440cf940358a0236b6ad13360fed253ca 100644 (file)
@@ -236,10 +236,8 @@ ifdef CONFIG_GCOV_KERNEL
 objtool_args += --no-unreachable
 endif
 ifdef CONFIG_RETPOLINE
-ifneq ($(RETPOLINE_CFLAGS),)
   objtool_args += --retpoline
 endif
-endif
 
 
 ifdef CONFIG_MODVERSIONS
index 24b2fb1d12974d1507705f475b52aedc81c9be5c..768306add59131c67e2ec848c27c31032b8fae71 100644 (file)
@@ -29,6 +29,7 @@ warning-1 += $(call cc-option, -Wmissing-include-dirs)
 warning-1 += $(call cc-option, -Wunused-but-set-variable)
 warning-1 += $(call cc-option, -Wunused-const-variable)
 warning-1 += $(call cc-option, -Wpacked-not-aligned)
+warning-1 += $(call cc-option, -Wstringop-truncation)
 warning-1 += $(call cc-disable-warning, missing-field-initializers)
 warning-1 += $(call cc-disable-warning, sign-compare)
 
@@ -64,7 +65,7 @@ endif
 KBUILD_CFLAGS += $(warning)
 else
 
-ifeq ($(cc-name),clang)
+ifdef CONFIG_CC_IS_CLANG
 KBUILD_CFLAGS += $(call cc-disable-warning, initializer-overrides)
 KBUILD_CFLAGS += $(call cc-disable-warning, unused-value)
 KBUILD_CFLAGS += $(call cc-disable-warning, format)
index 0a482f341576766793cb7ac85601e92e5c0e8c8d..46c5c680980657dfc8997d5127bfab198b710490 100644 (file)
@@ -26,6 +26,16 @@ gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT)            \
 gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_RANDSTRUCT_PERFORMANCE)  \
                += -fplugin-arg-randomize_layout_plugin-performance-mode
 
+gcc-plugin-$(CONFIG_GCC_PLUGIN_STACKLEAK)      += stackleak_plugin.so
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK)               \
+               += -DSTACKLEAK_PLUGIN
+gcc-plugin-cflags-$(CONFIG_GCC_PLUGIN_STACKLEAK)               \
+               += -fplugin-arg-stackleak_plugin-track-min-size=$(CONFIG_STACKLEAK_TRACK_MIN_SIZE)
+ifdef CONFIG_GCC_PLUGIN_STACKLEAK
+    DISABLE_STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-disable
+endif
+export DISABLE_STACKLEAK_PLUGIN
+
 # All the plugin CFLAGS are collected here in case a build target needs to
 # filter them out of the KBUILD_CFLAGS.
 GCC_PLUGINS_CFLAGS := $(strip $(addprefix -fplugin=$(objtree)/scripts/gcc-plugins/, $(gcc-plugin-y)) $(gcc-plugin-cflags-y))
index a0149db00be752555ff4178f66540543a5835955..6c6439f69a725f4bd46210fb3bb2c3e80a49fab2 100755 (executable)
@@ -71,7 +71,7 @@ die() {
 
 # Try to figure out the source directory prefix so we can remove it from the
 # addr2line output.  HACK ALERT: This assumes that start_kernel() is in
-# kernel/init.c!  This only works for vmlinux.  Otherwise it falls back to
+# init/main.c!  This only works for vmlinux.  Otherwise it falls back to
 # printing the absolute path.
 find_dir_prefix() {
        local objfile=$1
index cb0c889e13aa05818e222bbfd2cef9f082a29655..0d5c799688f0ae31fa02f82dc8451ad1a5811ff5 100644 (file)
@@ -139,4 +139,55 @@ config GCC_PLUGIN_RANDSTRUCT_PERFORMANCE
          in structures.  This reduces the performance hit of RANDSTRUCT
          at the cost of weakened randomization.
 
+config GCC_PLUGIN_STACKLEAK
+       bool "Erase the kernel stack before returning from syscalls"
+       depends on GCC_PLUGINS
+       depends on HAVE_ARCH_STACKLEAK
+       help
+         This option makes the kernel erase the kernel stack before
+         returning from system calls. That reduces the information which
+         kernel stack leak bugs can reveal and blocks some uninitialized
+         stack variable attacks.
+
+         The tradeoff is the performance impact: on a single CPU system kernel
+         compilation sees a 1% slowdown, other systems and workloads may vary
+         and you are advised to test this feature on your expected workload
+         before deploying it.
+
+         This plugin was ported from grsecurity/PaX. More information at:
+          * https://grsecurity.net/
+          * https://pax.grsecurity.net/
+
+config STACKLEAK_TRACK_MIN_SIZE
+       int "Minimum stack frame size of functions tracked by STACKLEAK"
+       default 100
+       range 0 4096
+       depends on GCC_PLUGIN_STACKLEAK
+       help
+         The STACKLEAK gcc plugin instruments the kernel code for tracking
+         the lowest border of the kernel stack (and for some other purposes).
+         It inserts the stackleak_track_stack() call for the functions with
+         a stack frame size greater than or equal to this parameter.
+         If unsure, leave the default value 100.
+
+config STACKLEAK_METRICS
+       bool "Show STACKLEAK metrics in the /proc file system"
+       depends on GCC_PLUGIN_STACKLEAK
+       depends on PROC_FS
+       help
+         If this is set, STACKLEAK metrics for every task are available in
+         the /proc file system. In particular, /proc/<pid>/stack_depth
+         shows the maximum kernel stack consumption for the current and
+         previous syscalls. Although this information is not precise, it
+         can be useful for estimating the STACKLEAK performance impact for
+         your workloads.
+
+config STACKLEAK_RUNTIME_DISABLE
+       bool "Allow runtime disabling of kernel stack erasing"
+       depends on GCC_PLUGIN_STACKLEAK
+       help
+         This option provides 'stack_erasing' sysctl, which can be used in
+         runtime to control kernel stack erasing for kernels built with
+         CONFIG_GCC_PLUGIN_STACKLEAK.
+
 endif
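
To ground the help text above, a sketch of the bug class stack erasure mitigates; the code is entirely hypothetical (kernel context, invented names), and with STACKLEAK active the stale bytes would have been overwritten with a poison constant on the previous syscall's exit rather than left holding secrets:

struct reply {
	u32 value;
	u32 pad;		/* never written: without erasure, leaks
				 * whatever a prior syscall left on the stack */
};

static long leaky_ioctl(void __user *arg)
{
	struct reply r;		/* note: no memset(&r, 0, sizeof(r)) */

	r.value = 42;
	return copy_to_user(arg, &r, sizeof(r)) ? -EFAULT : 0;
}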
diff --git a/scripts/gcc-plugins/stackleak_plugin.c b/scripts/gcc-plugins/stackleak_plugin.c
new file mode 100644 (file)
index 0000000..dbd3746
--- /dev/null
+++ b/scripts/gcc-plugins/stackleak_plugin.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright 2011-2017 by the PaX Team <pageexec@freemail.hu>
+ * Modified by Alexander Popov <alex.popov@linux.com>
+ * Licensed under the GPL v2
+ *
+ * Note: the choice of the license means that the compilation process is
+ * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
+ * but for the kernel it doesn't matter since it doesn't link against
+ * any of the gcc libraries
+ *
+ * This gcc plugin is needed for tracking the lowest border of the kernel stack.
+ * It instruments the kernel code inserting stackleak_track_stack() calls:
+ *  - after alloca();
+ *  - for the functions with a stack frame size greater than or equal
+ *     to the "track-min-size" plugin parameter.
+ *
+ * This plugin is ported from grsecurity/PaX. For more information see:
+ *   https://grsecurity.net/
+ *   https://pax.grsecurity.net/
+ *
+ * Debugging:
+ *  - use fprintf() to stderr, debug_generic_expr(), debug_gimple_stmt(),
+ *     print_rtl() and print_simple_rtl();
+ *  - add "-fdump-tree-all -fdump-rtl-all" to the plugin CFLAGS in
+ *     Makefile.gcc-plugins to see the verbose dumps of the gcc passes;
+ *  - use gcc -E to understand the preprocessing shenanigans;
+ *  - use gcc with enabled CFG/GIMPLE/SSA verification (--enable-checking).
+ */
+
+#include "gcc-common.h"
+
+__visible int plugin_is_GPL_compatible;
+
+static int track_frame_size = -1;
+static const char track_function[] = "stackleak_track_stack";
+
+/*
+ * Mark these global variables (roots) for gcc garbage collector since
+ * they point to the garbage-collected memory.
+ */
+static GTY(()) tree track_function_decl;
+
+static struct plugin_info stackleak_plugin_info = {
+       .version = "201707101337",
+       .help = "track-min-size=nn\ttrack stack for functions with a stack frame size >= nn bytes\n"
+               "disable\t\tdo not activate the plugin\n"
+};
+
+static void stackleak_add_track_stack(gimple_stmt_iterator *gsi, bool after)
+{
+       gimple stmt;
+       gcall *stackleak_track_stack;
+       cgraph_node_ptr node;
+       int frequency;
+       basic_block bb;
+
+       /* Insert call to void stackleak_track_stack(void) */
+       stmt = gimple_build_call(track_function_decl, 0);
+       stackleak_track_stack = as_a_gcall(stmt);
+       if (after) {
+               gsi_insert_after(gsi, stackleak_track_stack,
+                                               GSI_CONTINUE_LINKING);
+       } else {
+               gsi_insert_before(gsi, stackleak_track_stack, GSI_SAME_STMT);
+       }
+
+       /* Update the cgraph */
+       bb = gimple_bb(stackleak_track_stack);
+       node = cgraph_get_create_node(track_function_decl);
+       gcc_assert(node);
+       frequency = compute_call_stmt_bb_frequency(current_function_decl, bb);
+       cgraph_create_edge(cgraph_get_node(current_function_decl), node,
+                       stackleak_track_stack, bb->count, frequency);
+}
+
+static bool is_alloca(gimple stmt)
+{
+       if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
+               return true;
+
+#if BUILDING_GCC_VERSION >= 4007
+       if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
+               return true;
+#endif
+
+       return false;
+}
+
+/*
+ * Work with the GIMPLE representation of the code. Insert the
+ * stackleak_track_stack() call after alloca() and into the beginning
+ * of the function if it is not instrumented.
+ */
+static unsigned int stackleak_instrument_execute(void)
+{
+       basic_block bb, entry_bb;
+       bool prologue_instrumented = false, is_leaf = true;
+       gimple_stmt_iterator gsi;
+
+       /*
+        * ENTRY_BLOCK_PTR is a basic block which represents possible entry
+        * point of a function. This block does not contain any code and
+        * has a CFG edge to its successor.
+        */
+       gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+       entry_bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+
+       /*
+        * Loop through the GIMPLE statements in each of cfun basic blocks.
+        * cfun is a global variable which represents the function that is
+        * currently processed.
+        */
+       FOR_EACH_BB_FN(bb, cfun) {
+               for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
+                       gimple stmt;
+
+                       stmt = gsi_stmt(gsi);
+
+                       /* Leaf function is a function which makes no calls */
+                       if (is_gimple_call(stmt))
+                               is_leaf = false;
+
+                       if (!is_alloca(stmt))
+                               continue;
+
+                       /* Insert stackleak_track_stack() call after alloca() */
+                       stackleak_add_track_stack(&gsi, true);
+                       if (bb == entry_bb)
+                               prologue_instrumented = true;
+               }
+       }
+
+       if (prologue_instrumented)
+               return 0;
+
+       /*
+        * Special cases to skip the instrumentation.
+        *
+        * Taking the address of static inline functions materializes them,
+        * but we mustn't instrument some of them as the resulting stack
+        * alignment required by the function call ABI will break other
+        * assumptions regarding the expected (but not otherwise enforced)
+        * register clobbering ABI.
+        *
+        * Case in point: native_save_fl on amd64 when optimized for size
+        * clobbers rdx if it were instrumented here.
+        *
+        * TODO: any more special cases?
+        */
+       if (is_leaf &&
+           !TREE_PUBLIC(current_function_decl) &&
+           DECL_DECLARED_INLINE_P(current_function_decl)) {
+               return 0;
+       }
+
+       if (is_leaf &&
+           !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)),
+                    "_paravirt_", 10)) {
+               return 0;
+       }
+
+       /* Insert stackleak_track_stack() call at the function beginning */
+       bb = entry_bb;
+       if (!single_pred_p(bb)) {
+               /* gcc_assert(bb_loop_depth(bb) ||
+                               (bb->flags & BB_IRREDUCIBLE_LOOP)); */
+               split_edge(single_succ_edge(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+               gcc_assert(single_succ_p(ENTRY_BLOCK_PTR_FOR_FN(cfun)));
+               bb = single_succ(ENTRY_BLOCK_PTR_FOR_FN(cfun));
+       }
+       gsi = gsi_after_labels(bb);
+       stackleak_add_track_stack(&gsi, false);
+
+       return 0;
+}
+
+static bool large_stack_frame(void)
+{
+#if BUILDING_GCC_VERSION >= 8000
+       return maybe_ge(get_frame_size(), track_frame_size);
+#else
+       return (get_frame_size() >= track_frame_size);
+#endif
+}
+
+/*
+ * Work with the RTL representation of the code.
+ * Remove the unneeded stackleak_track_stack() calls from the functions
+ * which don't call alloca() and don't have a large enough stack frame size.
+ */
+static unsigned int stackleak_cleanup_execute(void)
+{
+       rtx_insn *insn, *next;
+
+       if (cfun->calls_alloca)
+               return 0;
+
+       if (large_stack_frame())
+               return 0;
+
+       /*
+        * Find stackleak_track_stack() calls. Loop through the chain of insns,
+        * which is an RTL representation of the code for a function.
+        *
+        * The example of a matching insn:
+        *  (call_insn 8 4 10 2 (call (mem (symbol_ref ("stackleak_track_stack")
+        *  [flags 0x41] <function_decl 0x7f7cd3302a80 stackleak_track_stack>)
+        *  [0 stackleak_track_stack S1 A8]) (0)) 675 {*call} (expr_list
+        *  (symbol_ref ("stackleak_track_stack") [flags 0x41] <function_decl
+        *  0x7f7cd3302a80 stackleak_track_stack>) (expr_list (0) (nil))) (nil))
+        */
+       for (insn = get_insns(); insn; insn = next) {
+               rtx body;
+
+               next = NEXT_INSN(insn);
+
+               /* Check the expression code of the insn */
+               if (!CALL_P(insn))
+                       continue;
+
+               /*
+                * Check the expression code of the insn body, which is an RTL
+                * Expression (RTX) describing the side effect performed by
+                * that insn.
+                */
+               body = PATTERN(insn);
+
+               if (GET_CODE(body) == PARALLEL)
+                       body = XVECEXP(body, 0, 0);
+
+               if (GET_CODE(body) != CALL)
+                       continue;
+
+               /*
+                * Check the first operand of the call expression. It should
+                * be a mem RTX describing the needed subroutine with a
+                * symbol_ref RTX.
+                */
+               body = XEXP(body, 0);
+               if (GET_CODE(body) != MEM)
+                       continue;
+
+               body = XEXP(body, 0);
+               if (GET_CODE(body) != SYMBOL_REF)
+                       continue;
+
+               if (SYMBOL_REF_DECL(body) != track_function_decl)
+                       continue;
+
+               /* Delete the stackleak_track_stack() call */
+               delete_insn_and_edges(insn);
+#if BUILDING_GCC_VERSION >= 4007 && BUILDING_GCC_VERSION < 8000
+               if (GET_CODE(next) == NOTE &&
+                   NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
+                       insn = next;
+                       next = NEXT_INSN(insn);
+                       delete_insn_and_edges(insn);
+               }
+#endif
+       }
+
+       return 0;
+}
+
+static bool stackleak_gate(void)
+{
+       tree section;
+
+       section = lookup_attribute("section",
+                                  DECL_ATTRIBUTES(current_function_decl));
+       if (section && TREE_VALUE(section)) {
+               section = TREE_VALUE(TREE_VALUE(section));
+
+               if (!strncmp(TREE_STRING_POINTER(section), ".init.text", 10))
+                       return false;
+               if (!strncmp(TREE_STRING_POINTER(section), ".devinit.text", 13))
+                       return false;
+               if (!strncmp(TREE_STRING_POINTER(section), ".cpuinit.text", 13))
+                       return false;
+               if (!strncmp(TREE_STRING_POINTER(section), ".meminit.text", 13))
+                       return false;
+       }
+
+       return track_frame_size >= 0;
+}
+
+/* Build the function declaration for stackleak_track_stack() */
+static void stackleak_start_unit(void *gcc_data __unused,
+                                void *user_data __unused)
+{
+       tree fntype;
+
+       /* void stackleak_track_stack(void) */
+       fntype = build_function_type_list(void_type_node, NULL_TREE);
+       track_function_decl = build_fn_decl(track_function, fntype);
+       DECL_ASSEMBLER_NAME(track_function_decl); /* for LTO */
+       TREE_PUBLIC(track_function_decl) = 1;
+       TREE_USED(track_function_decl) = 1;
+       DECL_EXTERNAL(track_function_decl) = 1;
+       DECL_ARTIFICIAL(track_function_decl) = 1;
+       DECL_PRESERVE_P(track_function_decl) = 1;
+}
+
+/*
+ * Pass gate function is a predicate function that gets executed before the
+ * corresponding pass. If the return value is 'true' the pass gets executed,
+ * otherwise, it is skipped.
+ */
+static bool stackleak_instrument_gate(void)
+{
+       return stackleak_gate();
+}
+
+#define PASS_NAME stackleak_instrument
+#define PROPERTIES_REQUIRED PROP_gimple_leh | PROP_cfg
+#define TODO_FLAGS_START TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts
+#define TODO_FLAGS_FINISH TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func \
+                       | TODO_update_ssa | TODO_rebuild_cgraph_edges
+#include "gcc-generate-gimple-pass.h"
+
+static bool stackleak_cleanup_gate(void)
+{
+       return stackleak_gate();
+}
+
+#define PASS_NAME stackleak_cleanup
+#define TODO_FLAGS_FINISH TODO_dump_func
+#include "gcc-generate-rtl-pass.h"
+
+/*
+ * Every gcc plugin exports a plugin_init() function that is called right
+ * after the plugin is loaded. This function is responsible for registering
+ * the plugin callbacks and doing other required initialization.
+ */
+__visible int plugin_init(struct plugin_name_args *plugin_info,
+                         struct plugin_gcc_version *version)
+{
+       const char * const plugin_name = plugin_info->base_name;
+       const int argc = plugin_info->argc;
+       const struct plugin_argument * const argv = plugin_info->argv;
+       int i = 0;
+
+       /* Extra GGC root tables describing our GTY-ed data */
+       static const struct ggc_root_tab gt_ggc_r_gt_stackleak[] = {
+               {
+                       .base = &track_function_decl,
+                       .nelt = 1,
+                       .stride = sizeof(track_function_decl),
+                       .cb = &gt_ggc_mx_tree_node,
+                       .pchw = &gt_pch_nx_tree_node
+               },
+               LAST_GGC_ROOT_TAB
+       };
+
+       /*
+        * The stackleak_instrument pass should be executed before the
+        * "optimized" pass, which is the control flow graph cleanup that is
+        * performed just before expanding gcc trees to the RTL. In former
+        * versions of the plugin this new pass was inserted before the
+        * "tree_profile" pass, which is currently called "profile".
+        */
+       PASS_INFO(stackleak_instrument, "optimized", 1,
+                                               PASS_POS_INSERT_BEFORE);
+
+       /*
+        * The stackleak_cleanup pass should be executed before the "*free_cfg"
+        * pass. It's the moment when the stack frame size is already final,
+        * function prologues and epilogues are generated, and the
+        * machine-dependent code transformations are not done.
+        */
+       PASS_INFO(stackleak_cleanup, "*free_cfg", 1, PASS_POS_INSERT_BEFORE);
+
+       if (!plugin_default_version_check(version, &gcc_version)) {
+               error(G_("incompatible gcc/plugin versions"));
+               return 1;
+       }
+
+       /* Parse the plugin arguments */
+       for (i = 0; i < argc; i++) {
+               if (!strcmp(argv[i].key, "disable"))
+                       return 0;
+
+               if (!strcmp(argv[i].key, "track-min-size")) {
+                       if (!argv[i].value) {
+                               error(G_("no value supplied for option '-fplugin-arg-%s-%s'"),
+                                       plugin_name, argv[i].key);
+                               return 1;
+                       }
+
+                       track_frame_size = atoi(argv[i].value);
+                       if (track_frame_size < 0) {
+                               error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"),
+                                       plugin_name, argv[i].key, argv[i].value);
+                               return 1;
+                       }
+               } else {
+                       error(G_("unknown option '-fplugin-arg-%s-%s'"),
+                                       plugin_name, argv[i].key);
+                       return 1;
+               }
+       }
+
+       /* Give the information about the plugin */
+       register_callback(plugin_name, PLUGIN_INFO, NULL,
+                                               &stackleak_plugin_info);
+
+       /* Register to be called before processing a translation unit */
+       register_callback(plugin_name, PLUGIN_START_UNIT,
+                                       &stackleak_start_unit, NULL);
+
+       /* Register an extra GCC garbage collector (GGC) root table */
+       register_callback(plugin_name, PLUGIN_REGISTER_GGC_ROOTS, NULL,
+                                       (void *)&gt_ggc_r_gt_stackleak);
+
+       /*
+        * Hook into the Pass Manager to register new gcc passes.
+        *
+        * The stack frame size info is available only at the last RTL pass,
+        * when it's too late to insert complex code like a function call.
+        * So we register two gcc passes to instrument every function at first
+        * and remove the unneeded instrumentation later.
+        */
+       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+                                       &stackleak_instrument_pass_info);
+       register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL,
+                                       &stackleak_cleanup_pass_info);
+
+       return 0;
+}
index 67ed9f6ccdf8f09d8cb193d864950ed4e14ed9f7..63b609243d03783beb1da485aaaa1e1d6f3c78eb 100644 (file)
@@ -68,21 +68,7 @@ PHONY += $(simple-targets)
 $(simple-targets): $(obj)/conf
        $< $(silent) --$@ $(Kconfig)
 
-PHONY += oldnoconfig silentoldconfig savedefconfig defconfig
-
-# oldnoconfig is an alias of olddefconfig, because people already are dependent
-# on its behavior (sets new symbols to their default value but not 'n') with the
-# counter-intuitive name.
-oldnoconfig: olddefconfig
-       @echo "  WARNING: \"oldnoconfig\" target will be removed after Linux 4.19"
-       @echo "            Please use \"olddefconfig\" instead, which is an alias."
-
-# We do not expect manual invokcation of "silentoldcofig" (or "syncconfig").
-silentoldconfig: syncconfig
-       @echo "  WARNING: \"silentoldconfig\" has been renamed to \"syncconfig\""
-       @echo "            and is now an internal implementation detail."
-       @echo "            What you want is probably \"oldconfig\"."
-       @echo "            \"silentoldconfig\" will be removed after Linux 4.19"
+PHONY += savedefconfig defconfig
 
 savedefconfig: $(obj)/conf
        $< $(silent) --$@=defconfig $(Kconfig)
index 7b2b37260669e333390f0c995990fe0fb65c9f06..98e0c7a34699e941402085b7ceda086ea29a79c8 100644 (file)
@@ -460,12 +460,6 @@ static struct option long_opts[] = {
        {"randconfig",      no_argument,       NULL, randconfig},
        {"listnewconfig",   no_argument,       NULL, listnewconfig},
        {"olddefconfig",    no_argument,       NULL, olddefconfig},
-       /*
-        * oldnoconfig is an alias of olddefconfig, because people already
-        * are dependent on its behavior(sets new symbols to their default
-        * value but not 'n') with the counter-intuitive name.
-        */
-       {"oldnoconfig",     no_argument,       NULL, olddefconfig},
        {NULL, 0, NULL, 0}
 };
 
@@ -480,7 +474,6 @@ static void conf_usage(const char *progname)
        printf("  --syncconfig            Similar to oldconfig but generates configuration in\n"
               "                          include/{generated/,config/}\n");
        printf("  --olddefconfig          Same as oldconfig but sets new symbols to their default value\n");
-       printf("  --oldnoconfig           An alias of olddefconfig\n");
        printf("  --defconfig <file>      New config with default defined in <file>\n");
        printf("  --savedefconfig <file>  Save the minimal current configuration to <file>\n");
        printf("  --allnoconfig           New config where all options are answered with no\n");
index 67d1314476314590f285a63700c13e047dc17c80..0ef906499646b57293bb85dea54234c601bfcafa 100755 (executable)
@@ -33,12 +33,15 @@ usage() {
        echo "  -n    use allnoconfig instead of alldefconfig"
        echo "  -r    list redundant entries when merging fragments"
        echo "  -O    dir to put generated output files.  Consider setting \$KCONFIG_CONFIG instead."
+       echo
+       echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable."
 }
 
 RUNMAKE=true
 ALLTARGET=alldefconfig
 WARNREDUN=false
 OUTPUT=.
+CONFIG_PREFIX=${CONFIG_-CONFIG_}
 
 while true; do
        case $1 in
@@ -99,7 +102,9 @@ if [ ! -r "$INITFILE" ]; then
 fi
 
 MERGE_LIST=$*
-SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p"
+SED_CONFIG_EXP1="s/^\(${CONFIG_PREFIX}[a-zA-Z0-9_]*\)=.*/\1/p"
+SED_CONFIG_EXP2="s/^# \(${CONFIG_PREFIX}[a-zA-Z0-9_]*\) is not set$/\1/p"
+
 TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
 
 echo "Using $INITFILE as base"
@@ -112,7 +117,7 @@ for MERGE_FILE in $MERGE_LIST ; do
                echo "The merge file '$MERGE_FILE' does not exist.  Exit." >&2
                exit 1
        fi
-       CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
+       CFG_LIST=$(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $MERGE_FILE)
 
        for CFG in $CFG_LIST ; do
                grep -q -w $CFG $TMP_FILE || continue
@@ -155,7 +160,7 @@ make KCONFIG_ALLCONFIG=$TMP_FILE $OUTPUT_ARG $ALLTARGET
 
 
 # Check all specified config values took (might have missed-dependency issues)
-for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
+for CFG in $(sed -n -e "$SED_CONFIG_EXP1" -e "$SED_CONFIG_EXP2" $TMP_FILE); do
 
        REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
        ACTUAL_VAL=$(grep -w -e "$CFG" "$KCONFIG_CONFIG")
index 90c9a8ac7adb81d9dc10d0cb00eda86ee3a35a8f..f43a274f4f1d5b820c00826d939b20d325b2311f 100755 (executable)
@@ -81,11 +81,11 @@ else
        cp System.map "$tmpdir/boot/System.map-$version"
        cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version"
 fi
-cp "$($MAKE -s image_name)" "$tmpdir/$installed_image_path"
+cp "$($MAKE -s -f $srctree/Makefile image_name)" "$tmpdir/$installed_image_path"
 
-if grep -q "^CONFIG_OF=y" $KCONFIG_CONFIG ; then
+if grep -q "^CONFIG_OF_EARLY_FLATTREE=y" $KCONFIG_CONFIG ; then
        # Only some architectures with OF support have this target
-       if grep -q dtbs_install "${srctree}/arch/$SRCARCH/Makefile"; then
+       if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then
                $MAKE KBUILD_SRC= INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install
        fi
 fi
index 663a7f343b42c5417e4769a8da65754fd6eab859..edcad61fe3cdae66b8e8fe497f7f52329591ee0a 100755 (executable)
@@ -88,6 +88,7 @@ set_debarch() {
 version=$KERNELRELEASE
 if [ -n "$KDEB_PKGVERSION" ]; then
        packageversion=$KDEB_PKGVERSION
+       revision=${packageversion##*-}
 else
        revision=$(cat .version 2>/dev/null||echo 1)
        packageversion=$version-$revision
@@ -205,10 +206,12 @@ cat <<EOF > debian/rules
 #!$(command -v $MAKE) -f
 
 build:
-       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC=
+       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+       KBUILD_BUILD_VERSION=${revision} KBUILD_SRC=
 
 binary-arch:
-       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} KBUILD_SRC= intdeb-pkg
+       \$(MAKE) KERNELRELEASE=${version} ARCH=${ARCH} \
+       KBUILD_BUILD_VERSION=${revision} KBUILD_SRC= intdeb-pkg
 
 clean:
        rm -rf debian/*tmp debian/files
index e05646dc24dcf633830ce6d8dd3da5f71a29731e..009147d4718eeead8117413a8bdf95108c84e4dd 100755 (executable)
@@ -12,6 +12,7 @@
 # how we were called determines which rpms we build and how we build them
 if [ "$1" = prebuilt ]; then
        S=DEL
+       MAKE="$MAKE -f $srctree/Makefile"
 else
        S=
 fi
@@ -78,19 +79,19 @@ $S  %prep
 $S     %setup -q
 $S
 $S     %build
-$S     make %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
+$S     $MAKE %{?_smp_mflags} KBUILD_BUILD_VERSION=%{release}
 $S
        %install
        mkdir -p %{buildroot}/boot
        %ifarch ia64
        mkdir -p %{buildroot}/boot/efi
-       cp \$(make image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
+       cp \$($MAKE image_name) %{buildroot}/boot/efi/vmlinuz-$KERNELRELEASE
        ln -s efi/vmlinuz-$KERNELRELEASE %{buildroot}/boot/
        %else
-       cp \$(make image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
+       cp \$($MAKE image_name) %{buildroot}/boot/vmlinuz-$KERNELRELEASE
        %endif
-$M     make %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} KBUILD_SRC= modules_install
-       make %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr KBUILD_SRC= headers_install
+$M     $MAKE %{?_smp_mflags} INSTALL_MOD_PATH=%{buildroot} modules_install
+       $MAKE %{?_smp_mflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install
        cp System.map %{buildroot}/boot/System.map-$KERNELRELEASE
        cp .config %{buildroot}/boot/config-$KERNELRELEASE
        bzip2 -9 --keep vmlinux
index 79f7dd57d571e749dc3e36ed95a473cd599e0056..71f39410691b6be14774102000507c834c8ad493 100755 (executable)
@@ -74,7 +74,7 @@ scm_version()
                fi
 
                # Check for uncommitted changes
-               if git status -uno --porcelain | grep -qv '^.. scripts/package'; then
+               if git diff-index --name-only HEAD | grep -qv "^scripts/package"; then
                        printf '%s' -dirty
                fi
 
index 839e190bbd7a0075ef28040291fca87f44d3170a..5056fb3b897d0094e182bba4fca08ae491dab7cc 100755 (executable)
@@ -168,7 +168,6 @@ class id_parser(object):
         self.curline = 0
         try:
             for line in fd:
-                line = line.decode(locale.getpreferredencoding(False), errors='ignore')
                 self.curline += 1
                 if self.curline > maxlines:
                     break
index 7493c0ee51cc93e7cc7a7a7a3920a917ab560659..db00e3e30a59d77cf89b2793445e3f8901cc4577 100644 (file)
@@ -395,7 +395,7 @@ usage(void)
  * When we have processed a group that starts off with a known-false
  * #if/#elif sequence (which has therefore been deleted) followed by a
  * #elif that we don't understand and therefore must keep, we edit the
- * latter into a #if to keep the nesting correct. We use strncpy() to
+ * latter into a #if to keep the nesting correct. We use memcpy() to
  * overwrite the 4 byte token "elif" with "if  " without a '\0' byte.
  *
  * When we find a true #elif in a group, the following block will
@@ -450,7 +450,7 @@ static void Idrop (void) { Fdrop();  ignoreon(); }
 static void Itrue (void) { Ftrue();  ignoreon(); }
 static void Ifalse(void) { Ffalse(); ignoreon(); }
 /* modify this line */
-static void Mpass (void) { strncpy(keyword, "if  ", 4); Pelif(); }
+static void Mpass (void) { memcpy(keyword, "if  ", 4); Pelif(); }
 static void Mtrue (void) { keywordedit("else");  state(IS_TRUE_MIDDLE); }
 static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
 static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
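
A note on why the substitution above is behavior-preserving: both calls copy exactly four bytes and neither writes a NUL, because the count is exhausted before the source terminator is reached; memcpy() simply states that intent, and it avoids the false-positive -Wstringop-truncation warning that the strncpy() spelling triggers under the W=1 flag added in the Makefile.extrawarn hunk earlier. A standalone demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char keyword[8] = "elif";

	memcpy(keyword, "if  ", 4);	/* 4 bytes overwritten, no NUL copied */
	printf("[%s]\n", keyword);	/* prints [if  ] */
	return 0;
}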
index e09fe4d7307cd02f85919aefccacc19276e96be9..8963203319ea4005a4176515fce3a65f370dc1e3 100644 (file)
@@ -1742,7 +1742,7 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry)
        if (error)
                return error;
 
-        parent = aa_get_ns(dir->i_private);
+       parent = aa_get_ns(dir->i_private);
        /* rmdir calls the generic securityfs functions to remove files
         * from the apparmor dir. It is up to the apparmor ns locking
         * to avoid races.
index 4285943f7260f36f271b7d7ad773345fb84d97f0..d0afed9ebd0ed9cac3a4803f6f1622959802c9d9 100644 (file)
@@ -496,7 +496,7 @@ static void update_file_ctx(struct aa_file_ctx *fctx, struct aa_label *label,
        /* update caching of label on file_ctx */
        spin_lock(&fctx->lock);
        old = rcu_dereference_protected(fctx->label,
-                                       spin_is_locked(&fctx->lock));
+                                       lockdep_is_held(&fctx->lock));
        l = aa_label_merge(old, label, GFP_ATOMIC);
        if (l) {
                if (l != old) {
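The lockdep_is_held() fix matters because spin_is_locked() only reports that somebody holds the lock (and evaluates to false on !SMP builds), while rcu_dereference_protected() needs evidence that the current context holds it. A kernel-style sketch of the corrected pattern, with illustrative types:

    struct data { struct rcu_head rcu; int val; };
    struct cfg  { struct data __rcu *d; spinlock_t lock; };

    static void cfg_update(struct cfg *c, struct data *fresh)
    {
            struct data *old;

            spin_lock(&c->lock);
            /* lockdep verifies *this* context holds c->lock */
            old = rcu_dereference_protected(c->d, lockdep_is_held(&c->lock));
            rcu_assign_pointer(c->d, fresh);
            spin_unlock(&c->lock);
            kfree_rcu(old, rcu);
    }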
index e287b7d0d4bebcf2cc83d9add032fdb1feed54c0..265ae6641a0644e84e40c7f774f4f447a1df3fc2 100644 (file)
@@ -151,6 +151,8 @@ static inline struct aa_label *begin_current_label_crit_section(void)
 {
        struct aa_label *label = aa_current_raw_label();
 
+       might_sleep();
+
        if (label_is_stale(label)) {
                label = aa_get_newest_label(label);
                if (aa_replace_current_label(label) == 0)
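Adding might_sleep() documents that begin_current_label_crit_section() can end up in aa_replace_current_label(), which may sleep; with CONFIG_DEBUG_ATOMIC_SLEEP an atomic-context caller now splats immediately instead of failing only when the label happens to be stale. Atomic callers are expected to use the __begin/__end variants, as the ptrace hunks below do. A minimal sketch of the annotation:

    /* sketch: declare a sleeping precondition up front */
    static void *fetch_slow(size_t size)
    {
            might_sleep();                     /* warns if called in atomic context */
            return kmalloc(size, GFP_KERNEL);  /* GFP_KERNEL itself may sleep */
    }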
index ec7228e857a90d6dabf37de1136c6418d3716e3e..7334ac966d018d9d11220c9ffe30e0863bd6f700 100644 (file)
@@ -83,6 +83,13 @@ struct aa_sk_ctx {
        __e;                                    \
 })
 
+struct aa_secmark {
+       u8 audit;
+       u8 deny;
+       u32 secid;
+       char *label;
+};
+
 extern struct aa_sfs_entry aa_sfs_entry_network[];
 
 void audit_net_cb(struct audit_buffer *ab, void *va);
@@ -103,4 +110,7 @@ int aa_sk_perm(const char *op, u32 request, struct sock *sk);
 int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
                      struct socket *sock);
 
+int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+                          u32 secid, struct sock *sk);
+
 #endif /* __AA_NET_H */
index ab64c6b5db5aca7aaae1b16a800d48c7499bc498..8e6707c837befae75d6e70accd3c5586ec8edfe0 100644 (file)
@@ -155,6 +155,9 @@ struct aa_profile {
 
        struct aa_rlimit rlimits;
 
+       int secmark_count;
+       struct aa_secmark *secmark;
+
        struct aa_loaddata *rawdata;
        unsigned char *hash;
        char *dirname;
index dee6fa3b6081e1342bfa3e0c4077ea960ca7bfff..fa2062711b63e75a9cf6598c979de04ffd0d3e40 100644 (file)
@@ -22,6 +22,9 @@ struct aa_label;
 /* secid value that will not be allocated */
 #define AA_SECID_INVALID 0
 
+/* secid value that matches any other secid */
+#define AA_SECID_WILDCARD 1
+
 struct aa_label *aa_secid_to_label(u32 secid);
 int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
 int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
index 974affe505314bcd5ecef87966ba698de1dd853e..76491e7f4177fa94c9d742e91d049a5ebcfe04e1 100644 (file)
@@ -90,10 +90,12 @@ const char *aa_splitn_fqname(const char *fqname, size_t n, const char **ns_name,
        const char *end = fqname + n;
        const char *name = skipn_spaces(fqname, n);
 
-       if (!name)
-               return NULL;
        *ns_name = NULL;
        *ns_len = 0;
+
+       if (!name)
+               return NULL;
+
        if (name[0] == ':') {
                char *split = strnchr(&name[1], end - &name[1], ':');
                *ns_name = skipn_spaces(&name[1], end - &name[1]);
index aa35939443c47799afc720d6119427389dc0bea2..42446a216f3bcfccfe2b0f099f4fa044c1efe6da 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/sysctl.h>
 #include <linux/audit.h>
 #include <linux/user_namespace.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
 #include <net/sock.h>
 
 #include "include/apparmor.h"
@@ -114,13 +116,13 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
        struct aa_label *tracer, *tracee;
        int error;
 
-       tracer = begin_current_label_crit_section();
+       tracer = __begin_current_label_crit_section();
        tracee = aa_get_task_label(child);
        error = aa_may_ptrace(tracer, tracee,
                        (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
                                                  : AA_PTRACE_TRACE);
        aa_put_label(tracee);
-       end_current_label_crit_section(tracer);
+       __end_current_label_crit_section(tracer);
 
        return error;
 }
@@ -130,11 +132,11 @@ static int apparmor_ptrace_traceme(struct task_struct *parent)
        struct aa_label *tracer, *tracee;
        int error;
 
-       tracee = begin_current_label_crit_section();
+       tracee = __begin_current_label_crit_section();
        tracer = aa_get_task_label(parent);
        error = aa_may_ptrace(tracer, tracee, AA_PTRACE_TRACE);
        aa_put_label(tracer);
-       end_current_label_crit_section(tracee);
+       __end_current_label_crit_section(tracee);
 
        return error;
 }
@@ -1020,6 +1022,7 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
        return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock);
 }
 
+#ifdef CONFIG_NETWORK_SECMARK
 /**
  * apparmor_socket_sock_rcv_skb - check perms before associating skb to sk
  *
@@ -1030,8 +1033,15 @@ static int apparmor_socket_shutdown(struct socket *sock, int how)
  */
 static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
 {
-       return 0;
+       struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+       if (!skb->secmark)
+               return 0;
+
+       return apparmor_secmark_check(ctx->label, OP_RECVMSG, AA_MAY_RECEIVE,
+                                     skb->secmark, sk);
 }
+#endif
 
 
 static struct aa_label *sk_peer_label(struct sock *sk)
@@ -1126,6 +1136,20 @@ static void apparmor_sock_graft(struct sock *sk, struct socket *parent)
                ctx->label = aa_get_current_label();
 }
 
+#ifdef CONFIG_NETWORK_SECMARK
+static int apparmor_inet_conn_request(struct sock *sk, struct sk_buff *skb,
+                                     struct request_sock *req)
+{
+       struct aa_sk_ctx *ctx = SK_CTX(sk);
+
+       if (!skb->secmark)
+               return 0;
+
+       return apparmor_secmark_check(ctx->label, OP_CONNECT, AA_MAY_CONNECT,
+                                     skb->secmark, sk);
+}
+#endif
+
 static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check),
        LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme),
@@ -1177,12 +1201,17 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt),
        LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt),
        LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown),
+#ifdef CONFIG_NETWORK_SECMARK
        LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb),
+#endif
        LSM_HOOK_INIT(socket_getpeersec_stream,
                      apparmor_socket_getpeersec_stream),
        LSM_HOOK_INIT(socket_getpeersec_dgram,
                      apparmor_socket_getpeersec_dgram),
        LSM_HOOK_INIT(sock_graft, apparmor_sock_graft),
+#ifdef CONFIG_NETWORK_SECMARK
+       LSM_HOOK_INIT(inet_conn_request, apparmor_inet_conn_request),
+#endif
 
        LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank),
        LSM_HOOK_INIT(cred_free, apparmor_cred_free),
@@ -1538,6 +1567,97 @@ static inline int apparmor_init_sysctl(void)
 }
 #endif /* CONFIG_SYSCTL */
 
+#if defined(CONFIG_NETFILTER) && defined(CONFIG_NETWORK_SECMARK)
+static unsigned int apparmor_ip_postroute(void *priv,
+                                         struct sk_buff *skb,
+                                         const struct nf_hook_state *state)
+{
+       struct aa_sk_ctx *ctx;
+       struct sock *sk;
+
+       if (!skb->secmark)
+               return NF_ACCEPT;
+
+       sk = skb_to_full_sk(skb);
+       if (sk == NULL)
+               return NF_ACCEPT;
+
+       ctx = SK_CTX(sk);
+       if (!apparmor_secmark_check(ctx->label, OP_SENDMSG, AA_MAY_SEND,
+                                   skb->secmark, sk))
+               return NF_ACCEPT;
+
+       return NF_DROP_ERR(-ECONNREFUSED);
+}
+
+static unsigned int apparmor_ipv4_postroute(void *priv,
+                                           struct sk_buff *skb,
+                                           const struct nf_hook_state *state)
+{
+       return apparmor_ip_postroute(priv, skb, state);
+}
+
+static unsigned int apparmor_ipv6_postroute(void *priv,
+                                           struct sk_buff *skb,
+                                           const struct nf_hook_state *state)
+{
+       return apparmor_ip_postroute(priv, skb, state);
+}
+
+static const struct nf_hook_ops apparmor_nf_ops[] = {
+       {
+               .hook =         apparmor_ipv4_postroute,
+               .pf =           NFPROTO_IPV4,
+               .hooknum =      NF_INET_POST_ROUTING,
+               .priority =     NF_IP_PRI_SELINUX_FIRST,
+       },
+#if IS_ENABLED(CONFIG_IPV6)
+       {
+               .hook =         apparmor_ipv6_postroute,
+               .pf =           NFPROTO_IPV6,
+               .hooknum =      NF_INET_POST_ROUTING,
+               .priority =     NF_IP6_PRI_SELINUX_FIRST,
+       },
+#endif
+};
+
+static int __net_init apparmor_nf_register(struct net *net)
+{
+       int ret;
+
+       ret = nf_register_net_hooks(net, apparmor_nf_ops,
+                                   ARRAY_SIZE(apparmor_nf_ops));
+       return ret;
+}
+
+static void __net_exit apparmor_nf_unregister(struct net *net)
+{
+       nf_unregister_net_hooks(net, apparmor_nf_ops,
+                               ARRAY_SIZE(apparmor_nf_ops));
+}
+
+static struct pernet_operations apparmor_net_ops = {
+       .init = apparmor_nf_register,
+       .exit = apparmor_nf_unregister,
+};
+
+static int __init apparmor_nf_ip_init(void)
+{
+       int err;
+
+       if (!apparmor_enabled)
+               return 0;
+
+       err = register_pernet_subsys(&apparmor_net_ops);
+       if (err)
+               panic("Apparmor: register_pernet_subsys: error %d\n", err);
+
+       return 0;
+}
+__initcall(apparmor_nf_ip_init);
+#endif
+
 static int __init apparmor_init(void)
 {
        int error;
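The hooks above fire only for packets that carry a secmark (set, for instance, by an iptables SECMARK rule), and NF_DROP_ERR(-ECONNREFUSED) drops the packet while letting a local sender see ECONNREFUSED rather than a silent drop. Registration goes through pernet_operations so every new network namespace picks the hooks up automatically. A trimmed, hedged sketch of that registration shape, with illustrative names:

    static unsigned int my_postroute(void *priv, struct sk_buff *skb,
                                     const struct nf_hook_state *state)
    {
            if (!skb->secmark)
                    return NF_ACCEPT;   /* unmarked traffic: not our concern */
            /* consult policy against skb->secmark here */
            return NF_ACCEPT;
    }

    static const struct nf_hook_ops my_ops[] = {
            {
                    .hook     = my_postroute,
                    .pf       = NFPROTO_IPV4,
                    .hooknum  = NF_INET_POST_ROUTING,
                    .priority = NF_IP_PRI_SELINUX_FIRST,
            },
    };

    static int __net_init my_net_init(struct net *net)
    {
            return nf_register_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
    }

    static void __net_exit my_net_exit(struct net *net)
    {
            nf_unregister_net_hooks(net, my_ops, ARRAY_SIZE(my_ops));
    }

    static struct pernet_operations my_net_ops = {
            .init = my_net_init,
            .exit = my_net_exit,
    };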
index bb24cfa0a164cbbc0fbebe0b5e85a176bf840069..c07fde444792d8caee5bea5ef99edc62117bdb34 100644 (file)
@@ -18,6 +18,7 @@
 #include "include/label.h"
 #include "include/net.h"
 #include "include/policy.h"
+#include "include/secid.h"
 
 #include "net_names.h"
 
@@ -146,17 +147,20 @@ int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
 static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
                            struct sock *sk)
 {
-       struct aa_profile *profile;
-       DEFINE_AUDIT_SK(sa, op, sk);
+       int error = 0;
 
        AA_BUG(!label);
        AA_BUG(!sk);
 
-       if (unconfined(label))
-               return 0;
+       if (!unconfined(label)) {
+               struct aa_profile *profile;
+               DEFINE_AUDIT_SK(sa, op, sk);
 
-       return fn_for_each_confined(label, profile,
-                       aa_profile_af_sk_perm(profile, &sa, request, sk));
+               error = fn_for_each_confined(label, profile,
+                           aa_profile_af_sk_perm(profile, &sa, request, sk));
+       }
+
+       return error;
 }
 
 int aa_sk_perm(const char *op, u32 request, struct sock *sk)
@@ -185,3 +189,70 @@ int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request,
 
        return aa_label_sk_perm(label, op, request, sock->sk);
 }
+
+#ifdef CONFIG_NETWORK_SECMARK
+static int apparmor_secmark_init(struct aa_secmark *secmark)
+{
+       struct aa_label *label;
+
+       if (secmark->label[0] == '*') {
+               secmark->secid = AA_SECID_WILDCARD;
+               return 0;
+       }
+
+       label = aa_label_strn_parse(&root_ns->unconfined->label,
+                                   secmark->label, strlen(secmark->label),
+                                   GFP_ATOMIC, false, false);
+
+       if (IS_ERR(label))
+               return PTR_ERR(label);
+
+       secmark->secid = label->secid;
+
+       return 0;
+}
+
+static int aa_secmark_perm(struct aa_profile *profile, u32 request, u32 secid,
+                          struct common_audit_data *sa, struct sock *sk)
+{
+       int i, ret;
+       struct aa_perms perms = { };
+
+       if (profile->secmark_count == 0)
+               return 0;
+
+       for (i = 0; i < profile->secmark_count; i++) {
+               if (!profile->secmark[i].secid) {
+                       ret = apparmor_secmark_init(&profile->secmark[i]);
+                       if (ret)
+                               return ret;
+               }
+
+               if (profile->secmark[i].secid == secid ||
+                   profile->secmark[i].secid == AA_SECID_WILDCARD) {
+                       if (profile->secmark[i].deny)
+                               perms.deny = ALL_PERMS_MASK;
+                       else
+                               perms.allow = ALL_PERMS_MASK;
+
+                       if (profile->secmark[i].audit)
+                               perms.audit = ALL_PERMS_MASK;
+               }
+       }
+
+       aa_apply_modes_to_perms(profile, &perms);
+
+       return aa_check_perms(profile, &perms, request, sa, audit_net_cb);
+}
+
+int apparmor_secmark_check(struct aa_label *label, char *op, u32 request,
+                          u32 secid, struct sock *sk)
+{
+       struct aa_profile *profile;
+       DEFINE_AUDIT_SK(sa, op, sk);
+
+       return fn_for_each_confined(label, profile,
+                                   aa_secmark_perm(profile, request, secid,
+                                                   &sa, sk));
+}
+#endif
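Two details of the matching above are easy to miss: secids resolve lazily (apparmor_secmark_init() runs on first use, since a profile can name labels that do not exist yet at policy load), and a label of "*" maps to AA_SECID_WILDCARD, which matches every mark. Under the hedged reading that aa_check_perms() lets deny bits win and treats an empty match as denial, the rule-list semantics reduce to roughly this userspace toy (types illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    #define SECID_WILDCARD 1u

    struct rule { uint32_t secid; bool deny; };

    /* true if @secid is permitted: any matching deny rule wins,
     * and no match at all means denied
     */
    static bool secmark_allowed(const struct rule *r, int n, uint32_t secid)
    {
            bool allow = false, deny = false;

            for (int i = 0; i < n; i++) {
                    if (r[i].secid == secid || r[i].secid == SECID_WILDCARD) {
                            if (r[i].deny)
                                    deny = true;
                            else
                                    allow = true;
                    }
            }
            return allow && !deny;
    }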
index 1590e2de4e841c131ac472fa5e9c312d448b0866..df9c5890a87891dc1707fc51171148d01d952ea5 100644 (file)
@@ -231,6 +231,9 @@ void aa_free_profile(struct aa_profile *profile)
        for (i = 0; i < profile->xattr_count; i++)
                kzfree(profile->xattrs[i]);
        kzfree(profile->xattrs);
+       for (i = 0; i < profile->secmark_count; i++)
+               kzfree(profile->secmark[i].label);
+       kzfree(profile->secmark);
        kzfree(profile->dirname);
        aa_put_dfa(profile->xmatch);
        aa_put_dfa(profile->policy.dfa);
index 21cb384d712a2865d1da6f8d12625c250993b28e..379682e2a8d5db7e793fd1ebdd3d93f569bb3b4c 100644 (file)
@@ -292,6 +292,19 @@ fail:
        return 0;
 }
 
+static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
+{
+       if (unpack_nameX(e, AA_U8, name)) {
+               if (!inbounds(e, sizeof(u8)))
+                       return 0;
+               if (data)
+                       *data = get_unaligned((u8 *)e->pos);
+               e->pos += sizeof(u8);
+               return 1;
+       }
+       return 0;
+}
+
 static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
 {
        if (unpack_nameX(e, AA_U32, name)) {
@@ -529,6 +542,49 @@ fail:
        return 0;
 }
 
+static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
+{
+       void *pos = e->pos;
+       int i, size;
+
+       if (unpack_nameX(e, AA_STRUCT, "secmark")) {
+               size = unpack_array(e, NULL);
+
+               profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
+                                          GFP_KERNEL);
+               if (!profile->secmark)
+                       goto fail;
+
+               profile->secmark_count = size;
+
+               for (i = 0; i < size; i++) {
+                       if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
+                               goto fail;
+                       if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
+                               goto fail;
+                       if (!unpack_strdup(e, &profile->secmark[i].label, NULL))
+                               goto fail;
+               }
+               if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+                       goto fail;
+               if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+                       goto fail;
+       }
+
+       return 1;
+
+fail:
+       if (profile->secmark) {
+               for (i = 0; i < size; i++)
+                       kfree(profile->secmark[i].label);
+               kfree(profile->secmark);
+               profile->secmark_count = 0;
+       }
+
+       e->pos = pos;
+       return 0;
+}
+
 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
 {
        void *pos = e->pos;
@@ -727,6 +783,11 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
                goto fail;
        }
 
+       if (!unpack_secmark(e, profile)) {
+               info = "failed to unpack profile secmark rules";
+               goto fail;
+       }
+
        if (unpack_nameX(e, AA_STRUCT, "policydb")) {
                /* generic policy dfa - optional and may be NULL */
                info = "failed to unpack policydb";
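unpack_secmark() consumes a named AA_STRUCT, an array count, and per entry a u8 audit flag, a u8 deny flag and a label string; on any failure it rewinds e->pos so the stream is left untouched. That all-or-nothing cursor pattern, reduced to a standalone sketch (helper names illustrative):

    #include <stdbool.h>

    /* toy cursor mirroring struct aa_ext's pos/end bounds checking */
    struct cursor { const unsigned char *pos, *end; };

    static bool read_u8(struct cursor *c, unsigned char *out)
    {
            if (c->end - c->pos < 1)
                    return false;          /* the inbounds() equivalent */
            *out = *c->pos++;
            return true;
    }

    /* parse one audit/deny pair; rewind on any failure, the same
     * all-or-nothing shape as unpack_secmark()
     */
    static bool parse_flags(struct cursor *c, unsigned char *audit,
                            unsigned char *deny)
    {
            const unsigned char *saved = c->pos;

            if (!read_u8(c, audit) || !read_u8(c, deny)) {
                    c->pos = saved;        /* leave the stream untouched */
                    return false;
            }
            return true;
    }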
index 4ccec1bcf6f54f261542a546458cab77c6af9e52..05373d9a3d6af10643050e7a825c04b3aef30c96 100644 (file)
@@ -32,8 +32,7 @@
  * secids - do not pin labels with a refcount. They rely on the label
  * properly updating/freeing them
  */
-
-#define AA_FIRST_SECID 1
+#define AA_FIRST_SECID 2
 
 static DEFINE_IDR(aa_secids);
 static DEFINE_SPINLOCK(secid_lock);
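Bumping AA_FIRST_SECID to 2 keeps the IDR from ever handing out the two reserved values (0 = AA_SECID_INVALID, 1 = the new AA_SECID_WILDCARD). The allocation site is not part of this hunk, but its shape is roughly the following sketch:

    static int alloc_secid(struct aa_label *label)
    {
            unsigned long flags;
            int ret;

            spin_lock_irqsave(&secid_lock, flags);
            /* floor AA_FIRST_SECID keeps 0 (invalid) and 1 (wildcard) reserved */
            ret = idr_alloc(&aa_secids, label, AA_FIRST_SECID, 0, GFP_ATOMIC);
            spin_unlock_irqrestore(&secid_lock, flags);
            return ret;
    }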
index 6dc0751445087727f9d1b09a8c1efdbbc9105625..d775e03fbbcc7d87b9724529a4499b9449a9aa46 100644 (file)
@@ -106,6 +106,7 @@ int asymmetric_verify(struct key *keyring, const char *sig,
 
        pks.pkey_algo = "rsa";
        pks.hash_algo = hash_algo_name[hdr->hash_algo];
+       pks.encoding = "pkcs1";
        pks.digest = (u8 *)data;
        pks.digest_size = datalen;
        pks.s = hdr->sig;
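With the keyctl pkey work below, signature verification dispatches on public_key_signature::encoding, so IMA now names "pkcs1" (RSASSA-PKCS1-v1_5) explicitly instead of relying on an implicit default. A hedged sketch of the fully populated descriptor (field values illustrative):

    struct public_key_signature pks = {
            .pkey_algo   = "rsa",
            .hash_algo   = "sha256",   /* from hdr->hash_algo in the real code */
            .encoding    = "pkcs1",    /* RSASSA-PKCS1-v1_5 padding */
            .digest      = digest,
            .digest_size = digest_len,
            .s           = sig,
            .s_size      = sig_len,
    };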
index ef1581b337a3dc67dedd2e43cec94d28e5188cbd..9cef54064f6084a3694a33eae143f960aebba979 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_SYSCTL) += sysctl.o
 obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
 obj-$(CONFIG_KEY_DH_OPERATIONS) += dh.o
+obj-$(CONFIG_ASYMMETRIC_KEY_TYPE) += keyctl_pkey.o
 
 #
 # Key types
index e87c89c0177c1559e9cc5e8874b4231de1dc3437..9482df601dc33de3b183b02b66b5964e884a5ba8 100644 (file)
@@ -141,6 +141,24 @@ COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
                return keyctl_restrict_keyring(arg2, compat_ptr(arg3),
                                               compat_ptr(arg4));
 
+       case KEYCTL_PKEY_QUERY:
+               if (arg3 != 0)
+                       return -EINVAL;
+               return keyctl_pkey_query(arg2,
+                                        compat_ptr(arg4),
+                                        compat_ptr(arg5));
+
+       case KEYCTL_PKEY_ENCRYPT:
+       case KEYCTL_PKEY_DECRYPT:
+       case KEYCTL_PKEY_SIGN:
+               return keyctl_pkey_e_d_s(option,
+                                        compat_ptr(arg2), compat_ptr(arg3),
+                                        compat_ptr(arg4), compat_ptr(arg5));
+
+       case KEYCTL_PKEY_VERIFY:
+               return keyctl_pkey_verify(compat_ptr(arg2), compat_ptr(arg3),
+                                         compat_ptr(arg4), compat_ptr(arg5));
+
        default:
                return -EOPNOTSUPP;
        }
index 9f8208dc0e55829c7e5821b3f123148e5f7dc801..74cb0ff42fedbca403a282058be7219109b1b3c6 100644 (file)
@@ -298,6 +298,45 @@ static inline long compat_keyctl_dh_compute(
 #endif
 #endif
 
+#ifdef CONFIG_ASYMMETRIC_KEY_TYPE
+extern long keyctl_pkey_query(key_serial_t,
+                             const char __user *,
+                             struct keyctl_pkey_query __user *);
+
+extern long keyctl_pkey_verify(const struct keyctl_pkey_params __user *,
+                              const char __user *,
+                              const void __user *, const void __user *);
+
+extern long keyctl_pkey_e_d_s(int,
+                             const struct keyctl_pkey_params __user *,
+                             const char __user *,
+                             const void __user *, void __user *);
+#else
+static inline long keyctl_pkey_query(key_serial_t id,
+                                    const char __user *_info,
+                                    struct keyctl_pkey_query __user *_res)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_verify(const struct keyctl_pkey_params __user *params,
+                                     const char __user *_info,
+                                     const void __user *_in,
+                                     const void __user *_in2)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline long keyctl_pkey_e_d_s(int op,
+                                    const struct keyctl_pkey_params __user *params,
+                                    const char __user *_info,
+                                    const void __user *_in,
+                                    void __user *_out)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 /*
  * Debugging key validation
  */
index 1ffe60bb2845f97638157b01ed7fcc4f45714312..18619690ce77a926307796ec3be1626a1023db4c 100644 (file)
@@ -1747,6 +1747,30 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
                                               (const char __user *) arg3,
                                               (const char __user *) arg4);
 
+       case KEYCTL_PKEY_QUERY:
+               if (arg3 != 0)
+                       return -EINVAL;
+               return keyctl_pkey_query((key_serial_t)arg2,
+                                        (const char __user *)arg4,
+                                        (struct keyctl_pkey_query __user *)arg5);
+
+       case KEYCTL_PKEY_ENCRYPT:
+       case KEYCTL_PKEY_DECRYPT:
+       case KEYCTL_PKEY_SIGN:
+               return keyctl_pkey_e_d_s(
+                       option,
+                       (const struct keyctl_pkey_params __user *)arg2,
+                       (const char __user *)arg3,
+                       (const void __user *)arg4,
+                       (void __user *)arg5);
+
+       case KEYCTL_PKEY_VERIFY:
+               return keyctl_pkey_verify(
+                       (const struct keyctl_pkey_params __user *)arg2,
+                       (const char __user *)arg3,
+                       (const void __user *)arg4,
+                       (const void __user *)arg5);
+
        default:
                return -EOPNOTSUPP;
        }
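Until libkeyutils grows wrappers, these operations reach the kernel through the raw keyctl(2) multiplexer. A hedged userspace sketch querying an asymmetric key's capabilities (the key serial is illustrative, and the installed UAPI headers must be new enough to carry the pkey additions from this series):

    #include <linux/keyctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            long key_id = 0x12345678;       /* illustrative key serial */
            struct keyctl_pkey_query q;

            /* arg3 must be 0; arg4 is the info string, arg5 the result */
            if (syscall(__NR_keyctl, KEYCTL_PKEY_QUERY, key_id, 0UL,
                        "enc=pkcs1", &q) < 0) {
                    perror("KEYCTL_PKEY_QUERY");
                    return 1;
            }
            printf("key_size=%u max_sig_size=%u supported_ops=%#x\n",
                   q.key_size, q.max_sig_size, q.supported_ops);
            return 0;
    }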
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
new file mode 100644 (file)
index 0000000..7839788
--- /dev/null
+++ b/security/keys/keyctl_pkey.c
@@ -0,0 +1,323 @@
+/* Public-key operation keyctls
+ *
+ * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/parser.h>
+#include <linux/uaccess.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static void keyctl_pkey_params_free(struct kernel_pkey_params *params)
+{
+       kfree(params->info);
+       key_put(params->key);
+}
+
+enum {
+       Opt_err = -1,
+       Opt_enc,                /* "enc=<encoding>" e.g. "enc=oaep" */
+       Opt_hash,               /* "hash=<digest-name>" e.g. "hash=sha1" */
+};
+
+static const match_table_t param_keys = {
+       { Opt_enc,      "enc=%s" },
+       { Opt_hash,     "hash=%s" },
+       { Opt_err,      NULL }
+};
+
+/*
+ * Parse the information string which consists of key=val pairs.
+ */
+static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
+{
+       unsigned long token_mask = 0;
+       substring_t args[MAX_OPT_ARGS];
+       char *c = params->info, *p, *q;
+       int token;
+
+       while ((p = strsep(&c, " \t"))) {
+               if (*p == '\0' || *p == ' ' || *p == '\t')
+                       continue;
+               token = match_token(p, param_keys, args);
+               if (__test_and_set_bit(token, &token_mask))
+                       return -EINVAL;
+               q = args[0].from;
+               if (!q[0])
+                       return -EINVAL;
+
+               switch (token) {
+               case Opt_enc:
+                       params->encoding = q;
+                       break;
+
+               case Opt_hash:
+                       params->hash_algo = q;
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Interpret parameters.  Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get(key_serial_t id,
+                                 const char __user *_info,
+                                 struct kernel_pkey_params *params)
+{
+       key_ref_t key_ref;
+       void *p;
+       int ret;
+
+       memset(params, 0, sizeof(*params));
+       params->encoding = "raw";
+
+       p = strndup_user(_info, PAGE_SIZE);
+       if (IS_ERR(p))
+               return PTR_ERR(p);
+       params->info = p;
+
+       ret = keyctl_pkey_params_parse(params);
+       if (ret < 0)
+               return ret;
+
+       key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+       if (IS_ERR(key_ref))
+               return PTR_ERR(key_ref);
+       params->key = key_ref_to_ptr(key_ref);
+
+       if (!params->key->type->asym_query)
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+/*
+ * Get parameters from userspace.  Callers must always call the free function
+ * on params, even if an error is returned.
+ */
+static int keyctl_pkey_params_get_2(const struct keyctl_pkey_params __user *_params,
+                                   const char __user *_info,
+                                   int op,
+                                   struct kernel_pkey_params *params)
+{
+       struct keyctl_pkey_params uparams;
+       struct kernel_pkey_query info;
+       int ret;
+
+       memset(params, 0, sizeof(*params));
+       params->encoding = "raw";
+
+       if (copy_from_user(&uparams, _params, sizeof(uparams)) != 0)
+               return -EFAULT;
+
+       ret = keyctl_pkey_params_get(uparams.key_id, _info, params);
+       if (ret < 0)
+               return ret;
+
+       ret = params->key->type->asym_query(params, &info);
+       if (ret < 0)
+               return ret;
+
+       switch (op) {
+       case KEYCTL_PKEY_ENCRYPT:
+       case KEYCTL_PKEY_DECRYPT:
+               if (uparams.in_len  > info.max_enc_size ||
+                   uparams.out_len > info.max_dec_size)
+                       return -EINVAL;
+               break;
+       case KEYCTL_PKEY_SIGN:
+       case KEYCTL_PKEY_VERIFY:
+               if (uparams.in_len  > info.max_sig_size ||
+                   uparams.out_len > info.max_data_size)
+                       return -EINVAL;
+               break;
+       default:
+               BUG();
+       }
+
+       params->in_len  = uparams.in_len;
+       params->out_len = uparams.out_len;
+       return 0;
+}
+
+/*
+ * Query information about an asymmetric key.
+ */
+long keyctl_pkey_query(key_serial_t id,
+                      const char __user *_info,
+                      struct keyctl_pkey_query __user *_res)
+{
+       struct kernel_pkey_params params;
+       struct kernel_pkey_query res;
+       long ret;
+
+       memset(&params, 0, sizeof(params));
+
+       ret = keyctl_pkey_params_get(id, _info, &params);
+       if (ret < 0)
+               goto error;
+
+       ret = params.key->type->asym_query(&params, &res);
+       if (ret < 0)
+               goto error;
+
+       ret = -EFAULT;
+       if (copy_to_user(_res, &res, sizeof(res)) == 0 &&
+           clear_user(_res->__spare, sizeof(_res->__spare)) == 0)
+               ret = 0;
+
+error:
+       keyctl_pkey_params_free(&params);
+       return ret;
+}
+
+/*
+ * Encrypt/decrypt/sign
+ *
+ * Encrypt data, decrypt data or sign data using a public key.
+ *
+ * _info is a string of supplementary information in key=val format.  For
+ * instance, it might contain:
+ *
+ *     "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the encoding and hash= selects the OID to go in that
+ * particular encoding if required.  If enc= isn't supplied, it's assumed that
+ * the caller is supplying raw values.
+ *
+ * If successful, the amount of data written into the output buffer is
+ * returned.
+ */
+long keyctl_pkey_e_d_s(int op,
+                      const struct keyctl_pkey_params __user *_params,
+                      const char __user *_info,
+                      const void __user *_in,
+                      void __user *_out)
+{
+       struct kernel_pkey_params params;
+       void *in, *out;
+       long ret;
+
+       ret = keyctl_pkey_params_get_2(_params, _info, op, &params);
+       if (ret < 0)
+               goto error_params;
+
+       ret = -EOPNOTSUPP;
+       if (!params.key->type->asym_eds_op)
+               goto error_params;
+
+       switch (op) {
+       case KEYCTL_PKEY_ENCRYPT:
+               params.op = kernel_pkey_encrypt;
+               break;
+       case KEYCTL_PKEY_DECRYPT:
+               params.op = kernel_pkey_decrypt;
+               break;
+       case KEYCTL_PKEY_SIGN:
+               params.op = kernel_pkey_sign;
+               break;
+       default:
+               BUG();
+       }
+
+       in = memdup_user(_in, params.in_len);
+       if (IS_ERR(in)) {
+               ret = PTR_ERR(in);
+               goto error_params;
+       }
+
+       ret = -ENOMEM;
+       out = kmalloc(params.out_len, GFP_KERNEL);
+       if (!out)
+               goto error_in;
+
+       ret = params.key->type->asym_eds_op(&params, in, out);
+       if (ret < 0)
+               goto error_out;
+
+       if (copy_to_user(_out, out, ret) != 0)
+               ret = -EFAULT;
+
+error_out:
+       kfree(out);
+error_in:
+       kfree(in);
+error_params:
+       keyctl_pkey_params_free(&params);
+       return ret;
+}
+
+/*
+ * Verify a signature.
+ *
+ * Verify a public key signature using the given key, or if not given, search
+ * for a matching key.
+ *
+ * _info is a string of supplementary information in key=val format.  For
+ * instance, it might contain:
+ *
+ *     "enc=pkcs1 hash=sha256"
+ *
+ * where enc= specifies the signature blob encoding and hash= selects the OID
+ * to go in that particular encoding.  If enc= isn't supplied, it's assumed
+ * that the caller is supplying raw values.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_pkey_verify(const struct keyctl_pkey_params __user *_params,
+                       const char __user *_info,
+                       const void __user *_in,
+                       const void __user *_in2)
+{
+       struct kernel_pkey_params params;
+       void *in, *in2;
+       long ret;
+
+       ret = keyctl_pkey_params_get_2(_params, _info, KEYCTL_PKEY_VERIFY,
+                                      &params);
+       if (ret < 0)
+               goto error_params;
+
+       ret = -EOPNOTSUPP;
+       if (!params.key->type->asym_verify_signature)
+               goto error_params;
+
+       in = memdup_user(_in, params.in_len);
+       if (IS_ERR(in)) {
+               ret = PTR_ERR(in);
+               goto error_params;
+       }
+
+       in2 = memdup_user(_in2, params.in2_len);
+       if (IS_ERR(in2)) {
+               ret = PTR_ERR(in2);
+               goto error_in;
+       }
+
+       params.op = kernel_pkey_verify;
+       ret = params.key->type->asym_verify_signature(&params, in, in2);
+
+       kfree(in2);
+error_in:
+       kfree(in);
+error_params:
+       keyctl_pkey_params_free(&params);
+       return ret;
+}
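End to end, a sign call supplies a keyctl_pkey_params whose in_len/out_len keyctl_pkey_params_get_2() validates against the key's asym_query() limits, plus an info string such as "enc=pkcs1 hash=sha256". A hedged userspace sketch (serial and buffer sizes illustrative; real code should issue KEYCTL_PKEY_QUERY first and size buffers from the result):

    #include <linux/keyctl.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char digest[32] = { 0 };  /* SHA-256 of the data */
            unsigned char sig[256];            /* e.g. RSA-2048 modulus size */
            struct keyctl_pkey_params p = {
                    .key_id  = 0x12345678,     /* illustrative serial */
                    .in_len  = sizeof(digest),
                    .out_len = sizeof(sig),    /* union member with in2_len */
            };
            long n;

            n = syscall(__NR_keyctl, KEYCTL_PKEY_SIGN, &p,
                        "enc=pkcs1 hash=sha256", digest, sig);
            if (n < 0)
                    perror("KEYCTL_PKEY_SIGN");
            else
                    printf("signature is %ld bytes\n", n);
            return 0;
    }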
index b69d3b1777c25d1d3f9cc5af3514352ed0220fcc..ff6789365a12fb15ed91e160ba2936262ec5c29c 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/tpm.h>
 #include <linux/tpm_command.h>
 
-#include "trusted.h"
+#include <keys/trusted.h>
 
 static const char hmac_alg[] = "hmac(sha1)";
 static const char hash_alg[] = "sha1";
@@ -121,7 +121,7 @@ out:
 /*
  * calculate authorization info fields to send to TPM
  */
-static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+int TSS_authhmac(unsigned char *digest, const unsigned char *key,
                        unsigned int keylen, unsigned char *h1,
                        unsigned char *h2, unsigned char h3, ...)
 {
@@ -168,11 +168,12 @@ out:
        kzfree(sdesc);
        return ret;
 }
+EXPORT_SYMBOL_GPL(TSS_authhmac);
 
 /*
  * verify the AUTH1_COMMAND (Seal) result from TPM
  */
-static int TSS_checkhmac1(unsigned char *buffer,
+int TSS_checkhmac1(unsigned char *buffer,
                          const uint32_t command,
                          const unsigned char *ononce,
                          const unsigned char *key,
@@ -249,6 +250,7 @@ out:
        kzfree(sdesc);
        return ret;
 }
+EXPORT_SYMBOL_GPL(TSS_checkhmac1);
 
 /*
  * verify the AUTH2_COMMAND (unseal) result from TPM
@@ -355,7 +357,7 @@ out:
  * For key specific tpm requests, we will generate and send our
  * own TPM command packets using the drivers send function.
  */
-static int trusted_tpm_send(unsigned char *cmd, size_t buflen)
+int trusted_tpm_send(unsigned char *cmd, size_t buflen)
 {
        int rc;
 
@@ -367,6 +369,7 @@ static int trusted_tpm_send(unsigned char *cmd, size_t buflen)
                rc = -EPERM;
        return rc;
 }
+EXPORT_SYMBOL_GPL(trusted_tpm_send);
 
 /*
  * Lock a trusted key, by extending a selected PCR.
@@ -425,7 +428,7 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
 /*
  * Create an object independent authorisation protocol (oiap) session
  */
-static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
 {
        int ret;
 
@@ -442,6 +445,7 @@ static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
               TPM_NONCE_SIZE);
        return 0;
 }
+EXPORT_SYMBOL_GPL(oiap);
 
 struct tpm_digests {
        unsigned char encauth[SHA1_DIGEST_SIZE];
diff --git a/security/keys/trusted.h b/security/keys/trusted.h
deleted file mode 100644 (file)
index 8d5fe9e..0000000
--- a/security/keys/trusted.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TRUSTED_KEY_H
-#define __TRUSTED_KEY_H
-
-/* implementation specific TPM constants */
-#define MAX_BUF_SIZE                   512
-#define TPM_GETRANDOM_SIZE             14
-#define TPM_OSAP_SIZE                  36
-#define TPM_OIAP_SIZE                  10
-#define TPM_SEAL_SIZE                  87
-#define TPM_UNSEAL_SIZE                        104
-#define TPM_SIZE_OFFSET                        2
-#define TPM_RETURN_OFFSET              6
-#define TPM_DATA_OFFSET                        10
-
-#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset]))
-#define LOAD32N(buffer, offset)        (*(uint32_t *)&buffer[offset])
-#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
-
-struct tpm_buf {
-       int len;
-       unsigned char data[MAX_BUF_SIZE];
-};
-
-#define INIT_BUF(tb) (tb->len = 0)
-
-struct osapsess {
-       uint32_t handle;
-       unsigned char secret[SHA1_DIGEST_SIZE];
-       unsigned char enonce[TPM_NONCE_SIZE];
-};
-
-/* discrete values, but have to store in uint16_t for TPM use */
-enum {
-       SEAL_keytype = 1,
-       SRK_keytype = 4
-};
-
-#define TPM_DEBUG 0
-
-#if TPM_DEBUG
-static inline void dump_options(struct trusted_key_options *o)
-{
-       pr_info("trusted_key: sealing key type %d\n", o->keytype);
-       pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
-       pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
-       pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
-       print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
-                      16, 1, o->pcrinfo, o->pcrinfo_len, 0);
-}
-
-static inline void dump_payload(struct trusted_key_payload *p)
-{
-       pr_info("trusted_key: key_len %d\n", p->key_len);
-       print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
-                      16, 1, p->key, p->key_len, 0);
-       pr_info("trusted_key: bloblen %d\n", p->blob_len);
-       print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
-                      16, 1, p->blob, p->blob_len, 0);
-       pr_info("trusted_key: migratable %d\n", p->migratable);
-}
-
-static inline void dump_sess(struct osapsess *s)
-{
-       print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
-                      16, 1, &s->handle, 4, 0);
-       pr_info("trusted-key: secret:\n");
-       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
-                      16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
-       pr_info("trusted-key: enonce:\n");
-       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
-                      16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
-}
-
-static inline void dump_tpm_buf(unsigned char *buf)
-{
-       int len;
-
-       pr_info("\ntrusted-key: tpm buffer\n");
-       len = LOAD32(buf, TPM_SIZE_OFFSET);
-       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
-}
-#else
-static inline void dump_options(struct trusted_key_options *o)
-{
-}
-
-static inline void dump_payload(struct trusted_key_payload *p)
-{
-}
-
-static inline void dump_sess(struct osapsess *s)
-{
-}
-
-static inline void dump_tpm_buf(unsigned char *buf)
-{
-}
-#endif
-
-static inline void store8(struct tpm_buf *buf, const unsigned char value)
-{
-       buf->data[buf->len++] = value;
-}
-
-static inline void store16(struct tpm_buf *buf, const uint16_t value)
-{
-       *(uint16_t *) & buf->data[buf->len] = htons(value);
-       buf->len += sizeof value;
-}
-
-static inline void store32(struct tpm_buf *buf, const uint32_t value)
-{
-       *(uint32_t *) & buf->data[buf->len] = htonl(value);
-       buf->len += sizeof value;
-}
-
-static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
-                             const int len)
-{
-       memcpy(buf->data + buf->len, in, len);
-       buf->len += len;
-}
-#endif
index 7ce683259357750cdfd25feb978ee1d6a984312b..a67459eb62d5c8d3066a72c354f13894f68be8ae 100644 (file)
@@ -5318,6 +5318,9 @@ static int selinux_sctp_bind_connect(struct sock *sk, int optname,
        addr_buf = address;
 
        while (walk_size < addrlen) {
+               if (walk_size + sizeof(sa_family_t) > addrlen)
+                       return -EINVAL;
+
                addr = addr_buf;
                switch (addr->sa_family) {
                case AF_UNSPEC:
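The added check closes an out-of-bounds read: the hook walks a packed array of variable-length sockaddrs, and a crafted addrlen could leave fewer than sizeof(sa_family_t) bytes before the switch dereferences addr->sa_family. The same defensive shape as a standalone sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    /* walk a packed list of sockaddrs without overrunning @len bytes */
    static bool walk_addrs(const void *addrs, size_t len)
    {
            size_t off = 0;

            while (off < len) {
                    const struct sockaddr *sa;
                    size_t sa_len;

                    if (off + sizeof(sa_family_t) > len)
                            return false;   /* truncated family field */
                    sa = (const struct sockaddr *)((const char *)addrs + off);
                    switch (sa->sa_family) {
                    case AF_INET:  sa_len = sizeof(struct sockaddr_in);  break;
                    case AF_INET6: sa_len = sizeof(struct sockaddr_in6); break;
                    default:       return false;
                    }
                    if (off + sa_len > len)
                            return false;   /* truncated address */
                    off += sa_len;
            }
            return true;
    }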
index 74b951f55608dca90a95cba914f3e74cd7e3b197..9cec81209617d5295cb244ca3b2c4079ae578391 100644 (file)
@@ -80,6 +80,9 @@ static const struct nlmsg_perm nlmsg_route_perms[] =
        { RTM_NEWSTATS,         NETLINK_ROUTE_SOCKET__NLMSG_READ },
        { RTM_GETSTATS,         NETLINK_ROUTE_SOCKET__NLMSG_READ  },
        { RTM_NEWCACHEREPORT,   NETLINK_ROUTE_SOCKET__NLMSG_READ },
+       { RTM_NEWCHAIN,         NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+       { RTM_DELCHAIN,         NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+       { RTM_GETCHAIN,         NETLINK_ROUTE_SOCKET__NLMSG_READ  },
 };
 
 static const struct nlmsg_perm nlmsg_tcpdiag_perms[] =
@@ -158,7 +161,11 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
 
        switch (sclass) {
        case SECCLASS_NETLINK_ROUTE_SOCKET:
-               /* RTM_MAX always point to RTM_SETxxxx, ie RTM_NEWxxx + 3 */
+               /* RTM_MAX always points to RTM_SETxxxx, ie RTM_NEWxxx + 3.
+                * If the BUILD_BUG_ON() below fails you must update the
+                * structures at the top of this file with the new mappings
+                * before updating the BUILD_BUG_ON() macro!
+                */
                BUILD_BUG_ON(RTM_MAX != (RTM_NEWCHAIN + 3));
                err = nlmsg_perm(nlmsg_type, perm, nlmsg_route_perms,
                                 sizeof(nlmsg_route_perms));
@@ -170,6 +177,10 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
                break;
 
        case SECCLASS_NETLINK_XFRM_SOCKET:
+               /* If the BUILD_BUG_ON() below fails you must update the
+                * structures at the top of this file with the new mappings
+                * before updating the BUILD_BUG_ON() macro!
+                */
                BUILD_BUG_ON(XFRM_MSG_MAX != XFRM_MSG_MAPPING);
                err = nlmsg_perm(nlmsg_type, perm, nlmsg_xfrm_perms,
                                 sizeof(nlmsg_xfrm_perms));
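The expanded comments spell out what the BUILD_BUG_ON() is for: a compile-time tripwire that breaks the build the moment RTM_MAX (or XFRM_MSG_MAX) moves past what the permission tables cover, which is exactly what forced the RTM_*CHAIN additions above. The mechanism in isolation, using C11's static_assert as a stand-in:

    #include <assert.h>

    enum { MSG_A, MSG_B, MSG_C, MSG_MAX = MSG_C };

    static const int perm_table[] = {
            [MSG_A] = 1,
            [MSG_B] = 2,
            [MSG_C] = 3,
    };

    /* trips at compile time if MSG_MAX grows without a matching entry */
    static_assert(sizeof(perm_table) / sizeof(perm_table[0]) == MSG_MAX + 1,
                  "perm_table out of sync with MSG_MAX");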
index 2fe459df3c858dc038e2a5e5f851ee4a0f07fafa..b7efa2296969c617dc292759b61c3c6f1ca450a5 100644 (file)
@@ -245,9 +245,13 @@ int mls_context_to_sid(struct policydb *pol,
        char *rangep[2];
 
        if (!pol->mls_enabled) {
-               if ((def_sid != SECSID_NULL && oldc) || (*scontext) == '\0')
-                       return 0;
-               return -EINVAL;
+               /*
+                * With no MLS, only return -EINVAL if there is a MLS field
+                * and it did not come from an xattr.
+                */
+               if (oldc && def_sid == SECSID_NULL)
+                       return -EINVAL;
+               return 0;
        }
 
        /*
index 9aa15bfc79369aebf6eaa0ef8dd32acecd7a1ec5..649d3217590ed465c6e13e39bdeb2f9e53a4a2e5 100644 (file)
@@ -348,6 +348,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
        return 0;
 }
 
+/* add a new kcontrol object; call with card->controls_rwsem locked */
+static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+{
+       struct snd_ctl_elem_id id;
+       unsigned int idx;
+       unsigned int count;
+
+       id = kcontrol->id;
+       if (id.index > UINT_MAX - kcontrol->count)
+               return -EINVAL;
+
+       if (snd_ctl_find_id(card, &id)) {
+               dev_err(card->dev,
+                       "control %i:%i:%i:%s:%i is already present\n",
+                       id.iface, id.device, id.subdevice, id.name, id.index);
+               return -EBUSY;
+       }
+
+       if (snd_ctl_find_hole(card, kcontrol->count) < 0)
+               return -ENOMEM;
+
+       list_add_tail(&kcontrol->list, &card->controls);
+       card->controls_count += kcontrol->count;
+       kcontrol->id.numid = card->last_numid + 1;
+       card->last_numid += kcontrol->count;
+
+       id = kcontrol->id;
+       count = kcontrol->count;
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+
+       return 0;
+}
+
 /**
  * snd_ctl_add - add the control instance to the card
  * @card: the card instance
@@ -364,45 +398,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
  */
 int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
 {
-       struct snd_ctl_elem_id id;
-       unsigned int idx;
-       unsigned int count;
        int err = -EINVAL;
 
        if (! kcontrol)
                return err;
        if (snd_BUG_ON(!card || !kcontrol->info))
                goto error;
-       id = kcontrol->id;
-       if (id.index > UINT_MAX - kcontrol->count)
-               goto error;
 
        down_write(&card->controls_rwsem);
-       if (snd_ctl_find_id(card, &id)) {
-               up_write(&card->controls_rwsem);
-               dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
-                                       id.iface,
-                                       id.device,
-                                       id.subdevice,
-                                       id.name,
-                                       id.index);
-               err = -EBUSY;
-               goto error;
-       }
-       if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
-               up_write(&card->controls_rwsem);
-               err = -ENOMEM;
-               goto error;
-       }
-       list_add_tail(&kcontrol->list, &card->controls);
-       card->controls_count += kcontrol->count;
-       kcontrol->id.numid = card->last_numid + 1;
-       card->last_numid += kcontrol->count;
-       id = kcontrol->id;
-       count = kcontrol->count;
+       err = __snd_ctl_add(card, kcontrol);
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
-               snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+       if (err < 0)
+               goto error;
        return 0;
 
  error:
@@ -1361,9 +1368,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
                kctl->tlv.c = snd_ctl_elem_user_tlv;
 
 	/* On failure, free the instance explicitly; __snd_ctl_add() does not. */
-       err = snd_ctl_add(card, kctl);
-       if (err < 0)
-               return err;
+       down_write(&card->controls_rwsem);
+       err = __snd_ctl_add(card, kctl);
+       if (err < 0) {
+               snd_ctl_free_one(kctl);
+               goto unlock;
+       }
        offset = snd_ctl_get_ioff(kctl, &info->id);
        snd_ctl_build_ioff(&info->id, kctl, offset);
        /*
@@ -1374,10 +1384,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
         * which locks the element.
         */
 
-       down_write(&card->controls_rwsem);
        card->user_ctl_count++;
-       up_write(&card->controls_rwsem);
 
+ unlock:
+       up_write(&card->controls_rwsem);
        return 0;
 }
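The refactor extracts __snd_ctl_add(), which assumes controls_rwsem is already write-held, so snd_ctl_elem_add() below can hold the rwsem across both the insertion and the user_ctl_count update, and free the kcontrol itself on failure (plugging a leak). The double-underscore "caller holds the lock" split is a common kernel shape; a minimal sketch with hypothetical names:

    struct registry { struct mutex lock; struct list_head items; };
    struct item { struct list_head list; };

    /* __reg_add: caller must hold reg->lock (the double-underscore contract) */
    static int __reg_add(struct registry *reg, struct item *it)
    {
            lockdep_assert_held(&reg->lock);
            list_add_tail(&it->list, &reg->items);
            return 0;
    }

    /* reg_add: public wrapper taking the lock around the whole operation */
    static int reg_add(struct registry *reg, struct item *it)
    {
            int err;

            mutex_lock(&reg->lock);
            err = __reg_add(reg, it);
            mutex_unlock(&reg->lock);
            return err;
    }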
 
index f8d4a419f3af957a2dd53ba8cf1673da607ec7e0..467039b342b511c7c4fb95f0d7c316f7715d7c70 100644 (file)
@@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        runtime->oss.channels = params_channels(params);
        runtime->oss.rate = params_rate(params);
 
-       vfree(runtime->oss.buffer);
-       runtime->oss.buffer = vmalloc(runtime->oss.period_bytes);
+       kvfree(runtime->oss.buffer);
+       runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL);
        if (!runtime->oss.buffer) {
                err = -ENOMEM;
                goto failure;
@@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime;
        runtime = substream->runtime;
-       vfree(runtime->oss.buffer);
+       kvfree(runtime->oss.buffer);
        runtime->oss.buffer = NULL;
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
        snd_pcm_oss_plugin_clear(substream);
index 141c5f3a957501e0901102ce5bd519cbca73892e..31cb2acf8afcc5988d47cdb2332c005c0558d18a 100644 (file)
@@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t
                return -ENXIO;
        size /= 8;
        if (plugin->buf_frames < frames) {
-               vfree(plugin->buf);
-               plugin->buf = vmalloc(size);
+               kvfree(plugin->buf);
+               plugin->buf = kvzalloc(size, GFP_KERNEL);
                plugin->buf_frames = frames;
        }
        if (!plugin->buf) {
@@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin)
        if (plugin->private_free)
                plugin->private_free(plugin);
        kfree(plugin->buf_channels);
-       vfree(plugin->buf);
+       kvfree(plugin->buf);
        kfree(plugin);
        return 0;
 }
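The vmalloc()/vfree() to kvzalloc()/kvfree() conversions let small period buffers come from the slab allocator, falling back to vmalloc only for large sizes, and the zeroing variant avoids handing stale kernel memory to the OSS layer. The pairing, as a kernel-style sketch:

    /* sketch: size-agnostic (re)allocation with the kvmalloc family */
    static int resize_buffer(void **bufp, size_t size)
    {
            kvfree(*bufp);                       /* frees slab or vmalloc alike */
            *bufp = kvzalloc(size, GFP_KERNEL);  /* kmalloc first, vmalloc fallback */
            return *bufp ? 0 : -ENOMEM;
    }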
index 66c90f486af913243a1005571a6c88cce93de76f..818dff1de545fad25669adf1144cbd49e5934067 100644 (file)
@@ -36,6 +36,7 @@
 #include <sound/timer.h>
 #include <sound/minors.h>
 #include <linux/uio.h>
+#include <linux/delay.h>
 
 #include "pcm_local.h"
 
@@ -91,12 +92,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
  * and this may lead to a deadlock when the code path takes read sem
  * twice (e.g. one in snd_pcm_action_nonatomic() and another in
  * snd_pcm_stream_lock()).  As a (suboptimal) workaround, let the writer
 - * spin until it gets the lock.
 + * sleep until all readers have finished, so that readers are never
 + * blocked by a waiting writer.
  */
-static inline void down_write_nonblock(struct rw_semaphore *lock)
+static inline void down_write_nonfifo(struct rw_semaphore *lock)
 {
        while (!down_write_trylock(lock))
-               cond_resched();
+               msleep(1);
 }
 
 #define PCM_LOCK_DEFAULT       0
@@ -1967,7 +1968,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
                res = -ENOMEM;
                goto _nolock;
        }
-       down_write_nonblock(&snd_pcm_link_rwsem);
+       down_write_nonfifo(&snd_pcm_link_rwsem);
        write_lock_irq(&snd_pcm_link_rwlock);
        if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
            substream->runtime->status->state != substream1->runtime->status->state ||
@@ -2014,7 +2015,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
        struct snd_pcm_substream *s;
        int res = 0;
 
-       down_write_nonblock(&snd_pcm_link_rwsem);
+       down_write_nonfifo(&snd_pcm_link_rwsem);
        write_lock_irq(&snd_pcm_link_rwlock);
        if (!snd_pcm_stream_linked(substream)) {
                res = -EALREADY;
@@ -2369,7 +2370,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
 
 static void pcm_release_private(struct snd_pcm_substream *substream)
 {
-       snd_pcm_unlink(substream);
+       if (snd_pcm_stream_linked(substream))
+               snd_pcm_unlink(substream);
 }
 
 void snd_pcm_release_substream(struct snd_pcm_substream *substream)
index fcd965f1d69e820de7824de01365d95e73eb44a3..9be76c808fccf09ef8a8785152657510bcef1f1e 100644 (file)
@@ -146,53 +146,22 @@ static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
        struct snd_interval *s = hw_param_interval(params, rule->var);
        const struct snd_interval *r =
                hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
-       struct snd_interval t = {
-               .min = s->min, .max = s->max, .integer = 1,
-       };
+       struct snd_interval t = {0};
+       unsigned int step = 0;
        int i;
 
        for (i = 0; i < CIP_SFC_COUNT; ++i) {
-               unsigned int rate = amdtp_rate_table[i];
-               unsigned int step = amdtp_syt_intervals[i];
-
-               if (!snd_interval_test(r, rate))
-                       continue;
-
-               t.min = roundup(t.min, step);
-               t.max = rounddown(t.max, step);
+               if (snd_interval_test(r, amdtp_rate_table[i]))
+                       step = max(step, amdtp_syt_intervals[i]);
        }
 
-       if (snd_interval_checkempty(&t))
-               return -EINVAL;
+       t.min = roundup(s->min, step);
+       t.max = rounddown(s->max, step);
+       t.integer = 1;
 
        return snd_interval_refine(s, &t);
 }
 
-static int apply_constraint_to_rate(struct snd_pcm_hw_params *params,
-                                   struct snd_pcm_hw_rule *rule)
-{
-       struct snd_interval *r =
-                       hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
-       const struct snd_interval *s = hw_param_interval_c(params, rule->deps[0]);
-       struct snd_interval t = {
-               .min = UINT_MAX, .max = 0, .integer = 1,
-       };
-       int i;
-
-       for (i = 0; i < CIP_SFC_COUNT; ++i) {
-               unsigned int step = amdtp_syt_intervals[i];
-               unsigned int rate = amdtp_rate_table[i];
-
-               if (s->min % step || s->max % step)
-                       continue;
-
-               t.min = min(t.min, rate);
-               t.max = max(t.max, rate);
-       }
-
-       return snd_interval_refine(r, &t);
-}
-
 /**
  * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
  * @s:         the AMDTP stream, which must be initialized.
@@ -250,24 +219,16 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
         */
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  apply_constraint_to_size, NULL,
+                                 SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto end;
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                 apply_constraint_to_rate, NULL,
-                                 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
-       if (err < 0)
-               goto end;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                  apply_constraint_to_size, NULL,
+                                 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
                                  SNDRV_PCM_HW_PARAM_RATE, -1);
        if (err < 0)
                goto end;
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                 apply_constraint_to_rate, NULL,
-                                 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
-       if (err < 0)
-               goto end;
 end:
        return err;
 }
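Dropping the reverse rate rules removes a refinement cycle (size rules depended on rate and vice versa), and each remaining rule now lists the parameter it refines among its own dependencies, so the constraint solver keeps re-running it as that interval narrows. Annotated, the call shape reads roughly:

    /* the trailing -1 terminates the dependency list */
    err = snd_pcm_hw_rule_add(runtime,
                              0,                               /* no condition */
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE,  /* param refined */
                              apply_constraint_to_size, NULL,  /* callback, arg */
                              SNDRV_PCM_HW_PARAM_PERIOD_SIZE,  /* dep: itself */
                              SNDRV_PCM_HW_PARAM_RATE, -1);    /* dep: rate */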
index 0f6dbcffe711d62cfcba5fa7e57194f7f1a70e71..ed50b222d36ea0d4dd7ddd4440ca8f62ce0e7d8a 100644 (file)
@@ -240,8 +240,8 @@ static void dice_remove(struct fw_unit *unit)
        cancel_delayed_work_sync(&dice->dwork);
 
        if (dice->registered) {
-               /* No need to wait for releasing card object in this context. */
-               snd_card_free_when_closed(dice->card);
+               // Block until all ALSA character devices are released.
+               snd_card_free(dice->card);
        }
 
        mutex_destroy(&dice->mutex);
index 32453f81b95a8d01d3d9d5d7c16d2d19c266a4a3..3a5008837576030685f0018f0aafe71ba8fb8697 100644 (file)
@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
        if (err < 0) {
                if (chip->release_dma)
                        chip->release_dma(chip, chip->dma_private_data, chip->dma1);
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
                return err;
        }
        chip->playback_substream = substream;
@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
        if (err < 0) {
                if (chip->release_dma)
                        chip->release_dma(chip, chip->dma_private_data, chip->dma2);
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
                return err;
        }
        chip->capture_substream = substream;
index f4459d1a9d67d0c43a13f494f348a9c34c72df11..27b468f057dd409d17294099aa833df1e3c3916f 100644 (file)
@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
 {
        struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
        int reg = kcontrol->private_value & 0xff;
-       int shift = (kcontrol->private_value >> 8) & 0xff;
+       int shift = (kcontrol->private_value >> 8) & 0x0f;
        int mask = (kcontrol->private_value >> 16) & 0xff;
        // int invert = (kcontrol->private_value >> 24) & 0xff;
        unsigned short value, old, new;
index 04402c14cb2392276bb23308244c626b6539fc4e..9847b669cf3cf0fbc698e89fd78774ace013d619 100644 (file)
 #define SPI_PL_BIT_R_R         (2<<7)  /* right channel = right */
 #define SPI_PL_BIT_R_C         (3<<7)  /* right channel = (L+R)/2 */
 #define SPI_IZD_REG            2
-#define SPI_IZD_BIT            (1<<4)  /* infinite zero detect */
+#define SPI_IZD_BIT            (0<<4)  /* infinite zero detect */
 
 #define SPI_FMT_REG            3
 #define SPI_FMT_BIT_RJ         (0<<0)  /* right justified mode */
index d8eb2b5f51ae7f59e3f905f092190f898f92242d..76f03abd15ab766190c4d5739f707d81aa0e2d70 100644 (file)
@@ -2169,6 +2169,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+       SND_PCI_QUIRK(0x1849, 0x0397, "Asrock N68C-S UCC", 0),
+       /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1849, 0x7662, "Asrock H81M-HDS", 0),
        /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
        SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
@@ -2496,6 +2498,10 @@ static const struct pci_device_id azx_ids[] = {
        /* AMD Hudson */
        { PCI_DEVICE(0x1022, 0x780d),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+       /* AMD Stoney */
+       { PCI_DEVICE(0x1022, 0x157a),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+                        AZX_DCAPS_PM_RUNTIME },
        /* AMD Raven */
        { PCI_DEVICE(0x1022, 0x15e3),
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
index 0a24037184c33e2cd53554d7cf9e4dd6586dd19b..0a567634e5faddafd470220c7a9ebaff88ce8cef 100644 (file)
@@ -1177,6 +1177,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
        SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
        SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
+       SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ),
        SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
@@ -8413,7 +8414,7 @@ static void ca0132_free(struct hda_codec *codec)
 
        snd_hda_power_down(codec);
        if (spec->mem_base)
-               iounmap(spec->mem_base);
+               pci_iounmap(codec->bus->pci, spec->mem_base);
        kfree(spec->spec_init_verbs);
        kfree(codec->spec);
 }
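Note: spec->mem_base is presumably mapped with pci_iomap(), so it has to be released with pci_iounmap() rather than bare iounmap(); the pci variant also handles the case where the BAR is an I/O-port resource instead of MMIO.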
@@ -8488,7 +8489,7 @@ static void ca0132_config(struct hda_codec *codec)
                break;
        case QUIRK_AE5:
                codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
-               snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+               snd_hda_apply_pincfgs(codec, ae5_pincfgs);
                break;
        }
 
index fa61674a560504224b9ee8ef35da2d4ddf85c6fe..8d75597028eebbb9f32e6de7009f101b8c117f4d 100644 (file)
@@ -388,6 +388,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0285:
        case 0x10ec0298:
        case 0x10ec0289:
+       case 0x10ec0300:
                alc_update_coef_idx(codec, 0x10, 1<<9, 0);
                break;
        case 0x10ec0275:
@@ -2830,6 +2831,7 @@ enum {
        ALC269_TYPE_ALC215,
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
+       ALC269_TYPE_ALC300,
        ALC269_TYPE_ALC700,
 };
 
@@ -2864,6 +2866,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC215:
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
+       case ALC269_TYPE_ALC300:
        case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
@@ -4985,9 +4988,18 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
                { 0x19, 0x21a11010 }, /* dock mic */
                { }
        };
+       /* Ensure the speaker pin is coupled with DAC NID 0x03; otherwise
+        * the speaker output becomes too low for some reason on Thinkpads
+        * with the ALC298 codec
+        */
+       static hda_nid_t preferred_pairs[] = {
+               0x14, 0x03, 0x17, 0x02, 0x21, 0x02,
+               0
+       };
        struct alc_spec *spec = codec->spec;
 
        if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->gen.preferred_dacs = preferred_pairs;
                spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
                snd_hda_apply_pincfgs(codec, pincfgs);
        } else if (action == HDA_FIXUP_ACT_INIT) {
@@ -5358,6 +5370,16 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
        spec->gen.preferred_dacs = preferred_pairs;
 }
 
+/* The DAC of NID 0x3 will introduce click/pop noise on headphones, so invalidate it */
+static void alc285_fixup_invalidate_dacs(struct hda_codec *codec,
+                             const struct hda_fixup *fix, int action)
+{
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       snd_hda_override_wcaps(codec, 0x03, 0);
+}
+
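Note: snd_hda_override_wcaps(codec, 0x03, 0) clears the widget capabilities of NID 0x03, so the generic parser treats the noisy DAC as absent and binds the outputs to the remaining converters.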
 /* for hda_fixup_thinkpad_acpi() */
 #include "thinkpad_helper.c"
 
@@ -5495,6 +5517,9 @@ enum {
        ALC255_FIXUP_DELL_HEADSET_MIC,
        ALC295_FIXUP_HP_X360,
        ALC221_FIXUP_HP_HEADSET_MIC,
+       ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+       ALC295_FIXUP_HP_AUTO_MUTE,
+       ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5659,6 +5684,8 @@ static const struct hda_fixup alc269_fixups[] = {
        [ALC269_FIXUP_HP_MUTE_LED_MIC3] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc269_fixup_hp_mute_led_mic3,
+               .chained = true,
+               .chain_id = ALC295_FIXUP_HP_AUTO_MUTE
        },
        [ALC269_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
@@ -6362,6 +6389,23 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_HEADSET_MIC
        },
+       [ALC285_FIXUP_LENOVO_HEADPHONE_NOISE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_invalidate_dacs,
+       },
+       [ALC295_FIXUP_HP_AUTO_MUTE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_auto_mute_via_amp,
+       },
+       [ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_HEADSET_MIC
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6376,7 +6420,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
+       SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
        SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -6481,6 +6529,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
        SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
        SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
        SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
@@ -6531,6 +6580,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -7033,6 +7083,15 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60130},
                {0x19, 0x03a11020},
                {0x21, 0x0321101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x19, 0x04a11040},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x17, 0x90170110},
+               {0x21, 0x02211020}),
        SND_HDA_PIN_QUIRK(0x10ec0288, 0x1028, "Dell", ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x12, 0x90a60120},
                {0x14, 0x90170110},
@@ -7294,6 +7353,10 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */
                break;
+       case 0x10ec0300:
+               spec->codec_variant = ALC269_TYPE_ALC300;
+               spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
+               break;
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
@@ -8404,6 +8467,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0295, "ALC295", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
        HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
        HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
        HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
index 97f49b751e6eb3583c948e7e35c0ddc8860cc818..568575b72f2f7269c727a202f6544b6f51a57d03 100644 (file)
@@ -58,8 +58,8 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
                        removefunc = false;
                }
                if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
-                   snd_hda_gen_add_micmute_led(codec,
-                                               update_tpacpi_micmute) > 0)
+                   !snd_hda_gen_add_micmute_led(codec,
+                                                update_tpacpi_micmute))
                        removefunc = false;
        }
 
index 4e9854889a9570dd66e9476fb7a9b082053016e7..e63d6e33df487dff441f1890e8235d491d7d5144 100644 (file)
@@ -2187,11 +2187,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
         */
        snd_hdac_codec_read(hdev, hdev->afg, 0, AC_VERB_SET_POWER_STATE,
                                                        AC_PWRST_D3);
-       err = snd_hdac_display_power(bus, false);
-       if (err < 0) {
-               dev_err(dev, "Cannot turn on display power on i915\n");
-               return err;
-       }
 
        hlink = snd_hdac_ext_bus_get_link(bus, dev_name(dev));
        if (!hlink) {
@@ -2201,7 +2196,11 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
 
        snd_hdac_ext_bus_link_put(bus, hlink);
 
-       return 0;
+       err = snd_hdac_display_power(bus, false);
+       if (err < 0)
+               dev_err(dev, "Cannot turn off display power on i915\n");
+
+       return err;
 }
 
 static int hdac_hdmi_runtime_resume(struct device *dev)
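Note: the i915 display-power drop now happens after the link reference is released, and a failure there is logged (with the correct "turn off" wording) and returned instead of aborting runtime suspend before the link handling runs.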
index 2c6ba55bf394eb0f2ad4a2a4ce2f2fc21291a541..bb3f0c42a1cdddb31a603948c39bafa56a42a654 100644 (file)
@@ -139,7 +139,7 @@ enum pcm186x_type {
 #define PCM186X_MAX_REGISTER           PCM186X_CURR_TRIM_CTRL
 
 /* PCM186X_PAGE */
-#define PCM186X_RESET                  0xff
+#define PCM186X_RESET                  0xfe
 
 /* PCM186X_ADCX_INPUT_SEL_X */
 #define PCM186X_ADC_INPUT_SEL_POL      BIT(7)
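Note: the driver triggers a reset by writing PCM186X_RESET to the page register; this change suggests the part expects 0xfe there, meaning the old 0xff write never actually reset the codec.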
index 494d9d662be8d084113e5720a2c3e86b3331eaf2..771b46e1974b9d068cc7797b8c2d0353d4b2ddfd 100644 (file)
@@ -198,20 +198,16 @@ static const struct snd_kcontrol_new pcm3060_dapm_controls[] = {
 };
 
 static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = {
-       SND_SOC_DAPM_OUTPUT("OUTL+"),
-       SND_SOC_DAPM_OUTPUT("OUTR+"),
-       SND_SOC_DAPM_OUTPUT("OUTL-"),
-       SND_SOC_DAPM_OUTPUT("OUTR-"),
+       SND_SOC_DAPM_OUTPUT("OUTL"),
+       SND_SOC_DAPM_OUTPUT("OUTR"),
 
        SND_SOC_DAPM_INPUT("INL"),
        SND_SOC_DAPM_INPUT("INR"),
 };
 
 static const struct snd_soc_dapm_route pcm3060_dapm_map[] = {
-       { "OUTL+", NULL, "Playback" },
-       { "OUTR+", NULL, "Playback" },
-       { "OUTL-", NULL, "Playback" },
-       { "OUTR-", NULL, "Playback" },
+       { "OUTL", NULL, "Playback" },
+       { "OUTR", NULL, "Playback" },
 
        { "Capture", NULL, "INL" },
        { "Capture", NULL, "INR" },
index a53dc174bbf0702c2876e19ccf3d7e8b45b71e94..66501b8dc46fb17e962f7bf1b69923da7bdae1a6 100644 (file)
@@ -765,38 +765,41 @@ static unsigned int wm_adsp_region_to_reg(struct wm_adsp_region const *mem,
 
 static void wm_adsp2_show_fw_status(struct wm_adsp *dsp)
 {
-       u16 scratch[4];
+       unsigned int scratch[4];
+       unsigned int addr = dsp->base + ADSP2_SCRATCH0;
+       unsigned int i;
        int ret;
 
-       ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2_SCRATCH0,
-                               scratch, sizeof(scratch));
-       if (ret) {
-               adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
-               return;
+       for (i = 0; i < ARRAY_SIZE(scratch); ++i) {
+               ret = regmap_read(dsp->regmap, addr + i, &scratch[i]);
+               if (ret) {
+                       adsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+                       return;
+               }
        }
 
        adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
-                be16_to_cpu(scratch[0]),
-                be16_to_cpu(scratch[1]),
-                be16_to_cpu(scratch[2]),
-                be16_to_cpu(scratch[3]));
+                scratch[0], scratch[1], scratch[2], scratch[3]);
 }
 
 static void wm_adsp2v2_show_fw_status(struct wm_adsp *dsp)
 {
-       u32 scratch[2];
+       unsigned int scratch[2];
        int ret;
 
-       ret = regmap_raw_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
-                             scratch, sizeof(scratch));
-
+       ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH0_1,
+                         &scratch[0]);
        if (ret) {
-               adsp_err(dsp, "Failed to read SCRATCH regs: %d\n", ret);
+               adsp_err(dsp, "Failed to read SCRATCH0_1: %d\n", ret);
                return;
        }
 
-       scratch[0] = be32_to_cpu(scratch[0]);
-       scratch[1] = be32_to_cpu(scratch[1]);
+       ret = regmap_read(dsp->regmap, dsp->base + ADSP2V2_SCRATCH2_3,
+                         &scratch[1]);
+       if (ret) {
+               adsp_err(dsp, "Failed to read SCRATCH2_3: %d\n", ret);
+               return;
+       }
 
        adsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
                 scratch[0] & 0xFFFF,
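Note: regmap_raw_read() returns uncorrected wire-format data (hence the old be16/be32 conversions) and on SPI-attached parts may DMA directly into the caller's buffer, which is not safe for stack memory. Reading the scratch registers individually with regmap_read() avoids both issues at the cost of a few extra bus transactions.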
index 0caa1f4eb94d7d3ab086d2bdca642a9a975651b4..18e71770368550f167ea32385160e853e9dd5264 100644 (file)
@@ -101,22 +101,42 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI
          codec, then enable this option by saying Y or m. This is a
          recommended option
 
-config SND_SOC_INTEL_SKYLAKE_SSP_CLK
-       tristate
-
 config SND_SOC_INTEL_SKYLAKE
        tristate "SKL/BXT/KBL/GLK/CNL... Platforms"
        depends on PCI && ACPI
+       select SND_SOC_INTEL_SKYLAKE_COMMON
+       help
+         If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
+         GeminiLake or CannonLake platform with the DSP enabled in the BIOS
+         then enable this option by saying Y or m.
+
+if SND_SOC_INTEL_SKYLAKE
+
+config SND_SOC_INTEL_SKYLAKE_SSP_CLK
+       tristate
+
+config SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
+       bool "HDAudio codec support"
+       help
+         If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
+         GeminiLake or CannonLake platform with an HDaudio codec
+         then enable this option by saying Y
+
+config SND_SOC_INTEL_SKYLAKE_COMMON
+       tristate
        select SND_HDA_EXT_CORE
        select SND_HDA_DSP_LOADER
        select SND_SOC_TOPOLOGY
        select SND_SOC_INTEL_SST
+       select SND_SOC_HDAC_HDA if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
        select SND_SOC_ACPI_INTEL_MATCH
        help
          If you have an Intel Skylake/Broxton/ApolloLake/KabyLake/
          GeminiLake or CannonLake platform with the DSP enabled in the BIOS
          then enable this option by saying Y or m.
 
+endif ## SND_SOC_INTEL_SKYLAKE
+
 config SND_SOC_ACPI_INTEL_MATCH
        tristate
        select SND_SOC_ACPI if ACPI
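Note: the Kconfig restructure turns SND_SOC_INTEL_SKYLAKE into a thin user-visible switch that selects a new hidden SND_SOC_INTEL_SKYLAKE_COMMON carrying the real dependencies, and makes HDAudio codec support an explicit opt-in (SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC) that in turn selects SND_SOC_HDAC_HDA.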
index 73ca1350aa3124f1e2d784509cbc9ad6ae9be1d1..b177db2a0dbb2ada825849e5e8d85d952a67d1b2 100644 (file)
@@ -293,16 +293,6 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
          Say Y if you have such a device.
          If unsure select "N".
 
-config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
-       tristate "SKL/KBL/BXT/APL with HDA Codecs"
-       select SND_SOC_HDAC_HDMI
-       select SND_SOC_HDAC_HDA
-       help
-         This adds support for ASoC machine driver for Intel platforms
-         SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
-          Say Y or m if you have such a device. This is a recommended option.
-         If unsure select "N".
-
 config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
        tristate "GLK with RT5682 and MAX98357A in I2S Mode"
        depends on MFD_INTEL_LPSS && I2C && ACPI
@@ -319,4 +309,18 @@ config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
 
 endif ## SND_SOC_INTEL_SKYLAKE
 
+if SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
+
+config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
+       tristate "SKL/KBL/BXT/APL with HDA Codecs"
+       select SND_SOC_HDAC_HDMI
+       # SND_SOC_HDAC_HDA is already selected
+       help
+         This adds support for the ASoC machine driver for Intel platforms
+         SKL/KBL/BXT/APL with iDisp and HDA audio codecs.
+         Say Y or m if you have such a device. This is a recommended option.
+         If unsure select "N".
+
+endif ## SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC
+
 endif ## SND_SOC_INTEL_MACH
index db6976f4ddaa28b9eb0e123b93d9952cd1869634..9d9f6e41d81c079796cb7bec6d9422bd6cbb5c67 100644 (file)
@@ -19,6 +19,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -35,6 +36,8 @@
 #define CHT_PLAT_CLK_3_HZ      19200000
 #define CHT_CODEC_DAI  "HiFi"
 
+#define QUIRK_PMC_PLT_CLK_0                            0x01
+
 struct cht_mc_private {
        struct clk *mclk;
        struct snd_soc_jack jack;
@@ -385,11 +388,29 @@ static struct snd_soc_card snd_soc_card_cht = {
        .num_controls = ARRAY_SIZE(cht_mc_controls),
 };
 
+static const struct dmi_system_id cht_max98090_quirk_table[] = {
+       {
+               /* Swanky model Chromebook (Toshiba Chromebook 2) */
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Swanky"),
+               },
+               .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+       },
+       {}
+};
+
 static int snd_cht_mc_probe(struct platform_device *pdev)
 {
+       const struct dmi_system_id *dmi_id;
        struct device *dev = &pdev->dev;
        int ret_val = 0;
        struct cht_mc_private *drv;
+       const char *mclk_name;
+       int quirks = 0;
+
+       dmi_id = dmi_first_match(cht_max98090_quirk_table);
+       if (dmi_id)
+               quirks = (unsigned long)dmi_id->driver_data;
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
        if (!drv)
@@ -411,11 +432,16 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
        snd_soc_card_cht.dev = &pdev->dev;
        snd_soc_card_set_drvdata(&snd_soc_card_cht, drv);
 
-       drv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+       if (quirks & QUIRK_PMC_PLT_CLK_0)
+               mclk_name = "pmc_plt_clk_0";
+       else
+               mclk_name = "pmc_plt_clk_3";
+
+       drv->mclk = devm_clk_get(&pdev->dev, mclk_name);
        if (IS_ERR(drv->mclk)) {
                dev_err(&pdev->dev,
-                       "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
-                       PTR_ERR(drv->mclk));
+                       "Failed to get MCLK from %s: %ld\n",
+                       mclk_name, PTR_ERR(drv->mclk));
                return PTR_ERR(drv->mclk);
        }
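Note: on the DMI-matched Swanky board (Toshiba Chromebook 2) the codec MCLK evidently comes from pmc_plt_clk_0 rather than the usual pmc_plt_clk_3, so the quirk flag only swaps the clock name passed to devm_clk_get().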
 
index 29225623b4b40d1c8fea6822b85b007e57aba690..7487f388e65d729c8fc6032a3e84d0908ac454f4 100644 (file)
@@ -37,7 +37,9 @@
 #include "skl.h"
 #include "skl-sst-dsp.h"
 #include "skl-sst-ipc.h"
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
 #include "../../../soc/codecs/hdac_hda.h"
+#endif
 
 /*
  * initialize the PCI registers
@@ -658,6 +660,8 @@ static void skl_clock_device_unregister(struct skl *skl)
                platform_device_unregister(skl->clk_dev);
 }
 
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
+
 #define IDISP_INTEL_VENDOR_ID  0x80860000
 
 /*
@@ -676,6 +680,8 @@ static void load_codec_module(struct hda_codec *codec)
 #endif
 }
 
+#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
+
 /*
  * Probe the given codec address
  */
@@ -685,9 +691,11 @@ static int probe_codec(struct hdac_bus *bus, int addr)
                (AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
        unsigned int res = -1;
        struct skl *skl = bus_to_skl(bus);
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
        struct hdac_hda_priv *hda_codec;
-       struct hdac_device *hdev;
        int err;
+#endif
+       struct hdac_device *hdev;
 
        mutex_lock(&bus->cmd_mutex);
        snd_hdac_bus_send_cmd(bus, cmd);
@@ -697,6 +705,7 @@ static int probe_codec(struct hdac_bus *bus, int addr)
                return -EIO;
        dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);
 
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
        hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
                                 GFP_KERNEL);
        if (!hda_codec)
@@ -715,6 +724,13 @@ static int probe_codec(struct hdac_bus *bus, int addr)
                load_codec_module(&hda_codec->codec);
        }
        return 0;
+#else
+       hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
+       if (!hdev)
+               return -ENOMEM;
+
+       return snd_hdac_ext_bus_device_init(bus, addr, hdev);
+#endif /* CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC */
 }
 
 /* Codec initialization */
@@ -815,6 +831,12 @@ static void skl_probe_work(struct work_struct *work)
                }
        }
 
+       /*
+        * we are done probing so decrement link counts
+        */
+       list_for_each_entry(hlink, &bus->hlink_list, list)
+               snd_hdac_ext_bus_link_put(bus, hlink);
+
        if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
                err = snd_hdac_display_power(bus, false);
                if (err < 0) {
@@ -824,12 +846,6 @@ static void skl_probe_work(struct work_struct *work)
                }
        }
 
-       /*
-        * we are done probing so decrement link counts
-        */
-       list_for_each_entry(hlink, &bus->hlink_list, list)
-               snd_hdac_ext_bus_link_put(bus, hlink);
-
        /* configure PM */
        pm_runtime_put_noidle(bus->dev);
        pm_runtime_allow(bus->dev);
@@ -870,7 +886,7 @@ static int skl_create(struct pci_dev *pci,
        hbus = skl_to_hbus(skl);
        bus = skl_to_bus(skl);
 
-#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDA)
+#if IS_ENABLED(CONFIG_SND_SOC_INTEL_SKYLAKE_HDAUDIO_CODEC)
        ext_ops = snd_soc_hdac_hda_get_ops();
 #endif
        snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops);
index d5ae9eb8c7569f6364f8fc5173be2967cd9be092..fed45b41f9d3e7766c2239de15b9ef05453097ae 100644 (file)
@@ -36,6 +36,8 @@
 #include "../codecs/twl6040.h"
 
 struct abe_twl6040 {
+       struct snd_soc_card card;
+       struct snd_soc_dai_link dai_links[2];
        int     jack_detection; /* board can detect jack events */
        int     mclk_freq;      /* MCLK frequency speed for twl6040 */
 };
@@ -208,40 +210,10 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
                                ARRAY_SIZE(dmic_audio_map));
 }
 
-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
-       {
-               .name = "TWL6040",
-               .stream_name = "TWL6040",
-               .codec_dai_name = "twl6040-legacy",
-               .codec_name = "twl6040-codec",
-               .init = omap_abe_twl6040_init,
-               .ops = &omap_abe_ops,
-       },
-       {
-               .name = "DMIC",
-               .stream_name = "DMIC Capture",
-               .codec_dai_name = "dmic-hifi",
-               .codec_name = "dmic-codec",
-               .init = omap_abe_dmic_init,
-               .ops = &omap_abe_dmic_ops,
-       },
-};
-
-/* Audio machine driver */
-static struct snd_soc_card omap_abe_card = {
-       .owner = THIS_MODULE,
-
-       .dapm_widgets = twl6040_dapm_widgets,
-       .num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets),
-       .dapm_routes = audio_map,
-       .num_dapm_routes = ARRAY_SIZE(audio_map),
-};
-
 static int omap_abe_probe(struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
-       struct snd_soc_card *card = &omap_abe_card;
+       struct snd_soc_card *card;
        struct device_node *dai_node;
        struct abe_twl6040 *priv;
        int num_links = 0;
@@ -252,12 +224,18 @@ static int omap_abe_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       card->dev = &pdev->dev;
-
        priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;
 
+       card = &priv->card;
+       card->dev = &pdev->dev;
+       card->owner = THIS_MODULE;
+       card->dapm_widgets = twl6040_dapm_widgets;
+       card->num_dapm_widgets = ARRAY_SIZE(twl6040_dapm_widgets);
+       card->dapm_routes = audio_map;
+       card->num_dapm_routes = ARRAY_SIZE(audio_map);
+
        if (snd_soc_of_parse_card_name(card, "ti,model")) {
                dev_err(&pdev->dev, "Card name is not provided\n");
                return -ENODEV;
@@ -274,14 +252,27 @@ static int omap_abe_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "McPDM node is not provided\n");
                return -EINVAL;
        }
-       abe_twl6040_dai_links[0].cpu_of_node = dai_node;
-       abe_twl6040_dai_links[0].platform_of_node = dai_node;
+
+       priv->dai_links[0].name = "DMIC";
+       priv->dai_links[0].stream_name = "TWL6040";
+       priv->dai_links[0].cpu_of_node = dai_node;
+       priv->dai_links[0].platform_of_node = dai_node;
+       priv->dai_links[0].codec_dai_name = "twl6040-legacy";
+       priv->dai_links[0].codec_name = "twl6040-codec";
+       priv->dai_links[0].init = omap_abe_twl6040_init;
+       priv->dai_links[0].ops = &omap_abe_ops;
 
        dai_node = of_parse_phandle(node, "ti,dmic", 0);
        if (dai_node) {
                num_links = 2;
-               abe_twl6040_dai_links[1].cpu_of_node = dai_node;
-               abe_twl6040_dai_links[1].platform_of_node = dai_node;
+               priv->dai_links[1].name = "TWL6040";
+               priv->dai_links[1].stream_name = "DMIC Capture";
+               priv->dai_links[1].cpu_of_node = dai_node;
+               priv->dai_links[1].platform_of_node = dai_node;
+               priv->dai_links[1].codec_dai_name = "dmic-hifi";
+               priv->dai_links[1].codec_name = "dmic-codec";
+               priv->dai_links[1].init = omap_abe_dmic_init;
+               priv->dai_links[1].ops = &omap_abe_dmic_ops;
        } else {
                num_links = 1;
        }
@@ -300,7 +291,7 @@ static int omap_abe_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       card->dai_link = abe_twl6040_dai_links;
+       card->dai_link = priv->dai_links;
        card->num_links = num_links;
 
        snd_soc_card_set_drvdata(card, priv);
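Note: moving the card and DAI-link definitions from file-scope statics into devm-allocated per-device data lets a deferred probe start from a clean slate; the statics could otherwise carry of_node pointers and other state over from an earlier failed attempt.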
index fe966272bd0cd0ba6d049bb60d80f759722eb96d..cba9645b648763cc03d7ffa7dbcca4f060056fe1 100644 (file)
@@ -48,6 +48,8 @@ struct omap_dmic {
        struct device *dev;
        void __iomem *io_base;
        struct clk *fclk;
+       struct pm_qos_request pm_qos_req;
+       int latency;
        int fclk_freq;
        int out_freq;
        int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
 
        mutex_lock(&dmic->mutex);
 
+       pm_qos_remove_request(&dmic->pm_qos_req);
+
        if (!dai->active)
                dmic->active = 0;
 
@@ -228,6 +232,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
        /* packet size is threshold * channels */
        dma_data = snd_soc_dai_get_dma_data(dai, substream);
        dma_data->maxburst = dmic->threshold * channels;
+       dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
+                       params_rate(params);
 
        return 0;
 }
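The value feeds a pm_qos CPU DMA-latency request: it is the time the FIFO headroom above the DMA threshold buys before an overrun. A standalone sketch of the arithmetic, with hypothetical numbers in place of the driver's constants:

    /* Standalone sketch of the bound computed above; the FIFO depth and
     * threshold are hypothetical stand-ins for the driver's values. */
    #include <stdio.h>

    #define USEC_PER_SEC 1000000L

    int main(void)
    {
        long thres_max = 66;    /* hypothetical OMAP_DMIC_THRES_MAX */
        long threshold = 32;    /* hypothetical DMA threshold (samples) */
        long rate = 48000;      /* sample rate, Hz */

        /* FIFO headroom above the threshold, expressed in microseconds:
         * how long the DMA may be delayed before the FIFO overflows. */
        long latency = (thres_max - threshold) * USEC_PER_SEC / rate;

        printf("pm_qos latency bound: %ld us\n", latency);  /* 708 us */
        return 0;
    }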
@@ -238,6 +244,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
        struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
        u32 ctrl;
 
+       if (pm_qos_request_active(&dmic->pm_qos_req))
+               pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
+
        /* Configure uplink threshold */
        omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
 
index d0ebb6b9bfac3a38ed320cb957a32cb71788b418..2d6decbfc99efc021bb19f5bd1eecc9b333a6b77 100644 (file)
@@ -308,9 +308,9 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
                        pkt_size = channels;
                }
 
-               latency = ((((buffer_size - pkt_size) / channels) * 1000)
-                                / (params->rate_num / params->rate_den));
-
+               latency = (buffer_size - pkt_size) / channels;
+               latency = latency * USEC_PER_SEC /
+                         (params->rate_num / params->rate_den);
                mcbsp->latency[substream->stream] = latency;
 
                omap_mcbsp_set_threshold(substream, pkt_size);
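Note: the old one-liner multiplied by 1000 and so produced milliseconds, while pm_qos latency requests are expressed in microseconds; rescaling with USEC_PER_SEC fixes the unit (the split across two statements is only for readability).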
index 4c1be36c22075b3b449325dfc44eccd565c50f1a..7d5bdc5a2890369eea9ea84ae7b0b5ed30d3e334 100644 (file)
@@ -54,6 +54,8 @@ struct omap_mcpdm {
        unsigned long phys_base;
        void __iomem *io_base;
        int irq;
+       struct pm_qos_request pm_qos_req;
+       int latency[2];
 
        struct mutex mutex;
 
@@ -277,6 +279,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
                                  struct snd_soc_dai *dai)
 {
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+       int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+       int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+       int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
 
        mutex_lock(&mcpdm->mutex);
 
@@ -289,6 +294,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
                }
        }
 
+       if (mcpdm->latency[stream2])
+               pm_qos_update_request(&mcpdm->pm_qos_req,
+                                     mcpdm->latency[stream2]);
+       else if (mcpdm->latency[stream1])
+               pm_qos_remove_request(&mcpdm->pm_qos_req);
+
+       mcpdm->latency[stream1] = 0;
+
        mutex_unlock(&mcpdm->mutex);
 }
 
@@ -300,7 +313,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
        int stream = substream->stream;
        struct snd_dmaengine_dai_dma_data *dma_data;
        u32 threshold;
-       int channels;
+       int channels, latency;
        int link_mask = 0;
 
        channels = params_channels(params);
@@ -344,14 +357,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
 
                dma_data->maxburst =
                                (MCPDM_DN_THRES_MAX - threshold) * channels;
+               latency = threshold;
        } else {
                /* If playback is not running assume a stereo stream to come */
                if (!mcpdm->config[!stream].link_mask)
                        mcpdm->config[!stream].link_mask = (0x3 << 3);
 
                dma_data->maxburst = threshold * channels;
+               latency = (MCPDM_DN_THRES_MAX - threshold);
        }
 
+       /*
+        * The DMA must service a DMA request within the latency time (usec)
+        * to avoid under/overflow
+        */
+       mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
+
+       if (!mcpdm->latency[stream])
+               mcpdm->latency[stream] = 10;
+
        /* Check if we need to restart McPDM with this stream */
        if (mcpdm->config[stream].link_mask &&
            mcpdm->config[stream].link_mask != link_mask)
@@ -366,6 +390,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
                                  struct snd_soc_dai *dai)
 {
        struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+       struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
+       int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+       int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+       int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+       int latency = mcpdm->latency[stream2];
+
+       /* Prevent omap hardware from hitting off between FIFO fills */
+       if (!latency || mcpdm->latency[stream1] < latency)
+               latency = mcpdm->latency[stream1];
+
+       if (pm_qos_request_active(pm_qos_req))
+               pm_qos_update_request(pm_qos_req, latency);
+       else if (latency)
+               pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
 
        if (!omap_mcpdm_active(mcpdm)) {
                omap_mcpdm_start(mcpdm);
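Note: prepare takes the tighter (smaller) of the playback and capture latency budgets and installs or updates a PM_QOS_CPU_DMA_LATENCY request with it, keeping the CPU out of idle states too deep to service FIFO refills while either direction is running.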
@@ -427,6 +465,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
        free_irq(mcpdm->irq, (void *)mcpdm);
        pm_runtime_disable(mcpdm->dev);
 
+       if (pm_qos_request_active(&mcpdm->pm_qos_req))
+               pm_qos_remove_request(&mcpdm->pm_qos_req);
+
        return 0;
 }
 
index eb1b9da05dd47a5dcf9013e01c88e0a74af3e60f..4715527054e5e07fc1d3218937aa3d91130e3c9e 100644 (file)
@@ -13,6 +13,7 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
        struct device_node *cpu = NULL;
        struct device *dev = card->dev;
        struct snd_soc_dai_link *link;
+       struct of_phandle_args args;
        int ret, num_links;
 
        ret = snd_soc_of_parse_card_name(card, "model");
@@ -47,12 +48,14 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
                        goto err;
                }
 
-               link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
-               if (!link->cpu_of_node) {
+               ret = of_parse_phandle_with_args(cpu, "sound-dai",
+                                       "#sound-dai-cells", 0, &args);
+               if (ret) {
                        dev_err(card->dev, "error getting cpu phandle\n");
-                       ret = -EINVAL;
                        goto err;
                }
+               link->cpu_of_node = args.np;
+               link->id = args.args[0];
 
                ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
                if (ret) {
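Note: of_parse_phandle_with_args() resolves the same "sound-dai" phandle but also returns its argument cells, so the first cell can seed link->id; plain of_parse_phandle() discarded that information.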
index 60ff4a2d35774eebdaced12f36a275ce2738f5f2..8f6c8fc073a93048cb2e33d8439105ac2dc7d025 100644 (file)
@@ -1112,204 +1112,204 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
 }
 
 static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
-       SND_SOC_DAPM_AIF_OUT("HDMI_RX", "HDMI Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_RX", "Slimbus Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_RX", "Slimbus1 Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_RX", "Slimbus2 Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_RX", "Slimbus3 Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_RX", "Slimbus4 Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_RX", "Slimbus5 Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_RX", "Slimbus6 Playback", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_0_TX", "Slimbus Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_1_TX", "Slimbus1 Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_2_TX", "Slimbus2 Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_3_TX", "Slimbus3 Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_4_TX", "Slimbus4 Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_5_TX", "Slimbus5 Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SLIMBUS_6_TX", "Slimbus6 Capture", 0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_RX", "Quaternary MI2S Playback",
+       SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
+       SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_MI2S_TX", "Quaternary MI2S Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_MI2S_RX", "Tertiary MI2S Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_MI2S_TX", "Tertiary MI2S Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX", "Secondary MI2S Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_MI2S_TX", "Secondary MI2S Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_MI2S_RX_SD1",
+       SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
                        "Secondary MI2S Playback SD1",
                        0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRI_MI2S_RX", "Primary MI2S Playback",
+       SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRI_MI2S_TX", "Primary MI2S Capture",
+       SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
                                                0, 0, 0, 0),
 
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_0", "Primary TDM0 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_1", "Primary TDM1 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_2", "Primary TDM2 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_3", "Primary TDM3 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_4", "Primary TDM4 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_5", "Primary TDM5 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_6", "Primary TDM6 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_RX_7", "Primary TDM7 Playback",
+       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_0", "Primary TDM0 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_1", "Primary TDM1 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_2", "Primary TDM2 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_3", "Primary TDM3 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_4", "Primary TDM4 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_5", "Primary TDM5 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_6", "Primary TDM6 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_TX_7", "Primary TDM7 Capture",
+       SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
                                                0, 0, 0, 0),
 
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_0", "Secondary TDM0 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_1", "Secondary TDM1 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_2", "Secondary TDM2 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_3", "Secondary TDM3 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_4", "Secondary TDM4 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_5", "Secondary TDM5 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_6", "Secondary TDM6 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("SEC_TDM_RX_7", "Secondary TDM7 Playback",
+       SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_0", "Secondary TDM0 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_1", "Secondary TDM1 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_2", "Secondary TDM2 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_3", "Secondary TDM3 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_4", "Secondary TDM4 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_5", "Secondary TDM5 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_6", "Secondary TDM6 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("SEC_TDM_TX_7", "Secondary TDM7 Capture",
+       SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
                                                0, 0, 0, 0),
 
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_0", "Tertiary TDM0 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_1", "Tertiary TDM1 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_2", "Tertiary TDM2 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_3", "Tertiary TDM3 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_4", "Tertiary TDM4 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_5", "Tertiary TDM5 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_6", "Tertiary TDM6 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("TERT_TDM_RX_7", "Tertiary TDM7 Playback",
+       SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_0", "Tertiary TDM0 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_1", "Tertiary TDM1 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_2", "Tertiary TDM2 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_3", "Tertiary TDM3 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_4", "Tertiary TDM4 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_5", "Tertiary TDM5 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_6", "Tertiary TDM6 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("TERT_TDM_TX_7", "Tertiary TDM7 Capture",
+       SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
                                                0, 0, 0, 0),
 
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_0", "Quaternary TDM0 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_1", "Quaternary TDM1 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_2", "Quaternary TDM2 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_3", "Quaternary TDM3 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_4", "Quaternary TDM4 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_5", "Quaternary TDM5 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_6", "Quaternary TDM6 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_RX_7", "Quaternary TDM7 Playback",
+       SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_0", "Quaternary TDM0 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_1", "Quaternary TDM1 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_2", "Quaternary TDM2 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_3", "Quaternary TDM3 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_4", "Quaternary TDM4 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_5", "Quaternary TDM5 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_6", "Quaternary TDM6 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUAT_TDM_TX_7", "Quaternary TDM7 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
                                                0, 0, 0, 0),
 
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_0", "Quinary TDM0 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_1", "Quinary TDM1 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_2", "Quinary TDM2 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_3", "Quinary TDM3 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_4", "Quinary TDM4 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_5", "Quinary TDM5 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_6", "Quinary TDM6 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_RX_7", "Quinary TDM7 Playback",
+       SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
                             0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_0", "Quinary TDM0 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_1", "Quinary TDM1 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_2", "Quinary TDM2 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_3", "Quinary TDM3 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_4", "Quinary TDM4 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_5", "Quinary TDM5 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_6", "Quinary TDM6 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
                                                0, 0, 0, 0),
-       SND_SOC_DAPM_AIF_IN("QUIN_TDM_TX_7", "Quinary TDM7 Capture",
+       SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
                                                0, 0, 0, 0),
 };
 
index 000775b4bba83d5d7e877acfb5aefca6d5b759be..829b5e987b2aaf03d234fc79f276bb5e324ba715 100644 (file)
 #define AFE_PORT_I2S_SD1               0x2
 #define AFE_PORT_I2S_SD2               0x3
 #define AFE_PORT_I2S_SD3               0x4
-#define AFE_PORT_I2S_SD0_MASK          BIT(0x1)
-#define AFE_PORT_I2S_SD1_MASK          BIT(0x2)
-#define AFE_PORT_I2S_SD2_MASK          BIT(0x3)
-#define AFE_PORT_I2S_SD3_MASK          BIT(0x4)
-#define AFE_PORT_I2S_SD0_1_MASK                GENMASK(2, 1)
-#define AFE_PORT_I2S_SD2_3_MASK                GENMASK(4, 3)
-#define AFE_PORT_I2S_SD0_1_2_MASK      GENMASK(3, 1)
-#define AFE_PORT_I2S_SD0_1_2_3_MASK    GENMASK(4, 1)
+#define AFE_PORT_I2S_SD0_MASK          BIT(0x0)
+#define AFE_PORT_I2S_SD1_MASK          BIT(0x1)
+#define AFE_PORT_I2S_SD2_MASK          BIT(0x2)
+#define AFE_PORT_I2S_SD3_MASK          BIT(0x3)
+#define AFE_PORT_I2S_SD0_1_MASK                GENMASK(1, 0)
+#define AFE_PORT_I2S_SD2_3_MASK                GENMASK(3, 2)
+#define AFE_PORT_I2S_SD0_1_2_MASK      GENMASK(2, 0)
+#define AFE_PORT_I2S_SD0_1_2_3_MASK    GENMASK(3, 0)
 #define AFE_PORT_I2S_QUAD01            0x5
 #define AFE_PORT_I2S_QUAD23            0x6
 #define AFE_PORT_I2S_6CHS              0x7
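The old masks were shifted one bit left of the serial-data line they name, e.g. AFE_PORT_I2S_SD0_MASK actually selected SD1. A standalone sketch with minimal BIT()/GENMASK() stand-ins (assuming 64-bit long):

    /* Standalone sketch, not kernel code: minimal BIT()/GENMASK()
     * stand-ins to show the off-by-one in the old SD line masks. */
    #include <stdio.h>

    #define BIT(n)         (1UL << (n))
    #define GENMASK(h, l)  (((~0UL) >> (63 - (h))) & ((~0UL) << (l)))

    int main(void)
    {
        printf("old SD0 mask:   0x%lx\n", BIT(0x1));       /* 0x2 - hits SD1 */
        printf("new SD0 mask:   0x%lx\n", BIT(0x0));       /* 0x1 - hits SD0 */
        printf("old SD0_1 mask: 0x%lx\n", GENMASK(2, 1));  /* 0x6 */
        printf("new SD0_1 mask: 0x%lx\n", GENMASK(1, 0));  /* 0x3 */
        return 0;
    }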
index a16c71c03058eb4e7a7a427a1aad3cb226f5f8ed..86115de5c1b2a1bd16a42a13305b4c62d6feee27 100644 (file)
@@ -122,7 +122,6 @@ static struct snd_pcm_hardware q6asm_dai_hardware_playback = {
                        .rate_max =     48000,                          \
                },                                                      \
                .name = "MultiMedia"#num,                               \
-               .probe = fe_dai_probe,                                  \
                .id = MSM_FRONTEND_DAI_MULTIMEDIA##num,                 \
        }
 
@@ -511,38 +510,6 @@ static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
        }
 }
 
-static const struct snd_soc_dapm_route afe_pcm_routes[] = {
-       {"MM_DL1",  NULL, "MultiMedia1 Playback" },
-       {"MM_DL2",  NULL, "MultiMedia2 Playback" },
-       {"MM_DL3",  NULL, "MultiMedia3 Playback" },
-       {"MM_DL4",  NULL, "MultiMedia4 Playback" },
-       {"MM_DL5",  NULL, "MultiMedia5 Playback" },
-       {"MM_DL6",  NULL, "MultiMedia6 Playback" },
-       {"MM_DL7",  NULL, "MultiMedia7 Playback" },
-       {"MM_DL7",  NULL, "MultiMedia8 Playback" },
-       {"MultiMedia1 Capture", NULL, "MM_UL1"},
-       {"MultiMedia2 Capture", NULL, "MM_UL2"},
-       {"MultiMedia3 Capture", NULL, "MM_UL3"},
-       {"MultiMedia4 Capture", NULL, "MM_UL4"},
-       {"MultiMedia5 Capture", NULL, "MM_UL5"},
-       {"MultiMedia6 Capture", NULL, "MM_UL6"},
-       {"MultiMedia7 Capture", NULL, "MM_UL7"},
-       {"MultiMedia8 Capture", NULL, "MM_UL8"},
-
-};
-
-static int fe_dai_probe(struct snd_soc_dai *dai)
-{
-       struct snd_soc_dapm_context *dapm;
-
-       dapm = snd_soc_component_get_dapm(dai->component);
-       snd_soc_dapm_add_routes(dapm, afe_pcm_routes,
-                               ARRAY_SIZE(afe_pcm_routes));
-
-       return 0;
-}
-
-
 static const struct snd_soc_component_driver q6asm_fe_dai_component = {
        .name           = DRV_NAME,
        .ops            = &q6asm_dai_ops,
index c6b51571be945e5262dbf46966caab4e1615f529..d61b8404f7da999c2326e2aa6ee26522a044257e 100644 (file)
@@ -909,6 +909,25 @@ static const struct snd_soc_dapm_route intercon[] = {
        {"MM_UL6", NULL, "MultiMedia6 Mixer"},
        {"MM_UL7", NULL, "MultiMedia7 Mixer"},
        {"MM_UL8", NULL, "MultiMedia8 Mixer"},
+
+       {"MM_DL1",  NULL, "MultiMedia1 Playback" },
+       {"MM_DL2",  NULL, "MultiMedia2 Playback" },
+       {"MM_DL3",  NULL, "MultiMedia3 Playback" },
+       {"MM_DL4",  NULL, "MultiMedia4 Playback" },
+       {"MM_DL5",  NULL, "MultiMedia5 Playback" },
+       {"MM_DL6",  NULL, "MultiMedia6 Playback" },
+       {"MM_DL7",  NULL, "MultiMedia7 Playback" },
+       {"MM_DL8",  NULL, "MultiMedia8 Playback" },
+
+       {"MultiMedia1 Capture", NULL, "MM_UL1"},
+       {"MultiMedia2 Capture", NULL, "MM_UL2"},
+       {"MultiMedia3 Capture", NULL, "MM_UL3"},
+       {"MultiMedia4 Capture", NULL, "MM_UL4"},
+       {"MultiMedia5 Capture", NULL, "MM_UL5"},
+       {"MultiMedia6 Capture", NULL, "MM_UL6"},
+       {"MultiMedia7 Capture", NULL, "MM_UL7"},
+       {"MultiMedia8 Capture", NULL, "MM_UL8"},
+
 };
 
 static int routing_hw_params(struct snd_pcm_substream *substream,
index 9e7b5fa4cf59630ee65d2ee15e9893f97d8245ce..4ac78d7a4b2da8fb51e9de8fcc751614787fb324 100644 (file)
@@ -33,6 +33,7 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
 
 static const struct snd_dmaengine_pcm_config rk_dmaengine_pcm_config = {
        .pcm_hardware = &snd_rockchip_hardware,
+       .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config,
        .prealloc_buffer_size = 32 * 1024,
 };
 
index fcb4df23248c163b287ff39aab02dd6b2970b237..6ec78f3096dd3e8158561535569e1f2502674d49 100644 (file)
@@ -306,7 +306,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
        if (rsnd_ssi_is_multi_slave(mod, io))
                return 0;
 
-       if (ssi->rate) {
+       if (ssi->usrcnt > 1) {
                if (ssi->rate != rate) {
                        dev_err(dev, "SSI parent/child should use same rate\n");
                        return -EINVAL;
index b8e72b52db30ea44c92092dc2a0a4135218624fc..4fb29f0e561ef3b1a558ebe93f201efb105628df 100644 (file)
@@ -10,11 +10,17 @@ struct snd_soc_acpi_mach *
 snd_soc_acpi_find_machine(struct snd_soc_acpi_mach *machines)
 {
        struct snd_soc_acpi_mach *mach;
+       struct snd_soc_acpi_mach *mach_alt;
 
        for (mach = machines; mach->id[0]; mach++) {
                if (acpi_dev_present(mach->id, NULL, -1)) {
-                       if (mach->machine_quirk)
-                               mach = mach->machine_quirk(mach);
+                       if (mach->machine_quirk) {
+                               mach_alt = mach->machine_quirk(mach);
+                               if (!mach_alt)
+                                       continue; /* not a full match, ignore */
+                               mach = mach_alt;
+                       }
+
                        return mach;
                }
        }
index 6ddcf12bc030dbb6a8a582fae810c8ba4ccf5bd3..b29d0f65611eb536ded7aaa3e41416ca600c2bde 100644 (file)
@@ -2131,6 +2131,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
        }
 
        card->instantiated = 1;
+       dapm_mark_endpoints_dirty(card);
        snd_soc_dapm_sync(&card->dapm);
        mutex_unlock(&card->mutex);
        mutex_unlock(&client_mutex);
index ea05cc91aa05d8d14a402769928cb9119a8c9147..211589b0b2ef5400276d689599134bd02919691d 100644 (file)
@@ -390,7 +390,7 @@ static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
        char *mclk_name, *p, *s = (char *)pname;
        int ret, i = 0;
 
-       mclk = devm_kzalloc(dev, sizeof(mclk), GFP_KERNEL);
+       mclk = devm_kzalloc(dev, sizeof(*mclk), GFP_KERNEL);
        if (!mclk)
                return -ENOMEM;
 
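
The stm32 hunk above fixes the classic sizeof(pointer) vs. sizeof(*pointer)
bug: sizeof(mclk) yields only pointer-sized storage (8 bytes on LP64), not
the structure being allocated. A standalone sketch of the pitfall; the
struct below is a made-up stand-in, not the driver's real type:

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for the driver's clock data. */
    struct clk_demo {
            char name[64];
            unsigned long rate;
    };

    int main(void)
    {
            struct clk_demo *hw;

            printf("sizeof(hw)  = %zu\n", sizeof(hw));  /* 8: the bug  */
            printf("sizeof(*hw) = %zu\n", sizeof(*hw)); /* 72: correct */

            hw = malloc(sizeof(*hw)); /* the pattern the fix restores */
            free(hw);
            return 0;
    }
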
index 66aad0d3f9c7f9d04a44795eeca7591b190fde77..8134c3c94229263a982d0009bc89dfa6b5a0aad6 100644 (file)
@@ -31,7 +31,7 @@ config SND_SUN8I_CODEC_ANALOG
 config SND_SUN50I_CODEC_ANALOG
        tristate "Allwinner sun50i Codec Analog Controls Support"
        depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
-       select SND_SUNXI_ADDA_PR_REGMAP
+       select SND_SUN8I_ADDA_PR_REGMAP
        help
          Say Y or M if you want to add support for the analog controls for
          the codec embedded in Allwinner A64 SoC.
index 522a72fde78da9b89f40551577de40fee351f59f..92c5de026c43a7c8cf1ec3913ee5257353ef201f 100644 (file)
@@ -481,7 +481,11 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
        { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
          "AIF1 Slot 0 Right"},
 
-       /* ADC routes */
+       /* ADC Routes */
+       { "AIF1 Slot 0 Right ADC", NULL, "ADC" },
+       { "AIF1 Slot 0 Left ADC", NULL, "ADC" },
+
+       /* ADC Mixer Routes */
        { "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
          "AIF1 Slot 0 Left ADC" },
        { "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
@@ -605,16 +609,10 @@ err_pm_disable:
 
 static int sun8i_codec_remove(struct platform_device *pdev)
 {
-       struct snd_soc_card *card = platform_get_drvdata(pdev);
-       struct sun8i_codec *scodec = snd_soc_card_get_drvdata(card);
-
        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                sun8i_codec_runtime_suspend(&pdev->dev);
 
-       clk_disable_unprepare(scodec->clk_module);
-       clk_disable_unprepare(scodec->clk_bus);
-
        return 0;
 }
 
index e73c962590eb689f446cd1f17d179aab3e292b9b..079063d8038d943e634988d16388c7649a17ec15 100644 (file)
@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
        runtime->hw = snd_cs4231_playback;
 
        err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
-       if (err < 0) {
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+       if (err < 0)
                return err;
-       }
        chip->playback_substream = substream;
        chip->p_periods_sent = 0;
        snd_pcm_set_sync(substream);
@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
        runtime->hw = snd_cs4231_capture;
 
        err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
-       if (err < 0) {
-               snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+       if (err < 0)
                return err;
-       }
        chip->capture_substream = substream;
        chip->c_periods_sent = 0;
        snd_pcm_set_sync(substream);
index 2bfe4e80a6b92b467c502d66ddf04d2b64f8154c..a105947eaf55cd64c0d053cd70bfbf0f68f52ee1 100644 (file)
@@ -682,9 +682,12 @@ static int usb_audio_probe(struct usb_interface *intf,
 
  __error:
        if (chip) {
+               /* chip->active is inside the chip->card object, so
+                * decrement it before that memory is possibly freed.
+                */
+               atomic_dec(&chip->active);
                if (!chip->num_interfaces)
                        snd_card_free(chip->card);
-               atomic_dec(&chip->active);
        }
        mutex_unlock(&register_mutex);
        return err;
index 849953e5775c27a32ebd6f1e942bd6283a9cf2e7..37fc0447c071045745c8c43831daeff47eac507f 100644 (file)
@@ -3382,5 +3382,15 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
                .ifnum = QUIRK_NO_INTERFACE
        }
 },
+/* Dell WD19 Dock */
+{
+       USB_DEVICE(0x0bda, 0x402e),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .vendor_name = "Dell",
+               .product_name = "WD19 Dock",
+               .profile_name = "Dell-WD15-Dock",
+               .ifnum = QUIRK_NO_INTERFACE
+       }
+},
 
 #undef USB_DEVICE_VENDOR_SPEC
index 8a945ece98690d96b580da4121657c60a0d9b118..6623cafc94f2c639bcceefe877c57927ac31042b 100644 (file)
@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
 
+       case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */
        case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
        case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
        case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
index 12835ea0e4173c281e5ddfe0883595492e985b09..378c051fa1776534b0e1fca7e56567f9f8a3d2d4 100644 (file)
 #define wmb()          asm volatile("dmb ishst" ::: "memory")
 #define rmb()          asm volatile("dmb ishld" ::: "memory")
 
-#define smp_store_release(p, v)                                        \
-do {                                                           \
-       union { typeof(*p) __val; char __c[1]; } __u =          \
-               { .__val = (__force typeof(*p)) (v) };          \
-                                                               \
-       switch (sizeof(*p)) {                                   \
-       case 1:                                                 \
-               asm volatile ("stlrb %w1, %0"                   \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u8 *)__u.__c)        \
-                               : "memory");                    \
-               break;                                          \
-       case 2:                                                 \
-               asm volatile ("stlrh %w1, %0"                   \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u16 *)__u.__c)       \
-                               : "memory");                    \
-               break;                                          \
-       case 4:                                                 \
-               asm volatile ("stlr %w1, %0"                    \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u32 *)__u.__c)       \
-                               : "memory");                    \
-               break;                                          \
-       case 8:                                                 \
-               asm volatile ("stlr %1, %0"                     \
-                               : "=Q" (*p)                     \
-                               : "r" (*(__u64 *)__u.__c)       \
-                               : "memory");                    \
-               break;                                          \
-       default:                                                \
-               /* Only to shut up gcc ... */                   \
-               mb();                                           \
-               break;                                          \
-       }                                                       \
+#define smp_store_release(p, v)                                                \
+do {                                                                   \
+       union { typeof(*p) __val; char __c[1]; } __u =                  \
+               { .__val = (v) };                                       \
+                                                                       \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("stlrb %w1, %0"                           \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u8_alias_t *)__u.__c)        \
+                               : "memory");                            \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("stlrh %w1, %0"                           \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u16_alias_t *)__u.__c)       \
+                               : "memory");                            \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile ("stlr %w1, %0"                            \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u32_alias_t *)__u.__c)       \
+                               : "memory");                            \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("stlr %1, %0"                             \
+                               : "=Q" (*p)                             \
+                               : "r" (*(__u64_alias_t *)__u.__c)       \
+                               : "memory");                            \
+               break;                                                  \
+       default:                                                        \
+               /* Only to shut up gcc ... */                           \
+               mb();                                                   \
+               break;                                                  \
+       }                                                               \
 } while (0)
 
-#define smp_load_acquire(p)                                    \
-({                                                             \
-       union { typeof(*p) __val; char __c[1]; } __u;           \
-                                                               \
-       switch (sizeof(*p)) {                                   \
-       case 1:                                                 \
-               asm volatile ("ldarb %w0, %1"                   \
-                       : "=r" (*(__u8 *)__u.__c)               \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       case 2:                                                 \
-               asm volatile ("ldarh %w0, %1"                   \
-                       : "=r" (*(__u16 *)__u.__c)              \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       case 4:                                                 \
-               asm volatile ("ldar %w0, %1"                    \
-                       : "=r" (*(__u32 *)__u.__c)              \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       case 8:                                                 \
-               asm volatile ("ldar %0, %1"                     \
-                       : "=r" (*(__u64 *)__u.__c)              \
-                       : "Q" (*p) : "memory");                 \
-               break;                                          \
-       default:                                                \
-               /* Only to shut up gcc ... */                   \
-               mb();                                           \
-               break;                                          \
-       }                                                       \
-       __u.__val;                                              \
+#define smp_load_acquire(p)                                            \
+({                                                                     \
+       union { typeof(*p) __val; char __c[1]; } __u =                  \
+               { .__c = { 0 } };                                       \
+                                                                       \
+       switch (sizeof(*p)) {                                           \
+       case 1:                                                         \
+               asm volatile ("ldarb %w0, %1"                           \
+                       : "=r" (*(__u8_alias_t *)__u.__c)               \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       case 2:                                                         \
+               asm volatile ("ldarh %w0, %1"                           \
+                       : "=r" (*(__u16_alias_t *)__u.__c)              \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       case 4:                                                         \
+               asm volatile ("ldar %w0, %1"                            \
+                       : "=r" (*(__u32_alias_t *)__u.__c)              \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       case 8:                                                         \
+               asm volatile ("ldar %0, %1"                             \
+                       : "=r" (*(__u64_alias_t *)__u.__c)              \
+                       : "Q" (*p) : "memory");                         \
+               break;                                                  \
+       default:                                                        \
+               /* Only to shut up gcc ... */                           \
+               mb();                                                   \
+               break;                                                  \
+       }                                                               \
+       __u.__val;                                                      \
 })
 
 #endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
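
The rewrite above swaps the libc __u8/__u16/__u32/__u64 casts for the tools
tree's __uN_alias_t typedefs (keeping the register casts alias-safe in
userspace builds) and drops the kernel-only __force annotation. As a rough
sketch of how these macros pair up, with invented variable names and
assuming the tools version of <asm/barrier.h> is on the include path:

    /* Message-passing sketch: 'payload' and 'ready' are made up for the
     * example.  The release store orders the payload write before the
     * flag; a reader that observes ready != 0 through the acquire load
     * is guaranteed to read payload == 42.
     */
    #include <asm/barrier.h> /* the tools/include copy of this header */

    static int payload;
    static int ready;

    static void writer(void)
    {
            payload = 42;                 /* plain store       */
            smp_store_release(&ready, 1); /* publishes payload */
    }

    static int reader(void)
    {
            if (smp_load_acquire(&ready)) /* pairs with the release */
                    return payload;
            return -1;                    /* not published yet */
    }
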
index 5072cbd15c82955ce3fcf1a3ca828103b882248d..dae1584cf017f6aa311a5b78c3311b0bf55c2b18 100644 (file)
@@ -16,5 +16,6 @@
  */
 
 #define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_NEW_STAT
 
 #include <asm-generic/unistd.h>
index 1b32b56a03d34ce2a5f0b7f79c621f87d8c89dbf..8c876c166ef27b2c6fa754781fdbb103f2addc54 100644 (file)
@@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char {
 
 #define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
 #define KVM_REG_PPC_ONLINE     (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
+#define KVM_REG_PPC_PTCR       (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
 
 /* Transactional Memory checkpointed state:
  * This is all GPRs, all VSX regs and a subset of SPRs
index 9a50f02b98946eb49df6cb5f407b1a4a04b89e6c..16511d97e8dc037c8c0b9a60b09a0adf409e6d20 100644 (file)
@@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc {
 #define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW       1
 #define KVM_S390_VM_CRYPTO_DISABLE_AES_KW      2
 #define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW      3
+#define KVM_S390_VM_CRYPTO_ENABLE_APIE         4
+#define KVM_S390_VM_CRYPTO_DISABLE_APIE                5
 
 /* kvm attributes for migration mode */
 #define KVM_S390_VM_MIGRATION_STOP     0
index 89a048c2faec7f8a818d1a461ccd7fa67eca0fd9..28c4a502b4197cce9ae968deb8ea2fe7797e8da4 100644 (file)
 #define X86_FEATURE_LA57               (16*32+16) /* 5-level page tables */
 #define X86_FEATURE_RDPID              (16*32+22) /* RDPID instruction */
 #define X86_FEATURE_CLDEMOTE           (16*32+25) /* CLDEMOTE instruction */
+#define X86_FEATURE_MOVDIRI            (16*32+27) /* MOVDIRI instruction */
+#define X86_FEATURE_MOVDIR64B          (16*32+28) /* MOVDIR64B instruction */
 
 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
 #define X86_FEATURE_OVERFLOW_RECOV     (17*32+ 0) /* MCA overflow recovery support */
index 8a6eff9c27f3faf349b2b8ba192e55fe0f808f57..dabfcf7c3941aa90a92a91ee37f1164447c71655 100644 (file)
@@ -300,10 +300,7 @@ struct kvm_vcpu_events {
                __u8 injected;
                __u8 nr;
                __u8 has_error_code;
-               union {
-                       __u8 pad;
-                       __u8 pending;
-               };
+               __u8 pending;
                __u32 error_code;
        } exception;
        struct {
@@ -387,6 +384,7 @@ struct kvm_sync_regs {
 
 #define KVM_STATE_NESTED_GUEST_MODE    0x00000001
 #define KVM_STATE_NESTED_RUN_PENDING   0x00000002
+#define KVM_STATE_NESTED_EVMCS         0x00000004
 
 #define KVM_STATE_NESTED_SMM_GUEST_MODE        0x00000001
 #define KVM_STATE_NESTED_SMM_VMXON     0x00000002
index edbe81534c6d2941b955cd0ab15cf845110fb130..d07ccf8a23f7170be674d12a08957d673a560f77 100644 (file)
@@ -137,4 +137,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-       **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+       **bpf**\ (2),
+       **bpf-helpers**\ (7),
+       **bpftool**\ (8),
+       **bpftool-prog**\ (8),
+       **bpftool-map**\ (8),
+       **bpftool-net**\ (8),
+       **bpftool-perf**\ (8)
index f55a2daed59b7e8dead1a1fc429dd6a097f2f83f..7bb787cfa97145eb1bede4ea2057903d6587398f 100644 (file)
@@ -171,4 +171,10 @@ The following three commands are equivalent:
 
 SEE ALSO
 ========
-       **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
+       **bpf**\ (2),
+       **bpf-helpers**\ (7),
+       **bpftool**\ (8),
+       **bpftool-prog**\ (8),
+       **bpftool-cgroup**\ (8),
+       **bpftool-net**\ (8),
+       **bpftool-perf**\ (8)
index 408ec30d88726206b3bad8436ec8c9abdd11d806..ed87c9b619adc889310e7795bcfcc4b790251827 100644 (file)
@@ -136,4 +136,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-       **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+       **bpf**\ (2),
+       **bpf-helpers**\ (7),
+       **bpftool**\ (8),
+       **bpftool-prog**\ (8),
+       **bpftool-map**\ (8),
+       **bpftool-cgroup**\ (8),
+       **bpftool-perf**\ (8)
index e3eb0eab76419427509b213e00c6458e68ff61fc..f4c5e5538bb8e516624b0f28f749bf613c5a477e 100644 (file)
@@ -78,4 +78,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-       **bpftool**\ (8), **bpftool-prog**\ (8), **bpftool-map**\ (8)
+       **bpf**\ (2),
+       **bpf-helpers**\ (7),
+       **bpftool**\ (8),
+       **bpftool-prog**\ (8),
+       **bpftool-map**\ (8),
+       **bpftool-cgroup**\ (8),
+       **bpftool-net**\ (8)
index ac4e904b10fbd9b4b504d943f62aef0be68f0829..ecf618807125d9af832ec1ec969047c060393620 100644 (file)
@@ -124,7 +124,8 @@ OPTIONS
                  Generate human-readable JSON output. Implies **-j**.
 
        -f, --bpffs
-                 Show file names of pinned programs.
+                 When showing BPF programs, show file names of pinned
+                 programs.
 
 EXAMPLES
 ========
@@ -206,4 +207,10 @@ EXAMPLES
 
 SEE ALSO
 ========
-       **bpftool**\ (8), **bpftool-map**\ (8), **bpftool-cgroup**\ (8)
+       **bpf**\ (2),
+       **bpf-helpers**\ (7),
+       **bpftool**\ (8),
+       **bpftool-map**\ (8),
+       **bpftool-cgroup**\ (8),
+       **bpftool-net**\ (8),
+       **bpftool-perf**\ (8)
index 04cd4f92ab89c9dd8009180c3e4601c7b1956d1e..129b7a9c0f9bce5ac78f49d6678e39c8c6fbaa8b 100644 (file)
@@ -63,5 +63,10 @@ OPTIONS
 
 SEE ALSO
 ========
-       **bpftool-map**\ (8), **bpftool-prog**\ (8), **bpftool-cgroup**\ (8)
-        **bpftool-perf**\ (8), **bpftool-net**\ (8)
+       **bpf**\ (2),
+       **bpf-helpers**\ (7),
+       **bpftool-prog**\ (8),
+       **bpftool-map**\ (8),
+       **bpftool-cgroup**\ (8),
+       **bpftool-net**\ (8),
+       **bpftool-perf**\ (8)
index 55bc512a18318c8b8745a4f174ba2f85c85c83e9..e4e6e2b3fd84742758a53c97d1c55df009fb2fbc 100644 (file)
@@ -32,7 +32,7 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
 }
 
 static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
-                              const void *data)
+                              __u8 bit_offset, const void *data)
 {
        int actual_type_id;
 
@@ -40,7 +40,7 @@ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
        if (actual_type_id < 0)
                return actual_type_id;
 
-       return btf_dumper_do_type(d, actual_type_id, 0, data);
+       return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
 }
 
 static void btf_dumper_enum(const void *data, json_writer_t *jw)
@@ -237,7 +237,7 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
        case BTF_KIND_VOLATILE:
        case BTF_KIND_CONST:
        case BTF_KIND_RESTRICT:
-               return btf_dumper_modifier(d, type_id, data);
+               return btf_dumper_modifier(d, type_id, bit_offset, data);
        default:
                jsonw_printf(d->jw, "(unsupported-kind");
                return -EINVAL;
index 25af85304ebee3b073d9e7d0036038c5c4c05d68..70fd48d79f611fd98666a0ecf156c00285007fd9 100644 (file)
@@ -130,16 +130,17 @@ static int mnt_bpffs(const char *target, char *buff, size_t bufflen)
        return 0;
 }
 
-int open_obj_pinned(char *path)
+int open_obj_pinned(char *path, bool quiet)
 {
        int fd;
 
        fd = bpf_obj_get(path);
        if (fd < 0) {
-               p_err("bpf obj get (%s): %s", path,
-                     errno == EACCES && !is_bpffs(dirname(path)) ?
-                   "directory not in bpf file system (bpffs)" :
-                   strerror(errno));
+               if (!quiet)
+                       p_err("bpf obj get (%s): %s", path,
+                             errno == EACCES && !is_bpffs(dirname(path)) ?
+                           "directory not in bpf file system (bpffs)" :
+                           strerror(errno));
                return -1;
        }
 
@@ -151,7 +152,7 @@ int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
        enum bpf_obj_type type;
        int fd;
 
-       fd = open_obj_pinned(path);
+       fd = open_obj_pinned(path, false);
        if (fd < 0)
                return -1;
 
@@ -304,7 +305,7 @@ char *get_fdinfo(int fd, const char *key)
                return NULL;
        }
 
-       while ((n = getline(&line, &line_n, fdi))) {
+       while ((n = getline(&line, &line_n, fdi)) > 0) {
                char *value;
                int len;
 
@@ -384,7 +385,7 @@ int build_pinned_obj_table(struct pinned_obj_table *tab,
                while ((ftse = fts_read(fts))) {
                        if (!(ftse->fts_info & FTS_F))
                                continue;
-                       fd = open_obj_pinned(ftse->fts_path);
+                       fd = open_obj_pinned(ftse->fts_path, true);
                        if (fd < 0)
                                continue;
 
index 28322ace285653f91a9fe42e69a29483b0a1280b..a8bf1e2d9818debfc200701f21d438d432e093fc 100644 (file)
@@ -127,7 +127,7 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
 int get_fd_type(int fd);
 const char *get_fd_type_name(enum bpf_obj_type type);
 char *get_fdinfo(int fd, const char *key);
-int open_obj_pinned(char *path);
+int open_obj_pinned(char *path, bool quiet);
 int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
 int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32));
 int do_pin_fd(int fd, const char *name);
index 5302ee282409eb039458447ad40215f24516c48a..ccee180dfb761248f078ffa4b2154793d0308420 100644 (file)
@@ -357,10 +357,9 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
        if (!hash_empty(prog_table.table)) {
                struct pinned_obj *obj;
 
-               printf("\n");
                hash_for_each_possible(prog_table.table, obj, hash, info->id) {
                        if (obj->id == info->id)
-                               printf("\tpinned %s\n", obj->path);
+                               printf("\n\tpinned %s", obj->path);
                }
        }
 
@@ -845,6 +844,7 @@ static int do_load(int argc, char **argv)
                        }
                        NEXT_ARG();
                } else if (is_prefix(*argv, "map")) {
+                       void *new_map_replace;
                        char *endptr, *name;
                        int fd;
 
@@ -878,12 +878,15 @@ static int do_load(int argc, char **argv)
                        if (fd < 0)
                                goto err_free_reuse_maps;
 
-                       map_replace = reallocarray(map_replace, old_map_fds + 1,
-                                                  sizeof(*map_replace));
-                       if (!map_replace) {
+                       new_map_replace = reallocarray(map_replace,
+                                                      old_map_fds + 1,
+                                                      sizeof(*map_replace));
+                       if (!new_map_replace) {
                                p_err("mem alloc failed");
                                goto err_free_reuse_maps;
                        }
+                       map_replace = new_map_replace;
+
                        map_replace[old_map_fds].idx = idx;
                        map_replace[old_map_fds].name = name;
                        map_replace[old_map_fds].fd = fd;
index f216b2f5c3d7b591387acf171cd77d633b6c3757..d74bb9414d7c6026e5e9007b16b0e18f179f0488 100644 (file)
@@ -33,6 +33,7 @@ FEATURE_TESTS_BASIC :=                  \
         dwarf_getlocations              \
         fortify-source                  \
         sync-compare-and-swap           \
+        get_current_dir_name            \
         glibc                           \
         gtk2                            \
         gtk2-infobar                    \
index 0516259be70f071f2533496ead690d1ebd5ba3b8..304b984f11b99bd7240c5ec26dd85d5029b33944 100644 (file)
@@ -7,6 +7,7 @@ FILES=                                          \
          test-dwarf_getlocations.bin            \
          test-fortify-source.bin                \
          test-sync-compare-and-swap.bin         \
+         test-get_current_dir_name.bin          \
          test-glibc.bin                         \
          test-gtk2.bin                          \
          test-gtk2-infobar.bin                  \
@@ -101,6 +102,9 @@ $(OUTPUT)test-bionic.bin:
 $(OUTPUT)test-libelf.bin:
        $(BUILD) -lelf
 
+$(OUTPUT)test-get_current_dir_name.bin:
+       $(BUILD)
+
 $(OUTPUT)test-glibc.bin:
        $(BUILD)
 
index 8dc20a61341f61533cf6562122d1b1d7232187f2..56722bfe6bdd32e8d0f9592921921fcc49a88b3c 100644 (file)
 # include "test-libelf-mmap.c"
 #undef main
 
+#define main main_test_get_current_dir_name
+# include "test-get_current_dir_name.c"
+#undef main
+
 #define main main_test_glibc
 # include "test-glibc.c"
 #undef main
@@ -174,6 +178,7 @@ int main(int argc, char *argv[])
        main_test_hello();
        main_test_libelf();
        main_test_libelf_mmap();
+       main_test_get_current_dir_name();
        main_test_glibc();
        main_test_dwarf();
        main_test_dwarf_getlocations();
diff --git a/tools/build/feature/test-get_current_dir_name.c b/tools/build/feature/test-get_current_dir_name.c
new file mode 100644 (file)
index 0000000..573000f
--- /dev/null
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <stdlib.h>
+
+int main(void)
+{
+       free(get_current_dir_name());
+       return 0;
+}
index 040651735662983693d51f5645f311f128fe63f4..cdc9f4ca8c27504d7058903a2f8fc4de877a82bd 100644 (file)
@@ -79,6 +79,8 @@
 #define TIOCGPTLCK     _IOR('T', 0x39, int) /* Get Pty lock state */
 #define TIOCGEXCL      _IOR('T', 0x40, int) /* Get exclusive mode state */
 #define TIOCGPTPEER    _IO('T', 0x41) /* Safely open the slave */
+#define TIOCGISO7816   _IOR('T', 0x42, struct serial_iso7816)
+#define TIOCSISO7816   _IOWR('T', 0x43, struct serial_iso7816)
 
 #define FIONCLEX       0x5450
 #define FIOCLEX                0x5451
index df4bedb9b01c281b7bf15048fef3063a35ede51c..538546edbfbd2bd1cfca431aa95864f018fcc7ee 100644 (file)
@@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee)
 /* fs/stat.c */
 #define __NR_readlinkat 78
 __SYSCALL(__NR_readlinkat, sys_readlinkat)
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
 #define __NR3264_fstatat 79
 __SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
 #define __NR3264_fstat 80
 __SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+#endif
 
 /* fs/sync.c */
 #define __NR_sync 81
index 7f5634ce8e885d0aa26ee210592b5b93fb9f01e5..a4446f452040aa2bdb15dfd8c28c320b073f9bf0 100644 (file)
@@ -529,6 +529,28 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
 
+/*
+ * Once upon a time we supposed that writes through the GGTT would be
+ * immediately in physical memory (once flushed out of the CPU path). However,
+ * on a few different processors and chipsets, this is not necessarily the case
+ * as the writes appear to be buffered internally. Thus a read of the backing
+ * storage (physical memory) via a different path (with different physical tags
+ * to the indirect write via the GGTT) will see stale values from before
+ * the GGTT write. Inside the kernel, we can for the most part keep track of
+ * the different read/write domains in use (e.g. set-domain), but the assumption
+ * of coherency is baked into the ABI, hence reporting its true state in this
+ * parameter.
+ *
+ * Reports true when writes via mmap_gtt are immediately visible following an
+ * lfence to flush the WCB.
+ *
+ * Reports false when writes via mmap_gtt are indeterminately delayed in an
+ * internal buffer and are _not_ immediately visible to third parties accessing
+ * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
+ * communications channel when reporting false is strongly discouraged.
+ */
+#define I915_PARAM_MMAP_GTT_COHERENT   52
+
 typedef struct drm_i915_getparam {
        __s32 param;
        /*
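
Parameters such as the new I915_PARAM_MMAP_GTT_COHERENT are queried through
the long-standing getparam ioctl built around the struct above. A hedged
sketch, assuming an already-open i915 DRM fd and the uapi header; error
handling is abbreviated:

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Returns 1 if mmap_gtt writes are immediately visible, 0 if not,
     * and -1 if the kernel predates the parameter.
     */
    static int gtt_writes_coherent(int drm_fd)
    {
            int value = 0;
            struct drm_i915_getparam gp = {
                    .param = I915_PARAM_MMAP_GTT_COHERENT,
                    .value = &value,
            };

            if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
                    return -1;
            return value;
    }
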
index 852dc17ab47a07f2580ade5f9e4a1130ee779c26..72c453a8bf50ed5cd4a0383997f5727048ce8d60 100644 (file)
@@ -2170,7 +2170,7 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2187,12 +2187,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx*
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2201,8 +2203,10 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
- * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
+ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for UDP socket matching *tuple*, optionally in a child
  *             network namespace *netns*. The return value must be checked,
@@ -2219,12 +2223,14 @@ union bpf_attr {
  *             **sizeof**\ (*tuple*\ **->ipv6**)
  *                     Look for an IPv6 socket.
  *
- *             If the *netns* is zero, then the socket lookup table in the
- *             netns associated with the *ctx* will be used. For the TC hooks,
- *             this in the netns of the device in the skb. For socket hooks,
- *             this in the netns of the socket. If *netns* is non-zero, then
- *             it specifies the ID of the netns relative to the netns
- *             associated with the *ctx*.
+ *             If the *netns* is a negative signed 32-bit integer, then the
+ *             socket lookup table in the netns associated with the *ctx*
+ *             will be used. For the TC hooks, this is the netns of the device
+ *             in the skb. For socket hooks, this is the netns of the socket.
+ *             If *netns* is any other signed 32-bit value greater than or
+ *             equal to zero then it specifies the ID of the netns relative to
+ *             the netns associated with the *ctx*. *netns* values beyond the
+ *             range of 32-bit integers are reserved for future use.
  *
  *             All values for *flags* are reserved for future usage, and must
  *             be left at zero.
@@ -2233,6 +2239,8 @@ union bpf_attr {
  *             **CONFIG_NET** configuration option.
  *     Return
  *             Pointer to *struct bpf_sock*, or NULL in case of failure.
+ *             For sockets with reuseport option, the *struct bpf_sock*
+ *             result is from reuse->socks[] using the hash of the tuple.
  *
  * int bpf_sk_release(struct bpf_sock *sk)
  *     Description
@@ -2405,6 +2413,9 @@ enum bpf_func_id {
 /* BPF_FUNC_perf_event_output for sk_buff input context. */
 #define BPF_F_CTXLEN_MASK              (0xfffffULL << 32)
 
+/* Current network namespace */
+#define BPF_F_CURRENT_NETNS            (-1L)
+
 /* Mode for BPF_FUNC_skb_adjust_room helper. */
 enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@ -2422,6 +2433,12 @@ enum bpf_lwt_encap_mode {
        BPF_LWT_ENCAP_SEG6_INLINE
 };
 
+#define __bpf_md_ptr(type, name)       \
+union {                                        \
+       type name;                      \
+       __u64 :64;                      \
+} __attribute__((aligned(8)))
+
 /* user accessible mirror of in-kernel sk_buff.
  * new fields can only be added to the end of this structure
  */
@@ -2456,7 +2473,7 @@ struct __sk_buff {
        /* ... here. */
 
        __u32 data_meta;
-       struct bpf_flow_keys *flow_keys;
+       __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
 };
 
 struct bpf_tunnel_key {
@@ -2572,8 +2589,8 @@ enum sk_action {
  * be added to the end of this structure
  */
 struct sk_msg_md {
-       void *data;
-       void *data_end;
+       __bpf_md_ptr(void *, data);
+       __bpf_md_ptr(void *, data_end);
 
        __u32 family;
        __u32 remote_ip4;       /* Stored in network byte order */
@@ -2589,8 +2606,9 @@ struct sk_reuseport_md {
         * Start of directly accessible data. It begins from
         * the tcp/udp header.
         */
-       void *data;
-       void *data_end;         /* End of directly accessible data */
+       __bpf_md_ptr(void *, data);
+       /* End of directly accessible data */
+       __bpf_md_ptr(void *, data_end);
        /*
         * Total length of packet (starting from the tcp/udp header).
         * Note that the directly accessible bytes (data_end - data)
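
The *netns* argument is widened to u64 above precisely so the new
BPF_F_CURRENT_NETNS sentinel (-1L) survives without truncation. A
BPF-program-side sketch, assuming a clang BPF target and libbpf's helper
headers; the section name, tuple values and return code are illustrative:

    /* Look up a TCP socket in the program's own netns, then release the
     * reference.  Real code must parse and bounds-check packet headers
     * before building the tuple; the constants here are made up.
     */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    SEC("tc")
    int sk_lookup_example(struct __sk_buff *skb)
    {
            struct bpf_sock_tuple tuple = {};
            struct bpf_sock *sk;

            tuple.ipv4.daddr = bpf_htonl(0x7f000001); /* 127.0.0.1 */
            tuple.ipv4.dport = bpf_htons(8080);

            sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                                   BPF_F_CURRENT_NETNS, 0);
            if (sk)
                    bpf_sk_release(sk); /* references must be released */
            return 0; /* TC_ACT_OK */
    }

    char _license[] SEC("license") = "GPL";
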
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
new file mode 100644 (file)
index 0000000..a441ea1
--- /dev/null
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_FS_H
+#define _UAPI_LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table structures
+ * and constants and structures used by various generic file system
+ * ioctl's.  Please do not make any changes in this file before
+ * sending patches for review to linux-fsdevel@vger.kernel.org and
+ * linux-api@vger.kernel.org.
+ */
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
+ * the file limit at runtime and only root can increase the per-process
+ * nr_file rlimit, so it's safe to set up a ridiculously high absolute
+ * upper limit on files-per-process.
+ *
+ * Some programs (notably those using select()) may have to be 
+ * recompiled to take full advantage of the new limits.
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define INR_OPEN_CUR 1024      /* Initial setting for nfile rlimits */
+#define INR_OPEN_MAX 4096      /* Hard limit for nfile rlimits */
+
+#define BLOCK_SIZE_BITS 10
+#define BLOCK_SIZE (1<<BLOCK_SIZE_BITS)
+
+#define SEEK_SET       0       /* seek relative to beginning of file */
+#define SEEK_CUR       1       /* seek relative to current file position */
+#define SEEK_END       2       /* seek relative to end of file */
+#define SEEK_DATA      3       /* seek to the next data */
+#define SEEK_HOLE      4       /* seek to the next hole */
+#define SEEK_MAX       SEEK_HOLE
+
+#define RENAME_NOREPLACE       (1 << 0)        /* Don't overwrite target */
+#define RENAME_EXCHANGE                (1 << 1)        /* Exchange source and dest */
+#define RENAME_WHITEOUT                (1 << 2)        /* Whiteout source */
+
+struct file_clone_range {
+       __s64 src_fd;
+       __u64 src_offset;
+       __u64 src_length;
+       __u64 dest_offset;
+};
+
+struct fstrim_range {
+       __u64 start;
+       __u64 len;
+       __u64 minlen;
+};
+
+/* extent-same (dedupe) ioctls; these MUST match the btrfs ioctl definitions */
+#define FILE_DEDUPE_RANGE_SAME         0
+#define FILE_DEDUPE_RANGE_DIFFERS      1
+
+/* from struct btrfs_ioctl_file_extent_same_info */
+struct file_dedupe_range_info {
+       __s64 dest_fd;          /* in - destination file */
+       __u64 dest_offset;      /* in - start of extent in destination */
+       __u64 bytes_deduped;    /* out - total # of bytes we were able
+                                * to dedupe from this file. */
+       /* status of this dedupe operation:
+        * < 0 for error
+        * == FILE_DEDUPE_RANGE_SAME if dedupe succeeds
+        * == FILE_DEDUPE_RANGE_DIFFERS if data differs
+        */
+       __s32 status;           /* out - see above description */
+       __u32 reserved;         /* must be zero */
+};
+
+/* from struct btrfs_ioctl_file_extent_same_args */
+struct file_dedupe_range {
+       __u64 src_offset;       /* in - start of extent in source */
+       __u64 src_length;       /* in - length of extent */
+       __u16 dest_count;       /* in - total elements in info array */
+       __u16 reserved1;        /* must be zero */
+       __u32 reserved2;        /* must be zero */
+       struct file_dedupe_range_info info[0];
+};
+
+/* And dynamically-tunable limits and defaults: */
+struct files_stat_struct {
+       unsigned long nr_files;         /* read only */
+       unsigned long nr_free_files;    /* read only */
+       unsigned long max_files;                /* tunable */
+};
+
+struct inodes_stat_t {
+       long nr_inodes;
+       long nr_unused;
+       long dummy[5];          /* padding for sysctl ABI compatibility */
+};
+
+
+#define NR_FILE  8192  /* this can well be larger on a larger system */
+
+
+/*
+ * These are the fs-independent mount-flags: up to 32 flags are supported
+ */
+#define MS_RDONLY       1      /* Mount read-only */
+#define MS_NOSUID       2      /* Ignore suid and sgid bits */
+#define MS_NODEV        4      /* Disallow access to device special files */
+#define MS_NOEXEC       8      /* Disallow program execution */
+#define MS_SYNCHRONOUS 16      /* Writes are synced at once */
+#define MS_REMOUNT     32      /* Alter flags of a mounted FS */
+#define MS_MANDLOCK    64      /* Allow mandatory locks on an FS */
+#define MS_DIRSYNC     128     /* Directory modifications are synchronous */
+#define MS_NOATIME     1024    /* Do not update access times. */
+#define MS_NODIRATIME  2048    /* Do not update directory access times */
+#define MS_BIND                4096
+#define MS_MOVE                8192
+#define MS_REC         16384
+#define MS_VERBOSE     32768   /* War is peace. Verbosity is silence.
+                                  MS_VERBOSE is deprecated. */
+#define MS_SILENT      32768
+#define MS_POSIXACL    (1<<16) /* VFS does not apply the umask */
+#define MS_UNBINDABLE  (1<<17) /* change to unbindable */
+#define MS_PRIVATE     (1<<18) /* change to private */
+#define MS_SLAVE       (1<<19) /* change to slave */
+#define MS_SHARED      (1<<20) /* change to shared */
+#define MS_RELATIME    (1<<21) /* Update atime relative to mtime/ctime. */
+#define MS_KERNMOUNT   (1<<22) /* this is a kern_mount call */
+#define MS_I_VERSION   (1<<23) /* Update inode I_version field */
+#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
+#define MS_LAZYTIME    (1<<25) /* Update the on-disk [acm]times lazily */
+
+/* These sb flags are internal to the kernel */
+#define MS_SUBMOUNT     (1<<26)
+#define MS_NOREMOTELOCK        (1<<27)
+#define MS_NOSEC       (1<<28)
+#define MS_BORN                (1<<29)
+#define MS_ACTIVE      (1<<30)
+#define MS_NOUSER      (1<<31)
+
+/*
+ * Superblock flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK    (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\
+                        MS_LAZYTIME)
+
+/*
+ * Old magic mount flag and mask
+ */
+#define MS_MGC_VAL 0xC0ED0000
+#define MS_MGC_MSK 0xffff0000
+
+/*
+ * Structure for FS_IOC_FSGETXATTR[A] and FS_IOC_FSSETXATTR.
+ */
+struct fsxattr {
+       __u32           fsx_xflags;     /* xflags field value (get/set) */
+       __u32           fsx_extsize;    /* extsize field value (get/set)*/
+       __u32           fsx_nextents;   /* nextents field value (get)   */
+       __u32           fsx_projid;     /* project identifier (get/set) */
+       __u32           fsx_cowextsize; /* CoW extsize field value (get/set)*/
+       unsigned char   fsx_pad[8];
+};
+
+/*
+ * Flags for the fsx_xflags field
+ */
+#define FS_XFLAG_REALTIME      0x00000001      /* data in realtime volume */
+#define FS_XFLAG_PREALLOC      0x00000002      /* preallocated file extents */
+#define FS_XFLAG_IMMUTABLE     0x00000008      /* file cannot be modified */
+#define FS_XFLAG_APPEND                0x00000010      /* all writes append */
+#define FS_XFLAG_SYNC          0x00000020      /* all writes synchronous */
+#define FS_XFLAG_NOATIME       0x00000040      /* do not update access time */
+#define FS_XFLAG_NODUMP                0x00000080      /* do not include in backups */
+#define FS_XFLAG_RTINHERIT     0x00000100      /* create with rt bit set */
+#define FS_XFLAG_PROJINHERIT   0x00000200      /* create with parents projid */
+#define FS_XFLAG_NOSYMLINKS    0x00000400      /* disallow symlink creation */
+#define FS_XFLAG_EXTSIZE       0x00000800      /* extent size allocator hint */
+#define FS_XFLAG_EXTSZINHERIT  0x00001000      /* inherit inode extent size */
+#define FS_XFLAG_NODEFRAG      0x00002000      /* do not defragment */
+#define FS_XFLAG_FILESTREAM    0x00004000      /* use filestream allocator */
+#define FS_XFLAG_DAX           0x00008000      /* use DAX for IO */
+#define FS_XFLAG_COWEXTSIZE    0x00010000      /* CoW extent size allocator hint */
+#define FS_XFLAG_HASATTR       0x80000000      /* no DIFLAG for this   */
+
+/* the read-only stuff doesn't really belong here, but any other place is
+   probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET   _IO(0x12,93)        /* set device read-only (0 = read-write) */
+#define BLKROGET   _IO(0x12,94)        /* get read-only status (0 = read_write) */
+#define BLKRRPART  _IO(0x12,95)        /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96)        /* return device size /512 (long *arg) */
+#define BLKFLSBUF  _IO(0x12,97)        /* flush buffer cache */
+#define BLKRASET   _IO(0x12,98)        /* set read ahead for block device */
+#define BLKRAGET   _IO(0x12,99)        /* get current read ahead setting */
+#define BLKFRASET  _IO(0x12,100)/* set filesystem (mm/filemap.c) read-ahead */
+#define BLKFRAGET  _IO(0x12,101)/* get filesystem (mm/filemap.c) read-ahead */
+#define BLKSECTSET _IO(0x12,102)/* set max sectors per request (ll_rw_blk.c) */
+#define BLKSECTGET _IO(0x12,103)/* get max sectors per request (ll_rw_blk.c) */
+#define BLKSSZGET  _IO(0x12,104)/* get block device sector size */
+#if 0
+#define BLKPG      _IO(0x12,105)/* See blkpg.h */
+
+/* Some people are morons.  Do not use sizeof! */
+
+#define BLKELVGET  _IOR(0x12,106,size_t)/* elevator get */
+#define BLKELVSET  _IOW(0x12,107,size_t)/* elevator set */
+/* This was here just to show that the number is taken -
+   probably all these _IO(0x12,*) ioctls should be moved to blkpg.h. */
+#endif
+/* A jump here: 108-111 have been used for various private purposes. */
+#define BLKBSZGET  _IOR(0x12,112,size_t)
+#define BLKBSZSET  _IOW(0x12,113,size_t)
+#define BLKGETSIZE64 _IOR(0x12,114,size_t)     /* return device size in bytes (u64 *arg) */
+#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKTRACESTART _IO(0x12,116)
+#define BLKTRACESTOP _IO(0x12,117)
+#define BLKTRACETEARDOWN _IO(0x12,118)
+#define BLKDISCARD _IO(0x12,119)
+#define BLKIOMIN _IO(0x12,120)
+#define BLKIOOPT _IO(0x12,121)
+#define BLKALIGNOFF _IO(0x12,122)
+#define BLKPBSZGET _IO(0x12,123)
+#define BLKDISCARDZEROES _IO(0x12,124)
+#define BLKSECDISCARD _IO(0x12,125)
+#define BLKROTATIONAL _IO(0x12,126)
+#define BLKZEROOUT _IO(0x12,127)
+/*
+ * A jump here: 130-131 are reserved for zoned block devices
+ * (see uapi/linux/blkzoned.h)
+ */
+
+#define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
+#define FIBMAP    _IO(0x00,1)  /* bmap access */
+#define FIGETBSZ   _IO(0x00,2) /* get the block size used for bmap */
+#define FIFREEZE       _IOWR('X', 119, int)    /* Freeze */
+#define FITHAW         _IOWR('X', 120, int)    /* Thaw */
+#define FITRIM         _IOWR('X', 121, struct fstrim_range)    /* Trim */
+#define FICLONE                _IOW(0x94, 9, int)
+#define FICLONERANGE   _IOW(0x94, 13, struct file_clone_range)
+#define FIDEDUPERANGE  _IOWR(0x94, 54, struct file_dedupe_range)
+
+#define FSLABEL_MAX 256        /* Max chars for the interface; each fs may differ */
+
+#define        FS_IOC_GETFLAGS                 _IOR('f', 1, long)
+#define        FS_IOC_SETFLAGS                 _IOW('f', 2, long)
+#define        FS_IOC_GETVERSION               _IOR('v', 1, long)
+#define        FS_IOC_SETVERSION               _IOW('v', 2, long)
+#define FS_IOC_FIEMAP                  _IOWR('f', 11, struct fiemap)
+#define FS_IOC32_GETFLAGS              _IOR('f', 1, int)
+#define FS_IOC32_SETFLAGS              _IOW('f', 2, int)
+#define FS_IOC32_GETVERSION            _IOR('v', 1, int)
+#define FS_IOC32_SETVERSION            _IOW('v', 2, int)
+#define FS_IOC_FSGETXATTR              _IOR('X', 31, struct fsxattr)
+#define FS_IOC_FSSETXATTR              _IOW('X', 32, struct fsxattr)
+#define FS_IOC_GETFSLABEL              _IOR(0x94, 49, char[FSLABEL_MAX])
+#define FS_IOC_SETFSLABEL              _IOW(0x94, 50, char[FSLABEL_MAX])
+
+/*
+ * File system encryption support
+ */
+/* Policy provided via an ioctl on the topmost directory */
+#define FS_KEY_DESCRIPTOR_SIZE 8
+
+#define FS_POLICY_FLAGS_PAD_4          0x00
+#define FS_POLICY_FLAGS_PAD_8          0x01
+#define FS_POLICY_FLAGS_PAD_16         0x02
+#define FS_POLICY_FLAGS_PAD_32         0x03
+#define FS_POLICY_FLAGS_PAD_MASK       0x03
+#define FS_POLICY_FLAGS_VALID          0x03
+
+/* Encryption algorithms */
+#define FS_ENCRYPTION_MODE_INVALID             0
+#define FS_ENCRYPTION_MODE_AES_256_XTS         1
+#define FS_ENCRYPTION_MODE_AES_256_GCM         2
+#define FS_ENCRYPTION_MODE_AES_256_CBC         3
+#define FS_ENCRYPTION_MODE_AES_256_CTS         4
+#define FS_ENCRYPTION_MODE_AES_128_CBC         5
+#define FS_ENCRYPTION_MODE_AES_128_CTS         6
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS    7 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS    8 /* Removed, do not use. */
+
+struct fscrypt_policy {
+       __u8 version;
+       __u8 contents_encryption_mode;
+       __u8 filenames_encryption_mode;
+       __u8 flags;
+       __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
+};
+
+#define FS_IOC_SET_ENCRYPTION_POLICY   _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT   _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY   _IOW('f', 21, struct fscrypt_policy)
+
+/* Parameters for passing an encryption key into the kernel keyring */
+#define FS_KEY_DESC_PREFIX             "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE                8
+
+/* Structure that userspace passes to the kernel keyring */
+#define FS_MAX_KEY_SIZE                        64
+
+struct fscrypt_key {
+       __u32 mode;
+       __u8 raw[FS_MAX_KEY_SIZE];
+       __u32 size;
+};
+
+/*
+ * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
+ *
+ * Note: for historical reasons, these flags were originally used and
+ * defined for use by ext2/ext3, and then other file systems started
+ * using these flags so they wouldn't need to write their own version
+ * of chattr/lsattr (which was shipped as part of e2fsprogs).  You
+ * should think twice before trying to use these flags in new
+ * contexts, or trying to assign these flags, since they are used both
+ * as the UAPI and the on-disk encoding for ext2/3/4.  Also, we are
+ * almost out of 32-bit flags.  :-)
+ *
+ * We have recently hoisted FS_IOC_FSGETXATTR / FS_IOC_FSSETXATTR from
+ * XFS to the generic FS level interface.  This uses a structure that
+ * has padding and hence has more room to grow, so it may be more
+ * appropriate for many new use cases.
+ *
+ * Please do not change these flags or interfaces before checking with
+ * linux-fsdevel@vger.kernel.org and linux-api@vger.kernel.org.
+ */
+#define        FS_SECRM_FL                     0x00000001 /* Secure deletion */
+#define        FS_UNRM_FL                      0x00000002 /* Undelete */
+#define        FS_COMPR_FL                     0x00000004 /* Compress file */
+#define FS_SYNC_FL                     0x00000008 /* Synchronous updates */
+#define FS_IMMUTABLE_FL                        0x00000010 /* Immutable file */
+#define FS_APPEND_FL                   0x00000020 /* writes to file may only append */
+#define FS_NODUMP_FL                   0x00000040 /* do not dump file */
+#define FS_NOATIME_FL                  0x00000080 /* do not update atime */
+/* Reserved for compression usage... */
+#define FS_DIRTY_FL                    0x00000100
+#define FS_COMPRBLK_FL                 0x00000200 /* One or more compressed clusters */
+#define FS_NOCOMP_FL                   0x00000400 /* Don't compress */
+/* End compression flags --- maybe not all used */
+#define FS_ENCRYPT_FL                  0x00000800 /* Encrypted file */
+#define FS_BTREE_FL                    0x00001000 /* btree format dir */
+#define FS_INDEX_FL                    0x00001000 /* hash-indexed directory */
+#define FS_IMAGIC_FL                   0x00002000 /* AFS directory */
+#define FS_JOURNAL_DATA_FL             0x00004000 /* Reserved for ext3 */
+#define FS_NOTAIL_FL                   0x00008000 /* file tail should not be merged */
+#define FS_DIRSYNC_FL                  0x00010000 /* dirsync behaviour (directories only) */
+#define FS_TOPDIR_FL                   0x00020000 /* Top of directory hierarchies*/
+#define FS_HUGE_FILE_FL                        0x00040000 /* Reserved for ext4 */
+#define FS_EXTENT_FL                   0x00080000 /* Extents */
+#define FS_EA_INODE_FL                 0x00200000 /* Inode used for large EA */
+#define FS_EOFBLOCKS_FL                        0x00400000 /* Reserved for ext4 */
+#define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
+#define FS_INLINE_DATA_FL              0x10000000 /* Reserved for ext4 */
+#define FS_PROJINHERIT_FL              0x20000000 /* Create with parents projid */
+#define FS_RESERVED_FL                 0x80000000 /* reserved for ext2 lib */
+
+#define FS_FL_USER_VISIBLE             0x0003DFFF /* User visible flags */
+#define FS_FL_USER_MODIFIABLE          0x000380FF /* User modifiable flags */
+
+
+#define SYNC_FILE_RANGE_WAIT_BEFORE    1
+#define SYNC_FILE_RANGE_WRITE          2
+#define SYNC_FILE_RANGE_WAIT_AFTER     4
+
+/*
+ * Flags for preadv2/pwritev2:
+ */
+
+typedef int __bitwise __kernel_rwf_t;
+
+/* high priority request, poll if possible */
+#define RWF_HIPRI      ((__force __kernel_rwf_t)0x00000001)
+
+/* per-IO O_DSYNC */
+#define RWF_DSYNC      ((__force __kernel_rwf_t)0x00000002)
+
+/* per-IO O_SYNC */
+#define RWF_SYNC       ((__force __kernel_rwf_t)0x00000004)
+
+/* per-IO, return -EAGAIN if operation would block */
+#define RWF_NOWAIT     ((__force __kernel_rwf_t)0x00000008)
+
+/* per-IO O_APPEND */
+#define RWF_APPEND     ((__force __kernel_rwf_t)0x00000010)
+
+/* mask of flags supported by the kernel */
+#define RWF_SUPPORTED  (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\
+                        RWF_APPEND)
+
+#endif /* _UAPI_LINUX_FS_H */
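
A hedged illustration of the per-IO flags above: with a glibc that
exposes preadv2() (2.26 and later), RWF_NOWAIT turns a read that would
block into an immediate -1/EAGAIN instead:

  #define _GNU_SOURCE
  #include <errno.h>
  #include <sys/uio.h>

  /* Sketch: try a non-blocking positional read; -1 with EAGAIN means
   * the data was not immediately available. */
  static ssize_t try_read(int fd, void *buf, size_t len, off_t off)
  {
          struct iovec iov = { .iov_base = buf, .iov_len = len };

          return preadv2(fd, &iov, 1, off, RWF_NOWAIT);
  }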
index 58faab897201f7c62a77ea336d4f16707b433854..1debfa42cba1a965fcca8532d8b63c0bf4cf7949 100644 (file)
@@ -287,6 +287,7 @@ enum {
        IFLA_BR_MCAST_STATS_ENABLED,
        IFLA_BR_MCAST_IGMP_VERSION,
        IFLA_BR_MCAST_MLD_VERSION,
+       IFLA_BR_VLAN_STATS_PER_PORT,
        __IFLA_BR_MAX,
 };
 
index 2875ce85b3226c824a9fdf48fc76be4a14645694..2b7a652c9fa4635b3b83d97f644a9e3ecb0866a0 100644 (file)
@@ -420,13 +420,19 @@ struct kvm_run {
 struct kvm_coalesced_mmio_zone {
        __u64 addr;
        __u32 size;
-       __u32 pad;
+       union {
+               __u32 pad;
+               __u32 pio;
+       };
 };
 
 struct kvm_coalesced_mmio {
        __u64 phys_addr;
        __u32 len;
-       __u32 pad;
+       union {
+               __u32 pad;
+               __u32 pio;
+       };
        __u8  data[8];
 };
 
@@ -751,6 +757,15 @@ struct kvm_ppc_resize_hpt {
 
 #define KVM_S390_SIE_PAGE_OFFSET 1
 
+/*
+ * On arm64, machine type can be used to request the physical
+ * address size for the VM. Bits[7-0] are reserved for the guest
+ * PA size shift (i.e., log2(PA_Size)). For backward compatibility,
+ * value 0 implies the default IPA size, 40bits.
+ */
+#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK  0xffULL
+#define KVM_VM_TYPE_ARM_IPA_SIZE(x)            \
+       ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
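
A minimal sketch (arm64 only, assuming the host advertises the
KVM_CAP_ARM_VM_IPA_SIZE capability added below) of requesting a 48-bit
guest physical address space at VM creation time:

  #include <fcntl.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /* Sketch: machine type 0 keeps the default 40-bit IPA; a non-zero
   * value in bits [7:0] requests log2 of the IPA size. */
  static int create_vm_ipa48(void)
  {
          int kvm = open("/dev/kvm", O_RDWR);

          if (kvm < 0)
                  return -1;
          return ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48));
  }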
 /*
  * ioctls for /dev/kvm fds:
  */
@@ -958,6 +973,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_HYPERV_SEND_IPI 161
 #define KVM_CAP_COALESCED_PIO 162
 #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
+#define KVM_CAP_EXCEPTION_PAYLOAD 164
+#define KVM_CAP_ARM_VM_IPA_SIZE 165
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index bfd5938fede6c1ba3b71d096cd36127da2837042..d0f515d53299ea5784ffdb61dd1b829b04fd045c 100644 (file)
@@ -28,7 +28,9 @@
 #define MAP_HUGE_2MB   HUGETLB_FLAG_ENCODE_2MB
 #define MAP_HUGE_8MB   HUGETLB_FLAG_ENCODE_8MB
 #define MAP_HUGE_16MB  HUGETLB_FLAG_ENCODE_16MB
+#define MAP_HUGE_32MB  HUGETLB_FLAG_ENCODE_32MB
 #define MAP_HUGE_256MB HUGETLB_FLAG_ENCODE_256MB
+#define MAP_HUGE_512MB HUGETLB_FLAG_ENCODE_512MB
 #define MAP_HUGE_1GB   HUGETLB_FLAG_ENCODE_1GB
 #define MAP_HUGE_2GB   HUGETLB_FLAG_ENCODE_2GB
 #define MAP_HUGE_16GB  HUGETLB_FLAG_ENCODE_16GB
index 776bc92e91180725e75f0291b1635234d6b6875f..486ed1f0c0bc17f48dca895ebf9581aa7d69278d 100644 (file)
@@ -155,6 +155,7 @@ enum nlmsgerr_attrs {
 #define NETLINK_LIST_MEMBERSHIPS       9
 #define NETLINK_CAP_ACK                        10
 #define NETLINK_EXT_ACK                        11
+#define NETLINK_DUMP_STRICT_CHK                12
 
 struct nl_pktinfo {
        __u32   group;
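
A short sketch of opting a netlink socket into the new strict dump
checking (the SOL_NETLINK fallback define is for older userspace
headers; the value matches the kernel's):

  #include <sys/socket.h>
  #include <linux/netlink.h>

  #ifndef SOL_NETLINK
  #define SOL_NETLINK 270
  #endif

  /* Sketch: make the kernel strictly validate dump requests. */
  static int enable_strict_check(int fd)
  {
          int one = 1;

          return setsockopt(fd, SOL_NETLINK, NETLINK_DUMP_STRICT_CHK,
                            &one, sizeof(one));
  }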
index f35eb72739c09e3ad0bd22e279fa4a33119c15f6..9de8780ac8d97568932d3857de3dc2c8e5de2806 100644 (file)
@@ -646,10 +646,12 @@ struct perf_event_mmap_page {
  *
  *   PERF_RECORD_MISC_MMAP_DATA  - PERF_RECORD_MMAP* events
  *   PERF_RECORD_MISC_COMM_EXEC  - PERF_RECORD_COMM event
+ *   PERF_RECORD_MISC_FORK_EXEC  - PERF_RECORD_FORK event (perf internal)
  *   PERF_RECORD_MISC_SWITCH_OUT - PERF_RECORD_SWITCH* events
  */
 #define PERF_RECORD_MISC_MMAP_DATA             (1 << 13)
 #define PERF_RECORD_MISC_COMM_EXEC             (1 << 13)
+#define PERF_RECORD_MISC_FORK_EXEC             (1 << 13)
 #define PERF_RECORD_MISC_SWITCH_OUT            (1 << 13)
 /*
  * These PERF_RECORD_MISC_* flags below are safely reused
diff --git a/tools/include/uapi/linux/pkt_cls.h b/tools/include/uapi/linux/pkt_cls.h
new file mode 100644 (file)
index 0000000..401d0c1
--- /dev/null
@@ -0,0 +1,612 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __LINUX_PKT_CLS_H
+#define __LINUX_PKT_CLS_H
+
+#include <linux/types.h>
+#include <linux/pkt_sched.h>
+
+#define TC_COOKIE_MAX_SIZE 16
+
+/* Action attributes */
+enum {
+       TCA_ACT_UNSPEC,
+       TCA_ACT_KIND,
+       TCA_ACT_OPTIONS,
+       TCA_ACT_INDEX,
+       TCA_ACT_STATS,
+       TCA_ACT_PAD,
+       TCA_ACT_COOKIE,
+       __TCA_ACT_MAX
+};
+
+#define TCA_ACT_MAX __TCA_ACT_MAX
+#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
+#define TCA_ACT_MAX_PRIO 32
+#define TCA_ACT_BIND   1
+#define TCA_ACT_NOBIND 0
+#define TCA_ACT_UNBIND 1
+#define TCA_ACT_NOUNBIND       0
+#define TCA_ACT_REPLACE                1
+#define TCA_ACT_NOREPLACE      0
+
+#define TC_ACT_UNSPEC  (-1)
+#define TC_ACT_OK              0
+#define TC_ACT_RECLASSIFY      1
+#define TC_ACT_SHOT            2
+#define TC_ACT_PIPE            3
+#define TC_ACT_STOLEN          4
+#define TC_ACT_QUEUED          5
+#define TC_ACT_REPEAT          6
+#define TC_ACT_REDIRECT                7
+#define TC_ACT_TRAP            8 /* For hw path, this means "trap to cpu"
+                                  * and don't further process the frame
+                                  * in hardware. For sw path, this is
+                                  * equivalent of TC_ACT_STOLEN - drop
+                                  * the skb and act like everything
+                                  * is alright.
+                                  */
+#define TC_ACT_VALUE_MAX       TC_ACT_TRAP
+
+/* There is a special kind of actions called "extended actions",
+ * which need a value parameter. These have a local opcode located in
+ * the highest nibble, starting from 1. The rest of the bits
+ * are used to carry the value. These two parts together make
+ * a combined opcode.
+ */
+#define __TC_ACT_EXT_SHIFT 28
+#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
+#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
+#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
+#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
+
+#define TC_ACT_JUMP __TC_ACT_EXT(1)
+#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
+#define TC_ACT_EXT_OPCODE_MAX  TC_ACT_GOTO_CHAIN
+
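To make the encoding concrete, a small sketch of building and decoding
a "goto chain" action value with the helpers above:

  /* Sketch: jump to chain 7; the opcode lives in the top nibble, the
   * chain index in the low 28 bits. */
  __u32 combined = TC_ACT_GOTO_CHAIN | 7;
  __u32 chain = combined & TC_ACT_EXT_VAL_MASK;              /* == 7 */
  int is_goto = TC_ACT_EXT_CMP(combined, TC_ACT_GOTO_CHAIN); /* != 0 */
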
+/* Action type identifiers */
+enum {
+       TCA_ID_UNSPEC=0,
+       TCA_ID_POLICE=1,
+       /* other actions go here */
+       __TCA_ID_MAX=255
+};
+
+#define TCA_ID_MAX __TCA_ID_MAX
+
+struct tc_police {
+       __u32                   index;
+       int                     action;
+#define TC_POLICE_UNSPEC       TC_ACT_UNSPEC
+#define TC_POLICE_OK           TC_ACT_OK
+#define TC_POLICE_RECLASSIFY   TC_ACT_RECLASSIFY
+#define TC_POLICE_SHOT         TC_ACT_SHOT
+#define TC_POLICE_PIPE         TC_ACT_PIPE
+
+       __u32                   limit;
+       __u32                   burst;
+       __u32                   mtu;
+       struct tc_ratespec      rate;
+       struct tc_ratespec      peakrate;
+       int                     refcnt;
+       int                     bindcnt;
+       __u32                   capab;
+};
+
+struct tcf_t {
+       __u64   install;
+       __u64   lastuse;
+       __u64   expires;
+       __u64   firstuse;
+};
+
+struct tc_cnt {
+       int                   refcnt;
+       int                   bindcnt;
+};
+
+#define tc_gen \
+       __u32                 index; \
+       __u32                 capab; \
+       int                   action; \
+       int                   refcnt; \
+       int                   bindcnt
+
+enum {
+       TCA_POLICE_UNSPEC,
+       TCA_POLICE_TBF,
+       TCA_POLICE_RATE,
+       TCA_POLICE_PEAKRATE,
+       TCA_POLICE_AVRATE,
+       TCA_POLICE_RESULT,
+       TCA_POLICE_TM,
+       TCA_POLICE_PAD,
+       __TCA_POLICE_MAX
+#define TCA_POLICE_RESULT TCA_POLICE_RESULT
+};
+
+#define TCA_POLICE_MAX (__TCA_POLICE_MAX - 1)
+
+/* tca flags definitions */
+#define TCA_CLS_FLAGS_SKIP_HW  (1 << 0) /* don't offload filter to HW */
+#define TCA_CLS_FLAGS_SKIP_SW  (1 << 1) /* don't use filter in SW */
+#define TCA_CLS_FLAGS_IN_HW    (1 << 2) /* filter is offloaded to HW */
+#define TCA_CLS_FLAGS_NOT_IN_HW (1 << 3) /* filter isn't offloaded to HW */
+#define TCA_CLS_FLAGS_VERBOSE  (1 << 4) /* verbose logging */
+
+/* U32 filters */
+
+#define TC_U32_HTID(h) ((h)&0xFFF00000)
+#define TC_U32_USERHTID(h) (TC_U32_HTID(h)>>20)
+#define TC_U32_HASH(h) (((h)>>12)&0xFF)
+#define TC_U32_NODE(h) ((h)&0xFFF)
+#define TC_U32_KEY(h) ((h)&0xFFFFF)
+#define TC_U32_UNSPEC  0
+#define TC_U32_ROOT    (0xFFF00000)
+
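The u32 handle layout packs three fields into one word; for example:

  /* Sketch: decompose handle 0x80000100 (hash table 0x800, bucket 0,
   * node 0x100) with the helpers above. */
  __u32 handle = 0x80000100;
  __u32 htid = TC_U32_USERHTID(handle);  /* == 0x800 */
  __u32 hash = TC_U32_HASH(handle);      /* == 0x0   */
  __u32 node = TC_U32_NODE(handle);      /* == 0x100 */
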
+enum {
+       TCA_U32_UNSPEC,
+       TCA_U32_CLASSID,
+       TCA_U32_HASH,
+       TCA_U32_LINK,
+       TCA_U32_DIVISOR,
+       TCA_U32_SEL,
+       TCA_U32_POLICE,
+       TCA_U32_ACT,
+       TCA_U32_INDEV,
+       TCA_U32_PCNT,
+       TCA_U32_MARK,
+       TCA_U32_FLAGS,
+       TCA_U32_PAD,
+       __TCA_U32_MAX
+};
+
+#define TCA_U32_MAX (__TCA_U32_MAX - 1)
+
+struct tc_u32_key {
+       __be32          mask;
+       __be32          val;
+       int             off;
+       int             offmask;
+};
+
+struct tc_u32_sel {
+       unsigned char           flags;
+       unsigned char           offshift;
+       unsigned char           nkeys;
+
+       __be16                  offmask;
+       __u16                   off;
+       short                   offoff;
+
+       short                   hoff;
+       __be32                  hmask;
+       struct tc_u32_key       keys[0];
+};
+
+struct tc_u32_mark {
+       __u32           val;
+       __u32           mask;
+       __u32           success;
+};
+
+struct tc_u32_pcnt {
+       __u64 rcnt;
+       __u64 rhit;
+       __u64 kcnts[0];
+};
+
+/* Flags */
+
+#define TC_U32_TERMINAL                1
+#define TC_U32_OFFSET          2
+#define TC_U32_VAROFFSET       4
+#define TC_U32_EAT             8
+
+#define TC_U32_MAXDEPTH 8
+
+
+/* RSVP filter */
+
+enum {
+       TCA_RSVP_UNSPEC,
+       TCA_RSVP_CLASSID,
+       TCA_RSVP_DST,
+       TCA_RSVP_SRC,
+       TCA_RSVP_PINFO,
+       TCA_RSVP_POLICE,
+       TCA_RSVP_ACT,
+       __TCA_RSVP_MAX
+};
+
+#define TCA_RSVP_MAX (__TCA_RSVP_MAX - 1)
+
+struct tc_rsvp_gpi {
+       __u32   key;
+       __u32   mask;
+       int     offset;
+};
+
+struct tc_rsvp_pinfo {
+       struct tc_rsvp_gpi dpi;
+       struct tc_rsvp_gpi spi;
+       __u8    protocol;
+       __u8    tunnelid;
+       __u8    tunnelhdr;
+       __u8    pad;
+};
+
+/* ROUTE filter */
+
+enum {
+       TCA_ROUTE4_UNSPEC,
+       TCA_ROUTE4_CLASSID,
+       TCA_ROUTE4_TO,
+       TCA_ROUTE4_FROM,
+       TCA_ROUTE4_IIF,
+       TCA_ROUTE4_POLICE,
+       TCA_ROUTE4_ACT,
+       __TCA_ROUTE4_MAX
+};
+
+#define TCA_ROUTE4_MAX (__TCA_ROUTE4_MAX - 1)
+
+
+/* FW filter */
+
+enum {
+       TCA_FW_UNSPEC,
+       TCA_FW_CLASSID,
+       TCA_FW_POLICE,
+       TCA_FW_INDEV, /*  used by CONFIG_NET_CLS_IND */
+       TCA_FW_ACT, /* used by CONFIG_NET_CLS_ACT */
+       TCA_FW_MASK,
+       __TCA_FW_MAX
+};
+
+#define TCA_FW_MAX (__TCA_FW_MAX - 1)
+
+/* TC index filter */
+
+enum {
+       TCA_TCINDEX_UNSPEC,
+       TCA_TCINDEX_HASH,
+       TCA_TCINDEX_MASK,
+       TCA_TCINDEX_SHIFT,
+       TCA_TCINDEX_FALL_THROUGH,
+       TCA_TCINDEX_CLASSID,
+       TCA_TCINDEX_POLICE,
+       TCA_TCINDEX_ACT,
+       __TCA_TCINDEX_MAX
+};
+
+#define TCA_TCINDEX_MAX     (__TCA_TCINDEX_MAX - 1)
+
+/* Flow filter */
+
+enum {
+       FLOW_KEY_SRC,
+       FLOW_KEY_DST,
+       FLOW_KEY_PROTO,
+       FLOW_KEY_PROTO_SRC,
+       FLOW_KEY_PROTO_DST,
+       FLOW_KEY_IIF,
+       FLOW_KEY_PRIORITY,
+       FLOW_KEY_MARK,
+       FLOW_KEY_NFCT,
+       FLOW_KEY_NFCT_SRC,
+       FLOW_KEY_NFCT_DST,
+       FLOW_KEY_NFCT_PROTO_SRC,
+       FLOW_KEY_NFCT_PROTO_DST,
+       FLOW_KEY_RTCLASSID,
+       FLOW_KEY_SKUID,
+       FLOW_KEY_SKGID,
+       FLOW_KEY_VLAN_TAG,
+       FLOW_KEY_RXHASH,
+       __FLOW_KEY_MAX,
+};
+
+#define FLOW_KEY_MAX   (__FLOW_KEY_MAX - 1)
+
+enum {
+       FLOW_MODE_MAP,
+       FLOW_MODE_HASH,
+};
+
+enum {
+       TCA_FLOW_UNSPEC,
+       TCA_FLOW_KEYS,
+       TCA_FLOW_MODE,
+       TCA_FLOW_BASECLASS,
+       TCA_FLOW_RSHIFT,
+       TCA_FLOW_ADDEND,
+       TCA_FLOW_MASK,
+       TCA_FLOW_XOR,
+       TCA_FLOW_DIVISOR,
+       TCA_FLOW_ACT,
+       TCA_FLOW_POLICE,
+       TCA_FLOW_EMATCHES,
+       TCA_FLOW_PERTURB,
+       __TCA_FLOW_MAX
+};
+
+#define TCA_FLOW_MAX   (__TCA_FLOW_MAX - 1)
+
+/* Basic filter */
+
+enum {
+       TCA_BASIC_UNSPEC,
+       TCA_BASIC_CLASSID,
+       TCA_BASIC_EMATCHES,
+       TCA_BASIC_ACT,
+       TCA_BASIC_POLICE,
+       __TCA_BASIC_MAX
+};
+
+#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
+
+
+/* Cgroup classifier */
+
+enum {
+       TCA_CGROUP_UNSPEC,
+       TCA_CGROUP_ACT,
+       TCA_CGROUP_POLICE,
+       TCA_CGROUP_EMATCHES,
+       __TCA_CGROUP_MAX,
+};
+
+#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
+
+/* BPF classifier */
+
+#define TCA_BPF_FLAG_ACT_DIRECT                (1 << 0)
+
+enum {
+       TCA_BPF_UNSPEC,
+       TCA_BPF_ACT,
+       TCA_BPF_POLICE,
+       TCA_BPF_CLASSID,
+       TCA_BPF_OPS_LEN,
+       TCA_BPF_OPS,
+       TCA_BPF_FD,
+       TCA_BPF_NAME,
+       TCA_BPF_FLAGS,
+       TCA_BPF_FLAGS_GEN,
+       TCA_BPF_TAG,
+       TCA_BPF_ID,
+       __TCA_BPF_MAX,
+};
+
+#define TCA_BPF_MAX (__TCA_BPF_MAX - 1)
+
+/* Flower classifier */
+
+enum {
+       TCA_FLOWER_UNSPEC,
+       TCA_FLOWER_CLASSID,
+       TCA_FLOWER_INDEV,
+       TCA_FLOWER_ACT,
+       TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
+       TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
+       TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
+       TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
+       TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
+       TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
+       TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
+       TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
+       TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
+       TCA_FLOWER_KEY_TCP_DST,         /* be16 */
+       TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
+       TCA_FLOWER_KEY_UDP_DST,         /* be16 */
+
+       TCA_FLOWER_FLAGS,
+       TCA_FLOWER_KEY_VLAN_ID,         /* be16 */
+       TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
+       TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
+
+       TCA_FLOWER_KEY_ENC_KEY_ID,      /* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_SRC,    /* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,/* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_DST,    /* be32 */
+       TCA_FLOWER_KEY_ENC_IPV4_DST_MASK,/* be32 */
+       TCA_FLOWER_KEY_ENC_IPV6_SRC,    /* struct in6_addr */
+       TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,/* struct in6_addr */
+       TCA_FLOWER_KEY_ENC_IPV6_DST,    /* struct in6_addr */
+       TCA_FLOWER_KEY_ENC_IPV6_DST_MASK,/* struct in6_addr */
+
+       TCA_FLOWER_KEY_TCP_SRC_MASK,    /* be16 */
+       TCA_FLOWER_KEY_TCP_DST_MASK,    /* be16 */
+       TCA_FLOWER_KEY_UDP_SRC_MASK,    /* be16 */
+       TCA_FLOWER_KEY_UDP_DST_MASK,    /* be16 */
+       TCA_FLOWER_KEY_SCTP_SRC_MASK,   /* be16 */
+       TCA_FLOWER_KEY_SCTP_DST_MASK,   /* be16 */
+
+       TCA_FLOWER_KEY_SCTP_SRC,        /* be16 */
+       TCA_FLOWER_KEY_SCTP_DST,        /* be16 */
+
+       TCA_FLOWER_KEY_ENC_UDP_SRC_PORT,        /* be16 */
+       TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK,   /* be16 */
+       TCA_FLOWER_KEY_ENC_UDP_DST_PORT,        /* be16 */
+       TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK,   /* be16 */
+
+       TCA_FLOWER_KEY_FLAGS,           /* be32 */
+       TCA_FLOWER_KEY_FLAGS_MASK,      /* be32 */
+
+       TCA_FLOWER_KEY_ICMPV4_CODE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV4_CODE_MASK,/* u8 */
+       TCA_FLOWER_KEY_ICMPV4_TYPE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV4_TYPE_MASK,/* u8 */
+       TCA_FLOWER_KEY_ICMPV6_CODE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV6_CODE_MASK,/* u8 */
+       TCA_FLOWER_KEY_ICMPV6_TYPE,     /* u8 */
+       TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
+
+       TCA_FLOWER_KEY_ARP_SIP,         /* be32 */
+       TCA_FLOWER_KEY_ARP_SIP_MASK,    /* be32 */
+       TCA_FLOWER_KEY_ARP_TIP,         /* be32 */
+       TCA_FLOWER_KEY_ARP_TIP_MASK,    /* be32 */
+       TCA_FLOWER_KEY_ARP_OP,          /* u8 */
+       TCA_FLOWER_KEY_ARP_OP_MASK,     /* u8 */
+       TCA_FLOWER_KEY_ARP_SHA,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_SHA_MASK,    /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_THA,         /* ETH_ALEN */
+       TCA_FLOWER_KEY_ARP_THA_MASK,    /* ETH_ALEN */
+
+       TCA_FLOWER_KEY_MPLS_TTL,        /* u8 - 8 bits */
+       TCA_FLOWER_KEY_MPLS_BOS,        /* u8 - 1 bit */
+       TCA_FLOWER_KEY_MPLS_TC,         /* u8 - 3 bits */
+       TCA_FLOWER_KEY_MPLS_LABEL,      /* be32 - 20 bits */
+
+       TCA_FLOWER_KEY_TCP_FLAGS,       /* be16 */
+       TCA_FLOWER_KEY_TCP_FLAGS_MASK,  /* be16 */
+
+       TCA_FLOWER_KEY_IP_TOS,          /* u8 */
+       TCA_FLOWER_KEY_IP_TOS_MASK,     /* u8 */
+       TCA_FLOWER_KEY_IP_TTL,          /* u8 */
+       TCA_FLOWER_KEY_IP_TTL_MASK,     /* u8 */
+
+       TCA_FLOWER_KEY_CVLAN_ID,        /* be16 */
+       TCA_FLOWER_KEY_CVLAN_PRIO,      /* u8   */
+       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,  /* be16 */
+
+       TCA_FLOWER_KEY_ENC_IP_TOS,      /* u8 */
+       TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
+       TCA_FLOWER_KEY_ENC_IP_TTL,      /* u8 */
+       TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
+
+       TCA_FLOWER_KEY_ENC_OPTS,
+       TCA_FLOWER_KEY_ENC_OPTS_MASK,
+
+       TCA_FLOWER_IN_HW_COUNT,
+
+       __TCA_FLOWER_MAX,
+};
+
+#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
+                                        * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
+                                        * attributes
+                                        */
+       __TCA_FLOWER_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS,            /* u16 */
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE,             /* u8 */
+       TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA,             /* 4 to 128 bytes */
+
+       __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
+               (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
+
+enum {
+       TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
+       TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
+};
+
+/* Match-all classifier */
+
+enum {
+       TCA_MATCHALL_UNSPEC,
+       TCA_MATCHALL_CLASSID,
+       TCA_MATCHALL_ACT,
+       TCA_MATCHALL_FLAGS,
+       __TCA_MATCHALL_MAX,
+};
+
+#define TCA_MATCHALL_MAX (__TCA_MATCHALL_MAX - 1)
+
+/* Extended Matches */
+
+struct tcf_ematch_tree_hdr {
+       __u16           nmatches;
+       __u16           progid;
+};
+
+enum {
+       TCA_EMATCH_TREE_UNSPEC,
+       TCA_EMATCH_TREE_HDR,
+       TCA_EMATCH_TREE_LIST,
+       __TCA_EMATCH_TREE_MAX
+};
+#define TCA_EMATCH_TREE_MAX (__TCA_EMATCH_TREE_MAX - 1)
+
+struct tcf_ematch_hdr {
+       __u16           matchid;
+       __u16           kind;
+       __u16           flags;
+       __u16           pad; /* currently unused */
+};
+
+/*  0                   1
+ *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 
+ * +-----------------------+-+-+---+
+ * |         Unused        |S|I| R |
+ * +-----------------------+-+-+---+
+ *
+ * R(2) ::= relation to next ematch
+ *          where: 0 0 END (last ematch)
+ *                 0 1 AND
+ *                 1 0 OR
+ *                 1 1 Unused (invalid)
+ * I(1) ::= invert result
+ * S(1) ::= simple payload
+ */
+#define TCF_EM_REL_END 0
+#define TCF_EM_REL_AND (1<<0)
+#define TCF_EM_REL_OR  (1<<1)
+#define TCF_EM_INVERT  (1<<2)
+#define TCF_EM_SIMPLE  (1<<3)
+
+#define TCF_EM_REL_MASK        3
+#define TCF_EM_REL_VALID(v) (((v) & TCF_EM_REL_MASK) != TCF_EM_REL_MASK)
+
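A quick sketch of how the relation bits are meant to be checked: both
REL bits set at once is the one invalid encoding, which
TCF_EM_REL_VALID rejects:

  /* Sketch: AND plus invert is fine; AND and OR together is not. */
  __u16 ok  = TCF_EM_REL_AND | TCF_EM_INVERT;
  __u16 bad = TCF_EM_REL_AND | TCF_EM_REL_OR;

  /* TCF_EM_REL_VALID(ok) != 0, TCF_EM_REL_VALID(bad) == 0 */
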
+enum {
+       TCF_LAYER_LINK,
+       TCF_LAYER_NETWORK,
+       TCF_LAYER_TRANSPORT,
+       __TCF_LAYER_MAX
+};
+#define TCF_LAYER_MAX (__TCF_LAYER_MAX - 1)
+
+/* Ematch type assignments
+ *   1..32767          Reserved for ematches inside kernel tree
+ *   32768..65535      Free to use, not reliable
+ */
+#define        TCF_EM_CONTAINER        0
+#define        TCF_EM_CMP              1
+#define        TCF_EM_NBYTE            2
+#define        TCF_EM_U32              3
+#define        TCF_EM_META             4
+#define        TCF_EM_TEXT             5
+#define        TCF_EM_VLAN             6
+#define        TCF_EM_CANID            7
+#define        TCF_EM_IPSET            8
+#define        TCF_EM_IPT              9
+#define        TCF_EM_MAX              9
+
+enum {
+       TCF_EM_PROG_TC
+};
+
+enum {
+       TCF_EM_OPND_EQ,
+       TCF_EM_OPND_GT,
+       TCF_EM_OPND_LT
+};
+
+#endif
index c0d7ea0bf5b62438ca8184551b64d5d29ad7951b..b17201edfa09a4d00b01b4b0665b67825f6078b7 100644 (file)
@@ -212,6 +212,7 @@ struct prctl_mm_map {
 #define PR_SET_SPECULATION_CTRL                53
 /* Speculation control variants */
 # define PR_SPEC_STORE_BYPASS          0
+# define PR_SPEC_INDIRECT_BRANCH       1
 /* Return and control values for PR_SET/GET_SPECULATION_CTRL */
 # define PR_SPEC_NOT_AFFECTED          0
 # define PR_SPEC_PRCTL                 (1UL << 0)
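
A hedged sketch of the new variant in use (PR_SPEC_DISABLE is defined
alongside these in the full header, next to PR_SPEC_ENABLE):

  #include <sys/prctl.h>

  /* Sketch: disable indirect-branch speculation for this task. */
  static int harden_indirect_branch(void)
  {
          return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
                       PR_SPEC_DISABLE, 0, 0);
  }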
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
new file mode 100644 (file)
index 0000000..6e89a5d
--- /dev/null
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_TC_BPF_H
+#define __LINUX_TC_BPF_H
+
+#include <linux/pkt_cls.h>
+
+#define TCA_ACT_BPF 13
+
+struct tc_act_bpf {
+       tc_gen;
+};
+
+enum {
+       TCA_ACT_BPF_UNSPEC,
+       TCA_ACT_BPF_TM,
+       TCA_ACT_BPF_PARMS,
+       TCA_ACT_BPF_OPS_LEN,
+       TCA_ACT_BPF_OPS,
+       TCA_ACT_BPF_FD,
+       TCA_ACT_BPF_NAME,
+       TCA_ACT_BPF_PAD,
+       TCA_ACT_BPF_TAG,
+       TCA_ACT_BPF_ID,
+       __TCA_ACT_BPF_MAX,
+};
+#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
+
+#endif
index ed0a120d4f084fa0cfc10b5257ea9bce0c3d3a24..404d4b9ffe7644553a1b59fba043b151d935a2e9 100644 (file)
@@ -752,7 +752,7 @@ struct snd_timer_info {
 #define SNDRV_TIMER_PSFLG_EARLY_EVENT  (1<<2)  /* write early event to the poll queue */
 
 struct snd_timer_params {
-       unsigned int flags;             /* flags - SNDRV_MIXER_PSFLG_* */
+       unsigned int flags;             /* flags - SNDRV_TIMER_PSFLG_* */
        unsigned int ticks;             /* requested resolution in ticks */
        unsigned int queue_size;        /* total size of queue (32-1024) */
        unsigned int reserved0;         /* reserved, was: failure locations */
index b607be7236d3e580fda376f04cbe62302f698e4f..d6e62e90e8d44df2a3b710b6cfd875b32e20daec 100644 (file)
@@ -2084,19 +2084,19 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
        prog->expected_attach_type = type;
 }
 
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, atype) \
-       { string, sizeof(string) - 1, ptype, eatype, atype }
+#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
+       { string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
 
 /* Programs that can NOT be attached. */
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, -EINVAL)
+#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
 
 /* Programs that can be attached. */
 #define BPF_APROG_SEC(string, ptype, atype) \
-       BPF_PROG_SEC_IMPL(string, ptype, 0, atype)
+       BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
 
 /* Programs that must specify expected attach type at load time. */
 #define BPF_EAPROG_SEC(string, ptype, eatype) \
-       BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype)
+       BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
 
 /* Programs that can be attached but attach type can't be identified by section
  * name. Kept for backward compatibility.
@@ -2108,6 +2108,7 @@ static const struct {
        size_t len;
        enum bpf_prog_type prog_type;
        enum bpf_attach_type expected_attach_type;
+       int is_attachable;
        enum bpf_attach_type attach_type;
 } section_names[] = {
        BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
@@ -2198,7 +2199,7 @@ int libbpf_attach_type_by_name(const char *name,
        for (i = 0; i < ARRAY_SIZE(section_names); i++) {
                if (strncmp(name, section_names[i].sec, section_names[i].len))
                        continue;
-               if (section_names[i].attach_type == -EINVAL)
+               if (!section_names[i].is_attachable)
                        return -EINVAL;
                *attach_type = section_names[i].attach_type;
                return 0;
index cb7154eccbdc1e6a825060b137155ce4560df2a4..dbb9efbf718a065d4d6e9998fa4d147b1cc691fc 100644 (file)
@@ -116,6 +116,7 @@ static int get_value(struct parse_opt_ctx_t *p,
                case OPTION_INTEGER:
                case OPTION_UINTEGER:
                case OPTION_LONG:
+               case OPTION_ULONG:
                case OPTION_U64:
                default:
                        break;
@@ -166,6 +167,7 @@ static int get_value(struct parse_opt_ctx_t *p,
                case OPTION_INTEGER:
                case OPTION_UINTEGER:
                case OPTION_LONG:
+               case OPTION_ULONG:
                case OPTION_U64:
                default:
                        break;
@@ -295,6 +297,22 @@ static int get_value(struct parse_opt_ctx_t *p,
                        return opterror(opt, "expects a numerical value", flags);
                return 0;
 
+       case OPTION_ULONG:
+               if (unset) {
+                       *(unsigned long *)opt->value = 0;
+                       return 0;
+               }
+               if (opt->flags & PARSE_OPT_OPTARG && !p->opt) {
+                       *(unsigned long *)opt->value = opt->defval;
+                       return 0;
+               }
+               if (get_arg(p, opt, flags, &arg))
+                       return -1;
+               *(unsigned long *)opt->value = strtoul(arg, (char **)&s, 10);
+               if (*s)
+                       return opterror(opt, "expects a numerical value", flags);
+               return 0;
+
        case OPTION_U64:
                if (unset) {
                        *(u64 *)opt->value = 0;
@@ -703,6 +721,7 @@ static void print_option_help(const struct option *opts, int full)
        case OPTION_ARGUMENT:
                break;
        case OPTION_LONG:
+       case OPTION_ULONG:
        case OPTION_U64:
        case OPTION_INTEGER:
        case OPTION_UINTEGER:
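
For context, a sketch of how a tool would consume the new type via the
OPT_ULONG() helper added to parse-options.h below (the option names
here are illustrative):

  static unsigned long nr_items;

  static const struct option options[] = {
          OPT_ULONG('n', "nr-items", &nr_items,
                    "number of items to process"),
          OPT_END()
  };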
index 92fdbe1519f6d8f2aebfd8cf05a119bea7be344a..6ca2a8bfe716b1c658f693b29c64b2dd0b79642e 100644 (file)
@@ -25,6 +25,7 @@ enum parse_opt_type {
        OPTION_STRING,
        OPTION_INTEGER,
        OPTION_LONG,
+       OPTION_ULONG,
        OPTION_CALLBACK,
        OPTION_U64,
        OPTION_UINTEGER,
@@ -133,6 +134,7 @@ struct option {
 #define OPT_INTEGER(s, l, v, h)     { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
 #define OPT_UINTEGER(s, l, v, h)    { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
 #define OPT_LONG(s, l, v, h)        { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
+#define OPT_ULONG(s, l, v, h)        { .type = OPTION_ULONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned long *), .help = (h) }
 #define OPT_U64(s, l, v, h)         { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
 #define OPT_STRING(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), .argh = (a), .help = (h) }
 #define OPT_STRING_OPTARG(s, l, v, a, h, d) \
index 2928939b98ec208fee9069cfc3ddf4aea91f01f3..0414a0d522621d4ca973240979e89c07d6cd4f8a 100644 (file)
@@ -836,7 +836,7 @@ static int add_switch_table(struct objtool_file *file, struct instruction *insn,
        struct symbol *pfunc = insn->func->pfunc;
        unsigned int prev_offset = 0;
 
-       list_for_each_entry_from(rela, &file->rodata->rela->rela_list, list) {
+       list_for_each_entry_from(rela, &table->rela_sec->rela_list, list) {
                if (rela == next_table)
                        break;
 
@@ -926,6 +926,7 @@ static struct rela *find_switch_table(struct objtool_file *file,
 {
        struct rela *text_rela, *rodata_rela;
        struct instruction *orig_insn = insn;
+       struct section *rodata_sec;
        unsigned long table_offset;
 
        /*
@@ -953,10 +954,13 @@ static struct rela *find_switch_table(struct objtool_file *file,
                /* look for a relocation which references .rodata */
                text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
                                                    insn->len);
-               if (!text_rela || text_rela->sym != file->rodata->sym)
+               if (!text_rela || text_rela->sym->type != STT_SECTION ||
+                   !text_rela->sym->sec->rodata)
                        continue;
 
                table_offset = text_rela->addend;
+               rodata_sec = text_rela->sym->sec;
+
                if (text_rela->type == R_X86_64_PC32)
                        table_offset += 4;
 
@@ -964,10 +968,10 @@ static struct rela *find_switch_table(struct objtool_file *file,
                 * Make sure the .rodata address isn't associated with a
                 * symbol.  gcc jump tables are anonymous data.
                 */
-               if (find_symbol_containing(file->rodata, table_offset))
+               if (find_symbol_containing(rodata_sec, table_offset))
                        continue;
 
-               rodata_rela = find_rela_by_dest(file->rodata, table_offset);
+               rodata_rela = find_rela_by_dest(rodata_sec, table_offset);
                if (rodata_rela) {
                        /*
                         * Use of RIP-relative switch jumps is quite rare, and
@@ -1052,7 +1056,7 @@ static int add_switch_table_alts(struct objtool_file *file)
        struct symbol *func;
        int ret;
 
-       if (!file->rodata || !file->rodata->rela)
+       if (!file->rodata)
                return 0;
 
        for_each_sec(file, sec) {
@@ -1198,10 +1202,33 @@ static int read_retpoline_hints(struct objtool_file *file)
        return 0;
 }
 
+static void mark_rodata(struct objtool_file *file)
+{
+       struct section *sec;
+       bool found = false;
+
+       /*
+        * This searches for the .rodata section or multiple .rodata.func_name
+        * sections if -fdata-sections is being used. The .str1.1 and .str1.8
+        * rodata sections are ignored as they don't contain jump tables.
+        */
+       for_each_sec(file, sec) {
+               if (!strncmp(sec->name, ".rodata", 7) &&
+                   !strstr(sec->name, ".str1.")) {
+                       sec->rodata = true;
+                       found = true;
+               }
+       }
+
+       file->rodata = found;
+}
+
 static int decode_sections(struct objtool_file *file)
 {
        int ret;
 
+       mark_rodata(file);
+
        ret = decode_instructions(file);
        if (ret)
                return ret;
@@ -2171,7 +2198,6 @@ int check(const char *_objname, bool orc)
        INIT_LIST_HEAD(&file.insn_list);
        hash_init(file.insn_hash);
        file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard");
-       file.rodata = find_section_by_name(file.elf, ".rodata");
        file.c_file = find_section_by_name(file.elf, ".comment");
        file.ignore_unreachables = no_unreachable;
        file.hints = false;
index 95700a2bcb7c1ee429c6b2e0270428a5bfb8b54a..e6e8a655b5563e84bcd5d67eee13a446db81cc31 100644 (file)
@@ -60,8 +60,8 @@ struct objtool_file {
        struct elf *elf;
        struct list_head insn_list;
        DECLARE_HASHTABLE(insn_hash, 16);
-       struct section *rodata, *whitelist;
-       bool ignore_unreachables, c_file, hints;
+       struct section *whitelist;
+       bool ignore_unreachables, c_file, hints, rodata;
 };
 
 int check(const char *objname, bool orc);
index 7ec85d567598c5047fbe00b9660c9e7fc76870cf..b8f3cca8e58b4ec327876c7fd3173a4a3ae6c31d 100644 (file)
@@ -31,6 +31,8 @@
 #include "elf.h"
 #include "warn.h"
 
+#define MAX_NAME_LEN 128
+
 struct section *find_section_by_name(struct elf *elf, const char *name)
 {
        struct section *sec;
@@ -298,21 +300,30 @@ static int read_symbols(struct elf *elf)
        /* Create parent/child links for any cold subfunctions */
        list_for_each_entry(sec, &elf->sections, list) {
                list_for_each_entry(sym, &sec->symbol_list, list) {
+                       char pname[MAX_NAME_LEN + 1];
+                       size_t pnamelen;
                        if (sym->type != STT_FUNC)
                                continue;
                        sym->pfunc = sym->cfunc = sym;
-                       coldstr = strstr(sym->name, ".cold.");
+                       coldstr = strstr(sym->name, ".cold");
                        if (!coldstr)
                                continue;
 
-                       coldstr[0] = '\0';
-                       pfunc = find_symbol_by_name(elf, sym->name);
-                       coldstr[0] = '.';
+                       pnamelen = coldstr - sym->name;
+                       if (pnamelen > MAX_NAME_LEN) {
+                               WARN("%s(): parent function name exceeds maximum length of %d characters",
+                                    sym->name, MAX_NAME_LEN);
+                               return -1;
+                       }
+
+                       strncpy(pname, sym->name, pnamelen);
+                       pname[pnamelen] = '\0';
+                       pfunc = find_symbol_by_name(elf, pname);
 
                        if (!pfunc) {
                                WARN("%s(): can't find parent function",
                                     sym->name);
-                               goto err;
+                               return -1;
                        }
 
                        sym->pfunc = pfunc;
@@ -379,6 +390,7 @@ static int read_relas(struct elf *elf)
                        rela->offset = rela->rela.r_offset;
                        symndx = GELF_R_SYM(rela->rela.r_info);
                        rela->sym = find_symbol_by_index(elf, symndx);
+                       rela->rela_sec = sec;
                        if (!rela->sym) {
                                WARN("can't find rela entry symbol %d for %s",
                                     symndx, sec->name);
index de5cd2ddded987bf524be46e446bd1e814422761..bc97ed86b9cd8ebd3fc8e9e1512d8d06b3e96d14 100644 (file)
@@ -48,7 +48,7 @@ struct section {
        char *name;
        int idx;
        unsigned int len;
-       bool changed, text;
+       bool changed, text, rodata;
 };
 
 struct symbol {
@@ -68,6 +68,7 @@ struct rela {
        struct list_head list;
        struct hlist_node hash;
        GElf_Rela rela;
+       struct section *rela_sec;
        struct symbol *sym;
        unsigned int type;
        unsigned long offset;
diff --git a/tools/perf/Documentation/build-xed.txt b/tools/perf/Documentation/build-xed.txt
new file mode 100644 (file)
index 0000000..6222c1e
--- /dev/null
@@ -0,0 +1,19 @@
+
+For --xed, the xed tool is needed. Here is how to install it:
+
+  $ git clone https://github.com/intelxed/mbuild.git mbuild
+  $ git clone https://github.com/intelxed/xed
+  $ cd xed
+  $ ./mfile.py --share
+  $ ./mfile.py examples
+  $ sudo ./mfile.py --prefix=/usr/local install
+  $ sudo ldconfig
+  $ sudo cp obj/examples/xed /usr/local/bin
+
+Basic xed testing:
+
+  $ xed | head -3
+  ERROR: required argument(s) were missing
+  Copyright (C) 2017, Intel Corporation. All rights reserved.
+  XED version: [v10.0-328-g7d62c8c49b7b]
+  $
index 76971d2e416450c24fbb24bb51db584da7661180..115eaacc455fdb020ca34f3c16fb1bcad092b9d1 100644 (file)
@@ -106,7 +106,7 @@ in transaction, respectively.
 While it is possible to create scripts to analyze the data, an alternative
 approach is available to export the data to a sqlite or postgresql database.
 Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,
-and to script call-graph-from-sql.py for an example of using the database.
+and to script exported-sql-viewer.py for an example of using the database.
 
 There is also script intel-pt-events.py which provides an example of how to
 unpack the raw data for power events and PTWRITE.
index a3abe04c779d03615a9ba3815337bf1345b45082..c2182cbabde3a07196b26e44a3a8c6a0165d3cb4 100644 (file)
                l       synthesize last branch entries (use with i or x)
                s       skip initial number of events
 
-       The default is all events i.e. the same as --itrace=ibxwpe
+       The default is all events, i.e. the same as --itrace=ibxwpe,
+       except for perf script where it is --itrace=ce
 
-       In addition, the period (default 100000) for instructions events
-       can be specified in units of:
+       In addition, the period (default 100000, except for perf script where it is 1)
+       for instructions events can be specified in units of:
 
                i       instructions
                t       ticks
index 236b9b97dfdb1d5d52d6cc9dcb9198cfd2ec1739..667c14e56031b5c3ac91c291aed3c876644d1247 100644 (file)
@@ -55,7 +55,6 @@ counted. The following modifiers exist:
  S - read sample value (PERF_SAMPLE_READ)
  D - pin the event to the PMU
  W - group is weak and will fallback to non-group if not schedulable,
-     only supported in 'perf stat' for now.
 
 The 'p' modifier can be used for specifying how precise the instruction
 address should be. The 'p' modifier can be specified multiple times:
index afdafe2110a17adea848871e033a85f85972b4a1..a2b37ce48094de7e66bc3479c9aced16ef128002 100644 (file)
@@ -383,6 +383,24 @@ include::itrace.txt[]
        will be printed. Each entry has function name and file/line. Enabled by
        default, disable with --no-inline.
 
+--insn-trace::
+       Show instruction stream for intel_pt traces. Combine with --xed to
+       show disassembly.
+
+--xed::
+       Run xed disassembler on output. Requires installing the xed disassembler.
+
+--call-trace::
+       Show call stream for intel_pt traces. The CPUs are interleaved, but
+       can be filtered with -C.
+
+--call-ret-trace::
+       Show call and return stream for intel_pt traces.
+
+--graph-function::
+       For itrace, only show the specified functions and their callees.
+       Multiple functions can be separated by commas.
+
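+A possible invocation combining these options (illustrative; assumes an
+existing Intel PT recording):
+
+  $ perf record -e intel_pt//u -- ls
+  $ perf script --insn-trace --xed
+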
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
index 114fda12aa490089c16802282ed598f51303bd10..808b664343c9f5faa9a07d5327945926d844c7cb 100644 (file)
@@ -242,6 +242,16 @@ Default is to monitor all CPUS.
 --hierarchy::
        Enable hierarchy output.
 
+--overwrite::
+       Enable this to use just the most recent records, which helps in high core count
+       machines such as Knights Landing/Mill, but right now is disabled by default as
+       the pausing used in this technique is leading to loss of metadata events such
+       as PERF_RECORD_MMAP which makes 'perf top' unable to resolve samples, leading
+       to lots of unknown samples appearing on the UI. Enable this if you are on such
+       machines and profiling a workload that doesn't create short lived threads and/or
+       doesn't use many executable mmap operations. Work is being planned to solve
+       this situation; until then, this will remain disabled by default.
+
 --force::
        Don't do ownership validation.
 
index 115db9e06ecd8a8d3f53059b8a8aa28c2337584a..e113450503d2f6fdf63d356adff4b9c580bd640d 100644 (file)
@@ -171,6 +171,11 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 --kernel-syscall-graph::
         Show the kernel callchains on the syscall exit path.
 
+--max-events=N::
+       Stop after processing N events. Note that strace-like events are considered
+       only at exit time or when a syscall is interrupted, i.e. in those cases this
+       option is equivalent to the number of lines printed.
+
 --max-stack::
         Set the stack depth limit when parsing the callchain, anything
         beyond the specified depth will be ignored. Note that at this point
@@ -238,6 +243,68 @@ Trace syscalls, major and minor pagefaults:
   As you can see, there was major pagefault in python process, from
   CRYPTO_push_info_ routine which faulted somewhere in libcrypto.so.
 
+Trace the first 4 open, openat or open_by_handle_at syscalls (in the future more syscalls may match here):
+
+  $ perf trace -e open* --max-events 4
+  2272.992 ( 0.037 ms): gnome-shell/1370 openat(dfd: CWD, filename: /proc/self/stat) = 31
+  2277.481 ( 0.139 ms): gnome-shell/3039 openat(dfd: CWD, filename: /proc/self/stat) = 65
+  3026.398 ( 0.076 ms): gnome-shell/3039 openat(dfd: CWD, filename: /proc/self/stat) = 65
+  4294.665 ( 0.015 ms): sed/15879 openat(dfd: CWD, filename: /etc/ld.so.cache, flags: CLOEXEC) = 3
+  $
+
+Trace the first minor page fault when running a workload:
+
+  # perf trace -F min --max-stack=7 --max-events 1 sleep 1
+     0.000 ( 0.000 ms): sleep/18006 minfault [__clear_user+0x1a] => 0x5626efa56080 (?k)
+                                       __clear_user ([kernel.kallsyms])
+                                       load_elf_binary ([kernel.kallsyms])
+                                       search_binary_handler ([kernel.kallsyms])
+                                       __do_execve_file.isra.33 ([kernel.kallsyms])
+                                       __x64_sys_execve ([kernel.kallsyms])
+                                       do_syscall_64 ([kernel.kallsyms])
+                                       entry_SYSCALL_64 ([kernel.kallsyms])
+  #
+
+Trace the next minor page fault to take place on the first CPU:
+
+  # perf trace -F min --call-graph=dwarf --max-events 1 --cpu 0
+     0.000 ( 0.000 ms): Web Content/17136 minfault [js::gc::Chunk::fetchNextDecommittedArena+0x4b] => 0x7fbe6181b000 (?.)
+                                       js::gc::FreeSpan::initAsEmpty (inlined)
+                                       js::gc::Arena::setAsNotAllocated (inlined)
+                                       js::gc::Chunk::fetchNextDecommittedArena (/usr/lib64/firefox/libxul.so)
+                                       js::gc::Chunk::allocateArena (/usr/lib64/firefox/libxul.so)
+                                       js::gc::GCRuntime::allocateArena (/usr/lib64/firefox/libxul.so)
+                                       js::gc::ArenaLists::allocateFromArena (/usr/lib64/firefox/libxul.so)
+                                       js::gc::GCRuntime::tryNewTenuredThing<JSString, (js::AllowGC)1> (inlined)
+                                       js::AllocateString<JSString, (js::AllowGC)1> (/usr/lib64/firefox/libxul.so)
+                                       js::Allocate<JSThinInlineString, (js::AllowGC)1> (inlined)
+                                       JSThinInlineString::new_<(js::AllowGC)1> (inlined)
+                                       AllocateInlineString<(js::AllowGC)1, unsigned char> (inlined)
+                                       js::ConcatStrings<(js::AllowGC)1> (/usr/lib64/firefox/libxul.so)
+                                       [0x18b26e6bc2bd] (/tmp/perf-17136.map)
+  #
+
+Trace the next two sched:sched_switch events, four block:*_plug events, the
+next block:*_unplug and the next three net:*dev_queue events, this last one
+with a backtrace of at most 16 entries, system wide:
+
+  # perf trace -e sched:*switch/nr=2/,block:*_plug/nr=4/,block:*_unplug/nr=1/,net:*dev_queue/nr=3,max-stack=16/
+     0.000 :0/0 sched:sched_switch:swapper/2:0 [120] S ==> rcu_sched:10 [120]
+     0.015 rcu_sched/10 sched:sched_switch:rcu_sched:10 [120] R ==> swapper/2:0 [120]
+   254.198 irq/50-iwlwifi/680 net:net_dev_queue:dev=wlp3s0 skbaddr=0xffff93498051f600 len=66
+                                       __dev_queue_xmit ([kernel.kallsyms])
+   273.977 :0/0 net:net_dev_queue:dev=wlp3s0 skbaddr=0xffff93498051f600 len=78
+                                       __dev_queue_xmit ([kernel.kallsyms])
+   274.007 :0/0 net:net_dev_queue:dev=wlp3s0 skbaddr=0xffff93498051ff00 len=78
+                                       __dev_queue_xmit ([kernel.kallsyms])
+  2930.140 kworker/u16:58/2722 block:block_plug:[kworker/u16:58]
+  2930.162 kworker/u16:58/2722 block:block_unplug:[kworker/u16:58] 1
+  4466.094 jbd2/dm-2-8/748 block:block_plug:[jbd2/dm-2-8]
+  8050.123 kworker/u16:30/2694 block:block_plug:[kworker/u16:30]
+  8050.271 kworker/u16:30/2694 block:block_plug:[kworker/u16:30]
+  #
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script[1]
index e30d20fb482d0a705bc41e49d3d8b63f9da7737f..a0e8c23f91255960ef10af2f7485a8a1f25d8985 100644 (file)
@@ -299,6 +299,11 @@ ifndef NO_BIONIC
   endif
 endif
 
+ifeq ($(feature-get_current_dir_name), 1)
+  CFLAGS += -DHAVE_GET_CURRENT_DIR_NAME
+endif
+
 ifdef NO_LIBELF
   NO_DWARF := 1
   NO_DEMANGLE := 1
index 2f3bf025e3050f94252c5ebd0a2ebdd18a5ce819..d95655489f7e17adcd16dcca6a772875f6d1c6b2 100644 (file)
@@ -1,4 +1,5 @@
 include ../scripts/Makefile.include
+include ../scripts/Makefile.arch
 
 # The default target of this Makefile is...
 all:
@@ -385,6 +386,8 @@ export INSTALL SHELL_PATH
 SHELL = $(SHELL_PATH)
 
 linux_uapi_dir := $(srctree)/tools/include/uapi/linux
+asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
+arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
 
 beauty_outdir := $(OUTPUT)trace/beauty/generated
 beauty_ioctl_outdir := $(beauty_outdir)/ioctl
@@ -460,6 +463,18 @@ madvise_behavior_tbl := $(srctree)/tools/perf/trace/beauty/madvise_behavior.sh
 $(madvise_behavior_array): $(madvise_hdr_dir)/mman-common.h $(madvise_behavior_tbl)
        $(Q)$(SHELL) '$(madvise_behavior_tbl)' $(madvise_hdr_dir) > $@
 
+mmap_flags_array := $(beauty_outdir)/mmap_flags_array.c
+mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
+
+$(mmap_flags_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(arch_asm_uapi_dir)/mman.h $(mmap_flags_tbl)
+       $(Q)$(SHELL) '$(mmap_flags_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+
+mount_flags_array := $(beauty_outdir)/mount_flags_array.c
+mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
+
+$(mount_flags_array): $(linux_uapi_dir)/fs.h $(mount_flags_tbl)
+       $(Q)$(SHELL) '$(mount_flags_tbl)' $(linux_uapi_dir) > $@
+
 prctl_option_array := $(beauty_outdir)/prctl_option_array.c
 prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/
 prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
@@ -577,6 +592,8 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
        $(socket_ipproto_array) \
        $(vhost_virtio_ioctl_array) \
        $(madvise_behavior_array) \
+       $(mmap_flags_array) \
+       $(mount_flags_array) \
        $(perf_ioctl_array) \
        $(prctl_option_array) \
        $(arch_errno_name_array)
@@ -863,6 +880,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
                $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
                $(OUTPUT)pmu-events/pmu-events.c \
                $(OUTPUT)$(madvise_behavior_array) \
+               $(OUTPUT)$(mmap_flags_array) \
+               $(OUTPUT)$(mount_flags_array) \
                $(OUTPUT)$(drm_ioctl_array) \
                $(OUTPUT)$(pkey_alloc_access_rights_array) \
                $(OUTPUT)$(sndrv_ctl_ioctl_array) \
index 2dbb8cade048f76b4b43d88d5d9e27c09e025e0f..c88fd32563ebc013e6f261f072dd57e37444e236 100755 (executable)
@@ -23,7 +23,7 @@ create_table_from_c()
 {
        local sc nr last_sc
 
-       create_table_exe=`mktemp /tmp/create-table-XXXXXX`
+       create_table_exe=`mktemp ${TMPDIR:-/tmp}/create-table-XXXXXX`
 
        {
 
index 7fbca175099ec917ad69b8025c8249ee6c52a6a4..275dea7ff59a092b96561706306a36860955ceb9 100644 (file)
@@ -1,3 +1,5 @@
 ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
+
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/sparc/annotate/instructions.c b/tools/perf/arch/sparc/annotate/instructions.c
new file mode 100644 (file)
index 0000000..2614c01
--- /dev/null
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+static int is_branch_cond(const char *cond)
+{
+       if (cond[0] == '\0')
+               return 1;
+
+       if (cond[0] == 'a' && cond[1] == '\0')
+               return 1;
+
+       if (cond[0] == 'c' &&
+           (cond[1] == 'c' || cond[1] == 's') &&
+           cond[2] == '\0')
+               return 1;
+
+       if (cond[0] == 'e' &&
+           (cond[1] == '\0' ||
+            (cond[1] == 'q' && cond[2] == '\0')))
+               return 1;
+
+       if (cond[0] == 'g' &&
+           (cond[1] == '\0' ||
+            (cond[1] == 't' && cond[2] == '\0') ||
+            (cond[1] == 'e' && cond[2] == '\0') ||
+            (cond[1] == 'e' && cond[2] == 'u' && cond[3] == '\0')))
+               return 1;
+
+       if (cond[0] == 'l' &&
+           (cond[1] == '\0' ||
+            (cond[1] == 't' && cond[2] == '\0') ||
+            (cond[1] == 'u' && cond[2] == '\0') ||
+            (cond[1] == 'e' && cond[2] == '\0') ||
+            (cond[1] == 'e' && cond[2] == 'u' && cond[3] == '\0')))
+               return 1;
+
+       if (cond[0] == 'n' &&
+           (cond[1] == '\0' ||
+            (cond[1] == 'e' && cond[2] == '\0') ||
+            (cond[1] == 'z' && cond[2] == '\0') ||
+            (cond[1] == 'e' && cond[2] == 'g' && cond[3] == '\0')))
+               return 1;
+
+       if (cond[0] == 'b' &&
+           cond[1] == 'p' &&
+           cond[2] == 'o' &&
+           cond[3] == 's' &&
+           cond[4] == '\0')
+               return 1;
+
+       if (cond[0] == 'v' &&
+           (cond[1] == 'c' || cond[1] == 's') &&
+           cond[2] == '\0')
+               return 1;
+
+       if (cond[0] == 'b' &&
+           cond[1] == 'z' &&
+           cond[2] == '\0')
+               return 1;
+
+       return 0;
+}
+
+static int is_branch_reg_cond(const char *cond)
+{
+       if ((cond[0] == 'n' || cond[0] == 'l') &&
+           cond[1] == 'z' &&
+           cond[2] == '\0')
+               return 1;
+
+       if (cond[0] == 'z' &&
+           cond[1] == '\0')
+               return 1;
+
+       if ((cond[0] == 'g' || cond[0] == 'l') &&
+           cond[1] == 'e' &&
+           cond[2] == 'z' &&
+           cond[3] == '\0')
+               return 1;
+
+       if (cond[0] == 'g' &&
+           cond[1] == 'z' &&
+           cond[2] == '\0')
+               return 1;
+
+       return 0;
+}
+
+static int is_branch_float_cond(const char *cond)
+{
+       if (cond[0] == '\0')
+               return 1;
+
+       if ((cond[0] == 'a' || cond[0] == 'e' ||
+            cond[0] == 'z' || cond[0] == 'g' ||
+            cond[0] == 'l' || cond[0] == 'n' ||
+            cond[0] == 'o' || cond[0] == 'u') &&
+           cond[1] == '\0')
+               return 1;
+
+       if (((cond[0] == 'g' && cond[1] == 'e') ||
+            (cond[0] == 'l' && (cond[1] == 'e' ||
+                                cond[1] == 'g')) ||
+            (cond[0] == 'n' && (cond[1] == 'e' ||
+                                cond[1] == 'z')) ||
+            (cond[0] == 'u' && (cond[1] == 'e' ||
+                                cond[1] == 'g' ||
+                                cond[1] == 'l'))) &&
+           cond[2] == '\0')
+               return 1;
+
+       if (cond[0] == 'u' &&
+           (cond[1] == 'g' || cond[1] == 'l') &&
+           cond[2] == 'e' &&
+           cond[3] == '\0')
+               return 1;
+
+       return 0;
+}
+
+static struct ins_ops *sparc__associate_instruction_ops(struct arch *arch, const char *name)
+{
+       struct ins_ops *ops = NULL;
+
+       if (!strcmp(name, "call") ||
+           !strcmp(name, "jmp") ||
+           !strcmp(name, "jmpl")) {
+               ops = &call_ops;
+       } else if (!strcmp(name, "ret") ||
+                  !strcmp(name, "retl") ||
+                  !strcmp(name, "return")) {
+               ops = &ret_ops;
+       } else if (!strcmp(name, "mov")) {
+               ops = &mov_ops;
+       } else {
+               if (name[0] == 'c' &&
+                   (name[1] == 'w' || name[1] == 'x'))
+                       name += 2;
+
+               if (name[0] == 'b') {
+                       const char *cond = name + 1;
+
+                       if (cond[0] == 'r') {
+                               if (is_branch_reg_cond(cond + 1))
+                                       ops = &jump_ops;
+                       } else if (is_branch_cond(cond)) {
+                               ops = &jump_ops;
+                       }
+               } else if (name[0] == 'f' && name[1] == 'b') {
+                       if (is_branch_float_cond(name + 2))
+                               ops = &jump_ops;
+               }
+       }
+
+       if (ops)
+               arch__associate_ins_ops(arch, name, ops);
+
+       return ops;
+}
+
+static int sparc__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
+{
+       if (!arch->initialized) {
+               arch->initialized = true;
+               arch->associate_instruction_ops = sparc__associate_instruction_ops;
+               arch->objdump.comment_char = '#';
+       }
+
+       return 0;
+}
index 0980dfe3396b188c3dd5692ad673bae137cb357c..488779bc4c8d2f6ed8dbcad69e1de5e477ede138 100644 (file)
@@ -391,7 +391,12 @@ try_again:
                                        ui__warning("%s\n", msg);
                                goto try_again;
                        }
-
+                       if ((errno == EINVAL || errno == EBADF) &&
+                           pos->leader != pos &&
+                           pos->weak_group) {
+                               pos = perf_evlist__reset_weak_group(evlist, pos);
+                               goto try_again;
+                       }
                        rc = -errno;
                        perf_evsel__open_strerror(pos, &opts->target,
                                                  errno, msg, sizeof(msg));
@@ -592,6 +597,9 @@ static void record__init_features(struct record *rec)
        if (!rec->opts.full_auxtrace)
                perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
 
+       if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
+               perf_header__clear_feat(&session->header, HEADER_CLOCKID);
+
        perf_header__clear_feat(&session->header, HEADER_STAT);
 }
 
@@ -897,6 +905,9 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
 
        record__init_features(rec);
 
+       if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
+               session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
+
        if (forks) {
                err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
                                                    argv, data->is_pipe,
@@ -1337,6 +1348,19 @@ static const struct clockid_map clockids[] = {
        CLOCKID_END,
 };
 
+static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
+{
+       struct timespec res;
+
+       *res_ns = 0;
+       if (!clock_getres(clk_id, &res))
+               *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
+       else
+               pr_warning("WARNING: Failed to determine specified clock resolution.\n");
+
+       return 0;
+}
+
 static int parse_clockid(const struct option *opt, const char *str, int unset)
 {
        struct record_opts *opts = (struct record_opts *)opt->value;
@@ -1360,7 +1384,7 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
 
        /* if its a number, we're done */
        if (sscanf(str, "%d", &opts->clockid) == 1)
-               return 0;
+               return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
 
        /* allow a "CLOCK_" prefix to the name */
        if (!strncasecmp(str, "CLOCK_", 6))
@@ -1369,7 +1393,8 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
        for (cm = clockids; cm->name; cm++) {
                if (!strcasecmp(str, cm->name)) {
                        opts->clockid = cm->clockid;
-                       return 0;
+                       return get_clockid_res(opts->clockid,
+                                              &opts->clockid_res_ns);
                }
        }
 
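
With the above, selecting the sample clock, e.g.:

	perf record -k CLOCK_MONOTONIC_RAW -- sleep 1

also queries that clock's resolution via clock_getres() and stores it in opts->clockid_res_ns, which the record__init_features()/__cmd_record() hunks earlier save into the perf.data HEADER_CLOCKID feature section.
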
index 4da5e32b9e035a97a797836f88a562c3a8206a2e..b5bc85bd0bbea48aec2f745f5b94c42a7d0fec4f 100644 (file)
@@ -44,6 +44,7 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <unistd.h>
+#include <subcmd/pager.h>
 
 #include "sane_ctype.h"
 
@@ -912,7 +913,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
 
 static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
                            struct perf_insn *x, u8 *inbuf, int len,
-                           int insn, FILE *fp)
+                           int insn, FILE *fp, int *total_cycles)
 {
        int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip,
                              dump_insn(x, ip, inbuf, len, NULL),
@@ -921,7 +922,8 @@ static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
                              en->flags.in_tx ? " INTX" : "",
                              en->flags.abort ? " ABORT" : "");
        if (en->flags.cycles) {
-               printed += fprintf(fp, " %d cycles", en->flags.cycles);
+               *total_cycles += en->flags.cycles;
+               printed += fprintf(fp, " %d cycles [%d]", en->flags.cycles, *total_cycles);
                if (insn)
                        printed += fprintf(fp, " %.2f IPC", (float)insn / en->flags.cycles);
        }
@@ -978,6 +980,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
        u8 buffer[MAXBB];
        unsigned off;
        struct symbol *lastsym = NULL;
+       int total_cycles = 0;
 
        if (!(br && br->nr))
                return 0;
@@ -998,7 +1001,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                printed += ip__fprintf_sym(br->entries[nr - 1].from, thread,
                                           x.cpumode, x.cpu, &lastsym, attr, fp);
                printed += ip__fprintf_jump(br->entries[nr - 1].from, &br->entries[nr - 1],
-                                           &x, buffer, len, 0, fp);
+                                           &x, buffer, len, 0, fp, &total_cycles);
        }
 
        /* Print all blocks */
@@ -1026,7 +1029,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
 
                        printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
                        if (ip == end) {
-                               printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp);
+                               printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
+                                                           &total_cycles);
                                break;
                        } else {
                                printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
@@ -1104,6 +1108,35 @@ out:
        return printed;
 }
 
+static const char *resolve_branch_sym(struct perf_sample *sample,
+                                     struct perf_evsel *evsel,
+                                     struct thread *thread,
+                                     struct addr_location *al,
+                                     u64 *ip)
+{
+       struct addr_location addr_al;
+       struct perf_event_attr *attr = &evsel->attr;
+       const char *name = NULL;
+
+       if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
+               if (sample_addr_correlates_sym(attr)) {
+                       thread__resolve(thread, &addr_al, sample);
+                       if (addr_al.sym)
+                               name = addr_al.sym->name;
+                       else
+                               *ip = sample->addr;
+               } else {
+                       *ip = sample->addr;
+               }
+       } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
+               if (al->sym)
+                       name = al->sym->name;
+               else
+                       *ip = sample->ip;
+       }
+       return name;
+}
+
 static int perf_sample__fprintf_callindent(struct perf_sample *sample,
                                           struct perf_evsel *evsel,
                                           struct thread *thread,
@@ -1111,7 +1144,6 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
 {
        struct perf_event_attr *attr = &evsel->attr;
        size_t depth = thread_stack__depth(thread);
-       struct addr_location addr_al;
        const char *name = NULL;
        static int spacing;
        int len = 0;
@@ -1125,22 +1157,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
        if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
                depth += 1;
 
-       if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
-               if (sample_addr_correlates_sym(attr)) {
-                       thread__resolve(thread, &addr_al, sample);
-                       if (addr_al.sym)
-                               name = addr_al.sym->name;
-                       else
-                               ip = sample->addr;
-               } else {
-                       ip = sample->addr;
-               }
-       } else if (sample->flags & (PERF_IP_FLAG_RETURN | PERF_IP_FLAG_TRACE_END)) {
-               if (al->sym)
-                       name = al->sym->name;
-               else
-                       ip = sample->ip;
-       }
+       name = resolve_branch_sym(sample, evsel, thread, al, &ip);
 
        if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
                dlen += fprintf(fp, "(");
@@ -1646,6 +1663,47 @@ static void perf_sample__fprint_metric(struct perf_script *script,
        }
 }
 
+static bool show_event(struct perf_sample *sample,
+                      struct perf_evsel *evsel,
+                      struct thread *thread,
+                      struct addr_location *al)
+{
+       int depth = thread_stack__depth(thread);
+
+       if (!symbol_conf.graph_function)
+               return true;
+
+       if (thread->filter) {
+               if (depth <= thread->filter_entry_depth) {
+                       thread->filter = false;
+                       return false;
+               }
+               return true;
+       } else {
+               const char *s = symbol_conf.graph_function;
+               u64 ip;
+               const char *name = resolve_branch_sym(sample, evsel, thread, al,
+                               &ip);
+               unsigned nlen;
+
+               if (!name)
+                       return false;
+               nlen = strlen(name);
+               while (*s) {
+                       unsigned len = strcspn(s, ",");
+                       if (nlen == len && !strncmp(name, s, len)) {
+                               thread->filter = true;
+                               thread->filter_entry_depth = depth;
+                               return true;
+                       }
+                       s += len;
+                       if (*s == ',')
+                               s++;
+               }
+               return false;
+       }
+}
+
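+/*
+ * Usage sketch for the filter above (the symbol name is illustrative):
+ * together with the --call-trace option added further down in this patch,
+ * --graph-function prints only the named function(s) and their callees:
+ *
+ *	perf script --call-trace --graph-function do_sys_open
+ */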
 static void process_event(struct perf_script *script,
                          struct perf_sample *sample, struct perf_evsel *evsel,
                          struct addr_location *al,
@@ -1660,6 +1718,9 @@ static void process_event(struct perf_script *script,
        if (output[type].fields == 0)
                return;
 
+       if (!show_event(sample, evsel, thread, al))
+               return;
+
        ++es->samples;
 
        perf_sample__fprintf_start(sample, thread, evsel,
@@ -1737,6 +1798,9 @@ static void process_event(struct perf_script *script,
 
        if (PRINT_FIELD(METRIC))
                perf_sample__fprint_metric(script, thread, evsel, sample, fp);
+
+       if (verbose)
+               fflush(fp);
 }
 
 static struct scripting_ops    *scripting_ops;
@@ -3100,6 +3164,44 @@ static int perf_script__process_auxtrace_info(struct perf_session *session,
 #define perf_script__process_auxtrace_info 0
 #endif
 
+static int parse_insn_trace(const struct option *opt __maybe_unused,
+                           const char *str __maybe_unused,
+                           int unset __maybe_unused)
+{
+       parse_output_fields(NULL, "+insn,-event,-period", 0);
+       itrace_parse_synth_opts(opt, "i0ns", 0);
+       nanosecs = true;
+       return 0;
+}
+
+static int parse_xed(const struct option *opt __maybe_unused,
+                    const char *str __maybe_unused,
+                    int unset __maybe_unused)
+{
+       force_pager("xed -F insn: -A -64 | less");
+       return 0;
+}
+
+static int parse_call_trace(const struct option *opt __maybe_unused,
+                           const char *str __maybe_unused,
+                           int unset __maybe_unused)
+{
+       parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent", 0);
+       itrace_parse_synth_opts(opt, "cewp", 0);
+       nanosecs = true;
+       return 0;
+}
+
+static int parse_callret_trace(const struct option *opt __maybe_unused,
+                              const char *str __maybe_unused,
+                              int unset __maybe_unused)
+{
+       parse_output_fields(NULL, "-ip,-addr,-event,-period,+callindent,+flags", 0);
+       itrace_parse_synth_opts(opt, "crewp", 0);
+       nanosecs = true;
+       return 0;
+}
+
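
These callbacks back the convenience options wired up in the option table below. Assuming a CPU with Intel PT and the xed disassembler installed, an instruction-level trace could be decoded and disassembled with something like:

	perf record -e intel_pt//u -- ls
	perf script --insn-trace --xed
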
 int cmd_script(int argc, const char **argv)
 {
        bool show_full_info = false;
@@ -3109,7 +3211,10 @@ int cmd_script(int argc, const char **argv)
        char *rec_script_path = NULL;
        char *rep_script_path = NULL;
        struct perf_session *session;
-       struct itrace_synth_opts itrace_synth_opts = { .set = false, };
+       struct itrace_synth_opts itrace_synth_opts = {
+               .set = false,
+               .default_no_sample = true,
+       };
        char *script_path = NULL;
        const char **__argv;
        int i, j, err = 0;
@@ -3184,6 +3289,16 @@ int cmd_script(int argc, const char **argv)
                    "system-wide collection from all CPUs"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
                   "only consider these symbols"),
+       OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL,
+                       "Decode instructions from itrace", parse_insn_trace),
+       OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
+                       "Run xed disassembler on output", parse_xed),
+       OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL,
+                       "Decode calls from itrace", parse_call_trace),
+       OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL,
+                       "Decode calls and returns from itrace", parse_callret_trace),
+       OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]",
+                       "Only print symbols and callees with --call-trace/--call-ret-trace"),
        OPT_STRING(0, "stop-bt", &symbol_conf.bt_stop_list_str, "symbol[,symbol...]",
                   "Stop display of callgraph at these symbols"),
        OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"),
@@ -3417,8 +3532,10 @@ int cmd_script(int argc, const char **argv)
                exit(-1);
        }
 
-       if (!script_name)
+       if (!script_name) {
                setup_pager();
+               use_browser = 0;
+       }
 
        session = perf_session__new(&data, false, &script.tool);
        if (session == NULL)
@@ -3439,7 +3556,8 @@ int cmd_script(int argc, const char **argv)
        script.session = session;
        script__setup_sample_type(&script);
 
-       if (output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT)
+       if ((output[PERF_TYPE_HARDWARE].fields & PERF_OUTPUT_CALLINDENT) ||
+           symbol_conf.graph_function)
                itrace_synth_opts.thread_stack = true;
 
        session->itrace_synth_opts = &itrace_synth_opts;
index b86aba1c8028f0fae6043cefdc96e61adbbafe30..a635abfa77b6a9e38fa7aa994ea4cfd31608f16a 100644 (file)
@@ -383,30 +383,26 @@ static bool perf_evsel__should_store_id(struct perf_evsel *counter)
        return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
 }
 
-static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
+static bool is_target_alive(struct target *_target,
+                           struct thread_map *threads)
 {
-       struct perf_evsel *c2, *leader;
-       bool is_open = true;
+       struct stat st;
+       int i;
 
-       leader = evsel->leader;
-       pr_debug("Weak group for %s/%d failed\n",
-                       leader->name, leader->nr_members);
+       if (!target__has_task(_target))
+               return true;
 
-       /*
-        * for_each_group_member doesn't work here because it doesn't
-        * include the first entry.
-        */
-       evlist__for_each_entry(evsel_list, c2) {
-               if (c2 == evsel)
-                       is_open = false;
-               if (c2->leader == leader) {
-                       if (is_open)
-                               perf_evsel__close(c2);
-                       c2->leader = c2;
-                       c2->nr_members = 0;
-               }
+       for (i = 0; i < threads->nr; i++) {
+               char path[PATH_MAX];
+
+               scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
+                         threads->map[i].pid);
+
+               if (!stat(path, &st))
+                       return true;
        }
-       return leader;
+
+       return false;
 }
 
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
@@ -455,7 +451,7 @@ try_again:
                        if ((errno == EINVAL || errno == EBADF) &&
                            counter->leader != counter &&
                            counter->weak_group) {
-                               counter = perf_evsel__reset_weak_group(counter);
+                               counter = perf_evlist__reset_weak_group(evsel_list, counter);
                                goto try_again;
                        }
 
@@ -579,6 +575,8 @@ try_again:
                enable_counters();
                while (!done) {
                        nanosleep(&ts, NULL);
+                       if (!is_target_alive(&target, evsel_list->threads))
+                               break;
                        if (timeout)
                                break;
                        if (interval) {
index d21d8751e74910db9639f0b3c450abf42eb223ab..aa0c73e5792404355c5e8c2de048ff8e5da18e39 100644 (file)
@@ -1134,11 +1134,6 @@ static int __cmd_top(struct perf_top *top)
         if (!target__none(&opts->target))
                 perf_evlist__enable(top->evlist);
 
-       /* Wait for a minimal set of events before starting the snapshot */
-       perf_evlist__poll(top->evlist, 100);
-
-       perf_top__mmap_read(top);
-
        ret = -1;
        if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
                                                            display_thread), top)) {
@@ -1156,6 +1151,11 @@ static int __cmd_top(struct perf_top *top)
                }
        }
 
+       /* Wait for a minimal set of events before starting the snapshot */
+       perf_evlist__poll(top->evlist, 100);
+
+       perf_top__mmap_read(top);
+
        while (!done) {
                u64 hits = top->samples;
 
@@ -1257,7 +1257,14 @@ int cmd_top(int argc, const char **argv)
                                .uses_mmap   = true,
                        },
                        .proc_map_timeout    = 500,
-                       .overwrite      = 1,
+                       /*
+                        * FIXME: This will lose PERF_RECORD_MMAP and other metadata
+                        * when we pause, fix that and reenable. Probably using a
+                        * separate evlist with a dummy event, i.e. a non-overwrite
+                        * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
+                        * stays in overwrite mode. -acme
+                        */
+                       .overwrite      = 0,
                },
                .max_stack           = sysctl__max_stack(),
                .annotation_opts     = annotation__default_options,
@@ -1372,6 +1379,8 @@ int cmd_top(int argc, const char **argv)
                    "Show raw trace event output (do not use print fmt or plugins)"),
        OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
                    "Show entries in a hierarchy"),
+       OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
+                   "Use a backward ring buffer, default: no"),
        OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
        OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
                        "number of thread to run event synthesize"),
@@ -1420,6 +1429,9 @@ int cmd_top(int argc, const char **argv)
                }
        }
 
+       if (opts->branch_stack && callchain_param.enabled)
+               symbol_conf.show_branchflag_count = true;
+
        sort__mode = SORT_MODE__TOP;
        /* display thread wants entries to be collapsed in a different tree */
        perf_hpp_list.need_collapse = 1;
index 90289f31dd87c774ef882c24b1cdf55da9edcca3..835619476370cc0ae1d43bae705597f819a4c732 100644 (file)
@@ -89,6 +89,8 @@ struct trace {
        u64                     base_time;
        FILE                    *output;
        unsigned long           nr_events;
+       unsigned long           nr_events_printed;
+       unsigned long           max_events;
        struct strlist          *ev_qualifier;
        struct {
                size_t          nr;
@@ -106,6 +108,7 @@ struct trace {
        } stats;
        unsigned int            max_stack;
        unsigned int            min_stack;
+       bool                    raw_augmented_syscalls;
        bool                    not_ev_qualifier;
        bool                    live;
        bool                    full_time;
@@ -612,6 +615,7 @@ static size_t syscall_arg__scnprintf_getrandom_flags(char *bf, size_t size,
 
 struct syscall_arg_fmt {
        size_t     (*scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
+       unsigned long (*mask_val)(struct syscall_arg *arg, unsigned long val);
        void       *parm;
        const char *name;
        bool       show_zero;
@@ -723,6 +727,10 @@ static struct syscall_fmt {
          .arg = { [0] = { .scnprintf = SCA_HEX,        /* addr */ },
                   [2] = { .scnprintf = SCA_MMAP_PROT,  /* prot */ },
                   [3] = { .scnprintf = SCA_MMAP_FLAGS, /* flags */ }, }, },
+       { .name     = "mount",
+         .arg = { [0] = { .scnprintf = SCA_FILENAME, /* dev_name */ },
+                  [3] = { .scnprintf = SCA_MOUNT_FLAGS, /* flags */
+                          .mask_val  = SCAMV_MOUNT_FLAGS, /* flags */ }, }, },
        { .name     = "mprotect",
          .arg = { [0] = { .scnprintf = SCA_HEX,        /* start */ },
                   [2] = { .scnprintf = SCA_MMAP_PROT,  /* prot */ }, }, },
@@ -832,7 +840,8 @@ static struct syscall_fmt {
          .arg = { [2] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
        { .name     = "tkill",
          .arg = { [1] = { .scnprintf = SCA_SIGNUM, /* sig */ }, }, },
-       { .name     = "umount2", .alias = "umount", },
+       { .name     = "umount2", .alias = "umount",
+         .arg = { [0] = { .scnprintf = SCA_FILENAME, /* name */ }, }, },
        { .name     = "uname", .alias = "newuname", },
        { .name     = "unlinkat",
          .arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ }, }, },
@@ -856,6 +865,18 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
        return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp);
 }
 
+static struct syscall_fmt *syscall_fmt__find_by_alias(const char *alias)
+{
+       int i, nmemb = ARRAY_SIZE(syscall_fmts);
+
+       for (i = 0; i < nmemb; ++i) {
+               if (syscall_fmts[i].alias && strcmp(syscall_fmts[i].alias, alias) == 0)
+                       return &syscall_fmts[i];
+       }
+
+       return NULL;
+}
+
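
With syscall_fmt__find_by_alias() wired into the -e option parser further down (trace__parse_events_option()), an alias now selects the real syscall, so e.g.:

	perf trace -e umount -- umount /mnt/foo

traces umount2, for which "umount" is registered as an alias in the table above (the mount point is illustrative).
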
 /*
  * is_exit: is this "exit" or "exit_group"?
  * is_open: is this "open" or "openat"? To associate the fd returned in sys_exit with the pathname in sys_enter.
@@ -1485,6 +1506,19 @@ static size_t syscall__scnprintf_name(struct syscall *sc, char *bf, size_t size,
        return scnprintf(bf, size, "arg%d: ", arg->idx);
 }
 
+/*
+ * Mask whatever needs masking before checking if the value is in fact zero,
+ * e.g. the mount 'flags' argument, where some magic flag has to be ignored;
+ * see the comment in tools/perf/trace/beauty/mount_flags.c.
+ */
+static unsigned long syscall__mask_val(struct syscall *sc, struct syscall_arg *arg, unsigned long val)
+{
+       if (sc->arg_fmt && sc->arg_fmt[arg->idx].mask_val)
+               return sc->arg_fmt[arg->idx].mask_val(arg, val);
+
+       return val;
+}
+
 static size_t syscall__scnprintf_val(struct syscall *sc, char *bf, size_t size,
                                     struct syscall_arg *arg, unsigned long val)
 {
@@ -1533,6 +1567,11 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
                                continue;
 
                        val = syscall_arg__val(&arg, arg.idx);
+                       /*
+                        * Some syscall args need a mask applied; for most,
+                        * syscall__mask_val() returns val untouched.
+                        */
+                       val = syscall__mask_val(sc, &arg, val);
 
                        /*
                         * Suppress this argument if its value is zero and
@@ -1664,6 +1703,8 @@ static int trace__printf_interrupted_entry(struct trace *trace)
        printed += fprintf(trace->output, "%-70s) ...\n", ttrace->entry_str);
        ttrace->entry_pending = false;
 
+       ++trace->nr_events_printed;
+
        return printed;
 }
 
@@ -1684,13 +1725,28 @@ static int trace__fprintf_sample(struct trace *trace, struct perf_evsel *evsel,
        return printed;
 }
 
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
 {
        void *augmented_args = NULL;
+       /*
+        * For now, with BPF raw_augmented we hook into raw_syscalls:sys_enter,
+        * where we get all 6 syscall args plus the tracepoint common fields
+        * (sizeof(long)) and the syscall_nr (another long). So we check if
+        * that is the case and, if so, take the augmented arguments not after
+        * sc->args_size but after the full raw_syscalls:sys_enter payload,
+        * which has a fixed size.
+        *
+        * We'll revisit this later to pass sc->args_size to the BPF augmenter
+        * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
+        * copies only what we need for each syscall, like what happens when we
+        * use syscalls:sys_enter_NAME, reducing the kernel/userspace traffic
+        * to just what is needed for each syscall.
+        */
+       int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
 
-       *augmented_args_size = sample->raw_size - sc->args_size;
+       *augmented_args_size = sample->raw_size - args_size;
        if (*augmented_args_size > 0)
-               augmented_args = sample->raw_data + sc->args_size;
+               augmented_args = sample->raw_data + args_size;
 
        return augmented_args;
 }
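
A minimal sketch of the fixed payload assumed by the 8 * sizeof(long) above; it mirrors the struct declared in the BPF example added later in this patch (tools/perf/examples/bpf/augmented_raw_syscalls.c):

	struct syscall_enter_args {
		unsigned long long common_tp_fields;	/* tracepoint common fields */
		long               syscall_nr;
		unsigned long      args[6];		/* the six syscall arguments */
	};						/* 8 longs total on 64-bit */
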
@@ -1740,7 +1796,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
         * here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
         */
        if (evsel != trace->syscalls.events.sys_enter)
-               augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+               augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
        ttrace->entry_time = sample->time;
        msg = ttrace->entry_str;
        printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
@@ -1793,7 +1849,7 @@ static int trace__fprintf_sys_enter(struct trace *trace, struct perf_evsel *evse
                goto out_put;
 
        args = perf_evsel__sc_tp_ptr(evsel, args, sample);
-       augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+       augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
        syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
        fprintf(trace->output, "%s", msg);
        err = 0;
@@ -1810,12 +1866,14 @@ static int trace__resolve_callchain(struct trace *trace, struct perf_evsel *evse
        int max_stack = evsel->attr.sample_max_stack ?
                        evsel->attr.sample_max_stack :
                        trace->max_stack;
+       int err;
 
-       if (machine__resolve(trace->host, &al, sample) < 0 ||
-           thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack))
+       if (machine__resolve(trace->host, &al, sample) < 0)
                return -1;
 
-       return 0;
+       err = thread__resolve_callchain(al.thread, cursor, evsel, sample, NULL, NULL, max_stack);
+       addr_location__put(&al);
+       return err;
 }
 
 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample)
@@ -1940,6 +1998,13 @@ errno_print: {
 
        fputc('\n', trace->output);
 
+       /*
+        * For the sake of --max-events we only count as an 'event' a
+        * non-filtered sys_enter + sys_exit pair and other tracepoint events.
+        */
+       if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
+               interrupted = true;
+
        if (callchain_ret > 0)
                trace__fprintf_callchain(trace, sample);
        else if (callchain_ret < 0)
@@ -2072,14 +2137,25 @@ static void bpf_output__fprintf(struct trace *trace,
 {
        binary__fprintf(sample->raw_data, sample->raw_size, 8,
                        bpf_output__printer, NULL, trace->output);
+       ++trace->nr_events_printed;
 }
 
 static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
                                union perf_event *event __maybe_unused,
                                struct perf_sample *sample)
 {
-       struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+       struct thread *thread;
        int callchain_ret = 0;
+       /*
+        * Check if we called perf_evsel__disable(evsel) due to, for instance,
+        * this event's max_events having been hit and this is an entry coming
+        * from the ring buffer that we should discard, since the max events
+        * have already been considered/printed.
+        */
+       if (evsel->disabled)
+               return 0;
+
+       thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
 
        if (sample->callchain) {
                callchain_ret = trace__resolve_callchain(trace, evsel, sample, &callchain_cursor);
@@ -2127,6 +2203,12 @@ static int trace__event_handler(struct trace *trace, struct perf_evsel *evsel,
                        event_format__fprintf(evsel->tp_format, sample->cpu,
                                              sample->raw_data, sample->raw_size,
                                              trace->output);
+                       ++trace->nr_events_printed;
+
+                       if (evsel->max_events != ULONG_MAX && ++evsel->nr_events_printed == evsel->max_events) {
+                               perf_evsel__disable(evsel);
+                               perf_evsel__close(evsel);
+                       }
                }
        }
 
@@ -2137,8 +2219,8 @@ newline:
                trace__fprintf_callchain(trace, sample);
        else if (callchain_ret < 0)
                pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
-       thread__put(thread);
 out:
+       thread__put(thread);
        return 0;
 }
 
@@ -2225,6 +2307,8 @@ static int trace__pgfault(struct trace *trace,
                trace__fprintf_callchain(trace, sample);
        else if (callchain_ret < 0)
                pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+
+       ++trace->nr_events_printed;
 out:
        err = 0;
 out_put:
@@ -2402,6 +2486,9 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
                tracepoint_handler handler = evsel->handler;
                handler(trace, evsel, event, sample);
        }
+
+       if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
+               interrupted = true;
 }
 
 static int trace__add_syscall_newtp(struct trace *trace)
@@ -2706,7 +2793,7 @@ next_event:
                int timeout = done ? 100 : -1;
 
                if (!draining && perf_evlist__poll(evlist, timeout) > 0) {
-                       if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
+                       if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP | POLLNVAL) == 0)
                                draining = true;
 
                        goto again;
@@ -3138,6 +3225,7 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
        int len = strlen(str) + 1, err = -1, list, idx;
        char *strace_groups_dir = system_path(STRACE_GROUPS_DIR);
        char group_name[PATH_MAX];
+       struct syscall_fmt *fmt;
 
        if (strace_groups_dir == NULL)
                return -1;
@@ -3155,12 +3243,19 @@ static int trace__parse_events_option(const struct option *opt, const char *str,
                if (syscalltbl__id(trace->sctbl, s) >= 0 ||
                    syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
                        list = 1;
+                       goto do_concat;
+               }
+
+               fmt = syscall_fmt__find_by_alias(s);
+               if (fmt != NULL) {
+                       list = 1;
+                       s = fmt->name;
                } else {
                        path__join(group_name, sizeof(group_name), strace_groups_dir, s);
                        if (access(group_name, R_OK) == 0)
                                list = 1;
                }
-
+do_concat:
                if (lists[list]) {
                        sprintf(lists[list] + strlen(lists[list]), ",%s", s);
                } else {
@@ -3249,6 +3344,7 @@ int cmd_trace(int argc, const char **argv)
                .trace_syscalls = false,
                .kernel_syscallchains = false,
                .max_stack = UINT_MAX,
+               .max_events = ULONG_MAX,
        };
        const char *output_name = NULL;
        const struct option trace_options[] = {
@@ -3301,6 +3397,8 @@ int cmd_trace(int argc, const char **argv)
                     &record_parse_callchain_opt),
        OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
                    "Show the kernel callchains on the syscall exit path"),
+       OPT_ULONG(0, "max-events", &trace.max_events,
+               "Set the maximum number of events to print, exit after that is reached."),
        OPT_UINTEGER(0, "min-stack", &trace.min_stack,
                     "Set the minimum stack depth when parsing the callchain, "
                     "anything below the specified depth will be ignored."),
@@ -3419,7 +3517,15 @@ int cmd_trace(int argc, const char **argv)
                evsel->handler = trace__sys_enter;
 
                evlist__for_each_entry(trace.evlist, evsel) {
+                       bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+
+                       if (raw_syscalls_sys_exit) {
+                               trace.raw_augmented_syscalls = true;
+                               goto init_augmented_syscall_tp;
+                       }
+
                        if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+init_augmented_syscall_tp:
                                perf_evsel__init_augmented_syscall_tp(evsel);
                                perf_evsel__init_augmented_syscall_tp_ret(evsel);
                                evsel->handler = trace__sys_exit;
index c72cc73a6b09a7c008eec2e19fda38e8924c5d0f..9531f7bd7d9bd9e114fd57840e4f8fb87a642610 100755 (executable)
@@ -5,6 +5,7 @@ HEADERS='
 include/uapi/drm/drm.h
 include/uapi/drm/i915_drm.h
 include/uapi/linux/fcntl.h
+include/uapi/linux/fs.h
 include/uapi/linux/kcmp.h
 include/uapi/linux/kvm.h
 include/uapi/linux/in.h
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
new file mode 100644 (file)
index 0000000..90a1933
--- /dev/null
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * This exactly matches what is marshalled into the raw_syscalls:sys_enter
+ * payload expected by the 'perf trace' beautifiers.
+ *
+ * For now it just uses the existing tracepoint augmentation code in 'perf
+ * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
+ * code that will combine entry/exit in a strace like way.
+ */
+
+#include <stdio.h>
+#include <linux/socket.h>
+
+/* bpf-output associated map */
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+       .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(u32),
+       .max_entries = __NR_CPUS__,
+};
+
+struct syscall_enter_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       unsigned long      args[6];
+};
+
+struct syscall_exit_args {
+       unsigned long long common_tp_fields;
+       long               syscall_nr;
+       long               ret;
+};
+
+struct augmented_filename {
+       unsigned int    size;
+       int             reserved;
+       char            value[256];
+};
+
+#define SYS_OPEN 2	/* x86_64 syscall numbers, see arch/x86/entry/syscalls/ */
+#define SYS_OPENAT 257
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+       struct {
+               struct syscall_enter_args args;
+               struct augmented_filename filename;
+       } augmented_args;
+       unsigned int len = sizeof(augmented_args);
+       const void *filename_arg = NULL;
+
+       probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+       /*
+        * Yonghong and Edward Cree sayz:
+        *
+        * https://www.spinics.net/lists/netdev/msg531645.html
+        *
+        * >>   R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
+        * >> 10: (bf) r1 = r6
+        * >> 11: (07) r1 += 16
+        * >> 12: (05) goto pc+2
+        * >> 15: (79) r3 = *(u64 *)(r1 +0)
+        * >> dereference of modified ctx ptr R1 off=16 disallowed
+        * > Aha, we at least got a different error message this time.
+        * > And indeed llvm has done that optimisation, rather than the more obvious
+        * > 11: r3 = *(u64 *)(r1 +16)
+        * > because it wants to have lots of reads share a single insn.  You may be able
+        * > to defeat that optimisation by adding compiler barriers, idk.  Maybe someone
+        * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
+        * > when it's generating for bpf backend and not do that).  -O0?  ¯\_(ツ)_/¯
+        *
+        * The optimization mostly looks like this:
+        *
+        *      br1:
+        *      ...
+        *      r1 += 16
+        *      goto merge
+        *      br2:
+        *      ...
+        *      r1 += 20
+        *      goto merge
+        *      merge:
+        *      *(u64 *)(r1 + 0)
+        *
+        * The compiler tries to merge common loads. There is no easy way to
+        * stop this compiler optimization without turning off a lot of other
+        * optimizations. The easiest way is to add barriers:
+        *
+        *       __asm__ __volatile__("": : :"memory")
+        *
+        *       after the ctx memory access to prevent their downstream merging.
+        */
+       switch (augmented_args.args.syscall_nr) {
+       case SYS_OPEN:   filename_arg = (const void *)args->args[0];
+                       __asm__ __volatile__("": : :"memory");
+                        break;
+       case SYS_OPENAT: filename_arg = (const void *)args->args[1];
+                        break;
+       }
+
+       if (filename_arg != NULL) {
+               augmented_args.filename.reserved = 0;
+               augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
+                                                             sizeof(augmented_args.filename.value),
+                                                             filename_arg);
+               if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
+                       len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
+                       len &= sizeof(augmented_args.filename.value) - 1;
+               }
+       } else {
+               len = sizeof(augmented_args.args);
+       }
+
+       perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+       return 0;
+}
+
+SEC("raw_syscalls:sys_exit")
+int sys_exit(struct syscall_exit_args *args)
+{
+       return 1; /* becomes 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
+}
+
+license(GPL);
index ac1bcdc17dae7554f51a780b843605c441c6abbf..f7eb63cbbc655bdcebbd710ed38ce39a907db4bc 100644 (file)
@@ -125,7 +125,7 @@ perf_get_timestamp(void)
 }
 
 static int
-debug_cache_init(void)
+create_jit_cache_dir(void)
 {
        char str[32];
        char *base, *p;
@@ -144,8 +144,13 @@ debug_cache_init(void)
 
        strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
 
-       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
-
+       ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
+                       " is too long, please check the cwd, JITDUMPDIR, and"
+                       " HOME variables", base);
+               return -1;
+       }
        ret = mkdir(jit_path, 0755);
        if (ret == -1) {
                if (errno != EEXIST) {
@@ -154,20 +159,32 @@ debug_cache_init(void)
                }
        }
 
-       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+       ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jit cache dir because"
+                       " %s/.debug/jit is too long, please check the cwd,"
+                       " JITDUMPDIR, and HOME variables", base);
+               return -1;
+       }
        ret = mkdir(jit_path, 0755);
        if (ret == -1) {
                if (errno != EEXIST) {
-                       warn("cannot create jit cache dir %s", jit_path);
+                       warn("jvmti: cannot create jit cache dir %s", jit_path);
                        return -1;
                }
        }
 
-       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
-
+       ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jit cache dir because"
+                       " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
+                       " the cwd, JITDUMPDIR, and HOME variables",
+                       base, str);
+               return -1;
+       }
        p = mkdtemp(jit_path);
        if (p != jit_path) {
-               warn("cannot create jit cache dir %s", jit_path);
+               warn("jvmti: cannot create jit cache dir %s", jit_path);
                return -1;
        }
 
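
The checks above rely on C99 snprintf() semantics: it returns the length the formatted string would have had, so a return value >= the buffer size signals truncation. A minimal sketch of the idiom (names illustrative):

	char path[PATH_MAX];
	int ret = snprintf(path, sizeof(path), "%s/.debug/jit", base);

	if (ret >= (int)sizeof(path))
		return -1;	/* output would have been truncated */
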
@@ -228,7 +245,7 @@ void *jvmti_open(void)
 {
        char dump_path[PATH_MAX];
        struct jitheader header;
-       int fd;
+       int fd, ret;
        FILE *fp;
 
        init_arch_timestamp();
@@ -245,12 +262,22 @@ void *jvmti_open(void)
 
        memset(&header, 0, sizeof(header));
 
-       debug_cache_init();
+       /*
+        * jitdump file dir
+        */
+       if (create_jit_cache_dir() < 0)
+               return NULL;
 
        /*
         * jitdump file name
         */
-       scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+       if (ret >= PATH_MAX) {
+               warnx("jvmti: cannot generate jitdump file full path because"
+                       " %s/jit-%i.dump is too long, please check the cwd,"
+                       " JITDUMPDIR, and HOME variables", jit_path, getpid());
+               return NULL;
+       }
 
        fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
        if (fd == -1)
index 21bf7f5a3cf51a1a42e3169daa738c8e7e0a8d83..0ed4a34c74c4bc6d2c38457c05a59dd9f6ea8551 100644 (file)
@@ -81,6 +81,7 @@ struct record_opts {
        unsigned     initial_delay;
        bool         use_clockid;
        clockid_t    clockid;
+       u64          clockid_res_ns;
        unsigned int proc_map_timeout;
 };
 
diff --git a/tools/perf/scripts/python/call-graph-from-sql.py b/tools/perf/scripts/python/call-graph-from-sql.py
deleted file mode 100644 (file)
index b494a67..0000000
+++ /dev/null
@@ -1,339 +0,0 @@
-#!/usr/bin/python2
-# call-graph-from-sql.py: create call-graph from sql database
-# Copyright (c) 2014-2017, Intel Corporation.
-#
-# This program is free software; you can redistribute it and/or modify it
-# under the terms and conditions of the GNU General Public License,
-# version 2, as published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
-# more details.
-
-# To use this script you will need to have exported data using either the
-# export-to-sqlite.py or the export-to-postgresql.py script.  Refer to those
-# scripts for details.
-#
-# Following on from the example in the export scripts, a
-# call-graph can be displayed for the pt_example database like this:
-#
-#      python tools/perf/scripts/python/call-graph-from-sql.py pt_example
-#
-# Note that for PostgreSQL, this script supports connecting to remote databases
-# by setting hostname, port, username, password, and dbname e.g.
-#
-#      python tools/perf/scripts/python/call-graph-from-sql.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
-#
-# The result is a GUI window with a tree representing a context-sensitive
-# call-graph.  Expanding a couple of levels of the tree and adjusting column
-# widths to suit will display something like:
-#
-#                                         Call Graph: pt_example
-# Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
-# v- ls
-#     v- 2638:2638
-#         v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
-#           |- unknown               unknown       1        13198     0.1              1              0.0
-#           >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
-#           >- _d_linit_internal     ld-2.19.so    1       448152     4.4          11094              5.3
-#           v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
-#              >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
-#              >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
-#              >- __libc_csu_init    ls            1        10354     0.1             10              0.0
-#              |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
-#              v- main               ls            1      8182043    99.6         180254             99.9
-#
-# Points to note:
-#      The top level is a command name (comm)
-#      The next level is a thread (pid:tid)
-#      Subsequent levels are functions
-#      'Count' is the number of calls
-#      'Time' is the elapsed time until the function returns
-#      Percentages are relative to the level above
-#      'Branch Count' is the total number of branches for that function and all
-#       functions that it calls
-
-import sys
-from PySide.QtCore import *
-from PySide.QtGui import *
-from PySide.QtSql import *
-from decimal import *
-
-class TreeItem():
-
-       def __init__(self, db, row, parent_item):
-               self.db = db
-               self.row = row
-               self.parent_item = parent_item
-               self.query_done = False;
-               self.child_count = 0
-               self.child_items = []
-               self.data = ["", "", "", "", "", "", ""]
-               self.comm_id = 0
-               self.thread_id = 0
-               self.call_path_id = 1
-               self.branch_count = 0
-               self.time = 0
-               if not parent_item:
-                       self.setUpRoot()
-
-       def setUpRoot(self):
-               self.query_done = True
-               query = QSqlQuery(self.db)
-               ret = query.exec_('SELECT id, comm FROM comms')
-               if not ret:
-                       raise Exception("Query failed: " + query.lastError().text())
-               while query.next():
-                       if not query.value(0):
-                               continue
-                       child_item = TreeItem(self.db, self.child_count, self)
-                       self.child_items.append(child_item)
-                       self.child_count += 1
-                       child_item.setUpLevel1(query.value(0), query.value(1))
-
-       def setUpLevel1(self, comm_id, comm):
-               self.query_done = True;
-               self.comm_id = comm_id
-               self.data[0] = comm
-               self.child_items = []
-               self.child_count = 0
-               query = QSqlQuery(self.db)
-               ret = query.exec_('SELECT thread_id, ( SELECT pid FROM threads WHERE id = thread_id ), ( SELECT tid FROM threads WHERE id = thread_id ) FROM comm_threads WHERE comm_id = ' + str(comm_id))
-               if not ret:
-                       raise Exception("Query failed: " + query.lastError().text())
-               while query.next():
-                       child_item = TreeItem(self.db, self.child_count, self)
-                       self.child_items.append(child_item)
-                       self.child_count += 1
-                       child_item.setUpLevel2(comm_id, query.value(0), query.value(1), query.value(2))
-
-       def setUpLevel2(self, comm_id, thread_id, pid, tid):
-               self.comm_id = comm_id
-               self.thread_id = thread_id
-               self.data[0] = str(pid) + ":" + str(tid)
-
-       def getChildItem(self, row):
-               return self.child_items[row]
-
-       def getParentItem(self):
-               return self.parent_item
-
-       def getRow(self):
-               return self.row
-
-       def timePercent(self, b):
-               if not self.time:
-                       return "0.0"
-               x = (b * Decimal(100)) / self.time
-               return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))
-
-       def branchPercent(self, b):
-               if not self.branch_count:
-                       return "0.0"
-               x = (b * Decimal(100)) / self.branch_count
-               return str(x.quantize(Decimal('.1'), rounding=ROUND_HALF_UP))
-
-       def addChild(self, call_path_id, name, dso, count, time, branch_count):
-               child_item = TreeItem(self.db, self.child_count, self)
-               child_item.comm_id = self.comm_id
-               child_item.thread_id = self.thread_id
-               child_item.call_path_id = call_path_id
-               child_item.branch_count = branch_count
-               child_item.time = time
-               child_item.data[0] = name
-               if dso == "[kernel.kallsyms]":
-                       dso = "[kernel]"
-               child_item.data[1] = dso
-               child_item.data[2] = str(count)
-               child_item.data[3] = str(time)
-               child_item.data[4] = self.timePercent(time)
-               child_item.data[5] = str(branch_count)
-               child_item.data[6] = self.branchPercent(branch_count)
-               self.child_items.append(child_item)
-               self.child_count += 1
-
-       def selectCalls(self):
-               self.query_done = True;
-               query = QSqlQuery(self.db)
-               ret = query.exec_('SELECT id, call_path_id, branch_count, call_time, return_time, '
-                                 '( SELECT name FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ), '
-                                 '( SELECT short_name FROM dsos WHERE id = ( SELECT dso_id FROM symbols WHERE id = ( SELECT symbol_id FROM call_paths WHERE id = call_path_id ) ) ), '
-                                 '( SELECT ip FROM call_paths where id = call_path_id ) '
-                                 'FROM calls WHERE parent_call_path_id = ' + str(self.call_path_id) + ' AND comm_id = ' + str(self.comm_id) + ' AND thread_id = ' + str(self.thread_id) +
-                                 ' ORDER BY call_path_id')
-               if not ret:
-                       raise Exception("Query failed: " + query.lastError().text())
-               last_call_path_id = 0
-               name = ""
-               dso = ""
-               count = 0
-               branch_count = 0
-               total_branch_count = 0
-               time = 0
-               total_time = 0
-               while query.next():
-                       if query.value(1) == last_call_path_id:
-                               count += 1
-                               branch_count += query.value(2)
-                               time += query.value(4) - query.value(3)
-                       else:
-                               if count:
-                                       self.addChild(last_call_path_id, name, dso, count, time, branch_count)
-                               last_call_path_id = query.value(1)
-                               name = query.value(5)
-                               dso = query.value(6)
-                               count = 1
-                               total_branch_count += branch_count
-                               total_time += time
-                               branch_count = query.value(2)
-                               time = query.value(4) - query.value(3)
-               if count:
-                       self.addChild(last_call_path_id, name, dso, count, time, branch_count)
-               total_branch_count += branch_count
-               total_time += time
-               # Top level does not have time or branch count, so fix that here
-               if total_branch_count > self.branch_count:
-                       self.branch_count = total_branch_count
-                       if self.branch_count:
-                               for child_item in self.child_items:
-                                       child_item.data[6] = self.branchPercent(child_item.branch_count)
-               if total_time > self.time:
-                       self.time = total_time
-                       if self.time:
-                               for child_item in self.child_items:
-                                       child_item.data[4] = self.timePercent(child_item.time)
-
-       def childCount(self):
-               if not self.query_done:
-                       self.selectCalls()
-               return self.child_count
-
-       def columnCount(self):
-               return 7
-
-       def columnHeader(self, column):
-               headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
-               return headers[column]
-
-       def getData(self, column):
-               return self.data[column]
-
-class TreeModel(QAbstractItemModel):
-
-       def __init__(self, db, parent=None):
-               super(TreeModel, self).__init__(parent)
-               self.db = db
-               self.root = TreeItem(db, 0, None)
-
-       def columnCount(self, parent):
-               return self.root.columnCount()
-
-       def rowCount(self, parent):
-               if parent.isValid():
-                       parent_item = parent.internalPointer()
-               else:
-                       parent_item = self.root
-               return parent_item.childCount()
-
-       def headerData(self, section, orientation, role):
-               if role == Qt.TextAlignmentRole:
-                       if section > 1:
-                               return Qt.AlignRight
-               if role != Qt.DisplayRole:
-                       return None
-               if orientation != Qt.Horizontal:
-                       return None
-               return self.root.columnHeader(section)
-
-       def parent(self, child):
-               child_item = child.internalPointer()
-               if child_item is self.root:
-                       return QModelIndex()
-               parent_item = child_item.getParentItem()
-               return self.createIndex(parent_item.getRow(), 0, parent_item)
-
-       def index(self, row, column, parent):
-               if parent.isValid():
-                       parent_item = parent.internalPointer()
-               else:
-                       parent_item = self.root
-               child_item = parent_item.getChildItem(row)
-               return self.createIndex(row, column, child_item)
-
-       def data(self, index, role):
-               if role == Qt.TextAlignmentRole:
-                       if index.column() > 1:
-                               return Qt.AlignRight
-               if role != Qt.DisplayRole:
-                       return None
-               index_item = index.internalPointer()
-               return index_item.getData(index.column())
-
-class MainWindow(QMainWindow):
-
-       def __init__(self, db, dbname, parent=None):
-               super(MainWindow, self).__init__(parent)
-
-               self.setObjectName("MainWindow")
-               self.setWindowTitle("Call Graph: " + dbname)
-               self.move(100, 100)
-               self.resize(800, 600)
-               style = self.style()
-               icon = style.standardIcon(QStyle.SP_MessageBoxInformation)
-               self.setWindowIcon(icon);
-
-               self.model = TreeModel(db)
-
-               self.view = QTreeView()
-               self.view.setModel(self.model)
-
-               self.setCentralWidget(self.view)
-
-if __name__ == '__main__':
-       if (len(sys.argv) < 2):
-               print >> sys.stderr, "Usage is: call-graph-from-sql.py <database name>"
-               raise Exception("Too few arguments")
-
-       dbname = sys.argv[1]
-
-       is_sqlite3 = False
-       try:
-               f = open(dbname)
-               if f.read(15) == "SQLite format 3":
-                       is_sqlite3 = True
-               f.close()
-       except:
-               pass
-
-       if is_sqlite3:
-               db = QSqlDatabase.addDatabase('QSQLITE')
-       else:
-               db = QSqlDatabase.addDatabase('QPSQL')
-               opts = dbname.split()
-               for opt in opts:
-                       if '=' in opt:
-                               opt = opt.split('=')
-                               if opt[0] == 'hostname':
-                                       db.setHostName(opt[1])
-                               elif opt[0] == 'port':
-                                       db.setPort(int(opt[1]))
-                               elif opt[0] == 'username':
-                                       db.setUserName(opt[1])
-                               elif opt[0] == 'password':
-                                       db.setPassword(opt[1])
-                               elif opt[0] == 'dbname':
-                                       dbname = opt[1]
-                       else:
-                               dbname = opt
-
-       db.setDatabaseName(dbname)
-       if not db.open():
-               raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
-
-       app = QApplication(sys.argv)
-       window = MainWindow(db, dbname)
-       window.show()
-       err = app.exec_()
-       db.close()
-       sys.exit(err)
index e46f51b1751310a5263283fa9a9b5f9227ed1e52..0564dd7377f22f098d98a254bc028949628dd824 100644 (file)
@@ -59,7 +59,7 @@ import datetime
 #      pt_example=# \q
 #
 # An example of using the database is provided by the script
-# call-graph-from-sql.py.  Refer to that script for details.
+# exported-sql-viewer.py.  Refer to that script for details.
 #
 # Tables:
 #
index e4bb82c8aba9e835ea4712e5c00d43443e1d9a9a..245caf2643ed1c4548549be6a48426e1830ab5e1 100644 (file)
@@ -40,7 +40,7 @@ import datetime
 #      sqlite> .quit
 #
 # An example of using the database is provided by the script
-# call-graph-from-sql.py.  Refer to that script for details.
+# exported-sql-viewer.py.  Refer to that script for details.
 #
 # The database structure is practically the same as created by the script
 # export-to-postgresql.py. Refer to that script for details.  A notable
diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
new file mode 100755 (executable)
index 0000000..f278ce5
--- /dev/null
@@ -0,0 +1,2615 @@
+#!/usr/bin/python2
+# SPDX-License-Identifier: GPL-2.0
+# exported-sql-viewer.py: view data from sql database
+# Copyright (c) 2014-2018, Intel Corporation.
+
+# To use this script you will need to have exported data using either the
+# export-to-sqlite.py or the export-to-postgresql.py script.  Refer to those
+# scripts for details.
+#
+# Following on from the example in the export scripts, a
+# call-graph can be displayed for the pt_example database like this:
+#
+#      python tools/perf/scripts/python/exported-sql-viewer.py pt_example
+#
+# Note that for PostgreSQL, this script supports connecting to remote databases
+# by setting hostname, port, username, password, and dbname, e.g.
+#
+#      python tools/perf/scripts/python/exported-sql-viewer.py "hostname=myhost username=myuser password=mypassword dbname=pt_example"
+#
+# The result is a GUI window with a tree representing a context-sensitive
+# call-graph.  Expanding a couple of levels of the tree and adjusting column
+# widths to suit will display something like:
+#
+#                                         Call Graph: pt_example
+# Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
+# v- ls
+#     v- 2638:2638
+#         v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
+#           |- unknown               unknown       1        13198     0.1              1              0.0
+#           >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
+#           >- _dl_init_internal     ld-2.19.so    1       448152     4.4          11094              5.3
+#           v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
+#              >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
+#              >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
+#              >- __libc_csu_init    ls            1        10354     0.1             10              0.0
+#              |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
+#              v- main               ls            1      8182043    99.6         180254             99.9
+#
+# Points to note:
+#      The top level is a command name (comm)
+#      The next level is a thread (pid:tid)
+#      Subsequent levels are functions
+#      'Count' is the number of calls
+#      'Time' is the elapsed time until the function returns
+#      Percentages are relative to the level above (see the example below)
+#      'Branch Count' is the total number of branches for that function and all
+#       functions that it calls
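+#      For example, in the tree above, _dl_start shows Time (%) 13.9 because
+#       its time (1400980 ns) is 13.9% of its parent _start's time (10074071 ns),
+#       and Branch Count (%) 9.3 because 19637 of _start's 211135 branches
+#       occurred in _dl_start and its callees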
+
+# There is also a "All branches" report, which displays branches and
+# possibly disassembly.  However, presently, the only supported disassembler is
+# Intel XED, and additionally the object code must be present in perf build ID
+# cache. To use Intel XED, libxed.so must be present. To build and install
+# libxed.so:
+#            git clone https://github.com/intelxed/mbuild.git mbuild
+#            git clone https://github.com/intelxed/xed
+#            cd xed
+#            ./mfile.py --share
+#            sudo ./mfile.py --prefix=/usr/local install
+#            sudo ldconfig
+#
+# Example report:
+#
+# Time           CPU  Command  PID    TID    Branch Type            In Tx  Branch
+# 8107675239590  2    ls       22011  22011  return from interrupt  No     ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
+#                                                                              7fab593ea260 48 89 e7                                        mov %rsp, %rdi
+# 8107675239899  2    ls       22011  22011  hardware interrupt     No         7fab593ea260 _start (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+# 8107675241900  2    ls       22011  22011  return from interrupt  No     ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea260 _start (ld-2.19.so)
+#                                                                              7fab593ea260 48 89 e7                                        mov %rsp, %rdi
+#                                                                              7fab593ea263 e8 c8 06 00 00                                  callq  0x7fab593ea930
+# 8107675241900  2    ls       22011  22011  call                   No         7fab593ea263 _start+0x3 (ld-2.19.so) -> 7fab593ea930 _dl_start (ld-2.19.so)
+#                                                                              7fab593ea930 55                                              pushq  %rbp
+#                                                                              7fab593ea931 48 89 e5                                        mov %rsp, %rbp
+#                                                                              7fab593ea934 41 57                                           pushq  %r15
+#                                                                              7fab593ea936 41 56                                           pushq  %r14
+#                                                                              7fab593ea938 41 55                                           pushq  %r13
+#                                                                              7fab593ea93a 41 54                                           pushq  %r12
+#                                                                              7fab593ea93c 53                                              pushq  %rbx
+#                                                                              7fab593ea93d 48 89 fb                                        mov %rdi, %rbx
+#                                                                              7fab593ea940 48 83 ec 68                                     sub $0x68, %rsp
+#                                                                              7fab593ea944 0f 31                                           rdtsc
+#                                                                              7fab593ea946 48 c1 e2 20                                     shl $0x20, %rdx
+#                                                                              7fab593ea94a 89 c0                                           mov %eax, %eax
+#                                                                              7fab593ea94c 48 09 c2                                        or %rax, %rdx
+#                                                                              7fab593ea94f 48 8b 05 1a 15 22 00                            movq  0x22151a(%rip), %rax
+# 8107675242232  2    ls       22011  22011  hardware interrupt     No         7fab593ea94f _dl_start+0x1f (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+# 8107675242900  2    ls       22011  22011  return from interrupt  No     ffffffff86a00a67 native_irq_return_iret ([kernel]) -> 7fab593ea94f _dl_start+0x1f (ld-2.19.so)
+#                                                                              7fab593ea94f 48 8b 05 1a 15 22 00                            movq  0x22151a(%rip), %rax
+#                                                                              7fab593ea956 48 89 15 3b 13 22 00                            movq  %rdx, 0x22133b(%rip)
+# 8107675243232  2    ls       22011  22011  hardware interrupt     No         7fab593ea956 _dl_start+0x26 (ld-2.19.so) -> ffffffff86a012e0 page_fault ([kernel])
+
+import sys
+import weakref
+import threading
+import string
+import cPickle
+import re
+import os
+from PySide.QtCore import *
+from PySide.QtGui import *
+from PySide.QtSql import *
+from decimal import *
+from ctypes import *
+from multiprocessing import Process, Array, Value, Event
+
+# Data formatting helpers
+
+def tohex(ip):
+       if ip < 0:
+               ip += 1 << 64
+       return "%x" % ip
+
+def offstr(offset):
+       if offset:
+               return "+0x%x" % offset
+       return ""
+
+def dsoname(name):
+       if name == "[kernel.kallsyms]":
+               return "[kernel]"
+       return name
+
+def findnth(s, sub, n, offs=0):
+       pos = s.find(sub)
+       if pos < 0:
+               return pos
+       if n <= 1:
+               return offs + pos
+       return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
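+
+# For example, findnth("a.b.c", ".", 2) returns 3, the index of the second
+# occurrence of "."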
+
+# Percent to one decimal place
+
+def PercentToOneDP(n, d):
+       if not d:
+               return "0.0"
+       x = (n * Decimal(100)) / d
+       return str(x.quantize(Decimal(".1"), rounding=ROUND_HALF_UP))
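+
+# For example, PercentToOneDP(1, 3) returns "33.3", and a zero denominator
+# yields "0.0" rather than raising a division error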
+
+# Helper for queries that must not fail
+
+def QueryExec(query, stmt):
+       ret = query.exec_(stmt)
+       if not ret:
+               raise Exception("Query failed: " + query.lastError().text())
+
+# Background thread
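+#
+# The task is a callable returning a (done, result) tuple.  Each result is
+# delivered via the 'done' signal, and the thread keeps calling the task until
+# it reports done, so results can be produced incrementally.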
+
+class Thread(QThread):
+
+       done = Signal(object)
+
+       def __init__(self, task, param=None, parent=None):
+               super(Thread, self).__init__(parent)
+               self.task = task
+               self.param = param
+
+       def run(self):
+               while True:
+                       if self.param is None:
+                               done, result = self.task()
+                       else:
+                               done, result = self.task(self.param)
+                       self.done.emit(result)
+                       if done:
+                               break
+
+# Tree data model
+
+class TreeModel(QAbstractItemModel):
+
+       def __init__(self, root, parent=None):
+               super(TreeModel, self).__init__(parent)
+               self.root = root
+               self.last_row_read = 0
+
+       def Item(self, parent):
+               if parent.isValid():
+                       return parent.internalPointer()
+               else:
+                       return self.root
+
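+       # A childCount() of -1 means the item has just been populated and turned
+       # out to have no children: report zero rows and emit dataChanged so the
+       # view re-evaluates the item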
+       def rowCount(self, parent):
+               result = self.Item(parent).childCount()
+               if result < 0:
+                       result = 0
+                       self.dataChanged.emit(parent, parent)
+               return result
+
+       def hasChildren(self, parent):
+               return self.Item(parent).hasChildren()
+
+       def headerData(self, section, orientation, role):
+               if role == Qt.TextAlignmentRole:
+                       return self.columnAlignment(section)
+               if role != Qt.DisplayRole:
+                       return None
+               if orientation != Qt.Horizontal:
+                       return None
+               return self.columnHeader(section)
+
+       def parent(self, child):
+               child_item = child.internalPointer()
+               if child_item is self.root:
+                       return QModelIndex()
+               parent_item = child_item.getParentItem()
+               return self.createIndex(parent_item.getRow(), 0, parent_item)
+
+       def index(self, row, column, parent):
+               child_item = self.Item(parent).getChildItem(row)
+               return self.createIndex(row, column, child_item)
+
+       def DisplayData(self, item, index):
+               return item.getData(index.column())
+
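+       # self.fetcher is provided by subclasses that page records in from the
+       # database (e.g. BranchModel below) rather than loading everything up
+       # front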
+       def FetchIfNeeded(self, row):
+               if row > self.last_row_read:
+                       self.last_row_read = row
+                       if row + 10 >= self.root.child_count:
+                               self.fetcher.Fetch(glb_chunk_sz)
+
+       def columnAlignment(self, column):
+               return Qt.AlignLeft
+
+       def columnFont(self, column):
+               return None
+
+       def data(self, index, role):
+               if role == Qt.TextAlignmentRole:
+                       return self.columnAlignment(index.column())
+               if role == Qt.FontRole:
+                       return self.columnFont(index.column())
+               if role != Qt.DisplayRole:
+                       return None
+               item = index.internalPointer()
+               return self.DisplayData(item, index)
+
+# Table data model
+
+class TableModel(QAbstractTableModel):
+
+       def __init__(self, parent=None):
+               super(TableModel, self).__init__(parent)
+               self.child_count = 0
+               self.child_items = []
+               self.last_row_read = 0
+
+       def Item(self, parent):
+               if parent.isValid():
+                       return parent.internalPointer()
+               else:
+                       return self
+
+       def rowCount(self, parent):
+               return self.child_count
+
+       def headerData(self, section, orientation, role):
+               if role == Qt.TextAlignmentRole:
+                       return self.columnAlignment(section)
+               if role != Qt.DisplayRole:
+                       return None
+               if orientation != Qt.Horizontal:
+                       return None
+               return self.columnHeader(section)
+
+       def index(self, row, column, parent):
+               return self.createIndex(row, column, self.child_items[row])
+
+       def DisplayData(self, item, index):
+               return item.getData(index.column())
+
+       def FetchIfNeeded(self, row):
+               if row > self.last_row_read:
+                       self.last_row_read = row
+                       if row + 10 >= self.child_count:
+                               self.fetcher.Fetch(glb_chunk_sz)
+
+       def columnAlignment(self, column):
+               return Qt.AlignLeft
+
+       def columnFont(self, column):
+               return None
+
+       def data(self, index, role):
+               if role == Qt.TextAlignmentRole:
+                       return self.columnAlignment(index.column())
+               if role == Qt.FontRole:
+                       return self.columnFont(index.column())
+               if role != Qt.DisplayRole:
+                       return None
+               item = index.internalPointer()
+               return self.DisplayData(item, index)
+
+# Model cache
+
+model_cache = weakref.WeakValueDictionary()
+model_cache_lock = threading.Lock()
+
+def LookupCreateModel(model_name, create_fn):
+       model_cache_lock.acquire()
+       try:
+               model = model_cache[model_name]
+       except KeyError:
+               model = None
+       if model is None:
+               model = create_fn()
+               model_cache[model_name] = model
+       model_cache_lock.release()
+       return model
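+
+# Because model_cache holds only weak references, a model is shared for as long
+# as some window is using it and is freed automatically when the last user has
+# gone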
+
+# Find bar
+
+class FindBar():
+
+       def __init__(self, parent, finder, is_reg_expr=False):
+               self.finder = finder
+               self.context = []
+               self.last_value = None
+               self.last_pattern = None
+
+               label = QLabel("Find:")
+               label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.textbox = QComboBox()
+               self.textbox.setEditable(True)
+               self.textbox.currentIndexChanged.connect(self.ValueChanged)
+
+               self.progress = QProgressBar()
+               self.progress.setRange(0, 0)
+               self.progress.hide()
+
+               if is_reg_expr:
+                       self.pattern = QCheckBox("Regular Expression")
+               else:
+                       self.pattern = QCheckBox("Pattern")
+               self.pattern.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.next_button = QToolButton()
+               self.next_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowDown))
+               self.next_button.released.connect(lambda: self.NextPrev(1))
+
+               self.prev_button = QToolButton()
+               self.prev_button.setIcon(parent.style().standardIcon(QStyle.SP_ArrowUp))
+               self.prev_button.released.connect(lambda: self.NextPrev(-1))
+
+               self.close_button = QToolButton()
+               self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
+               self.close_button.released.connect(self.Deactivate)
+
+               self.hbox = QHBoxLayout()
+               self.hbox.setContentsMargins(0, 0, 0, 0)
+
+               self.hbox.addWidget(label)
+               self.hbox.addWidget(self.textbox)
+               self.hbox.addWidget(self.progress)
+               self.hbox.addWidget(self.pattern)
+               self.hbox.addWidget(self.next_button)
+               self.hbox.addWidget(self.prev_button)
+               self.hbox.addWidget(self.close_button)
+
+               self.bar = QWidget()
+               self.bar.setLayout(self.hbox)
+               self.bar.hide()
+
+       def Widget(self):
+               return self.bar
+
+       def Activate(self):
+               self.bar.show()
+               self.textbox.setFocus()
+
+       def Deactivate(self):
+               self.bar.hide()
+
+       def Busy(self):
+               self.textbox.setEnabled(False)
+               self.pattern.hide()
+               self.next_button.hide()
+               self.prev_button.hide()
+               self.progress.show()
+
+       def Idle(self):
+               self.textbox.setEnabled(True)
+               self.progress.hide()
+               self.pattern.show()
+               self.next_button.show()
+               self.prev_button.show()
+
+       def Find(self, direction):
+               value = self.textbox.currentText()
+               pattern = self.pattern.isChecked()
+               self.last_value = value
+               self.last_pattern = pattern
+               self.finder.Find(value, direction, pattern, self.context)
+
+       def ValueChanged(self):
+               value = self.textbox.currentText()
+               pattern = self.pattern.isChecked()
+               index = self.textbox.currentIndex()
+               data = self.textbox.itemData(index)
+               # Store the pattern in the combo box to keep it with the text value
+               if data is None:
+                       self.textbox.setItemData(index, pattern)
+               else:
+                       self.pattern.setChecked(data)
+               self.Find(0)
+
+       def NextPrev(self, direction):
+               value = self.textbox.currentText()
+               pattern = self.pattern.isChecked()
+               if value != self.last_value:
+                       index = self.textbox.findText(value)
+                       # Allow for a button press before the value has been added to the combo box
+                       if index < 0:
+                               index = self.textbox.count()
+                               self.textbox.addItem(value, pattern)
+                               self.textbox.setCurrentIndex(index)
+                               return
+                       else:
+                               self.textbox.setItemData(index, pattern)
+               elif pattern != self.last_pattern:
+                       # Keep the pattern recorded in the combo box up to date
+                       index = self.textbox.currentIndex()
+                       self.textbox.setItemData(index, pattern)
+               self.Find(direction)
+
+       def NotFound(self):
+               QMessageBox.information(self.bar, "Find", "'" + self.textbox.currentText() + "' not found")
+
+# Context-sensitive call graph data model item base
+
+class CallGraphLevelItemBase(object):
+
+       def __init__(self, glb, row, parent_item):
+               self.glb = glb
+               self.row = row
+               self.parent_item = parent_item
+               self.query_done = False
+               self.child_count = 0
+               self.child_items = []
+
+       def getChildItem(self, row):
+               return self.child_items[row]
+
+       def getParentItem(self):
+               return self.parent_item
+
+       def getRow(self):
+               return self.row
+
+       def childCount(self):
+               if not self.query_done:
+                       self.Select()
+                       if not self.child_count:
+                               return -1
+               return self.child_count
+
+       def hasChildren(self):
+               if not self.query_done:
+                       return True
+               return self.child_count > 0
+
+       def getData(self, column):
+               return self.data[column]
+
+# Context-sensitive call graph data model level 2+ item base
+
+class CallGraphLevelTwoPlusItemBase(CallGraphLevelItemBase):
+
+       def __init__(self, glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item):
+               super(CallGraphLevelTwoPlusItemBase, self).__init__(glb, row, parent_item)
+               self.comm_id = comm_id
+               self.thread_id = thread_id
+               self.call_path_id = call_path_id
+               self.branch_count = branch_count
+               self.time = time
+
+       def Select(self):
+               self.query_done = True
+               query = QSqlQuery(self.glb.db)
+               QueryExec(query, "SELECT call_path_id, name, short_name, COUNT(calls.id), SUM(return_time - call_time), SUM(branch_count)"
+                                       " FROM calls"
+                                       " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+                                       " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+                                       " INNER JOIN dsos ON symbols.dso_id = dsos.id"
+                                       " WHERE parent_call_path_id = " + str(self.call_path_id) +
+                                       " AND comm_id = " + str(self.comm_id) +
+                                       " AND thread_id = " + str(self.thread_id) +
+                                       " GROUP BY call_path_id, name, short_name"
+                                       " ORDER BY call_path_id")
+               while query.next():
+                       child_item = CallGraphLevelThreeItem(self.glb, self.child_count, self.comm_id, self.thread_id, query.value(0), query.value(1), query.value(2), query.value(3), int(query.value(4)), int(query.value(5)), self)
+                       self.child_items.append(child_item)
+                       self.child_count += 1
+
+# Context-sensitive call graph data model level three item
+
+class CallGraphLevelThreeItem(CallGraphLevelTwoPlusItemBase):
+
+       def __init__(self, glb, row, comm_id, thread_id, call_path_id, name, dso, count, time, branch_count, parent_item):
+               super(CallGraphLevelThreeItem, self).__init__(glb, row, comm_id, thread_id, call_path_id, time, branch_count, parent_item)
+               dso = dsoname(dso)
+               self.data = [ name, dso, str(count), str(time), PercentToOneDP(time, parent_item.time), str(branch_count), PercentToOneDP(branch_count, parent_item.branch_count) ]
+               self.dbid = call_path_id
+
+# Context-sensitive call graph data model level two item
+
+class CallGraphLevelTwoItem(CallGraphLevelTwoPlusItemBase):
+
+       def __init__(self, glb, row, comm_id, thread_id, pid, tid, parent_item):
+               super(CallGraphLevelTwoItem, self).__init__(glb, row, comm_id, thread_id, 1, 0, 0, parent_item)
+               self.data = [str(pid) + ":" + str(tid), "", "", "", "", "", ""]
+               self.dbid = thread_id
+
+       def Select(self):
+               super(CallGraphLevelTwoItem, self).Select()
+               for child_item in self.child_items:
+                       self.time += child_item.time
+                       self.branch_count += child_item.branch_count
+               for child_item in self.child_items:
+                       child_item.data[4] = PercentToOneDP(child_item.time, self.time)
+                       child_item.data[6] = PercentToOneDP(child_item.branch_count, self.branch_count)
+
+# Context-sensitive call graph data model level one item
+
+class CallGraphLevelOneItem(CallGraphLevelItemBase):
+
+       def __init__(self, glb, row, comm_id, comm, parent_item):
+               super(CallGraphLevelOneItem, self).__init__(glb, row, parent_item)
+               self.data = [comm, "", "", "", "", "", ""]
+               self.dbid = comm_id
+
+       def Select(self):
+               self.query_done = True
+               query = QSqlQuery(self.glb.db)
+               QueryExec(query, "SELECT thread_id, pid, tid"
+                                       " FROM comm_threads"
+                                       " INNER JOIN threads ON thread_id = threads.id"
+                                       " WHERE comm_id = " + str(self.dbid))
+               while query.next():
+                       child_item = CallGraphLevelTwoItem(self.glb, self.child_count, self.dbid, query.value(0), query.value(1), query.value(2), self)
+                       self.child_items.append(child_item)
+                       self.child_count += 1
+
+# Context-sensitive call graph data model root item
+
+class CallGraphRootItem(CallGraphLevelItemBase):
+
+       def __init__(self, glb):
+               super(CallGraphRootItem, self).__init__(glb, 0, None)
+               self.dbid = 0
+               self.query_done = True
+               query = QSqlQuery(glb.db)
+               QueryExec(query, "SELECT id, comm FROM comms")
+               while query.next():
+                       if not query.value(0):
+                               continue
+                       child_item = CallGraphLevelOneItem(glb, self.child_count, query.value(0), query.value(1), self)
+                       self.child_items.append(child_item)
+                       self.child_count += 1
+
+# Context-sensitive call graph data model
+
+class CallGraphModel(TreeModel):
+
+       def __init__(self, glb, parent=None):
+               super(CallGraphModel, self).__init__(CallGraphRootItem(glb), parent)
+               self.glb = glb
+
+       def columnCount(self, parent=None):
+               return 7
+
+       def columnHeader(self, column):
+               headers = ["Call Path", "Object", "Count ", "Time (ns) ", "Time (%) ", "Branch Count ", "Branch Count (%) "]
+               return headers[column]
+
+       def columnAlignment(self, column):
+               alignment = [ Qt.AlignLeft, Qt.AlignLeft, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight, Qt.AlignRight ]
+               return alignment[column]
+
+       def FindSelect(self, value, pattern, query):
+               if pattern:
+                       # postgresql and sqlite pattern matching differences:
+                       #   postgresql LIKE is case sensitive but sqlite LIKE is not
+                       #   postgresql LIKE allows % and _ to be escaped with \ but sqlite LIKE does not
+                       #   postgresql supports ILIKE which is case insensitive
+                       #   sqlite supports GLOB (text only) which uses * and ? and is case sensitive
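+                       # For example, the user pattern "*alloc?" becomes
+                       #   LIKE '%alloc_' on postgresql and GLOB '*alloc?' on sqlite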
+                       if not self.glb.dbref.is_sqlite3:
+                               # Escape % and _
+                               s = value.replace("%", "\\%")
+                               s = s.replace("_", "\\_")
+                               # Translate * and ? into SQL LIKE pattern characters % and _
+                               trans = string.maketrans("*?", "%_")
+                               match = " LIKE '" + str(s).translate(trans) + "'"
+                       else:
+                               match = " GLOB '" + str(value) + "'"
+               else:
+                       match = " = '" + str(value) + "'"
+               QueryExec(query, "SELECT call_path_id, comm_id, thread_id"
+                                               " FROM calls"
+                                               " INNER JOIN call_paths ON calls.call_path_id = call_paths.id"
+                                               " INNER JOIN symbols ON call_paths.symbol_id = symbols.id"
+                                               " WHERE symbols.name" + match +
+                                               " GROUP BY comm_id, thread_id, call_path_id"
+                                               " ORDER BY comm_id, thread_id, call_path_id")
+
+       def FindPath(self, query):
+               # Turn the query result into a list of ids that the tree view can walk
+               # to open the tree at the right place.
+               ids = []
+               parent_id = query.value(0)
+               while parent_id:
+                       ids.insert(0, parent_id)
+                       q2 = QSqlQuery(self.glb.db)
+                       QueryExec(q2, "SELECT parent_id"
+                                       " FROM call_paths"
+                                       " WHERE id = " + str(parent_id))
+                       if not q2.next():
+                               break
+                       parent_id = q2.value(0)
+               # The call path root is not used
+               if ids[0] == 1:
+                       del ids[0]
+               ids.insert(0, query.value(2))
+               ids.insert(0, query.value(1))
+               return ids
+
+       def Found(self, query, found):
+               if found:
+                       return self.FindPath(query)
+               return []
+
+       def FindValue(self, value, pattern, query, last_value, last_pattern):
+               if last_value == value and pattern == last_pattern:
+                       found = query.first()
+               else:
+                       self.FindSelect(value, pattern, query)
+                       found = query.next()
+               return self.Found(query, found)
+
+       def FindNext(self, query):
+               found = query.next()
+               if not found:
+                       found = query.first()
+               return self.Found(query, found)
+
+       def FindPrev(self, query):
+               found = query.previous()
+               if not found:
+                       found = query.last()
+               return self.Found(query, found)
+
+       def FindThread(self, c):
+               if c.direction == 0 or c.value != c.last_value or c.pattern != c.last_pattern:
+                       ids = self.FindValue(c.value, c.pattern, c.query, c.last_value, c.last_pattern)
+               elif c.direction > 0:
+                       ids = self.FindNext(c.query)
+               else:
+                       ids = self.FindPrev(c.query)
+               return (True, ids)
+
+       def Find(self, value, direction, pattern, context, callback):
+               class Context():
+                       def __init__(self, *x):
+                               self.value, self.direction, self.pattern, self.query, self.last_value, self.last_pattern = x
+                       def Update(self, *x):
+                               self.value, self.direction, self.pattern, self.last_value, self.last_pattern = x + (self.value, self.pattern)
+               if len(context):
+                       context[0].Update(value, direction, pattern)
+               else:
+                       context.append(Context(value, direction, pattern, QSqlQuery(self.glb.db), None, None))
+               # Use a thread so the UI is not blocked during the SELECT
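+               # (the Qt.QueuedConnection ensures the result is delivered
+               #  through the event loop rather than directly from the worker
+               #  thread)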
+               thread = Thread(self.FindThread, context[0])
+               thread.done.connect(lambda ids, t=thread, c=callback: self.FindDone(t, c, ids), Qt.QueuedConnection)
+               thread.start()
+
+       def FindDone(self, thread, callback, ids):
+               callback(ids)
+
+# Vertical widget layout
+
+class VBox():
+
+       def __init__(self, w1, w2, w3=None):
+               self.vbox = QWidget()
+               self.vbox.setLayout(QVBoxLayout())
+
+               self.vbox.layout().setContentsMargins(0, 0, 0, 0)
+
+               self.vbox.layout().addWidget(w1)
+               self.vbox.layout().addWidget(w2)
+               if w3:
+                       self.vbox.layout().addWidget(w3)
+
+       def Widget(self):
+               return self.vbox
+
+# Context-sensitive call graph window
+
+class CallGraphWindow(QMdiSubWindow):
+
+       def __init__(self, glb, parent=None):
+               super(CallGraphWindow, self).__init__(parent)
+
+               self.model = LookupCreateModel("Context-Sensitive Call Graph", lambda x=glb: CallGraphModel(x))
+
+               self.view = QTreeView()
+               self.view.setModel(self.model)
+
+               for c, w in ((0, 250), (1, 100), (2, 60), (3, 70), (4, 70), (5, 100)):
+                       self.view.setColumnWidth(c, w)
+
+               self.find_bar = FindBar(self, self)
+
+               self.vbox = VBox(self.view, self.find_bar.Widget())
+
+               self.setWidget(self.vbox.Widget())
+
+               AddSubWindow(glb.mainwindow.mdi_area, self, "Context-Sensitive Call Graph")
+
+       def DisplayFound(self, ids):
+               if not len(ids):
+                       return False
+               parent = QModelIndex()
+               for dbid in ids:
+                       found = False
+                       n = self.model.rowCount(parent)
+                       for row in xrange(n):
+                               child = self.model.index(row, 0, parent)
+                               if child.internalPointer().dbid == dbid:
+                                       found = True
+                                       self.view.setCurrentIndex(child)
+                                       parent = child
+                                       break
+                       if not found:
+                               break
+               return found
+
+       def Find(self, value, direction, pattern, context):
+               self.view.setFocus()
+               self.find_bar.Busy()
+               self.model.Find(value, direction, pattern, context, self.FindDone)
+
+       def FindDone(self, ids):
+               found = True
+               if not self.DisplayFound(ids):
+                       found = False
+               self.find_bar.Idle()
+               if not found:
+                       self.find_bar.NotFound()
+
+# Child data item finder
+
+class ChildDataItemFinder():
+
+       def __init__(self, root):
+               self.root = root
+               self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (None,) * 5
+               self.rows = []
+               self.pos = 0
+
+       def FindSelect(self):
+               self.rows = []
+               if self.pattern:
+                       pattern = re.compile(self.value)
+                       for child in self.root.child_items:
+                               for column_data in child.data:
+                                       if re.search(pattern, str(column_data)) is not None:
+                                               self.rows.append(child.row)
+                                               break
+               else:
+                       for child in self.root.child_items:
+                               for column_data in child.data:
+                                       if self.value in str(column_data):
+                                               self.rows.append(child.row)
+                                               break
+
+       def FindValue(self):
+               self.pos = 0
+               if self.last_value != self.value or self.pattern != self.last_pattern:
+                       self.FindSelect()
+               if not len(self.rows):
+                       return -1
+               return self.rows[self.pos]
+
+       def FindThread(self):
+               if self.direction == 0 or self.value != self.last_value or self.pattern != self.last_pattern:
+                       row = self.FindValue()
+               elif len(self.rows):
+                       if self.direction > 0:
+                               self.pos += 1
+                               if self.pos >= len(self.rows):
+                                       self.pos = 0
+                       else:
+                               self.pos -= 1
+                               if self.pos < 0:
+                                       self.pos = len(self.rows) - 1
+                       row = self.rows[self.pos]
+               else:
+                       row = -1
+               return (True, row)
+
+       def Find(self, value, direction, pattern, context, callback):
+               self.value, self.direction, self.pattern, self.last_value, self.last_pattern = (value, direction, pattern, self.value, self.pattern)
+               # Use a thread so the UI is not blocked
+               thread = Thread(self.FindThread)
+               thread.done.connect(lambda row, t=thread, c=callback: self.FindDone(t, c, row), Qt.QueuedConnection)
+               thread.start()
+
+       def FindDone(self, thread, callback, row):
+               callback(row)
+
+# Number of database records to fetch in one go
+
+glb_chunk_sz = 10000
+
+# Size of a pickled integer big enough to hold a record size
+
+glb_nsz = 8
+
+# Background process for SQL data fetcher
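+#
+# Records are passed to the parent process through a shared circular buffer.
+# Each record is pickled and stored as a glb_nsz-byte pickled length followed
+# by the payload; a stored length of 0 tells the reader to wrap back to the
+# start of the buffer.  The shared 'head' and 'tail' values track the write
+# and read positions.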
+
+class SQLFetcherProcess():
+
+       def __init__(self, dbref, sql, buffer, head, tail, fetch_count, fetching_done, process_target, wait_event, fetched_event, prep):
+               # Need a unique connection name
+               conn_name = "SQLFetcher" + str(os.getpid())
+               self.db, dbname = dbref.Open(conn_name)
+               self.sql = sql
+               self.buffer = buffer
+               self.head = head
+               self.tail = tail
+               self.fetch_count = fetch_count
+               self.fetching_done = fetching_done
+               self.process_target = process_target
+               self.wait_event = wait_event
+               self.fetched_event = fetched_event
+               self.prep = prep
+               self.query = QSqlQuery(self.db)
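+               # The SQL may contain a $$last_id$$ placeholder that is replaced
+               # with the last id fetched, so that the statement can be
+               # re-executed to page through the records in id order.  Without
+               # the placeholder, the statement is executed only once.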
+               self.query_limit = 0 if "$$last_id$$" in sql else 2
+               self.last_id = -1
+               self.fetched = 0
+               self.more = True
+               self.local_head = self.head.value
+               self.local_tail = self.tail.value
+
+       def Select(self):
+               if self.query_limit:
+                       if self.query_limit == 1:
+                               return
+                       self.query_limit -= 1
+               stmt = self.sql.replace("$$last_id$$", str(self.last_id))
+               QueryExec(self.query, stmt)
+
+       def Next(self):
+               if not self.query.next():
+                       self.Select()
+                       if not self.query.next():
+                               return None
+               self.last_id = self.query.value(0)
+               return self.prep(self.query)
+
+       def WaitForTarget(self):
+               while True:
+                       self.wait_event.clear()
+                       target = self.process_target.value
+                       if target > self.fetched or target < 0:
+                               break
+                       self.wait_event.wait()
+               return target
+
+       def HasSpace(self, sz):
+               if self.local_tail <= self.local_head:
+                       space = len(self.buffer) - self.local_head
+                       if space > sz:
+                               return True
+                       if space >= glb_nsz:
+                               # A stored 0 (or space < glb_nsz) means there is no more data at the top of the buffer
+                               nd = cPickle.dumps(0, cPickle.HIGHEST_PROTOCOL)
+                               self.buffer[self.local_head : self.local_head + len(nd)] = nd
+                       self.local_head = 0
+               if self.local_tail - self.local_head > sz:
+                       return True
+               return False
+
+       def WaitForSpace(self, sz):
+               if self.HasSpace(sz):
+                       return
+               while True:
+                       self.wait_event.clear()
+                       self.local_tail = self.tail.value
+                       if self.HasSpace(sz):
+                               return
+                       self.wait_event.wait()
+
+       def AddToBuffer(self, obj):
+               d = cPickle.dumps(obj, cPickle.HIGHEST_PROTOCOL)
+               n = len(d)
+               nd = cPickle.dumps(n, cPickle.HIGHEST_PROTOCOL)
+               sz = n + glb_nsz
+               self.WaitForSpace(sz)
+               pos = self.local_head
+               self.buffer[pos : pos + len(nd)] = nd
+               self.buffer[pos + glb_nsz : pos + sz] = d
+               self.local_head += sz
+
+       def FetchBatch(self, batch_size):
+               fetched = 0
+               while batch_size > fetched:
+                       obj = self.Next()
+                       if obj is None:
+                               self.more = False
+                               break
+                       self.AddToBuffer(obj)
+                       fetched += 1
+               if fetched:
+                       self.fetched += fetched
+                       with self.fetch_count.get_lock():
+                               self.fetch_count.value += fetched
+                       self.head.value = self.local_head
+                       self.fetched_event.set()
+
+       def Run(self):
+               while self.more:
+                       target = self.WaitForTarget()
+                       if target < 0:
+                               break
+                       batch_size = min(glb_chunk_sz, target - self.fetched)
+                       self.FetchBatch(batch_size)
+               self.fetching_done.value = True
+               self.fetched_event.set()
+
+def SQLFetcherFn(*x):
+       process = SQLFetcherProcess(*x)
+       process.Run()
+
+# SQL data fetcher
+
+class SQLFetcher(QObject):
+
+       done = Signal(object)
+
+       def __init__(self, glb, sql, prep, process_data, parent=None):
+               super(SQLFetcher, self).__init__(parent)
+               self.process_data = process_data
+               self.more = True
+               self.target = 0
+               self.last_target = 0
+               self.fetched = 0
+               self.buffer_size = 16 * 1024 * 1024
+               self.buffer = Array(c_char, self.buffer_size, lock=False)
+               self.head = Value(c_longlong)
+               self.tail = Value(c_longlong)
+               self.local_tail = 0
+               self.fetch_count = Value(c_longlong)
+               self.fetching_done = Value(c_bool)
+               self.last_count = 0
+               self.process_target = Value(c_longlong)
+               self.wait_event = Event()
+               self.fetched_event = Event()
+               glb.AddInstanceToShutdownOnExit(self)
+               self.process = Process(target=SQLFetcherFn, args=(glb.dbref, sql, self.buffer, self.head, self.tail, self.fetch_count, self.fetching_done, self.process_target, self.wait_event, self.fetched_event, prep))
+               self.process.start()
+               self.thread = Thread(self.Thread)
+               self.thread.done.connect(self.ProcessData, Qt.QueuedConnection)
+               self.thread.start()
+
+       def Shutdown(self):
+               # Tell the thread and process to exit
+               self.process_target.value = -1
+               self.wait_event.set()
+               self.more = False
+               self.fetching_done.value = True
+               self.fetched_event.set()
+
+       def Thread(self):
+               if not self.more:
+                       return True, 0
+               while True:
+                       self.fetched_event.clear()
+                       fetch_count = self.fetch_count.value
+                       if fetch_count != self.last_count:
+                               break
+                       if self.fetching_done.value:
+                               self.more = False
+                               return True, 0
+                       self.fetched_event.wait()
+               count = fetch_count - self.last_count
+               self.last_count = fetch_count
+               self.fetched += count
+               return False, count
+
+       def Fetch(self, nr):
+               if not self.more:
+                       # -1 indicates there are no more
+                       return -1
+               result = self.fetched
+               extra = result + nr - self.target
+               if extra > 0:
+                       self.target += extra
+                       # process_target < 0 indicates shutting down
+                       if self.process_target.value >= 0:
+                               self.process_target.value = self.target
+                       self.wait_event.set()
+               return result
+
+       def RemoveFromBuffer(self):
+               pos = self.local_tail
+               if len(self.buffer) - pos < glb_nsz:
+                       pos = 0
+               n = cPickle.loads(self.buffer[pos : pos + glb_nsz])
+               if n == 0:
+                       pos = 0
+                       n = cPickle.loads(self.buffer[0 : glb_nsz])
+               pos += glb_nsz
+               obj = cPickle.loads(self.buffer[pos : pos + n])
+               self.local_tail = pos + n
+               return obj
+
+       def ProcessData(self, count):
+               for i in xrange(count):
+                       obj = self.RemoveFromBuffer()
+                       self.process_data(obj)
+               self.tail.value = self.local_tail
+               self.wait_event.set()
+               self.done.emit(count)
+
+# Fetch more records bar
+
+class FetchMoreRecordsBar():
+
+       def __init__(self, model, parent):
+               self.model = model
+
+               self.label = QLabel("Number of records (x " + "{:,}".format(glb_chunk_sz) + ") to fetch:")
+               self.label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.fetch_count = QSpinBox()
+               self.fetch_count.setRange(1, 1000000)
+               self.fetch_count.setValue(10)
+               self.fetch_count.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.fetch = QPushButton("Go!")
+               self.fetch.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+               self.fetch.released.connect(self.FetchMoreRecords)
+
+               self.progress = QProgressBar()
+               self.progress.setRange(0, 100)
+               self.progress.hide()
+
+               self.done_label = QLabel("All records fetched")
+               self.done_label.hide()
+
+               self.spacer = QLabel("")
+
+               self.close_button = QToolButton()
+               self.close_button.setIcon(parent.style().standardIcon(QStyle.SP_DockWidgetCloseButton))
+               self.close_button.released.connect(self.Deactivate)
+
+               self.hbox = QHBoxLayout()
+               self.hbox.setContentsMargins(0, 0, 0, 0)
+
+               self.hbox.addWidget(self.label)
+               self.hbox.addWidget(self.fetch_count)
+               self.hbox.addWidget(self.fetch)
+               self.hbox.addWidget(self.spacer)
+               self.hbox.addWidget(self.progress)
+               self.hbox.addWidget(self.done_label)
+               self.hbox.addWidget(self.close_button)
+
+               self.bar = QWidget()
+               self.bar.setLayout(self.hbox)
+               self.bar.show()
+
+               self.in_progress = False
+               self.model.progress.connect(self.Progress)
+
+               self.done = False
+
+               if not model.HasMoreRecords():
+                       self.Done()
+
+       def Widget(self):
+               return self.bar
+
+       def Activate(self):
+               self.bar.show()
+               self.fetch.setFocus()
+
+       def Deactivate(self):
+               self.bar.hide()
+
+       def Enable(self, enable):
+               self.fetch.setEnabled(enable)
+               self.fetch_count.setEnabled(enable)
+
+       def Busy(self):
+               self.Enable(False)
+               self.fetch.hide()
+               self.spacer.hide()
+               self.progress.show()
+
+       def Idle(self):
+               self.in_progress = False
+               self.Enable(True)
+               self.progress.hide()
+               self.fetch.show()
+               self.spacer.show()
+
+       def Target(self):
+               return self.fetch_count.value() * glb_chunk_sz
+
+       def Done(self):
+               self.done = True
+               self.Idle()
+               self.label.hide()
+               self.fetch_count.hide()
+               self.fetch.hide()
+               self.spacer.hide()
+               self.done_label.show()
+
+       def Progress(self, count):
+               if self.in_progress:
+                       if count:
+                               percent = ((count - self.start) * 100) / self.Target()
+                               if percent >= 100:
+                                       self.Idle()
+                               else:
+                                       self.progress.setValue(percent)
+               if not count:
+                       # Count value of zero means no more records
+                       self.Done()
+
+       def FetchMoreRecords(self):
+               if self.done:
+                       return
+               self.progress.setValue(0)
+               self.Busy()
+               self.in_progress = True
+               self.start = self.model.FetchMoreRecords(self.Target())
+
+# Branch data model level two item
+
+class BranchLevelTwoItem():
+
+       def __init__(self, row, text, parent_item):
+               self.row = row
+               self.parent_item = parent_item
+               self.data = [""] * 8
+               self.data[7] = text
+               self.level = 2
+
+       def getParentItem(self):
+               return self.parent_item
+
+       def getRow(self):
+               return self.row
+
+       def childCount(self):
+               return 0
+
+       def hasChildren(self):
+               return False
+
+       def getData(self, column):
+               return self.data[column]
+
+# Branch data model level one item
+
+class BranchLevelOneItem():
+
+       def __init__(self, glb, row, data, parent_item):
+               self.glb = glb
+               self.row = row
+               self.parent_item = parent_item
+               self.child_count = 0
+               self.child_items = []
+               self.data = data[1:]
+               self.dbid = data[0]
+               self.level = 1
+               self.query_done = False
+
+       def getChildItem(self, row):
+               return self.child_items[row]
+
+       def getParentItem(self):
+               return self.parent_item
+
+       def getRow(self):
+               return self.row
+
+       def Select(self):
+               self.query_done = True
+
+               if not self.glb.have_disassembler:
+                       return
+
+               query = QSqlQuery(self.glb.db)
+
+               QueryExec(query, "SELECT cpu, to_dso_id, to_symbol_id, to_sym_offset, short_name, long_name, build_id, sym_start, to_ip"
+                                 " FROM samples"
+                                 " INNER JOIN dsos ON samples.to_dso_id = dsos.id"
+                                 " INNER JOIN symbols ON samples.to_symbol_id = symbols.id"
+                                 " WHERE samples.id = " + str(self.dbid))
+               if not query.next():
+                       return
+               cpu = query.value(0)
+               dso = query.value(1)
+               sym = query.value(2)
+               if dso == 0 or sym == 0:
+                       return
+               off = query.value(3)
+               short_name = query.value(4)
+               long_name = query.value(5)
+               build_id = query.value(6)
+               sym_start = query.value(7)
+               ip = query.value(8)
+
+               QueryExec(query, "SELECT samples.dso_id, symbol_id, sym_offset, sym_start"
+                                 " FROM samples"
+                                 " INNER JOIN symbols ON samples.symbol_id = symbols.id"
+                                 " WHERE samples.id > " + str(self.dbid) + " AND cpu = " + str(cpu) +
+                                 " ORDER BY samples.id"
+                                 " LIMIT 1")
+               if not query.next():
+                       return
+               if query.value(0) != dso:
+                       # Cannot disassemble from one dso to another
+                       return
+               bsym = query.value(1)
+               boff = query.value(2)
+               bsym_start = query.value(3)
+               if bsym == 0:
+                       return
+               tot = bsym_start + boff + 1 - sym_start - off
+               if tot <= 0 or tot > 16384:
+                       return
+
+               inst = self.glb.disassembler.Instruction()
+               f = self.glb.FileFromNamesAndBuildId(short_name, long_name, build_id)
+               if not f:
+                       return
+               mode = 0 if Is64Bit(f) else 1
+               self.glb.disassembler.SetMode(inst, mode)
+
+               buf_sz = tot + 16
+               buf = create_string_buffer(buf_sz)
+               f.seek(sym_start + off)
+               buf.value = f.read(buf_sz)
+               buf_ptr = addressof(buf)
+               i = 0
+               while tot > 0:
+                       cnt, text = self.glb.disassembler.DisassembleOne(inst, buf_ptr, buf_sz, ip)
+                       if cnt:
+                               byte_str = tohex(ip).rjust(16)
+                               for k in xrange(cnt):
+                                       byte_str += " %02x" % ord(buf[i])
+                                       i += 1
+                               while k < 15:
+                                       byte_str += "   "
+                                       k += 1
+                               self.child_items.append(BranchLevelTwoItem(0, byte_str + " " + text, self))
+                               self.child_count += 1
+                       else:
+                               return
+                       buf_ptr += cnt
+                       tot -= cnt
+                       buf_sz -= cnt
+                       ip += cnt
+
+       def childCount(self):
+               if not self.query_done:
+                       self.Select()
+                       if not self.child_count:
+                               return -1
+               return self.child_count
+
+       def hasChildren(self):
+               if not self.query_done:
+                       return True
+               return self.child_count > 0
+
+       def getData(self, column):
+               return self.data[column]
+
+# Branch data model root item
+
+class BranchRootItem():
+
+       def __init__(self):
+               self.child_count = 0
+               self.child_items = []
+               self.level = 0
+
+       def getChildItem(self, row):
+               return self.child_items[row]
+
+       def getParentItem(self):
+               return None
+
+       def getRow(self):
+               return 0
+
+       def childCount(self):
+               return self.child_count
+
+       def hasChildren(self):
+               return self.child_count > 0
+
+       def getData(self, column):
+               return ""
+
+# Branch data preparation
+
+def BranchDataPrep(query):
+       data = []
+       for i in xrange(0, 8):
+               data.append(query.value(i))
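+       # Combine the branch 'from' ip, symbol, offset and dso (columns 8-11)
+       # with the 'to' equivalents (columns 12-15) into a single display string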
+       data.append(tohex(query.value(8)).rjust(16) + " " + query.value(9) + offstr(query.value(10)) +
+                       " (" + dsoname(query.value(11)) + ")" + " -> " +
+                       tohex(query.value(12)) + " " + query.value(13) + offstr(query.value(14)) +
+                       " (" + dsoname(query.value(15)) + ")")
+       return data
+
+# Branch data model
+
+class BranchModel(TreeModel):
+
+       progress = Signal(object)
+
+       def __init__(self, glb, event_id, where_clause, parent=None):
+               super(BranchModel, self).__init__(BranchRootItem(), parent)
+               self.glb = glb
+               self.event_id = event_id
+               self.more = True
+               self.populated = 0
+               sql = ("SELECT samples.id, time, cpu, comm, pid, tid, branch_types.name,"
+                       " CASE WHEN in_tx = '0' THEN 'No' ELSE 'Yes' END,"
+                       " ip, symbols.name, sym_offset, dsos.short_name,"
+                       " to_ip, to_symbols.name, to_sym_offset, to_dsos.short_name"
+                       " FROM samples"
+                       " INNER JOIN comms ON comm_id = comms.id"
+                       " INNER JOIN threads ON thread_id = threads.id"
+                       " INNER JOIN branch_types ON branch_type = branch_types.id"
+                       " INNER JOIN symbols ON symbol_id = symbols.id"
+                       " INNER JOIN symbols to_symbols ON to_symbol_id = to_symbols.id"
+                       " INNER JOIN dsos ON samples.dso_id = dsos.id"
+                       " INNER JOIN dsos AS to_dsos ON samples.to_dso_id = to_dsos.id"
+                       " WHERE samples.id > $$last_id$$" + where_clause +
+                       " AND evsel_id = " + str(self.event_id) +
+                       " ORDER BY samples.id"
+                       " LIMIT " + str(glb_chunk_sz))
+               self.fetcher = SQLFetcher(glb, sql, BranchDataPrep, self.AddSample)
+               self.fetcher.done.connect(self.Update)
+               self.fetcher.Fetch(glb_chunk_sz)
+
+       def columnCount(self, parent=None):
+               return 8
+
+       def columnHeader(self, column):
+               return ("Time", "CPU", "Command", "PID", "TID", "Branch Type", "In Tx", "Branch")[column]
+
+       def columnFont(self, column):
+               if column != 7:
+                       return None
+               return QFont("Monospace")
+
+       def DisplayData(self, item, index):
+               if item.level == 1:
+                       self.FetchIfNeeded(item.row)
+               return item.getData(index.column())
+
+       def AddSample(self, data):
+               child = BranchLevelOneItem(self.glb, self.populated, data, self.root)
+               self.root.child_items.append(child)
+               self.populated += 1
+
+       def Update(self, fetched):
+               if not fetched:
+                       self.more = False
+                       self.progress.emit(0)
+               child_count = self.root.child_count
+               count = self.populated - child_count
+               if count > 0:
+                       parent = QModelIndex()
+                       self.beginInsertRows(parent, child_count, child_count + count - 1)
+                       self.insertRows(child_count, count, parent)
+                       self.root.child_count += count
+                       self.endInsertRows()
+                       self.progress.emit(self.root.child_count)
+
+       def FetchMoreRecords(self, count):
+               current = self.root.child_count
+               if self.more:
+                       self.fetcher.Fetch(count)
+               else:
+                       self.progress.emit(0)
+               return current
+
+       def HasMoreRecords(self):
+               return self.more
+
+# Branch window
+
+class BranchWindow(QMdiSubWindow):
+
+       def __init__(self, glb, event_id, name, where_clause, parent=None):
+               super(BranchWindow, self).__init__(parent)
+
+               model_name = "Branch Events " + str(event_id)
+               if len(where_clause):
+                       model_name = where_clause + " " + model_name
+
+               self.model = LookupCreateModel(model_name, lambda: BranchModel(glb, event_id, where_clause))
+
+               self.view = QTreeView()
+               self.view.setUniformRowHeights(True)
+               self.view.setModel(self.model)
+
+               self.ResizeColumnsToContents()
+
+               self.find_bar = FindBar(self, self, True)
+
+               self.finder = ChildDataItemFinder(self.model.root)
+
+               self.fetch_bar = FetchMoreRecordsBar(self.model, self)
+
+               self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+               self.setWidget(self.vbox.Widget())
+
+               AddSubWindow(glb.mainwindow.mdi_area, self, name + " Branch Events")
+
+       def ResizeColumnToContents(self, column, n):
+               # Using the view's resizeColumnToContents() here is extremely slow
+               # so implement a crude alternative
+               mm = "MM" if column else "MMMM"
+               font = self.view.font()
+               metrics = QFontMetrics(font)
+               max_width = 0
+               for row in xrange(n):
+                       val = self.model.root.child_items[row].data[column]
+                       max_width = max(max_width, metrics.width(str(val) + mm))
+               val = self.model.columnHeader(column)
+               max_width = max(max_width, metrics.width(str(val) + mm))
+               self.view.setColumnWidth(column, max_width)
+
+       def ResizeColumnsToContents(self):
+               n = min(self.model.root.child_count, 100)
+               if n < 1:
+                       # No data yet, so connect a signal to notify when there is
+                       self.model.rowsInserted.connect(self.UpdateColumnWidths)
+                       return
+               columns = self.model.columnCount()
+               for i in xrange(columns):
+                       self.ResizeColumnToContents(i, n)
+
+       def UpdateColumnWidths(self, *x):
+               # This only needs to be done once, so disconnect the signal now
+               self.model.rowsInserted.disconnect(self.UpdateColumnWidths)
+               self.ResizeColumnsToContents()
+
+       def Find(self, value, direction, pattern, context):
+               self.view.setFocus()
+               self.find_bar.Busy()
+               self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+       def FindDone(self, row):
+               self.find_bar.Idle()
+               if row >= 0:
+                       self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+               else:
+                       self.find_bar.NotFound()
+
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDialogDataItem():
+
+       def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+               self.glb = glb
+               self.label = label
+               self.placeholder_text = placeholder_text
+               self.table_name = table_name
+               self.match_column = match_column
+               self.column_name1 = column_name1
+               self.column_name2 = column_name2
+               self.parent = parent
+
+               self.value = ""
+
+               self.widget = QLineEdit()
+               self.widget.editingFinished.connect(self.Validate)
+               self.widget.textChanged.connect(self.Invalidate)
+               self.red = False
+               self.error = ""
+               self.validated = True
+
+               self.last_id = 0
+               self.first_time = 0
+               self.last_time = 2 ** 64
+               if self.table_name == "<timeranges>":
+                       query = QSqlQuery(self.glb.db)
+                       QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+                       if query.next():
+                               self.last_id = int(query.value(0))
+                               self.last_time = int(query.value(1))
+                       QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+                       if query.next():
+                               self.first_time = int(query.value(0))
+                       if placeholder_text:
+                               placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+               if placeholder_text:
+                       self.widget.setPlaceholderText(placeholder_text)
+
+       def ValueToIds(self, value):
+               ids = []
+               query = QSqlQuery(self.glb.db)
+               stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
+               ret = query.exec_(stmt)
+               if ret:
+                       while query.next():
+                               ids.append(str(query.value(0)))
+               return ids
+
+       def IdBetween(self, query, lower_id, higher_id, order):
+               QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
+               if query.next():
+                       return True, int(query.value(0))
+               else:
+                       return False, 0
+
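+       # Binary search, by sample id, for the sample nearest to target_time,
+       # assuming times are non-decreasing with id. Gaps in the ids are
+       # skipped using IdBetween(). Returns, as a string, the first id with
+       # time >= target_time if get_floor, otherwise the last id with
+       # time <= target_time.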
+       def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
+               query = QSqlQuery(self.glb.db)
+               while True:
+                       next_id = int((lower_id + higher_id) / 2)
+                       QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+                       if not query.next():
+                               ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
+                               if not ok:
+                                       ok, dbid = self.IdBetween(query, next_id, higher_id, "")
+                                       if not ok:
+                                               return str(higher_id)
+                               next_id = dbid
+                               QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+                       next_time = int(query.value(0))
+                       if get_floor:
+                               if target_time > next_time:
+                                       lower_id = next_id
+                               else:
+                                       higher_id = next_id
+                               if higher_id <= lower_id + 1:
+                                       return str(higher_id)
+                       else:
+                               if target_time >= next_time:
+                                       lower_id = next_id
+                               else:
+                                       higher_id = next_id
+                               if higher_id <= lower_id + 1:
+                                       return str(lower_id)
+
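+       # Convert a value with a ms, us or ns suffix into an absolute
+       # timestamp in ns. Positive values are relative to the start of the
+       # trace, negative values to the end. Anything else is returned
+       # unchanged.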
+       def ConvertRelativeTime(self, val):
+               print "val ", val
+               mult = 1
+               suffix = val[-2:]
+               if suffix == "ms":
+                       mult = 1000000
+               elif suffix == "us":
+                       mult = 1000
+               elif suffix == "ns":
+                       mult = 1
+               else:
+                       return val
+               val = val[:-2].strip()
+               if not self.IsNumber(val):
+                       return val
+               val = int(val) * mult
+               if val >= 0:
+                       val += self.first_time
+               else:
+                       val += self.last_time
+               return str(val)
+
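+       # Convert a [begin, end] pair of time strings into a pair of sample
+       # ids spanning that time range. Empty strings default to the start
+       # or end of the trace.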
+       def ConvertTimeRange(self, vrange):
+               print "vrange ", vrange
+               if vrange[0] == "":
+                       vrange[0] = str(self.first_time)
+               if vrange[1] == "":
+                       vrange[1] = str(self.last_time)
+               vrange[0] = self.ConvertRelativeTime(vrange[0])
+               vrange[1] = self.ConvertRelativeTime(vrange[1])
+               print "vrange2 ", vrange
+               if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+                       return False
+               print "ok1"
+               beg_range = max(int(vrange[0]), self.first_time)
+               end_range = min(int(vrange[1]), self.last_time)
+               if beg_range > self.last_time or end_range < self.first_time:
+                       return False
+               print "ok2"
+               vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
+               vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
+               print "vrange3 ", vrange
+               return True
+
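+       # Split a time range at the separating "-", allowing for negative
+       # values and open-ended ranges, e.g. "-100us-200us" splits into
+       # ("-100us", "200us") and "-10ms-" into ("-10ms", "").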
+       def AddTimeRange(self, value, ranges):
+               print "value ", value
+               n = value.count("-")
+               if n == 1:
+                       pass
+               elif n == 2:
+                       if value.split("-")[1].strip() == "":
+                               n = 1
+               elif n == 3:
+                       n = 2
+               else:
+                       return False
+               pos = findnth(value, "-", n)
+               vrange = [value[:pos].strip(), value[pos+1:].strip()]
+               if self.ConvertTimeRange(vrange):
+                       ranges.append(vrange)
+                       return True
+               return False
+
+       def InvalidValue(self, value):
+               self.value = ""
+               palette = QPalette()
+               palette.setColor(QPalette.Text, Qt.red)
+               self.widget.setPalette(palette)
+               self.red = True
+               self.error = self.label + " invalid value '" + value + "'"
+               self.parent.ShowMessage(self.error)
+
+       def IsNumber(self, value):
+               try:
+                       x = int(value)
+               except:
+                       x = 0
+               return str(x) == value
+
+       def Invalidate(self):
+               self.validated = False
+
+       def Validate(self):
+               input_string = self.widget.text()
+               self.validated = True
+               if self.red:
+                       palette = QPalette()
+                       self.widget.setPalette(palette)
+                       self.red = False
+               if not len(input_string.strip()):
+                       self.error = ""
+                       self.value = ""
+                       return
+               if self.table_name == "<timeranges>":
+                       ranges = []
+                       for value in [x.strip() for x in input_string.split(",")]:
+                               if not self.AddTimeRange(value, ranges):
+                                       return self.InvalidValue(value)
+                       ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+                       self.value = " OR ".join(ranges)
+               elif self.table_name == "<ranges>":
+                       singles = []
+                       ranges = []
+                       for value in [x.strip() for x in input_string.split(",")]:
+                               if "-" in value:
+                                       vrange = value.split("-")
+                                       if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+                                               return self.InvalidValue(value)
+                                       ranges.append(vrange)
+                               else:
+                                       if not self.IsNumber(value):
+                                               return self.InvalidValue(value)
+                                       singles.append(value)
+                       ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+                       if len(singles):
+                               ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
+                       self.value = " OR ".join(ranges)
+               elif self.table_name:
+                       all_ids = []
+                       for value in [x.strip() for x in input_string.split(",")]:
+                               ids = self.ValueToIds(value)
+                               if len(ids):
+                                       all_ids.extend(ids)
+                               else:
+                                       return self.InvalidValue(value)
+                       self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+                       if self.column_name2:
+                               self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+               else:
+                       self.value = input_string.strip()
+               self.error = ""
+               self.parent.ClearMessage()
+
+       def IsValid(self):
+               if not self.validated:
+                       self.Validate()
+               if len(self.error):
+                       self.parent.ShowMessage(self.error)
+                       return False
+               return True
+
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(QDialog):
+
+       def __init__(self, glb, parent=None):
+               super(SelectedBranchDialog, self).__init__(parent)
+
+               self.glb = glb
+
+               self.name = ""
+               self.where_clause = ""
+
+               self.setWindowTitle("Selected Branches")
+               self.setMinimumWidth(600)
+
+               items = (
+                       ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
+                       ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
+                       ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
+                       ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
+                       ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
+                       ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
+                       ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
+                       ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
+                       ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
+                       )
+               self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+
+               self.grid = QGridLayout()
+
+               for row in xrange(len(self.data_items)):
+                       self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
+                       self.grid.addWidget(self.data_items[row].widget, row, 1)
+
+               self.status = QLabel()
+
+               self.ok_button = QPushButton("Ok", self)
+               self.ok_button.setDefault(True)
+               self.ok_button.released.connect(self.Ok)
+               self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.cancel_button = QPushButton("Cancel", self)
+               self.cancel_button.released.connect(self.reject)
+               self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+               self.hbox = QHBoxLayout()
+               #self.hbox.addStretch()
+               self.hbox.addWidget(self.status)
+               self.hbox.addWidget(self.ok_button)
+               self.hbox.addWidget(self.cancel_button)
+
+               self.vbox = QVBoxLayout()
+               self.vbox.addLayout(self.grid)
+               self.vbox.addLayout(self.hbox)
+
+               self.setLayout(self.vbox)
+
+       def Ok(self):
+               self.name = self.data_items[0].value
+               if not self.name:
+                       self.ShowMessage("Report name is required")
+                       return
+               for d in self.data_items:
+                       if not d.IsValid():
+                               return
+               for d in self.data_items[1:]:
+                       if len(d.value):
+                               if len(self.where_clause):
+                                       self.where_clause += " AND "
+                               self.where_clause += d.value
+               if len(self.where_clause):
+                       self.where_clause = " AND ( " + self.where_clause + " ) "
+               else:
+                       self.ShowMessage("No selection")
+                       return
+               self.accept()
+
+       def ShowMessage(self, msg):
+               self.status.setText("<font color=#FF0000>" + msg)
+
+       def ClearMessage(self):
+               self.status.setText("")
+
+# Event list
+
+def GetEventList(db):
+       events = []
+       query = QSqlQuery(db)
+       QueryExec(query, "SELECT name FROM selected_events WHERE id > 0 ORDER BY id")
+       while query.next():
+               events.append(query.value(0))
+       return events
+
+# SQL data preparation
+
+def SQLTableDataPrep(query, count):
+       data = []
+       for i in xrange(count):
+               data.append(query.value(i))
+       return data
+
+# SQL table data model item
+
+class SQLTableItem():
+
+       def __init__(self, row, data):
+               self.row = row
+               self.data = data
+
+       def getData(self, column):
+               return self.data[column]
+
+# SQL table data model
+
+class SQLTableModel(TableModel):
+
+       progress = Signal(object)
+
+       def __init__(self, glb, sql, column_count, parent=None):
+               super(SQLTableModel, self).__init__(parent)
+               self.glb = glb
+               self.more = True
+               self.populated = 0
+               self.fetcher = SQLFetcher(glb, sql, lambda x, y=column_count: SQLTableDataPrep(x, y), self.AddSample)
+               self.fetcher.done.connect(self.Update)
+               self.fetcher.Fetch(glb_chunk_sz)
+
+       def DisplayData(self, item, index):
+               self.FetchIfNeeded(item.row)
+               return item.getData(index.column())
+
+       def AddSample(self, data):
+               child = SQLTableItem(self.populated, data)
+               self.child_items.append(child)
+               self.populated += 1
+
+       def Update(self, fetched):
+               if not fetched:
+                       self.more = False
+                       self.progress.emit(0)
+               child_count = self.child_count
+               count = self.populated - child_count
+               if count > 0:
+                       parent = QModelIndex()
+                       self.beginInsertRows(parent, child_count, child_count + count - 1)
+                       self.insertRows(child_count, count, parent)
+                       self.child_count += count
+                       self.endInsertRows()
+                       self.progress.emit(self.child_count)
+
+       def FetchMoreRecords(self, count):
+               current = self.child_count
+               if self.more:
+                       self.fetcher.Fetch(count)
+               else:
+                       self.progress.emit(0)
+               return current
+
+       def HasMoreRecords(self):
+               return self.more
+
+# SQL automatic table data model
+
+class SQLAutoTableModel(SQLTableModel):
+
+       def __init__(self, glb, table_name, parent=None):
+               sql = "SELECT * FROM " + table_name + " WHERE id > $$last_id$$ ORDER BY id LIMIT " + str(glb_chunk_sz)
+               if table_name == "comm_threads_view":
+                       # For now, comm_threads_view has no id column
+                       sql = "SELECT * FROM " + table_name + " WHERE comm_id > $$last_id$$ ORDER BY comm_id LIMIT " + str(glb_chunk_sz)
+               self.column_headers = []
+               query = QSqlQuery(glb.db)
+               if glb.dbref.is_sqlite3:
+                       QueryExec(query, "PRAGMA table_info(" + table_name + ")")
+                       while query.next():
+                               self.column_headers.append(query.value(1))
+                       if table_name == "sqlite_master":
+                               sql = "SELECT * FROM " + table_name
+               else:
+                       if table_name[:19] == "information_schema.":
+                               sql = "SELECT * FROM " + table_name
+                               select_table_name = table_name[19:]
+                               schema = "information_schema"
+                       else:
+                               select_table_name = table_name
+                               schema = "public"
+                       QueryExec(query, "SELECT column_name FROM information_schema.columns WHERE table_schema = '" + schema + "' and table_name = '" + select_table_name + "'")
+                       while query.next():
+                               self.column_headers.append(query.value(0))
+               super(SQLAutoTableModel, self).__init__(glb, sql, len(self.column_headers), parent)
+
+       def columnCount(self, parent=None):
+               return len(self.column_headers)
+
+       def columnHeader(self, column):
+               return self.column_headers[column]
+
+# Base class for custom ResizeColumnsToContents
+
+class ResizeColumnsToContentsBase(QObject):
+
+       def __init__(self, parent=None):
+               super(ResizeColumnsToContentsBase, self).__init__(parent)
+
+       def ResizeColumnToContents(self, column, n):
+               # Using the view's resizeColumnToContents() here is extremely slow
+               # so implement a crude alternative
+               font = self.view.font()
+               metrics = QFontMetrics(font)
+               max_width = 0
+               for row in xrange(n):
+                       val = self.data_model.child_items[row].data[column]
+                       max_width = max(max_width, metrics.width(str(val) + "MM"))
+               val = self.data_model.columnHeader(column)
+               max_width = max(max_width, metrics.width(str(val) + "MM"))
+               self.view.setColumnWidth(column, max_width)
+
+       def ResizeColumnsToContents(self):
+               n = min(self.data_model.child_count, 100)
+               if n < 1:
+                       # No data yet, so connect a signal to notify when there is
+                       self.data_model.rowsInserted.connect(self.UpdateColumnWidths)
+                       return
+               columns = self.data_model.columnCount()
+               for i in xrange(columns):
+                       self.ResizeColumnToContents(i, n)
+
+       def UpdateColumnWidths(self, *x):
+               # This only needs to be done once, so disconnect the signal now
+               self.data_model.rowsInserted.disconnect(self.UpdateColumnWidths)
+               self.ResizeColumnsToContents()
+
+# Table window
+
+class TableWindow(QMdiSubWindow, ResizeColumnsToContentsBase):
+
+       def __init__(self, glb, table_name, parent=None):
+               super(TableWindow, self).__init__(parent)
+
+               self.data_model = LookupCreateModel(table_name + " Table", lambda: SQLAutoTableModel(glb, table_name))
+
+               self.model = QSortFilterProxyModel()
+               self.model.setSourceModel(self.data_model)
+
+               self.view = QTableView()
+               self.view.setModel(self.model)
+               self.view.setEditTriggers(QAbstractItemView.NoEditTriggers)
+               self.view.verticalHeader().setVisible(False)
+               self.view.sortByColumn(-1, Qt.AscendingOrder)
+               self.view.setSortingEnabled(True)
+
+               self.ResizeColumnsToContents()
+
+               self.find_bar = FindBar(self, self, True)
+
+               self.finder = ChildDataItemFinder(self.data_model)
+
+               self.fetch_bar = FetchMoreRecordsBar(self.data_model, self)
+
+               self.vbox = VBox(self.view, self.find_bar.Widget(), self.fetch_bar.Widget())
+
+               self.setWidget(self.vbox.Widget())
+
+               AddSubWindow(glb.mainwindow.mdi_area, self, table_name + " Table")
+
+       def Find(self, value, direction, pattern, context):
+               self.view.setFocus()
+               self.find_bar.Busy()
+               self.finder.Find(value, direction, pattern, context, self.FindDone)
+
+       def FindDone(self, row):
+               self.find_bar.Idle()
+               if row >= 0:
+                       self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
+               else:
+                       self.find_bar.NotFound()
+
+# Table list
+
+def GetTableList(glb):
+       tables = []
+       query = QSqlQuery(glb.db)
+       if glb.dbref.is_sqlite3:
+               QueryExec(query, "SELECT name FROM sqlite_master WHERE type IN ( 'table' , 'view' ) ORDER BY name")
+       else:
+               QueryExec(query, "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type IN ( 'BASE TABLE' , 'VIEW' ) ORDER BY table_name")
+       while query.next():
+               tables.append(query.value(0))
+       if glb.dbref.is_sqlite3:
+               tables.append("sqlite_master")
+       else:
+               tables.append("information_schema.tables")
+               tables.append("information_schema.views")
+               tables.append("information_schema.columns")
+       return tables
+
+# Action Definition
+
+def CreateAction(label, tip, callback, parent=None, shortcut=None):
+       action = QAction(label, parent)
+       if shortcut is not None:
+               action.setShortcuts(shortcut)
+       action.setStatusTip(tip)
+       action.triggered.connect(callback)
+       return action
+
+# Typical application actions
+
+def CreateExitAction(app, parent=None):
+       return CreateAction("&Quit", "Exit the application", app.closeAllWindows, parent, QKeySequence.Quit)
+
+# Typical MDI actions
+
+def CreateCloseActiveWindowAction(mdi_area):
+       return CreateAction("Cl&ose", "Close the active window", mdi_area.closeActiveSubWindow, mdi_area)
+
+def CreateCloseAllWindowsAction(mdi_area):
+       return CreateAction("Close &All", "Close all the windows", mdi_area.closeAllSubWindows, mdi_area)
+
+def CreateTileWindowsAction(mdi_area):
+       return CreateAction("&Tile", "Tile the windows", mdi_area.tileSubWindows, mdi_area)
+
+def CreateCascadeWindowsAction(mdi_area):
+       return CreateAction("&Cascade", "Cascade the windows", mdi_area.cascadeSubWindows, mdi_area)
+
+def CreateNextWindowAction(mdi_area):
+       return CreateAction("Ne&xt", "Move the focus to the next window", mdi_area.activateNextSubWindow, mdi_area, QKeySequence.NextChild)
+
+def CreatePreviousWindowAction(mdi_area):
+       return CreateAction("Pre&vious", "Move the focus to the previous window", mdi_area.activatePreviousSubWindow, mdi_area, QKeySequence.PreviousChild)
+
+# Typical MDI window menu
+
+class WindowMenu():
+
+       def __init__(self, mdi_area, menu):
+               self.mdi_area = mdi_area
+               self.window_menu = menu.addMenu("&Windows")
+               self.close_active_window = CreateCloseActiveWindowAction(mdi_area)
+               self.close_all_windows = CreateCloseAllWindowsAction(mdi_area)
+               self.tile_windows = CreateTileWindowsAction(mdi_area)
+               self.cascade_windows = CreateCascadeWindowsAction(mdi_area)
+               self.next_window = CreateNextWindowAction(mdi_area)
+               self.previous_window = CreatePreviousWindowAction(mdi_area)
+               self.window_menu.aboutToShow.connect(self.Update)
+
+       def Update(self):
+               self.window_menu.clear()
+               sub_window_count = len(self.mdi_area.subWindowList())
+               have_sub_windows = sub_window_count != 0
+               self.close_active_window.setEnabled(have_sub_windows)
+               self.close_all_windows.setEnabled(have_sub_windows)
+               self.tile_windows.setEnabled(have_sub_windows)
+               self.cascade_windows.setEnabled(have_sub_windows)
+               self.next_window.setEnabled(have_sub_windows)
+               self.previous_window.setEnabled(have_sub_windows)
+               self.window_menu.addAction(self.close_active_window)
+               self.window_menu.addAction(self.close_all_windows)
+               self.window_menu.addSeparator()
+               self.window_menu.addAction(self.tile_windows)
+               self.window_menu.addAction(self.cascade_windows)
+               self.window_menu.addSeparator()
+               self.window_menu.addAction(self.next_window)
+               self.window_menu.addAction(self.previous_window)
+               if sub_window_count == 0:
+                       return
+               self.window_menu.addSeparator()
+               nr = 1
+               for sub_window in self.mdi_area.subWindowList():
+                       label = str(nr) + " " + sub_window.name
+                       if nr < 10:
+                               label = "&" + label
+                       action = self.window_menu.addAction(label)
+                       action.setCheckable(True)
+                       action.setChecked(sub_window == self.mdi_area.activeSubWindow())
+                       # triggered passes a 'checked' argument, so absorb it
+                       # to keep the captured window number intact
+                       action.triggered.connect(lambda a=None, x=nr: self.setActiveSubWindow(x))
+                       self.window_menu.addAction(action)
+                       nr += 1
+
+       def setActiveSubWindow(self, nr):
+               self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
+
+# Help text
+
+glb_help_text = """
+<h1>Contents</h1>
+<style>
+p.c1 {
+    text-indent: 40px;
+}
+p.c2 {
+    text-indent: 80px;
+}
+</style>
+<p class=c1><a href=#reports>1. Reports</a></p>
+<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
+<p class=c2><a href=#allbranches>1.2 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c1><a href=#tables>2. Tables</a></p>
+<h1 id=reports>1. Reports</h1>
+<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+<pre>
+                                         Call Graph: pt_example
+Call Path                          Object      Count   Time(ns)  Time(%)  Branch Count   Branch Count(%)
+v- ls
+    v- 2638:2638
+        v- _start                  ld-2.19.so    1     10074071   100.0         211135            100.0
+          |- unknown               unknown       1        13198     0.1              1              0.0
+          >- _dl_start             ld-2.19.so    1      1400980    13.9          19637              9.3
+          >- _d_linit_internal     ld-2.19.so    1       448152     4.4          11094              5.3
+          v-__libc_start_main@plt  ls            1      8211741    81.5         180397             85.4
+             >- _dl_fixup          ld-2.19.so    1         7607     0.1            108              0.1
+             >- __cxa_atexit       libc-2.19.so  1        11737     0.1             10              0.0
+             >- __libc_csu_init    ls            1        10354     0.1             10              0.0
+             |- _setjmp            libc-2.19.so  1            0     0.0              4              0.0
+             v- main               ls            1      8182043    99.6         180254             99.9
+</pre>
+<h3>Points to note:</h3>
+<ul>
+<li>The top level is a command name (comm)</li>
+<li>The next level is a thread (pid:tid)</li>
+<li>Subsequent levels are functions</li>
+<li>'Count' is the number of calls</li>
+<li>'Time' is the elapsed time until the function returns</li>
+<li>Percentages are relative to the level above</li>
+<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
+</ul>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
+<h2 id=allbranches>1.2 All branches</h2>
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+<h3>Disassembly</h3>
+Open a branch to display disassembly. This only works if:
+<ol>
+<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
+<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
+The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
+One exception is kcore where the DSO long name is used (refer dsos_view on the Tables menu),
+or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
+</ol>
+<h4 id=xed>Intel XED Setup</h4>
+To use Intel XED, libxed.so must be present.  To build and install libxed.so:
+<pre>
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+</pre>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<h2 id=selectedbranches>1.3 Selected branches</h2>
+This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
+by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+<h3>1.3.1 Time ranges</h3>
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Also, negative values are relative to the end of trace.  Examples:
+<pre>
+       81073085947329-81073085958238   From 81073085947329 to 81073085958238
+       100us-200us             From 100us to 200us
+       10ms-                   From 10ms to the end
+       -100ns                  The first 100ns
+       -10ms-                  The last 10ms
+</pre>
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h1 id=tables>2. Tables</h1>
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+<p>There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
+will go to the next/previous result in id order, instead of display order.
+"""
+
+# Help window
+
+class HelpWindow(QMdiSubWindow):
+
+       def __init__(self, glb, parent=None):
+               super(HelpWindow, self).__init__(parent)
+
+               self.text = QTextBrowser()
+               self.text.setHtml(glb_help_text)
+               self.text.setReadOnly(True)
+               self.text.setOpenExternalLinks(True)
+
+               self.setWidget(self.text)
+
+               AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
+
+# Main window that only displays the help text
+
+class HelpOnlyWindow(QMainWindow):
+
+       def __init__(self, parent=None):
+               super(HelpOnlyWindow, self).__init__(parent)
+
+               self.setMinimumSize(200, 100)
+               self.resize(800, 600)
+               self.setWindowTitle("Exported SQL Viewer Help")
+               self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+               self.text = QTextBrowser()
+               self.text.setHtml(glb_help_text)
+               self.text.setReadOnly(True)
+               self.text.setOpenExternalLinks(True)
+
+               self.setCentralWidget(self.text)
+
+# Font resize
+
+def ResizeFont(widget, diff):
+       font = widget.font()
+       sz = font.pointSize()
+       font.setPointSize(sz + diff)
+       widget.setFont(font)
+
+def ShrinkFont(widget):
+       ResizeFont(widget, -1)
+
+def EnlargeFont(widget):
+       ResizeFont(widget, 1)
+
+# Unique name for sub-windows
+
+def NumberedWindowName(name, nr):
+       if nr > 1:
+               name += " <" + str(nr) + ">"
+       return name
+
+def UniqueSubWindowName(mdi_area, name):
+       nr = 1
+       while True:
+               unique_name = NumberedWindowName(name, nr)
+               ok = True
+               for sub_window in mdi_area.subWindowList():
+                       if sub_window.name == unique_name:
+                               ok = False
+                               break
+               if ok:
+                       return unique_name
+               nr += 1
+
+# Add a sub-window
+
+def AddSubWindow(mdi_area, sub_window, name):
+       unique_name = UniqueSubWindowName(mdi_area, name)
+       sub_window.setMinimumSize(200, 100)
+       sub_window.resize(800, 600)
+       sub_window.setWindowTitle(unique_name)
+       sub_window.setAttribute(Qt.WA_DeleteOnClose)
+       sub_window.setWindowIcon(sub_window.style().standardIcon(QStyle.SP_FileIcon))
+       sub_window.name = unique_name
+       mdi_area.addSubWindow(sub_window)
+       sub_window.show()
+
+# Main window
+
+class MainWindow(QMainWindow):
+
+       def __init__(self, glb, parent=None):
+               super(MainWindow, self).__init__(parent)
+
+               self.glb = glb
+
+               self.setWindowTitle("Exported SQL Viewer: " + glb.dbname)
+               self.setWindowIcon(self.style().standardIcon(QStyle.SP_ComputerIcon))
+               self.setMinimumSize(200, 100)
+
+               self.mdi_area = QMdiArea()
+               self.mdi_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
+               self.mdi_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
+
+               self.setCentralWidget(self.mdi_area)
+
+               menu = self.menuBar()
+
+               file_menu = menu.addMenu("&File")
+               file_menu.addAction(CreateExitAction(glb.app, self))
+
+               edit_menu = menu.addMenu("&Edit")
+               edit_menu.addAction(CreateAction("&Find...", "Find items", self.Find, self, QKeySequence.Find))
+               edit_menu.addAction(CreateAction("Fetch &more records...", "Fetch more records", self.FetchMoreRecords, self, [QKeySequence(Qt.Key_F8)]))
+               edit_menu.addAction(CreateAction("&Shrink Font", "Make text smaller", self.ShrinkFont, self, [QKeySequence("Ctrl+-")]))
+               edit_menu.addAction(CreateAction("&Enlarge Font", "Make text bigger", self.EnlargeFont, self, [QKeySequence("Ctrl++")]))
+
+               reports_menu = menu.addMenu("&Reports")
+               reports_menu.addAction(CreateAction("Context-Sensitive Call &Graph", "Create a new window containing a context-sensitive call graph", self.NewCallGraph, self))
+
+               self.EventMenu(GetEventList(glb.db), reports_menu)
+
+               self.TableMenu(GetTableList(glb), menu)
+
+               self.window_menu = WindowMenu(self.mdi_area, menu)
+
+               help_menu = menu.addMenu("&Help")
+               help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+
+       def Find(self):
+               win = self.mdi_area.activeSubWindow()
+               if win:
+                       try:
+                               win.find_bar.Activate()
+                       except:
+                               pass
+
+       def FetchMoreRecords(self):
+               win = self.mdi_area.activeSubWindow()
+               if win:
+                       try:
+                               win.fetch_bar.Activate()
+                       except:
+                               pass
+
+       def ShrinkFont(self):
+               win = self.mdi_area.activeSubWindow()
+               ShrinkFont(win.view)
+
+       def EnlargeFont(self):
+               win = self.mdi_area.activeSubWindow()
+               EnlargeFont(win.view)
+
+       def EventMenu(self, events, reports_menu):
+               branches_events = 0
+               for event in events:
+                       event = event.split(":")[0]
+                       if event == "branches":
+                               branches_events += 1
+               dbid = 0
+               for event in events:
+                       dbid += 1
+                       event = event.split(":")[0]
+                       if event == "branches":
+                               label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
+                               reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
+                               label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
+                               reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
+
+       def TableMenu(self, tables, menu):
+               table_menu = menu.addMenu("&Tables")
+               for table in tables:
+                       table_menu.addAction(CreateAction(table, "Create a new window containing a table view", lambda a=None, t=table: self.NewTableView(t), self))
+
+       def NewCallGraph(self):
+               CallGraphWindow(self.glb, self)
+
+       def NewBranchView(self, event_id):
+               BranchWindow(self.glb, event_id, "", "", self)
+
+       def NewSelectedBranchView(self, event_id):
+               dialog = SelectedBranchDialog(self.glb, self)
+               ret = dialog.exec_()
+               if ret:
+                       BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+
+       def NewTableView(self, table_name):
+               TableWindow(self.glb, table_name, self)
+
+       def Help(self):
+               HelpWindow(self.glb, self)
+
+# XED Disassembler
+
+class xed_state_t(Structure):
+
+       _fields_ = [
+               ("mode", c_int),
+               ("width", c_int)
+       ]
+
+class XEDInstruction():
+
+       def __init__(self, libxed):
+               # Current xed_decoded_inst_t structure is 192 bytes. Use 512 to allow for future expansion
+               xedd_t = c_byte * 512
+               self.xedd = xedd_t()
+               self.xedp = addressof(self.xedd)
+               libxed.xed_decoded_inst_zero(self.xedp)
+               self.state = xed_state_t()
+               self.statep = addressof(self.state)
+               # Buffer for disassembled instruction text
+               self.buffer = create_string_buffer(256)
+               self.bufferp = addressof(self.buffer)
+
+class LibXED():
+
+       def __init__(self):
+               try:
+                       self.libxed = CDLL("libxed.so")
+               except:
+                       self.libxed = None
+               if not self.libxed:
+                       self.libxed = CDLL("/usr/local/lib/libxed.so")
+
+               self.xed_tables_init = self.libxed.xed_tables_init
+               self.xed_tables_init.restype = None
+               self.xed_tables_init.argtypes = []
+
+               self.xed_decoded_inst_zero = self.libxed.xed_decoded_inst_zero
+               self.xed_decoded_inst_zero.restype = None
+               self.xed_decoded_inst_zero.argtypes = [ c_void_p ]
+
+               self.xed_operand_values_set_mode = self.libxed.xed_operand_values_set_mode
+               self.xed_operand_values_set_mode.restype = None
+               self.xed_operand_values_set_mode.argtypes = [ c_void_p, c_void_p ]
+
+               self.xed_decoded_inst_zero_keep_mode = self.libxed.xed_decoded_inst_zero_keep_mode
+               self.xed_decoded_inst_zero_keep_mode.restype = None
+               self.xed_decoded_inst_zero_keep_mode.argtypes = [ c_void_p ]
+
+               self.xed_decode = self.libxed.xed_decode
+               self.xed_decode.restype = c_int
+               self.xed_decode.argtypes = [ c_void_p, c_void_p, c_uint ]
+
+               self.xed_format_context = self.libxed.xed_format_context
+               self.xed_format_context.restype = c_uint
+               self.xed_format_context.argtypes = [ c_int, c_void_p, c_void_p, c_int, c_ulonglong, c_void_p, c_void_p ]
+
+               self.xed_tables_init()
+
+       def Instruction(self):
+               return XEDInstruction(self)
+
+       def SetMode(self, inst, mode):
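+               # mode is 1 for 32-bit, 0 for 64-bit (cf. Is64Bit() and its
+               # caller). The values assigned below are XED machine mode and
+               # address width values.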
+               if mode:
+                       inst.state.mode = 4 # 32-bit
+                       inst.state.width = 4 # 4 bytes
+               else:
+                       inst.state.mode = 1 # 64-bit
+                       inst.state.width = 8 # 8 bytes
+               self.xed_operand_values_set_mode(inst.xedp, inst.statep)
+
+       def DisassembleOne(self, inst, bytes_ptr, bytes_cnt, ip):
+               self.xed_decoded_inst_zero_keep_mode(inst.xedp)
+               err = self.xed_decode(inst.xedp, bytes_ptr, bytes_cnt)
+               if err:
+                       return 0, ""
+               # Use AT&T mode (2), alternative is Intel (3)
+               ok = self.xed_format_context(2, inst.xedp, inst.bufferp, sizeof(inst.buffer), ip, 0, 0)
+               if not ok:
+                       return 0, ""
+               # Return instruction length and the disassembled instruction text
+               # For now, assume the length is in byte 166
+               return inst.xedd[166], inst.buffer.value
+
+def TryOpen(file_name):
+       try:
+               return open(file_name, "rb")
+       except:
+               return None
+
+def Is64Bit(f):
+       result = sizeof(c_void_p)
+       # ELF support only
+       pos = f.tell()
+       f.seek(0)
+       header = f.read(7)
+       f.seek(pos)
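+       # ELF e_ident fields: magic, EI_CLASS (1 = 32-bit, 2 = 64-bit),
+       # EI_DATA (1 = little-endian, 2 = big-endian) and EI_VERSION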
+       magic = header[0:4]
+       eclass = ord(header[4])
+       encoding = ord(header[5])
+       version = ord(header[6])
+       if magic == chr(127) + "ELF" and eclass > 0 and eclass < 3 and encoding > 0 and encoding < 3 and version == 1:
+               result = True if eclass == 2 else False
+       return result
+
+# Global data
+
+class Glb():
+
+       def __init__(self, dbref, db, dbname):
+               self.dbref = dbref
+               self.db = db
+               self.dbname = dbname
+               self.home_dir = os.path.expanduser("~")
+               self.buildid_dir = os.getenv("PERF_BUILDID_DIR")
+               if self.buildid_dir:
+                       self.buildid_dir += "/.build-id/"
+               else:
+                       self.buildid_dir = self.home_dir + "/.debug/.build-id/"
+               self.app = None
+               self.mainwindow = None
+               self.instances_to_shutdown_on_exit = weakref.WeakSet()
+               try:
+                       self.disassembler = LibXED()
+                       self.have_disassembler = True
+               except:
+                       self.have_disassembler = False
+
+       def FileFromBuildId(self, build_id):
+               file_name = self.buildid_dir + build_id[0:2] + "/" + build_id[2:] + "/elf"
+               return TryOpen(file_name)
+
+       def FileFromNamesAndBuildId(self, short_name, long_name, build_id):
+               # Assume current machine i.e. no support for virtualization
+               if short_name[0:7] == "[kernel" and os.path.basename(long_name) == "kcore":
+                       file_name = os.getenv("PERF_KCORE")
+                       f = TryOpen(file_name) if file_name else None
+                       if f:
+                               return f
+                       # For now, no special handling if long_name is /proc/kcore
+                       f = TryOpen(long_name)
+                       if f:
+                               return f
+               f = self.FileFromBuildId(build_id)
+               if f:
+                       return f
+               return None
+
+       def AddInstanceToShutdownOnExit(self, instance):
+               self.instances_to_shutdown_on_exit.add(instance)
+
+       # Shutdown any background processes or threads
+       def ShutdownInstances(self):
+               for x in self.instances_to_shutdown_on_exit:
+                       try:
+                               x.Shutdown()
+                       except:
+                               pass
+
+# Database reference
+
+class DBRef():
+
+       def __init__(self, is_sqlite3, dbname):
+               self.is_sqlite3 = is_sqlite3
+               self.dbname = dbname
+
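+       # For PostgreSQL, dbname may be a space-separated list of key=value
+       # connection options and/or a bare database name, e.g. (illustrative
+       # values only) "hostname=myhost port=5432 dbname=pt_example"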
+       def Open(self, connection_name):
+               dbname = self.dbname
+               if self.is_sqlite3:
+                       db = QSqlDatabase.addDatabase("QSQLITE", connection_name)
+               else:
+                       db = QSqlDatabase.addDatabase("QPSQL", connection_name)
+                       opts = dbname.split()
+                       for opt in opts:
+                               if "=" in opt:
+                                       opt = opt.split("=")
+                                       if opt[0] == "hostname":
+                                               db.setHostName(opt[1])
+                                       elif opt[0] == "port":
+                                               db.setPort(int(opt[1]))
+                                       elif opt[0] == "username":
+                                               db.setUserName(opt[1])
+                                       elif opt[0] == "password":
+                                               db.setPassword(opt[1])
+                                       elif opt[0] == "dbname":
+                                               dbname = opt[1]
+                               else:
+                                       dbname = opt
+
+               db.setDatabaseName(dbname)
+               if not db.open():
+                       raise Exception("Failed to open database " + dbname + " error: " + db.lastError().text())
+               return db, dbname
+
+# Main
+
+def Main():
+       if (len(sys.argv) < 2):
+               print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
+               raise Exception("Too few arguments")
+
+       dbname = sys.argv[1]
+       if dbname == "--help-only":
+               app = QApplication(sys.argv)
+               mainwindow = HelpOnlyWindow()
+               mainwindow.show()
+               err = app.exec_()
+               sys.exit(err)
+
+       is_sqlite3 = False
+       try:
+               f = open(dbname)
+               if f.read(15) == "SQLite format 3":
+                       is_sqlite3 = True
+               f.close()
+       except:
+               pass
+
+       dbref = DBRef(is_sqlite3, dbname)
+       db, dbname = dbref.Open("main")
+       glb = Glb(dbref, db, dbname)
+       app = QApplication(sys.argv)
+       glb.app = app
+       mainwindow = MainWindow(glb)
+       glb.mainwindow = mainwindow
+       mainwindow.show()
+       err = app.exec_()
+       glb.ShutdownInstances()
+       db.close()
+       sys.exit(err)
+
+if __name__ == "__main__":
+       Main()
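
For reference, a minimal standard-library sketch (no Qt) of how DBRef.Open() above interprets its dbname argument; the host and port values below are made up:

    def parse_dbref(is_sqlite3, dbname):
        if is_sqlite3:
            return ("QSQLITE", {"dbname": dbname})
        opts = {"dbname": dbname}
        for opt in dbname.split():
            if "=" in opt:
                key, _, value = opt.partition("=")
                opts[key] = value
            else:
                opts["dbname"] = opt
        return ("QPSQL", opts)

    # A SQLite database is referenced by file name:
    print(parse_dbref(True, "pt_example.db"))
    # A PostgreSQL reference is a space-separated option string; a bare
    # token is taken to be the database name:
    print(parse_dbref(False, "hostname=localhost port=5432 dbname=pt_example"))
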
index 37940665f736c7850a61fcaa107a4bdd228a7668..efd0157b9d223f14cd305c09159c5955744b3762 100644 (file)
@@ -9,7 +9,7 @@ size=112
 config=0
 sample_period=*
 sample_type=263
-read_format=0
+read_format=0|4
 disabled=1
 inherit=1
 pinned=0
index 8a33ca4f9e1f7feed87159d755ba3b4797a987b6..f0729c454f160bed941b16133b9ac437c973404d 100644 (file)
@@ -37,4 +37,3 @@ sample_freq=0
 sample_period=0
 freq=0
 write_backward=0
-sample_id_all=0
index c3b0afd67760aeff5dfe12c8a8cb36ce5b0505f3..3043130732427d145e23efd8d0ebe3a2dfaefdeb 100644 (file)
@@ -5,6 +5,7 @@ ifeq ($(SRCARCH),$(filter $(SRCARCH),x86))
 libperf-y += ioctl.o
 endif
 libperf-y += kcmp.o
+libperf-y += mount_flags.o
 libperf-y += pkey_alloc.o
 libperf-y += prctl.o
 libperf-y += sockaddr.o
index 2570152d3909781ef1a1db395527c77d7935f7a9..039c29039b2c4d752591e99d037397720497da5e 100644 (file)
@@ -24,6 +24,7 @@ struct strarray {
 }
 
 size_t strarray__scnprintf(struct strarray *sa, char *bf, size_t size, const char *intfmt, int val);
+size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, unsigned long flags);
 
 struct trace;
 struct thread;
@@ -122,6 +123,12 @@ size_t syscall_arg__scnprintf_kcmp_type(char *bf, size_t size, struct syscall_ar
 size_t syscall_arg__scnprintf_kcmp_idx(char *bf, size_t size, struct syscall_arg *arg);
 #define SCA_KCMP_IDX syscall_arg__scnprintf_kcmp_idx
 
+unsigned long syscall_arg__mask_val_mount_flags(struct syscall_arg *arg, unsigned long flags);
+#define SCAMV_MOUNT_FLAGS syscall_arg__mask_val_mount_flags
+
+size_t syscall_arg__scnprintf_mount_flags(char *bf, size_t size, struct syscall_arg *arg);
+#define SCA_MOUNT_FLAGS syscall_arg__scnprintf_mount_flags
+
 size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg);
 #define SCA_PKEY_ALLOC_ACCESS_RIGHTS syscall_arg__scnprintf_pkey_alloc_access_rights
 
index d64d049ab9915162e220f91c3f59997d8abddc15..010406500c30476b229306c0565fb074a4a337b6 100644 (file)
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/cone.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
index 9d3816815e60f48ff8ef8f6b77faf779a6d9bd1b..9aa94fd523a9c2ddddf14f6dbd3790b559d78ee6 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/drm/
 
index 5d6a477a64002ef5cef6ef6e169cd439e38b66d9..db5b9b4921137cfadf1e8f2a0caa3d9358641559 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #ifndef EFD_SEMAPHORE
 #define EFD_SEMAPHORE          1
 #endif
index 9e8900c13cb1380a38e016430e572191a534b831..e6de31674e246ea8c22574eddfe6cb82e794508d 100644 (file)
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/fcntl.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
index c4ff6ad30b0627ae1561d67404e2d208ebf6506a..cf02ae5f0ba66eca3f2adc4f8c36ef28e4bde6c2 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 
 #include "trace/beauty/beauty.h"
 #include <linux/kernel.h>
index 61850fbc85ff33d75c074e655eaa490dbe48e537..1136bde56406e6018c0c64cb801bf27d8376cdc3 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <linux/futex.h>
 
 #ifndef FUTEX_WAIT_BITSET
index 26f6b3253511e6240efb62bf958bad8c65276508..138b7d588a7083ff9161d4f52cdbdfecaec2a626 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <linux/futex.h>
 
 #ifndef FUTEX_BITSET_MATCH_ANY
index 1be3b4cf082708194ca7c4c8403219eedb924c3d..eae59ad15ce3f02902977b5c840334894ff434c6 100644 (file)
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/ioctl.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
@@ -32,6 +31,7 @@ static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
        "TCSETSW2", "TCSETSF2", "TIOCGRS48", "TIOCSRS485", "TIOCGPTN", "TIOCSPTLCK",
        "TIOCGDEV", "TCSETX", "TCSETXF", "TCSETXW", "TIOCSIG", "TIOCVHANGUP", "TIOCGPKT",
        "TIOCGPTLCK", [_IOC_NR(TIOCGEXCL)] = "TIOCGEXCL", "TIOCGPTPEER",
+       "TIOCGISO7816", "TIOCSISO7816",
        [_IOC_NR(FIONCLEX)] = "FIONCLEX", "FIOCLEX", "FIOASYNC", "TIOCSERCONFIG",
        "TIOCSERGWILD", "TIOCSERSWILD", "TIOCGLCKTRMIOS", "TIOCSLCKTRMIOS",
        "TIOCSERGSTRUCT", "TIOCSERGETLSR", "TIOCSERGETMULTI", "TIOCSERSETMULTI",
index f62040eb9d5c5cebb8685509935c52fec7f6562c..b276a274f2030b0dd66637a498fa2086f8c8926c 100644 (file)
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/kcmp.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
index a3c304caa336572baaa59d48b684b7413918429a..df8b17486d575c8564f33b3522a675ce1b4372d5 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
index c4699fd46bb64a3230232e452b366551a17c3609..4ce54f5bf7564522036f28475ec4de4a3e94fd26 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
index 431639eb4d29a9c35011f6d11d20d46ec92d9b09..4527d290cdfc6499510c18fdb9bf0e066decdbc1 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
 
index 9f68077b241b9c59475cb5d8403e0002f06769a3..c534bd96ef5c9dc8a8d762de61cb73a3537ddf4c 100644 (file)
@@ -1,5 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <uapi/linux/mman.h>
+#include <linux/log2.h>
 
 static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
                                               struct syscall_arg *arg)
@@ -30,50 +31,23 @@ static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
 
 #define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
 
+static size_t mmap__scnprintf_flags(unsigned long flags, char *bf, size_t size)
+{
+#include "trace/beauty/generated/mmap_flags_array.c"
+       static DEFINE_STRARRAY(mmap_flags);
+
+       return strarray__scnprintf_flags(&strarray__mmap_flags, bf, size, flags);
+}
+
 static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
                                                struct syscall_arg *arg)
 {
-       int printed = 0, flags = arg->val;
+       unsigned long flags = arg->val;
 
        if (flags & MAP_ANONYMOUS)
                arg->mask |= (1 << 4) | (1 << 5); /* Mask 4th ('fd') and 5th ('offset') args, ignored */
 
-#define        P_MMAP_FLAG(n) \
-       if (flags & MAP_##n) { \
-               printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
-               flags &= ~MAP_##n; \
-       }
-
-       P_MMAP_FLAG(SHARED);
-       P_MMAP_FLAG(PRIVATE);
-#ifdef MAP_32BIT
-       P_MMAP_FLAG(32BIT);
-#endif
-       P_MMAP_FLAG(ANONYMOUS);
-       P_MMAP_FLAG(DENYWRITE);
-       P_MMAP_FLAG(EXECUTABLE);
-       P_MMAP_FLAG(FILE);
-       P_MMAP_FLAG(FIXED);
-#ifdef MAP_FIXED_NOREPLACE
-       P_MMAP_FLAG(FIXED_NOREPLACE);
-#endif
-       P_MMAP_FLAG(GROWSDOWN);
-       P_MMAP_FLAG(HUGETLB);
-       P_MMAP_FLAG(LOCKED);
-       P_MMAP_FLAG(NONBLOCK);
-       P_MMAP_FLAG(NORESERVE);
-       P_MMAP_FLAG(POPULATE);
-       P_MMAP_FLAG(STACK);
-       P_MMAP_FLAG(UNINITIALIZED);
-#ifdef MAP_SYNC
-       P_MMAP_FLAG(SYNC);
-#endif
-#undef P_MMAP_FLAG
-
-       if (flags)
-               printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
-
-       return printed;
+       return mmap__scnprintf_flags(flags, bf, size);
 }
 
 #define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
diff --git a/tools/perf/trace/beauty/mmap_flags.sh b/tools/perf/trace/beauty/mmap_flags.sh
new file mode 100755 (executable)
index 0000000..22c3fdc
--- /dev/null
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+if [ $# -ne 2 ] ; then
+       [ $# -eq 1 ] && hostarch=$1 || hostarch=`uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/`
+       header_dir=tools/include/uapi/asm-generic
+       arch_header_dir=tools/arch/${hostarch}/include/uapi/asm
+else
+       header_dir=$1
+       arch_header_dir=$2
+fi
+
+arch_mman=${arch_header_dir}/mman.h
+
+# the entries excluded via egrep -vw below are masks or special cases; we want just the bits
+
+printf "static const char *mmap_flags[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MAP_([[:alnum:]_]+)[[:space:]]+(0x[[:xdigit:]]+)[[:space:]]*.*'
+egrep -q $regex ${arch_mman} && \
+(egrep $regex ${arch_mman} | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.*' ${arch_mman} &&
+(egrep $regex ${header_dir}/mman-common.h | \
+       egrep -vw 'MAP_(UNINITIALIZED|TYPE|SHARED_VALIDATE)' | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+egrep -q '#[[:space:]]*include[[:space:]]+<uapi/asm-generic/mman.h>.*' ${arch_mman} &&
+(egrep $regex ${header_dir}/mman.h | \
+       sed -r "s/$regex/\2 \1/g"       | \
+       xargs printf "\t[ilog2(%s) + 1] = \"%s\",\n")
+printf "};\n"
index d929ad7dd97be97b922ecf9d92c35c4b072a5fbe..6879d36d30048e6d08df13027071c7711ca6aa82 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
diff --git a/tools/perf/trace/beauty/mount_flags.c b/tools/perf/trace/beauty/mount_flags.c
new file mode 100644 (file)
index 0000000..712935c
--- /dev/null
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ * trace/beauty/mount_flags.c
+ *
+ *  Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ */
+
+#include "trace/beauty/beauty.h"
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <sys/mount.h>
+
+static size_t mount__scnprintf_flags(unsigned long flags, char *bf, size_t size)
+{
+#include "trace/beauty/generated/mount_flags_array.c"
+       static DEFINE_STRARRAY(mount_flags);
+
+       return strarray__scnprintf_flags(&strarray__mount_flags, bf, size, flags);
+}
+
+unsigned long syscall_arg__mask_val_mount_flags(struct syscall_arg *arg __maybe_unused, unsigned long flags)
+{
+       // do_mount in fs/namespace.c:
+       /*
+        * Pre-0.97 versions of mount() didn't have a flags word.  When the
+        * flags word was introduced its top half was required to have the
+        * magic value 0xC0ED, and this remained so until 2.4.0-test9.
+        * Therefore, if this magic number is present, it carries no
+        * information and must be discarded.
+        */
+       if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+               flags &= ~MS_MGC_MSK;
+
+       return flags;
+}
+
+size_t syscall_arg__scnprintf_mount_flags(char *bf, size_t size, struct syscall_arg *arg)
+{
+       unsigned long flags = arg->val;
+
+       return mount__scnprintf_flags(flags, bf, size);
+}
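
The magic-number stripping above is easy to check by hand; MS_MGC_VAL (0xC0ED0000) and MS_MGC_MSK (0xffff0000) are the values from the mount UAPI. A quick Python sanity check:

    MS_MGC_VAL = 0xC0ED0000
    MS_MGC_MSK = 0xFFFF0000

    def mask_mount_flags(flags):
        if (flags & MS_MGC_MSK) == MS_MGC_VAL:
            flags &= ~MS_MGC_MSK
        return flags

    # A pre-2.4.0-test9 style mount(2) passing MS_RDONLY (1) plus the magic:
    assert mask_mount_flags(0xC0ED0001) == 0x1
    # Modern calls carry no magic and are left untouched:
    assert mask_mount_flags(0x1) == 0x1
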
diff --git a/tools/perf/trace/beauty/mount_flags.sh b/tools/perf/trace/beauty/mount_flags.sh
new file mode 100755 (executable)
index 0000000..4554757
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
+
+[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+
+printf "static const char *mount_flags[] = {\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+([[:digit:]]+)[[:space:]]*.*'
+egrep $regex ${header_dir}/fs.h | egrep -v '(MSK|VERBOSE|MGC_VAL)\>' | \
+       sed -r "s/$regex/\2 \2 \1/g" | sort -n | \
+       xargs printf "\t[%s ? (ilog2(%s) + 1) : 0] = \"%s\",\n"
+regex='^[[:space:]]*#[[:space:]]*define[[:space:]]+MS_([[:alnum:]_]+)[[:space:]]+\(1<<([[:digit:]]+)\)[[:space:]]*.*'
+egrep $regex ${header_dir}/fs.h | \
+       sed -r "s/$regex/\2 \1/g" | \
+       xargs printf "\t[%s + 1] = \"%s\",\n"
+printf "};\n"
index c064d6aae659707712f2b0c177c41e0bec66e70d..1b9d6306d2749b189ecd7315aa4de34331d9bb7e 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <sys/types.h>
 #include <sys/socket.h>
 
index 6aec6178a99dcd8ecd3d01eee8f4969d5d8387bf..cc673fec9184d659c6af961f0753b06a0d0dea1b 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
index 2bafd7c995ffffe394880904b78c77ff73a1b6dc..981185c1974ba58dd2df5eb65b1efeef2c6ae211 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #ifndef PERF_FLAG_FD_NO_GROUP
 # define PERF_FLAG_FD_NO_GROUP         (1UL << 0)
 #endif
index 6492c74df928df48bca7cf49214b272eefbe13ac..9aabd9743ef6e0b34c4492c4936fda36faad08a3 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
index 0313df34283040de8648cdbba73ae5d174354b36..1a6acc46807bca7e73f395c37e62a50ed8290773 100644 (file)
@@ -1,4 +1,5 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
+
 size_t syscall_arg__scnprintf_pid(char *bf, size_t size, struct syscall_arg *arg)
 {
        int pid = arg->val;
index 2ba784a3734adb2fd7088016970c627a508511f8..1b8ed4cac8153ddfc3ff616118bbfd545e48eefb 100644 (file)
@@ -1,40 +1,36 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/pkey_alloc.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
 #include <linux/kernel.h>
 #include <linux/log2.h>
 
-static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size)
+size_t strarray__scnprintf_flags(struct strarray *sa, char *bf, size_t size, unsigned long flags)
 {
        int i, printed = 0;
 
-#include "trace/beauty/generated/pkey_alloc_access_rights_array.c"
-       static DEFINE_STRARRAY(pkey_alloc_access_rights);
-
-       if (access_rights == 0) {
-               const char *s = strarray__pkey_alloc_access_rights.entries[0];
+       if (flags == 0) {
+               const char *s = sa->entries[0];
                if (s)
                        return scnprintf(bf, size, "%s", s);
                return scnprintf(bf, size, "%d", 0);
        }
 
-       for (i = 1; i < strarray__pkey_alloc_access_rights.nr_entries; ++i) {
-               int bit = 1 << (i - 1);
+       for (i = 1; i < sa->nr_entries; ++i) {
+               unsigned long bit = 1UL << (i - 1);
 
-               if (!(access_rights & bit))
+               if (!(flags & bit))
                        continue;
 
                if (printed != 0)
                        printed += scnprintf(bf + printed, size - printed, "|");
 
-               if (strarray__pkey_alloc_access_rights.entries[i] != NULL)
-                       printed += scnprintf(bf + printed, size - printed, "%s", strarray__pkey_alloc_access_rights.entries[i]);
+               if (sa->entries[i] != NULL)
+                       printed += scnprintf(bf + printed, size - printed, "%s", sa->entries[i]);
                else
                        printed += scnprintf(bf + printed, size - printed, "%#lx", bit);
        }
@@ -42,6 +38,14 @@ static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, s
        return printed;
 }
 
+static size_t pkey_alloc__scnprintf_access_rights(int access_rights, char *bf, size_t size)
+{
+#include "trace/beauty/generated/pkey_alloc_access_rights_array.c"
+       static DEFINE_STRARRAY(pkey_alloc_access_rights);
+
+       return strarray__scnprintf_flags(&strarray__pkey_alloc_access_rights, bf, size, access_rights);
+}
+
 size_t syscall_arg__scnprintf_pkey_alloc_access_rights(char *bf, size_t size, struct syscall_arg *arg)
 {
        unsigned long cmd = arg->val;
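
A Python model of the now-generalized strarray__scnprintf_flags(): entries[0] names the zero value, entries[i] names bit 1 << (i - 1), and a set bit without a name falls back to hex. The pkey access-rights names below are for illustration:

    def strarray_flags(entries, flags):
        if flags == 0:
            return entries[0] or "0"
        names = []
        for i in range(1, len(entries)):
            bit = 1 << (i - 1)
            if flags & bit:
                names.append(entries[i] or hex(bit))
        return "|".join(names)

    rights = [None, "DISABLE_ACCESS", "DISABLE_WRITE"]
    assert strarray_flags(rights, 0) == "0"
    assert strarray_flags(rights, 3) == "DISABLE_ACCESS|DISABLE_WRITE"
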
index e0a51aeb20b21a8cde5eee01336b2b11af4fef7f..f8f1b560cf8a4807d0df7f643f0ba39a7ee5cb4d 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/asm-generic/
 
index 246130dad6c413b72ca0c961d635657facfc6c25..be7a5d3959757ec1df7c14e9221c278cb0115e18 100644 (file)
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/prctl.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
index f24722146ebef73061ad4339a639d65ac0929558..d32f8f1124af0aafdb8af94f18e84c1722759295 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
index ba5096ae76b60906166df1b2b6ac72f1b85c4a8c..48f2b5c9aa3ed8a3900da69732585883ee701943 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <sched.h>
 
 /*
index b7097fd5fed9ee72401fe651f5565b6bdb0f0d88..e36156b19c708d51de58f11396eefdc2aa8f60fe 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #ifndef SECCOMP_SET_MODE_STRICT
 #define SECCOMP_SET_MODE_STRICT 0
 #endif
index bde18a53f090945f9edd7ed517762cc4f5a8de7d..587fec545b8a6e9a43c53dd83a1c6995ce15572a 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <signal.h>
 
 static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscall_arg *arg)
index eb511bb5fbd3211697aa1821c66dde2e78f5267e..e0803b9575932420f2db3f47883af51e94e3122b 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
 
index 6818392968b24f130f82896a41f65b15471458f0..7a464a7bf91399bf6683022f330abdacbe71bfc9 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/sound/
 
index 71a79f72d9d929fdf48541b63477aafdc813d2b8..9410ad230f10144e77633f7c03c7b5c212f543fb 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 // Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 
 #include "trace/beauty/beauty.h"
index 65227269384b14aa7a10f7f91008d0e3acdfe99b..d971a2596417473fb5398ab0e0114220a6faf2ce 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/socket.c
  *
index a3cc24633bec53d272b90bc08d7d31b61b67f55f..de0f2f29017f2edbf09b236d9b9fdd6830bbae1b 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
index bca26aef4a77a8a72d75841673c35ee568e24e54..a63a9a332aa0f89a926ce166b98950f45f3bac4f 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <sys/types.h>
 #include <sys/socket.h>
 
index 5643b692af4cf60c6a791a148e1c35b30567d002..630f2760dd6667409c31d3b31cd9c0b65ad7e705 100644 (file)
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: LGPL-2.1
 /*
  * trace/beauty/statx.c
  *
  *  Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
- *
- * Released under the GPL v2. (and only v2, not any later version)
  */
 
 #include "trace/beauty/beauty.h"
index 0f6a5197d0bede8456bcf00d702ff4a51bf39b9d..439773daaf77db263edf4b7299858cc4e074b6ee 100755 (executable)
@@ -1,4 +1,5 @@
 #!/bin/sh
+# SPDX-License-Identifier: LGPL-2.1
 
 [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
 
index 8465281a093de02aa3e7e026e35eeb688634929a..42ff58ad613b8ac8557d7ddb36078a42f84a817f 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: LGPL-2.1
 #include <sys/types.h>
 #include <sys/wait.h>
 
index ecd9f9ceda77c83ed3593163df02d7221ced9e3f..b7bf201fe8a87b1b5a84a2dc9a4fdf18cd526469 100644 (file)
@@ -10,6 +10,7 @@ libperf-y += evlist.o
 libperf-y += evsel.o
 libperf-y += evsel_fprintf.o
 libperf-y += find_bit.o
+libperf-y += get_current_dir_name.o
 libperf-y += kallsyms.o
 libperf-y += levenshtein.o
 libperf-y += llvm-utils.o
index 28cd6a17491b2077815ce0d9bb86f741f7a2be6e..6936daf89dddcd61823fa95582eddd7915b14752 100644 (file)
@@ -139,6 +139,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i
 #include "arch/x86/annotate/instructions.c"
 #include "arch/powerpc/annotate/instructions.c"
 #include "arch/s390/annotate/instructions.c"
+#include "arch/sparc/annotate/instructions.c"
 
 static struct arch architectures[] = {
        {
@@ -170,6 +171,13 @@ static struct arch architectures[] = {
                        .comment_char = '#',
                },
        },
+       {
+               .name = "sparc",
+               .init = sparc__annotate_init,
+               .objdump = {
+                       .comment_char = '#',
+               },
+       },
 };
 
 static void ins__delete(struct ins_operands *ops)
index c4617bcfd521f0ecdcf1dd3b5216014419dcdc2a..72d5ba2479bf19ba1ec9e92d576fc3b8db953ca9 100644 (file)
@@ -962,16 +962,23 @@ s64 perf_event__process_auxtrace(struct perf_session *session,
 #define PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ     64
 #define PERF_ITRACE_MAX_LAST_BRANCH_SZ         1024
 
-void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
+void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
+                                   bool no_sample)
 {
-       synth_opts->instructions = true;
        synth_opts->branches = true;
        synth_opts->transactions = true;
        synth_opts->ptwrites = true;
        synth_opts->pwr_events = true;
        synth_opts->errors = true;
-       synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
-       synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
+       if (no_sample) {
+               synth_opts->period_type = PERF_ITRACE_PERIOD_INSTRUCTIONS;
+               synth_opts->period = 1;
+               synth_opts->calls = true;
+       } else {
+               synth_opts->instructions = true;
+               synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
+               synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
+       }
        synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
        synth_opts->last_branch_sz = PERF_ITRACE_DEFAULT_LAST_BRANCH_SZ;
        synth_opts->initial_skip = 0;
@@ -999,7 +1006,7 @@ int itrace_parse_synth_opts(const struct option *opt, const char *str,
        }
 
        if (!str) {
-               itrace_synth_opts__set_default(synth_opts);
+               itrace_synth_opts__set_default(synth_opts, false);
                return 0;
        }
 
index d88f6e9eb4611ab7344eb480a11c14bd2c79afe7..8e50f96d4b23de86e4939d7276e024f7c353aa81 100644 (file)
@@ -58,6 +58,7 @@ enum itrace_period_type {
 /**
  * struct itrace_synth_opts - AUX area tracing synthesis options.
  * @set: indicates whether or not options have been set
+ * @default_no_sample: Default to no sampling.
  * @inject: indicates the event (not just the sample) must be fully synthesized
  *          because 'perf inject' will write it out
  * @instructions: whether to synthesize 'instructions' events
@@ -82,6 +83,7 @@ enum itrace_period_type {
  */
 struct itrace_synth_opts {
        bool                    set;
+       bool                    default_no_sample;
        bool                    inject;
        bool                    instructions;
        bool                    branches;
@@ -528,7 +530,8 @@ int perf_event__process_auxtrace_error(struct perf_session *session,
                                       union perf_event *event);
 int itrace_parse_synth_opts(const struct option *opt, const char *str,
                            int unset);
-void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts);
+void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts,
+                                   bool no_sample);
 
 size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
 void perf_session__auxtrace_error_inc(struct perf_session *session,
index 2ae640257fdbbe897d8c398d1dfeeb406d1c9fb1..73430b73570d51f9f2f96131208ac5f2022dee37 100644 (file)
@@ -244,6 +244,27 @@ static void cs_etm__free(struct perf_session *session)
        zfree(&aux);
 }
 
+static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
+{
+       struct machine *machine;
+
+       machine = etmq->etm->machine;
+
+       if (address >= etmq->etm->kernel_start) {
+               if (machine__is_host(machine))
+                       return PERF_RECORD_MISC_KERNEL;
+               else
+                       return PERF_RECORD_MISC_GUEST_KERNEL;
+       } else {
+               if (machine__is_host(machine))
+                       return PERF_RECORD_MISC_USER;
+               else if (perf_guest)
+                       return PERF_RECORD_MISC_GUEST_USER;
+               else
+                       return PERF_RECORD_MISC_HYPERVISOR;
+       }
+}
+
 static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
                              size_t size, u8 *buffer)
 {
@@ -258,10 +279,7 @@ static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
                return -1;
 
        machine = etmq->etm->machine;
-       if (address >= etmq->etm->kernel_start)
-               cpumode = PERF_RECORD_MISC_KERNEL;
-       else
-               cpumode = PERF_RECORD_MISC_USER;
+       cpumode = cs_etm__cpu_mode(etmq, address);
 
        thread = etmq->thread;
        if (!thread) {
@@ -653,7 +671,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
        struct perf_sample sample = {.ip = 0,};
 
        event->sample.header.type = PERF_RECORD_SAMPLE;
-       event->sample.header.misc = PERF_RECORD_MISC_USER;
+       event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
        event->sample.header.size = sizeof(struct perf_event_header);
 
        sample.ip = addr;
@@ -665,7 +683,7 @@ static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
        sample.cpu = etmq->packet->cpu;
        sample.flags = 0;
        sample.insn_len = 1;
-       sample.cpumode = event->header.misc;
+       sample.cpumode = event->sample.header.misc;
 
        if (etm->synth_opts.last_branch) {
                cs_etm__copy_last_branch_rb(etmq);
@@ -706,12 +724,15 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
                u64                     nr;
                struct branch_entry     entries;
        } dummy_bs;
+       u64 ip;
+
+       ip = cs_etm__last_executed_instr(etmq->prev_packet);
 
        event->sample.header.type = PERF_RECORD_SAMPLE;
-       event->sample.header.misc = PERF_RECORD_MISC_USER;
+       event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
        event->sample.header.size = sizeof(struct perf_event_header);
 
-       sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
+       sample.ip = ip;
        sample.pid = etmq->pid;
        sample.tid = etmq->tid;
        sample.addr = cs_etm__first_executed_instr(etmq->packet);
@@ -720,7 +741,7 @@ static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
        sample.period = 1;
        sample.cpu = etmq->packet->cpu;
        sample.flags = 0;
-       sample.cpumode = PERF_RECORD_MISC_USER;
+       sample.cpumode = event->sample.header.misc;
 
        /*
         * perf report cannot handle events without a branch stack
@@ -1432,7 +1453,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                etm->synth_opts = *session->itrace_synth_opts;
        } else {
-               itrace_synth_opts__set_default(&etm->synth_opts);
+               itrace_synth_opts__set_default(&etm->synth_opts,
+                               session->itrace_synth_opts->default_no_sample);
                etm->synth_opts.callchain = false;
        }
 
index 1f3ccc36853030bc8ea58d35ea19bba8e225f732..d01b8355f4caba9440d0e0d4db4c6f241e801dc7 100644 (file)
@@ -63,6 +63,7 @@ struct perf_env {
        struct numa_node        *numa_nodes;
        struct memory_node      *memory_nodes;
        unsigned long long       memory_bsize;
+       u64                     clockid_res_ns;
 };
 
 extern struct perf_env perf_env;
index bc646185f8d91fe3d339264d6b0ea9925b66554f..e9c108a6b1c34fd2cc60ed5690c3d2fba595647c 100644 (file)
@@ -308,6 +308,7 @@ static int perf_event__synthesize_fork(struct perf_tool *tool,
        event->fork.pid  = tgid;
        event->fork.tid  = pid;
        event->fork.header.type = PERF_RECORD_FORK;
+       event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;
 
        event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
 
index be440df296150450b4e99e2ab2c5ab83175d29aa..668d2a9ef0f4b698231c7ad0388210175f0f8dab 100644 (file)
@@ -358,7 +358,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
        struct perf_evsel *pos;
 
        evlist__for_each_entry(evlist, pos) {
-               if (!perf_evsel__is_group_leader(pos) || !pos->fd)
+               if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->fd)
                        continue;
                perf_evsel__disable(pos);
        }
@@ -1810,3 +1810,30 @@ void perf_evlist__force_leader(struct perf_evlist *evlist)
                leader->forced_leader = true;
        }
 }
+
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
+                                                struct perf_evsel *evsel)
+{
+       struct perf_evsel *c2, *leader;
+       bool is_open = true;
+
+       leader = evsel->leader;
+       pr_debug("Weak group for %s/%d failed\n",
+                       leader->name, leader->nr_members);
+
+       /*
+        * for_each_group_member doesn't work here because it doesn't
+        * include the first entry.
+        */
+       evlist__for_each_entry(evsel_list, c2) {
+               if (c2 == evsel)
+                       is_open = false;
+               if (c2->leader == leader) {
+                       if (is_open)
+                               perf_evsel__close(c2);
+                       c2->leader = c2;
+                       c2->nr_members = 0;
+               }
+       }
+       return leader;
+}
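
A rough Python model of perf_evlist__reset_weak_group(): members opened before the failing event get closed, and every group member (the failing one included) becomes its own leader so the events can be retried individually:

    class Evsel(object):
        def __init__(self, name):
            self.name, self.leader, self.nr_members = name, None, 0

    def reset_weak_group(evlist, failed, close):
        leader = failed.leader
        is_open = True
        for c2 in evlist:
            if c2 is failed:
                is_open = False  # the failing member and later ones never opened
            if c2.leader is leader:
                if is_open:
                    close(c2)    # stand-in for perf_evsel__close()
                c2.leader = c2
                c2.nr_members = 0
        return leader

    evlist = [Evsel("cycles"), Evsel("instructions"), Evsel("branches")]
    for e in evlist:
        e.leader = evlist[0]
    closed = []
    reset_weak_group(evlist, evlist[1], closed.append)
    assert [e.name for e in closed] == ["cycles"]  # only the already-open member
    assert all(e.leader is e for e in evlist)
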
index dc66436add98a3c795efa3ddf0889f09f1d7abe3..9919eed6d15bc1994844187d9b55e69b746312cc 100644 (file)
@@ -312,4 +312,7 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist);
 
 void perf_evlist__force_leader(struct perf_evlist *evlist);
 
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
+                                                struct perf_evsel *evsel);
+
 #endif /* __PERF_EVLIST_H */
index 29d7b97f66fbc5ae8efe67d3880263711d0a4d1a..dbc0466db3680580da689662a7c9b4bae891cf46 100644 (file)
@@ -232,6 +232,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
        evsel->leader      = evsel;
        evsel->unit        = "";
        evsel->scale       = 1.0;
+       evsel->max_events  = ULONG_MAX;
        evsel->evlist      = NULL;
        evsel->bpf_fd      = -1;
        INIT_LIST_HEAD(&evsel->node);
@@ -793,6 +794,9 @@ static void apply_config_terms(struct perf_evsel *evsel,
                case PERF_EVSEL__CONFIG_TERM_MAX_STACK:
                        max_stack = term->val.max_stack;
                        break;
+               case PERF_EVSEL__CONFIG_TERM_MAX_EVENTS:
+                       evsel->max_events = term->val.max_events;
+                       break;
                case PERF_EVSEL__CONFIG_TERM_INHERIT:
                        /*
                         * attr->inherit should has already been set by
@@ -952,7 +956,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
                attr->sample_freq    = 0;
                attr->sample_period  = 0;
                attr->write_backward = 0;
-               attr->sample_id_all  = 0;
        }
 
        if (opts->no_samples)
@@ -1089,7 +1092,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
                attr->exclude_user   = 1;
        }
 
-       if (evsel->own_cpus)
+       if (evsel->own_cpus || evsel->unit)
                evsel->attr.read_format |= PERF_FORMAT_ID;
 
        /*
@@ -1203,16 +1206,27 @@ int perf_evsel__append_addr_filter(struct perf_evsel *evsel, const char *filter)
 
 int perf_evsel__enable(struct perf_evsel *evsel)
 {
-       return perf_evsel__run_ioctl(evsel,
-                                    PERF_EVENT_IOC_ENABLE,
-                                    0);
+       int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, 0);
+
+       if (!err)
+               evsel->disabled = false;
+
+       return err;
 }
 
 int perf_evsel__disable(struct perf_evsel *evsel)
 {
-       return perf_evsel__run_ioctl(evsel,
-                                    PERF_EVENT_IOC_DISABLE,
-                                    0);
+       int err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, 0);
+       /*
+        * We mark it disabled here so that tools that disable a event can
+        * We mark it disabled here so that tools that disable an event can
+        * ignore events after they disable it, i.e. the ring buffer may
+        * already have a few more events queued up before the kernel got
+        * the stop request.
+       if (!err)
+               evsel->disabled = true;
+
+       return err;
 }
 
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
index 4107c39f4a54a97c7d3155a34e2814696e4ad21d..3147ca76c6fc24a3c63f59a0368e7976cfa464a4 100644 (file)
@@ -46,6 +46,7 @@ enum term_type {
        PERF_EVSEL__CONFIG_TERM_STACK_USER,
        PERF_EVSEL__CONFIG_TERM_INHERIT,
        PERF_EVSEL__CONFIG_TERM_MAX_STACK,
+       PERF_EVSEL__CONFIG_TERM_MAX_EVENTS,
        PERF_EVSEL__CONFIG_TERM_OVERWRITE,
        PERF_EVSEL__CONFIG_TERM_DRV_CFG,
        PERF_EVSEL__CONFIG_TERM_BRANCH,
@@ -65,6 +66,7 @@ struct perf_evsel_config_term {
                bool    inherit;
                bool    overwrite;
                char    *branch;
+               unsigned long max_events;
        } val;
        bool weak;
 };
@@ -99,6 +101,8 @@ struct perf_evsel {
        struct perf_counts      *prev_raw_counts;
        int                     idx;
        u32                     ids;
+       unsigned long           max_events;
+       unsigned long           nr_events_printed;
        char                    *name;
        double                  scale;
        const char              *unit;
@@ -119,6 +123,7 @@ struct perf_evsel {
        bool                    snapshot;
        bool                    supported;
        bool                    needs_swap;
+       bool                    disabled;
        bool                    no_aux_samples;
        bool                    immediate;
        bool                    system_wide;
index de322d51c7fe2c2a6e821d8e10c9dc4a7f264587..b72440bf9a7967c5e4ca1e305729c10af1aaa0f4 100644 (file)
@@ -29,6 +29,12 @@ int jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_ent
 #elif defined(__powerpc__)
 #define GEN_ELF_ARCH   EM_PPC
 #define GEN_ELF_CLASS  ELFCLASS32
+#elif defined(__sparc__) && defined(__arch64__)
+#define GEN_ELF_ARCH   EM_SPARCV9
+#define GEN_ELF_CLASS  ELFCLASS64
+#elif defined(__sparc__)
+#define GEN_ELF_ARCH   EM_SPARC
+#define GEN_ELF_CLASS  ELFCLASS32
 #else
 #error "unsupported architecture"
 #endif
diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c
new file mode 100644 (file)
index 0000000..267aa60
--- /dev/null
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+//
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+#include "util.h"
+#include <unistd.h>
+#include <stdlib.h>
+
+/* Android's 'bionic' library, for one, doesn't have this */
+
+char *get_current_dir_name(void)
+{
+       char pwd[PATH_MAX];
+
+       return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
+}
+#endif // HAVE_GET_CURRENT_DIR_NAME
index 1ec1d9bc2d6356bf98d053aec68331c21142907a..4fd45be95a433e32ac8839dd6f9b402be6d34b00 100644 (file)
@@ -1034,6 +1034,13 @@ static int write_auxtrace(struct feat_fd *ff,
        return err;
 }
 
+static int write_clockid(struct feat_fd *ff,
+                        struct perf_evlist *evlist __maybe_unused)
+{
+       return do_write(ff, &ff->ph->env.clockid_res_ns,
+                       sizeof(ff->ph->env.clockid_res_ns));
+}
+
 static int cpu_cache_level__sort(const void *a, const void *b)
 {
        struct cpu_cache_level *cache_a = (struct cpu_cache_level *)a;
@@ -1508,6 +1515,12 @@ static void print_cpu_topology(struct feat_fd *ff, FILE *fp)
                fprintf(fp, "# Core ID and Socket ID information is not available\n");
 }
 
+static void print_clockid(struct feat_fd *ff, FILE *fp)
+{
+       fprintf(fp, "# clockid frequency: %"PRIu64" MHz\n",
+               ff->ph->env.clockid_res_ns * 1000);
+}
+
 static void free_event_desc(struct perf_evsel *events)
 {
        struct perf_evsel *evsel;
@@ -2531,6 +2544,15 @@ out:
        return ret;
 }
 
+static int process_clockid(struct feat_fd *ff,
+                          void *data __maybe_unused)
+{
+       if (do_read_u64(ff, &ff->ph->env.clockid_res_ns))
+               return -1;
+
+       return 0;
+}
+
 struct feature_ops {
        int (*write)(struct feat_fd *ff, struct perf_evlist *evlist);
        void (*print)(struct feat_fd *ff, FILE *fp);
@@ -2590,6 +2612,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPN(CACHE,         cache,          true),
        FEAT_OPR(SAMPLE_TIME,   sample_time,    false),
        FEAT_OPR(MEM_TOPOLOGY,  mem_topology,   true),
+       FEAT_OPR(CLOCKID,       clockid,        false)
 };
 
 struct header_print_data {
index e17903caa71daba074fbaaf04252525beac2df11..0d553ddca0a3049f941d96a0ae0d68b71ea7a49c 100644 (file)
@@ -38,6 +38,7 @@ enum {
        HEADER_CACHE,
        HEADER_SAMPLE_TIME,
        HEADER_MEM_TOPOLOGY,
+       HEADER_CLOCKID,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
index 7f0c83b6332bfd94ca92eac2723c700421cf119a..7b27d77306c229d2478d8ceea9e668a4cee5a24d 100644 (file)
@@ -269,6 +269,13 @@ static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue,
        return 0;
 }
 
+static inline u8 intel_bts_cpumode(struct intel_bts *bts, uint64_t ip)
+{
+       return machine__kernel_ip(bts->machine, ip) ?
+              PERF_RECORD_MISC_KERNEL :
+              PERF_RECORD_MISC_USER;
+}
+
 static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
                                         struct branch *branch)
 {
@@ -281,12 +288,8 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
            bts->num_events++ <= bts->synth_opts.initial_skip)
                return 0;
 
-       event.sample.header.type = PERF_RECORD_SAMPLE;
-       event.sample.header.misc = PERF_RECORD_MISC_USER;
-       event.sample.header.size = sizeof(struct perf_event_header);
-
-       sample.cpumode = PERF_RECORD_MISC_USER;
        sample.ip = le64_to_cpu(branch->from);
+       sample.cpumode = intel_bts_cpumode(bts, sample.ip);
        sample.pid = btsq->pid;
        sample.tid = btsq->tid;
        sample.addr = le64_to_cpu(branch->to);
@@ -298,6 +301,10 @@ static int intel_bts_synth_branch_sample(struct intel_bts_queue *btsq,
        sample.insn_len = btsq->intel_pt_insn.length;
        memcpy(sample.insn, btsq->intel_pt_insn.buf, INTEL_PT_INSN_BUF_SZ);
 
+       event.sample.header.type = PERF_RECORD_SAMPLE;
+       event.sample.header.misc = sample.cpumode;
+       event.sample.header.size = sizeof(struct perf_event_header);
+
        if (bts->synth_opts.inject) {
                event.sample.header.size = bts->branches_event_size;
                ret = perf_event__synthesize_sample(&event,
@@ -910,7 +917,8 @@ int intel_bts_process_auxtrace_info(union perf_event *event,
        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                bts->synth_opts = *session->itrace_synth_opts;
        } else {
-               itrace_synth_opts__set_default(&bts->synth_opts);
+               itrace_synth_opts__set_default(&bts->synth_opts,
+                               session->itrace_synth_opts->default_no_sample);
                if (session->itrace_synth_opts)
                        bts->synth_opts.thread_stack =
                                session->itrace_synth_opts->thread_stack;
index 58f6a9ceb5909c1007c4e7a95aa92230da6fff41..4503f3ca45ab48d7305260cf36e51800eecaf089 100644 (file)
@@ -1474,6 +1474,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder)
                decoder->have_calc_cyc_to_tsc = false;
                intel_pt_calc_cyc_to_tsc(decoder, true);
        }
+
+       intel_pt_log_to("Setting timestamp", decoder->timestamp);
 }
 
 static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
@@ -1514,6 +1516,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder)
                decoder->timestamp = timestamp;
 
        decoder->timestamp_insn_cnt = 0;
+
+       intel_pt_log_to("Setting timestamp", decoder->timestamp);
 }
 
 /* Walk PSB+ packets when already in sync. */
index e02bc7b166a0e48f1cf537606a71ab43875c6de8..5e64da270f97684b7f42c56079a04a2653a1b890 100644 (file)
@@ -31,6 +31,11 @@ static FILE *f;
 static char log_name[MAX_LOG_NAME];
 bool intel_pt_enable_logging;
 
+void *intel_pt_log_fp(void)
+{
+       return f;
+}
+
 void intel_pt_log_enable(void)
 {
        intel_pt_enable_logging = true;
index 45b64f93f358898c6fb8d5caca92b37cd70f2e2e..cc084937f701acfa16bc3eb3623aab7e6ed429d2 100644 (file)
@@ -22,6 +22,7 @@
 
 struct intel_pt_pkt;
 
+void *intel_pt_log_fp(void);
 void intel_pt_log_enable(void);
 void intel_pt_log_disable(void);
 void intel_pt_log_set_name(const char *name);
index 48c1d415c6b069004dacddafdb759f25efb2b176..149ff361ca789e460cd896beaa439ff7b225c2a2 100644 (file)
@@ -206,6 +206,16 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
        intel_pt_dump(pt, buf, len);
 }
 
+static void intel_pt_log_event(union perf_event *event)
+{
+       FILE *f = intel_pt_log_fp();
+
+       if (!intel_pt_enable_logging || !f)
+               return;
+
+       perf_event__fprintf(event, f);
+}
+
 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
 {
@@ -407,6 +417,13 @@ intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
        return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
 }
 
+static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
+{
+       return ip >= pt->kernel_start ?
+              PERF_RECORD_MISC_KERNEL :
+              PERF_RECORD_MISC_USER;
+}
+
 static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
                                   uint64_t *insn_cnt_ptr, uint64_t *ip,
                                   uint64_t to_ip, uint64_t max_insn_cnt,
@@ -429,10 +446,7 @@ static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
        if (to_ip && *ip == to_ip)
                goto out_no_cache;
 
-       if (*ip >= ptq->pt->kernel_start)
-               cpumode = PERF_RECORD_MISC_KERNEL;
-       else
-               cpumode = PERF_RECORD_MISC_USER;
+       cpumode = intel_pt_cpumode(ptq->pt, *ip);
 
        thread = ptq->thread;
        if (!thread) {
@@ -759,7 +773,8 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
        if (pt->synth_opts.callchain) {
                size_t sz = sizeof(struct ip_callchain);
 
-               sz += pt->synth_opts.callchain_sz * sizeof(u64);
+               /* Add 1 to callchain_sz for callchain context */
+               sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
                ptq->chain = zalloc(sz);
                if (!ptq->chain)
                        goto out_free;
@@ -1058,15 +1073,11 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt,
                                   union perf_event *event,
                                   struct perf_sample *sample)
 {
-       event->sample.header.type = PERF_RECORD_SAMPLE;
-       event->sample.header.misc = PERF_RECORD_MISC_USER;
-       event->sample.header.size = sizeof(struct perf_event_header);
-
        if (!pt->timeless_decoding)
                sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);
 
-       sample->cpumode = PERF_RECORD_MISC_USER;
        sample->ip = ptq->state->from_ip;
+       sample->cpumode = intel_pt_cpumode(pt, sample->ip);
        sample->pid = ptq->pid;
        sample->tid = ptq->tid;
        sample->addr = ptq->state->to_ip;
@@ -1075,6 +1086,10 @@ static void intel_pt_prep_b_sample(struct intel_pt *pt,
        sample->flags = ptq->flags;
        sample->insn_len = ptq->insn_len;
        memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
+
+       event->sample.header.type = PERF_RECORD_SAMPLE;
+       event->sample.header.misc = sample->cpumode;
+       event->sample.header.size = sizeof(struct perf_event_header);
 }
 
 static int intel_pt_inject_event(union perf_event *event,
@@ -1160,7 +1175,8 @@ static void intel_pt_prep_sample(struct intel_pt *pt,
 
        if (pt->synth_opts.callchain) {
                thread_stack__sample(ptq->thread, ptq->chain,
-                                    pt->synth_opts.callchain_sz, sample->ip);
+                                    pt->synth_opts.callchain_sz + 1,
+                                    sample->ip, pt->kernel_start);
                sample->callchain = ptq->chain;
        }
 
@@ -2004,9 +2020,9 @@ static int intel_pt_process_event(struct perf_session *session,
                 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
                err = intel_pt_context_switch(pt, event, sample);
 
-       intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
-                    perf_event__name(event->header.type), event->header.type,
-                    sample->cpu, sample->time, timestamp);
+       intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
+                    event->header.type, sample->cpu, sample->time, timestamp);
+       intel_pt_log_event(event);
 
        return err;
 }
@@ -2559,7 +2575,8 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
        if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
                pt->synth_opts = *session->itrace_synth_opts;
        } else {
-               itrace_synth_opts__set_default(&pt->synth_opts);
+               itrace_synth_opts__set_default(&pt->synth_opts,
+                               session->itrace_synth_opts->default_no_sample);
                if (use_browser != -1) {
                        pt->synth_opts.branches = false;
                        pt->synth_opts.callchain = true;
index 111ae858cbcbdff402b140c5761f9e29107dc83c..8f36ce813bc5b20308a2f799ba76192118fae3d1 100644 (file)
@@ -1708,6 +1708,7 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
        struct thread *parent = machine__findnew_thread(machine,
                                                        event->fork.ppid,
                                                        event->fork.ptid);
+       bool do_maps_clone = true;
        int err = 0;
 
        if (dump_trace)
@@ -1736,9 +1737,25 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
 
        thread = machine__findnew_thread(machine, event->fork.pid,
                                         event->fork.tid);
+       /*
+        * When synthesizing FORK events, we are trying to create thread
+        * objects for the already running tasks on the machine.
+        *
+        * Normally, for a kernel FORK event, we want to clone the parent's
+        * maps because that is what the kernel just did.
+        *
+        * But when synthesizing, this should not be done.  If we do, we end up
+        * with overlapping maps as we process the synthesized MMAP2 events that
+        * get delivered shortly thereafter.
+        *
+        * Use the FORK event misc flags in an internal way to signal this
+        * situation, so we can elide the map clone when appropriate.
+        */
+       if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
+               do_maps_clone = false;
 
        if (thread == NULL || parent == NULL ||
-           thread__fork(thread, parent, sample->time) < 0) {
+           thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
                err = -1;
        }
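
The PERF_RECORD_MISC_FORK_EXEC bit set by perf_event__synthesize_fork() above is what drives do_maps_clone. A sketch of the resulting decision, with the real UAPI bit value:

    PERF_RECORD_MISC_FORK_EXEC = 1 << 13

    def should_clone_maps(fork_misc):
        # Kernel-generated FORK: clone the parent's maps.  Synthesized FORK
        # (flagged with FORK_EXEC): skip it, the synthesized MMAP2 events
        # that follow will rebuild the maps.
        return not (fork_misc & PERF_RECORD_MISC_FORK_EXEC)

    assert should_clone_maps(0) is True
    assert should_clone_maps(PERF_RECORD_MISC_FORK_EXEC) is False
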
@@ -2140,6 +2157,27 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
        return 0;
 }
 
+static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
+                            struct callchain_cursor *cursor,
+                            struct symbol **parent,
+                            struct addr_location *root_al,
+                            u8 *cpumode, int ent)
+{
+       int err = 0;
+
+       while (--ent >= 0) {
+               u64 ip = chain->ips[ent];
+
+               if (ip >= PERF_CONTEXT_MAX) {
+                       err = add_callchain_ip(thread, cursor, parent,
+                                              root_al, cpumode, ip,
+                                              false, NULL, NULL, 0);
+                       break;
+               }
+       }
+       return err;
+}
+
 static int thread__resolve_callchain_sample(struct thread *thread,
                                            struct callchain_cursor *cursor,
                                            struct perf_evsel *evsel,
@@ -2246,6 +2284,12 @@ static int thread__resolve_callchain_sample(struct thread *thread,
        }
 
 check_calls:
+       if (callchain_param.order != ORDER_CALLEE) {
+               err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
+                                       &cpumode, chain->nr - first_call);
+               if (err)
+                       return (err < 0) ? err : 0;
+       }
        for (i = first_call, nr_entries = 0;
             i < chain_nr && nr_entries < max_stack; i++) {
                u64 ip;
@@ -2260,9 +2304,15 @@ check_calls:
                        continue;
 #endif
                ip = chain->ips[j];
-
                if (ip < PERF_CONTEXT_MAX)
                        ++nr_entries;
+               else if (callchain_param.order != ORDER_CALLEE) {
+                       err = find_prev_cpumode(chain, thread, cursor, parent,
+                                               root_al, &cpumode, j);
+                       if (err)
+                               return (err < 0) ? err : 0;
+                       continue;
+               }
 
                err = add_callchain_ip(thread, cursor, parent,
                                       root_al, &cpumode, ip,
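
On find_prev_cpumode() above: perf stores cpumode context markers (PERF_CONTEXT_* entries, all >= PERF_CONTEXT_MAX as u64) inline in the callchain, and when the chain is walked in caller order the addresses are visited before the marker that governs them, so the marker has to be found by scanning backwards. A sketch with the real UAPI constant values:

    PERF_CONTEXT_KERNEL = (1 << 64) - 128   # (u64)-128
    PERF_CONTEXT_USER   = (1 << 64) - 512   # (u64)-512
    PERF_CONTEXT_MAX    = (1 << 64) - 4095  # (u64)-4095

    def find_prev_context(chain, ent):
        for i in range(ent - 1, -1, -1):
            if chain[i] >= PERF_CONTEXT_MAX:
                return chain[i]
        return None

    chain = [PERF_CONTEXT_KERNEL, 0xffffffff81000010,
             PERF_CONTEXT_USER, 0x400123]
    # Caller order starts from the end of the chain; the governing marker
    # for 0x400123 sits behind it:
    assert find_prev_context(chain, len(chain)) == PERF_CONTEXT_USER
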
index cf8bd123cf73fb017cde3d60c7b912743847b965..aed170bd4384ef72a012453a578497bfe912ae61 100644 (file)
@@ -18,6 +18,7 @@
 #include <stdio.h>
 #include <string.h>
 #include <unistd.h>
+#include <asm/bug.h>
 
 struct namespaces *namespaces__new(struct namespaces_event *event)
 {
@@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
        char curpath[PATH_MAX];
        int oldns = -1;
        int newns = -1;
+       char *oldcwd = NULL;
 
        if (nc == NULL)
                return;
@@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
        if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX)
                return;
 
+       oldcwd = get_current_dir_name();
+       if (!oldcwd)
+               return;
+
        oldns = open(curpath, O_RDONLY);
        if (oldns < 0)
-               return;
+               goto errout;
 
        newns = open(nsi->mntns_path, O_RDONLY);
        if (newns < 0)
@@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi,
        if (setns(newns, CLONE_NEWNS) < 0)
                goto errout;
 
+       nc->oldcwd = oldcwd;
        nc->oldns = oldns;
        nc->newns = newns;
        return;
 
 errout:
+       free(oldcwd);
        if (oldns > -1)
                close(oldns);
        if (newns > -1)
@@ -223,11 +231,16 @@ errout:
 
 void nsinfo__mountns_exit(struct nscookie *nc)
 {
-       if (nc == NULL || nc->oldns == -1 || nc->newns == -1)
+       if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd)
                return;
 
        setns(nc->oldns, CLONE_NEWNS);
 
+       if (nc->oldcwd) {
+               WARN_ON_ONCE(chdir(nc->oldcwd));
+               zfree(&nc->oldcwd);
+       }
+
        if (nc->oldns > -1) {
                close(nc->oldns);
                nc->oldns = -1;
index cae1a9a397222ca4b06a17f5bda0d4aa6774ce5d..d5f46c09ea31922fb46f59158ab9e05138aed1b1 100644 (file)
@@ -38,6 +38,7 @@ struct nsinfo {
 struct nscookie {
        int                     oldns;
        int                     newns;
+       char                    *oldcwd;
 };
 
 int nsinfo__init(struct nsinfo *nsi);
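
The oldcwd field exists because setns(2) into a mount namespace also resets the caller's current working directory (and root) to the target namespace's root, so the cwd must be captured before the switch and restored afterwards. A sketch of the pattern; note os.setns() and os.CLONE_NEWNS only exist on Python 3.12+, older versions would need ctypes:

    import os

    def mountns_enter(mntns_path):
        oldcwd = os.getcwd()  # what get_current_dir_name() provides above
        oldns = os.open("/proc/self/ns/mnt", os.O_RDONLY)
        newns = os.open(mntns_path, os.O_RDONLY)
        os.setns(newns, os.CLONE_NEWNS)  # cwd is now the new namespace's root
        return oldcwd, oldns, newns

    def mountns_exit(oldcwd, oldns, newns):
        os.setns(oldns, os.CLONE_NEWNS)
        os.chdir(oldcwd)  # the restore this patch adds
        os.close(oldns)
        os.close(newns)
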
index f8cd3e7c918668cc1f593b539c6648d16b091726..59be3466d64d329ae50b9d719836f5ddbb780832 100644 (file)
@@ -926,6 +926,7 @@ static const char *config_term_names[__PARSE_EVENTS__TERM_TYPE_NR] = {
        [PARSE_EVENTS__TERM_TYPE_NOINHERIT]             = "no-inherit",
        [PARSE_EVENTS__TERM_TYPE_INHERIT]               = "inherit",
        [PARSE_EVENTS__TERM_TYPE_MAX_STACK]             = "max-stack",
+       [PARSE_EVENTS__TERM_TYPE_MAX_EVENTS]            = "nr",
        [PARSE_EVENTS__TERM_TYPE_OVERWRITE]             = "overwrite",
        [PARSE_EVENTS__TERM_TYPE_NOOVERWRITE]           = "no-overwrite",
        [PARSE_EVENTS__TERM_TYPE_DRV_CFG]               = "driver-config",
@@ -1037,6 +1038,9 @@ do {                                                                         \
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                CHECK_TYPE_VAL(NUM);
                break;
+       case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
+               CHECK_TYPE_VAL(NUM);
+               break;
        default:
                err->str = strdup("unknown term");
                err->idx = term->err_term;
@@ -1084,6 +1088,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
        case PARSE_EVENTS__TERM_TYPE_INHERIT:
        case PARSE_EVENTS__TERM_TYPE_NOINHERIT:
        case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
+       case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
        case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
        case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
                return config_term_common(attr, term, err);
@@ -1162,6 +1167,9 @@ do {                                                              \
                case PARSE_EVENTS__TERM_TYPE_MAX_STACK:
                        ADD_CONFIG_TERM(MAX_STACK, max_stack, term->val.num);
                        break;
+               case PARSE_EVENTS__TERM_TYPE_MAX_EVENTS:
+                       ADD_CONFIG_TERM(MAX_EVENTS, max_events, term->val.num);
+                       break;
                case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
                        ADD_CONFIG_TERM(OVERWRITE, overwrite, term->val.num ? 1 : 0);
                        break;
index 4473dac27aee254fd6752cb06d9b7877a1ffdaeb..5ed035cbcbb72dcbcf5c73d39be2248c099e7452 100644 (file)
@@ -71,6 +71,7 @@ enum {
        PARSE_EVENTS__TERM_TYPE_NOINHERIT,
        PARSE_EVENTS__TERM_TYPE_INHERIT,
        PARSE_EVENTS__TERM_TYPE_MAX_STACK,
+       PARSE_EVENTS__TERM_TYPE_MAX_EVENTS,
        PARSE_EVENTS__TERM_TYPE_NOOVERWRITE,
        PARSE_EVENTS__TERM_TYPE_OVERWRITE,
        PARSE_EVENTS__TERM_TYPE_DRV_CFG,
index 5f761f3ed0f3333fba455be4e2fa447282005e51..7805c71aaae2e53dbc74c072b4e5eb2a73e6c23a 100644 (file)
@@ -269,6 +269,7 @@ time                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_TIME); }
 call-graph             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CALLGRAPH); }
 stack-size             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_STACKSIZE); }
 max-stack              { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_MAX_STACK); }
+nr                     { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_MAX_EVENTS); }
 inherit                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_INHERIT); }
 no-inherit             { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NOINHERIT); }
 overwrite              { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_OVERWRITE); }
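
Together with the parse-events.c hunks above, this maps the event term "nr" onto PARSE_EVENTS__TERM_TYPE_MAX_EVENTS, i.e. a per-event cap on how many times the event may fire before it is disabled. Assuming the usual slash-delimited term syntax, usage would look something like the following (illustrative command, not taken from this patch):

    # stop taking samples for this event after it has fired twice
    perf record -e probe_libc:inet_pton/nr=2/ -- ping -c 5 localhost
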
index 7799788f662fdc05765915b383d13085f2a932ac..7e49baad304d78815966a6ac918f472fd817620b 100644 (file)
@@ -773,7 +773,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
 
                if (!is_arm_pmu_core(name)) {
                        pname = pe->pmu ? pe->pmu : "cpu";
-                       if (strncmp(pname, name, strlen(pname)))
+                       if (strcmp(pname, name))
                                continue;
                }
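
The old strncmp() only compared the first strlen(pname) bytes, so an alias entry whose pmu field is "cpu" would also be attached to any PMU whose name merely starts with "cpu". A minimal sketch of the difference (hypothetical PMU names):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *pname = "cpu";       /* PMU name from the alias entry */
            const char *name  = "cpu_thing"; /* hypothetical PMU being probed */

            /* Prefix match: returns 0, so the alias is wrongly applied. */
            printf("strncmp -> %d\n", strncmp(pname, name, strlen(pname)));
            /* Exact match: returns nonzero, so the alias is correctly skipped. */
            printf("strcmp  -> %d\n", strcmp(pname, name));
            return 0;
    }
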
 
index 0281d5e2cd6703d0d0d34562a602b8d780b88926..66a84d5846c88ed912aff027943c6f8e9ff78ff2 100644 (file)
@@ -324,7 +324,17 @@ int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss)
                        plt_entry_size = 16;
                        break;
 
-               default: /* FIXME: s390/alpha/mips/parisc/poperpc/sh/sparc/xtensa need to be checked */
+               case EM_SPARC:
+                       plt_header_size = 48;
+                       plt_entry_size = 12;
+                       break;
+
+               case EM_SPARCV9:
+                       plt_header_size = 128;
+                       plt_entry_size = 32;
+                       break;
+
+               default: /* FIXME: s390/alpha/mips/parisc/powerpc/sh/xtensa need to be checked */
                        plt_header_size = shdr_plt.sh_entsize;
                        plt_entry_size = shdr_plt.sh_entsize;
                        break;
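
With per-architecture header and entry sizes in hand, the address of the i-th synthesized PLT symbol is a simple affine function of the section start; schematically (a sketch of the addressing, not the exact loop in the file; u64 as in the surrounding code):

    /* Sketch: where synthesized PLT symbol i lands, given the section
     * start and the per-arch sizes selected in the switch above. */
    static u64 plt_sym_addr(u64 plt_start, u64 plt_header_size,
                            u64 plt_entry_size, u64 i)
    {
            return plt_start + plt_header_size + i * plt_entry_size;
    }
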
index 20f49779116bd3ad7b98991688cc1cf4201e444c..d026d215bdc63244638c26e4ac4586fa8fe3a1cb 100644 (file)
@@ -123,7 +123,8 @@ struct symbol_conf {
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
-                       *field_sep;
+                       *field_sep,
+                       *graph_function;
        const char      *default_guest_vmlinux_name,
                        *default_guest_kallsyms,
                        *default_guest_modules;
index c091635bf7dcb317d66f1ab8273e5cc90678c5e1..61a4286a74dc9f86c333036013d5abe914b81b3f 100644 (file)
@@ -310,20 +310,46 @@ void thread_stack__free(struct thread *thread)
        }
 }
 
+static inline u64 callchain_context(u64 ip, u64 kernel_start)
+{
+       return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
+}
+
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
-                         size_t sz, u64 ip)
+                         size_t sz, u64 ip, u64 kernel_start)
 {
-       size_t i;
+       u64 context = callchain_context(ip, kernel_start);
+       u64 last_context;
+       size_t i, j;
 
-       if (!thread || !thread->ts)
-               chain->nr = 1;
-       else
-               chain->nr = min(sz, thread->ts->cnt + 1);
+       if (sz < 2) {
+               chain->nr = 0;
+               return;
+       }
 
-       chain->ips[0] = ip;
+       chain->ips[0] = context;
+       chain->ips[1] = ip;
+
+       if (!thread || !thread->ts) {
+               chain->nr = 2;
+               return;
+       }
+
+       last_context = context;
+
+       for (i = 2, j = 1; i < sz && j <= thread->ts->cnt; i++, j++) {
+               ip = thread->ts->stack[thread->ts->cnt - j].ret_addr;
+               context = callchain_context(ip, kernel_start);
+               if (context != last_context) {
+                       if (i >= sz - 1)
+                               break;
+                       chain->ips[i++] = context;
+                       last_context = context;
+               }
+               chain->ips[i] = ip;
+       }
 
-       for (i = 1; i < chain->nr; i++)
-               chain->ips[i] = thread->ts->stack[thread->ts->cnt - i].ret_addr;
+       chain->nr = i;
 }
 
 struct call_return_processor *
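
The rework threads PERF_CONTEXT_* markers into the synthesized chain: a marker is emitted before the first address of each user/kernel run, and the walk stops early when a context switch would not leave room for both the marker and the address. For a sample taken in the kernel whose recorded return addresses cross back into userspace, the resulting layout is, illustratively (made-up addresses, with kernel_start as the boundary):

    chain->ips[0] = PERF_CONTEXT_KERNEL   /* sampled ip >= kernel_start     */
    chain->ips[1] = 0xffffffff8112abcd    /* the sampled ip itself          */
    chain->ips[2] = PERF_CONTEXT_USER     /* first ret addr < kernel_start  */
    chain->ips[3] = 0x000055d1c0401234    /* user-space return address      */
    chain->nr     = 4
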
index b7e41c4ebfdd98ec038f823e70051a49c3fa6a62..f97c00a8c2514dcc102f8e25d32e474815e2b030 100644 (file)
@@ -84,7 +84,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
                        u64 to_ip, u16 insn_len, u64 trace_nr);
 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
-                         size_t sz, u64 ip);
+                         size_t sz, u64 ip, u64 kernel_start);
 int thread_stack__flush(struct thread *thread);
 void thread_stack__free(struct thread *thread);
 size_t thread_stack__depth(struct thread *thread);
index 2048d393ece6f24b19f896e75173420f47d22e48..3d9ed7d0e2818f3aedea795b4083db31a22f5de6 100644 (file)
@@ -330,7 +330,8 @@ static int thread__prepare_access(struct thread *thread)
 }
 
 static int thread__clone_map_groups(struct thread *thread,
-                                   struct thread *parent)
+                                   struct thread *parent,
+                                   bool do_maps_clone)
 {
        /* This is new thread, we share map groups for process. */
        if (thread->pid_ == parent->pid_)
@@ -341,15 +342,11 @@ static int thread__clone_map_groups(struct thread *thread,
                         thread->pid_, thread->tid, parent->pid_, parent->tid);
                return 0;
        }
-
        /* But this one is new process, copy maps. */
-       if (map_groups__clone(thread, parent->mg) < 0)
-               return -ENOMEM;
-
-       return 0;
+       return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0;
 }
 
-int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
+int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
 {
        if (parent->comm_set) {
                const char *comm = thread__comm_str(parent);
@@ -362,7 +359,7 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
        }
 
        thread->ppid = parent->tid;
-       return thread__clone_map_groups(thread, parent);
+       return thread__clone_map_groups(thread, parent, do_maps_clone);
 }
 
 void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
index 07606aa6998d92252b7d63a4632750d9531356f2..30e2b4c165fe7341332b71365141c5f209ec23bd 100644 (file)
@@ -42,6 +42,8 @@ struct thread {
        void                            *addr_space;
        struct unwind_libunwind_ops     *unwind_libunwind_ops;
 #endif
+       bool                    filter;
+       int                     filter_entry_depth;
 };
 
 struct machine;
@@ -87,7 +89,7 @@ struct comm *thread__comm(const struct thread *thread);
 struct comm *thread__exec_comm(const struct thread *thread);
 const char *thread__comm_str(const struct thread *thread);
 int thread__insert_map(struct thread *thread, struct map *map);
-int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp);
+int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone);
 size_t thread__fprintf(struct thread *thread, FILE *fp);
 
 struct thread *thread__main_thread(struct machine *machine, struct thread *thread);
index 6f318b15950e8e539f60ac11b3b7606007b496c3..5eff9bfc575836e0c6efd70cc82ee8f5a69bc283 100644 (file)
@@ -45,13 +45,13 @@ static int __report_module(struct addr_location *al, u64 ip,
                Dwarf_Addr s;
 
                dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
-               if (s != al->map->start)
+               if (s != al->map->start - al->map->pgoff)
                        mod = 0;
        }
 
        if (!mod)
                mod = dwfl_report_elf(ui->dwfl, dso->short_name,
-                                     (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start,
+                                     (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
                                      false);
 
        return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
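
The pgoff correction matters when the executable mapping does not start at file offset zero: libdw wants the ELF load base (where file offset 0 would map), not the start of the text mapping. In effect, the base handed to dwfl on both paths is:

    /* ELF load base as seen by libdw, not the mapping start: */
    base = al->map->start - al->map->pgoff;
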
index 14508ee7707a4763c2c8b0fc89c7d4076fa829fd..ece040b799f6ead3614d6061d60dbe3c17c2cac4 100644 (file)
@@ -59,6 +59,10 @@ int fetch_kernel_version(unsigned int *puint,
 
 const char *perf_tip(const char *dirpath);
 
+#ifndef HAVE_GET_CURRENT_DIR_NAME
+char *get_current_dir_name(void);
+#endif
+
 #ifndef HAVE_SCHED_GETCPU_SUPPORT
 int sched_getcpu(void);
 #endif
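
get_current_dir_name() is a GNU extension, so builds against non-glibc C libraries need a fallback behind this new guard. One plausible shape for it, as a sketch (assuming the usual <limits.h>/<unistd.h>/<string.h> includes; the real definition lives in a .c file elsewhere in tools/perf):

    #ifndef HAVE_GET_CURRENT_DIR_NAME
    /* Fallback: like the GNU extension, return a malloc'd cwd or NULL. */
    char *get_current_dir_name(void)
    {
            char pwd[PATH_MAX];

            return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
    }
    #endif
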
index 1dd5f4fcffd53f375ba00479bfed37d867399c4a..db66a952c173958395f9134c1fa029fbbd6d3950 100644 (file)
@@ -129,7 +129,7 @@ WARNINGS += $(call cc-supports,-Wno-pointer-sign)
 WARNINGS += $(call cc-supports,-Wdeclaration-after-statement)
 WARNINGS += -Wshadow
 
-CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
+override CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \
                -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE
 
 UTIL_OBJS =  utils/helpers/amd.o utils/helpers/msr.o \
@@ -156,12 +156,12 @@ LIB_SRC =         lib/cpufreq.c lib/cpupower.c lib/cpuidle.c
 LIB_OBJS =     lib/cpufreq.o lib/cpupower.o lib/cpuidle.o
 LIB_OBJS :=    $(addprefix $(OUTPUT),$(LIB_OBJS))
 
-CFLAGS +=      -pipe
+override CFLAGS +=     -pipe
 
 ifeq ($(strip $(NLS)),true)
        INSTALL_NLS += install-gmo
        COMPILE_NLS += create-gmo
-       CFLAGS += -DNLS
+       override CFLAGS += -DNLS
 endif
 
 ifeq ($(strip $(CPUFREQ_BENCH)),true)
@@ -175,7 +175,7 @@ ifeq ($(strip $(STATIC)),true)
         UTIL_SRC += $(LIB_SRC)
 endif
 
-CFLAGS += $(WARNINGS)
+override CFLAGS += $(WARNINGS)
 
 ifeq ($(strip $(V)),false)
        QUIET=@
@@ -188,10 +188,10 @@ export QUIET ECHO
 
 # if DEBUG is enabled, then we do not strip or optimize
 ifeq ($(strip $(DEBUG)),true)
-       CFLAGS += -O1 -g -DDEBUG
+       override CFLAGS += -O1 -g -DDEBUG
        STRIPCMD = /bin/true -Since_we_are_debugging
 else
-       CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
+       override CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer
        STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment
 endif
 
index d79ab161cc75f91ab6c7c1f0344387f14216663f..f68b4bc5527397f285cb1b7bedcf5347ebca52cf 100644 (file)
@@ -9,7 +9,7 @@ endif
 ifeq ($(strip $(STATIC)),true)
 LIBS = -L../ -L$(OUTPUT) -lm
 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \
-       $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o
+       $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o
 else
 LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
index 59af84b8ef455dfb33053948ec0fdef24a70f7e9..b1b6c43644e79be755cc867fb91717ecd1e631c3 100644 (file)
@@ -13,10 +13,10 @@ INSTALL = /usr/bin/install
 default: all
 
 $(OUTPUT)centrino-decode: ../i386/centrino-decode.c
-       $(CC) $(CFLAGS) -o $@ $<
+       $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
 
 $(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c
-       $(CC) $(CFLAGS) -o $@ $<
+       $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $<
 
 all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode
 
index 1b993fe1ce2372a5e6fed1d057b6b2a1dc64d5d6..0c0f3e3f0d8038e138077b40d428faff6083cff8 100644 (file)
@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname,
 
        snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s",
                         cpu, fname);
-       return sysfs_read_file(path, buf, buflen);
+       return cpupower_read_sysfs(path, buf, buflen);
 }
 
 /* helper function to write a new value to a /sys file */
index 9bd4c7655fdb2a4942d6aa0012174f232e25ee4f..852d25462388c2d61b67bf5c4ca162d591b16281 100644 (file)
@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf,
 
        snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname);
 
-       return sysfs_read_file(path, buf, buflen);
+       return cpupower_read_sysfs(path, buf, buflen);
 }
 
 
index 9c395ec924def2538e973eab7724472eea34cb8d..9711d628b0f440151e239d1986bdce4fb8be59ef 100644 (file)
@@ -15,7 +15,7 @@
 #include "cpupower.h"
 #include "cpupower_intern.h"
 
-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen)
+unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen)
 {
        int fd;
        ssize_t numread;
@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re
 
        snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s",
                         cpu, fname);
-       if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0)
+       if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
                return -1;
        *result = strtol(linebuf, &endp, 0);
        if (endp == linebuf || errno == ERANGE)
index 92affdfbe4174e13f5a5fecded202d597b256dbc..4887c76d23f868c060364dab8795702570a91e6f 100644 (file)
@@ -3,4 +3,4 @@
 #define MAX_LINE_LEN 4096
 #define SYSFS_PATH_MAX 255
 
-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
+unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
index 9527d47a1070ecbabc716273d6e46f34e18c1ca1..6c16ac36d482c6b71ecf8419ae1191b545e0b942 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/workqueue.h>
 #include <linux/libnvdimm.h>
+#include <linux/genalloc.h>
 #include <linux/vmalloc.h>
 #include <linux/device.h>
 #include <linux/module.h>
@@ -140,8 +141,8 @@ static u32 handle[] = {
        [6] = NFIT_DIMM_HANDLE(1, 0, 0, 0, 1),
 };
 
-static unsigned long dimm_fail_cmd_flags[NUM_DCR];
-static int dimm_fail_cmd_code[NUM_DCR];
+static unsigned long dimm_fail_cmd_flags[ARRAY_SIZE(handle)];
+static int dimm_fail_cmd_code[ARRAY_SIZE(handle)];
 
 static const struct nd_intel_smart smart_def = {
        .flags = ND_INTEL_SMART_HEALTH_VALID
@@ -205,7 +206,7 @@ struct nfit_test {
                unsigned long deadline;
                spinlock_t lock;
        } ars_state;
-       struct device *dimm_dev[NUM_DCR];
+       struct device *dimm_dev[ARRAY_SIZE(handle)];
        struct nd_intel_smart *smart;
        struct nd_intel_smart_threshold *smart_threshold;
        struct badrange badrange;
@@ -215,6 +216,8 @@ struct nfit_test {
 
 static struct workqueue_struct *nfit_wq;
 
+static struct gen_pool *nfit_pool;
+
 static struct nfit_test *to_nfit_test(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -1132,6 +1135,9 @@ static void release_nfit_res(void *data)
        list_del(&nfit_res->list);
        spin_unlock(&nfit_test_lock);
 
+       if (resource_size(&nfit_res->res) >= DIMM_SIZE)
+               gen_pool_free(nfit_pool, nfit_res->res.start,
+                               resource_size(&nfit_res->res));
        vfree(nfit_res->buf);
        kfree(nfit_res);
 }
@@ -1144,7 +1150,7 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
                        GFP_KERNEL);
        int rc;
 
-       if (!buf || !nfit_res)
+       if (!buf || !nfit_res || !*dma)
                goto err;
        rc = devm_add_action(dev, release_nfit_res, nfit_res);
        if (rc)
@@ -1164,6 +1170,8 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
 
        return nfit_res->buf;
  err:
+       if (*dma && size >= DIMM_SIZE)
+               gen_pool_free(nfit_pool, *dma, size);
        if (buf)
                vfree(buf);
        kfree(nfit_res);
@@ -1172,9 +1180,16 @@ static void *__test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma,
 
 static void *test_alloc(struct nfit_test *t, size_t size, dma_addr_t *dma)
 {
+       struct genpool_data_align data = {
+               .align = SZ_128M,
+       };
        void *buf = vmalloc(size);
 
-       *dma = (unsigned long) buf;
+       if (size >= DIMM_SIZE)
+               *dma = gen_pool_alloc_algo(nfit_pool, size,
+                               gen_pool_first_fit_align, &data);
+       else
+               *dma = (unsigned long) buf;
        return __test_alloc(t, size, dma, buf);
 }
 
@@ -2680,7 +2695,7 @@ static int nfit_test_probe(struct platform_device *pdev)
                u32 nfit_handle = __to_nfit_memdev(nfit_mem)->device_handle;
                int i;
 
-               for (i = 0; i < NUM_DCR; i++)
+               for (i = 0; i < ARRAY_SIZE(handle); i++)
                        if (nfit_handle == handle[i])
                                dev_set_drvdata(nfit_test->dimm_dev[i],
                                                nfit_mem);
@@ -2839,6 +2854,17 @@ static __init int nfit_test_init(void)
                goto err_register;
        }
 
+       nfit_pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE);
+       if (!nfit_pool) {
+               rc = -ENOMEM;
+               goto err_register;
+       }
+
+       if (gen_pool_add(nfit_pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) {
+               rc = -ENOMEM;
+               goto err_register;
+       }
+
        for (i = 0; i < NUM_NFITS; i++) {
                struct nfit_test *nfit_test;
                struct platform_device *pdev;
@@ -2894,6 +2920,9 @@ static __init int nfit_test_init(void)
        return 0;
 
  err_register:
+       if (nfit_pool)
+               gen_pool_destroy(nfit_pool);
+
        destroy_workqueue(nfit_wq);
        for (i = 0; i < NUM_NFITS; i++)
                if (instances[i])
@@ -2917,6 +2946,8 @@ static __exit void nfit_test_exit(void)
        platform_driver_unregister(&nfit_test_driver);
        nfit_test_teardown();
 
+       gen_pool_destroy(nfit_pool);
+
        for (i = 0; i < NUM_NFITS; i++)
                put_device(&instances[i]->pdev.dev);
        class_destroy(nfit_test_dimm);
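
Taken together, the nfit hunks give the test module a genalloc-backed DMA address allocator: one pool managing [4G, 8G), 128M-aligned carve-outs for DIMM-sized buffers, symmetric frees on release, and pool teardown on module exit. The genalloc lifecycle, reduced to a sketch (pool_demo is a hypothetical wrapper; the sizes are the ones this file uses):

    #include <linux/errno.h>
    #include <linux/genalloc.h>
    #include <linux/log2.h>
    #include <linux/numa.h>
    #include <linux/sizes.h>

    static int pool_demo(void)
    {
            struct genpool_data_align data = { .align = SZ_128M };
            struct gen_pool *pool;
            unsigned long addr = 0;

            pool = gen_pool_create(ilog2(SZ_4M), NUMA_NO_NODE); /* 4M granule */
            if (!pool)
                    return -ENOMEM;
            if (gen_pool_add(pool, SZ_4G, SZ_4G, NUMA_NO_NODE)) /* [4G, 8G) */
                    goto destroy;
            addr = gen_pool_alloc_algo(pool, SZ_128M,
                                       gen_pool_first_fit_align, &data);
            if (addr)                            /* aligned carve-out */
                    gen_pool_free(pool, addr, SZ_128M);
    destroy:
            gen_pool_destroy(pool);
            return addr ? 0 : -ENOMEM;
    }
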
index f1fe492c8e17d060d70009f5ff139cbf975c6f72..f0017c831e57bdf48caf0e4a334db009ad23f0bb 100644 (file)
@@ -24,6 +24,7 @@ TARGETS += memory-hotplug
 TARGETS += mount
 TARGETS += mqueue
 TARGETS += net
+TARGETS += netfilter
 TARGETS += nsfs
 TARGETS += powerpc
 TARGETS += proc
index 686e57ce40f430fe25f910d5eda49cf0c8236949..efb6c13ab0debe7e82b5615d9eb0cb49f05ad6fb 100644 (file)
@@ -154,12 +154,12 @@ static unsigned long long (*bpf_skb_ancestor_cgroup_id)(void *ctx, int level) =
        (void *) BPF_FUNC_skb_ancestor_cgroup_id;
 static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
-                                            int size, unsigned int netns_id,
+                                            int size, unsigned long long netns_id,
                                             unsigned long long flags) =
        (void *) BPF_FUNC_sk_lookup_tcp;
 static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
-                                            int size, unsigned int netns_id,
+                                            int size, unsigned long long netns_id,
                                             unsigned long long flags) =
        (void *) BPF_FUNC_sk_lookup_udp;
 static int (*bpf_sk_release)(struct bpf_sock *sk) =
index d3273b5b3173e123d087c624a34f5b834395da54..ae8180b11d5fe6dde18f822a262405e07479bc68 100644 (file)
@@ -11,6 +11,8 @@
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 
+#include "bpf_rlimit.h"
+
 const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
 const char *cfg_map_name = "jmp_table";
 bool cfg_attach = true;
index f42b3396d6226dfec5fddf5d3a9a2d13c5c4ffb6..38e1cbaaffdbbcfb48e4b02501eef0f18f9763b0 100644 (file)
@@ -432,11 +432,11 @@ static struct btf_raw_test raw_tests[] = {
                /* const void* */       /* [3] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
                /* typedef const void * const_void_ptr */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
-               /* struct A { */        /* [4] */
+               BTF_TYPEDEF_ENC(NAME_TBD, 3),   /* [4] */
+               /* struct A { */        /* [5] */
                BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
                /* const_void_ptr m; */
-               BTF_MEMBER_ENC(NAME_TBD, 3, 0),
+               BTF_MEMBER_ENC(NAME_TBD, 4, 0),
                /* } */
                BTF_END_RAW,
        },
@@ -494,10 +494,10 @@ static struct btf_raw_test raw_tests[] = {
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
                /* const void* */       /* [3] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
-               /* typedef const void * const_void_ptr */       /* [4] */
-               BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
-               /* const_void_ptr[4] */ /* [5] */
-               BTF_TYPE_ARRAY_ENC(3, 1, 4),
+               /* typedef const void * const_void_ptr */
+               BTF_TYPEDEF_ENC(NAME_TBD, 3),   /* [4] */
+               /* const_void_ptr[4] */
+               BTF_TYPE_ARRAY_ENC(4, 1, 4),    /* [5] */
                BTF_END_RAW,
        },
        .str_sec = "\0const_void_ptr",
@@ -1292,6 +1292,367 @@ static struct btf_raw_test raw_tests[] = {
        .err_str = "type != 0",
 },
 
+{
+       .descr = "typedef (invalid name, name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_TYPEDEF_ENC(0, 1),                          /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "typedef_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "typedef (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
+               BTF_TYPEDEF_ENC(NAME_TBD, 1),                   /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__!int",
+       .str_sec_size = sizeof("\0__!int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "typedef_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "ptr type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),      /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "ptr_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "volatile type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "volatile_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "const type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1),    /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "const_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "restrict type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),   /* [2] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__int",
+       .str_sec_size = sizeof("\0__int"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "restrict_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "fwd type (invalid name, name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0),   /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__skb",
+       .str_sec_size = sizeof("\0__skb"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "fwd_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "fwd type (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0),      /* [2] */
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__!skb",
+       .str_sec_size = sizeof("\0__!skb"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "fwd_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "array type (invalid name, name_off <> 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0),    /* [2] */
+               BTF_ARRAY_ENC(1, 1, 4),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0__skb",
+       .str_sec_size = sizeof("\0__skb"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "array_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "struct type (name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A",
+       .str_sec_size = sizeof("\0A"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+},
+
+{
+       .descr = "struct type (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A!\0B",
+       .str_sec_size = sizeof("\0A!\0B"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "struct member (name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A",
+       .str_sec_size = sizeof("\0A"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+},
+
+{
+       .descr = "struct member (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
+               BTF_MEMBER_ENC(NAME_TBD, 1, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0B*",
+       .str_sec_size = sizeof("\0A\0B*"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "struct_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "enum type (name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A\0B",
+       .str_sec_size = sizeof("\0A\0B"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+},
+
+{
+       .descr = "enum type (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(NAME_TBD,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A!\0B",
+       .str_sec_size = sizeof("\0A!\0B"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "enum member (invalid name, name_off = 0)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(0, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "",
+       .str_sec_size = sizeof(""),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
+
+{
+       .descr = "enum member (invalid name, invalid identifier)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
+               BTF_TYPE_ENC(0,
+                            BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
+                            sizeof(int)),                              /* [2] */
+               BTF_ENUM_ENC(NAME_TBD, 0),
+               BTF_END_RAW,
+       },
+       .str_sec = "\0A!",
+       .str_sec_size = sizeof("\0A!"),
+       .map_type = BPF_MAP_TYPE_ARRAY,
+       .map_name = "enum_type_check_btf",
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .key_type_id = 1,
+       .value_type_id = 1,
+       .max_entries = 4,
+       .btf_load_err = true,
+       .err_str = "Invalid name",
+},
 {
        .descr = "arraymap invalid btf key (a bit field)",
        .raw_types = {
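
The BTF_TYPEDEF_ENC() helper these tests (and the fixed raw_tests above) rely on is presumably a thin wrapper over BTF_TYPE_ENC with the TYPEDEF kind, along these lines (labeled an assumption; the macro itself is defined near the top of the file):

    /* Assumed shape: a typedef carries a name and points at an existing
     * type id; it has no vlen and no kind_flag. */
    #define BTF_TYPEDEF_ENC(name, type) \
            BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0), type)
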
index 7887df6933998c62d6d81926066918728d704c8c..44ed7f29f8ab6cec40c83c02c01d28ca36c36492 100644 (file)
@@ -81,7 +81,10 @@ int main(int argc, char **argv)
                goto err;
        }
 
-       assert(system("ping localhost -6 -c 10000 -f -q > /dev/null") == 0);
+       if (system("which ping6 &>/dev/null") == 0)
+               assert(!system("ping6 localhost -c 10000 -f -q > /dev/null"));
+       else
+               assert(!system("ping -6 localhost -c 10000 -f -q > /dev/null"));
 
        if (bpf_prog_query(cgroup_fd, BPF_CGROUP_INET_EGRESS, 0, NULL, NULL,
                           &prog_cnt)) {
index b745bdc08c2bd841e437831e24459ff51f01c34b..e21cd736c196efcfe4d633f05661a1ddb18b8783 100644 (file)
@@ -72,7 +72,7 @@ int bpf_sk_lookup_test0(struct __sk_buff *skb)
                return TC_ACT_SHOT;
 
        tuple_len = ipv4 ? sizeof(tuple->ipv4) : sizeof(tuple->ipv6);
-       sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);
        return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
@@ -84,7 +84,7 @@ int bpf_sk_lookup_test1(struct __sk_buff *skb)
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);
        return 0;
@@ -97,7 +97,7 @@ int bpf_sk_lookup_uaf(struct __sk_buff *skb)
        struct bpf_sock *sk;
        __u32 family = 0;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        if (sk) {
                bpf_sk_release(sk);
                family = sk->family;
@@ -112,7 +112,7 @@ int bpf_sk_lookup_modptr(struct __sk_buff *skb)
        struct bpf_sock *sk;
        __u32 family;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        if (sk) {
                sk += 1;
                bpf_sk_release(sk);
@@ -127,7 +127,7 @@ int bpf_sk_lookup_modptr_or_null(struct __sk_buff *skb)
        struct bpf_sock *sk;
        __u32 family;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        sk += 1;
        if (sk)
                bpf_sk_release(sk);
@@ -139,7 +139,7 @@ int bpf_sk_lookup_test2(struct __sk_buff *skb)
 {
        struct bpf_sock_tuple tuple = {};
 
-       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        return 0;
 }
 
@@ -149,7 +149,7 @@ int bpf_sk_lookup_test3(struct __sk_buff *skb)
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        bpf_sk_release(sk);
        bpf_sk_release(sk);
        return 0;
@@ -161,7 +161,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;
 
-       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
        bpf_sk_release(sk);
        return 0;
 }
@@ -169,7 +169,7 @@ int bpf_sk_lookup_test4(struct __sk_buff *skb)
 void lookup_no_release(struct __sk_buff *skb)
 {
        struct bpf_sock_tuple tuple = {};
-       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), 0, 0);
+       bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
 }
 
 SEC("fail_no_release_subcall")
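
Every call site above now passes BPF_F_CURRENT_NETNS rather than a literal 0 for the netns_id argument, and the surrounding tests exist because the verifier tracks the socket reference: a successful bpf_sk_lookup_tcp() must be balanced by exactly one bpf_sk_release() on every path. The minimal well-formed pattern, as a sketch in the idiom of this file (assuming its existing includes and helper declarations):

    SEC("sk_lookup_ok")
    int bpf_sk_lookup_ok(struct __sk_buff *skb)
    {
            struct bpf_sock_tuple tuple = {};
            struct bpf_sock *sk;

            sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple),
                                   BPF_F_CURRENT_NETNS, 0);
            if (!sk)
                    return TC_ACT_UNSPEC;
            bpf_sk_release(sk);     /* exactly one release per acquired ref */
            return TC_ACT_OK;
    }
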
index 42544a969abc63b734267e696c78d2b338be0ca7..a9bc6f82abc163d47876d4af1f2dcbe540a6c814 100755 (executable)
@@ -10,7 +10,7 @@ wait_for_ip()
        echo -n "Wait for testing link-local IP to become available "
        for _i in $(seq ${MAX_PING_TRIES}); do
                echo -n "."
-               if ping -6 -q -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
+               if $PING6 -c 1 -W 1 ff02::1%${TEST_IF} >/dev/null 2>&1; then
                        echo " OK"
                        return
                fi
@@ -58,5 +58,6 @@ BPF_PROG_OBJ="${DIR}/test_skb_cgroup_id_kern.o"
 BPF_PROG_SECTION="cgroup_id_logger"
 BPF_PROG_ID=0
 PROG="${DIR}/test_skb_cgroup_id_user"
+type ping6 >/dev/null 2>&1 && PING6="ping6" || PING6="ping -6"
 
 main
index 9832a875a828979be26b8756cbd589d5cf5e44f1..3b9fdb8094aa28b5e5abbe99f15b1e4502869a68 100755 (executable)
@@ -4,7 +4,8 @@ set -eu
 
 ping_once()
 {
-       ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
+       type ping${1} >/dev/null 2>&1 && PING="ping${1}" || PING="ping -${1}"
+       $PING -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1
 }
 
 wait_for_ip()
index 36f3d3009d1a079e57032b47b57bc306d045a3a8..df6f751cc1e81c0bcc1f68e86a5bc57e530a2452 100644 (file)
@@ -76,7 +76,7 @@ struct bpf_test {
        int fixup_percpu_cgroup_storage[MAX_FIXUPS];
        const char *errstr;
        const char *errstr_unpriv;
-       uint32_t retval;
+       uint32_t retval, retval_unpriv;
        enum {
                UNDEF,
                ACCEPT,
@@ -3084,6 +3084,8 @@ static struct bpf_test tests[] = {
                .fixup_prog1 = { 2 },
                .result = ACCEPT,
                .retval = 42,
+               /* Verifier rewrite for unpriv skips tail call here. */
+               .retval_unpriv = 2,
        },
        {
                "stack pointer arithmetic",
@@ -6454,6 +6456,256 @@ static struct bpf_test tests[] = {
                .errstr = "R1 min value is negative",
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
+       {
+               "map access: known scalar += value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += known scalar",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: unknown scalar += value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += unknown scalar",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr += value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R0 pointer += pointer prohibited",
+       },
+       {
+               "map access: known scalar -= value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R1 tried to subtract pointer from scalar",
+       },
+       {
+               "map access: value_ptr -= known scalar",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+                       BPF_MOV64_IMM(BPF_REG_1, 4),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R0 min value is outside of the array range",
+       },
+       {
+               "map access: value_ptr -= known scalar, 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+                       BPF_MOV64_IMM(BPF_REG_1, 6),
+                       BPF_MOV64_IMM(BPF_REG_2, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: unknown scalar -= value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R1 tried to subtract pointer from scalar",
+       },
+       {
+               "map access: value_ptr -= unknown scalar",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R0 min value is negative",
+       },
+       {
+               "map access: value_ptr -= unknown scalar, 2",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xf),
+                       BPF_ALU64_IMM(BPF_OR, BPF_REG_1, 0x7),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x7),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = ACCEPT,
+               .retval = 1,
+       },
+       {
+               "map access: value_ptr -= value_ptr",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+                       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_0),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map_array_48b = { 3 },
+               .result = REJECT,
+               .errstr = "R0 invalid mem access 'inv'",
+               .errstr_unpriv = "R0 pointer -= pointer prohibited",
+       },
        {
                "map lookup helper access to map",
                .insns = {
@@ -8324,7 +8576,7 @@ static struct bpf_test tests[] = {
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map_hash_8b = { 4 },
-               .errstr = "R0 invalid mem access 'inv'",
+               .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
@@ -10295,7 +10547,7 @@ static struct bpf_test tests[] = {
                "check deducing bounds from const, 5",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
-                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
+                       BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
                        BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
@@ -13644,6 +13896,25 @@ static struct bpf_test tests[] = {
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
        },
+       {
+               "calls: ctx read at start of subprog",
+               .insns = {
+                       BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
+                       BPF_JMP_REG(BPF_JSGT, BPF_REG_0, BPF_REG_0, 0),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+                       BPF_EXIT_INSN(),
+                       BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_1, 0),
+                       BPF_MOV64_IMM(BPF_REG_0, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+               .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+               .result_unpriv = REJECT,
+               .result = ACCEPT,
+       },
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -13899,6 +14170,33 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_map_type prog_type,
        }
 }
 
+static int set_admin(bool admin)
+{
+       cap_t caps;
+       const cap_value_t cap_val = CAP_SYS_ADMIN;
+       int ret = -1;
+
+       caps = cap_get_proc();
+       if (!caps) {
+               perror("cap_get_proc");
+               return -1;
+       }
+       if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
+                               admin ? CAP_SET : CAP_CLEAR)) {
+               perror("cap_set_flag");
+               goto out;
+       }
+       if (cap_set_proc(caps)) {
+               perror("cap_set_proc");
+               goto out;
+       }
+       ret = 0;
+out:
+       if (cap_free(caps))
+               perror("cap_free");
+       return ret;
+}
+
 static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
 {
@@ -13907,6 +14205,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
        struct bpf_insn *prog = test->insns;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
+       uint32_t expected_val;
        uint32_t retval;
        int i, err;
 
@@ -13926,10 +14225,12 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                       test->result_unpriv : test->result;
        expected_err = unpriv && test->errstr_unpriv ?
                       test->errstr_unpriv : test->errstr;
+       expected_val = unpriv && test->retval_unpriv ?
+                      test->retval_unpriv : test->retval;
 
        reject_from_alignment = fd_prog < 0 &&
                                (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
-                               strstr(bpf_vlog, "Unknown alignment.");
+                               strstr(bpf_vlog, "misaligned");
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
        if (reject_from_alignment) {
                printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
@@ -13959,16 +14260,20 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
                __u8 tmp[TEST_DATA_LEN << 2];
                __u32 size_tmp = sizeof(tmp);
 
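+               /* The program was loaded unprivileged, but running it via
+                * bpf_prog_test_run() needs CAP_SYS_ADMIN, so take it back
+                * just around the run.
+                */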
+               if (unpriv)
+                       set_admin(true);
                err = bpf_prog_test_run(fd_prog, 1, test->data,
                                        sizeof(test->data), tmp, &size_tmp,
                                        &retval, NULL);
+               if (unpriv)
+                       set_admin(false);
                if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
                        printf("Unexpected bpf_prog_test_run error\n");
                        goto fail_log;
                }
-               if (!err && retval != test->retval &&
-                   test->retval != POINTER_VALUE) {
-                       printf("FAIL retval %d != %d\n", retval, test->retval);
+               if (!err && retval != expected_val &&
+                   expected_val != POINTER_VALUE) {
+                       printf("FAIL retval %d != %d\n", retval, expected_val);
                        goto fail_log;
                }
        }
@@ -14011,33 +14316,6 @@ static bool is_admin(void)
        return (sysadmin == CAP_SET);
 }
 
-static int set_admin(bool admin)
-{
-       cap_t caps;
-       const cap_value_t cap_val = CAP_SYS_ADMIN;
-       int ret = -1;
-
-       caps = cap_get_proc();
-       if (!caps) {
-               perror("cap_get_proc");
-               return -1;
-       }
-       if (cap_set_flag(caps, CAP_EFFECTIVE, 1, &cap_val,
-                               admin ? CAP_SET : CAP_CLEAR)) {
-               perror("cap_set_flag");
-               goto out;
-       }
-       if (cap_set_proc(caps)) {
-               perror("cap_set_proc");
-               goto out;
-       }
-       ret = 0;
-out:
-       if (cap_free(caps))
-               perror("cap_free");
-       return ret;
-}
-
 static void get_unpriv_disabled()
 {
        char buf[2];
index 0150bb2741eb1a0f2a3c219e7316b37753a17298..117f6f35d72fac57ad87f0ed4772a50511d45cb4 100755 (executable)
 # Thus we set MTU to 10K on all involved interfaces. Then both unicast and
 # multicast traffic uses 8K frames.
 #
-# +-----------------------+                +----------------------------------+
-# | H1                    |                |                               H2 |
-# |                       |                |  unicast --> + $h2.111           |
-# |                       |                |  traffic     | 192.0.2.129/28    |
-# |          multicast    |                |              | e-qos-map 0:1     |
-# |          traffic      |                |              |                   |
-# | $h1 + <-----          |                |              + $h2               |
-# +-----|-----------------+                +--------------|-------------------+
-#       |                                                 |
-# +-----|-------------------------------------------------|-------------------+
-# |     + $swp1                                           + $swp2             |
-# |     | >1Gbps                                          | >1Gbps            |
-# | +---|----------------+                     +----------|----------------+  |
-# | |   + $swp1.1        |                     |          + $swp2.111      |  |
+# +---------------------------+            +----------------------------------+
+# | H1                        |            |                               H2 |
+# |                           |            |  unicast --> + $h2.111           |
+# |                 multicast |            |  traffic     | 192.0.2.129/28    |
+# |                 traffic   |            |              | e-qos-map 0:1     |
+# |           $h1 + <-----    |            |              |                   |
+# | 192.0.2.65/28 |           |            |              + $h2               |
+# +---------------|-----------+            +--------------|-------------------+
+#                 |                                       |
+# +---------------|---------------------------------------|-------------------+
+# |         $swp1 +                                       + $swp2             |
+# |        >1Gbps |                                       | >1Gbps            |
+# | +-------------|------+                     +----------|----------------+  |
+# | |     $swp1.1 +      |                     |          + $swp2.111      |  |
 # | |                BR1 |             SW      | BR111                     |  |
-# | |   + $swp3.1        |                     |          + $swp3.111      |  |
-# | +---|----------------+                     +----------|----------------+  |
-# |     \_________________________________________________/                   |
+# | |     $swp3.1 +      |                     |          + $swp3.111      |  |
+# | +-------------|------+                     +----------|----------------+  |
+# |               \_______________________________________/                   |
 # |                                    |                                      |
 # |                                    + $swp3                                |
 # |                                    | 1Gbps bottleneck                     |
@@ -51,6 +51,7 @@
 #                                      |
 #                                   +--|-----------------+
 #                                   |  + $h3          H3 |
+#                                   |  | 192.0.2.66/28   |
 #                                   |  |                 |
 #                                   |  + $h3.111         |
 #                                   |    192.0.2.130/28  |
@@ -59,6 +60,7 @@
 ALL_TESTS="
        ping_ipv4
        test_mc_aware
+       test_uc_aware
 "
 
 lib_dir=$(dirname $0)/../../../net/forwarding
@@ -68,14 +70,14 @@ source $lib_dir/lib.sh
 
 h1_create()
 {
-       simple_if_init $h1
+       simple_if_init $h1 192.0.2.65/28
        mtu_set $h1 10000
 }
 
 h1_destroy()
 {
        mtu_restore $h1
-       simple_if_fini $h1
+       simple_if_fini $h1 192.0.2.65/28
 }
 
 h2_create()
@@ -97,7 +99,7 @@ h2_destroy()
 
 h3_create()
 {
-       simple_if_init $h3
+       simple_if_init $h3 192.0.2.66/28
        mtu_set $h3 10000
 
        vlan_create $h3 111 v$h3 192.0.2.130/28
@@ -108,7 +110,7 @@ h3_destroy()
        vlan_destroy $h3 111
 
        mtu_restore $h3
-       simple_if_fini $h3
+       simple_if_fini $h3 192.0.2.66/28
 }
 
 switch_create()
@@ -251,7 +253,7 @@ measure_uc_rate()
        # average ingress rate to somewhat mitigate this.
        local min_ingress=2147483648
 
-       mausezahn $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
+       $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
                -a own -b $h3mac -t udp -q &
        sleep 1
 
@@ -291,7 +293,7 @@ test_mc_aware()
        check_err $? "Could not get high enough UC-only ingress rate"
        local ucth1=${uc_rate[1]}
 
-       mausezahn $h1 -p 8000 -c 0 -a own -b bc -t udp -q &
+       $MZ $h1 -p 8000 -c 0 -a own -b bc -t udp -q &
 
        local d0=$(date +%s)
        local t0=$(ethtool_stats_get $h3 rx_octets_prio_0)
@@ -311,7 +313,7 @@ test_mc_aware()
                        ret = 100 * ($ucth1 - $ucth2) / $ucth1
                        if (ret > 0) { ret } else { 0 }
                    ")
-       check_err $(bc <<< "$deg > 10")
+       check_err $(bc <<< "$deg > 25")
 
        local interval=$((d1 - d0))
        local mc_ir=$(rate $u0 $u1 $interval)
@@ -335,6 +337,51 @@ test_mc_aware()
        echo "    egress UC throughput  $(humanize ${uc_rate_2[1]})"
        echo "    ingress MC throughput $(humanize $mc_ir)"
        echo "    egress MC throughput  $(humanize $mc_er)"
+       echo
+}
+
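+# Flood the prioritized VLAN with unicast traffic and check that broadcast
+# ARP from $h1 is still answered, i.e. that the UC overload does not starve
+# other traffic.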
+test_uc_aware()
+{
+       RET=0
+
+       $MZ $h2.111 -p 8000 -A 192.0.2.129 -B 192.0.2.130 -c 0 \
+               -a own -b $h3mac -t udp -q &
+
+       local d0=$(date +%s)
+       local t0=$(ethtool_stats_get $h3 rx_octets_prio_1)
+       local u0=$(ethtool_stats_get $swp2 rx_octets_prio_1)
+       sleep 1
+
+       local attempts=50
+       local passes=0
+       local i
+
+       for ((i = 0; i < attempts; ++i)); do
+               if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then
+                       ((passes++))
+               fi
+
+               sleep 0.1
+       done
+
+       local d1=$(date +%s)
+       local t1=$(ethtool_stats_get $h3 rx_octets_prio_1)
+       local u1=$(ethtool_stats_get $swp2 rx_octets_prio_1)
+
+       local interval=$((d1 - d0))
+       local uc_ir=$(rate $u0 $u1 $interval)
+       local uc_er=$(rate $t0 $t1 $interval)
+
+       ((attempts == passes))
+       check_err $?
+
+       # Suppress noise from killing mausezahn.
+       { kill %% && wait; } 2>/dev/null
+
+       log_test "MC performance under UC overload"
+       echo "    ingress UC throughput $(humanize ${uc_ir})"
+       echo "    egress UC throughput  $(humanize ${uc_er})"
+       echo "    sent $attempts BC ARPs, got $passes responses"
 }
 
 trap cleanup EXIT
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
new file mode 100644 (file)
index 0000000..47ed6ce
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for netfilter selftests
+
+TEST_PROGS := nft_trans_stress.sh
+
+include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
new file mode 100644 (file)
index 0000000..1017313
--- /dev/null
@@ -0,0 +1,2 @@
+CONFIG_NET_NS=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_trans_stress.sh b/tools/testing/selftests/netfilter/nft_trans_stress.sh
new file mode 100755 (executable)
index 0000000..f1affd1
--- /dev/null
@@ -0,0 +1,78 @@
+#!/bin/bash
+#
+# This test stress-tests the nf_tables config plane against the packet
+# path: make sure we never release rules that are still visible to other
+# CPUs.
+#
+# set -e
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+testns=testns1
+tables="foo bar baz quux"
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without nft tool"
+       exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+       echo "SKIP: Could not run test without ip tool"
+       exit $ksft_skip
+fi
+
+tmp=$(mktemp)
+
+for table in $tables; do
+       echo add table inet "$table" >> "$tmp"
+       echo flush table inet "$table" >> "$tmp"
+
+       echo "add chain inet $table INPUT { type filter hook input priority 0; }" >> "$tmp"
+       echo "add chain inet $table OUTPUT { type filter hook output priority 0; }" >> "$tmp"
+       for c in $(seq 1 400); do
+               chain=$(printf "chain%03u" "$c")
+               echo "add chain inet $table $chain" >> "$tmp"
+       done
+
+       for c in $(seq 1 400); do
+               chain=$(printf "chain%03u" "$c")
+               for BASE in INPUT OUTPUT; do
+                       echo "add rule inet $table $BASE counter jump $chain" >> "$tmp"
+               done
+               echo "add rule inet $table $chain counter return" >> "$tmp"
+       done
+done
+
+ip netns add "$testns"
+ip -netns "$testns" link set lo up
+
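+# Pin a flood-ping pair (IPv4 and IPv6) to every CPU so that each core keeps
+# the packet path busy while the config plane is being rewritten below.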
+lscpu | grep ^CPU\(s\): | ( read cpu cpunum ;
+cpunum=$((cpunum-1))
+for i in $(seq 0 $cpunum);do
+       mask=$(printf 0x%x $((1<<$i)))
+       ip netns exec "$testns" taskset $mask ping -4 127.0.0.1 -fq > /dev/null &
+       ip netns exec "$testns" taskset $mask ping -6 ::1 -fq > /dev/null &
+done)
+
+sleep 1
+
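+# Load the same ruleset from ten concurrent nft processes to race the
+# config plane against itself as well as against the packet path.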
+for i in $(seq 1 10) ; do ip netns exec "$testns" nft -f "$tmp" & done
+
+for table in $tables;do
+       randsleep=$((RANDOM%10))
+       sleep $randsleep
+       ip netns exec "$testns" nft delete table inet $table 2>/dev/null
+done
+
+randsleep=$((RANDOM%10))
+sleep $randsleep
+
+pkill -9 ping
+
+wait
+
+rm -f "$tmp"
+ip netns del "$testns"
index ede4d3dae7505ef31f822bac9b613aef464f6125..689f6c8ebcd8d649055069a342e98728433fe2e7 100644 (file)
@@ -1,12 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := cache_shape
-
-all: $(TEST_PROGS)
-
-$(TEST_PROGS): ../harness.c ../utils.c
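+# TEST_GEN_PROGS lets the common lib.mk supply the build and clean rules
+# and place the binaries in $(OUTPUT).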
+TEST_GEN_PROGS := cache_shape
 
 top_srcdir = ../../../../..
 include ../../lib.mk
 
-clean:
-       rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
index 1b0e9e9a2ddce5a3377ded7dad656202586339dc..f2fa101c5a6ac149bd93c4f8140aed76798b8e69 100644 (file)
@@ -47,8 +47,9 @@ static int ok(void)
        return 0;
 }
 
-#define REG_POISON     0x5a5aUL
-#define POISONED_REG(n)        ((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n))
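+/*
+ * REG_POISON is presumably also emitted into inline assembly, where a UL
+ * suffix would not assemble, so the widening to unsigned long is done here
+ * with casts instead.
+ */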
+#define REG_POISON     0x5a5a
+#define POISONED_REG(n)        ((((unsigned long)REG_POISON) << 48) | ((n) << 32) | \
+                        (((unsigned long)REG_POISON) << 16) | (n))
 
 static inline void poison_regs(void)
 {
@@ -105,6 +106,20 @@ static void dump_regs(void)
        }
 }
 
+#ifdef _CALL_AIXDESC
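+/*
+ * With _CALL_AIXDESC (the ELFv1/AIX function-descriptor ABI) a function
+ * pointer points at an {entry, TOC, env} descriptor rather than directly
+ * at code, so branch through a descriptor whose entry point is BAD_NIP.
+ */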
+struct opd {
+       unsigned long ip;
+       unsigned long toc;
+       unsigned long env;
+};
+static struct opd bad_opd = {
+       .ip = BAD_NIP,
+};
+#define BAD_FUNC (&bad_opd)
+#else
+#define BAD_FUNC BAD_NIP
+#endif
+
 int test_wild_bctr(void)
 {
        int (*func_ptr)(void);
@@ -133,7 +148,7 @@ int test_wild_bctr(void)
 
                poison_regs();
 
-               func_ptr = (int (*)(void))BAD_NIP;
+               func_ptr = (int (*)(void))BAD_FUNC;
                func_ptr();
 
                FAIL_IF(1); /* we didn't segv? */
index bd5dfa509272a75b97b1dbf8ac2be00c0a1ab1cc..23f4caf48ffc6b6b84d00ac212ea6352eba4f723 100644 (file)
@@ -5,6 +5,9 @@ noarg:
 # The EBB handler is 64-bit code and everything links against it
 CFLAGS += -m64
 
+# Toolchains may build PIE by default, which breaks the assembly
+LDFLAGS += -no-pie
+
 TEST_GEN_PROGS := reg_access_test event_attributes_test cycles_test    \
         cycles_with_freeze_test pmc56_overflow_test            \
         ebb_vs_cpu_event_test cpu_event_vs_ebb_test            \
index 9b35ca8e8f13d4bbe77ba24d3e061031071c39c3..8d3f006c98cc39d608f6d7a35acecd3e6a16b971 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
+TEST_GEN_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
               ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
               ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
               perf-hwbreak ptrace-syscall
@@ -7,14 +7,9 @@ TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
 top_srcdir = ../../../../..
 include ../../lib.mk
 
-all: $(TEST_PROGS)
-
 CFLAGS += -m64 -I../../../../../usr/include -I../tm -mhtm -fno-pie
 
-ptrace-pkey core-pkey: child.h
-ptrace-pkey core-pkey: LDLIBS += -pthread
-
-$(TEST_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
+$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: child.h
+$(OUTPUT)/ptrace-pkey $(OUTPUT)/core-pkey: LDLIBS += -pthread
 
-clean:
-       rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c ../lib/reg.S ptrace.h
index 327fa943c7f3663f9b305a4a1c4f3b572d866b3f..dbdffa2e2c8248f39652c6803245452926ed1790 100644 (file)
@@ -67,8 +67,8 @@ trans:
                "3: ;"
                : [res] "=r" (result), [texasr] "=r" (texasr)
                : [gpr_1]"i"(GPR_1), [gpr_2]"i"(GPR_2), [gpr_4]"i"(GPR_4),
-               [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "r" (&a),
-               [flt_2] "r" (&b), [flt_4] "r" (&d)
+               [sprn_texasr] "i" (SPRN_TEXASR), [flt_1] "b" (&a),
+               [flt_4] "b" (&d)
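+               /* "b" = any GPR except r0, as needed for operands used as
+                * base addresses */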
                : "memory", "r5", "r6", "r7",
                "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
                "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
index 44690f1bb26ae17230e146798efd80aeee155b28..85861c46b4457db2636c6f46cb551958cdfb73eb 100644 (file)
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0+
 
 TEST_GEN_PROGS := rfi_flush
+top_srcdir = ../../../../..
 
 CFLAGS += -I../../../../../usr/include
 
index 564ed45bbf731e2cbbed7e04967a22a6d00fb01e..0a7d0afb26b88529406fe2543dde1d5a6e7a1092 100644 (file)
@@ -49,6 +49,7 @@ int rfi_flush_test(void)
        struct perf_event_read v;
        __u64 l1d_misses_total = 0;
        unsigned long iterations = 100000, zero_size = 24 * 1024;
+       unsigned long l1d_misses_expected;
        int rfi_flush_org, rfi_flush;
 
        SKIP_IF(geteuid() != 0);
@@ -71,6 +72,12 @@ int rfi_flush_test(void)
 
        iter = repetitions;
 
+       /*
+        * We expect to see an L1D miss for each cacheline accessed when
+        * rfi_flush is set. Allow a small variation on this.
+        */
+       l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2);
+
 again:
        FAIL_IF(perf_event_reset(fd));
 
@@ -78,10 +85,9 @@ again:
 
        FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
 
-       /* Expect at least zero_size/CACHELINE_SIZE misses per iteration */
-       if (v.l1d_misses >= (iterations * zero_size / CACHELINE_SIZE) && rfi_flush)
+       if (rfi_flush && v.l1d_misses >= l1d_misses_expected)
                passes++;
-       else if (v.l1d_misses < iterations && !rfi_flush)
+       else if (!rfi_flush && v.l1d_misses < (l1d_misses_expected / 2))
                passes++;
 
        l1d_misses_total += v.l1d_misses;
@@ -92,13 +98,15 @@ again:
        if (passes < repetitions) {
                printf("FAIL (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d failures]\n",
                       rfi_flush, l1d_misses_total, rfi_flush ? '<' : '>',
-                      rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+                      rfi_flush ? repetitions * l1d_misses_expected :
+                      repetitions * l1d_misses_expected / 2,
                       repetitions - passes, repetitions);
                rc = 1;
        } else
                printf("PASS (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d pass]\n",
                       rfi_flush, l1d_misses_total, rfi_flush ? '>' : '<',
-                      rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+                      rfi_flush ? repetitions * l1d_misses_expected :
+                      repetitions * l1d_misses_expected / 2,
                       passes, repetitions);
 
        if (rfi_flush == rfi_flush_org) {
index 1fca25c6ace067ffb7a913508b4e13059cb04770..209a958dca127689bccd1ce8f03cde64237c1872 100644 (file)
@@ -1,15 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
-TEST_PROGS := signal signal_tm
-
-all: $(TEST_PROGS)
-
-$(TEST_PROGS): ../harness.c ../utils.c signal.S
+TEST_GEN_PROGS := signal signal_tm
 
 CFLAGS += -maltivec
-signal_tm: CFLAGS += -mhtm
+$(OUTPUT)/signal_tm: CFLAGS += -mhtm
 
 top_srcdir = ../../../../..
 include ../../lib.mk
 
-clean:
-       rm -f $(TEST_PROGS) *.o
+$(TEST_GEN_PROGS): ../harness.c ../utils.c signal.S
index fcd2dcb8972babf90209b699307bd086f08c5f90..bdc081afedb0f0788c26ad4a6914e7895b9be90a 100644 (file)
@@ -8,6 +8,7 @@ EXTRA_CLEAN = $(OUTPUT)/*.o $(OUTPUT)/check-reversed.S
 top_srcdir = ../../../../..
 include ../../lib.mk
 
+$(OUTPUT)/switch_endian_test: ASFLAGS += -I $(OUTPUT)
 $(OUTPUT)/switch_endian_test: $(OUTPUT)/check-reversed.S
 
 $(OUTPUT)/check-reversed.o: $(OUTPUT)/check.o
index 43c342845be0ee1326214319ce8dabb64a958e94..ed62f4153d3eb58e3b0c426e6ecb14a666789a96 100644 (file)
@@ -25,7 +25,6 @@
 #include "utils.h"
 
 static char auxv[4096];
-extern unsigned int dscr_insn[];
 
 int read_auxv(char *buf, ssize_t buf_size)
 {
@@ -247,7 +246,8 @@ static void sigill_handler(int signr, siginfo_t *info, void *unused)
        ucontext_t *ctx = (ucontext_t *)unused;
        unsigned long *pc = &UCONTEXT_NIA(ctx);
 
-       if (*pc == (unsigned long)&dscr_insn) {
+       /* mtspr 3,RS to check for move to DSCR below */
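+       /* The 0xfc1fffff mask clears the RS field, so any source register
+        * matches. */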
+       if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) {
                if (!warned++)
                        printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
                *pc += 4;
@@ -271,5 +271,5 @@ void set_dscr(unsigned long val)
                init = 1;
        }
 
-       asm volatile("dscr_insn: mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+       asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
 }
index 6f1f4a6e1ecb1effcb16d4551e3f691f625f9e0c..85744425b08d3bd39022bfd3c09116262ae54ed8 100644 (file)
@@ -13,7 +13,7 @@
  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
-/* Test readlink /proc/self/map_files/... with address 0. */
+/* Test readlink /proc/self/map_files/... with minimum address. */
 #include <errno.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -47,6 +47,11 @@ static void fail(const char *fmt, unsigned long a, unsigned long b)
 int main(void)
 {
        const unsigned int PAGE_SIZE = sysconf(_SC_PAGESIZE);
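+       /*
+        * arm does not allow mapping the lowest pages (FIRST_USER_ADDRESS is
+        * two pages there), so use the lowest address that can be mapped.
+        */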
+#ifdef __arm__
+       unsigned long va = 2 * PAGE_SIZE;
+#else
+       unsigned long va = 0;
+#endif
        void *p;
        int fd;
        unsigned long a, b;
@@ -55,7 +60,7 @@ int main(void)
        if (fd == -1)
                return 1;
 
-       p = mmap(NULL, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
+       p = mmap((void *)va, PAGE_SIZE, PROT_NONE, MAP_PRIVATE|MAP_FILE|MAP_FIXED, fd, 0);
        if (p == MAP_FAILED) {
                if (errno == EPERM)
                        return 2;
index 87a04a8a5945c21222a21c103e5ed88bff0f5c67..7607ba3e3cbe4eba89dfe5a35badae9182b974a9 100755 (executable)
@@ -134,9 +134,9 @@ def exec_cmd(args, pm, stage, command):
     (rawout, serr) = proc.communicate()
 
     if proc.returncode != 0 and len(serr) > 0:
-        foutput = serr.decode("utf-8")
+        foutput = serr.decode("utf-8", errors="ignore")
     else:
-        foutput = rawout.decode("utf-8")
+        foutput = rawout.decode("utf-8", errors="ignore")
 
     proc.stdout.close()
     proc.stderr.close()
@@ -169,6 +169,8 @@ def prepare_env(args, pm, stage, prefix, cmdlist, output = None):
                   file=sys.stderr)
             print("\n{} *** Error message: \"{}\"".format(prefix, foutput),
                   file=sys.stderr)
+            print("returncode {}; expected {}".format(proc.returncode,
+                                                      exit_codes))
             print("\n{} *** Aborting test run.".format(prefix), file=sys.stderr)
             print("\n\n{} *** stdout ***".format(proc.stdout), file=sys.stderr)
             print("\n\n{} *** stderr ***".format(proc.stderr), file=sys.stderr)
@@ -195,12 +197,18 @@ def run_one_test(pm, args, index, tidx):
         print('-----> execute stage')
     pm.call_pre_execute()
     (p, procout) = exec_cmd(args, pm, 'execute', tidx["cmdUnderTest"])
-    exit_code = p.returncode
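+    # exec_cmd() may return no process object, e.g. if the command could not
+    # be started, in which case there is no exit code to compare against.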
+    if p:
+        exit_code = p.returncode
+    else:
+        exit_code = None
+
     pm.call_post_execute()
 
-    if (exit_code != int(tidx["expExitCode"])):
+    if (exit_code is None or exit_code != int(tidx["expExitCode"])):
         result = False
-        print("exit:", exit_code, int(tidx["expExitCode"]))
+        print("exit: {!r}".format(exit_code))
+        print("exit: {}".format(int(tidx["expExitCode"])))
+        #print("exit: {!r} {}".format(exit_code, int(tidx["expExitCode"])))
         print(procout)
     else:
         if args.verbose > 0: