Merge tag 'drm-misc-next-2017-01-30' of git://anongit.freedesktop.org/git/drm-misc...
author Dave Airlie <airlied@redhat.com>
Tue, 31 Jan 2017 22:31:09 +0000 (08:31 +1000)
committer Dave Airlie <airlied@redhat.com>
Tue, 31 Jan 2017 22:31:09 +0000 (08:31 +1000)
Another round of -misc stuff:
- Noralf's cleanup of the per-driver debugfs cleanup hooks (not yet
  everything; some more driver patches are still awaiting acks).
- More doc work.
- EDID/infoframe fixes from Ville.
- Misc one-patch fixes all over, as usual.

Noralf needs this for his tinydrm pull request.

* tag 'drm-misc-next-2017-01-30' of git://anongit.freedesktop.org/git/drm-misc: (48 commits)
  drm/vc4: Remove vc4_debugfs_cleanup()
  dma/fence: Export enable-signaling tracepoint for emission by drivers
  drm/tilcdc: Remove tilcdc_debugfs_cleanup()
  drm/tegra: Remove tegra_debugfs_cleanup()
  drm/sti: Remove drm_debugfs_remove_files() calls
  drm/radeon: Remove drm_debugfs_remove_files() call
  drm/omap: Remove omap_debugfs_cleanup()
  drm/hdlcd: Remove hdlcd_debugfs_cleanup()
  drm/etnaviv: Remove etnaviv_debugfs_cleanup()
  drm/etnaviv: allow build with COMPILE_TEST
  drm/amd/amdgpu: Remove drm_debugfs_remove_files() call
  drm/prime: Clarify DMA-BUF/GEM Object lifetime
  drm/ttm: Make sure BOs being swapped out are cacheable
  drm/atomic: Remove drm_atomic_debugfs_cleanup()
  drm: drm_minor_register(): Clean up debugfs on failure
  drm: debugfs: Remove all files automatically on cleanup
  drm/fourcc: add vivante tiled layout format modifiers
  drm/edid: Set YQ bits in the AVI infoframe according to CEA-861-F
  drm/edid: Set AVI infoframe Q even when QS=0
  drm/edid: Introduce drm_hdmi_avi_infoframe_quant_range()
  ...

748 files changed:
.mailmap
Documentation/ABI/testing/sysfs-devices-deferred_probe [deleted file]
Documentation/devicetree/bindings/display/zte,vou.txt
Documentation/devicetree/bindings/i2c/i2c.txt
Documentation/devicetree/bindings/mtd/tango-nand.txt
Documentation/devicetree/bindings/net/ti,dp83867.txt
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/filesystems/proc.txt
Documentation/sound/hd-audio/dp-mst.rst
Documentation/vm/page_frags [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/cache.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/module.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/setup.h
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/intc-compact.c
arch/arc/kernel/mcip.c
arch/arc/kernel/module.c
arch/arc/mm/cache.c
arch/arc/mm/init.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-icev2.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm-revc.dts
arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-mdm9615.dtsi
arch/arm/boot/dts/sun6i-a31-hummingbird.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/ftrace.h
arch/arm/include/asm/types.h [deleted file]
arch/arm/include/asm/virt.h
arch/arm/include/uapi/asm/types.h [new file with mode: 0644]
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/smp_tlb.c
arch/arm/kvm/arm.c
arch/arm/mach-omap1/dma.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-ux500/pm.c
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/exynos/exynos5433.dtsi
arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/virt.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/traps.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/frv/include/asm/atomic.h
arch/mn10300/include/asm/switch_to.h
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/include/asm/pgtable-be-types.h
arch/powerpc/include/asm/pgtable-types.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init-common.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/power9-events-list.h
arch/powerpc/perf/power9-pmu.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/ctl_reg.h
arch/s390/kvm/kvm-s390.c
arch/tile/kernel/ptrace.c
arch/x86/boot/string.c
arch/x86/boot/string.h
arch/x86/crypto/aesni-intel_glue.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/events/amd/ibs.c
arch/x86/events/core.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/ds.c
arch/x86/events/intel/rapl.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/microcode_intel.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/stacktrace.h
arch/x86/include/asm/switch_to.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/tsc.c
arch/x86/kernel/unwind_frame.c
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/x86.c
arch/x86/mm/mpx.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/acpi.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-mid/device_libs/Makefile
arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c [new file with mode: 0644]
arch/x86/platform/intel-mid/device_libs/platform_spidev.c [deleted file]
block/blk-lib.c
block/blk-mq.c
block/blk-zoned.c
block/partition-generic.c
drivers/auxdisplay/Kconfig
drivers/base/base.h
drivers/base/core.c
drivers/base/dd.c
drivers/base/memory.c
drivers/block/nbd.c
drivers/block/virtio_blk.c
drivers/block/zram/zram_drv.c
drivers/char/mem.c
drivers/char/ppdev.c
drivers/char/virtio_console.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clocksource/exynos_mct.c
drivers/dma/dw/Kconfig
drivers/dma/ioat/hw.h
drivers/dma/ioat/init.c
drivers/dma/omap-dma.c
drivers/dma/pl330.c
drivers/dma/sh/rcar-dmac.c
drivers/dma/stm32-dma.c
drivers/dma/ti-dma-crossbar.c
drivers/extcon/extcon.c
drivers/firmware/efi/fake_mem.c
drivers/firmware/efi/libstub/efistub.h
drivers/firmware/efi/libstub/fdt.c
drivers/firmware/efi/memmap.c
drivers/gpio/gpio-mxs.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_events.c
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
drivers/gpu/drm/arm/malidp_drv.c
drivers/gpu/drm/arm/malidp_drv.h
drivers/gpu/drm/arm/malidp_hw.c
drivers/gpu/drm/arm/malidp_hw.h
drivers/gpu/drm/arm/malidp_planes.c
drivers/gpu/drm/arm/malidp_regs.h
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence_reg.c
drivers/gpu/drm/i915/i915_gem_fence_reg.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_internal.c
drivers/gpu/drm/i915/i915_gem_object.h
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_sw_fence.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_vgpu.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_engine_cs.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_guc_log.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_hotplug.c
drivers/gpu/drm/i915/intel_huc.c [new file with mode: 0644]
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pipe_crc.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uc.c
drivers/gpu/drm/i915/intel_uc.h
drivers/gpu/drm/meson/meson_plane.c
drivers/gpu/drm/meson/meson_venc.c
drivers/gpu/drm/meson/meson_venc_cvbs.c
drivers/gpu/drm/mgag200/mgag200_drv.c
drivers/gpu/drm/mgag200/mgag200_drv.h
drivers/gpu/drm/mgag200/mgag200_i2c.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
drivers/gpu/drm/omapdrm/dss/dispc.c
drivers/gpu/drm/omapdrm/dss/dsi.c
drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
drivers/gpu/drm/omapdrm/dss/omapdss.h
drivers/gpu/drm/omapdrm/omap_connector.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_encoder.c
drivers/gpu/drm/omapdrm/omap_fb.c
drivers/gpu/drm/omapdrm/omap_irq.c
drivers/gpu/drm/omapdrm/omap_plane.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/sti/Makefile
drivers/gpu/drm/sti/sti_drv.c
drivers/gpu/drm/sti/sti_drv.h
drivers/gpu/drm/sti/sti_hdmi.c
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/sti/sti_plane.c
drivers/gpu/drm/sti/sti_plane.h
drivers/gpu/drm/sti/sti_vtac.c [deleted file]
drivers/gpu/drm/sti/sti_vtg.c
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/virtio/virtgpu_fb.c
drivers/gpu/drm/zte/Kconfig
drivers/gpu/drm/zte/Makefile
drivers/gpu/drm/zte/zx_drm_drv.c
drivers/gpu/drm/zte/zx_drm_drv.h
drivers/gpu/drm/zte/zx_hdmi.c
drivers/gpu/drm/zte/zx_hdmi_regs.h
drivers/gpu/drm/zte/zx_plane.c
drivers/gpu/drm/zte/zx_plane.h
drivers/gpu/drm/zte/zx_plane_regs.h
drivers/gpu/drm/zte/zx_tvenc.c [new file with mode: 0644]
drivers/gpu/drm/zte/zx_tvenc_regs.h [new file with mode: 0644]
drivers/gpu/drm/zte/zx_vou.c
drivers/gpu/drm/zte/zx_vou.h
drivers/gpu/drm/zte/zx_vou_regs.h
drivers/hid/hid-core.c
drivers/hid/hid-corsair.c
drivers/hid/hid-cypress.c
drivers/hid/hid-ids.h
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-dev.c
drivers/input/joydev.c
drivers/input/joystick/xpad.c
drivers/input/misc/adxl34x-i2c.c
drivers/input/mouse/alps.h
drivers/input/mouse/synaptics_i2c.c
drivers/input/rmi4/Kconfig
drivers/input/serio/i8042-x86ia64io.h
drivers/input/touchscreen/elants_i2c.c
drivers/md/md.h
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5-cache.c
drivers/md/raid5.c
drivers/memstick/core/memstick.c
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/debugfs.c
drivers/misc/mei/hbm.c
drivers/misc/mei/hw.h
drivers/misc/mei/mei_dev.h
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/meson-gx-mmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/tango_nand.c
drivers/mtd/nand/xway_nand.c
drivers/net/appletalk/ipddp.c
drivers/net/dsa/bcm_sf2.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cavium/thunder/thunder_bgx.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
drivers/net/ethernet/qualcomm/emac/emac-phy.c
drivers/net/ethernet/qualcomm/emac/emac.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/ethernet/ti/cpmac.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/atusb.c
drivers/net/phy/Kconfig
drivers/net/phy/dp83867.c
drivers/net/phy/marvell.c
drivers/net/phy/phy.c
drivers/net/usb/r8152.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/intersil/orinoco/mic.c
drivers/net/wireless/intersil/orinoco/mic.h
drivers/net/wireless/intersil/orinoco/orinoco.h
drivers/net/wireless/realtek/rtlwifi/usb.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/pci/host/pci-xgene-msi.c
drivers/pci/host/pcie-designware.c
drivers/pci/probe.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/surface3-wmi.c
drivers/remoteproc/remoteproc_core.c
drivers/rpmsg/rpmsg_core.c
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qedi/Kconfig
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_nx2.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla2xxx/tcm_qla2xxx.h
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/ses.c
drivers/scsi/snic/snic_main.c
drivers/soc/ti/wkup_m3_ipc.c
drivers/spi/Kconfig
drivers/spi/spi-armada-3700.c
drivers/spi/spi-axi-spi-engine.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sh-msiof.c
drivers/target/target_core_transport.c
drivers/target/target_core_xcopy.c
drivers/target/target_core_xcopy.h
drivers/thermal/rockchip_thermal.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_hwmon.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/atmel_serial.c
drivers/tty/sysrq.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/atmel_usba_udc.h
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_debugfs.c
drivers/usb/serial/ch341.c
drivers/usb/serial/kl5kusb105.c
drivers/usb/wusbcore/crypto.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/scsi.c
drivers/vhost/vsock.c
drivers/video/fbdev/core/fbcmap.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_ring.c
drivers/vme/bridges/vme_ca91cx42.c
drivers/xen/platform-pci.c
fs/Kconfig
fs/aio.c
fs/binfmt_elf.c
fs/btrfs/async-thread.c
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/tree-log.c
fs/btrfs/uuid-tree.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/coredump.c
fs/dax.c
fs/dcache.c
fs/direct-io.c
fs/ext2/Kconfig
fs/ext4/Kconfig
fs/f2fs/segment.c
fs/f2fs/super.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/libfs.c
fs/namespace.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/ocfs2/dlmglue.c
fs/ocfs2/stackglue.c
fs/ocfs2/stackglue.h
fs/overlayfs/namei.c
fs/posix_acl.c
fs/proc/base.c
fs/proc/proc_sysctl.c
fs/romfs/super.c
fs/ubifs/Kconfig
fs/ubifs/dir.c
fs/ubifs/ioctl.c
fs/ubifs/journal.c
fs/ubifs/tnc.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_alloc.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_dir2.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_log.c
include/drm/drm_crtc_helper.h
include/drm/drm_dp_helper.h
include/kvm/arm_arch_timer.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/coredump.h
include/linux/cpuhotplug.h
include/linux/efi.h
include/linux/filter.h
include/linux/gfp.h
include/linux/gpio/driver.h
include/linux/i2c.h
include/linux/jump_label_ratelimit.h
include/linux/kernel.h
include/linux/memcontrol.h
include/linux/memory_hotplug.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mmzone.h
include/linux/netdevice.h
include/linux/nmi.h
include/linux/perf_event.h
include/linux/rcupdate.h
include/linux/remoteproc.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/sunrpc/svc_xprt.h
include/linux/swap.h
include/linux/tcp.h
include/linux/timerfd.h
include/scsi/libfc.h
include/sound/hdmi-codec.h
include/sound/soc.h
include/target/target_core_base.h
include/trace/events/btrfs.h
include/trace/events/mmflags.h
include/uapi/drm/Kbuild
include/uapi/drm/i915_drm.h
include/uapi/linux/Kbuild
include/uapi/linux/nl80211.h
include/uapi/linux/pkt_cls.h
include/uapi/linux/tc_act/tc_bpf.h
include/uapi/linux/timerfd.h [new file with mode: 0644]
init/Kconfig
ipc/sem.c
kernel/bpf/arraymap.c
kernel/bpf/core.c
kernel/bpf/hashtab.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/capability.c
kernel/cpu.c
kernel/events/core.c
kernel/jump_label.c
kernel/memremap.c
kernel/module.c
kernel/panic.c
kernel/pid_namespace.c
kernel/rcu/rcu.h
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/signal.c
kernel/sysctl.c
kernel/time/tick-sched.c
kernel/time/tick-sched.h
kernel/ucount.c
kernel/watchdog.c
kernel/watchdog_hld.c
lib/Kconfig.debug
lib/ioremap.c
lib/iov_iter.c
lib/radix-tree.c
lib/swiotlb.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/khugepaged.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page_alloc.c
mm/slab.c
mm/slub.c
mm/swapfile.c
mm/vmscan.c
net/Kconfig
net/ax25/ax25_subr.c
net/bridge/br_netfilter_hooks.c
net/ceph/crypto.c
net/core/dev.c
net/core/flow_dissector.c
net/core/skbuff.c
net/core/sock.c
net/dsa/dsa2.c
net/ipv4/fib_semantics.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_metrics.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/mcast.c
net/ipv6/route.c
net/ipv6/seg6_hmac.c
net/ipv6/seg6_iptunnel.c
net/iucv/af_iucv.c
net/mac80211/chan.c
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/rate.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/vht.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_payload.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
net/netlabel/netlabel_kapi.c
net/openvswitch/conntrack.c
net/qrtr/qrtr.c
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/cls_bpf.c
net/sctp/outqueue.c
net/socket.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/tipc/discover.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/wireless/nl80211.c
samples/bpf/sock_example.h
samples/bpf/trace_output_user.c
samples/vfio-mdev/mtty.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/patch_hdmi.c
sound/soc/codecs/nau8825.c
sound/soc/codecs/nau8825.h
sound/soc/codecs/rt5645.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/codecs/wm_adsp.c
sound/soc/dwc/designware_i2s.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/intel/skylake/skl-sst.c
sound/soc/sh/rcar/core.c
sound/soc/soc-core.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/usb/quirks.c
tools/lib/subcmd/parse-options.c
tools/lib/subcmd/parse-options.h
tools/lib/traceevent/plugin_sched_switch.c
tools/perf/Documentation/perf-record.txt
tools/perf/Makefile.perf
tools/perf/builtin-kmem.c
tools/perf/builtin-record.c
tools/perf/builtin-sched.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/symbol-elf.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/net/run_netsocktests
tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
tools/testing/selftests/x86/protection_keys.c
tools/virtio/ringtest/main.h
tools/virtio/ringtest/run-on-all.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/hyp/timer-sr.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c
virt/lib/irqbypass.c

index 02d261407683dcfa483cf15247b2cf31cff0432a..67dc22ffc9a80cb4fd6abeeba3d2ec7f7ae2ba19 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -137,6 +137,7 @@ Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
 Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
 Sam Ravnborg <sam@mars.ravnborg.org>
 Santosh Shilimkar <ssantosh@kernel.org>
 Santosh Shilimkar <santosh.shilimkar@oracle.org>
@@ -150,10 +151,13 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
+Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Subhash Jadavani <subhashj@codeaurora.org>
 Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 Sumit Semwal <sumit.semwal@ti.com>
 Tejun Heo <htejun@gmail.com>
 Thomas Graf <tgraf@suug.ch>
+Thomas Pedersen <twp@codeaurora.org>
 Tony Luck <tony.luck@intel.com>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
deleted file mode 100644 (file)
index 58553d7..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-What:          /sys/devices/.../deferred_probe
-Date:          August 2016
-Contact:       Ben Hutchings <ben.hutchings@codethink.co.uk>
-Description:
-               The /sys/devices/.../deferred_probe attribute is
-               present for all devices.  If a driver detects during
-               probing a device that a related device is not yet
-               ready, it may defer probing of the first device.  The
-               kernel will retry probing the first device after any
-               other device is successfully probed.  This attribute
-               reads as 1 if probing of this device is currently
-               deferred, or 0 otherwise.
index 740e5bd2e4f78d03721ff205bf0406ed7b99bd34..9c356284232b0a6ef98d0a7e8d313ed757a342e5 100644 (file)
@@ -49,6 +49,15 @@ Required properties:
        "osc_clk"
        "xclk"
 
+* TV Encoder output device
+
+Required properties:
+ - compatible: should be "zte,zx296718-tvenc"
+ - reg: Physical base address and length of the TVENC device IO region
+ - zte,tvenc-power-control: the phandle to SYSCTRL block followed by two
+   integer cells.  The first cell is the offset of SYSCTRL register used
+   to control TV Encoder DAC power, and the second cell is the bit mask.
+
 Example:
 
 vou: vou@1440000 {
@@ -81,4 +90,10 @@ vou: vou@1440000 {
                         <&topcrm HDMI_XCLK>;
                clock-names = "osc_cec", "osc_clk", "xclk";
        };
+
+       tvenc: tvenc@2000 {
+               compatible = "zte,zx296718-tvenc";
+               reg = <0x2000 0x1000>;
+               zte,tvenc-power-control = <&sysctrl 0x170 0x10>;
+       };
 };
index 5fa691e6f6388320acd4199995ef0072e9e70faa..cee9d5055fa27c7593f5a9da09661c4f7eed9647 100644 (file)
@@ -62,6 +62,9 @@ wants to support one of the below features, it should adapt the bindings below.
        "irq" and "wakeup" names are recognized by I2C core, other names are
        left to individual drivers.
 
+- host-notify
+       device uses SMBus host notify protocol instead of interrupt line.
+
 - multi-master
        states that there is another master active on this bus. The OS can use
        this information to adapt power management to keep the arbitration awake
@@ -81,6 +84,11 @@ Binding may contain optional "interrupts" property, describing interrupts
 used by the device. I2C core will assign "irq" interrupt (or the very first
 interrupt if not using interrupt names) as primary interrupt for the slave.
 
+Alternatively, devices supporting SMbus Host Notify, and connected to
+adapters that support this feature, may use "host-notify" property. I2C
+core will create a virtual interrupt for Host Notify and assign it as
+primary interrupt for the slave.
+
 Also, if device is marked as a wakeup source, I2C core will set up "wakeup"
 interrupt for the device. If "wakeup" interrupt name is not present in the
 binding, then primary interrupt will be used as wakeup interrupt.
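[Editor's note: a minimal sketch of the client-driver side of the "host-notify" binding described above. The demo_* names are hypothetical, and the probe body assumes the virtual interrupt installed by the I2C core as stated in the binding text; this is an illustration, not an in-tree driver.]

	#include <linux/i2c.h>
	#include <linux/interrupt.h>

	static irqreturn_t demo_notify_handler(int irq, void *dev_id)
	{
		/* Invoked when the device sends an SMBus Host Notify message. */
		return IRQ_HANDLED;
	}

	static int demo_probe(struct i2c_client *client,
			      const struct i2c_device_id *id)
	{
		/*
		 * With "host-notify" in the device node (and an adapter that
		 * supports the feature), the I2C core assigns its virtual
		 * Host Notify interrupt as client->irq, so the driver
		 * requests it like any wired interrupt line.
		 */
		return devm_request_threaded_irq(&client->dev, client->irq,
						 NULL, demo_notify_handler,
						 IRQF_ONESHOT, "demo", client);
	}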
index ad5a02f2ac8c9da32eb72192ba9f313fa82d2e4b..cd1bf2ac9055fc3561dbf6da805c8aa5ca3ea49c 100644 (file)
@@ -5,7 +5,7 @@ Required properties:
 - compatible: "sigma,smp8758-nand"
 - reg: address/size of nfc_reg, nfc_mem, and pbus_reg
 - dmas: reference to the DMA channel used by the controller
-- dma-names: "nfc_sbox"
+- dma-names: "rxtx"
 - clocks: reference to the system clock
 - #address-cells: <1>
 - #size-cells: <0>
@@ -17,9 +17,9 @@ Example:
 
        nandc: nand-controller@2c000 {
                compatible = "sigma,smp8758-nand";
-               reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+               reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>;
                dmas = <&dma0 3>;
-               dma-names = "nfc_sbox";
+               dma-names = "rxtx";
                clocks = <&clkgen SYS_CLK>;
                #address-cells = <1>;
                #size-cells = <0>;
index 85bf945b898f0d156702f16d76be315c9108e1aa..afe9630a5e7de1ff01806675d6a8908cc200d465 100644 (file)
@@ -3,9 +3,11 @@
 Required properties:
        - reg - The ID number for the phy, usually a small integer
        - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
-               for applicable values
+               for applicable values. Required only if interface type is
+               PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
        - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
-               for applicable values
+               for applicable values. Required only if interface type is
+               PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
        - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
                for applicable values
 
index da6614c6379604bb209156cea87a4e3b27a7d1b8..dc975064fa273c3600eee7d822c94eb5fea73502 100644 (file)
@@ -1,17 +1,23 @@
 Renesas MSIOF spi controller
 
 Required properties:
-- compatible           : "renesas,msiof-<soctype>" for SoCs,
-                        "renesas,sh-msiof" for SuperH, or
-                        "renesas,sh-mobile-msiof" for SH Mobile series.
-                        Examples with soctypes are:
-                        "renesas,msiof-r8a7790" (R-Car H2)
+- compatible           : "renesas,msiof-r8a7790" (R-Car H2)
                         "renesas,msiof-r8a7791" (R-Car M2-W)
                         "renesas,msiof-r8a7792" (R-Car V2H)
                         "renesas,msiof-r8a7793" (R-Car M2-N)
                         "renesas,msiof-r8a7794" (R-Car E2)
                         "renesas,msiof-r8a7796" (R-Car M3-W)
                         "renesas,msiof-sh73a0" (SH-Mobile AG5)
+                        "renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
+                        "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+                        "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+                        "renesas,sh-msiof"      (deprecated)
+
+                        When compatible with the generic version, nodes
+                        must list the SoC-specific version corresponding
+                        to the platform first followed by the generic
+                        version.
+
 - reg                  : A list of offsets and lengths of the register sets for
                         the device.
                         If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
 Example:
 
        msiof0: spi@e6e20000 {
-               compatible = "renesas,msiof-r8a7791";
+               compatible = "renesas,msiof-r8a7791",
+                            "renesas,rcar-gen2-msiof";
                reg = <0 0xe6e20000 0 0x0064>;
                interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
index 72624a16b79284c0f2484741144e6b90ac084064..c94b4675d021ffd374de22d7d83df61dbb6c34dd 100644 (file)
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                             T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and  file system UIDs
  Gid                         Real, effective, saved set, and  file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
index 58b72437e6c3e27573ab7d640ff8310b2250889d..1617459e332fdd92af570e0bd1092262e0832583 100644 (file)
@@ -19,6 +19,23 @@ PCM
 ===
 To be added
 
+Pin Initialization
+==================
+Each pin may have several device entries (virtual pins). On Intel platform,
+the device entries number is dynamically changed. If DP MST hub is connected,
+it is in DP MST mode, and the device entries number is 3. Otherwise, the
+device entries number is 1.
+
+To simplify the implementation, all the device entries will be initialized
+when bootup no matter whether it is in DP MST mode or not.
+
+Connection list
+===============
+DP MST reuses connection list code. The code can be reused because
+device entries on the same pin have the same connection list.
+
+This means DP MST gets the device entry connection list without the
+device entry setting.
 
 Jack
 ====
diff --git a/Documentation/vm/page_frags b/Documentation/vm/page_frags
new file mode 100644 (file)
index 0000000..a671456
--- /dev/null
@@ -0,0 +1,42 @@
+Page fragments
+--------------
+
+A page fragment is an arbitrary-length arbitrary-offset area of memory
+which resides within a 0 or higher order compound page.  Multiple
+fragments within that page are individually refcounted, in the page's
+reference counter.
+
+The page_frag functions, page_frag_alloc and page_frag_free, provide a
+simple allocation framework for page fragments.  This is used by the
+network stack and network device drivers to provide a backing region of
+memory for use as either an sk_buff->head, or to be used in the "frags"
+portion of skb_shared_info.
+
+In order to make use of the page fragment APIs a backing page fragment
+cache is needed.  This provides a central point for the fragment allocation
+and tracks allows multiple calls to make use of a cached page.  The
+advantage to doing this is that multiple calls to get_page can be avoided
+which can be expensive at allocation time.  However due to the nature of
+this caching it is required that any calls to the cache be protected by
+either a per-cpu limitation, or a per-cpu limitation and forcing interrupts
+to be disabled when executing the fragment allocation.
+
+The network stack uses two separate caches per CPU to handle fragment
+allocation.  The netdev_alloc_cache is used by callers making use of the
+__netdev_alloc_frag and __netdev_alloc_skb calls.  The napi_alloc_cache is
+used by callers of the __napi_alloc_frag and __napi_alloc_skb calls.  The
+main difference between these two calls is the context in which they may be
+called.  The "netdev" prefixed functions are usable in any context as these
+functions will disable interrupts, while the "napi" prefixed functions are
+only usable within the softirq context.
+
+Many network device drivers use a similar methodology for allocating page
+fragments, but the page fragments are cached at the ring or descriptor
+level.  In order to enable these cases it is necessary to provide a generic
+way of tearing down a page cache.  For this reason __page_frag_cache_drain
+was implemented.  It allows for freeing multiple references from a single
+page via a single call.  The advantage to doing this is that it allows for
+cleaning up the multiple references that were added to a page in order to
+avoid calling get_page per allocation.
+
+Alexander Duyck, Nov 29, 2016.
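[Editor's note: to make the page_frag API described in the new document above concrete, here is a minimal sketch of a per-CPU fragment cache as a consumer might set one up. The demo_* names are hypothetical, and the GFP flags and serialization choice are assumptions rather than an in-tree user.]

	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU backing cache, as the document requires. */
	static DEFINE_PER_CPU(struct page_frag_cache, demo_frag_cache);

	static void *demo_alloc_frag(unsigned int fragsz)
	{
		/*
		 * get_cpu_ptr() disables preemption, giving the per-CPU
		 * serialization called for above; IRQ-context users would
		 * additionally need interrupts disabled.
		 */
		struct page_frag_cache *nc = get_cpu_ptr(&demo_frag_cache);
		void *data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);

		put_cpu_ptr(&demo_frag_cache);
		return data;
	}

	static void demo_free_frag(void *data)
	{
		/*
		 * Drops one reference on the compound page backing the
		 * fragment; drivers caching whole pages at ring level would
		 * use __page_frag_cache_drain() on teardown instead.
		 */
		page_frag_free(data);
	}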
index bdc4843d4dc504d3841ed8eab2dd1c40f226a412..e4d5ff62f2ec985415d68f25e92655b2a3e08efe 100644 (file)
@@ -81,7 +81,6 @@ Descriptions of section entries:
        Q: Patchwork web based patch tracking system site
        T: SCM tree type and location.
           Type is one of: git, hg, quilt, stgit, topgit
-       B: Bug tracking system location.
        S: Status, one of the following:
           Supported:   Someone is actually paid to look after this.
           Maintained:  Someone actually looks after it.
@@ -977,6 +976,7 @@ M:  Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.armlinux.org.uk/
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git
 F:     arch/arm/
 
 ARM SUB-ARCHITECTURES
@@ -1154,6 +1154,7 @@ ARM/CLKDEV SUPPORT
 M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
 F:     arch/arm/include/asm/clkdev.h
 F:     drivers/clk/clkdev.c
 
@@ -1689,6 +1690,7 @@ M:        Krzysztof Kozlowski <krzk@kernel.org>
 R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
 S:     Maintained
 F:     arch/arm/boot/dts/s3c*
 F:     arch/arm/boot/dts/s5p*
@@ -2194,14 +2196,6 @@ L:       alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Supported
 F:     sound/soc/atmel
 
-ATMEL DMA DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Supported
-F:     drivers/dma/at_hdmac.c
-F:     drivers/dma/at_hdmac_regs.h
-F:     include/linux/platform_data/dma-atmel.h
-
 ATMEL XDMA DRIVER
 M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 L:     linux-arm-kernel@lists.infradead.org
@@ -4106,18 +4100,24 @@ F:      drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:     Gerd Hoffmann <kraxel@redhat.com>
-S:     Odd Fixes
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Obsolete
+W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:     drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
 M:     Alex Deucher <alexander.deucher@amd.com>
 M:     Christian König <christian.koenig@amd.com>
-L:     dri-devel@lists.freedesktop.org
+L:     amd-gfx@lists.freedesktop.org
 T:     git git://people.freedesktop.org/~agd5f/linux
 S:     Supported
 F:     drivers/gpu/drm/radeon/
@@ -4304,7 +4304,10 @@ F:       Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/qxl/
 F:     include/uapi/drm/qxl_drm.h
 
@@ -7706,8 +7709,10 @@ F:       drivers/net/dsa/mv88e6xxx/
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
 
 MARVELL ARMADA DRM SUPPORT
-M:     Russell King <rmk+kernel@armlinux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
 F:     drivers/gpu/drm/armada/
 F:     include/uapi/drm/armada_drm.h
 F:     Documentation/devicetree/bindings/display/armada/
@@ -8179,6 +8184,15 @@ S:       Maintained
 F:     drivers/tty/serial/atmel_serial.c
 F:     include/linux/atmel_serial.h
 
+MICROCHIP / ATMEL DMA DRIVER
+M:     Ludovic Desroches <ludovic.desroches@microchip.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     dmaengine@vger.kernel.org
+S:     Supported
+F:     drivers/dma/at_hdmac.c
+F:     drivers/dma/at_hdmac_regs.h
+F:     include/linux/platform_data/dma-atmel.h
+
 MICROCHIP / ATMEL ISC DRIVER
 M:     Songjun Wu <songjun.wu@microchip.com>
 L:     linux-media@vger.kernel.org
@@ -8903,8 +8917,10 @@ S:       Supported
 F:     drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:     Russell King <rmk+kernel@armlinux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Supported
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
 
@@ -13085,6 +13101,7 @@ M:      David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
 L:     dri-devel@lists.freedesktop.org
 L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
 S:     Maintained
 F:     drivers/gpu/drm/virtio/
 F:     include/uapi/linux/virtio_gpu.h
@@ -13436,6 +13453,7 @@ F:      arch/x86/
 
 X86 PLATFORM DRIVERS
 M:     Darren Hart <dvhart@infradead.org>
+M:     Andy Shevchenko <andy@infradead.org>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:     Maintained
@@ -13607,6 +13625,7 @@ F:      drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -13662,6 +13681,7 @@ F:      Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 5f1a84735ff61a18fddd4bbad964f8ac89ecc8e4..098840012b9bb4604d82c5269e53274170ae656f 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Roaring Lionus
+EXTRAVERSION = -rc5
+NAME = Anniversary Edition
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index c75d29077e4a654276219883629444deec89c955..283099c9560aa296377d588e0402d5e0b49884b4 100644 (file)
@@ -29,7 +29,7 @@ config ARC
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
-       select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+       select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HANDLE_DOMAIN_IRQ
index b3410ff6a62dbcc589ffa411f326d6954c8ba80c..5008021fba9894c0ba44251ddfd41a716d6c6cca 100644 (file)
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_PTAG_HI     0x1F
 
 /* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE   0x1
+#define IC_CTRL_DIS            0x1
 
 /* Data cache related Auxiliary registers */
 #define ARC_REG_DC_BCR         0x72    /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_PTAG_HI     0x5F
 
 /* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH  0x40
-#define DC_CTRL_FLUSH_STATUS    0x100
+#define DC_CTRL_DIS            0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS   0x100
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG                0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_RGN_END    0x916
 
 /* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS           0x001
 #define SLC_CTRL_IM            0x040
-#define SLC_CTRL_DISABLE       0x001
 #define SLC_CTRL_BUSY          0x100
 #define SLC_CTRL_RGN_OP_INV    0x200
 
index b5ff87e6f4b71352fc8990624114c025b6c5ccb0..aee1a77934cf694e37ae579347a37bc167e43762 100644 (file)
@@ -16,6 +16,7 @@
        ;
        ; Now manually save: r12, sp, fp, gp, r25
 
+       PUSH    r30
        PUSH    r12
 
        ; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
        POPAX   AUX_USER_SP
 1:
        POP     r12
+       POP     r30
 
 .endm
 
index 6e91d8b339c3616b59d7b389353c477acba8a418..567590ea8f6c9166d7cef4643529ae621910ddce 100644 (file)
 
 #include <asm-generic/module.h>
 
-#ifdef CONFIG_ARC_DW2_UNWIND
 struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
        void *unw_info;
        int unw_sec_idx;
+#endif
        const char *secstr;
 };
-#endif
 
 #define MODULE_PROC_FAMILY "ARC700"
 
index 69095da1fcfd1e35f16234aaf473896194064d38..47111d565a959d117ab9e2c7c9eea3b852137971 100644 (file)
@@ -84,7 +84,7 @@ struct pt_regs {
        unsigned long fp;
        unsigned long sp;       /* user/kernel sp depending on where we came from  */
 
-       unsigned long r12;
+       unsigned long r12, r30;
 
        /*------- Below list auto saved by h/w -----------*/
        unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
index cb954cdab07087bc6b49e72c8d117c77b905595c..c568a9df82b1a397d73d3fa3c0c523804a2856f2 100644 (file)
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
 
 void setup_processor(void);
 void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
 
 /* Helpers used in arc_*_mumbojumbo routines */
 #define IS_AVAIL1(v, s)                ((v) ? s : "")
index 994dca7014db645b32cfb22753cb25bae4c46566..ecef0fb0b66c37d814b2adc2cc207ef178903bef 100644 (file)
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
        /*
index ce9deb953ca90e1a0309ccd69f9608e8fd32cf70..8c1fd5c007822409ff9bc07e7dbec4eaa9fdad09 100644 (file)
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
        unsigned int ienb;
 
        ienb = read_aux_reg(AUX_IENABLE);
-       ienb &= ~(1 << data->irq);
+       ienb &= ~(1 << data->hwirq);
        write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
        unsigned int ienb;
 
        ienb = read_aux_reg(AUX_IENABLE);
-       ienb |= (1 << data->irq);
+       ienb |= (1 << data->hwirq);
        write_aux_reg(AUX_IENABLE, ienb);
 }
 
index 560c4afc2af4882f7e64191350be90efef808754..9274f8ade8c7ce58cdb6b943c9efed3bd995a3bb 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
 static void idu_cascade_isr(struct irq_desc *desc)
 {
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+       struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
+       chained_irq_enter(core_chip, desc);
        generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+       chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
index 42e964db29677877438f8b9bc8e6225cc5f64174..3d99a60913325d1ac5229b7b6e5933caddb0cbbc 100644 (file)
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
        mod->arch.unw_sec_idx = 0;
        mod->arch.unw_info = NULL;
-       mod->arch.secstr = secstr;
 #endif
+       mod->arch.secstr = secstr;
        return 0;
 }
 
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
        }
 
+#ifdef CONFIG_ARC_DW2_UNWIND
        if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
                module->arch.unw_sec_idx = tgtsec;
+#endif
 
        return 0;
 
index ec86ac0e33213b889cd6100e10e95fda8f3c31e4..d408fa21a07c9937a0e2956a6e12a7895ffef684 100644 (file)
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ *  - ARC700 programming model requires paddr and vaddr be passed in seperate
+ *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ *    caches actually alias or not.
+ * -  For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
  */
 static inline
 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
        __after_dc_op(op);
 }
 
+static inline void __dc_disable(void)
+{
+       const int r = ARC_REG_DC_CTRL;
+
+       __dc_entire_op(OP_FLUSH_N_INV);
+       write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+       const int r = ARC_REG_DC_CTRL;
+
+       write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
 
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 #else
 
 #define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
 #define __dc_line_op(paddr, vaddr, sz, op)
 #define __dc_line_op_k(paddr, sz, op)
 
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline static void slc_entire_op(const int op)
+{
+       unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+       ctrl = read_aux_reg(r);
+
+       if (!(op & OP_FLUSH))           /* i.e. OP_INV */
+               ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
+       else
+               ctrl |= SLC_CTRL_IM;
+
+       write_aux_reg(r, ctrl);
+
+       write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+       /* Important to wait for flush to complete */
+       while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+       const int r = ARC_REG_SLC_CTRL;
+
+       slc_entire_op(OP_FLUSH_N_INV);
+       write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+       const int r = ARC_REG_SLC_CTRL;
+
+       write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
 /***********************************************************
  * Exported APIs
  */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
        return 0;
 }
 
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ *    Non-Masters need not be accessing caches at that time
+ *    - They are either HALT_ON_RESET and kick started much later or
+ *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ *      doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ *    otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ *    Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
 {
-       unsigned int __maybe_unused cpu = smp_processor_id();
-       char str[256];
+       unsigned int ap_sz;
 
-       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+       /* Flush + invalidate + disable L1 dcache */
+       __dc_disable();
+
+       /* Flush + invalidate SLC */
+       if (read_aux_reg(ARC_REG_SLC_BCR))
+               slc_entire_op(OP_FLUSH_N_INV);
+
+       /* IOC Aperture start: TDB: handle non default CONFIG_LINUX_LINK_BASE */
+       write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 
        /*
-        * Only master CPU needs to execute rest of function:
-        *  - Assume SMP so all cores will have same cache config so
-        *    any geomtry checks will be same for all
-        *  - IOC setup / dma callbacks only need to be setup once
+        * IOC Aperture size:
+        *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+        * TBD: fix for PGU + 1GB of low mem
+        * TBD: fix for PAE
         */
-       if (cpu)
-               return;
+       ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+       write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+       write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+       write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+       /* Re-enable L1 dcache */
+       __dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
 
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
                }
        }
 
-       if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
-               /* IM set : flush before invalidate */
-               write_aux_reg(ARC_REG_SLC_CTRL,
-                       read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+       /* Note that SLC disable not formally supported till HS 3.0 */
+       if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+               arc_slc_disable();
 
-               write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
-               /* Important to wait for flush to complete */
-               while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-               write_aux_reg(ARC_REG_SLC_CTRL,
-                       read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-       }
+       if (is_isa_arcv2() && ioc_enable)
+               arc_ioc_setup();
 
        if (is_isa_arcv2() && ioc_enable) {
-               /* IO coherency base - 0x8z */
-               write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-               /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-               write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-               /* Enable partial writes */
-               write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-               /* Enable IO coherency */
-               write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
                __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
                __dma_cache_inv = __dma_cache_inv_ioc;
                __dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
                __dma_cache_wback = __dma_cache_wback_l1;
        }
 }
+
+void __ref arc_cache_init(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
+       char str[256];
+
+       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+       /*
+        * Only the master CPU needs to execute the rest of this function:
+        *  - Assume SMP, so all cores will have the same cache config and
+        *    any geometry checks will be the same for all
+        *  - IOC setup / dma callbacks only need to be set up once
+        */
+       if (!cpu)
+               arc_cache_init_master();
+}
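
The aperture-size arithmetic above is easy to sanity-check in isolation: a register value S decodes to an aperture of 2^(S+2) KB, so order_base_2(mem_in_kb) - 2 recovers S. A minimal userspace sketch of that round trip (standalone C; order_base_2() is re-implemented here rather than taken from the kernel):

#include <stdio.h>

/* smallest S such that 2^S >= x, mirroring the kernel's order_base_2() */
static unsigned order_base_2(unsigned long x)
{
	unsigned s = 0;

	while ((1UL << s) < x)
		s++;
	return s;
}

int main(void)
{
	unsigned long mem = 512UL << 20;		/* assume 512M of low mem */
	unsigned ap_sz = order_base_2(mem / 1024) - 2;	/* as in arc_ioc_setup() */

	/* decode back: aperture = 2^(ap_sz + 2) KB */
	printf("ap_sz = 0x%x -> %lu MB aperture\n",
	       ap_sz, (1UL << (ap_sz + 2)) >> 10);	/* ap_sz = 0x11 -> 512 MB */
	return 0;
}
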
index 399e2f223d25303f1294c35b52862e584fbcb1eb..8c9415ed62804d0a8b4a35df176f1b43e6c1be44 100644 (file)
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 #endif
 
+long __init arc_get_mem_sz(void)
+{
+       return low_mem_sz;
+}
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
index 7327250f0bb66e716dacd07d35019c858f38cf68..f10fe8526239552a676df4a4bfb1ae6a21b1aa41 100644 (file)
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
        sun8i-a83t-allwinner-h8homlet-v2.dtb \
        sun8i-a83t-cubietruck-plus.dtb \
        sun8i-h3-bananapi-m2-plus.dtb \
+       sun8i-h3-nanopi-m1.dtb  \
        sun8i-h3-nanopi-neo.dtb \
        sun8i-h3-orangepi-2.dtb \
        sun8i-h3-orangepi-lite.dtb \
index 1463df3b5b195544f7fafc97f3a7dc62efa01f4f..8ed46f9d79b75f1f64e466eba59beb254dbd1f48 100644 (file)
                        AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
                        AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
                        AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
-                       AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
                >;
        };
 
index b6142bda661e1a701c66c6590970553ce5255fcb..15f07f9af3b3df082f8fd0676ebc843c5b097ab5 100644 (file)
 
        axi {
                compatible = "simple-bus";
-               ranges = <0x00000000 0x18000000 0x0011c40a>;
+               ranges = <0x00000000 0x18000000 0x0011c40c>;
                #address-cells = <1>;
                #size-cells = <1>;
 
index 41de15fe15a2f0dfa241748b2b49ef90be48b323..78492a0bbbab94ffec665bc36a205e48c21e0070 100644 (file)
@@ -99,6 +99,7 @@
                                #size-cells = <1>;
                                compatible = "m25p64";
                                spi-max-frequency = <30000000>;
+                               m25p,fast-read;
                                reg = <0>;
                                partition@0 {
                                        label = "U-Boot-SPL";
index 1faf24acd521d3cc4ed22d24c8fe0ff469e9a30a..5ba161679e01f97c868266aee23b34c62d079c3a 100644 (file)
                        phy-names = "sata-phy";
                        clocks = <&sata_ref_clk>;
                        ti,hwmods = "sata";
+                       ports-implemented = <0x1>;
                };
 
                rtc: rtc@48838000 {
index c3d939c9666cabb03752806cab94c002073a4d65..3f808a47df03dfad2b892fd6f833d57f2b18b439 100644 (file)
@@ -75,6 +75,6 @@
                ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
                ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
                ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
-               ti,min-output-imepdance;
+               ti,min-output-impedance;
        };
 };
index 34887a10c5f1712e62c0b953405ef330676a6940..47ba97229a48f23b247a0bd8bc2e874fa5c14c73 100644 (file)
                compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
                             "fsl,imx-audio-sgtl5000";
                model = "imx6q-nitrogen6_max-sgtl5000";
-               pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sgtl5000>;
                ssi-controller = <&ssi1>;
                audio-codec = <&codec>;
                audio-routing =
 
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sgtl5000>;
                reg = <0x0a>;
                clocks = <&clks IMX6QDL_CLK_CKO>;
                VDDA-supply = <&reg_2p5v>;
index d80f21abea62b5bfbd15c4999b01f282b269e973..31d4cc62dbc71bc3b5a8e6aa4a327d9b356fbfed 100644 (file)
                compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
                             "fsl,imx-audio-sgtl5000";
                model = "imx6q-nitrogen6_som2-sgtl5000";
-               pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sgtl5000>;
                ssi-controller = <&ssi1>;
                audio-codec = <&codec>;
                audio-routing =
 
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sgtl5000>;
                reg = <0x0a>;
                clocks = <&clks IMX6QDL_CLK_CKO>;
                VDDA-supply = <&reg_2p5v>;
index da8598402ab8b33137a74fac30a69432155fac17..38faa90007d7f0c7e3042a90aef486321cc4bfa0 100644 (file)
 &mmc1 {
        interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
        pinctrl-names = "default";
-       pinctrl-0 = <&mmc1_pins &mmc1_cd>;
+       pinctrl-0 = <&mmc1_pins>;
        wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;                /* gpio_126 */
        cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>;              /* gpio_110 */
        vmmc-supply = <&vmmc1>;
                        OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat1.sdmmc1_dat1 */
                        OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat2.sdmmc1_dat2 */
                        OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat3.sdmmc1_dat3 */
-                       OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
+                       OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
+                       OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
                >;
        };
 
                        OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4)       /* sys_boot6.gpio_8 */
                >;
        };
-
-       mmc1_cd: pinmux_mmc1_cd {
-               pinctrl-single,pins = <
-                       OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4)  /* cam_d11.gpio_110 */
-               >;
-       };
 };
 
 
index 7cd92babc41a688cf8bb474972d053ea7b8b33d6..0844737b72b27c53b0c8ed88940abf5d2937d637 100644 (file)
                        phy-names = "sata-phy";
                        clocks = <&sata_ref_clk>;
                        ti,hwmods = "sata";
+                       ports-implemented = <0x1>;
                };
 
                dss: dss@58000000 {
index 5ae4ec59e6ea4533ec35485a0a0bb361acdfffaf..c852b69229c977281a6f7e7192d88c3275e80817 100644 (file)
                };
 
                amba {
-                       compatible = "arm,amba-bus";
+                       compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 735914f6ae446a4766f484ab888826efc99eefb1..7cae328398b12f962421cdaa774b5892246032a6 100644 (file)
        cpu-supply = <&reg_dcdc3>;
 };
 
+&de {
+       status = "okay";
+};
+
 &ehci0 {
        status = "okay";
 };
index 2b26175d55d1b7e3cc27611eb5883f4bc913d575..e78faaf9243c862d6eba52d43851f9451cfd0476 100644 (file)
        de: display-engine {
                compatible = "allwinner,sun6i-a31-display-engine";
                allwinner,pipelines = <&fe0>;
+               status = "disabled";
        };
 
        soc@01c00000 {
index 5ea4915f6d75b93eb06c974ab1b6abd009f7f769..10d307408f237f21d15fdbe2bf84de5b4db40f0a 100644 (file)
@@ -56,7 +56,7 @@
 };
 
 &pio {
-       mmc2_pins_nrst: mmc2@0 {
+       mmc2_pins_nrst: mmc2-rst-pin {
                allwinner,pins = "PC16";
                allwinner,function = "gpio_out";
                allwinner,drive = <SUN4I_PINCTRL_10_MA>;
index b01a4385129472ef83fb1bdbb90513a4b357a312..028d2b70e3b5b882feb185d3b037d1c53a3d0983 100644 (file)
@@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y
 CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
-CONFIG_BCM47XX_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
 CONFIG_BCM7038_WDT=m
 CONFIG_BCM_KONA_WDT=y
 CONFIG_MFD_ACT8945A=y
@@ -893,7 +893,7 @@ CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
 CONFIG_EFI_VARS=m
 CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
index 4364040ed69689dcc1d4c9a25aab9c8308e1b77c..1e6c48dd7b11830acc9a03a8bd700a47f8dbcc01 100644 (file)
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index 522b5feb4eaa34dcbd0e66b7f417a6e857076db8..b62eaeb147aa9a0b8caa73bf96a73a1d02e5f708 100644 (file)
@@ -94,6 +94,9 @@
 #define ARM_CPU_XSCALE_ARCH_V2         0x4000
 #define ARM_CPU_XSCALE_ARCH_V3         0x6000
 
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION          0x510002d0
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
index bfe2a2f5a644e80a9f80f71b49d727604c41e728..22b73112b75f2070e440068184f9655cff781afe 100644 (file)
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
 
 #define ftrace_return_address(n) return_address(n)
 
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+                                              const char *name)
+{
+       if (!strcmp(sym, "sys_mmap2"))
+               sym = "sys_mmap_pgoff";
+       else if (!strcmp(sym, "sys_statfs64_wrapper"))
+               sym = "sys_statfs64";
+       else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+               sym = "sys_fstatfs64";
+       else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+               sym = "sys_fadvise64_64";
+
+       /* Ignore case since sym may start with "SyS" instead of "sys" */
+       return !strcasecmp(sym, name);
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* _ASM_ARM_FTRACE */
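
The matcher above reads most clearly from concrete pairs: ARM-private wrapper symbols are first rewritten to their generic names, and only then compared case-insensitively (the "SyS" spelling comes from the kernel's syscall-definition wrappers). A standalone sketch of the same rule, reduced to a single alias for illustration:

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>	/* strcasecmp() */

static bool match(const char *sym, const char *name)
{
	if (!strcmp(sym, "sys_mmap2"))
		sym = "sys_mmap_pgoff";
	return !strcasecmp(sym, name);
}

int main(void)
{
	assert(match("sys_mmap2", "sys_mmap_pgoff"));	/* wrapper alias */
	assert(match("SyS_read", "sys_read"));		/* case-insensitive compare */
	assert(!match("SyS_mmap2", "sys_mmap_pgoff"));	/* the alias lookup itself is case-sensitive */
	return 0;
}
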
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
deleted file mode 100644 (file)
index a53cdb8..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
-
-#include <asm-generic/int-ll64.h>
-
-/*
- * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
- * unambiguous on ARM as you would expect. For the types below, there is a
- * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
- * and the kernel itself, which results in build errors if you try to build with
- * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
- * in order to use NEON intrinsics)
- *
- * As the typedefs for these types in 'stdint.h' are based on builtin defines
- * supplied by GCC, we can tweak these to align with the kernel's idea of those
- * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
- * source file (provided that -ffreestanding is used).
- *
- *                    int32_t         uint32_t               uintptr_t
- * bare metal GCC     long            unsigned long          unsigned int
- * glibc GCC          int             unsigned int           unsigned int
- * kernel             int             unsigned int           unsigned long
- */
-
-#ifdef __INT32_TYPE__
-#undef __INT32_TYPE__
-#define __INT32_TYPE__         int
-#endif
-
-#ifdef __UINT32_TYPE__
-#undef __UINT32_TYPE__
-#define __UINT32_TYPE__        unsigned int
-#endif
-
-#ifdef __UINTPTR_TYPE__
-#undef __UINTPTR_TYPE__
-#define __UINTPTR_TYPE__       unsigned long
-#endif
-
-#endif /* _ASM_TYPES_H */
index a2e75b84e2ae6b4d3f37d7879bc024832c17d3c9..6dae1956c74d761beb8f1cb5f12dc1a8e912c4b5 100644 (file)
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
        return false;
 }
 
+static inline bool has_vhe(void)
+{
+       return false;
+}
+
 /* The section containing the hypervisor idmap text */
 extern char __hyp_idmap_text_start[];
 extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/uapi/asm/types.h b/arch/arm/include/uapi/asm/types.h
new file mode 100644 (file)
index 0000000..9435a42
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
+ * unambiguous on ARM as you would expect. For the types below, there is a
+ * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
+ * and the kernel itself, which results in build errors if you try to build with
+ * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
+ * in order to use NEON intrinsics)
+ *
+ * As the typedefs for these types in 'stdint.h' are based on builtin defines
+ * supplied by GCC, we can tweak these to align with the kernel's idea of those
+ * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
+ * source file (provided that -ffreestanding is used).
+ *
+ *                    int32_t         uint32_t               uintptr_t
+ * bare metal GCC     long            unsigned long          unsigned int
+ * glibc GCC          int             unsigned int           unsigned int
+ * kernel             int             unsigned int           unsigned long
+ */
+
+#ifdef __INT32_TYPE__
+#undef __INT32_TYPE__
+#define __INT32_TYPE__         int
+#endif
+
+#ifdef __UINT32_TYPE__
+#undef __UINT32_TYPE__
+#define __UINT32_TYPE__        unsigned int
+#endif
+
+#ifdef __UINTPTR_TYPE__
+#undef __UINTPTR_TYPE__
+#define __UINTPTR_TYPE__       unsigned long
+#endif
+
+#endif /* _UAPI_ASM_TYPES_H */
index 188180b5523de09f55647d2d31547684c450a8c7..be3b3fbd382fbbd4a4ef4baa34b5d3f906ab3562 100644 (file)
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
                return 0;
        }
 
+       /*
+        * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+        * whenever a WFI is issued, even if the core is not powered down, in
+        * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
+        * breakpoint and watchpoint registers are treated as undefined, so
+        * this results in boot time and runtime failures when these are
+        * accessed and we unexpectedly take a trap.
+        *
+        * It's not clear if/how this can be worked around, so we blacklist
+        * Scorpion CPUs to avoid these issues.
+        */
+       if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+               pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+               return 0;
+       }
+
        has_ossr = core_has_os_save_restore();
 
        /* Determine how many BRPs/WRPs are available. */
index 22313cb5336257cffa870b15e21279a1b4684e99..9af0701f7094be972b0f8c60a0a6c8417ed844eb 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
 static inline void ipi_flush_tlb_page(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
+       unsigned int __ua_flags = uaccess_save_and_enable();
 
        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+       uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
 static inline void ipi_flush_tlb_range(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
+       unsigned int __ua_flags = uaccess_save_and_enable();
 
        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+       uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_range(void *arg)
index 11676787ad49042d5049ccd376d153d47ef7bd06..9d7446456e0c4217e0931f3640eb006cb0fb83d7 100644 (file)
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
        __cpu_init_stage2();
 
+       if (is_kernel_in_hyp_mode())
+               kvm_timer_init_vhe();
+
        kvm_arm_init_debug();
 }
 
index f6ba589cd312ecb257829d81f7f04c967b60e216..c821c1d5610ef25c56e1c0873452a019a602380d 100644 (file)
@@ -32,7 +32,6 @@
 #include "soc.h"
 
 #define OMAP1_DMA_BASE                 (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT     17
 
 static u32 enable_1510_mode;
 
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
                goto exit_iounmap;
        }
 
-       d->lch_count            = OMAP1_LOGICAL_DMA_CH_COUNT;
-
        /* Valid attributes for omap1 plus processors */
        if (cpu_is_omap15xx())
                d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
        d->dev_caps             |= CLEAR_CSR_ON_READ;
        d->dev_caps             |= IS_WORD_16;
 
-       if (cpu_is_omap15xx())
-               d->chan_count = 9;
-       else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-               if (!(d->dev_caps & ENABLE_1510_MODE))
-                       d->chan_count = 16;
+       /* available logical channels */
+       if (cpu_is_omap15xx()) {
+               d->lch_count = 9;
+       } else {
+               if (d->dev_caps & ENABLE_1510_MODE)
+                       d->lch_count = 9;
                else
-                       d->chan_count = 9;
+                       d->lch_count = 16;
        }
 
        p = dma_plat_info;
index 477910a48448d653b2de22339d60c61b967aa8e3..70c004794880e0efd567e269eebc0f7484ca5140 100644 (file)
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
        .nshutdown_gpio = 162,
        .dev_name = "/dev/ttyO1",
        .flow_cntrl = 1,
-       .baud_rate = 300000,
+       .baud_rate = 3000000,
 };
 
 static struct platform_device wl128x_device = {
index 8538910db202ab6a13e0e3588f27b2a3157c4bc2..a970e7fcba9e02fe6e2651cd5cfca76321d26314 100644 (file)
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
  */
 bool prcmu_is_cpu_in_wfi(int cpu)
 {
-       return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
-                    PRCM_ARM_WFI_STANDBY_WFI0;
+       return readl(PRCM_ARM_WFI_STANDBY) &
+               (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
 }
 
 /*
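
The one-line fix above is pure operator precedence: & binds tighter than ?:, so the old expression evaluated (readl(...) & cpu) ? WFI1 : WFI0 and never tested the mask it meant to select. A standalone illustration with made-up register values:

#include <stdio.h>

int main(void)
{
	unsigned status = 0x2;	/* pretend only the WFI1 bit is set */
	unsigned cpu = 1, wfi0 = 0x1, wfi1 = 0x2;

	unsigned buggy = status & cpu ? wfi1 : wfi0;	/* parses as (status & cpu) ? ... */
	unsigned fixed = status & (cpu ? wfi1 : wfi0);

	printf("buggy=%#x fixed=%#x\n", buggy, fixed);	/* buggy=0x1 fixed=0x2 */
	return 0;
}
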
index 238fbeacd330ab7a8bf7a2a6fdf7f1a6e0365499..5d28e1cdc9986a18de73efc9c3a988c4556f1d88 100644 (file)
        };
 };
 
+&scpi_clocks {
+       status = "disabled";
+};
+
 &uart_AO {
        status = "okay";
        pinctrl-0 = <&uart_ao_a_pins>;
index 596240c38a9cdd7720077a8f97a5c0111366e550..b35307321b63981cc7c8dee4042d4c0ba99b7ee9 100644 (file)
@@ -55,7 +55,7 @@
                mboxes = <&mailbox 1 &mailbox 2>;
                shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
 
-               clocks {
+               scpi_clocks: clocks {
                        compatible = "arm,scpi-clocks";
 
                        scpi_dvfs: scpi_clocks@0 {
index 64226d5ae4715172a12c8509022fb6f13814d318..135890cd8a859708c9adf6d3e184bd7ec093a3e0 100644 (file)
                };
 
                amba {
-                       compatible = "arm,amba-bus";
+                       compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 358089687a69b5946851ef7c5ac5c0a776d9d161..ef1b9e573af0f5bd2ed6792ba5c9450a64ecaa45 100644 (file)
@@ -27,7 +27,7 @@
                stdout-path = "serial0:115200n8";
        };
 
-       memory {
+       memory@0 {
                device_type = "memory";
                reg = <0x0 0x0 0x0 0x40000000>;
        };
index 68a908334c7b12846e74c16d5c7168d9276747dd..54dc28351c8cb85a0abbd4bbad554d56dbcbac79 100644 (file)
@@ -72,7 +72,7 @@
                             <1 10 0xf08>;
        };
 
-       amba_apu {
+       amba_apu: amba_apu@0 {
                compatible = "simple-bus";
                #address-cells = <2>;
                #size-cells = <1>;
                };
 
                i2c0: i2c@ff020000 {
-                       compatible = "cdns,i2c-r1p10";
+                       compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
                        status = "disabled";
                        interrupt-parent = <&gic>;
                        interrupts = <0 17 4>;
                };
 
                i2c1: i2c@ff030000 {
-                       compatible = "cdns,i2c-r1p10";
+                       compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
                        status = "disabled";
                        interrupt-parent = <&gic>;
                        interrupts = <0 18 4>;
index 446f6c46d4b17b352ef695665409c6863d470706..3a4301163e04a26d979bf506f8c4a537a2dd9715 100644 (file)
@@ -164,22 +164,25 @@ lr        .req    x30             // link register
 
 /*
  * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
  */
        /*
         * @dst: destination register (64 bit wide)
         * @sym: name of the symbol
-        * @tmp: optional scratch register to be used if <dst> == sp, which
-        *       is not allowed in an adrp instruction
         */
-       .macro  adr_l, dst, sym, tmp=
-       .ifb    \tmp
+       .macro  adr_l, dst, sym
+#ifndef MODULE
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
-       .else
-       adrp    \tmp, \sym
-       add     \dst, \tmp, :lo12:\sym
-       .endif
+#else
+       movz    \dst, #:abs_g3:\sym
+       movk    \dst, #:abs_g2_nc:\sym
+       movk    \dst, #:abs_g1_nc:\sym
+       movk    \dst, #:abs_g0_nc:\sym
+#endif
        .endm
 
        /*
@@ -190,6 +193,7 @@ lr  .req    x30             // link register
         *       the address
         */
        .macro  ldr_l, dst, sym, tmp=
+#ifndef MODULE
        .ifb    \tmp
        adrp    \dst, \sym
        ldr     \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req    x30             // link register
        adrp    \tmp, \sym
        ldr     \dst, [\tmp, :lo12:\sym]
        .endif
+#else
+       .ifb    \tmp
+       adr_l   \dst, \sym
+       ldr     \dst, [\dst]
+       .else
+       adr_l   \tmp, \sym
+       ldr     \dst, [\tmp]
+       .endif
+#endif
        .endm
 
        /*
@@ -206,8 +219,13 @@ lr .req    x30             // link register
         *       while <src> needs to be preserved.
         */
        .macro  str_l, src, sym, tmp
+#ifndef MODULE
        adrp    \tmp, \sym
        str     \src, [\tmp, :lo12:\sym]
+#else
+       adr_l   \tmp, \sym
+       str     \src, [\tmp]
+#endif
        .endm
 
        /*
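
The +/- 4 GB limit quoted above falls straight out of the adrp encoding (a signed 21-bit immediate counted in 4 KB pages), while movz plus three movk instructions can materialize any 64-bit absolute address in 16-bit chunks. A quick arithmetic check in plain C, just for the numbers:

#include <stdio.h>

int main(void)
{
	/* adrp: signed 21-bit immediate, i.e. +/- 2^20 steps of 4 KB pages */
	long long reach = (1LL << 20) * 4096;
	printf("adrp reach: +/- %lld GB\n", reach >> 30);	/* 4 */

	/* movz + 3x movk: four 16-bit chunks cover a full 64-bit address */
	printf("movz/movk coverage: %d bits\n", 4 * 16);	/* 64 */
	return 0;
}
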
index bfe632808d7724c0a51562efb60501e67f6bf157..90c39a6623797dd2c5309b8d70e7975cb938524c 100644 (file)
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define _virt_addr_valid(kaddr)        pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page)   (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr)  (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
 #define page_to_virt(page)     ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)    ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
index fea10736b11fbd1204602c3b695da34ea255bf78..439f6b5d31f67576e57e67cdf1e8284bf57c08a7 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/ptrace.h>
 #include <asm/sections.h>
 #include <asm/sysreg.h>
+#include <asm/cpufeature.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
        return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
+static inline bool has_vhe(void)
+{
+       if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+               return true;
+
+       return false;
+}
+
 #ifdef CONFIG_ARM64_VHE
 extern void verify_cpu_run_el(void);
 #else
index b5c3933ed44163b2fb489a00553195afc1ff6806..d1ff83dfe5deae580cdb0ebf93f25b32a63f5a74 100644 (file)
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
        __uint128_t     vregs[32];
        __u32           fpsr;
        __u32           fpcr;
+       __u32           __reserved[2];
 };
 
 struct user_hwdebug_state {
index 923841ffe4a981669be9bea76d09540244550a1f..43512d4d7df219b4b7093927fc1d9107004b6775 100644 (file)
@@ -683,7 +683,7 @@ el0_inv:
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mov     x2, x25
-       bl      bad_mode
+       bl      bad_el0_sync
        b       ret_to_user
 ENDPROC(el0_sync)
 
index fc35e06ccaaca863ad680ff09166c3f2ba034c54..a22161ccf4470afa644850cd47c18b561da0d0e1 100644 (file)
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
+               if (count < PTRACE_HBP_ADDR_SZ)
+                       return -EINVAL;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;
 
+               if (!count)
+                       break;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct user_pt_regs newregs;
+       struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct user_fpsimd_state newstate;
+       struct user_fpsimd_state newstate =
+               target->thread.fpsimd_state.user_fpsimd;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       unsigned long tls;
+       unsigned long tls = target->thread.tp_value;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
                           unsigned int pos, unsigned int count,
                           const void *kbuf, const void __user *ubuf)
 {
-       int syscallno, ret;
+       int syscallno = task_pt_regs(target)->syscallno;
+       int ret;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
        if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
                          const void __user *ubuf)
 {
        int ret;
-       compat_ulong_t tls;
+       compat_ulong_t tls = target->thread.tp_value;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
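
Every hunk in this file applies the same rule: when userspace may legally supply fewer bytes than the full regset, the kernel-side buffer must be seeded from the task's current state, turning a short write into read-modify-write instead of committing uninitialized stack for the unwritten tail. A standalone sketch of the pattern (the struct and sizes are hypothetical, not kernel API):

#include <stdio.h>
#include <string.h>

struct regs { unsigned long r[4]; };

int main(void)
{
	struct regs cur = { { 1, 2, 3, 4 } };	/* the task's existing registers */
	unsigned long user[1] = { 99 };		/* short write: only r[0] supplied */

	struct regs next = cur;			/* seed from current state first */
	memcpy(&next, user, sizeof(user));	/* then overlay the partial data */

	printf("%lu %lu %lu %lu\n", next.r[0], next.r[1], next.r[2], next.r[3]);
	/* prints "99 2 3 4": the unwritten tail keeps its old values */
	return 0;
}
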
index 5b830be79c0120a331e32bdf417b45f4c0bb8911..659b2e6b6cf767ff94d2512ad0897aa236469139 100644 (file)
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
-       siginfo_t info;
-       void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));
+
+       die("Oops - bad mode", regs, 0);
+       local_irq_disable();
+       panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+       siginfo_t info;
+       void __user *pc = (void __user *)instruction_pointer(regs);
+       console_verbose();
+
+       pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+               smp_processor_id(), esr, esr_get_class_string(esr));
        __show_regs(regs);
 
        info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;
 
-       arm64_notify_die("Oops - bad mode", regs, &info, 0);
+       current->thread.fault_address = 0;
+       current->thread.fault_code = 0;
+
+       force_sig_info(info.si_signo, &info, current);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
index 964b7549af5cc7f827c57fe17709e7575a471c56..e25584d723960e73fb8eec8d1a5f48fa57197582 100644 (file)
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                ncontig = find_num_contig(vma->vm_mm, addr, cpte,
                                          *cpte, &pgsize);
                for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
-                       changed = ptep_set_access_flags(vma, addr, cpte,
+                       changed |= ptep_set_access_flags(vma, addr, cpte,
                                                        pfn_pte(pfn,
                                                                hugeprot),
                                                        dirty);
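
The single-character change above matters because the loop walks several contiguous PTEs: with plain assignment, only the final iteration's result survived, so a change detected on an earlier PTE could be silently dropped. A tiny standalone demonstration of the difference:

#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	bool per_pte[3] = { true, false, false };	/* only the first PTE changed */
	bool assign = false, accum = false;

	for (int i = 0; i < 3; i++) {
		assign = per_pte[i];	/* old code: last iteration wins */
		accum |= per_pte[i];	/* fixed: any change is remembered */
	}
	printf("assign=%d accum=%d\n", assign, accum);	/* assign=0 accum=1 */
	return 0;
}
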
index 716d1226ba6925babc28bc6c14dc175e739c7f57..380ebe70509347f8c9329433a625527b65442aa6 100644 (file)
@@ -404,6 +404,8 @@ void __init mem_init(void)
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);
+       else
+               swiotlb_force = SWIOTLB_NO_FORCE;
 
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
index 1c2a5e264fc71cfd52f2acb0b24ddb1aff792be7..e93c9494503ac8fc3cfaa8167ea3523abb3e2925 100644 (file)
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)       (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)            (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+       long long c, old;
+
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic64_cmpxchg(v, c, c + i);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long long c, old, dec;
+
+       c = atomic64_read(v);
+       for (;;) {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+               old = atomic64_cmpxchg((v), c, dec);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return dec;
+}
+
 #define ATOMIC_OP(op)                                                  \
 static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
 {                                                                      \
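
Both new helpers are the canonical compare-and-swap retry loop: load the counter, compute the candidate, and retry if another CPU modified the value in between; atomic64_inc_not_zero() then falls out as add_unless(v, 1, 0). A hedged sketch of the same loop in portable C11 (not this architecture's implementation):

#include <stdatomic.h>
#include <stdbool.h>

/* add i to *v unless *v == u; returns true if the add happened */
static bool add_unless(_Atomic long long *v, long long i, long long u)
{
	long long c = atomic_load(v);

	while (c != u) {
		/* on failure, compare_exchange reloads c with the current value */
		if (atomic_compare_exchange_weak(v, &c, c + i))
			return true;
	}
	return false;
}

int main(void)
{
	_Atomic long long refs = 1;

	return add_unless(&refs, 1, 0) ? 0 : 1;	/* inc_not_zero: succeeds */
}
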
index 393d311735c8b573bd5702eac1dcaaac1103600e..67e333aa7629c406745564cb24acc5903733ec41 100644 (file)
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)                                         \
index 1c64bc6330bc0b9f70b71038e678fd110e6e1456..0c4e470571ca0faa74d3e9fa38fa57a384cab4bf 100644 (file)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hash__hugepd_ok(hugepd_t hpd)
 {
+       unsigned long hpdval = hpd_val(hpd);
        /*
         * if it is not a pte and have hugepd shift mask
         * set, then it is a hugepd directory pointer
         */
-       if (!(hpd.pd & _PAGE_PTE) &&
-           ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+       if (!(hpdval & _PAGE_PTE) &&
+           ((hpdval & HUGEPD_SHIFT_MASK) != 0))
                return true;
        return false;
 }
index f61cad3de4e69ec093674355332f7d3ee093cf2b..4c935f7504f783532d3521d04142dfdb5831f943 100644 (file)
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                              unsigned long phys);
 extern void hash__vmemmap_remove_mapping(unsigned long start,
                                     unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
index ede215167d1ad4f37f4d732c0eeaeee5cbd58bd4..7f4025a6c69ea5b71b340989b35197d8f9d17b91 100644 (file)
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
         * We have only four bits to encode, MMU page size
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
-       return __va(hpd.pd & HUGEPD_ADDR_MASK);
+       return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
 }
 
 static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
 {
-       return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+       return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
 {
        BUG_ON(!hugepd_ok(hpd));
 #ifdef CONFIG_PPC_8xx
-       return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+       return (pte_t *)__va(hpd_val(hpd) &
+                            ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
 #else
-       return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+       return (pte_t *)((hpd_val(hpd) &
+                         ~HUGEPD_SHIFT_MASK) | PD_HUGE);
 #endif
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-       return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+       return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
 #else
-       return hpd.pd & HUGEPD_SHIFT_MASK;
+       return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
 #endif
 }
 
index 172849727054e179ea1bd58e24e2d85118ed8f91..0cd8a3852763292eabe905b33960f888e875c978 100644 (file)
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 static inline int hugepd_ok(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-       return ((hpd.pd & 0x4) != 0);
+       return ((hpd_val(hpd) & 0x4) != 0);
 #else
-       return (hpd.pd > 0);
+       /* We clear the top bit to indicate hugepd */
+       return ((hpd_val(hpd) & PD_HUGE) == 0);
 #endif
 }
 
index 56398e7e61004d7766f8e77627df54e73f7e75bf..47120bf2670c49f224744445c5dc154f0de60b31 100644 (file)
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
 #include <asm/pgtable-types.h>
 #endif
 
-typedef struct { signed long pd; } hugepd_t;
 
 #ifndef CONFIG_HUGETLB_PAGE
 #define is_hugepd(pdep)                (0)
 #define pgd_huge(pgd)          (0)
 #endif /* CONFIG_HUGETLB_PAGE */
 
-#define __hugepd(x) ((hugepd_t) { (x) })
-
 struct page;
 extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long vaddr,
index e157489ee7a1220156868099903d45509bedd7e6..ae0a23091a9b347d3e31ed96330772bc9caa3020 100644 (file)
@@ -65,6 +65,7 @@ struct power_pmu {
 #define PPMU_HAS_SSLOT         0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER          0x00000040 /* Has SIER */
 #define PPMU_ARCH_207S         0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR           0x00000100 /* Do not use SIAR */
 
 /*
  * Values for flags to get_alternatives()
index 49c0a5a80efa2948764e247d3cbd76fc6fdf06a0..9c0f5db5cf461a92e72185701b4cbc1df168dfcc 100644 (file)
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
        return pmd_raw(old) == prev;
 }
 
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+       return be64_to_cpu(x.pdbe);
+}
+
 #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
index e7f4f3e0fcde94ba237fa2a269c32d40458cc8f2..8bd3b13fe2fb2e8bd1c5762c4e080c9cd921edaa 100644 (file)
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 }
 #endif
 
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+       return x.pd;
+}
+
 #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
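
The point of pairing __hugepd() with hpd_val() is that callers only ever compare decoded values and never touch the raw storage, which lets the big-endian and native layouts coexist behind one interface. A userspace mock of the two variants (htobe64()/be64toh() from glibc's endian.h stand in for the kernel's cpu_to_be64()/be64_to_cpu(); illustration only, not kernel code):

#define _DEFAULT_SOURCE		/* for htobe64()/be64toh() on glibc */
#include <assert.h>
#include <endian.h>
#include <stdint.h>

typedef struct { uint64_t pdbe; } hugepd_be_t;
#define __hugepd_be(x) ((hugepd_be_t){ htobe64(x) })
static uint64_t hpd_val_be(hugepd_be_t h) { return be64toh(h.pdbe); }

typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t){ (x) })
static unsigned long hpd_val(hugepd_t h) { return h.pd; }

int main(void)
{
	/* same observable value no matter how the bits are stored */
	assert(hpd_val_be(__hugepd_be(0x1234)) == 0x1234);
	assert(hpd_val(__hugepd(0x1234)) == 0x1234);
	return 0;
}
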
index c56ea8c84abb1771ff65f66ba91ffff02bff5fae..c4ced1d01d579fe0b7aeb13a71b21eb2ffb7072c 100644 (file)
 #define PPC_INST_MCRXR                 0x7c000400
 #define PPC_INST_MCRXR_MASK            0xfc0007fe
 #define PPC_INST_MFSPR_PVR             0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK                0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK                0xfc1ffffe
 #define PPC_INST_MFTMR                 0x7c0002dc
 #define PPC_INST_MSGSND                        0x7c00019c
 #define PPC_INST_MSGCLR                        0x7c0001dc
 #define PPC_INST_RFDI                  0x4c00004e
 #define PPC_INST_RFMCI                 0x4c00004c
 #define PPC_INST_MFSPR_DSCR            0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK       0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK       0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR            0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK       0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK       0xfc1ffffe
 #define PPC_INST_MFSPR_DSCR_USER       0x7c0302a6
-#define PPC_INST_MFSPR_DSCR_USER_MASK  0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER_MASK  0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR_USER       0x7c0303a6
-#define PPC_INST_MTSPR_DSCR_USER_MASK  0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER_MASK  0xfc1ffffe
 #define PPC_INST_MFVSRD                        0x7c000066
 #define PPC_INST_MTVSRD                        0x7c000166
 #define PPC_INST_SLBFEE                        0x7c0007a7
index 8180bfd7ab931c5b6d16a1c1b7cb73689206571b..9de7f79e702b1d755bdc855a2da82876959eeade 100644 (file)
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
         *
         * For pHyp, we have to enable IO for log retrieval. Otherwise,
         * 0xFF's is always returned from PCI config space.
+        *
+        * When the @severity is EEH_LOG_PERM, the PE is going to be
+        * removed. Prior to that, the drivers for devices included in
+        * the PE will be closed. The drivers rely on working IO path
+        * the PE will be closed. The drivers rely on a working IO path
+        * to bring the devices to a quiet state. Otherwise, PCI traffic
+        * from those devices after they are removed is likely to cause
         */
        if (!(pe->type & EEH_PE_PHB)) {
-               if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+               if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+                   severity == EEH_LOG_PERM)
                        eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 
                /*
index e4744ff38a1782de7aa60305fc3f72dc9c109cba..925a4ef9055932174b4dc5a8f0424b330149132b 100644 (file)
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 
        flush_fp_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.TS_FPR(i);
+       buf[32] = target->thread.fp_state.fpscr;
+
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
 
+       for (i = 0; i < 32; i++)
+               buf[i] = target->thread.TS_CKFPR(i);
+       buf[32] = target->thread.ckfp_state.fpscr;
+
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
index 80334937e14fdf8519236a9930e73707ef373d86..67e19a0821be25c8e5f458857e9fda31ca42adb6 100644 (file)
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_bolt_mapping(start, end, __pa(start),
                                   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
        return rc;
 }
 
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_remove_mapping(start, end, mmu_linear_psize,
                                     mmu_kernel_ssize);
index d5026f3800b6129bc4c05ef303b4c37d80065cc0..37b5f91e381b77d545b5c336e60aecb0194dcd7d 100644 (file)
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 int hugepd_ok(hugepd_t hpd)
 {
        bool is_hugepd;
+       unsigned long hpdval;
+
+       hpdval = hpd_val(hpd);
 
        /*
         * We should not find this format in page directory, warn otherwise.
         */
-       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+       is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
        WARN(is_hugepd, "Found wrong page directory format\n");
        return 0;
 }
index 289df38fb7e08bcde6228276fb9d8415042bdd30..8c3389cbcd12216ef7b71884163322255f76ea68 100644 (file)
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;
 #endif
 
-#define hugepd_none(hpd)       ((hpd).pd == 0)
+#define hugepd_none(hpd)       (hpd_val(hpd) == 0)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
-               else
+               else {
 #ifdef CONFIG_PPC_BOOK3S_64
-                       hpdp->pd = __pa(new) |
-                                  (shift_to_mmu_psize(pshift) << 2);
+                       *hpdp = __hugepd(__pa(new) |
+                                        (shift_to_mmu_psize(pshift) << 2));
 #elif defined(CONFIG_PPC_8xx)
-                       hpdp->pd = __pa(new) |
-                                  (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
-                                                             _PMD_PAGE_512K) |
-                                  _PMD_PRESENT;
+                       *hpdp = __hugepd(__pa(new) |
+                                        (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+                                         _PMD_PAGE_512K) | _PMD_PRESENT);
 #else
                        /* We use the old format for PPC_FSL_BOOK3E */
-                       hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+                       *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
 #endif
+               }
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1 ; i >= 0; i--, hpdp--)
-                       hpdp->pd = 0;
+                       *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        }
        spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
                return;
 
        for (i = 0; i < num_hugepd; i++, hpdp++)
-               hpdp->pd = 0;
+               *hpdp = __hugepd(0);
 
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
                 * if we have pdshift and shift value same, we don't
                 * use pgt cache for hugepd.
                 */
-               if (pdshift > shift) {
+               if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift, NULL);
-                       if (!PGT_CACHE(pdshift - shift))
-                               panic("hugetlbpage_init(): could not create "
-                                     "pgtable cache for %d bit pagesize\n", shift);
-               }
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else if (!hugepte_cache) {
                        /*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
 #endif
-       else
-               panic("%s: Unable to set default huge page size\n", __func__);
-
        return 0;
 }
 
index a175cd82ae8c5f2807508e5a2ee2daf34e7fc05a..f2108c40e697dfa5128dc7b8f742e8a2db8063fd 100644 (file)
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
+       if (!new)
+               panic("Could not allocate pgtable cache for order %d", shift);
+
        kfree(name);
        pgtable_cache[shift - 1] = new;
+
        pr_debug("Allocated pgtable cache for order %d\n", shift);
 }
 
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
 {
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
 
-       if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+       if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
                pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        /*
         * In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
         */
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
-       if (!PGT_CACHE(PGD_INDEX_SIZE))
-               panic("Couldn't allocate pgd cache");
-       if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
-               panic("Couldn't allocate pmd pgtable caches");
-       if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-               panic("Couldn't allocate pud pgtable caches");
 }
index ebf9782bacf97dffb1a88a71416b7235c840638e..653ff6c74ebe3d112ed13a70006032d3e7a5dd23 100644 (file)
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+       if (radix_enabled())
+               return -ENODEV;
+
+       return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+       if (radix_enabled())
+               return -ENODEV;
+
+       return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
index fd3e4034c04d2207a30cc82d6c65dffc6094c603..270eb9b74e2e13eff5f37b441f76a37a019ff73e 100644 (file)
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
         */
        if (TRAP(regs) != 0xf00)
                use_siar = 0;
+       else if ((ppmu->flags & PPMU_NO_SIAR))
+               use_siar = 0;
        else if (marked)
                use_siar = 1;
        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
index 6447dc1c3d896cea18615d3b5bacc4bb6285fbb1..929b56d47ad9bf6bc79f7ae9dd0ab27b2142427d 100644 (file)
@@ -16,7 +16,7 @@ EVENT(PM_CYC,                                 0x0001e)
 EVENT(PM_ICT_NOSLOT_CYC,                       0x100f8)
 EVENT(PM_CMPLU_STALL,                          0x1e054)
 EVENT(PM_INST_CMPL,                            0x00002)
-EVENT(PM_BRU_CMPL,                             0x40060)
+EVENT(PM_BRU_CMPL,                             0x10012)
 EVENT(PM_BR_MPRED_CMPL,                                0x400f6)
 
 /* All L1 D cache load references counted at finish, gated by reject */
index 346010e8d463d36d2c411839ecb550d9534f61b1..7332634e18c95212f1448c578f2726b8ebf3aee0 100644 (file)
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
        .bhrb_filter_map        = power9_bhrb_filter_map,
        .get_constraint         = isa207_get_constraint,
        .disable_pmc            = isa207_disable_pmc,
-       .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
+       .flags                  = PPMU_NO_SIAR | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power9_generic_events),
        .generic_events         = power9_generic_events,
        .cache_events           = &power9_cache_events,
index d38e86fd5720f181ac10cc869b35e8b093ca5803..60c57657c772fef576e5c4703dfb2a17203978a7 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/xics.h>
 #include <asm/io.h>
 #include <asm/opal.h>
+#include <asm/kvm_ppc.h>
 
 static void icp_opal_teardown_cpu(void)
 {
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
         * Should we be flagging idle loop instead?
         * Or creating some task to be scheduled?
         */
-       opal_int_eoi((0x00 << 24) | XICS_IPI);
+       if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+               force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+       unsigned int kvm_xirr;
+       __be32 hw_xirr;
+       int64_t rc;
+
+       /* Handle an interrupt latched by KVM first */
+       kvm_xirr = kvmppc_get_xics_latch();
+       if (kvm_xirr)
+               return kvm_xirr;
+
+       /* Then ask OPAL */
+       rc = opal_int_get_xirr(&hw_xirr, false);
+       if (rc < 0)
+               return 0;
+       return be32_to_cpu(hw_xirr);
 }
 
 static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
        unsigned int xirr;
        unsigned int vec;
        unsigned int irq;
-       int64_t rc;
 
-       rc = opal_int_get_xirr(&xirr, false);
-       if (rc < 0)
-               return 0;
-       xirr = be32_to_cpu(xirr);
+       xirr = icp_opal_get_xirr();
        vec = xirr & 0x00ffffff;
        if (vec == XICS_IRQ_SPURIOUS)
                return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
        xics_mask_unknown_vec(vec);
 
        /* We might learn about it later, so EOI it */
-       opal_int_eoi(xirr);
+       if (opal_int_eoi(xirr) > 0)
+               force_external_irq_replay();
 
        return 0;
 }
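
The reworked read path above is a common two-source pattern: consume a software-latched interrupt first (KVM may have stashed one), and only fall back to the comparatively expensive firmware poll when the latch is empty. A standalone sketch with stand-in sources (the names are illustrative, not the OPAL or KVM API):

#include <stdio.h>

static unsigned latched;			/* stands in for the KVM latch */
static unsigned poll_hw(void) { return 0x42; }	/* stands in for the OPAL call */

static unsigned get_xirr(void)
{
	unsigned v = latched;

	latched = 0;		/* the latch is consume-once */
	return v ? v : poll_hw();
}

int main(void)
{
	latched = 7;
	unsigned first = get_xirr();	/* consumes the latch: 7 */
	unsigned second = get_xirr();	/* latch empty: polls hardware */

	printf("%u %#x\n", first, second);	/* 7 0x42 */
	return 0;
}
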
index e659daffe368836baf4db7908cdb4cb83575f0d1..e00975361fec00fb89ad916f1b8a15539a449a6c 100644 (file)
@@ -69,7 +69,7 @@ CONFIG_CMA=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_IMA=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
index 95ceac50bc6512313cf2d7cda8090c88596ae865..f05d2d6e10872a417cfb67a9624d7d74f56e5cc6 100644 (file)
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
index bc7b176f57950d1f4f9d17cfe8721229fe4e34c2..2cf87343b59030f76267e47dc88672e536bf6e9b 100644 (file)
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
index 2d40ef0a6295d9a93c3527d6592eba2780736a2e..d00e368fb5e6ef949b6fb070321fa3a6dc9c8501 100644 (file)
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
 CONFIG_STATIC_KEYS_SELFTEST=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
 CONFIG_RAW_DRIVER=m
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
index d7697ab802f6c94813a27394baa255fa26a93ddc..8e136b88cdf4f13460b960f8db9d02e1ae88324f 100644 (file)
@@ -15,7 +15,9 @@
        BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
        asm volatile(                                                   \
                "       lctlg   %1,%2,%0\n"                             \
-               : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+               :                                                       \
+               : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)    \
+               : "memory");                                            \
 }
 
 #define __ctl_store(array, low, high) {                                        \
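
The new "memory" clobber on __ctl_load() matters for callers like the sketch below (helper name hypothetical): without it the compiler may keep the array contents in registers or sink the stores past the asm, and lctlg would then load stale control-register values from memory.

	unsigned long cr[2];

	setup_cr_values(cr);	/* hypothetical: stores cr[0], cr[1] */
	__ctl_load(cr, 0, 1);	/* clobber forces those stores to land first */
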
index bec71e902be3f8030697084540130227ecb55088..6484a250021e2717c448eb8f4f0b987169c264f1 100644 (file)
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
        memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
+              sizeof(S390_lowcore.stfle_fac_list));
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
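
The sizeof() change bounds both copies by the source array: S390_ARCH_FAC_LIST_SIZE_BYTE describes the larger destination buffers, so the old memcpy() read past S390_lowcore.stfle_fac_list and could leak adjacent lowcore data into the guest-visible facility list. A hedged restatement ('dst' is an illustrative name):

	/* Bound the copy by the source, not the destination: */
	memcpy(dst, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
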
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
+              sizeof(S390_lowcore.stfle_fac_list));
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
index d89b7011667cb4f1a6f3ad55238d2c815e229c41..e279572824b15e07616b98215fb51c1fa65f4c9f 100644 (file)
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
                          const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct pt_regs regs;
+       struct pt_regs regs = *task_pt_regs(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
                                 sizeof(regs));
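
Why seeding 'regs' matters here, as a hedged note: user_regset_copyin() may update only a prefix of the buffer, and whatever it leaves untouched is written back to the task verbatim, so the struct must start as the task's live registers rather than uninitialized kernel stack.
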
index cc3bd583dce1abc5fafe6b92417b8a713cd4cdfd..9e240fcba784b085bd55e77dc79c08d21d1a23e2 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
index 725e820602b1781308b863a10c105dd3d2734d31..113588ddb43f8d7e7be66283118a2f33c46fb7e1 100644 (file)
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp __builtin_memcmp
 
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+                                         unsigned int base);
+
 #endif /* BOOT_STRING_H */
index 31c34ee131f34505872a018b75a8489f1ac9c854..6ef688a1ef3e0f022032e5317662b9011c8f74c4 100644 (file)
@@ -1020,7 +1020,8 @@ struct {
        const char *basename;
        struct simd_skcipher_alg *simd;
 } aesni_simd_skciphers2[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
+    IS_BUILTIN(CONFIG_CRYPTO_PCBC)
        {
                .algname        = "pcbc(aes)",
                .drvname        = "pcbc-aes-aesni",
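
The reworked guard reads more easily expanded. Using the kconfig.h predicates (IS_BUILTIN(X): X=y; IS_ENABLED(X): X=y or m), the condition registers the pcbc wrapper only when pcbc is reachable from this object:

	/* Equivalent form of the new condition: */
	#if IS_BUILTIN(CONFIG_CRYPTO_PCBC) || \
	    (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC))
		/* pcbc built in, or aesni and pcbc both modular: the
		 * pcbc(aes) template is guaranteed to be available */
	#endif
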
index 701d29f8e4d300ac01553e5d8317a5e40dbdcc6d..57f7ec35216ef5e2a78efa0d303f8f343b010d68 100644 (file)
@@ -254,23 +254,6 @@ ENTRY(__switch_to_asm)
        jmp     __switch_to
 END(__switch_to_asm)
 
-/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * asmlinkage function so its argument has to be pushed on the stack.  This
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-ENTRY(schedule_tail_wrapper)
-       FRAME_BEGIN
-
-       pushl   %eax
-       call    schedule_tail
-       popl    %eax
-
-       FRAME_END
-       ret
-ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *
@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-       call    schedule_tail_wrapper
+       FRAME_BEGIN             /* help unwinder find end of stack */
+
+       /*
+        * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+        * on the stack.
+        */
+       pushl   %eax
+       call    schedule_tail
+       popl    %eax
 
        testl   %ebx, %ebx
        jnz     1f              /* kernel threads are uncommon */
 
 2:
        /* When we fork, we trace the syscall return in the child, too. */
-       movl    %esp, %eax
+       leal    FRAME_OFFSET(%esp), %eax
        call    syscall_return_slowpath
+       FRAME_END
        jmp     restore_all
 
        /* kernel thread */
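
A hedged note on the FRAME_* helpers used above (values assumed from asm/frame.h): with CONFIG_FRAME_POINTER=y, FRAME_BEGIN pushes %ebp and copies %esp into it, so pt_regs ends up one word above the stack pointer. FRAME_OFFSET is that word (4 here, 8 on 64-bit) and collapses to 0 when frame pointers are off, making the new leal equivalent to the old movl:

	/* What "leal FRAME_OFFSET(%esp), %eax" computes, in C terms
	 * ('esp' is an illustrative name for the current stack pointer): */
	struct pt_regs *regs = (void *)(esp + FRAME_OFFSET);
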
index 5b219707c2f236cfcf1a856cf9a5023dd93775c3..044d18ebc43ce96a512abd1e5f36eb8dfaee0636 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -408,17 +409,19 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
+       FRAME_BEGIN                     /* help unwinder find end of stack */
        movq    %rax, %rdi
-       call    schedule_tail                   /* rdi: 'prev' task parameter */
+       call    schedule_tail           /* rdi: 'prev' task parameter */
 
-       testq   %rbx, %rbx                      /* from kernel_thread? */
-       jnz     1f                              /* kernel threads are uncommon */
+       testq   %rbx, %rbx              /* from kernel_thread? */
+       jnz     1f                      /* kernel threads are uncommon */
 
 2:
-       movq    %rsp, %rdi
+       leaq    FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
        call    syscall_return_slowpath /* returns with IRQs disabled */
        TRACE_IRQS_ON                   /* user mode is traced as IRQS on */
        SWAPGS
+       FRAME_END
        jmp     restore_regs_and_iret
 
 1:
index 05612a2529c8bba1e9aa9131a4cabaebe96d5736..496e60391fac68e231ebac4fae9ff74ae867ffc0 100644 (file)
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
         * all online cpus.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
-                         "perf/x86/amd/ibs:STARTING",
+                         "perf/x86/amd/ibs:starting",
                          x86_pmu_amd_ibs_starting_cpu,
                          x86_pmu_amd_ibs_dying_cpu);
 
index 019c5887b698af2a5fbf322c7cc5d6d3f8ab7e57..1635c0c8df23a697a93a8a7bd0dbbfd534c3caef 100644 (file)
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
 
                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
+
+               /* There's no sense in having PEBS for non-sampling events: */

+               if (!is_sampling_event(event))
+                       return -EINVAL;
        }
        /*
         * check that PEBS LBR correction does not conflict with
index 86138267b68a77753737ffd4c4be2ece6a24cc84..eb1484c86bb4b4611450c49df01aab1a18d8fa5f 100644 (file)
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+                       struct cpu_hw_events *sibling;
                        struct intel_excl_cntrs *c;
 
-                       c = per_cpu(cpu_hw_events, i).excl_cntrs;
+                       sibling = &per_cpu(cpu_hw_events, i);
+                       c = sibling->excl_cntrs;
                        if (c && c->core_id == core_id) {
                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
                                cpuc->excl_cntrs = c;
-                               cpuc->excl_thread_id = 1;
+                               if (!sibling->excl_thread_id)
+                                       cpuc->excl_thread_id = 1;
                                break;
                        }
                }
@@ -3987,7 +3990,7 @@ __init int intel_pmu_init(void)
                     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
                x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
        }
-       x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+       x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
 
        if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
                WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
index fec8a461bdef6da49c0e7f46c655f235e7fc1275..1076c9a77292d77e5dbb34adbd5ce526121daa73 100644 (file)
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
+       .module         = THIS_MODULE,
 };
 
 static struct pmu cstate_pkg_pmu = {
@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
        .stop           = cstate_pmu_event_stop,
        .read           = cstate_pmu_event_update,
        .capabilities   = PERF_PMU_CAP_NO_INTERRUPT,
+       .module         = THIS_MODULE,
 };
 
 static const struct cstate_model nhm_cstates __initconst = {
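
Setting .module = THIS_MODULE lets perf core pin the PMU's module for the lifetime of its events. A hedged sketch of the core-side check this enables (shape assumed, not quoted from kernel/events/core.c):

	/* on event creation: */
	if (!try_module_get(pmu->module))	/* NULL module always succeeds */
		return -ENODEV;	/* otherwise the cstate PMU could be
				 * unloaded under a live event */
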
index be202390bbd37b00106864123a647786497ce2cd..9dfeeeca0ea8f11a3beb4c23eeb1d115e77189db 100644 (file)
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
                        continue;
 
                /* log dropped samples number */
-               if (error[bit])
+               if (error[bit]) {
                        perf_log_lost_samples(event, error[bit]);
 
+                       if (perf_event_account_interrupt(event))
+                               x86_pmu_stop(event, 0);
+               }
+
                if (counts[bit]) {
                        __intel_pmu_pebs_event(event, iregs, base,
                                               top, bit, counts[bit]);
index bd34124449b08a4be95f114b3295c22e4d57cd34..17c3564d087a48bc24e41417fe6f128b5d7b9f0d 100644 (file)
@@ -697,6 +697,7 @@ static int __init init_rapl_pmus(void)
        rapl_pmus->pmu.start            = rapl_pmu_event_start;
        rapl_pmus->pmu.stop             = rapl_pmu_event_stop;
        rapl_pmus->pmu.read             = rapl_pmu_event_read;
+       rapl_pmus->pmu.module           = THIS_MODULE;
        return 0;
 }
 
index 97c246f84dea1e79d0f4517376763dcb88a18aea..8c4ccdc3a3f3607ee0af4f4006029df3000e0839 100644 (file)
@@ -733,6 +733,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
                        .start          = uncore_pmu_event_start,
                        .stop           = uncore_pmu_event_stop,
                        .read           = uncore_pmu_event_read,
+                       .module         = THIS_MODULE,
                };
        } else {
                pmu->pmu = *pmu->type->pmu;
index e6832be714bc6e76965e1faa7cedcc3ad309c8fa..dae2fedc16015e691ad6ae85ce4bc4122011eefc 100644 (file)
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
 
 void hswep_uncore_cpu_init(void)
 {
-       int pkg = topology_phys_to_logical_pkg(0);
+       int pkg = boot_cpu_data.logical_proc_id;
 
        if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
                hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
index 34a46dc076d3610212e6f5c9f0abfb2ab9bc3629..8167fdb67ae846a0da668006159abfc56f3d99fc 100644 (file)
@@ -57,7 +57,7 @@
 #define INTEL_FAM6_ATOM_SILVERMONT2    0x4D /* Avaton/Rangely */
 #define INTEL_FAM6_ATOM_AIRMONT                0x4C /* CherryTrail / Braswell */
 #define INTEL_FAM6_ATOM_MERRIFIELD     0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD     0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MOOREFIELD     0x5A /* Anniedale */
 #define INTEL_FAM6_ATOM_GOLDMONT       0x5C
 #define INTEL_FAM6_ATOM_DENVERTON      0x5F /* Goldmont Microserver */
 
index 195becc6f78074f23f25dc7d5d14da4bd90aac76..e793fc9a9b20c36d2e0d0e8e2a359fd1c70f2d8e 100644 (file)
@@ -52,6 +52,21 @@ struct extended_sigtable {
 
 #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
 
+static inline u32 intel_get_microcode_revision(void)
+{
+       u32 rev, dummy;
+
+       native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+       /* As documented in the SDM: Do a CPUID 1 here */
+       native_cpuid_eax(1);
+
+       /* get the current revision from MSR 0x8B */
+       native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+       return rev;
+}
+
 #ifdef CONFIG_MICROCODE_INTEL
 extern void __init load_ucode_intel_bsp(void);
 extern void load_ucode_intel_ap(void);
index eaf100508c36203ad888c0b4484ba264e20bd0ad..1be64da0384ed8d5dea85563633c2740f5587888 100644 (file)
@@ -219,6 +219,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
            : "memory");
 }
 
+#define native_cpuid_reg(reg)                                  \
+static inline unsigned int native_cpuid_##reg(unsigned int op) \
+{                                                              \
+       unsigned int eax = op, ebx, ecx = 0, edx;               \
+                                                               \
+       native_cpuid(&eax, &ebx, &ecx, &edx);                   \
+                                                               \
+       return reg;                                             \
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
 static inline void load_cr3(pgd_t *pgdir)
 {
        write_cr3(__pa(pgdir));
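
A usage sketch of the generated accessors — each expands to a function that runs CPUID with the given leaf in EAX (and ECX=0) and returns the one register named in its suffix:

	unsigned int sig = native_cpuid_eax(1);	/* leaf 1: family/model/stepping */

	native_cpuid_eax(1);	/* or run CPUID purely for its side effect on
				 * IA32_BIOS_SIGN_ID, as the new
				 * intel_get_microcode_revision() does */
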
index a3269c897ec578508fd41c6f7ab9b3622934e623..2e41c50ddf47f414c8f2e4b0a99965ceafffb4bf 100644 (file)
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
        if (task == current)
                return __builtin_frame_address(0);
 
-       return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+       return &((struct inactive_task_frame *)task->thread.sp)->bp;
 }
 #else
 static inline unsigned long *
index 5cb436acd46315b75ba40aacc780373ea2cc4f7c..fcc5cd387fd17a59e5f28ba5fc8fb65fc57ae5af 100644 (file)
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,
 
 asmlinkage void ret_from_fork(void);
 
-/* data that is pointed to by thread.sp */
+/*
+ * This is the structure pointed to by thread.sp for an inactive task.  The
+ * order of the fields must match the code in __switch_to_asm().
+ */
 struct inactive_task_frame {
 #ifdef CONFIG_X86_64
        unsigned long r15;
@@ -48,6 +51,11 @@ struct inactive_task_frame {
        unsigned long di;
 #endif
        unsigned long bx;
+
+       /*
+        * These two fields must be together.  They form a stack frame header,
+        * needed by get_frame_pointer().
+        */
        unsigned long bp;
        unsigned long ret_addr;
 };
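
The adjacent bp/ret_addr pair is what makes an inactive task's stack walkable: &frame->bp looks exactly like a saved frame-pointer slot. A hedged model of one unwind step over such a header (names hypothetical):

	struct frame_header {
		unsigned long next_bp;	/* plays the role of inactive_task_frame.bp */
		unsigned long ret_addr;
	};

	static unsigned long unwind_step(struct frame_header **fp)
	{
		unsigned long ret = (*fp)->ret_addr;	/* caller's text address */

		*fp = (struct frame_header *)(*fp)->next_bp;	/* one frame out */
		return ret;
	}
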
index 945e512a112a321771797869b39c496d7f5c2b8f..1e35dd06b090ee91189cb5a52fdf026f2ca5e74b 100644 (file)
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ir_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
index 71cae73a507617a2559aa0815a9e178a22f33385..1d3167269a6717902149171fe755123a5c654eb6 100644 (file)
@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
 
        /* get information required for multi-node processors */
        if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
-               u32 eax, ebx, ecx, edx;
 
-               cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-               node_id = ecx & 7;
-
-               /* get compute unit information */
-               smp_num_siblings = ((ebx >> 8) & 3) + 1;
-               c->x86_max_cores /= smp_num_siblings;
-               c->cpu_core_id = ebx & 0xff;
+               node_id = cpuid_ecx(0x8000001e) & 7;
 
                /*
                 * We may have multiple LLCs if L3 caches exist, so check if we
index dc1697ca5191ce87cd9f27a78236ede3897243ab..9bab7a8a42936e32270e3573a17a1cd87fb580e0 100644 (file)
@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
 {
        int bit;
 
-       if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+       if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
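
Concretely, the old test let negative values through. A worked example of the hole being closed:

	int bit = -1;			/* e.g. "clearcpuid=-1" on the command line */
	if (bit < NCAPINTS * 32)	/* old check: true for any negative bit */
		setup_clear_cpu_cap(bit);	/* out-of-bounds bitmap access */
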
index fcd484d2bb034a4533d3c4355d3ace82c92060dd..203f860d2ab3339c11ddc34d976f56ffa6ef069c 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/bugs.h>
 #include <asm/cpu.h>
 #include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
 
 #ifdef CONFIG_X86_64
 #include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
                (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 
-       if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
-               unsigned lower_word;
-
-               wrmsr(MSR_IA32_UCODE_REV, 0, 0);
-               /* Required by the SDM */
-               sync_core();
-               rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
-       }
+       if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+               c->microcode = intel_get_microcode_revision();
 
        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
index b624b54912e11e4e58992b9e1ca2d7cab20ce9e4..3f329b74e040c23b6b85dfd12a85f80d630c63ac 100644 (file)
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
 {
        struct ucode_patch *p;
 
-       p = kzalloc(size, GFP_KERNEL);
+       p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);
 
@@ -368,26 +368,6 @@ next:
        return patch;
 }
 
-static void cpuid_1(void)
-{
-       /*
-        * According to the Intel SDM, Volume 3, 9.11.7:
-        *
-        *   CPUID returns a value in a model specific register in
-        *   addition to its usual register return values. The
-        *   semantics of CPUID cause it to deposit an update ID value
-        *   in the 64-bit model-specific register at address 08BH
-        *   (IA32_BIOS_SIGN_ID). If no update is present in the
-        *   processor, the value in the MSR remains unmodified.
-        *
-        * Use native_cpuid -- this code runs very early and we don't
-        * want to mess with paravirt.
-        */
-       unsigned int eax = 1, ebx, ecx = 0, edx;
-
-       native_cpuid(&eax, &ebx, &ecx, &edx);
-}
-
 static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
        unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig.pf = 1 << ((val[1] >> 18) & 7);
        }
-       native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-       /* As documented in the SDM: Do a CPUID 1 here */
-       cpuid_1();
 
-       /* get the current revision from MSR 0x8B */
-       native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
-       csig.rev = val[1];
+       csig.rev = intel_get_microcode_revision();
 
        uci->cpu_sig = csig;
        uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
        struct microcode_intel *mc;
-       unsigned int val[2];
+       u32 rev;
 
        mc = uci->mc;
        if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 
        /* write microcode via MSR 0x79 */
        native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-       native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
-       /* As documented in the SDM: Do a CPUID 1 here */
-       cpuid_1();
-
-       /* get the current revision from MSR 0x8B */
-       native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-       if (val[1] != mc->hdr.rev)
+       rev = intel_get_microcode_revision();
+       if (rev != mc->hdr.rev)
                return -1;
 
 #ifdef CONFIG_X86_64
        /* Flush global tlb. This is precaution. */
        flush_tlb_early();
 #endif
-       uci->cpu_sig.rev = val[1];
+       uci->cpu_sig.rev = rev;
 
        if (early)
                print_ucode(uci);
@@ -804,8 +772,8 @@ static int apply_microcode_intel(int cpu)
        struct microcode_intel *mc;
        struct ucode_cpu_info *uci;
        struct cpuinfo_x86 *c;
-       unsigned int val[2];
        static int prev_rev;
+       u32 rev;
 
        /* We should bind the task to the CPU */
        if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +790,28 @@ static int apply_microcode_intel(int cpu)
 
        /* write microcode via MSR 0x79 */
        wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
-       wrmsrl(MSR_IA32_UCODE_REV, 0);
-
-       /* As documented in the SDM: Do a CPUID 1 here */
-       cpuid_1();
 
-       /* get the current revision from MSR 0x8B */
-       rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+       rev = intel_get_microcode_revision();
 
-       if (val[1] != mc->hdr.rev) {
+       if (rev != mc->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
                       cpu, mc->hdr.rev);
                return -1;
        }
 
-       if (val[1] != prev_rev) {
+       if (rev != prev_rev) {
                pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
-                       val[1],
+                       rev,
                        mc->hdr.date & 0xffff,
                        mc->hdr.date >> 24,
                        (mc->hdr.date >> 16) & 0xff);
-               prev_rev = val[1];
+               prev_rev = rev;
        }
 
        c = &cpu_data(cpu);
 
-       uci->cpu_sig.rev = val[1];
-       c->microcode = val[1];
+       uci->cpu_sig.rev = rev;
+       c->microcode = rev;
 
        return 0;
 }
@@ -860,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
        u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
        int new_rev = uci->cpu_sig.rev;
        unsigned int leftover = size;
-       unsigned int curr_mc_size = 0;
+       unsigned int curr_mc_size = 0, new_mc_size = 0;
        unsigned int csig, cpf;
 
        while (leftover) {
@@ -901,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                        vfree(new_mc);
                        new_rev = mc_header.rev;
                        new_mc  = mc;
+                       new_mc_size = mc_size;
                        mc = NULL;      /* trigger new vmalloc */
                }
 
@@ -926,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
         * permanent memory. So it will be loaded early when a CPU is hot added
         * or resumes.
         */
-       save_mc_for_early(new_mc, curr_mc_size);
+       save_mc_for_early(new_mc, new_mc_size);
 
        pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
                 cpu, new_rev, uci->cpu_sig.rev);
index be3a49ee035650f1f1eac68c07ad0e8898c40585..e41af597aed8e2e454965f8459f26b372c091b4f 100644 (file)
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
                        crystal_khz = 24000;    /* 24.0 MHz */
                        break;
                case INTEL_FAM6_SKYLAKE_X:
+               case INTEL_FAM6_ATOM_DENVERTON:
                        crystal_khz = 25000;    /* 25.0 MHz */
                        break;
                case INTEL_FAM6_ATOM_GOLDMONT:
index 4443e499f2790923e6510bc57a88d5a9e0578051..23d15565d02ad780529091e5004557081bb62844 100644 (file)
@@ -6,6 +6,21 @@
 
 #define FRAME_HEADER_SIZE (sizeof(long) * 2)
 
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x)                  \
+({                                                     \
+       unsigned long val;                              \
+       if (task == current)                            \
+               val = READ_ONCE(x);                     \
+       else                                            \
+               val = READ_ONCE_NOCHECK(x);             \
+       val;                                            \
+})
+
 static void unwind_dump(struct unwind_state *state, unsigned long *sp)
 {
        static bool dumped_before = false;
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
        if (state->regs && user_mode(state->regs))
                return 0;
 
-       addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+       addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+       addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
                                     addr_p);
 
        return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
        if (state->regs)
                next_bp = (unsigned long *)state->regs->bp;
        else
-               next_bp = (unsigned long *)*state->bp;
+               next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
 
        /* is the next frame pointer an encoded pointer to pt_regs? */
        regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
        return true;
 
 bad_address:
+       /*
+        * When unwinding a non-current task, the task might actually be
+        * running on another CPU, in which case it could be modifying its
+        * stack while we're reading it.  This is generally not a problem and
+        * can be ignored as long as the caller understands that unwinding
+        * another task will not always succeed.
+        */
+       if (state->task != current)
+               goto the_end;
+
        if (state->regs) {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
index 56628a44668b7ec43cee087af817ebb1725cf7cd..cedbba0f3402d2343ce069fe3ed6a07f44f68907 100644 (file)
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
        return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+                              struct segmented_address addr,
+                              void *data,
+                              unsigned int size)
+{
+       int rc;
+       ulong linear;
+
+       rc = linearize(ctxt, addr, size, true, &linear);
+       if (rc != X86EMUL_CONTINUE)
+               return rc;
+       return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                    &ctxt->exception);
 }
 
-/* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, int seg, u8 cpl,
                                     enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
        rpl = selector & 3;
 
-       /* NULL selector is not valid for TR, CS and SS (except for long mode) */
-       if ((seg == VCPU_SREG_CS
-            || (seg == VCPU_SREG_SS
-                && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
-            || seg == VCPU_SREG_TR)
-           && null_selector)
-               goto exception;
-
        /* TR should be in GDT only */
        if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
                goto exception;
 
-       if (null_selector) /* for NULL selector skip all following checks */
+       /* NULL selector is not valid for TR, CS and (except for long mode) SS */
+       if (null_selector) {
+               if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+                       goto exception;
+
+               if (seg == VCPU_SREG_SS) {
+                       if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+                               goto exception;
+
+                       /*
+                        * ctxt->ops->set_segment expects the CPL to be in
+                        * SS.DPL, so fake an expand-up 32-bit data segment.
+                        */
+                       seg_desc.type = 3;
+                       seg_desc.p = 1;
+                       seg_desc.s = 1;
+                       seg_desc.dpl = cpl;
+                       seg_desc.d = 1;
+                       seg_desc.g = 1;
+               }
+
+               /* Skip all following checks */
                goto load;
+       }
 
        ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
        if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                                   u16 selector, int seg)
 {
        u8 cpl = ctxt->ops->cpl(ctxt);
+
+       /*
+        * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+        * they can load it at CPL<3 (Intel's manual says only LSS can,
+        * but it's wrong).
+        *
+        * However, the Intel manual says that putting IST=1/DPL=3 in
+        * an interrupt gate will result in SS=3 (the AMD manual instead
+        * says it doesn't), so allow SS=3 in __load_segment_descriptor
+        * and only forbid it here.
+        */
+       if (seg == VCPU_SREG_SS && selector == 3 &&
+           ctxt->mode == X86EMUL_MODE_PROT64)
+               return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
        return __load_segment_descriptor(ctxt, selector, seg, cpl,
                                         X86_TRANSFER_NONE, NULL);
 }
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
        }
        /* Disable writeback. */
        ctxt->dst.type = OP_NONE;
-       return segmented_write(ctxt, ctxt->dst.addr.mem,
-                              &desc_ptr, 2 + ctxt->op_bytes);
+       return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+                                  &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
        else
                size = offsetof(struct fxregs_state, xmm_space[0]);
 
-       return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+       return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+       rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
index 5fe290c1b7d892729e7cfd637433b8171423f407..2f6ef5121a4ca36ac73639e5ffc6aa86624067d2 100644 (file)
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
        jump_label_rate_limit(&apic_hw_disabled, HZ);
        jump_label_rate_limit(&apic_sw_disabled, HZ);
 }
+
+void kvm_lapic_exit(void)
+{
+       static_key_deferred_flush(&apic_hw_disabled);
+       static_key_deferred_flush(&apic_sw_disabled);
+}
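
kvm_lapic_exit() pairs with kvm_lapic_init() above it: jump_label_rate_limit() arms per-key deferred work, and a hedged reading of the flush (behavior assumed from the static-key API) is that static_key_deferred_flush() cancels or completes that work so none of it can run after kvm.ko's text is gone — hence the call at the top of kvm_arch_exit() below.
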
index e0c80233b3e17a1793cbafd422688091034b15c5..ff8039d616723fd79c493c0a1369658e440224b2 100644 (file)
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
 
 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
 void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
 
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
index 2f22810a7e0c8e3106c77849a6a3ea78a2b00a0c..d153be8929a68440ae5e5894497cbb7fa1ab9913 100644 (file)
@@ -3342,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 
        switch (cap->cap) {
        case KVM_CAP_HYPERV_SYNIC:
+               if (!irqchip_in_kernel(vcpu->kvm))
+                       return -EINVAL;
                return kvm_hv_activate_synic(vcpu);
        default:
                return -EINVAL;
@@ -6045,6 +6047,7 @@ out:
 
 void kvm_arch_exit(void)
 {
+       kvm_lapic_exit();
        perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
 
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -6168,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-       return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+       return emulator_write_emulated(ctxt, rip, instruction, 3,
+               &ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
index 324e5713d386f0ed235b0394f71c0290df870fae..af59f808742f94df84ab0aaed1ac2db3ea27e016 100644 (file)
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
         * We were not able to extract an address from the instruction,
         * probably because there was something invalid in it.
         */
-       if (info->si_addr == (void *)-1) {
+       if (info->si_addr == (void __user *)-1) {
                err = -EINVAL;
                goto err_out;
        }
index e76d1af60f7ad76a12fa4f01cf61b3c508ae0453..bb660e53cbd6ba51eace412622fdd7342939ddc1 100644 (file)
@@ -1172,6 +1172,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                set_memory_ro((unsigned long)header, header->pages);
                prog->bpf_func = (void *)image;
                prog->jited = 1;
+       } else {
+               prog = orig_prog;
        }
 
 out_addrs:
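
The added else branch matters because of the function's contract, sketched here with the caller's shape assumed:

	/* caller side: */
	fp = bpf_int_jit_compile(fp);	/* on JIT failure this must hand back
					 * the original interpreter-backed prog,
					 * never NULL or a half-built image */
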
index 3cd69832d7f4c6f3743bbb3a190c39672c89461d..3961103e91760a14d24eec1ad3caffa5ad1f2adb 100644 (file)
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
                        DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
                },
        },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+       {
+               .callback = set_nouse_crs,
+               .ident = "Supermicro X8DTH",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+               },
+       },
 
        /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
        {
index 936a488d6cf6df3c2aadbbdbc036b8eb06701cb0..274dfc48184977db435a9c5c78607cd8a182067c 100644 (file)
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
        return 0;
 }
 
+#define OVERFLOW_ADDR_SHIFT    (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK     (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT           (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+       u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+       u64 end_hi = 0;
+       char buf[64];
+
+       if (md->num_pages == 0) {
+               end = 0;
+       } else if (md->num_pages > EFI_PAGES_MAX ||
+                  EFI_PAGES_MAX - md->num_pages <
+                  (md->phys_addr >> EFI_PAGE_SHIFT)) {
+               end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+                       >> OVERFLOW_ADDR_SHIFT;
+
+               if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+                       end_hi += 1;
+       } else {
+               return true;
+       }
+
+       pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+       if (end_hi) {
+               pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+                       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+                       md->phys_addr, end_hi, end);
+       } else {
+               pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+                       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+                       md->phys_addr, end);
+       }
+       return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+       efi_memory_desc_t *out = efi.memmap.map;
+       const efi_memory_desc_t *in = out;
+       const efi_memory_desc_t *end = efi.memmap.map_end;
+       int i, n_removal;
+
+       for (i = n_removal = 0; in < end; i++) {
+               if (efi_memmap_entry_valid(in, i)) {
+                       if (out != in)
+                               memcpy(out, in, efi.memmap.desc_size);
+                       out = (void *)out + efi.memmap.desc_size;
+               } else {
+                       n_removal++;
+               }
+               in = (void *)in + efi.memmap.desc_size;
+       }
+
+       if (n_removal > 0) {
+               u64 size = efi.memmap.nr_map - n_removal;
+
+               pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+               efi_memmap_install(efi.memmap.phys_map, size);
+       }
+}
+
 void __init efi_print_memmap(void)
 {
        efi_memory_desc_t *md;
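
A worked example of the overflow test in efi_memmap_entry_valid(), assuming EFI_PAGE_SHIFT == 12 and EFI_PAGES_MAX == U64_MAX >> EFI_PAGE_SHIFT:

	/*
	 * md->phys_addr = 0xfffffffffffff000, md->num_pages = 2:
	 *   phys_addr >> 12    = 0x000fffffffffffff
	 *   EFI_PAGES_MAX - 2  = 0x000ffffffffffffd   (smaller)
	 * so 'end' would wrap past U64_MAX; the entry is flagged invalid, and
	 * end_hi reconstructs the overflowed bits purely for the warning text.
	 */
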
@@ -472,6 +536,8 @@ void __init efi_init(void)
                }
        }
 
+       efi_clean_memmap();
+
        if (efi_enabled(EFI_DBG))
                efi_print_memmap();
 }
index 10aca63a50d7bbff9fa94dec60e0df00591ccbd5..30031d5293c483202c526d5045cda23be6617359 100644 (file)
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
 
        new_size = efi.memmap.desc_size * num_entries;
 
-       new_phys = memblock_alloc(new_size, 0);
+       new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
                pr_err("Could not allocate boot services memmap\n");
                return;
@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
        }
 
        new_size = efi.memmap.desc_size * num_entries;
-       new_phys = memblock_alloc(new_size, 0);
+       new_phys = efi_memmap_alloc(num_entries);
        if (!new_phys) {
                pr_err("Failed to allocate new EFI memmap\n");
                return;
index 61b5ed2b7d400339053d933d82c3938c94bbc766..90e4f2a6625b6ae2a87cf7c5ec9792a5459cd0e1 100644 (file)
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
 obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
 obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
 # SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
 # I2C Devices
 obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
 obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
new file mode 100644 (file)
index 0000000..27186ad
--- /dev/null
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -0,0 +1,54 @@
+/*
+ * spidev platform data initialization file
+ *
+ * (C) Copyright 2014, 2016 Intel Corporation
+ * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *         Dan O'Donovan <dan@emutex.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/sfi.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+
+#include <asm/intel-mid.h>
+
+#define MRFLD_SPI_DEFAULT_DMA_BURST    8
+#define MRFLD_SPI_DEFAULT_TIMEOUT      500
+
+/* GPIO pin for spidev chipselect */
+#define MRFLD_SPIDEV_GPIO_CS           111
+
+static struct pxa2xx_spi_chip spidev_spi_chip = {
+       .dma_burst_size         = MRFLD_SPI_DEFAULT_DMA_BURST,
+       .timeout                = MRFLD_SPI_DEFAULT_TIMEOUT,
+       .gpio_cs                = MRFLD_SPIDEV_GPIO_CS,
+};
+
+static void __init *spidev_platform_data(void *info)
+{
+       struct spi_board_info *spi_info = info;
+
+       if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+               return ERR_PTR(-ENODEV);
+
+       spi_info->mode = SPI_MODE_0;
+       spi_info->controller_data = &spidev_spi_chip;
+
+       return NULL;
+}
+
+static const struct devs_id spidev_dev_id __initconst = {
+       .name                   = "spidev",
+       .type                   = SFI_DEV_TYPE_SPI,
+       .delay                  = 0,
+       .get_platform_data      = &spidev_platform_data,
+};
+
+sfi_device(spidev_dev_id);
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
deleted file mode 100644 (file)
index 30c601b..0000000
--- a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * spidev platform data initilization file
- *
- * (C) Copyright 2014, 2016 Intel Corporation
- * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
- *         Dan O'Donovan <dan@emutex.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-#include <linux/sfi.h>
-#include <linux/spi/pxa2xx_spi.h>
-#include <linux/spi/spi.h>
-
-#include <asm/intel-mid.h>
-
-#define MRFLD_SPI_DEFAULT_DMA_BURST    8
-#define MRFLD_SPI_DEFAULT_TIMEOUT      500
-
-/* GPIO pin for spidev chipselect */
-#define MRFLD_SPIDEV_GPIO_CS           111
-
-static struct pxa2xx_spi_chip spidev_spi_chip = {
-       .dma_burst_size         = MRFLD_SPI_DEFAULT_DMA_BURST,
-       .timeout                = MRFLD_SPI_DEFAULT_TIMEOUT,
-       .gpio_cs                = MRFLD_SPIDEV_GPIO_CS,
-};
-
-static void __init *spidev_platform_data(void *info)
-{
-       struct spi_board_info *spi_info = info;
-
-       spi_info->mode = SPI_MODE_0;
-       spi_info->controller_data = &spidev_spi_chip;
-
-       return NULL;
-}
-
-static const struct devs_id spidev_dev_id __initconst = {
-       .name                   = "spidev",
-       .type                   = SFI_DEV_TYPE_SPI,
-       .delay                  = 0,
-       .get_platform_data      = &spidev_platform_data,
-};
-
-sfi_device(spidev_dev_id);
index ed89c8f4b2a0497af0e45f075e4781dbb5cf87d2..f8c82a9b401222c84d5d014ac339439a1708caba 100644 (file)
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
 
-       if (discard) {
-               ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
-                               BLKDEV_DISCARD_ZERO, biop);
-               if (ret == 0 || (ret && ret != -EOPNOTSUPP))
-                       goto out;
-       }
-
        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop);
        if (ret == 0 || (ret && ret != -EOPNOTSUPP))
@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        struct bio *bio = NULL;
        struct blk_plug plug;
 
+       if (discard) {
+               if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+                               BLKDEV_DISCARD_ZERO))
+                       return 0;
+       }
+
        blk_start_plug(&plug);
        ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
                        &bio, discard);
index a8e67a155d04f6d937e890473b1647fbe95d210f..c3400b5444a7da9842622cb4b0c94b2f1b5ddd64 100644 (file)
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
 {
        LIST_HEAD(rq_list);
-       LIST_HEAD(driver_list);
 
        if (unlikely(blk_mq_hctx_stopped(hctx)))
                return;
index 472211fa183a6488ef93676a2f89b2f345947c9a..3bd15d8095b101233455d7985b834f828d89be92 100644 (file)
@@ -16,7 +16,7 @@
 static inline sector_t blk_zone_start(struct request_queue *q,
                                      sector_t sector)
 {
-       sector_t zone_mask = blk_queue_zone_size(q) - 1;
+       sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
 
        return sector & ~zone_mask;
 }
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
                return -EINVAL;
 
        /* Check alignment (handle eventual smaller last zone) */
-       zone_sectors = blk_queue_zone_size(q);
+       zone_sectors = blk_queue_zone_sectors(q);
        if (sector & (zone_sectors - 1))
                return -EINVAL;
 
index d7beb6bbbf668fe62083cece4d7a6cb1eafe9443..7afb9907821fb7abe61d76ac1f3ce408bb14dbd5 100644 (file)
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
                              struct block_device *bdev,
                              sector_t from, sector_t size)
 {
-       unsigned int zone_size = bdev_zone_size(bdev);
+       unsigned int zone_sectors = bdev_zone_sectors(bdev);
 
        /*
         * If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
         * regular block devices (no zone operation) and their zone size will
         * be reported as 0. Allow this case.
         */
-       if (!zone_size)
+       if (!zone_sectors)
                return true;
 
        /*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
         * use it. Check the zone size too: it should be a power of 2 number
         * of sectors.
         */
-       if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+       if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
                u32 rem;
 
-               div_u64_rem(from, zone_size, &rem);
+               div_u64_rem(from, zone_sectors, &rem);
                if (rem)
                        return false;
                if ((from + size) < get_capacity(disk)) {
-                       div_u64_rem(size, zone_size, &rem);
+                       div_u64_rem(size, zone_sectors, &rem);
                        if (rem)
                                return false;
                }
 
        } else {
 
-               if (from & (zone_size - 1))
+               if (from & (zone_sectors - 1))
                        return false;
                if ((from + size) < get_capacity(disk) &&
-                   (size & (zone_size - 1)))
+                   (size & (zone_sectors - 1)))
                        return false;
 
        }
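
The partition checks get the same unit-clarifying rename, and the function's structure explains the two branches: div_u64_rem() handles drives whose reported zone size is not a power of two, while the mask test is the cheap path when it is. A worked example of the non-power-of-two branch, with made-up numbers:

    unsigned int zone_sectors = 49152;      /* not a power of two */
    sector_t from = 1048576;                /* candidate partition start */
    u32 rem;

    div_u64_rem(from, zone_sectors, &rem);  /* 1048576 = 21 * 49152 + 16384 */
    if (rem)                                /* rem == 16384: misaligned */
            return false;
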
index 4ef4c5caed4f57bece96aa4efdae4a2ea34313f1..8a8e403644d6e0d6ce838105dfceb30ae5625460 100644 (file)
@@ -132,9 +132,9 @@ config HT16K33
        tristate "Holtek Ht16K33 LED controller with keyscan"
        depends on FB && OF && I2C && INPUT
        select FB_SYS_FOPS
-       select FB_CFB_FILLRECT
-       select FB_CFB_COPYAREA
-       select FB_CFB_IMAGEBLIT
+       select FB_SYS_FILLRECT
+       select FB_SYS_COPYAREA
+       select FB_SYS_IMAGEBLIT
        select INPUT_MATRIXKMAP
        select FB_BACKLIGHT
        help
index ada9dce34e6d8a8373d3d08d5c28b3f6b55ac427..e19b1008e5fb08b1099e5f5d8f0dc86dfc24bde5 100644 (file)
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
 extern struct kset *devices_kset;
 extern void devices_kset_move_last(struct device *dev);
 
-extern struct device_attribute dev_attr_deferred_probe;
-
 #if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
 extern void module_add_driver(struct module *mod, struct device_driver *drv);
 extern void module_remove_driver(struct device_driver *drv);
index 020ea7f0552073c62c91e55d62e0c0151917251e..8c25e68e67d7354005dc81a92342eb10519449ed 100644 (file)
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
                        goto err_remove_dev_groups;
        }
 
-       error = device_create_file(dev, &dev_attr_deferred_probe);
-       if (error)
-               goto err_remove_online;
-
        return 0;
 
- err_remove_online:
-       device_remove_file(dev, &dev_attr_online);
  err_remove_dev_groups:
        device_remove_groups(dev, dev->groups);
  err_remove_type_groups:
@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
        struct class *class = dev->class;
        const struct device_type *type = dev->type;
 
-       device_remove_file(dev, &dev_attr_deferred_probe);
        device_remove_file(dev, &dev_attr_online);
        device_remove_groups(dev, dev->groups);
 
index a8b258e5407bae02c48b03b39941ee238c22a2e3..a1fbf55c4d3abbea786ef5585b96d769ca3a144a 100644 (file)
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
-static ssize_t deferred_probe_show(struct device *dev,
-                                  struct device_attribute *attr, char *buf)
-{
-       bool value;
-
-       mutex_lock(&deferred_probe_mutex);
-       value = !list_empty(&dev->p->deferred_probe);
-       mutex_unlock(&deferred_probe_mutex);
-
-       return sprintf(buf, "%d\n", value);
-}
-DEVICE_ATTR_RO(deferred_probe);
-
 /*
  * In some cases, like suspend to RAM or hibernation, it might be reasonable
  * to prohibit probing of devices as it could be unsafe.
index 8ab8ea1253e62310a68d9e6bf039d8d866ee4019..dacb6a8418aa927e8d75a86470b35b414bf48598 100644 (file)
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index 38c576f76d36fca1f43bc37b6c733ba6bb222849..9fd06eeb1a17b3880e2ad574f4ca081903a67961 100644 (file)
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
-       int result, flags;
+       int result;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        if (type != NBD_CMD_WRITE)
                return 0;
 
-       flags = 0;
        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
+                       int flags = is_last ? 0 : MSG_MORE;
 
-                       if (is_last)
-                               flags = MSG_MORE;
                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        result = sock_send_bvec(nbd, index, &bvec, flags);
@@ -1042,6 +1040,7 @@ static int __init nbd_init(void)
                return -ENOMEM;
 
        for (i = 0; i < nbds_max; i++) {
+               struct request_queue *q;
                struct gendisk *disk = alloc_disk(1 << part_shift);
                if (!disk)
                        goto out;
@@ -1067,12 +1066,13 @@ static int __init nbd_init(void)
                 * every gendisk to have its very own request_queue struct.
                 * These structs are big so we dynamically allocate them.
                 */
-               disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
-               if (!disk->queue) {
+               q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+               if (IS_ERR(q)) {
                        blk_mq_free_tag_set(&nbd_dev[i].tag_set);
                        put_disk(disk);
                        goto out;
                }
+               disk->queue = q;
 
                /*
                 * Tell the block layer that we are not a rotational device
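
The real fix in this nbd hunk (and in the virtio_blk hunk that follows) is the error check: blk_mq_init_queue() reports failure as an ERR_PTR()-encoded pointer, never NULL, so a `!disk->queue` test let a poisoned pointer escape into the driver. Assigning through a local keeps disk->queue either valid or unset; a minimal sketch, with tag_set standing in for the driver's set:

    struct request_queue *q;

    q = blk_mq_init_queue(&tag_set);        /* ERR_PTR(-ENOMEM) on failure */
    if (IS_ERR(q)) {
            blk_mq_free_tag_set(&tag_set);
            return PTR_ERR(q);
    }
    disk->queue = q;                        /* only ever a usable queue */
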
index 5545a679abd8887123fc83d57beb37dede77a685..10332c24f9610d7e80b154bf36eebb0354ac4576 100644 (file)
@@ -56,6 +56,7 @@ struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        u8 status;
+       u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct scatterlist sg[];
 };
 
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
        }
 
        if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
-               sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+               memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+               sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
                sgs[num_out + num_in++] = &sense;
                sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
                sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
        if (err)
                goto out_put_disk;
 
-       q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+       q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
+       vblk->disk->queue = q;
 
        q->queuedata = vblk;
 
index 15f58ab44d0b429fc7b5ebf0ba7e1cdfcc6c914d..e5ab7d9e8c452f2feac3e2f374c6e1639317756b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/genhd.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/backing-dev.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
 #include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
        return bvec->bv_len != PAGE_SIZE;
 }
 
+static void zram_revalidate_disk(struct zram *zram)
+{
+       revalidate_disk(zram->disk);
+       /* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
+       zram->disk->queue->backing_dev_info.capabilities |=
+               BDI_CAP_STABLE_WRITES;
+}
+
 /*
  * Check if request is within bounds and aligned on zram logical blocks.
  */
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
        zram->comp = comp;
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+       zram_revalidate_disk(zram);
        up_write(&zram->init_lock);
 
-       /*
-        * Revalidate disk out of the init_lock to avoid lockdep splat.
-        * It's okay because disk's capacity is protected by init_lock
-        * so that revalidate_disk always sees up-to-date capacity.
-        */
-       revalidate_disk(zram->disk);
-
        return len;
 
 out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
        /* Make sure all the pending I/O are finished */
        fsync_bdev(bdev);
        zram_reset_device(zram);
-       revalidate_disk(zram->disk);
+       zram_revalidate_disk(zram);
        bdput(bdev);
 
        mutex_lock(&bdev->bd_mutex);
index 5bb1985ec484aef267e3c551c3068ec0dee14b1c..6d9cc2d39d22306fd68f30bac6f4a60e6cfa5a87 100644 (file)
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;
 
-       if (!pfn_valid(PFN_DOWN(p)))
-               return -EIO;
-
        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
+                       if (!virt_addr_valid(kbuf))
+                               return -ENXIO;
 
                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
+               if (!virt_addr_valid(ptr))
+                       return -ENXIO;
 
                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;
 
-       if (!pfn_valid(PFN_DOWN(p)))
-               return -EIO;
-
        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
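
The /dev/kmem offset p here is a kernel virtual address, so the removed pfn_valid(PFN_DOWN(p)) guard was effectively treating it as a physical address: it could reject legitimate reads and pass bogus ones. The replacement validates the pointer that is actually dereferenced, after translation, chunk by chunk:

    kbuf = xlate_dev_kmem_ptr((void *)p);   /* translate the offset first */
    if (!virt_addr_valid(kbuf))             /* not backed by a real page? */
            return -ENXIO;
    if (copy_to_user(buf, kbuf, sz))
            return -EFAULT;
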
index 02819e0703c8eef503baf8333c88f105a591444b..87885d146dbb02ed9ed08979dda96579bd5bc652 100644 (file)
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
        struct pardevice *pdev = NULL;
        char *name;
        struct pardev_cb ppdev_cb;
+       int rc = 0;
 
        name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
        if (name == NULL)
@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
        port = parport_find_number(minor);
        if (!port) {
                pr_warn("%s: no associated port!\n", name);
-               kfree(name);
-               return -ENXIO;
+               rc = -ENXIO;
+               goto err;
        }
 
        memset(&ppdev_cb, 0, sizeof(ppdev_cb));
@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
        ppdev_cb.private = pp;
        pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
        parport_put_port(port);
-       kfree(name);
 
        if (!pdev) {
                pr_warn("%s: failed to register device!\n", name);
-               return -ENXIO;
+               rc = -ENXIO;
+               goto err;
        }
 
        pp->pdev = pdev;
        dev_dbg(&pdev->dev, "registered pardevice\n");
-       return 0;
+err:
+       kfree(name);
+       return rc;
 }
 
 static enum ieee1284_phase init_phase(int mode)
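
The ppdev change is a straight conversion to the kernel's single-exit idiom: name is needed on every path (including the failure messages), so one label frees it exactly once instead of each return duplicating the kfree(). Reduced to its shape, with something_failed() as a hypothetical stand-in:

    extern bool something_failed(void);     /* stand-in for the real checks */

    static int register_sketch(void)
    {
            int rc = 0;
            char *name = kasprintf(GFP_KERNEL, "pp%x", 0);

            if (!name)
                    return -ENOMEM;

            if (something_failed()) {
                    rc = -ENXIO;
                    goto err;
            }
            /* success also falls through: name is freed on every path */
    err:
            kfree(name);
            return rc;
    }
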
index 8b00e79c2683a4812126fb73c7134ea157554ef2..17857beb489294b2c1b5bbd068a18ba013ab3fae 100644 (file)
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
 {
        struct ports_device *portdev;
 
-       portdev = container_of(work, struct ports_device, control_work);
+       portdev = container_of(work, struct ports_device, config_work);
        if (!use_multiport(portdev)) {
                struct virtio_device *vdev;
                struct port *port;
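
The virtio_console one-liner fixes a classic container_of() trap: the macro just subtracts offsetof() of the member you name, so naming a sibling member (control_work instead of config_work) compiles cleanly and returns a pointer offset into the wrong part of the struct. A reduced illustration, using a two-member struct for exposition only:

    struct ports_device_sketch {
            struct work_struct control_work;
            struct work_struct config_work;
    };

    static void handler(struct work_struct *work)
    {
            /* work is &pd->config_work, so that member must be named: */
            struct ports_device_sketch *pd =
                    container_of(work, struct ports_device_sketch, config_work);

            /* Naming control_work instead would yield pd shifted by
             * sizeof(struct work_struct), corrupting every later access. */
            (void)pd;
    }
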
index 8c8b495cbf0d502daa51c5c1403125b4e7ea15f3..cdc092a1d9effd7815fbd43314977059a52e0ec8 100644 (file)
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
        GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
                                GATE_BUS_TOP, 24, 0, 0),
        GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
-                               GATE_BUS_TOP, 27, 0, 0),
+                               GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
 
        GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
-                       GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
                        GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
 
        GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
                        GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
-                       GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
                        GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
                        GATE_BUS_TOP, 5, 0, 0),
        GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
-                       GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
                        GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(0, "aclk166", "mout_user_aclk166",
                        GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
-                       GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
                        GATE_BUS_TOP, 16, 0, 0),
        GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
                        GATE_BUS_TOP, 17, 0, 0),
        GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
-                       GATE_BUS_TOP, 18, 0, 0),
+                       GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
        GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
                        GATE_BUS_TOP, 28, 0, 0),
        GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
                        GATE_BUS_TOP, 29, 0, 0),
 
        GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
-                       SRC_MASK_TOP2, 24, 0, 0),
+                       SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
 
        GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
                        SRC_MASK_TOP7, 20, 0, 0),
index 4da1dc2278bd7fc34caa9e00d29f71ec1ebd015f..670ff0f25b6712ea8e2875a5ecc2a4c46cb1c554 100644 (file)
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
        if (mct_int_type == MCT_INT_SPI) {
                if (evt->irq != -1)
                        disable_irq_nosync(evt->irq);
+               exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
        } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
        }
index e00c9b022964702c076b1a192df35b9bbbf7e113..5a37b9fcf40ddf813a6cf5d2c401bc221fd5e83b 100644 (file)
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
        select DW_DMAC_CORE
        help
          Support the Synopsys DesignWare AHB DMA controller on the
-         platfroms that enumerate it as a PCI device. For example,
+         platforms that enumerate it as a PCI device. For example,
          Intel Medfield has integrated this GPDMA controller.
index 8e67895bcca3a2db74e5b72e00374e2118ce95f7..abcc51b343cecd1629700233e8ca9d8882da2dff 100644 (file)
@@ -64,6 +64,8 @@
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX8  0x6f2e
 #define PCI_DEVICE_ID_INTEL_IOAT_BDX9  0x6f2f
 
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX   0x2021
+
 #define IOAT_VER_1_2            0x12    /* Version 1.2 */
 #define IOAT_VER_2_0            0x20    /* Version 2.0 */
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
index 90eddd9f07e4fc1b354bad70a935a8cfec20e246..cc5259b881d47ffa48322e2a537397317e8b8be1 100644 (file)
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
        /* I/OAT v3.3 platforms */
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
        }
 }
 
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+       return pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX;
+}
+
 static bool is_xeon_cb32(struct pci_dev *pdev)
 {
        return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
-               is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+               is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
 }
 
 bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        ioat_chan->completion =
                dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
-                               GFP_KERNEL, &ioat_chan->completion_dma);
+                               GFP_NOWAIT, &ioat_chan->completion_dma);
        if (!ioat_chan->completion)
                return -ENOMEM;
 
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
               ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
 
        order = IOAT_MAX_ORDER;
-       ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+       ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
        if (!ring)
                return -ENOMEM;
 
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        device->version = readb(device->reg_base + IOAT_VER_OFFSET);
        if (device->version >= IOAT_VER_3_0) {
+               if (is_skx_ioat(pdev))
+                       device->version = IOAT_VER_3_2;
                err = ioat3_dma_probe(device, ioat_dca_enabled);
 
                if (device->version >= IOAT_VER_3_3)
index ac68666cd3f4e99cd3bc2f4ae346f5e78cd218ce..daf479cce69158e480757d269630d4bed7d6bca2 100644 (file)
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                d->ccr |= CCR_DST_AMODE_POSTINC;
                if (port_window) {
                        d->ccr |= CCR_SRC_AMODE_DBLIDX;
-                       d->ei = 1;
-                       /*
-                        * One frame covers the port_window and by  configure
-                        * the source frame index to be -1 * (port_window - 1)
-                        * we instruct the sDMA that after a frame is processed
-                        * it should move back to the start of the window.
-                        */
-                       d->fi = -(port_window_bytes - 1);
 
                        if (port_window_bytes >= 64)
-                               d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+                               d->csdp |= CSDP_SRC_BURST_64;
                        else if (port_window_bytes >= 32)
-                               d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+                               d->csdp |= CSDP_SRC_BURST_32;
                        else if (port_window_bytes >= 16)
-                               d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+                               d->csdp |= CSDP_SRC_BURST_16;
+
                } else {
                        d->ccr |= CCR_SRC_AMODE_CONSTANT;
                }
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                d->ccr |= CCR_SRC_AMODE_POSTINC;
                if (port_window) {
                        d->ccr |= CCR_DST_AMODE_DBLIDX;
+                       d->ei = 1;
+                       /*
+                        * One frame covers the port_window; by configuring
+                        * the destination frame index to be -1 * (port_window - 1)
+                        * we instruct the sDMA that after a frame is processed
+                        * it should move back to the start of the window.
+                        */
+                       d->fi = -(port_window_bytes - 1);
 
                        if (port_window_bytes >= 64)
-                               d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+                               d->csdp |= CSDP_DST_BURST_64;
                        else if (port_window_bytes >= 32)
-                               d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+                               d->csdp |= CSDP_DST_BURST_32;
                        else if (port_window_bytes >= 16)
-                               d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+                               d->csdp |= CSDP_DST_BURST_16;
                } else {
                        d->ccr |= CCR_DST_AMODE_CONSTANT;
                }
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
                osg->addr = sg_dma_address(sgent);
                osg->en = en;
                osg->fn = sg_dma_len(sgent) / frame_bytes;
-               if (port_window && dir == DMA_MEM_TO_DEV) {
+               if (port_window && dir == DMA_DEV_TO_MEM) {
                        osg->ei = 1;
                        /*
                         * One frame covers the port_window; by configuring
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
        struct omap_dmadev *od;
        struct resource *res;
        int rc, i, irq;
+       u32 lch_count;
 
        od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
        if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
        spin_lock_init(&od->lock);
        spin_lock_init(&od->irq_lock);
 
-       if (!pdev->dev.of_node) {
-               od->dma_requests = od->plat->dma_attr->lch_count;
-               if (unlikely(!od->dma_requests))
-                       od->dma_requests = OMAP_SDMA_REQUESTS;
-       } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
-                                       &od->dma_requests)) {
+       /* Number of DMA requests */
+       od->dma_requests = OMAP_SDMA_REQUESTS;
+       if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+                                                     "dma-requests",
+                                                     &od->dma_requests)) {
                dev_info(&pdev->dev,
                         "Missing dma-requests property, using %u.\n",
                         OMAP_SDMA_REQUESTS);
-               od->dma_requests = OMAP_SDMA_REQUESTS;
        }
 
-       od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
-                                  sizeof(*od->lch_map), GFP_KERNEL);
+       /* Number of available logical channels */
+       if (!pdev->dev.of_node) {
+               lch_count = od->plat->dma_attr->lch_count;
+               if (unlikely(!lch_count))
+                       lch_count = OMAP_SDMA_CHANNELS;
+       } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+                                       &lch_count)) {
+               dev_info(&pdev->dev,
+                        "Missing dma-channels property, using %u.\n",
+                        OMAP_SDMA_CHANNELS);
+               lch_count = OMAP_SDMA_CHANNELS;
+       }
+
+       od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+                                  GFP_KERNEL);
        if (!od->lch_map)
                return -ENOMEM;
 
index 87fd01539fcb74daa0c80a62a268311b7125dce1..740bbb942594873b08deecb59c801460dcc868ab 100644 (file)
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
 
        /* for cyclic capability */
        bool cyclic;
+
+       /* for runtime pm tracking */
+       bool active;
 };
 
 struct pl330_dmac {
@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
                _stop(pch->thread);
                spin_unlock(&pch->thread->dmac->lock);
                power_down = true;
+               pch->active = false;
        } else {
                /* Make sure the PL330 Channel thread is active */
                spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
                        desc->status = PREP;
                        list_move_tail(&desc->node, &pch->work_list);
                        if (power_down) {
+                               pch->active = true;
                                spin_lock(&pch->thread->dmac->lock);
                                _start(pch->thread);
                                spin_unlock(&pch->thread->dmac->lock);
@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
        unsigned long flags;
        struct pl330_dmac *pl330 = pch->dmac;
        LIST_HEAD(list);
+       bool power_down = false;
 
        pm_runtime_get_sync(pl330->ddma.dev);
        spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
        pch->thread->req[0].desc = NULL;
        pch->thread->req[1].desc = NULL;
        pch->thread->req_running = -1;
+       power_down = pch->active;
+       pch->active = false;
 
        /* Mark all desc done */
        list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
        list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
        spin_unlock_irqrestore(&pch->lock, flags);
        pm_runtime_mark_last_busy(pl330->ddma.dev);
+       if (power_down)
+               pm_runtime_put_autosuspend(pl330->ddma.dev);
        pm_runtime_put_autosuspend(pl330->ddma.dev);
 
        return 0;
@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
                 * updated on work_list emptiness status.
                 */
                WARN_ON(list_empty(&pch->submitted_list));
+               pch->active = true;
                pm_runtime_get_sync(pch->dmac->ddma.dev);
        }
        list_splice_tail_init(&pch->submitted_list, &pch->work_list);
index 2e441d0ccd79a37a5486d394310839c2cdc47f01..4c357d47546594c6bd0c9b1ca16e27c658a720cf 100644 (file)
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
 {
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+       struct rcar_dmac_chan_map *map = &rchan->map;
        struct rcar_dmac_desc_page *page, *_page;
        struct rcar_dmac_desc *desc;
        LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
                free_page((unsigned long)page);
        }
 
+       /* Remove slave mapping if present. */
+       if (map->slave.xfer_size) {
+               dma_unmap_resource(chan->device->dev, map->addr,
+                                  map->slave.xfer_size, map->dir, 0);
+               map->slave.xfer_size = 0;
+       }
+
        pm_runtime_put(chan->device->dev);
 }
 
index 3688d0873a3e1c844470c31105ecabd3b5d49be3..3056ce7f8c69d01c61fe3ab0eeff6ad299f538f7 100644 (file)
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
        struct virt_dma_desc *vdesc;
        enum dma_status status;
        unsigned long flags;
-       u32 residue;
+       u32 residue = 0;
 
        status = dma_cookie_status(c, cookie, state);
        if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
        vdesc = vchan_find_desc(&chan->vchan, cookie);
-       if (cookie == chan->desc->vdesc.tx.cookie) {
+       if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
                residue = stm32_dma_desc_residue(chan, chan->desc,
                                                 chan->next_sg);
-       } else if (vdesc) {
+       else if (vdesc)
                residue = stm32_dma_desc_residue(chan,
                                                 to_stm32_dma_desc(vdesc), 0);
-       } else {
-               residue = 0;
-       }
-
        dma_set_residue(state, residue);
 
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
        struct stm32_dma_chan *chan;
        struct dma_chan *c;
 
-       if (dma_spec->args_count < 3)
+       if (dma_spec->args_count < 4)
                return NULL;
 
        cfg.channel_id = dma_spec->args[0];
        cfg.request_line = dma_spec->args[1];
        cfg.stream_config = dma_spec->args[2];
-       cfg.threshold = 0;
+       cfg.threshold = dma_spec->args[3];
 
        if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
                                STM32_DMA_MAX_REQUEST_ID))
                return NULL;
 
-       if (dma_spec->args_count > 3)
-               cfg.threshold = dma_spec->args[3];
-
        chan = &dmadev->chan[cfg.channel_id];
 
        c = dma_get_slave_channel(&chan->vchan.chan);
index 3f24aeb48c0e6735f13995ded459cac776b2620f..2403475a37cf9203eb0651ae7f412ef5f63f45d6 100644 (file)
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
        match = of_match_node(ti_am335x_master_match, dma_node);
        if (!match) {
                dev_err(&pdev->dev, "DMA master is not supported\n");
+               of_node_put(dma_node);
                return -EINVAL;
        }
 
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
        match = of_match_node(ti_dra7_master_match, dma_node);
        if (!match) {
                dev_err(&pdev->dev, "DMA master is not supported\n");
+               of_node_put(dma_node);
                return -EINVAL;
        }
 
index 78298460d1686a79864c57d228ac6af5e114acf9..7c1e3a7b14e0c1f540a0bbe7d6b924b124561729 100644 (file)
@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
                dev_err(&edev->dev, "out of memory in extcon_set_state\n");
                kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
 
-               return 0;
+               return -ENOMEM;
        }
 
        length = name_show(&edev->dev, NULL, prop_buf);
index 520a40e5e0e431129cf1fe66a7604d32d7e20061..6c7d60c239b5b459b1f45e1d1110f7ed5a265cb6 100644 (file)
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
        }
 
        /* allocate memory for new EFI memmap */
-       new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
-                                       PAGE_SIZE);
+       new_memmap_phy = efi_memmap_alloc(new_nr_map);
        if (!new_memmap_phy)
                return;
 
index b98824e3800abbb57b110d7cc92c0ca8417791d2..0e2a96b12cb3647635db19912ec0ab4004f71572 100644 (file)
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
 
 unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
 
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-                       unsigned long orig_fdt_size,
-                       void *fdt, int new_fdt_size, char *cmdline_ptr,
-                       u64 initrd_addr, u64 initrd_size,
-                       efi_memory_desc_t *memory_map,
-                       unsigned long map_size, unsigned long desc_size,
-                       u32 desc_ver);
-
 efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                                            void *handle,
                                            unsigned long *new_fdt_addr,
index a6a93116a8f053f6c14911376ffa6da7f1dff44e..921dfa047202952c9064cd39971e68e0e3c28b49 100644 (file)
 
 #include "efistub.h"
 
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
-                       unsigned long orig_fdt_size,
-                       void *fdt, int new_fdt_size, char *cmdline_ptr,
-                       u64 initrd_addr, u64 initrd_size,
-                       efi_memory_desc_t *memory_map,
-                       unsigned long map_size, unsigned long desc_size,
-                       u32 desc_ver)
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+                              unsigned long orig_fdt_size,
+                              void *fdt, int new_fdt_size, char *cmdline_ptr,
+                              u64 initrd_addr, u64 initrd_size)
 {
        int node, num_rsv;
        int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        if (status)
                goto fdt_set_fail;
 
-       fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
+       fdt_val64 = U64_MAX; /* placeholder */
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
                             &fdt_val64,  sizeof(fdt_val64));
        if (status)
                goto fdt_set_fail;
 
-       fdt_val32 = cpu_to_fdt32(map_size);
+       fdt_val32 = U32_MAX; /* placeholder */
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
                             &fdt_val32,  sizeof(fdt_val32));
        if (status)
                goto fdt_set_fail;
 
-       fdt_val32 = cpu_to_fdt32(desc_size);
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
                             &fdt_val32, sizeof(fdt_val32));
        if (status)
                goto fdt_set_fail;
 
-       fdt_val32 = cpu_to_fdt32(desc_ver);
        status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
                             &fdt_val32, sizeof(fdt_val32));
        if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
        return EFI_LOAD_ERROR;
 }
 
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+       int node = fdt_path_offset(fdt, "/chosen");
+       u64 fdt_val64;
+       u32 fdt_val32;
+       int err;
+
+       if (node < 0)
+               return EFI_LOAD_ERROR;
+
+       fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+                                 &fdt_val64, sizeof(fdt_val64));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       fdt_val32 = cpu_to_fdt32(*map->map_size);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+                                 &fdt_val32, sizeof(fdt_val32));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       fdt_val32 = cpu_to_fdt32(*map->desc_size);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+                                 &fdt_val32, sizeof(fdt_val32));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+       err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+                                 &fdt_val32, sizeof(fdt_val32));
+       if (err)
+               return EFI_LOAD_ERROR;
+
+       return EFI_SUCCESS;
+}
+
 #ifndef EFI_FDT_ALIGN
 #define EFI_FDT_ALIGN EFI_PAGE_SIZE
 #endif
@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                        goto fail;
                }
 
-               /*
-                * Now that we have done our final memory allocation (and free)
-                * we can get the memory map key  needed for
-                * exit_boot_services().
-                */
-               status = efi_get_memory_map(sys_table, &map);
-               if (status != EFI_SUCCESS)
-                       goto fail_free_new_fdt;
-
                status = update_fdt(sys_table,
                                    (void *)fdt_addr, fdt_size,
                                    (void *)*new_fdt_addr, new_fdt_size,
-                                   cmdline_ptr, initrd_addr, initrd_size,
-                                   memory_map, map_size, desc_size, desc_ver);
+                                   cmdline_ptr, initrd_addr, initrd_size);
 
                /* Succeeding the first time is the expected case. */
                if (status == EFI_SUCCESS)
@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
                        /*
                         * We need to allocate more space for the new
                         * device tree, so free existing buffer that is
-                        * too small.  Also free memory map, as we will need
-                        * to get new one that reflects the free/alloc we do
-                        * on the device tree buffer.
+                        * too small.
                         */
                        efi_free(sys_table, new_fdt_size, *new_fdt_addr);
-                       sys_table->boottime->free_pool(memory_map);
                        new_fdt_size += EFI_PAGE_SIZE;
                } else {
                        pr_efi_err(sys_table, "Unable to construct new device tree.\n");
-                       goto fail_free_mmap;
+                       goto fail_free_new_fdt;
                }
        }
 
-       sys_table->boottime->free_pool(memory_map);
        priv.runtime_map = runtime_map;
        priv.runtime_entry_count = &runtime_entry_count;
        status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
        if (status == EFI_SUCCESS) {
                efi_set_virtual_address_map_t *svam;
 
+               status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+               if (status != EFI_SUCCESS) {
+                       /*
+                        * The kernel won't get far without the memory map, but
+                        * may still be able to print something meaningful so
+                        * return success here.
+                        */
+                       return EFI_SUCCESS;
+               }
+
                /* Install the new virtual address map */
                svam = sys_table->runtime->set_virtual_address_map;
                status = svam(runtime_entry_count * desc_size, desc_size,
@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
 
        pr_efi_err(sys_table, "Exit boot services failed.\n");
 
-fail_free_mmap:
-       sys_table->boottime->free_pool(memory_map);
-
 fail_free_new_fdt:
        efi_free(sys_table, new_fdt_size, *new_fdt_addr);
 
index f03ddecd232b542ce4c7163506b69da0d29ad2c7..78686443cb378abf616c7cfe595b82d3a51c9cd6 100644 (file)
@@ -9,6 +9,44 @@
 #include <linux/efi.h>
 #include <linux/io.h>
 #include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+       return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+       unsigned int order = get_order(size);
+       struct page *p = alloc_pages(GFP_KERNEL, order);
+
+       if (!p)
+               return 0;
+
+       return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+       unsigned long size = num_entries * efi.memmap.desc_size;
+
+       if (slab_is_available())
+               return __efi_memmap_alloc_late(size);
+
+       return __efi_memmap_alloc_early(size);
+}
 
 /**
  * __efi_memmap_init - Common code for mapping the EFI memory map
index 1e8fde8cb803d7406dd98d51673b63eb69fdfd23..2292742eac8f5441642469f8fa05e04ca0979afb 100644 (file)
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
        return 0;
 }
 
-static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
 {
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
index f4c26c7826cdfc0b5163632e596c0e664586549b..a07ae9e37930767643302ccbec4a7284275a0f25 100644 (file)
@@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
 
        /* FIXME: should the legacy sysfs handling be moved to gpio_device? */
        gpiochip_sysfs_unregister(gdev);
+       gpiochip_free_hogs(chip);
        /* Numb the device, cancelling all outstanding operations */
        gdev->chip = NULL;
        gpiochip_irqchip_remove(chip);
        acpi_gpiochip_remove(chip);
        gpiochip_remove_pin_ranges(chip);
-       gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
        /*
         * We accept no more calls into the driver from this point, so
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
 }
 
 /**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
  * @gpiochip: the gpiochip to add the irqchip to
  * @irqchip: the irqchip to add to the gpiochip
  * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * needs to be open coded.
  */
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-                         struct irq_chip *irqchip,
-                         unsigned int first_irq,
-                         irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key)
 {
        struct device_node *of_node;
        bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
 
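Renaming _gpiochip_irqchip_add() to gpiochip_irqchip_add_key() surfaces what the extra parameter is for: lockdep needs a distinct lock class per irqchip so that nested GPIO irqchips do not look like self-deadlock. The exported _key function is normally reached through a wrapper macro that mints a static key per call site; a sketch of that pattern (not necessarily the exact <linux/gpio/driver.h> text):

    #ifdef CONFIG_LOCKDEP
    #define gpiochip_irqchip_add(gc, irqchip, first_irq, handler, type)    \
    ({                                                                     \
            static struct lock_class_key gpio_lock_class;                  \
            gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler,      \
                                     type, false, &gpio_lock_class);       \
    })
    #else
    #define gpiochip_irqchip_add(gc, irqchip, first_irq, handler, type)    \
            gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler,      \
                                     type, false, NULL)
    #endif
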
index 9ada56c16a589d01e43162f656c0a25c8d926284..4c851fde1e82231d04366b1916da170d0b24cb0e 100644 (file)
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
                                else if (type == CGS_UCODE_ID_SMU_SK)
                                        strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
                                break;
+                       case CHIP_POLARIS12:
+                               strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+                               break;
                        default:
                                DRM_ERROR("SMC firmware not supported\n");
                                return -EINVAL;
index 31ca817f16b63d3208526277923481387cdb673c..2201303b9262edf0f00e8ec31fba8c915f4808f1 100644 (file)
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
        "STONEY",
        "POLARIS10",
        "POLARIS11",
+       "POLARIS12",
        "LAST",
 };
 
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
        case CHIP_FIJI:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
index ec4783881597bf7ccedfbe6f17cc2569f24cbd75..ba2816b3cd07253eb954ed21659a16247d46ead0 100644 (file)
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
        {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+       /* Polaris12 */
+       {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+       {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
 
        {0, 0, 0}
 };
index fc592c2b0e166825100c1908691f02ac2f49a642..95a568df8551e589701b9cb2d1be6c04d97bf14a 100644 (file)
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
        switch (adev->asic_type) {
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        case CHIP_TONGA:
        case CHIP_FIJI:
        case CHIP_TOPAZ:
index a81dfaeeb8c0713e61b66046cf35fb8f89f86161..1d564beb0fde67541eab9be4ee76a39eb89197e1 100644 (file)
@@ -65,6 +65,7 @@
 #define FIRMWARE_STONEY                "amdgpu/stoney_uvd.bin"
 #define FIRMWARE_POLARIS10     "amdgpu/polaris10_uvd.bin"
 #define FIRMWARE_POLARIS11     "amdgpu/polaris11_uvd.bin"
+#define FIRMWARE_POLARIS12     "amdgpu/polaris12_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
 
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
+       case CHIP_POLARIS12:
+               fw_name = FIRMWARE_POLARIS12;
+               break;
        default:
                return -EINVAL;
        }
index 69b66b9e7f57e4ffaab3f83d6c17fb3a7161df34..8fec802d3908c05459c1e0ca2737cd19450ce9a4 100644 (file)
@@ -52,6 +52,7 @@
 #define FIRMWARE_STONEY                "amdgpu/stoney_vce.bin"
 #define FIRMWARE_POLARIS10     "amdgpu/polaris10_vce.bin"
 #define FIRMWARE_POLARIS11         "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12         "amdgpu/polaris12_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
 MODULE_FIRMWARE(FIRMWARE_STONEY);
 MODULE_FIRMWARE(FIRMWARE_POLARIS10);
 MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
        case CHIP_POLARIS11:
                fw_name = FIRMWARE_POLARIS11;
                break;
+       case CHIP_POLARIS12:
+               fw_name = FIRMWARE_POLARIS12;
+               break;
 
        default:
                return -EINVAL;
index 84afaae97e659cb129779619495891e0b52875cf..d4452d8f76caa18d84936e4d5b644a12c2ea04df 100644 (file)
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v10_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v10_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
                dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v10_0_show_cursor(crtc);
 
                dce_v10_0_lock_cursor(crtc, false);
index 7a7fa96d2e4990d75c7a6a9c5bdd85f4aa79101b..1cf1d9d1aec1348e08b2c83add375d7616be7b98 100644 (file)
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 polaris11_golden_settings_a11,
                                                 (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
                num_crtc = 6;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                num_crtc = 5;
                break;
        default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
                adev->mode_info.audio.num_pins = 8;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                adev->mode_info.audio.num_pins = 6;
                break;
        default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
        int pll;
 
        if ((adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11)) {
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12)) {
                struct amdgpu_encoder *amdgpu_encoder =
                        to_amdgpu_encoder(amdgpu_crtc->encoder);
                struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v11_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v11_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v11_0_show_cursor(crtc);
@@ -2636,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v11_0_lock_cursor(crtc, true);
@@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
                dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v11_0_show_cursor(crtc);
 
                dce_v11_0_lock_cursor(crtc, false);
@@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
                return -EINVAL;
 
        if ((adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11)) {
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12)) {
                struct amdgpu_encoder *amdgpu_encoder =
                        to_amdgpu_encoder(amdgpu_crtc->encoder);
                int encoder_mode =
@@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle)
                adev->mode_info.num_dig = 6;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
@@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle)
        amdgpu_atombios_crtc_powergate_init(adev);
        amdgpu_atombios_encoder_init_dig(adev);
        if ((adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11)) {
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12)) {
                amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
                                                   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
                amdgpu_atombios_crtc_set_dce_clock(adev, 0,
index 44f024c9b9aa008ec5fb55043fe00c817199cf4a..809aa94a0cc1f936a02c950898d840d2a6bd13a9 100644 (file)
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int xorigin = 0, yorigin = 0;
 
+       int w = amdgpu_crtc->cursor_width;
+
        amdgpu_crtc->cursor_x = x;
        amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v6_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v6_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
                dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v6_0_show_cursor(crtc);
                dce_v6_0_lock_cursor(crtc, false);
        }
index 30945fe55ac7aaa2b4b18818b1db9c60c7614518..d2590d75aa1129d0b790b753da6e2c8163aaeccd 100644 (file)
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v8_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v8_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
                dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v8_0_show_cursor(crtc);
 
                dce_v8_0_lock_cursor(crtc, false);
index d0ec00986f3826c32957ab432c677c53c24091c2..373374164bd59edc0970e3f73a77ca31b17652e7 100644 (file)
@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
        {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(tonga_golden_common_all));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_polaris11_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
+       case CHIP_POLARIS12:
+               chip_name = "polaris12";
+               break;
        case CHIP_STONEY:
                chip_name = "stoney";
                break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
                gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                ret = amdgpu_atombios_get_gfx_info(adev);
                if (ret)
                        return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
 
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
                                PIPE_CONFIG(ADDR_SURF_P4_16x16) |
                                TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
                *rconf1 |= 0x0;
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                *rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
                          SE_XSEL(1) | SE_YSEL(1);
                *rconf1 |= 0x0;
@@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
                        cz_enable_cp_power_gating(adev, true);
                else
                        cz_enable_cp_power_gating(adev, false);
-       } else if (adev->asic_type == CHIP_POLARIS11) {
+       } else if ((adev->asic_type == CHIP_POLARIS11) ||
+                  (adev->asic_type == CHIP_POLARIS12)) {
                gfx_v8_0_init_csb(adev);
                gfx_v8_0_init_save_restore_list(adev);
                gfx_v8_0_enable_save_restore_machine(adev);
@@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
                 RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
        WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
        if (adev->asic_type == CHIP_POLARIS11 ||
-           adev->asic_type == CHIP_POLARIS10) {
+           adev->asic_type == CHIP_POLARIS10 ||
+           adev->asic_type == CHIP_POLARIS12) {
                tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
                tmp &= ~0x3;
                WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
                amdgpu_ring_write(ring, 0x0000002A);
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_ring_write(ring, 0x16000012);
                amdgpu_ring_write(ring, 0x00000000);
                break;
@@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                            (adev->asic_type == CHIP_FIJI) ||
                            (adev->asic_type == CHIP_STONEY) ||
                            (adev->asic_type == CHIP_POLARIS11) ||
-                           (adev->asic_type == CHIP_POLARIS10)) {
+                           (adev->asic_type == CHIP_POLARIS10) ||
+                           (adev->asic_type == CHIP_POLARIS12)) {
                                WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
                                       AMDGPU_DOORBELL_KIQ << 2);
                                WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
                mqd->cp_hqd_persistent_state = tmp;
                if (adev->asic_type == CHIP_STONEY ||
                        adev->asic_type == CHIP_POLARIS11 ||
-                       adev->asic_type == CHIP_POLARIS10) {
+                       adev->asic_type == CHIP_POLARIS10 ||
+                       adev->asic_type == CHIP_POLARIS12) {
                        tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
                        tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
                        WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
 static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
                                                       bool enable)
 {
-       if (adev->asic_type == CHIP_POLARIS11)
+       if ((adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12))
                /* Send msg to SMU via Powerplay */
                amdgpu_set_powergating_state(adev,
                                             AMD_IP_BLOCK_TYPE_SMC,
@@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
                        gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
                        gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
                else
index 45a573e63d4ae9778cff0b022ef53e22163e9a7b..e2b0b1646f995fd94d12e17a8cb1258bec34061f 100644 (file)
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        const char *chip_name;
        char fw_name[30];
        int err;
+       bool is_58_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       /* this memory configuration requires special firmware */
+       if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               is_58_fw = true;
+
+       if (is_58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
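The firmware pick above keys off bits 31:24 of MC_SEQ_MISC0, the same field the MC_SEQ_MISC0__MT__* masks at the top of the file describe. The check in isolation, as a hedged standalone sketch:

#include <stdbool.h>
#include <stdint.h>

/* Bits 31:24 of MC_SEQ_MISC0 encode the memory configuration; per the
 * hunk above, the 0x58 configuration needs the dedicated si58_mc.bin. */
static bool needs_si58_fw(uint32_t mc_seq_misc0)
{
	return ((mc_seq_misc0 & 0xff000000) >> 24) == 0x58;
}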
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v6_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v6_0_set_fault_enable_default(adev, true);
 
        gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       else
+               return 0;
 }
 
 static int gmc_v6_0_sw_init(void *handle)
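Both gmc_v6_0 hunks hinge on the amdgpu.vm_fault_stop module parameter: when faults are set to always halt the GPU, the VM_CONTEXT1 fault-enable bits stay off and late_init skips requesting the interrupt. A sketch of that decision; the constant values below are my assumption about the driver's definitions, not something shown in this diff:

/* Assumed values; the driver defines AMDGPU_VM_FAULT_STOP_* itself. */
#define AMDGPU_VM_FAULT_STOP_NEVER	0	/* never halt on a VM fault */
#define AMDGPU_VM_FAULT_STOP_FIRST	1	/* halt on the first fault  */
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2	/* halt on every fault      */

static int amdgpu_vm_fault_stop = AMDGPU_VM_FAULT_STOP_NEVER;

static int want_vm_fault_irq(void)
{
	/* With STOP_ALWAYS the hardware halts on its own, so the fault
	 * interrupt is neither enabled nor requested. */
	return amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS;
}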
index 0daac3a5be79572e65a2fff812453c2bd541940c..476bc9f1954b9daee41d154a94b17ee0896e59ed 100644 (file)
@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_polaris11_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
+       case CHIP_POLARIS12:
+               chip_name = "polaris12";
+               break;
        case CHIP_FIJI:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
index 1170a64a3184f6788c58cf1f6038bacaf45c4e62..034ace79ed492951d1ed06b9dc1543419b1948bf 100644 (file)
@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
 
 
 static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
                                                 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
                break;
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                amdgpu_program_register_sequence(adev,
                                                 golden_settings_polaris11_a11,
                                                 (const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_POLARIS10:
                chip_name = "polaris10";
                break;
+       case CHIP_POLARIS12:
+               chip_name = "polaris12";
+               break;
        case CHIP_CARRIZO:
                chip_name = "carrizo";
                break;
index 6c65a1a2de798b5630d6b356065a32fe578608c5..6e150db8f380417870cea60283ecacaf1c14fa7d 100644 (file)
@@ -56,7 +56,6 @@
 #define BIOS_SCRATCH_4                                    0x5cd
 
 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
 MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
 union power_info {
        struct _ATOM_POWERPLAY_INFO info;
@@ -3488,30 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6817) ||
                    (adev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (adev->asic_type == CHIP_VERDE) {
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6820) ||
-                   (adev->pdev->device == 0x6821) ||
-                   (adev->pdev->device == 0x6822) ||
-                   (adev->pdev->device == 0x6823) ||
-                   (adev->pdev->device == 0x682A) ||
-                   (adev->pdev->device == 0x682B)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
-       } else if (adev->asic_type == CHIP_OLAND) {
-               if ((adev->pdev->revision == 0xC7) ||
-                   (adev->pdev->revision == 0x80) ||
-                   (adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6604) ||
-                   (adev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (adev->asic_type == CHIP_HAINAN) {
                if ((adev->pdev->revision == 0x81) ||
                    (adev->pdev->revision == 0x83) ||
@@ -3520,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6665) ||
                    (adev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
@@ -7687,50 +7662,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
                chip_name = "tahiti";
                break;
        case CHIP_PITCAIRN:
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->device == 0x6810) ||
-                   (adev->pdev->device == 0x6811) ||
-                   (adev->pdev->device == 0x6816) ||
-                   (adev->pdev->device == 0x6817) ||
-                   (adev->pdev->device == 0x6806))
+               if ((adev->pdev->revision == 0x81) &&
+                   ((adev->pdev->device == 0x6810) ||
+                   (adev->pdev->device == 0x6811)))
                        chip_name = "pitcairn_k";
                else
                        chip_name = "pitcairn";
                break;
        case CHIP_VERDE:
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6820) ||
-                   (adev->pdev->device == 0x6821) ||
-                   (adev->pdev->device == 0x6822) ||
-                   (adev->pdev->device == 0x6823) ||
-                   (adev->pdev->device == 0x682A) ||
-                   (adev->pdev->device == 0x682B))
+               if (((adev->pdev->device == 0x6820) &&
+                       ((adev->pdev->revision == 0x81) ||
+                       (adev->pdev->revision == 0x83))) ||
+                   ((adev->pdev->device == 0x6821) &&
+                       ((adev->pdev->revision == 0x83) ||
+                       (adev->pdev->revision == 0x87))) ||
+                   ((adev->pdev->revision == 0x87) &&
+                       ((adev->pdev->device == 0x6823) ||
+                       (adev->pdev->device == 0x682b))))
                        chip_name = "verde_k";
                else
                        chip_name = "verde";
                break;
        case CHIP_OLAND:
-               if ((adev->pdev->revision == 0xC7) ||
-                   (adev->pdev->revision == 0x80) ||
-                   (adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6604) ||
-                   (adev->pdev->device == 0x6605))
+               if (((adev->pdev->revision == 0x81) &&
+                       ((adev->pdev->device == 0x6600) ||
+                       (adev->pdev->device == 0x6604) ||
+                       (adev->pdev->device == 0x6605) ||
+                       (adev->pdev->device == 0x6610))) ||
+                   ((adev->pdev->revision == 0x83) &&
+                       (adev->pdev->device == 0x6610)))
                        chip_name = "oland_k";
                else
                        chip_name = "oland";
                break;
        case CHIP_HAINAN:
-               if ((adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0xC3) ||
-                   (adev->pdev->device == 0x6664) ||
-                   (adev->pdev->device == 0x6665) ||
-                   (adev->pdev->device == 0x6667))
+               if (((adev->pdev->revision == 0x81) &&
+                       (adev->pdev->device == 0x6660)) ||
+                   ((adev->pdev->revision == 0x83) &&
+                       ((adev->pdev->device == 0x6660) ||
+                       (adev->pdev->device == 0x6663) ||
+                       (adev->pdev->device == 0x6665) ||
+                       (adev->pdev->device == 0x6667))))
                        chip_name = "hainan_k";
+               else if ((adev->pdev->revision == 0xc3) &&
+                        (adev->pdev->device == 0x6665))
+                       chip_name = "banks_k_2";
                else
                        chip_name = "hainan";
                break;
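The rework above replaces broad OR-chains (match on any listed device or any listed revision) with exact (device, revision) pairs. Restated as a table-driven sketch; only the two pitcairn_k pairs are taken from the hunk, everything else is illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct si_fw_id {
	uint16_t device;	/* PCI device id   */
	uint8_t revision;	/* PCI revision id */
};

static const struct si_fw_id pitcairn_k_ids[] = {
	{ 0x6810, 0x81 },
	{ 0x6811, 0x81 },
};

static bool needs_k_fw(uint16_t device, uint8_t revision,
		       const struct si_fw_id *ids, size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (ids[i].device == device && ids[i].revision == revision)
			return true;
	return false;
}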
index 96444e4d862af3f011c3b3de8481aafe39529414..7fb9137dd89b1c2bc064c3ea516c243721b5dafe 100644 (file)
 #include "smu/smu_7_0_1_sh_mask.h"
 
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v4_2_start(struct amdgpu_device *adev);
 static void uvd_v4_2_stop(struct amdgpu_device *adev);
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+                            bool sw_mode);
 /**
  * uvd_v4_2_ring_get_rptr - get read pointer
  *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
 
        return r;
 }
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+                                bool enable);
 /**
  * uvd_v4_2_hw_init - start and test UVD block
  *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
        uint32_t tmp;
        int r;
 
-       uvd_v4_2_init_cg(adev);
-       uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+       uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        r = uvd_v4_2_start(adev);
        if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz;
        int i, j, r;
-
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;
 
+       WREG32(mmUVD_CGC_GATE, 0);
+       uvd_v4_2_set_dcm(adev, true);
+
        uvd_v4_2_mc_resume(adev);
 
        /* disable interrupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
 
        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       uvd_v4_2_set_dcm(adev, false);
 }
 
 /**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
 }
 
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
-       bool hw_mode = true;
-
-       if (hw_mode) {
-               uvd_v4_2_set_dcm(adev, false);
-       } else {
-               u32 tmp = RREG32(mmUVD_CGC_CTRL);
-               tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-               WREG32(mmUVD_CGC_CTRL, tmp);
-       }
-}
-
 static bool uvd_v4_2_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
-       bool gate = false;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-               return 0;
-
-       if (state == AMD_CG_STATE_GATE)
-               gate = true;
-
-       uvd_v4_2_enable_mgcg(adev, gate);
-
        return 0;
 }
 
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-               return 0;
-
        if (state == AMD_PG_STATE_GATE) {
                uvd_v4_2_stop(adev);
                return 0;
index a79e283590fbe7fc9b627a097ccec81b99d1a66c..6de6becce74567d962eb498dcd33cbd1aedc62c6 100644 (file)
@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
-       static int curstate = -1;
 
        if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
                return 0;
 
-       if (curstate == state)
-               return 0;
-
-       curstate = state;
        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
index 6b3293a1c7b8b92d9d47d780de7c883a0beac195..37ca685e5a9a9e358eaab6d32b9d5758fec90565 100644 (file)
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE           0x07
+
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
@@ -54,6 +58,9 @@
 
 #define FW_52_8_3      ((52 << 24) | (8 << 16) | (3 << 8))
 
+#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+                                       | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
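Unlike the WREG32_FIELD() read-modify-write it replaces, GET_VCE_INSTANCE() always writes a fully-specified GRBM_GFX_INDEX value with the VCE all-pipe bits set. Spelled out with the constants defined above:

#include <stdint.h>

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07

#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

/* GET_VCE_INSTANCE(0) == 0x07 and GET_VCE_INSTANCE(1) == 0x17: the
 * instance select sits in bits 7:4, the all-pipe select in bits 2:0. */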
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
                WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
                data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-               data &= ~0xffc00000;
+               data &= ~0x3ff;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
                data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                        vce_v3_0_set_vce_sw_clock_gating(adev, false);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -320,11 +327,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
 {
        u32 tmp;
 
-       /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
+       /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
        if ((adev->asic_type == CHIP_FIJI) ||
            (adev->asic_type == CHIP_STONEY) ||
            (adev->asic_type == CHIP_POLARIS10) ||
-           (adev->asic_type == CHIP_POLARIS11))
+           (adev->asic_type == CHIP_POLARIS11) ||
+           (adev->asic_type == CHIP_POLARIS12))
                return AMDGPU_VCE_HARVEST_VCE1;
 
        /* Tonga and CZ are dual or single pipe */
@@ -585,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
         * The VCE team suggests using bits 3 to 6 for the busy status check
         */
        mutex_lock(&adev->grbm_idx_mutex);
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        mutex_unlock(&adev->grbm_idx_mutex);
 
        if (srbm_soft_reset) {
@@ -733,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                if (adev->vce.harvest_config & (1 << i))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
 
                if (enable) {
                        /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -752,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                vce_v3_0_set_vce_sw_clock_gating(adev, enable);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
index bf088d6d9bf1f96430d7afda1ec3fbeae6e82768..c2ac54f1134179457db3823eb64161817ee8f5b3 100644 (file)
@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
 
 /*
  * Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        default:
                break;
        }
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
        case CHIP_TONGA:
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                asic_register_table = cz_allowed_read_registers;
@@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle)
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x50;
                break;
+       case CHIP_POLARIS12:
+               adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
+               adev->pg_flags = 0;
+               adev->external_rev_id = adev->rev_id + 0x64;
+               break;
        case CHIP_CARRIZO:
                adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
                        AMD_CG_SUPPORT_GFX_MGCG |
@@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
        case CHIP_TONGA:
        case CHIP_POLARIS10:
        case CHIP_POLARIS11:
+       case CHIP_POLARIS12:
                vi_common_set_clockgating_state_by_smu(adev, state);
        default:
                break;
@@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                break;
        case CHIP_POLARIS11:
        case CHIP_POLARIS10:
+       case CHIP_POLARIS12:
                amdgpu_ip_block_add(adev, &vi_common_ip_block);
                amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
                amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
index ee3e04e10dae3380c09c715673dfb79773746e73..6316aad43a737cbeb8595c0f8f76fdb2450045b3 100644 (file)
@@ -486,7 +486,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
        return status;
 }
 
-static int kfd_ioctl_dbg_unrgesiter(struct file *filep,
+static int kfd_ioctl_dbg_unregister(struct file *filep,
                                struct kfd_process *p, void *data)
 {
        struct kfd_ioctl_dbg_unregister_args *args = data;
@@ -498,7 +498,7 @@ static int kfd_ioctl_dbg_unrgesiter(struct file *filep,
                return -EINVAL;
 
        if (dev->device_info->asic_family == CHIP_CARRIZO) {
-               pr_debug("kfd_ioctl_dbg_unrgesiter not supported on CZ\n");
+               pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
                return -EINVAL;
        }
 
@@ -892,7 +892,7 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
                        kfd_ioctl_dbg_register, 0),
 
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_UNREGISTER,
-                       kfd_ioctl_dbg_unrgesiter, 0),
+                       kfd_ioctl_dbg_unregister, 0),
 
        AMDKFD_IOCTL_DEF(AMDKFD_IOC_DBG_ADDRESS_WATCH,
                        kfd_ioctl_dbg_address_watch, 0),
index a6a4b2b1c0d901bb6d59497f5595355490d3f74e..6a3470f849989a7bf77ccaff862ad792daa25408 100644 (file)
@@ -739,8 +739,10 @@ int kfd_wait_on_events(struct kfd_process *p,
                struct kfd_event_data event_data;
 
                if (copy_from_user(&event_data, &events[i],
-                               sizeof(struct kfd_event_data)))
+                               sizeof(struct kfd_event_data))) {
+                       ret = -EFAULT;
                        goto fail;
+               }
 
                ret = init_event_waiter(p, &event_waiters[i],
                                event_data.event_id, i);
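The added braces fix a classic copy_from_user() pitfall: the call returns the number of bytes it could not copy, not an errno, so the caller must supply -EFAULT itself before bailing out. The general shape, independent of amdkfd:

#include <linux/uaccess.h>

/* Generic pattern: map any nonzero copy_from_user() result (bytes
 * left uncopied) to -EFAULT. */
static int fetch_from_user(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}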
index c02469ada9f131f417e2a4fdb1d0c6b1fac4eafa..85f358764bbc8f2475e0bbf36df5013312354cab 100644 (file)
@@ -23,7 +23,7 @@
 #ifndef __AMD_SHARED_H__
 #define __AMD_SHARED_H__
 
-#define AMD_MAX_USEC_TIMEOUT           100000  /* 100 ms */
+#define AMD_MAX_USEC_TIMEOUT           200000  /* 200 ms */
 
 /*
  * Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
        CHIP_STONEY,
        CHIP_POLARIS10,
        CHIP_POLARIS11,
+       CHIP_POLARIS12,
        CHIP_LAST,
 };
 
index b0c63c5f54c9ca95bc29eb069191455d1b56c0cd..6bb79c94cb9ffb5d7bec4fb104f9e32b7c3b3046 100644 (file)
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_CG_STATE_UNGATE);
+                                                       AMD_CG_STATE_GATE);
                                cgs_set_powergating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
+                                                       AMD_PG_STATE_UNGATE);
                                cz_dpm_update_vce_dpm(hwmgr);
                                cz_enable_disable_vce_dpm(hwmgr, true);
                                return 0;
index 4b14f259a147039e8e0eacc92da77f6266a2571e..0fb4e8c8f5e13866120de7325dd801c59f1d940f 100644 (file)
@@ -1402,14 +1402,22 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                                             cz_hwmgr->vce_dpm.hard_min_clk,
                                                PPSMC_MSG_SetEclkHardMin));
        } else {
-               /*EPR# 419220 -HW limitation to to */
-               cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                           PPSMC_MSG_SetEclkHardMin,
-                                           cz_get_eclk_level(hwmgr,
-                                    cz_hwmgr->vce_dpm.hard_min_clk,
-                                         PPSMC_MSG_SetEclkHardMin));
-
+               /* Program HardMin based on the vce_arbiter.ecclk */
+               if (hwmgr->vce_arbiter.ecclk == 0) {
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                           PPSMC_MSG_SetEclkHardMin, 0);
+                       /* Disable ECLK DPM 0; otherwise VCE could hang when
+                        * switching SCLK from DPM 0 to 6/7. */
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_SetEclkSoftMin, 1);
+               } else {
+                       cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_SetEclkHardMin,
+                                               cz_get_eclk_level(hwmgr,
+                                               cz_hwmgr->vce_dpm.hard_min_clk,
+                                               PPSMC_MSG_SetEclkHardMin));
+               }
        }
        return 0;
 }
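The branch above distinguishes an idle VCE (no arbiter ECLK request) from an active one: with ecclk == 0 the hard minimum is pinned to 0 while the soft minimum is raised to DPM level 1, avoiding the hang the new comment describes. A condensed sketch with hypothetical stand-ins; smu_set() and eclk_level_for() are not real driver calls, and the message values are placeholders:

#include <stdint.h>

enum { PPSMC_MSG_SetEclkHardMin = 1, PPSMC_MSG_SetEclkSoftMin = 2 };

static void smu_set(int msg, uint32_t arg) { (void)msg; (void)arg; }
static uint32_t eclk_level_for(uint32_t ecclk) { return ecclk ? 1u : 0u; }

static void program_eclk_limits(uint32_t requested_ecclk)
{
	if (requested_ecclk == 0) {
		smu_set(PPSMC_MSG_SetEclkHardMin, 0);
		/* Keep ECLK off DPM 0 so VCE cannot hang while SCLK
		 * jumps from DPM 0 to 6/7. */
		smu_set(PPSMC_MSG_SetEclkSoftMin, 1);
	} else {
		smu_set(PPSMC_MSG_SetEclkHardMin,
			eclk_level_for(requested_ecclk));
	}
}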
index dc6700aee18f624266a47d773abfb20bea573e03..b03606405a53399ea1a6755fecc637fa9bc09b23 100644 (file)
@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                        break;
                case CHIP_POLARIS11:
                case CHIP_POLARIS10:
+               case CHIP_POLARIS12:
                        polaris_set_asic_special_caps(hwmgr);
                        hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
                        break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                PHM_PlatformCaps_TablelessHardwareInterface);
 
-       if (hwmgr->chip_id == CHIP_POLARIS11)
+       if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SPLLShutdownSupport);
        return 0;
index 26477f0f09dcc7045db5626abb7a9da8b4a2b3cc..6cd1287a7a8fda1cfe38482cd47899223496b2fd 100644 (file)
@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
                                PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
                                result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
                                PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
-                       } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+                       } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
                                result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
                                PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
                                result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
index e5812aa456f3b1440e525f0592d299672db14759..6e618aa20719d27f56b05db5ed2a76b7b7829c97 100644 (file)
@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
                        break;
                case CHIP_POLARIS11:
                case CHIP_POLARIS10:
+               case CHIP_POLARIS12:
                        polaris10_smum_init(smumgr);
                        break;
                default:
index 32f746e313790499e031e9709765f35ebc28b27e..99fb0ab391919846201396c9d35c44caf6e378de 100644 (file)
@@ -22,7 +22,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_of.h>
@@ -256,6 +255,60 @@ static const struct of_device_id  malidp_drm_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
 
+static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
+                                      const struct of_device_id *dev_id)
+{
+       u32 core_id;
+       const char *compatstr_dp500 = "arm,mali-dp500";
+       bool is_dp500;
+       bool dt_is_dp500;
+
+       /*
+        * The DP500 CORE_ID register is in a different location, so check it
+        * first. If the product id field matches, then this is DP500, otherwise
+        * check the DP550/650 CORE_ID register.
+        */
+       core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
+       /* Offset 0x18 will never read 0x500 on products other than DP500. */
+       is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
+       dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
+                             sizeof(dev_id->compatible)) != NULL;
+       if (is_dp500 != dt_is_dp500) {
+               DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
+                         dev_id->compatible, is_dp500 ? "is" : "is not");
+               return false;
+       } else if (!dt_is_dp500) {
+               u16 product_id;
+               char buf[32];
+
+               core_id = malidp_hw_read(hwdev,
+                                        MALIDP550_DC_BASE + MALIDP_DE_CORE_ID);
+               product_id = MALIDP_PRODUCT_ID(core_id);
+               snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
+               if (!strnstr(dev_id->compatible, buf,
+                            sizeof(dev_id->compatible))) {
+                       DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
+                                 dev_id->compatible, product_id);
+                       return false;
+               }
+       }
+       return true;
+}
+
+static bool malidp_has_sufficient_address_space(const struct resource *res,
+                                               const struct of_device_id *dev_id)
+{
+       resource_size_t res_size = resource_size(res);
+       const char *compatstr_dp500 = "arm,mali-dp500";
+
+       if (!strnstr(dev_id->compatible, compatstr_dp500,
+                    sizeof(dev_id->compatible)))
+               return res_size >= MALIDP550_ADDR_SPACE_SIZE;
+       else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
+               return false;
+       return true;
+}
+
 #define MAX_OUTPUT_CHANNELS    3
 
 static int malidp_bind(struct device *dev)
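malidp_is_compatible_hw_id() leans on MALIDP_PRODUCT_ID() to pull the product id out of CORE_ID. A sketch of that extraction; the field layout is inferred from the 0x500/DP%03X usage above, and the authoritative mask lives in the driver's register header:

#include <stdint.h>

/* Assumed layout: product id in the top 16 bits of CORE_ID, matching
 * the 0x500 check in malidp_is_compatible_hw_id(). */
#define MALIDP_PRODUCT_ID(core_id)	((uint32_t)(core_id) >> 16)

/* Example: a DP550 reads back 0x0550xxxx, so
 * MALIDP_PRODUCT_ID(0x05500103) == 0x550. */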
@@ -266,6 +319,7 @@ static int malidp_bind(struct device *dev)
        struct malidp_drm *malidp;
        struct malidp_hw_device *hwdev;
        struct platform_device *pdev = to_platform_device(dev);
+       struct of_device_id const *dev_id;
        /* number of lines for the R, G and B output */
        u8 output_width[MAX_OUTPUT_CHANNELS];
        int ret = 0, i;
@@ -286,7 +340,6 @@ static int malidp_bind(struct device *dev)
        memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
        malidp->dev = hwdev;
 
-       INIT_LIST_HEAD(&malidp->event_list);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hwdev->regs = devm_ioremap_resource(dev, res);
@@ -329,6 +382,23 @@ static int malidp_bind(struct device *dev)
        clk_prepare_enable(hwdev->aclk);
        clk_prepare_enable(hwdev->mclk);
 
+       dev_id = of_match_device(malidp_drm_of_match, dev);
+       if (!dev_id) {
+               ret = -EINVAL;
+               goto query_hw_fail;
+       }
+
+       if (!malidp_has_sufficient_address_space(res, dev_id)) {
+               DRM_ERROR("Insufficient address space in device-tree.\n");
+               ret = -EINVAL;
+               goto query_hw_fail;
+       }
+
+       if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
+               ret = -EINVAL;
+               goto query_hw_fail;
+       }
+
        ret = hwdev->query_hw(hwdev);
        if (ret) {
                DRM_ERROR("Invalid HW configuration\n");
index 9fc8a2e405e4e7679efedd57b2ef39ddef1841ac..dbc617c6e4ef35006fb40148b83e6776fab1ef41 100644 (file)
 
 #include <linux/mutex.h>
 #include <linux/wait.h>
+#include <drm/drmP.h>
 #include "malidp_hw.h"
 
 struct malidp_drm {
        struct malidp_hw_device *dev;
        struct drm_fbdev_cma *fbdev;
-       struct list_head event_list;
        struct drm_crtc crtc;
        wait_queue_head_t wq;
        atomic_t config_valid;
index 4bdf531f78440fa8f124017dd4517c1aaf488333..488aedf5b58d54e7997b2339c75b7a90f30dcfc1 100644 (file)
@@ -21,7 +21,7 @@
 #include "malidp_drv.h"
 #include "malidp_hw.h"
 
-static const struct malidp_input_format malidp500_de_formats[] = {
+static const struct malidp_format_id malidp500_de_formats[] = {
        /*    fourcc,   layers supporting the format,     internal id  */
        { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  0 },
        { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2,  1 },
@@ -69,21 +69,21 @@ static const struct malidp_input_format malidp500_de_formats[] = {
        { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) },    \
        { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
 
-static const struct malidp_input_format malidp550_de_formats[] = {
+static const struct malidp_format_id malidp550_de_formats[] = {
        MALIDP_COMMON_FORMATS,
 };
 
 static const struct malidp_layer malidp500_layers[] = {
-       { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
-       { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
-       { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
+       { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
+       { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE },
+       { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE },
 };
 
 static const struct malidp_layer malidp550_layers[] = {
-       { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
-       { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
-       { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
-       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
+       { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
+       { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
+       { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
+       { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
 };
 
 #define MALIDP_DE_DEFAULT_PREFETCH_START       5
@@ -436,8 +436,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                                .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
                                .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
                        },
-                       .input_formats = malidp500_de_formats,
-                       .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
+                       .pixel_formats = malidp500_de_formats,
+                       .n_pixel_formats = ARRAY_SIZE(malidp500_de_formats),
                        .bus_align_bytes = 8,
                },
                .query_hw = malidp500_query_hw,
@@ -447,6 +447,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                .set_config_valid = malidp500_set_config_valid,
                .modeset = malidp500_modeset,
                .rotmem_required = malidp500_rotmem_required,
+               .features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
        },
        [MALIDP_550] = {
                .map = {
@@ -469,8 +470,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                                .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
                                .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
                        },
-                       .input_formats = malidp550_de_formats,
-                       .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+                       .pixel_formats = malidp550_de_formats,
+                       .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
                        .bus_align_bytes = 8,
                },
                .query_hw = malidp550_query_hw,
@@ -480,6 +481,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                .set_config_valid = malidp550_set_config_valid,
                .modeset = malidp550_modeset,
                .rotmem_required = malidp550_rotmem_required,
+               .features = 0,
        },
        [MALIDP_650] = {
                .map = {
@@ -503,8 +505,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                                .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
                                .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
                        },
-                       .input_formats = malidp550_de_formats,
-                       .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
+                       .pixel_formats = malidp550_de_formats,
+                       .n_pixel_formats = ARRAY_SIZE(malidp550_de_formats),
                        .bus_align_bytes = 16,
                },
                .query_hw = malidp650_query_hw,
@@ -514,6 +516,7 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
                .set_config_valid = malidp550_set_config_valid,
                .modeset = malidp550_modeset,
                .rotmem_required = malidp550_rotmem_required,
+               .features = 0,
        },
 };
 
@@ -522,10 +525,10 @@ u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
 {
        unsigned int i;
 
-       for (i = 0; i < map->n_input_formats; i++) {
-               if (((map->input_formats[i].layer & layer_id) == layer_id) &&
-                   (map->input_formats[i].format == format))
-                       return map->input_formats[i].id;
+       for (i = 0; i < map->n_pixel_formats; i++) {
+               if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
+                   (map->pixel_formats[i].format == format))
+                       return map->pixel_formats[i].id;
        }
 
        return MALIDP_INVALID_FORMAT_ID;
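
The renamed malidp_format_id table drives a two-key lookup: an entry matches when it carries the requested DRM fourcc and its layer bitmask covers every requested layer bit. A self-contained userspace sketch of that logic follows; the table entries, layer bits and hardware IDs are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct format_id {
        uint32_t format;        /* DRM fourcc */
        uint8_t layer;          /* bitmask of layers supporting it */
        uint8_t id;             /* hardware format ID */
};

#define DE_VIDEO1       (1u << 0)
#define DE_GRAPHICS1    (1u << 1)
#define INVALID_ID      0xffu

static const struct format_id table[] = {
        { 0x34325258 /* XR24 */, DE_VIDEO1 | DE_GRAPHICS1, 2 },
        { 0x3231564e /* NV12 */, DE_VIDEO1, 9 },
};

static uint8_t get_format_id(uint8_t layer_id, uint32_t format)
{
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if ((table[i].layer & layer_id) == layer_id &&
                    table[i].format == format)
                        return table[i].id;
        return INVALID_ID;
}

int main(void)
{
        printf("NV12 on video layer    -> id %u\n",
               (unsigned)get_format_id(DE_VIDEO1, 0x3231564e));
        printf("NV12 on graphics layer -> id %u\n",
               (unsigned)get_format_id(DE_GRAPHICS1, 0x3231564e));
        return 0;
}
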
index 087e1202db3d1201822def3693a1797a19a53c3f..00974b59407df59915c5751d5845228d040f91b7 100644 (file)
@@ -35,7 +35,7 @@ enum {
        DE_SMART = BIT(4),
 };
 
-struct malidp_input_format {
+struct malidp_format_id {
        u32 format;             /* DRM fourcc */
        u8 layer;               /* bitmask of layers supporting it */
        u8 id;                  /* used internally */
@@ -58,6 +58,7 @@ struct malidp_layer {
        u16 id;                 /* layer ID */
        u16 base;               /* address offset for the register bank */
        u16 ptr;                /* address offset for the pointer register */
+       u16 stride_offset;      /* Offset to the first stride register. */
 };
 
 /* regmap features */
@@ -85,14 +86,18 @@ struct malidp_hw_regmap {
        const struct malidp_irq_map se_irq_map;
        const struct malidp_irq_map dc_irq_map;
 
-       /* list of supported input formats for each layer */
-       const struct malidp_input_format *input_formats;
-       const u8 n_input_formats;
+       /* list of supported pixel formats for each layer */
+       const struct malidp_format_id *pixel_formats;
+       const u8 n_pixel_formats;
 
        /* pitch alignment requirement in bytes */
        const u8 bus_align_bytes;
 };
 
+/* device features */
+/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
+#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
+
 struct malidp_hw_device {
        const struct malidp_hw_regmap map;
        void __iomem *regs;
index eff2fe47e26a71fad70c7c681de43dcc2fcfadd8..414aada10fe5e7d43392aa835b4c01aba594bcb7 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_gem_cma_helper.h>
@@ -36,7 +37,6 @@
 #define   LAYER_V_VAL(x)               (((x) & 0x1fff) << 16)
 #define MALIDP_LAYER_COMP_SIZE         0x010
 #define MALIDP_LAYER_OFFSET            0x014
-#define MALIDP_LAYER_STRIDE            0x018
 
 /*
  * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -67,13 +67,14 @@ drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
                return NULL;
 
        state = kmalloc(sizeof(*state), GFP_KERNEL);
-       if (state) {
-               m_state = to_malidp_plane_state(plane->state);
-               __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
-               state->rotmem_size = m_state->rotmem_size;
-               state->format = m_state->format;
-               state->n_planes = m_state->n_planes;
-       }
+       if (!state)
+               return NULL;
+
+       m_state = to_malidp_plane_state(plane->state);
+       __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+       state->rotmem_size = m_state->rotmem_size;
+       state->format = m_state->format;
+       state->n_planes = m_state->n_planes;
 
        return &state->base;
 }
@@ -102,8 +103,10 @@ static int malidp_de_plane_check(struct drm_plane *plane,
 {
        struct malidp_plane *mp = to_malidp_plane(plane);
        struct malidp_plane_state *ms = to_malidp_plane_state(state);
+       struct drm_crtc_state *crtc_state;
        struct drm_framebuffer *fb;
-       int i;
+       struct drm_rect clip = { 0 };
+       int i, ret;
        u32 src_w, src_h;
 
        if (!state->crtc || !state->fb)
@@ -131,8 +134,17 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        if ((state->crtc_w > mp->hwdev->max_line_size) ||
            (state->crtc_h > mp->hwdev->max_line_size) ||
            (state->crtc_w < mp->hwdev->min_line_size) ||
-           (state->crtc_h < mp->hwdev->min_line_size) ||
-           (state->crtc_w != src_w) || (state->crtc_h != src_h))
+           (state->crtc_h < mp->hwdev->min_line_size))
+               return -EINVAL;
+
+       /*
+        * DP550/650 video layers can accept 3 plane formats only if
+        * fb->pitches[1] == fb->pitches[2] since they don't have a
+        * third plane stride register.
+        */
+       if (ms->n_planes == 3 &&
+           !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+           (state->fb->pitches[1] != state->fb->pitches[2]))
                return -EINVAL;
 
        /* packed RGB888 / BGR888 can't be rotated or flipped */
@@ -141,6 +153,16 @@ static int malidp_de_plane_check(struct drm_plane *plane,
             fb->format->format == DRM_FORMAT_BGR888))
                return -EINVAL;
 
+       crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+       clip.x2 = crtc_state->adjusted_mode.hdisplay;
+       clip.y2 = crtc_state->adjusted_mode.vdisplay;
+       ret = drm_plane_helper_check_state(state, &clip,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          true, true);
+       if (ret)
+               return ret;
+
        ms->rotmem_size = 0;
        if (state->rotation & MALIDP_ROTATED_MASK) {
                int val;
@@ -157,6 +179,25 @@ static int malidp_de_plane_check(struct drm_plane *plane,
        return 0;
 }
 
+static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
+                                       int num_planes, unsigned int pitches[3])
+{
+       int i;
+       int num_strides = num_planes;
+
+       if (!mp->layer->stride_offset)
+               return;
+
+       if (num_planes == 3)
+               num_strides = (mp->hwdev->features &
+                              MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
+
+       for (i = 0; i < num_strides; ++i)
+               malidp_hw_write(mp->hwdev, pitches[i],
+                               mp->layer->base +
+                               mp->layer->stride_offset + i * 4);
+}
+
 static void malidp_de_plane_update(struct drm_plane *plane,
                                   struct drm_plane_state *old_state)
 {
@@ -174,13 +215,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
        /* convert src values from Q16 fixed point to integer */
        src_w = plane->state->src_w >> 16;
        src_h = plane->state->src_h >> 16;
-       if (plane->state->rotation & MALIDP_ROTATED_MASK) {
-               dest_w = plane->state->crtc_h;
-               dest_h = plane->state->crtc_w;
-       } else {
-               dest_w = plane->state->crtc_w;
-               dest_h = plane->state->crtc_h;
-       }
+       dest_w = plane->state->crtc_w;
+       dest_h = plane->state->crtc_h;
 
        malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
 
@@ -189,11 +225,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
                ptr = mp->layer->ptr + (i << 4);
 
                obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
+               obj->paddr += plane->state->fb->offsets[i];
                malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
                malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
-               malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
-                               mp->layer->base + MALIDP_LAYER_STRIDE);
        }
+       malidp_de_set_plane_pitches(mp, ms->n_planes,
+                                   plane->state->fb->pitches);
 
        malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
                        mp->layer->base + MALIDP_LAYER_SIZE);
@@ -211,11 +248,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
 
        /* setup the rotation and axis flip bits */
        if (plane->state->rotation & DRM_ROTATE_MASK)
-               val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
+               val |= ilog2(plane->state->rotation & DRM_ROTATE_MASK) <<
+                      LAYER_ROT_OFFSET;
        if (plane->state->rotation & DRM_REFLECT_X)
-               val |= LAYER_V_FLIP;
-       if (plane->state->rotation & DRM_REFLECT_Y)
                val |= LAYER_H_FLIP;
+       if (plane->state->rotation & DRM_REFLECT_Y)
+               val |= LAYER_V_FLIP;
 
        /*
         * always enable pixel alpha blending until we have a way to change
@@ -258,7 +296,7 @@ int malidp_de_planes_init(struct drm_device *drm)
        u32 *formats;
        int ret, i, j, n;
 
-       formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
+       formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
        if (!formats) {
                ret = -ENOMEM;
                goto cleanup;
@@ -274,9 +312,9 @@ int malidp_de_planes_init(struct drm_device *drm)
                }
 
                /* build the list of DRM supported formats based on the map */
-               for (n = 0, j = 0;  j < map->n_input_formats; j++) {
-                       if ((map->input_formats[j].layer & id) == id)
-                               formats[n++] = map->input_formats[j].format;
+               for (n = 0, j = 0;  j < map->n_pixel_formats; j++) {
+                       if ((map->pixel_formats[j].layer & id) == id)
+                               formats[n++] = map->pixel_formats[j].format;
                }
 
                plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
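
The stride handling in this file splits into two decisions: how many stride registers to program (DP500 video layers have three, DP550/650 only two), and the matching atomic-check rule that rejects 3-plane framebuffers whose chroma pitches differ on 2-stride hardware. A runnable sketch of both decisions, with illustrative pitch values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LV_HAS_3_STRIDES (1u << 0)      /* DP500-style video layers */

/* How many stride registers get programmed for a given plane count:
 * DP550/650 video layers fold the two chroma planes onto one stride. */
static int num_strides(unsigned int features, int num_planes)
{
        if (num_planes == 3)
                return (features & LV_HAS_3_STRIDES) ? 3 : 2;
        return num_planes;
}

/* The atomic-check rule: on 2-stride hardware, a 3-plane framebuffer
 * is only valid when both chroma pitches agree. */
static bool pitches_ok(unsigned int features, int num_planes,
                       const uint32_t pitches[3])
{
        if (num_planes == 3 && !(features & LV_HAS_3_STRIDES))
                return pitches[1] == pitches[2];
        return true;
}

int main(void)
{
        uint32_t even[3] = { 1920, 960, 960 };  /* illustrative pitches */
        uint32_t odd[3]  = { 1920, 960, 968 };

        printf("DP550 strides for 3-plane YUV: %d\n", num_strides(0, 3));
        printf("equal chroma pitches ok:   %d\n", pitches_ok(0, 3, even));
        printf("unequal chroma pitches ok: %d\n", pitches_ok(0, 3, odd));
        return 0;
}
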
index 73fecb38f9550b6cf7aa1e91cd310395ebf719ea..aff6d4a84e998c6cc1d01e3067d0f52712daa145 100644 (file)
 #define MALIDP_DE_SYNC_WIDTH           0x8
 #define MALIDP_DE_HV_ACTIVE            0xc
 
+/* Stride register offsets relative to Lx_BASE */
+#define MALIDP_DE_LG_STRIDE            0x18
+#define MALIDP_DE_LV_STRIDE0           0x18
+
 /* macros to set values into registers */
 #define MALIDP_DE_H_FRONTPORCH(x)      (((x) & 0xfff) << 0)
 #define MALIDP_DE_H_BACKPORCH(x)       (((x) & 0x3ff) << 16)
 #define MALIDP_DE_H_ACTIVE(x)          (((x) & 0x1fff) << 0)
 #define MALIDP_DE_V_ACTIVE(x)          (((x) & 0x1fff) << 16)
 
+#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16)
+
 /* register offsets and bits specific to DP500 */
+#define MALIDP500_ADDR_SPACE_SIZE      0x01000
 #define MALIDP500_DC_BASE              0x00000
 #define MALIDP500_DC_CONTROL           0x0000c
 #define   MALIDP500_DC_CONFIG_REQ      (1 << 17)
 #define MALIDP500_CONFIG_ID            0x00fd4
 
 /* register offsets and bits specific to DP550/DP650 */
+#define MALIDP550_ADDR_SPACE_SIZE      0x10000
 #define MALIDP550_DE_CONTROL           0x00010
 #define MALIDP550_DE_LINE_COUNTER      0x00014
 #define MALIDP550_DE_AXI_CONTROL       0x00018
index cbd0070265c9e07ff1c0f5a77eb067e1cbb83869..0bf32d6ac39ba0ddce7b149520e4f5c84a0c1fc9 100644 (file)
@@ -431,15 +431,8 @@ static void atmel_hlcdc_fb_output_poll_changed(struct drm_device *dev)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
 
-       if (dc->fbdev) {
+       if (dc->fbdev)
                drm_fbdev_cma_hotplug_event(dc->fbdev);
-       } else {
-               dc->fbdev = drm_fbdev_cma_init(dev, 24,
-                               dev->mode_config.num_crtc,
-                               dev->mode_config.num_connector);
-               if (IS_ERR(dc->fbdev))
-                       dc->fbdev = NULL;
-       }
 }
 
 struct atmel_hlcdc_dc_commit {
@@ -653,10 +646,13 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
 
        platform_set_drvdata(pdev, dev);
 
-       drm_kms_helper_poll_init(dev);
+       dc->fbdev = drm_fbdev_cma_init(dev, 24,
+                       dev->mode_config.num_crtc,
+                       dev->mode_config.num_connector);
+       if (IS_ERR(dc->fbdev))
+               dc->fbdev = NULL;
 
-       /* force connectors detection */
-       drm_helper_hpd_irq_event(dev);
+       drm_kms_helper_poll_init(dev);
 
        return 0;
 
index 02b97bf64ee4ea962033ffd51714dc09600f289e..e7cd1056ff2d3b6cdc45ec4a98a94ea351248534 100644 (file)
@@ -1385,6 +1385,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
        pm_runtime_enable(dev);
 
+       pm_runtime_get_sync(dev);
        phy_power_on(dp->phy);
 
        analogix_dp_init_dp(dp);
@@ -1417,9 +1418,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
                goto err_disable_pm_runtime;
        }
 
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
+
        return 0;
 
 err_disable_pm_runtime:
+
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
 
        return ret;
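
The analogix change brackets the probe-time hardware initialisation with a runtime-PM reference and PHY power, and releases both on the success and the error path. A condensed sketch of the pattern, not compilable on its own; do_one_time_init() is a hypothetical stand-in for the DP setup and registration calls in between:

/* do_one_time_init() is a hypothetical stand-in, not from this patch */
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);       /* keep the device powered during init */
phy_power_on(dp->phy);

ret = do_one_time_init(dp);

/* the bring-up is mirrored on both the success and the error path */
phy_power_off(dp->phy);
pm_runtime_put(dev);
if (ret)
        pm_runtime_disable(dev);
return ret;
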
index ca3809851377ff92593e9c50cca327e76e143385..fc78c90ee9317635c66fb60ffb89bfb79d89e42a 100644 (file)
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
         This is a KMS driver for emulated cirrus device in qemu.
         It is *NOT* intended for real cirrus devices. This requires
         the modesetting userspace X.org driver.
+
+        Cirrus is obsolete: the hardware was designed in the 1990s
+        and can't keep up with today's needs.  More background:
+        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+        Better alternatives are:
+          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+          - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
index 43049882b811e93da0bc7b5f5beabcb8ed005667..9a08445a7a7ac61bd2d6d5cca9cea70fa7359773 100644 (file)
@@ -1232,8 +1232,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
 
        if (!nonblock) {
                ret = drm_atomic_helper_wait_for_fences(dev, state, true);
-               if (ret)
+               if (ret) {
+                       drm_atomic_helper_cleanup_planes(dev, state);
                        return ret;
+               }
        }
 
        /*
index 18c1b2cbfcdb5da260301553f32d9b680778bab2..6cbd67f4fbc5e70817a8b5c810f8ac2f6cef99e1 100644 (file)
@@ -598,6 +598,8 @@ static void drm_dev_release(struct kref *ref)
 {
        struct drm_device *dev = container_of(ref, struct drm_device, ref);
 
+       drm_vblank_cleanup(dev);
+
        if (drm_core_check_feature(dev, DRIVER_GEM))
                drm_gem_destroy(dev);
 
@@ -805,8 +807,6 @@ void drm_dev_unregister(struct drm_device *dev)
        if (dev->agp)
                drm_pci_agp_destroy(dev);
 
-       drm_vblank_cleanup(dev);
-
        list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
                drm_legacy_rmmap(dev, r_list->map);
 
index 30c716ca236b1fe2ff34b9bfa2af6468aa287ef8..a8616b1a8d22d3a4e0c03c9314416a6817a58dfb 100644 (file)
@@ -1480,6 +1480,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
                return NULL;
 
        mode->type |= DRM_MODE_TYPE_USERDEF;
+       /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+       if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+               mode->hdisplay = 1366;
+               mode->hsync_start--;
+               mode->hsync_end--;
+               drm_mode_set_name(mode);
+       }
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        return mode;
 }
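
Since GTF/CVT round horizontal timings to multiples of 8, a video=1366x768 command line produced a 1368-wide mode before this fixup. A minimal runnable sketch of the correction; the sync timings below are illustrative, not real CVT output.

#include <stdio.h>

struct mode {
        int hdisplay, hsync_start, hsync_end;
};

/* Mirror of the fixup above: shrink hdisplay back to the requested
 * 1366 and pull the sync pulse in by one pixel to keep the timing
 * self-consistent. */
static void fixup_1366(int requested_xres, struct mode *m)
{
        if (requested_xres == 1366 && m->hdisplay == 1368) {
                m->hdisplay = 1366;
                m->hsync_start--;
                m->hsync_end--;
        }
}

int main(void)
{
        struct mode m = { 1368, 1440, 1576 };   /* illustrative timings */

        fixup_1366(1366, &m);
        printf("hdisplay %d, hsync %d-%d\n",
               m.hdisplay, m.hsync_start, m.hsync_end);
        return 0;
}
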
index 060211ac74a1c90a5142301426864c8497a9279f..93381454bdf7b92d6718f65b02c1a76bb674509b 100644 (file)
@@ -115,25 +115,28 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
 
 #define DRM_OUTPUT_POLL_PERIOD (10*HZ)
 /**
- * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * drm_kms_helper_poll_enable - re-enable output polling.
  * @dev: drm_device
  *
- * This function re-enables the output polling work without
- * locking the mode_config mutex.
+ * This function re-enables the output polling work after it has been
+ * temporarily disabled using drm_kms_helper_poll_disable(), for example over
+ * suspend/resume.
  *
- * This is like drm_kms_helper_poll_enable() however it is to be
- * called from a context where the mode_config mutex is locked
- * already.
+ * Drivers can call this helper from their device resume implementation. It is
+ * an error to call this when the output polling support has not yet been set
+ * up.
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
  */
-void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
+void drm_kms_helper_poll_enable(struct drm_device *dev)
 {
        bool poll = false;
        struct drm_connector *connector;
        struct drm_connector_list_iter conn_iter;
        unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
 
-       WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
-
        if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
                return;
 
@@ -146,14 +149,24 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
        drm_connector_list_iter_put(&conn_iter);
 
        if (dev->mode_config.delayed_event) {
+               /*
+                * FIXME:
+                *
+                * Use a short (1s) delay to handle the initial delayed event.
+                * The delay should not be needed, but Optimus/nouveau will
+                * fail in a mysterious way if the delayed event is handled
+                * immediately, the way drm_helper_probe_single_connector_modes()
+                * handles it when polling was enabled before.
+                */
                poll = true;
-               delay = 0;
+               delay = HZ;
        }
 
        if (poll)
                schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
 }
-EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
 static enum drm_connector_status
 drm_connector_detect(struct drm_connector *connector, bool force)
@@ -280,7 +293,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
        /* Re-enable polling in case the global poll config changed. */
        if (drm_kms_helper_poll != dev->mode_config.poll_running)
-               drm_kms_helper_poll_enable_locked(dev);
+               drm_kms_helper_poll_enable(dev);
 
        dev->mode_config.poll_running = drm_kms_helper_poll;
 
@@ -474,8 +487,12 @@ out:
  * This function disables the output polling work.
  *
  * Drivers can call this helper from their device suspend implementation. It is
- * not an error to call this even when output polling isn't enabled or arlready
- * disabled.
+ * not an error to call this even when output polling isn't enabled or already
+ * disabled. Polling is re-enabled by calling drm_kms_helper_poll_enable().
+ *
+ * Note that calls to enable and disable polling must be strictly ordered, which
+ * is automatically the case when they're only called from suspend/resume
+ * callbacks.
  */
 void drm_kms_helper_poll_disable(struct drm_device *dev)
 {
@@ -485,24 +502,6 @@ void drm_kms_helper_poll_disable(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
-/**
- * drm_kms_helper_poll_enable - re-enable output polling.
- * @dev: drm_device
- *
- * This function re-enables the output polling work.
- *
- * Drivers can call this helper from their device resume implementation. It is
- * an error to call this when the output polling support has not yet been set
- * up.
- */
-void drm_kms_helper_poll_enable(struct drm_device *dev)
-{
-       mutex_lock(&dev->mode_config.mutex);
-       drm_kms_helper_poll_enable_locked(dev);
-       mutex_unlock(&dev->mode_config.mutex);
-}
-EXPORT_SYMBOL(drm_kms_helper_poll_enable);
-
 /**
  * drm_kms_helper_poll_init - initialize and enable output polling
  * @dev: drm_device
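
With the locked variant folded into drm_kms_helper_poll_enable(), the intended usage is one disable on suspend paired with one enable on resume. A hedged sketch of that pairing; the foo_* names are hypothetical driver callbacks, not from this patch:

/* foo_* callbacks are hypothetical; the pairing keeps enable/disable
 * strictly ordered, as the updated kernel-doc requires. */
static int foo_suspend(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        drm_kms_helper_poll_disable(drm);
        return 0;
}

static int foo_resume(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);

        drm_kms_helper_poll_enable(drm);
        return 0;
}
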
index ae2733a609bae3c8f4fff5772ccf755123d77e91..f503af462dadd715afcca7df677fb93a92050161 100644 (file)
@@ -117,9 +117,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                struct list_head list;
                bool found;
 
+               /*
+                * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+                * drm_mm into giving out a low IOVA after address space
+                * rollover. This needs a proper fix.
+                */
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                        size, 0, mmu->last_iova, ~0UL,
-                       DRM_MM_SEARCH_DEFAULT);
+                       mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
 
                if (ret != -ENOSPC)
                        break;
index 31eafc546d6c06c8232bd0f363d207fd75188ba6..d69af00bdd6ab2332e9088402a0e25d90759c027 100644 (file)
@@ -46,7 +46,8 @@ enum decon_flag_bits {
        BIT_CLKS_ENABLED,
        BIT_IRQS_ENABLED,
        BIT_WIN_UPDATED,
-       BIT_SUSPENDED
+       BIT_SUSPENDED,
+       BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
                m->crtc_vsync_end = m->crtc_vsync_start + 1;
        }
 
-       decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
-       /* enable clock gate */
-       val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
-       writel(val, ctx->addr + DECON_CMU);
-
        if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
                decon_setup_trigger(ctx);
 
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
        /* window enable */
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
                return;
 
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
-       /* standalone update */
-       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+       if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+               decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 
        if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
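
The new BIT_REQUEST_UPDATE flag turns the previously unconditional standalone update into a latch: plane enable/disable sets it, and the flush issues the hardware update only when something changed since the last flush. A runnable C11 model of the latch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool request_update = false;

static void update_plane(void)
{
        atomic_store(&request_update, true);    /* set_bit(BIT_REQUEST_UPDATE) */
}

static void atomic_flush(void)
{
        /* test_and_clear_bit(BIT_REQUEST_UPDATE, ...) */
        if (atomic_exchange(&request_update, false))
                printf("write STANDALONE_UPDATE_F\n");
        else
                printf("no pending change, skip update\n");
}

int main(void)
{
        atomic_flush();         /* nothing latched yet: skipped */
        update_plane();
        atomic_flush();         /* latched change: update issued */
        return 0;
}
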
index 5196509e71cf38aecdb7354fd220069537ec8a27..74ca2e8b2494aceec16ba449bf6c3b98acf77f9d 100644 (file)
@@ -56,7 +56,9 @@ i915-y += i915_cmd_parser.o \
 
 # general-purpose microcontroller (GuC) support
 i915-y += intel_uc.o \
+         intel_guc_log.o \
          intel_guc_loader.o \
+         intel_huc.o \
          i915_guc_submission.o
 
 # autogenerated null render state
index 7d33b607bc89dc22ee5f530b040af69e347414d5..7311aeab16f7ae4b1256cdc60e1e5d4f95acc474 100644 (file)
 #include "i915_drv.h"
 #include "gvt.h"
 
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
        struct intel_gvt *gvt = vgpu->gvt;
        struct drm_i915_private *dev_priv = gvt->dev_priv;
-       u32 alloc_flag, search_flag;
+       unsigned int flags;
        u64 start, end, size;
        struct drm_mm_node *node;
-       int retried = 0;
        int ret;
 
        if (high_gm) {
-               search_flag = DRM_MM_SEARCH_BELOW;
-               alloc_flag = DRM_MM_CREATE_TOP;
                node = &vgpu->gm.high_gm_node;
                size = vgpu_hidden_sz(vgpu);
                start = gvt_hidden_gmadr_base(gvt);
                end = gvt_hidden_gmadr_end(gvt);
+               flags = PIN_HIGH;
        } else {
-               search_flag = DRM_MM_SEARCH_DEFAULT;
-               alloc_flag = DRM_MM_CREATE_DEFAULT;
                node = &vgpu->gm.low_gm_node;
                size = vgpu_aperture_sz(vgpu);
                start = gvt_aperture_gmadr_base(gvt);
                end = gvt_aperture_gmadr_end(gvt);
+               flags = PIN_MAPPABLE;
        }
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-search_again:
-       ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
-                                                 node, size, 4096,
-                                                 I915_COLOR_UNEVICTABLE,
-                                                 start, end, search_flag,
-                                                 alloc_flag);
-       if (ret) {
-               ret = i915_gem_evict_something(&dev_priv->ggtt.base,
-                                              size, 4096,
-                                              I915_COLOR_UNEVICTABLE,
-                                              start, end, 0);
-               if (ret == 0 && ++retried < 3)
-                       goto search_again;
-
-               gvt_err("fail to alloc %s gm space from host, retried %d\n",
-                               high_gm ? "high" : "low", retried);
-       }
+       ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node,
+                                 size, 4096, I915_COLOR_UNEVICTABLE,
+                                 start, end, flags);
        mutex_unlock(&dev_priv->drm.struct_mutex);
+       if (ret)
+               gvt_err("fail to alloc %s gm space from host\n",
+                       high_gm ? "high" : "low");
+
        return ret;
 }
 
@@ -168,6 +148,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        POSTING_READ(fence_reg_lo);
 }
 
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+       int i;
+
+       for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+               intel_vgpu_write_fence(vgpu, i, 0);
+}
+
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -181,9 +169,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
+       _clear_vgpu_fence(vgpu);
        for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                reg = vgpu->fence.regs[i];
-               intel_vgpu_write_fence(vgpu, i, 0);
                list_add_tail(&reg->link,
                              &dev_priv->mm.fence_list);
        }
@@ -211,13 +199,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                        continue;
                list_del(pos);
                vgpu->fence.regs[i] = reg;
-               intel_vgpu_write_fence(vgpu, i, 0);
                if (++i == vgpu_fence_sz(vgpu))
                        break;
        }
        if (i != vgpu_fence_sz(vgpu))
                goto out_free_fence;
 
+       _clear_vgpu_fence(vgpu);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_put(dev_priv);
        return 0;
@@ -316,6 +305,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
        free_resource(vgpu);
 }
 
+/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       intel_runtime_pm_get(dev_priv);
+       _clear_vgpu_fence(vgpu);
+       intel_runtime_pm_put(dev_priv);
+}
+
 /**
  * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
  * @vgpu: vGPU
index 711c31c8d8b46c3c51e4f93a741daecf4b28ce31..4a6a2ed65732e1fde39457148165274deda52db6 100644 (file)
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
        return 0;
 }
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: whether the vGPU is presented as the primary display adapter
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+                              bool primary)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+       u16 *gmch_ctl;
+       int i;
+
+       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+              info->cfg_space_size);
+
+       if (!primary) {
+               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+       }
+
+       /* Show the guest that there isn't any stolen memory. */
+       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+                                gvt_aperture_pa_base(gvt), true);
+
+       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+                                            | PCI_COMMAND_MEMORY
+                                            | PCI_COMMAND_MASTER);
+       /*
+        * Clear the upper 32 bits of each BAR and let the guest assign new values
+        */
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+               vgpu->cfg_space.bar[i].size = pci_resource_len(
+                                             gvt->dev_priv->drm.pdev, i * 2);
+               vgpu->cfg_space.bar[i].tracked = false;
+       }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+       u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+       bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+                               INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+       if (cmd & PCI_COMMAND_MEMORY) {
+               trap_gttmmio(vgpu, false);
+               map_aperture(vgpu, false);
+       }
+
+       /*
+        * Currently we only do such a reset when the vGPU is not
+        * owned by any VM, so we simply restore the entire cfg
+        * space to its default values.
+        */
+       intel_vgpu_init_cfg_space(vgpu, primary);
+}
index 6c5fdf5b2ce2a9d407839a3a28a7e067a5630d8d..47dec4acf7ff12951eb592e2b115953e961f6bdf 100644 (file)
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-       u64 pte;
 
-#ifdef readq
-       pte = readq(addr);
-#else
-       pte = ioread32(addr);
-       pte |= (u64)ioread32(addr + 4) << 32;
-#endif
-       return pte;
+       return readq(addr);
 }
 
 static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
-#ifdef writeq
        writeq(pte, addr);
-#else
-       iowrite32((u32)pte, addr);
-       iowrite32(pte >> 32, addr + 4);
-#endif
+
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
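
The removed #ifdef branches composed the 64-bit PTE access from two 32-bit operations on platforms without readq/writeq, presumably dropped because this configuration always has the 64-bit accessors available. A runnable sketch of what the read fallback computed, shown on a plain buffer rather than MMIO:

#include <stdint.h>
#include <stdio.h>

/* A 64-bit PTE assembled from two 32-bit reads, low word first; the
 * driver used ioread32() on the GGTT aperture for this. */
static uint64_t read64_split(const volatile uint32_t *addr)
{
        uint64_t lo = addr[0];
        uint64_t hi = addr[1];

        return lo | (hi << 32);
}

int main(void)
{
        uint32_t pte[2] = { 0xdeadbeef, 0x00000001 };

        printf("pte = 0x%016llx\n",
               (unsigned long long)read64_split(pte));
        return 0;
}
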
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
                        info->gtt_entry_size;
                mem = kzalloc(mm->has_shadow_page_table ?
                        mm->page_table_entry_size * 2
-                               : mm->page_table_entry_size,
-                       GFP_ATOMIC);
+                               : mm->page_table_entry_size, GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        struct intel_vgpu_mm *mm;
        int ret;
 
-       mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm) {
                ret = -ENOMEM;
                goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
-       struct page *scratch_pt;
+       void *scratch_pt;
        unsigned long mfn;
        int i;
-       void *p;
 
        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;
 
-       scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+       scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
                gvt_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }
 
-       p = kmap_atomic(scratch_pt);
-       mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+       mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
-               kunmap_atomic(p);
-               __free_page(scratch_pt);
+               gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+               free_page((unsigned long)scratch_pt);
                return -EFAULT;
        }
        gtt->scratch_pt[type].page_mfn = mfn;
-       gtt->scratch_pt[type].page = scratch_pt;
+       gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
                        vgpu->id, type, mfn);
 
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         * scratch_pt[type] indicate the scratch pt/scratch page used by the
         * 'type' pt.
         * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
-        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
         * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
         */
        if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                        se.val64 |= PPAT_CACHED_INDEX;
 
                for (i = 0; i < page_entry_num; i++)
-                       ops->set_entry(p, &se, i, false, 0, vgpu);
+                       ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
        }
 
-       kunmap_atomic(p);
-
        return 0;
 }
 
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
        int ret;
-       void *page_addr;
+       void *page;
 
        gvt_dbg_core("init gtt\n");
 
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                return -ENODEV;
        }
 
-       gvt->gtt.scratch_ggtt_page =
-               alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-       if (!gvt->gtt.scratch_ggtt_page) {
+       page = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!page) {
                gvt_err("fail to allocate scratch ggtt page\n");
                return -ENOMEM;
        }
+       gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-       page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-       gvt->gtt.scratch_ggtt_mfn =
-               intel_gvt_hypervisor_virt_to_mfn(page_addr);
+       gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
        if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_err("fail to translate scratch ggtt page\n");
                __free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
        for (offset = 0; offset < num_entries; offset++)
                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }
+
+/**
+ * intel_vgpu_reset_gtt - reset all GTT related state
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset all
+ * GTT related state, including GGTT, PPGTT and the scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+       int i;
+
+       ppgtt_free_all_shadow_page(vgpu);
+       if (!dmlr)
+               return;
+
+       intel_vgpu_reset_ggtt(vgpu);
+
+       /* clear scratch page for security */
+       for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+               if (vgpu->gtt.scratch_pt[i].page != NULL)
+                       memset(page_address(vgpu->gtt.scratch_pt[i].page),
+                               0, PAGE_SIZE);
+       }
+}
index b315ab3593ec37f2e73faf564a6d6c9fee9e7c81..f88eb5e89bea09f7b6e8aba2e521748d54d28b77 100644 (file)
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
index 398877c3d2fd98a19ba8f45712ef444a4fcb0896..e6bf5c533fbe5c795a7cef6baef2815aea797ce7 100644 (file)
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
        intel_gvt_clean_vgpu_types(gvt);
 
+       idr_destroy(&gvt->vgpu_idr);
+
        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        gvt_dbg_core("init gvt device\n");
 
+       idr_init(&gvt->vgpu_idr);
+
        mutex_init(&gvt->lock);
        gvt->dev_priv = dev_priv;
 
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        ret = intel_gvt_setup_mmio_info(gvt);
        if (ret)
-               return ret;
+               goto out_clean_idr;
 
        ret = intel_gvt_load_firmware(gvt);
        if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
        intel_gvt_free_firmware(gvt);
 out_clean_mmio_info:
        intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+       idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
        return ret;
 }
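
The new out_clean_idr label completes the usual kernel unwind idiom: every failure jumps to the label that tears down exactly what was set up so far, so the idr_init() added here is always paired with idr_destroy(). A runnable toy version of the idiom, with malloc/free standing in for the init/destroy pairs:

#include <stdio.h>
#include <stdlib.h>

static int init_device(int fail_second_step)
{
        void *idr, *mmio_info;
        int ret;

        idr = malloc(64);                       /* idr_init() */
        if (!idr)
                return -12;                     /* -ENOMEM */

        mmio_info = fail_second_step ? NULL : malloc(64);
        if (!mmio_info) {                       /* setup_mmio_info() failed */
                ret = -12;
                goto out_clean_idr;
        }

        /* further init steps would follow, each with its own label;
         * freed here only to keep the toy leak-free */
        free(mmio_info);
        free(idr);
        return 0;

out_clean_idr:
        free(idr);                              /* idr_destroy() */
        return ret;
}

int main(void)
{
        printf("failing init -> %d, clean init -> %d\n",
               init_device(1), init_device(0));
        return 0;
}
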
index 0af17016f33f24f40d338715e5c78bfac8058e92..e227caf5859ebdfd2c420bc994d42a5734ba4272 100644 (file)
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
 
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
                              struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                         struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 
 
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index);
 
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+               bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
 
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 struct intel_gvt_ops {
index 57fb8e3cbd1fcc2276c48079881e5b42536af1e3..1d450627ff654025b56119a181864ed4f2b1c607 100644 (file)
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 static int new_mmio_info(struct intel_gvt *gvt,
                u32 offset, u32 flags, u32 size,
                u32 addr_mask, u32 ro_mask, u32 device,
-               void *read, void *write)
+               int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+               int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
 {
        struct intel_gvt_mmio_info *info, *p;
        u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                default:
                        /*should not hit here*/
                        gvt_err("invalid forcewake offset 0x%x\n", offset);
-                       return 1;
+                       return -EINVAL;
                }
        } else {
                ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes, unsigned long bitmap)
-{
-       struct intel_gvt_workload_scheduler *scheduler =
-               &vgpu->gvt->scheduler;
-
-       vgpu->resetting = true;
-
-       intel_vgpu_stop_schedule(vgpu);
-       /*
-        * The current_vgpu will set to NULL after stopping the
-        * scheduler when the reset is triggered by current vgpu.
-        */
-       if (scheduler->current_vgpu == NULL) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-       }
-
-       intel_vgpu_reset_execlist(vgpu, bitmap);
-
-       /* full GPU reset */
-       if (bitmap == 0xff) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_vgpu_clean_gtt(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-               setup_vgpu_mmio(vgpu);
-               populate_pvinfo_page(vgpu);
-               intel_vgpu_init_gtt(vgpu);
-       }
-
-       vgpu->resetting = false;
-
-       return 0;
-}
-
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
+                           void *p_data, unsigned int bytes)
 {
+       unsigned int engine_mask = 0;
        u32 data;
-       u64 bitmap = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
        if (data & GEN6_GRDOM_FULL) {
                gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
-               bitmap = 0xff;
-       }
-       if (data & GEN6_GRDOM_RENDER) {
-               gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-               bitmap |= (1 << RCS);
-       }
-       if (data & GEN6_GRDOM_MEDIA) {
-               gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-               bitmap |= (1 << VCS);
-       }
-       if (data & GEN6_GRDOM_BLT) {
-               gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-               bitmap |= (1 << BCS);
-       }
-       if (data & GEN6_GRDOM_VECS) {
-               gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-               bitmap |= (1 << VECS);
-       }
-       if (data & GEN8_GRDOM_MEDIA2) {
-               gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-               if (HAS_BSD2(vgpu->gvt->dev_priv))
-                       bitmap |= (1 << VCS2);
+               engine_mask = ALL_ENGINES;
+       } else {
+               if (data & GEN6_GRDOM_RENDER) {
+                       gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+                       engine_mask |= (1 << RCS);
+               }
+               if (data & GEN6_GRDOM_MEDIA) {
+                       gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+                       engine_mask |= (1 << VCS);
+               }
+               if (data & GEN6_GRDOM_BLT) {
+                       gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+                       engine_mask |= (1 << BCS);
+               }
+               if (data & GEN6_GRDOM_VECS) {
+                       gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+                       engine_mask |= (1 << VECS);
+               }
+               if (data & GEN8_GRDOM_MEDIA2) {
+                       gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+                       if (HAS_BSD2(vgpu->gvt->dev_priv))
+                               engine_mask |= (1 << VCS2);
+               }
        }
-       return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+       intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+       return 0;
 }
 
 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       int rc = 0;
        unsigned int id = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                id = VECS;
                break;
        default:
-               rc = -EINVAL;
-               break;
+               return -EINVAL;
        }
        set_bit(id, (void *)vgpu->tlb_handle_pending);
 
-       return rc;
+       return 0;
 }
 
 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
index faaae07ae487277973533bbf907b6eefc2632a48..0c9234a87a20b3d0d6a1f7ddf9e789719a6244cd 100644 (file)
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
+       int ret;
 
        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        if (!type) {
                gvt_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
-               gvt_err("create intel vgpu failed\n");
-               return -EINVAL;
+               ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+               gvt_err("failed to create intel vgpu: %d\n", ret);
+               goto out;
        }
 
        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
                     dev_name(mdev_dev(mdev)));
-       return 0;
+       ret = 0;
+
+out:
+       return ret;
 }
 
 static int intel_vgpu_remove(struct mdev_device *mdev)
index 09c9450a19462e940eb4df2e71af1e10974ef878..4df078bc5d042b1f4fc411fbb0f98c83a3cba729 100644 (file)
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;
 
-       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-       if (!mmio && !vgpu->mmio.disable_warn_untrack) {
-               gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
-                               vgpu->id, offset, bytes, *(u32 *)p_data);
-
-               if (offset == 0x206c) {
-                       gvt_err("------------------------------------------\n");
-                       gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                       vgpu->id);
-                       gvt_err("------------------------------------------\n");
-                       vgpu->mmio.disable_warn_untrack = true;
-               }
-       }
-
        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }
 
+       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
-       } else
+       } else {
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
+               if (!vgpu->mmio.disable_warn_untrack) {
+                       gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+                               vgpu->id, offset, bytes, *(u32 *)p_data);
+
+                       if (offset == 0x206c) {
+                               gvt_err("------------------------------------------\n");
+                               gvt_err("vgpu%d: likely triggers a gfx reset\n",
+                                       vgpu->id);
+                               gvt_err("------------------------------------------\n");
+                               vgpu->mmio.disable_warn_untrack = true;
+                       }
+               }
+       }
+
        if (ret)
                goto err;
 
@@ -302,3 +303,56 @@ err:
        mutex_unlock(&gvt->lock);
        return ret;
 }
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+
+       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+       /* set bits 0:2 (Core C-State) to C0 */
+       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+       const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+       vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+       if (!vgpu->mmio.vreg)
+               return -ENOMEM;
+
+       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+       intel_vgpu_reset_mmio(vgpu);
+
+       return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+       vfree(vgpu->mmio.vreg);
+       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
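
intel_vgpu_init_mmio() carves both register spaces out of one allocation: a single zeroed block of twice the MMIO size, with the shadow space (sreg) aliasing the second half, so the single vfree() in intel_vgpu_clean_mmio() releases both. A userspace sketch of that layout; the 4 KiB size is illustrative, not the real device value:

#include <stdio.h>
#include <stdlib.h>

struct vgpu_mmio {
        unsigned char *vreg;    /* virtual registers the guest sees */
        unsigned char *sreg;    /* shadow copy, second half of the block */
};

static int init_mmio(struct vgpu_mmio *m, size_t mmio_size)
{
        m->vreg = calloc(2, mmio_size);         /* vzalloc(size * 2) */
        if (!m->vreg)
                return -12;                     /* -ENOMEM */
        m->sreg = m->vreg + mmio_size;
        return 0;
}

static void clean_mmio(struct vgpu_mmio *m)
{
        free(m->vreg);                          /* releases sreg too */
        m->vreg = m->sreg = NULL;
}

int main(void)
{
        struct vgpu_mmio m;

        if (init_mmio(&m, 4096))
                return 1;
        printf("sreg - vreg = %td bytes\n", m.sreg - m.vreg);
        clean_mmio(&m);
        return 0;
}
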
index 87d5b5e366a3c97e7b2da79c1b8e66d97823e916..3bc620f56f351e774dc8658c9f06c79d0b24446b 100644 (file)
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
        *offset; \
 })
 
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
 
 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
index 81cd921770c6db7748ad8e880180c3fb6f4c448a..d9fb41ab71198cb19b1ade4796f687af49444c80 100644 (file)
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
                        vgpu->id))
                return -EINVAL;
 
-       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
-                       GFP_DMA32 | __GFP_ZERO,
-                       INTEL_GVT_OPREGION_PORDER);
+       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+                       __GFP_ZERO,
+                       get_order(INTEL_GVT_OPREGION_SIZE));
 
        if (!vgpu_opregion(vgpu)->va)
                return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
                map_vgpu_opregion(vgpu, false);
                free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-                               INTEL_GVT_OPREGION_PORDER);
+                               get_order(INTEL_GVT_OPREGION_SIZE));
 
                vgpu_opregion(vgpu)->va = NULL;
        }
index 0dfe789d8f02b64ade88381b2a69fbca09f2862a..fbd023a16f18163d6dcb52bcf795675e3c16a4f7 100644 (file)
@@ -50,8 +50,7 @@
 #define INTEL_GVT_OPREGION_PARM                   0x204
 
 #define INTEL_GVT_OPREGION_PAGES       2
-#define INTEL_GVT_OPREGION_PORDER      1
-#define INTEL_GVT_OPREGION_SIZE                (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE                (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
 
 #define VGT_SPRSTRIDE(pipe)    _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
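
Replacing the hard-coded INTEL_GVT_OPREGION_PORDER with get_order(INTEL_GVT_OPREGION_SIZE) keeps the allocation order derived from the page count instead of duplicating it. A simplified, runnable model of get_order(); the kernel computes this with log2 arithmetic, but the result agrees for non-zero sizes:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Smallest n such that (PAGE_SIZE << n) covers the requested size. */
static int get_order(unsigned long size)
{
        int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        printf("order for 2 pages: %d\n", get_order(2 * PAGE_SIZE));
        printf("order for 3 pages: %d\n", get_order(3 * PAGE_SIZE));
        return 0;
}
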
 
index fd2b026f7ecde98a6da7c97ce855d972939a27b9..7ea68a75dc4676b0074c6340ec29b698e2181e4a 100644 (file)
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
+       struct intel_vgpu *vgpu;
        int event;
 
        mutex_lock(&gvt->lock);
 
        workload = scheduler->current_workload[ring_id];
+       vgpu = workload->vgpu;
 
-       if (!workload->status && !workload->vgpu->resetting) {
+       if (!workload->status && !vgpu->resetting) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                for_each_set_bit(event, workload->pending_events,
                                 INTEL_GVT_EVENT_MAX)
-                       intel_vgpu_trigger_virtual_event(workload->vgpu,
-                                       event);
+                       intel_vgpu_trigger_virtual_event(vgpu, event);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
        scheduler->current_workload[ring_id] = NULL;
 
-       atomic_dec(&workload->vgpu->running_workload_num);
-
        list_del_init(&workload->list);
        workload->complete(workload);
 
+       atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
 }
@@ -459,11 +459,11 @@ complete:
                gvt_dbg_sched("will complete workload %p\n, status: %d\n",
                                workload, workload->status);
 
-               complete_current_workload(gvt, ring_id);
-
                if (workload->req)
                        i915_gem_request_put(fetch_and_zero(&workload->req));
 
+               complete_current_workload(gvt, ring_id);
+
                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);
index 536d2b9d577732f57a1775f54bdb4a7bce8a7e39..7295bc8e12fb240eeaf6f9434d2bba713453ad3c 100644 (file)
 #include "gvt.h"
 #include "i915_pvinfo.h"
 
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       vfree(vgpu->mmio.vreg);
-       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-
-       if (vgpu->mmio.vreg)
-               memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
-       else {
-               vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
-               if (!vgpu->mmio.vreg)
-                       return -ENOMEM;
-       }
-
-       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
-       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
-       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
-       /* set the bit 0:2(Core C-State ) to C0 */
-       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
-       return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
-       struct intel_vgpu_creation_params *param)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-       u16 *gmch_ctl;
-       int i;
-
-       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
-              info->cfg_space_size);
-
-       if (!param->primary) {
-               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-       }
-
-       /* Show guest that there isn't any stolen memory.*/
-       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
-       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
-       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
-                                gvt_aperture_pa_base(gvt), true);
-
-       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
-                                            | PCI_COMMAND_MEMORY
-                                            | PCI_COMMAND_MASTER);
-       /*
-        * Clear the bar upper 32bit and let guest to assign the new value
-        */
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
-       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
-               vgpu->cfg_space.bar[i].size = pci_resource_len(
-                                             gvt->dev_priv->drm.pdev, i * 2);
-               vgpu->cfg_space.bar[i].tracked = false;
-       }
-}
-
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
        /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
                if (low_avail / min_low == 0)
                        break;
                gvt->types[i].low_gm_size = min_low;
-               gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+               gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
                gvt->types[i].fence = 4;
                gvt->types[i].max_instance = low_avail / min_low;
                gvt->types[i].avail_instance = gvt->types[i].max_instance;
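
The new rule sizes a type's high graphics memory at eight times its low GM, with a floor of 384 MB. Worked through for two illustrative low-GM sizes:

    /* min_low = 64 MB: max(64 MB << 3, 384 MB) = max(512 MB, 384 MB) = 512 MB
     * min_low = 32 MB: max(32 MB << 3, 384 MB) = max(256 MB, 384 MB) = 384 MB
     * i.e. small types are still guaranteed at least 384 MB of high GM.
     */
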
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
         */
        low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
                gvt->gm.vgpu_allocated_low_gm_size;
-       high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+       high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
                gvt->gm.vgpu_allocated_high_gm_size;
        fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
                gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
        vfree(vgpu);
 
        intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->gvt = gvt;
        bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
-       setup_vgpu_cfg_space(vgpu, param);
+       intel_vgpu_init_cfg_space(vgpu, param->primary);
 
-       ret = setup_vgpu_mmio(vgpu);
+       ret = intel_vgpu_init_mmio(vgpu);
        if (ret)
-               goto out_free_vgpu;
+               goto out_clean_idr;
 
        ret = intel_vgpu_alloc_resource(vgpu, param);
        if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
 out_clean_vgpu_resource:
        intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
        vfree(vgpu);
        mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 }
 
 /**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: true for a vGPU Device Model Level Reset, false for a GT reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when the user wants to reset a virtual GPU through
+ * a device model reset or a GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset, returning
+ * the whole vGPU to the default state it had when created. This vGPU function
+ * is required for both functionality and security reasons. The ultimate goal
+ * of vGPU FLR is to allow a vGPU instance to be reused by virtual machines.
+ * When we assign a vGPU to a virtual machine we must issue such a reset
+ * first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
+ * engines (Render, Blitter, Video, Video Enhancement), as defined by the GPU
+ * Spec. Unlike the FLR, a GT reset only resets particular resources of a
+ * vGPU per the reset request. The guest driver can issue a GT reset by
+ * programming the virtual GDRST register to reset specific virtual GPU
+ * engines or all engines.
+ *
+ * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
+ * The parameter engine_mask specifies the engines that need to be reset.
+ * If the value ALL_ENGINES is given for engine_mask, the caller requests a
+ * full GT reset in which all virtual GPU engines are reset. For FLR,
+ * engine_mask is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+       gvt_dbg_core("------------------------------------------\n");
+       gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+                    vgpu->id, dmlr, engine_mask);
+       vgpu->resetting = true;
+
+       intel_vgpu_stop_schedule(vgpu);
+       /*
+        * The current_vgpu will set to NULL after stopping the
+        * scheduler when the reset is triggered by current vgpu.
+        */
+       if (scheduler->current_vgpu == NULL) {
+               mutex_unlock(&gvt->lock);
+               intel_gvt_wait_vgpu_idle(vgpu);
+               mutex_lock(&gvt->lock);
+       }
+
+       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+       /* full GPU reset or device model level reset */
+       if (engine_mask == ALL_ENGINES || dmlr) {
+               intel_vgpu_reset_gtt(vgpu, dmlr);
+               intel_vgpu_reset_resource(vgpu);
+               intel_vgpu_reset_mmio(vgpu);
+               populate_pvinfo_page(vgpu);
+
+               if (dmlr)
+                       intel_vgpu_reset_cfg_space(vgpu);
+       }
+
+       vgpu->resetting = false;
+       gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+       gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
  * @vgpu: virtual GPU
  *
  * This function is called when the user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->gvt->lock);
+       intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+       mutex_unlock(&vgpu->gvt->lock);
 }
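
The split above follows the usual _locked convention: the worker assumes the lock is held (and may drop and retake it around the idle wait, as intel_gvt_reset_vgpu_locked() does for intel_gvt_wait_vgpu_idle()), while a thin wrapper owns acquisition for external callers. A minimal sketch with illustrative names:

    struct dev_like {
            struct mutex lock;
    };

    static void do_reset_locked(struct dev_like *d)
    {
            lockdep_assert_held(&d->lock);  /* document the precondition */
            /* ... perform the reset steps ... */
    }

    static void do_reset(struct dev_like *d)
    {
            mutex_lock(&d->lock);
            do_reset_locked(d);
            mutex_unlock(&d->lock);
    }

Callers that already hold the lock (such as a virtual GDRST register handler) call the _locked variant directly; everyone else goes through the wrapper.
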
index 9d7b5a8c8dea9c4d808ea350f0e96a1a6debb277..fa69d72fdcb9ff45e6590402bb59b9b325d65c38 100644 (file)
@@ -159,8 +159,35 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
                seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
                           i915_vma_is_ggtt(vma) ? "g" : "pp",
                           vma->node.start, vma->node.size);
-               if (i915_vma_is_ggtt(vma))
-                       seq_printf(m, ", type: %u", vma->ggtt_view.type);
+               if (i915_vma_is_ggtt(vma)) {
+                       switch (vma->ggtt_view.type) {
+                       case I915_GGTT_VIEW_NORMAL:
+                               seq_puts(m, ", normal");
+                               break;
+
+                       case I915_GGTT_VIEW_PARTIAL:
+                               seq_printf(m, ", partial [%08llx+%x]",
+                                          vma->ggtt_view.partial.offset << PAGE_SHIFT,
+                                          vma->ggtt_view.partial.size << PAGE_SHIFT);
+                               break;
+
+                       case I915_GGTT_VIEW_ROTATED:
+                               seq_printf(m, ", rotated [(%ux%u, stride=%u, offset=%u), (%ux%u, stride=%u, offset=%u)]",
+                                          vma->ggtt_view.rotated.plane[0].width,
+                                          vma->ggtt_view.rotated.plane[0].height,
+                                          vma->ggtt_view.rotated.plane[0].stride,
+                                          vma->ggtt_view.rotated.plane[0].offset,
+                                          vma->ggtt_view.rotated.plane[1].width,
+                                          vma->ggtt_view.rotated.plane[1].height,
+                                          vma->ggtt_view.rotated.plane[1].stride,
+                                          vma->ggtt_view.rotated.plane[1].offset);
+                               break;
+
+                       default:
+                               MISSING_CASE(vma->ggtt_view.type);
+                               break;
+                       }
+               }
                if (vma->fence)
                        seq_printf(m, " , fence: %d%s",
                                   vma->fence->id,
@@ -2325,10 +2352,40 @@ static int i915_llc(struct seq_file *m, void *data)
        return 0;
 }
 
+static int i915_huc_load_status_info(struct seq_file *m, void *data)
+{
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+       if (!HAS_HUC_UCODE(dev_priv))
+               return 0;
+
+       seq_puts(m, "HuC firmware status:\n");
+       seq_printf(m, "\tpath: %s\n", huc_fw->path);
+       seq_printf(m, "\tfetch: %s\n",
+               intel_uc_fw_status_repr(huc_fw->fetch_status));
+       seq_printf(m, "\tload: %s\n",
+               intel_uc_fw_status_repr(huc_fw->load_status));
+       seq_printf(m, "\tversion wanted: %d.%d\n",
+               huc_fw->major_ver_wanted, huc_fw->minor_ver_wanted);
+       seq_printf(m, "\tversion found: %d.%d\n",
+               huc_fw->major_ver_found, huc_fw->minor_ver_found);
+       seq_printf(m, "\theader: offset is %d; size = %d\n",
+               huc_fw->header_offset, huc_fw->header_size);
+       seq_printf(m, "\tuCode: offset is %d; size = %d\n",
+               huc_fw->ucode_offset, huc_fw->ucode_size);
+       seq_printf(m, "\tRSA: offset is %d; size = %d\n",
+               huc_fw->rsa_offset, huc_fw->rsa_size);
+
+       seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
+
+       return 0;
+}
+
 static int i915_guc_load_status_info(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
        u32 tmp, i;
 
        if (!HAS_GUC_UCODE(dev_priv))
@@ -2336,15 +2393,15 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 
        seq_printf(m, "GuC firmware status:\n");
        seq_printf(m, "\tpath: %s\n",
-               guc_fw->guc_fw_path);
+               guc_fw->path);
        seq_printf(m, "\tfetch: %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+               intel_uc_fw_status_repr(guc_fw->fetch_status));
        seq_printf(m, "\tload: %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+               intel_uc_fw_status_repr(guc_fw->load_status));
        seq_printf(m, "\tversion wanted: %d.%d\n",
-               guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+               guc_fw->major_ver_wanted, guc_fw->minor_ver_wanted);
        seq_printf(m, "\tversion found: %d.%d\n",
-               guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found);
+               guc_fw->major_ver_found, guc_fw->minor_ver_found);
        seq_printf(m, "\theader: offset is %d; size = %d\n",
                guc_fw->header_offset, guc_fw->header_size);
        seq_printf(m, "\tuCode: offset is %d; size = %d\n",
@@ -2532,6 +2589,29 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_control_fops,
                        i915_guc_log_control_get, i915_guc_log_control_set,
                        "%lld\n");
 
+static const char *psr2_live_status(u32 val)
+{
+       static const char * const live_status[] = {
+               "IDLE",
+               "CAPTURE",
+               "CAPTURE_FS",
+               "SLEEP",
+               "BUFON_FW",
+               "ML_UP",
+               "SU_STANDBY",
+               "FAST_SLEEP",
+               "DEEP_SLEEP",
+               "BUF_ON",
+               "TG_ON"
+       };
+
+       val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT;
+       if (val < ARRAY_SIZE(live_status))
+               return live_status[val];
+
+       return "unknown";
+}
+
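
psr2_live_status() above is a bounds-checked, table-driven decode: mask, shift, then index into a string table only if the value is in range. The shape generalizes; a hedged sketch with illustrative names:

    static const char *decode_field(u32 reg, u32 mask, int shift,
                                    const char * const *names, size_t n)
    {
            u32 v = (reg & mask) >> shift;

            return v < n ? names[v] : "unknown";  /* never index out of range */
    }

At a call site, n would come from ARRAY_SIZE() on the name table, exactly as the PSR2 decoder does.
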
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2606,6 +2686,12 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
                seq_printf(m, "Performance_Counter: %u\n", psrperf);
        }
+       if (dev_priv->psr.psr2_support) {
+               u32 psr2 = I915_READ(EDP_PSR2_STATUS_CTL);
+
+               seq_printf(m, "EDP_PSR2_STATUS_CTL: %x [%s]\n",
+                          psr2, psr2_live_status(psr2));
+       }
        mutex_unlock(&dev_priv->psr.lock);
 
        intel_runtime_pm_put(dev_priv);
@@ -4553,6 +4639,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
        {"i915_guc_info", i915_guc_info, 0},
        {"i915_guc_load_status", i915_guc_load_status_info, 0},
        {"i915_guc_log_dump", i915_guc_log_dump, 0},
+       {"i915_huc_load_status", i915_huc_load_status_info, 0},
        {"i915_frequency_info", i915_frequency_info, 0},
        {"i915_hangcheck_info", i915_hangcheck_info, 0},
        {"i915_drpc_info", i915_drpc_info, 0},
index 4d22b4b479b89948dde2740f3f3d08cf5f28cb7c..4ae69ebe166e5f23c84779748c84af16f4f8143f 100644 (file)
@@ -49,6 +49,7 @@
 #include "i915_trace.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
+#include "intel_uc.h"
 
 static struct drm_driver driver;
 
@@ -315,6 +316,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
        case I915_PARAM_MIN_EU_IN_POOL:
                value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
                break;
+       case I915_PARAM_HUC_STATUS:
+               /* The register is already force-woken. We don't need
+                * any runtime PM here.
+                */
+               value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+               break;
        case I915_PARAM_MMAP_GTT_VERSION:
                /* Though we've started our numbering from 1, and so class all
                 * earlier versions as 0, in effect their value is undefined as
@@ -599,6 +606,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_irq;
 
+       intel_huc_init(dev_priv);
        intel_guc_init(dev_priv);
 
        ret = i915_gem_init(dev_priv);
@@ -627,6 +635,7 @@ cleanup_gem:
        i915_gem_fini(dev_priv);
 cleanup_irq:
        intel_guc_fini(dev_priv);
+       intel_huc_fini(dev_priv);
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev_priv);
 cleanup_csr:
@@ -1114,7 +1123,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
        /* Reveal our presence to userspace */
        if (drm_dev_register(dev, 0) == 0) {
                i915_debugfs_register(dev_priv);
-               i915_guc_register(dev_priv);
+               i915_guc_log_register(dev_priv);
                i915_setup_sysfs(dev_priv);
 
                /* Depends on sysfs having been initialized */
@@ -1158,7 +1167,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
        i915_perf_unregister(dev_priv);
 
        i915_teardown_sysfs(dev_priv);
-       i915_guc_unregister(dev_priv);
+       i915_guc_log_unregister(dev_priv);
        i915_debugfs_unregister(dev_priv);
        drm_dev_unregister(&dev_priv->drm);
 
@@ -1314,6 +1323,7 @@ void i915_driver_unload(struct drm_device *dev)
        drain_workqueue(dev_priv->wq);
 
        intel_guc_fini(dev_priv);
+       intel_huc_fini(dev_priv);
        i915_gem_fini(dev_priv);
        intel_fbc_cleanup_cfb(dev_priv);
 
@@ -1471,7 +1481,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
 
        intel_display_set_init_power(dev_priv, false);
 
-       fw_csr = !IS_BROXTON(dev_priv) &&
+       fw_csr = !IS_GEN9_LP(dev_priv) &&
                suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
        /*
         * In case of firmware assisted context save/restore don't manually
@@ -1484,7 +1494,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
                intel_power_domains_suspend(dev_priv);
 
        ret = 0;
-       if (IS_BROXTON(dev_priv))
+       if (IS_GEN9_LP(dev_priv))
                bxt_enable_dc9(dev_priv);
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                hsw_enable_pc8(dev_priv);
@@ -1692,7 +1702,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_early_sanitize(dev_priv, true);
 
-       if (IS_BROXTON(dev_priv)) {
+       if (IS_GEN9_LP(dev_priv)) {
                if (!dev_priv->suspended_to_idle)
                        gen9_sanitize_dc_state(dev_priv);
                bxt_disable_dc9(dev_priv);
@@ -1702,7 +1712,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
        intel_uncore_sanitize(dev_priv);
 
-       if (IS_BROXTON(dev_priv) ||
+       if (IS_GEN9_LP(dev_priv) ||
            !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
                intel_power_domains_init_hw(dev_priv, true);
 
@@ -1728,25 +1738,9 @@ static int i915_resume_switcheroo(struct drm_device *dev)
        return i915_drm_resume(dev);
 }
 
-static void disable_engines_irq(struct drm_i915_private *dev_priv)
-{
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       /* Ensure irq handler finishes, and not run again. */
-       disable_irq(dev_priv->drm.irq);
-       for_each_engine(engine, dev_priv, id)
-               tasklet_kill(&engine->irq_tasklet);
-}
-
-static void enable_engines_irq(struct drm_i915_private *dev_priv)
-{
-       enable_irq(dev_priv->drm.irq);
-}
-
 /**
  * i915_reset - reset chip after a hang
- * @dev: drm device to reset
+ * @dev_priv: device private to reset
  *
  * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
  * on failure.
@@ -1776,12 +1770,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
        error->reset_count++;
 
        pr_notice("drm/i915: Resetting chip after gpu hang\n");
-       i915_gem_reset_prepare(dev_priv);
+       disable_irq(dev_priv->drm.irq);
+       ret = i915_gem_reset_prepare(dev_priv);
+       if (ret) {
+               DRM_ERROR("GPU recovery failed\n");
+               intel_gpu_reset(dev_priv, ALL_ENGINES);
+               goto error;
+       }
 
-       disable_engines_irq(dev_priv);
        ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
-       enable_engines_irq(dev_priv);
-
        if (ret) {
                if (ret != -ENODEV)
                        DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1816,6 +1813,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
        i915_queue_hangcheck(dev_priv);
 
 wakeup:
+       enable_irq(dev_priv->drm.irq);
        wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
        return;
 
@@ -2326,7 +2324,7 @@ static int intel_runtime_suspend(struct device *kdev)
        intel_runtime_pm_disable_interrupts(dev_priv);
 
        ret = 0;
-       if (IS_BROXTON(dev_priv)) {
+       if (IS_GEN9_LP(dev_priv)) {
                bxt_display_core_uninit(dev_priv);
                bxt_enable_dc9(dev_priv);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2411,7 +2409,7 @@ static int intel_runtime_resume(struct device *kdev)
        if (IS_GEN6(dev_priv))
                intel_init_pch_refclk(dev_priv);
 
-       if (IS_BROXTON(dev_priv)) {
+       if (IS_GEN9_LP(dev_priv)) {
                bxt_disable_dc9(dev_priv);
                bxt_display_core_init(dev_priv, true);
                if (dev_priv->csr.dmc_payload &&
@@ -2549,8 +2547,8 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
index 24eb162319384eadf6e053bdb97e8e9e3a974bf5..244628065f94d727e56accb5cb0c1c0ed19a8718 100644 (file)
@@ -79,8 +79,8 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20170109"
-#define DRIVER_TIMESTAMP       1483953121
+#define DRIVER_DATE            "20170123"
+#define DRIVER_TIMESTAMP       1485156432
 
 #undef WARN_ON
 /* Many gcc versions seem to not see through this and fall over :( */
@@ -1070,6 +1070,8 @@ struct intel_fbc {
        struct work_struct underrun_work;
 
        struct intel_fbc_state_cache {
+               struct i915_vma *vma;
+
                struct {
                        unsigned int mode_flags;
                        uint32_t hsw_bdw_pixel_rate;
@@ -1083,15 +1085,14 @@ struct intel_fbc {
                } plane;
 
                struct {
-                       u64 ilk_ggtt_offset;
                        const struct drm_format_info *format;
                        unsigned int stride;
-                       int fence_reg;
-                       unsigned int tiling_mode;
                } fb;
        } state_cache;
 
        struct intel_fbc_reg_params {
+               struct i915_vma *vma;
+
                struct {
                        enum pipe pipe;
                        enum plane plane;
@@ -1099,10 +1100,8 @@ struct intel_fbc {
                } crtc;
 
                struct {
-                       u64 ggtt_offset;
                        const struct drm_format_info *format;
                        unsigned int stride;
-                       int fence_reg;
                } fb;
 
                int cfb_size;
@@ -1155,6 +1154,9 @@ struct i915_psr {
        bool psr2_support;
        bool aux_frame_sync;
        bool link_standby;
+       bool y_cord_support;
+       bool colorimetry_support;
+       bool alpm;
 };
 
 enum intel_pch {
@@ -1810,6 +1812,7 @@ struct intel_pipe_crc {
        enum intel_pipe_crc_source source;
        int head, tail;
        wait_queue_head_t wq;
+       int skipped;
 };
 
 struct i915_frontbuffer_tracking {
@@ -2070,6 +2073,7 @@ struct drm_i915_private {
 
        struct intel_gvt *gvt;
 
+       struct intel_huc huc;
        struct intel_guc guc;
 
        struct intel_csr csr;
@@ -2844,6 +2848,7 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_GUC(dev_priv)      ((dev_priv)->info.has_guc)
 #define HAS_GUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 #define HAS_GUC_SCHED(dev_priv)        (HAS_GUC(dev_priv))
+#define HAS_HUC_UCODE(dev_priv)        (HAS_GUC(dev_priv))
 
 #define HAS_RESOURCE_STREAMER(dev_priv) ((dev_priv)->info.has_resource_streamer)
 
@@ -3102,10 +3107,10 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
-int i915_gem_set_tiling(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
-int i915_gem_get_tiling(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv);
+int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
+int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file_priv);
 void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file);
@@ -3324,7 +3329,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
        return READ_ONCE(error->reset_count);
 }
 
-void i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
@@ -3361,11 +3366,6 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
-                          int tiling_mode);
-u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
-                               int tiling_mode, bool fenced);
-
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);
 
@@ -3375,36 +3375,12 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                struct drm_gem_object *gem_obj, int flags);
 
-struct i915_vma *
-i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                    struct i915_address_space *vm,
-                    const struct i915_ggtt_view *view);
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm,
-                                 const struct i915_ggtt_view *view);
-
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
        return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
-static inline struct i915_vma *
-i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
-                       const struct i915_ggtt_view *view)
-{
-       return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
-}
-
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
-                           const struct i915_ggtt_view *view)
-{
-       return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
 /* i915_gem_fence_reg.c */
 int __must_check i915_vma_get_fence(struct i915_vma *vma);
 int __must_check i915_vma_put_fence(struct i915_vma *vma);
@@ -3472,8 +3448,9 @@ int __must_check i915_gem_evict_something(struct i915_address_space *vm,
                                          unsigned cache_level,
                                          u64 start, u64 end,
                                          unsigned flags);
-int __must_check i915_gem_evict_for_vma(struct i915_vma *vma,
-                                       unsigned int flags);
+int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
+                                        struct drm_mm_node *node,
+                                        unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 
 /* belongs in i915_gem_gtt.h */
@@ -3507,7 +3484,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
 /* i915_gem_internal.c */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
-                               unsigned int size);
+                               phys_addr_t size);
 
 /* i915_gem_shrinker.c */
 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
@@ -3532,6 +3509,11 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
                i915_gem_object_is_tiled(obj);
 }
 
+u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
+                       unsigned int tiling, unsigned int stride);
+u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
+                            unsigned int tiling, unsigned int stride);
+
 /* i915_debugfs.c */
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
index dc00d9ae6d923ab0898b5b956d3cef0a8fd9da2d..a07b627329234a18c443011000dbac06c0de242d 100644 (file)
@@ -1696,12 +1696,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
 
 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
 {
-       u64 size;
-
-       size = i915_gem_object_get_stride(obj);
-       size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
-
-       return size >> PAGE_SHIFT;
+       return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
 }
 
 /**
@@ -1754,6 +1749,29 @@ int i915_gem_mmap_gtt_version(void)
        return 1;
 }
 
+static inline struct i915_ggtt_view
+compute_partial_view(struct drm_i915_gem_object *obj,
+                    pgoff_t page_offset,
+                    unsigned int chunk)
+{
+       struct i915_ggtt_view view;
+
+       if (i915_gem_object_is_tiled(obj))
+               chunk = roundup(chunk, tile_row_pages(obj));
+
+       view.type = I915_GGTT_VIEW_PARTIAL;
+       view.partial.offset = rounddown(page_offset, chunk);
+       view.partial.size =
+               min_t(unsigned int, chunk,
+                     (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
+
+       /* If the partial covers the entire object, just create a normal VMA. */
+       if (chunk >= obj->base.size >> PAGE_SHIFT)
+               view.type = I915_GGTT_VIEW_NORMAL;
+
+       return view;
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
  * @area: CPU VMA in question
@@ -1830,26 +1848,9 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
        /* Now pin it into the GTT as needed */
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
        if (IS_ERR(vma)) {
-               struct i915_ggtt_view view;
-               unsigned int chunk_size;
-
                /* Use a partial view if it is bigger than available space */
-               chunk_size = MIN_CHUNK_PAGES;
-               if (i915_gem_object_is_tiled(obj))
-                       chunk_size = roundup(chunk_size, tile_row_pages(obj));
-
-               memset(&view, 0, sizeof(view));
-               view.type = I915_GGTT_VIEW_PARTIAL;
-               view.params.partial.offset = rounddown(page_offset, chunk_size);
-               view.params.partial.size =
-                       min_t(unsigned int, chunk_size,
-                             vma_pages(area) - view.params.partial.offset);
-
-               /* If the partial covers the entire object, just create a
-                * normal VMA.
-                */
-               if (chunk_size >= obj->base.size >> PAGE_SHIFT)
-                       view.type = I915_GGTT_VIEW_NORMAL;
+               struct i915_ggtt_view view =
+                       compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
 
                /* Userspace is now writing through an untracked VMA, abandon
                 * all hope that the hardware is able to track future writes.
@@ -1878,7 +1879,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
-                              area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
+                              area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                               (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->mappable);
@@ -2021,69 +2022,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
        }
 }
 
-/**
- * i915_gem_get_ggtt_size - return required global GTT size for an object
- * @dev_priv: i915 device
- * @size: object size
- * @tiling_mode: tiling mode
- *
- * Return the required global GTT size for an object, taking into account
- * potential fence register mapping.
- */
-u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
-                          u64 size, int tiling_mode)
-{
-       u64 ggtt_size;
-
-       GEM_BUG_ON(size == 0);
-
-       if (INTEL_GEN(dev_priv) >= 4 ||
-           tiling_mode == I915_TILING_NONE)
-               return size;
-
-       /* Previous chips need a power-of-two fence region when tiling */
-       if (IS_GEN3(dev_priv))
-               ggtt_size = 1024*1024;
-       else
-               ggtt_size = 512*1024;
-
-       while (ggtt_size < size)
-               ggtt_size <<= 1;
-
-       return ggtt_size;
-}
-
-/**
- * i915_gem_get_ggtt_alignment - return required global GTT alignment
- * @dev_priv: i915 device
- * @size: object size
- * @tiling_mode: tiling mode
- * @fenced: is fenced alignment required or not
- *
- * Return the required global GTT alignment for an object, taking into account
- * potential fence register mapping.
- */
-u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
-                               int tiling_mode, bool fenced)
-{
-       GEM_BUG_ON(size == 0);
-
-       /*
-        * Minimum alignment is 4k (GTT page size), but might be greater
-        * if a fence register is needed for the object.
-        */
-       if (INTEL_GEN(dev_priv) >= 4 ||
-           (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
-           tiling_mode == I915_TILING_NONE)
-               return 4096;
-
-       /*
-        * Previous chips need to be aligned to the size of the smallest
-        * fence register that can contain the object.
-        */
-       return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
-}
-
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
@@ -2666,13 +2604,52 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
                if (__i915_gem_request_completed(request))
                        continue;
 
+               GEM_BUG_ON(request->engine != engine);
                return request;
        }
 
        return NULL;
 }
 
-static void reset_request(struct drm_i915_gem_request *request)
+static bool engine_stalled(struct intel_engine_cs *engine)
+{
+       if (!engine->hangcheck.stalled)
+               return false;
+
+       /* Check for possible seqno movement after hang declaration */
+       if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
+               DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
+               return false;
+       }
+
+       return true;
+}
+
+int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       int err = 0;
+
+       /* Ensure irq handler finishes, and not run again. */
+       for_each_engine(engine, dev_priv, id) {
+               struct drm_i915_gem_request *request;
+
+               tasklet_kill(&engine->irq_tasklet);
+
+               if (engine_stalled(engine)) {
+                       request = i915_gem_find_active_request(engine);
+                       if (request && request->fence.error == -EIO)
+                               err = -EIO; /* Previous reset failed! */
+               }
+       }
+
+       i915_gem_revoke_fences(dev_priv);
+
+       return err;
+}
+
+static void skip_request(struct drm_i915_gem_request *request)
 {
        void *vaddr = request->ring->vaddr;
        u32 head;
@@ -2687,20 +2664,74 @@ static void reset_request(struct drm_i915_gem_request *request)
                head = 0;
        }
        memset(vaddr + head, 0, request->postfix - head);
+
+       dma_fence_set_error(&request->fence, -EIO);
 }
 
-void i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
+static void engine_skip_context(struct drm_i915_gem_request *request)
 {
-       i915_gem_revoke_fences(dev_priv);
+       struct intel_engine_cs *engine = request->engine;
+       struct i915_gem_context *hung_ctx = request->ctx;
+       struct intel_timeline *timeline;
+       unsigned long flags;
+
+       timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
+
+       spin_lock_irqsave(&engine->timeline->lock, flags);
+       spin_lock(&timeline->lock);
+
+       list_for_each_entry_continue(request, &engine->timeline->requests, link)
+               if (request->ctx == hung_ctx)
+                       skip_request(request);
+
+       list_for_each_entry(request, &timeline->requests, link)
+               skip_request(request);
+
+       spin_unlock(&timeline->lock);
+       spin_unlock_irqrestore(&engine->timeline->lock, flags);
+}
+
+/* Returns true if the request was guilty of the hang */
+static bool i915_gem_reset_request(struct drm_i915_gem_request *request)
+{
+       /* Read once and return the resolution */
+       const bool guilty = engine_stalled(request->engine);
+
+       /* The guilty request will get skipped on a hung engine.
+        *
+        * Users of client default contexts do not rely on logical
+        * state preserved between batches, so it is safe to execute
+        * queued requests following the hang. Non-default contexts
+        * rely on preserved state, so skipping a batch loses the
+        * evolution of the state and it must be considered corrupted.
+        * Executing more queued batches on top of corrupted state is
+        * risky. But we take the risk by trying to advance through
+        * the queued requests in order to make the client behaviour
+        * more predictable around resets, by not throwing away a random
+        * number of batches it has prepared for execution. Sophisticated
+        * clients can use the gem_reset_stats ioctl and dma fence status
+        * (exported via the sync_file info ioctl on explicit fences) to
+        * observe when they lose the context state and should rebuild
+        * accordingly.
+        *
+        * The context ban, and ultimately the client ban, mechanisms are
+        * safety valves if client submission ends up resulting in nothing
+        * more than subsequent hangs.
+        */
+
+       if (guilty) {
+               i915_gem_context_mark_guilty(request->ctx);
+               skip_request(request);
+       } else {
+               i915_gem_context_mark_innocent(request->ctx);
+               dma_fence_set_error(&request->fence, -EAGAIN);
+       }
+
+       return guilty;
 }
 
 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
-       struct i915_gem_context *hung_ctx;
-       struct intel_timeline *timeline;
-       unsigned long flags;
-       bool ring_hung;
 
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);
@@ -2709,22 +2740,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        if (!request)
                return;
 
-       hung_ctx = request->ctx;
-
-       ring_hung = engine->hangcheck.stalled;
-       if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
-               DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
-                                engine->name,
-                                yesno(ring_hung));
-               ring_hung = false;
-       }
-
-       if (ring_hung)
-               i915_gem_context_mark_guilty(hung_ctx);
-       else
-               i915_gem_context_mark_innocent(hung_ctx);
-
-       if (!ring_hung)
+       if (!i915_gem_reset_request(request))
                return;
 
        DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@@ -2734,34 +2750,8 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
        engine->reset_hw(engine, request);
 
        /* If this context is now banned, skip all of its pending requests. */
-       if (!i915_gem_context_is_banned(hung_ctx))
-               return;
-
-       /* Users of the default context do not rely on logical state
-        * preserved between batches. They have to emit full state on
-        * every batch and so it is safe to execute queued requests following
-        * the hang.
-        *
-        * Other contexts preserve state, now corrupt. We want to skip all
-        * queued requests that reference the corrupt context.
-        */
-       if (i915_gem_context_is_default(hung_ctx))
-               return;
-
-       timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
-
-       spin_lock_irqsave(&engine->timeline->lock, flags);
-       spin_lock(&timeline->lock);
-
-       list_for_each_entry_continue(request, &engine->timeline->requests, link)
-               if (request->ctx == hung_ctx)
-                       reset_request(request);
-
-       list_for_each_entry(request, &timeline->requests, link)
-               reset_request(request);
-
-       spin_unlock(&timeline->lock);
-       spin_unlock_irqrestore(&engine->timeline->lock, flags);
+       if (i915_gem_context_is_banned(request->ctx))
+               engine_skip_context(request);
 }
 
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
@@ -2788,12 +2778,16 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
 {
+       dma_fence_set_error(&request->fence, -EIO);
        i915_gem_request_submit(request);
        intel_engine_init_global_seqno(request->engine, request->global_seqno);
 }
 
-static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
+static void engine_set_wedged(struct intel_engine_cs *engine)
 {
+       struct drm_i915_gem_request *request;
+       unsigned long flags;
+
        /* We need to be sure that no thread is running the old callback as
         * we install the nop handler (otherwise we would submit a request
         * to hardware that will never complete). In order to prevent this
@@ -2802,6 +2796,12 @@ static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
         */
        engine->submit_request = nop_submit_request;
 
+       /* Mark all executing requests as skipped */
+       spin_lock_irqsave(&engine->timeline->lock, flags);
+       list_for_each_entry(request, &engine->timeline->requests, link)
+               dma_fence_set_error(&request->fence, -EIO);
+       spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
        /* Mark all pending requests as complete so that any concurrent
         * (lockless) lookup doesn't try and wait upon the request as we
         * reset it.
@@ -2837,7 +2837,7 @@ static int __i915_gem_set_wedged_BKL(void *data)
        enum intel_engine_id id;
 
        for_each_engine(engine, i915, id)
-               i915_gem_cleanup_engine(engine);
+               engine_set_wedged(engine);
 
        return 0;
 }
@@ -3397,7 +3397,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
-       int ret;
+       int ret = 0;
 
        switch (args->caching) {
        case I915_CACHING_NONE:
@@ -3422,20 +3422,29 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       ret = i915_mutex_lock_interruptible(dev);
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
+               return -ENOENT;
+
+       if (obj->cache_level == level)
+               goto out;
+
+       ret = i915_gem_object_wait(obj,
+                                  I915_WAIT_INTERRUPTIBLE,
+                                  MAX_SCHEDULE_TIMEOUT,
+                                  to_rps_client(file));
        if (ret)
-               return ret;
+               goto out;
 
-       obj = i915_gem_object_lookup(file, args->handle);
-       if (!obj) {
-               ret = -ENOENT;
-               goto unlock;
-       }
+       ret = i915_mutex_lock_interruptible(dev);
+       if (ret)
+               goto out;
 
        ret = i915_gem_object_set_cache_level(obj, level);
-       i915_gem_object_put(obj);
-unlock:
        mutex_unlock(&dev->struct_mutex);
+
+out:
+       i915_gem_object_put(obj);
        return ret;
 }
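
The rework above is a standard contention fix: perform the long, interruptible wait before taking struct_mutex, so the lock covers only the short commit. A generic sketch of the shape; the two helpers are hypothetical stand-ins for i915_gem_object_wait() and i915_gem_object_set_cache_level():

    struct obj_like {
            int level;
            struct mutex dev_lock;
    };

    int wait_for_idle_interruptible(struct obj_like *obj);  /* hypothetical */
    int commit_level(struct obj_like *obj, int level);      /* hypothetical */

    static int set_attr(struct obj_like *obj, int level)
    {
            int ret;

            if (obj->level == level)                 /* cheap early-out, no lock */
                    return 0;

            ret = wait_for_idle_interruptible(obj);  /* slow part, unlocked */
            if (ret)
                    return ret;

            mutex_lock(&obj->dev_lock);              /* short critical section */
            ret = commit_level(obj, level);
            mutex_unlock(&obj->dev_lock);
            return ret;
    }
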
 
@@ -3485,7 +3494,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
         * try to preserve the existing ABI).
         */
        vma = ERR_PTR(-ENOSPC);
-       if (view->type == I915_GGTT_VIEW_NORMAL)
+       if (!view || view->type == I915_GGTT_VIEW_NORMAL)
                vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
                                               PIN_MAPPABLE | PIN_NONBLOCK);
        if (IS_ERR(vma)) {
@@ -3544,11 +3553,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
                return;
 
        if (--vma->obj->pin_display == 0)
-               vma->display_alignment = 0;
+               vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
        /* Bump the LRU to try and avoid premature eviction whilst flipping  */
-       if (!i915_vma_is_active(vma))
-               list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+       i915_gem_object_bump_inactive_ggtt(vma->obj);
 
        i915_vma_unpin(vma);
 }
@@ -3679,8 +3687,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
-       if (IS_ERR(vma))
+       vma = i915_vma_instance(obj, vm, view);
+       if (unlikely(IS_ERR(vma)))
                return vma;
 
        if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -3689,10 +3697,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                        return ERR_PTR(-ENOSPC);
 
                if (flags & PIN_MAPPABLE) {
-                       u32 fence_size;
-
-                       fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
-                                                           i915_gem_object_get_tiling(obj));
                        /* If the required space is larger than the available
                         * aperture, we will not be able to find a slot for the
                         * object and unbinding the object now will be in
@@ -3700,7 +3704,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         * the object in and out of the Global GTT and
                         * waste a lot of cycles under the mutex.
                         */
-                       if (fence_size > dev_priv->ggtt.mappable_end)
+                       if (vma->fence_size > dev_priv->ggtt.mappable_end)
                                return ERR_PTR(-E2BIG);
 
                        /* If NONBLOCK is set the caller is optimistically
@@ -3719,7 +3723,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         * we could try to minimise harm to others.
                         */
                        if (flags & PIN_NONBLOCK &&
-                           fence_size > dev_priv->ggtt.mappable_end / 2)
+                           vma->fence_size > dev_priv->ggtt.mappable_end / 2)
                                return ERR_PTR(-ENOSPC);
                }
 
@@ -4193,7 +4197,8 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
        enum intel_engine_id id;
 
        for_each_engine(engine, dev_priv, id)
-               GEM_BUG_ON(!i915_gem_context_is_kernel(engine->last_retired_context));
+               GEM_BUG_ON(engine->last_retired_context &&
+                          !i915_gem_context_is_kernel(engine->last_retired_context));
 }
 
 int i915_gem_suspend(struct drm_i915_private *dev_priv)
index 40a6939e39565349bf90cca67edc512deab0c522..17f90c6182081c932652715ed34724f1c71b373d 100644 (file)
@@ -97,7 +97,7 @@
  * part. It should be safe to decrease this, but it's more future proof as is.
  */
 #define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN 4096
+#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
 
 static size_t get_context_alignment(struct drm_i915_private *dev_priv)
 {
@@ -205,27 +205,6 @@ alloc_context_obj(struct drm_i915_private *dev_priv, u64 size)
        return obj;
 }
 
-static void i915_ppgtt_close(struct i915_address_space *vm)
-{
-       struct list_head *phases[] = {
-               &vm->active_list,
-               &vm->inactive_list,
-               &vm->unbound_list,
-               NULL,
-       }, **phase;
-
-       GEM_BUG_ON(vm->closed);
-       vm->closed = true;
-
-       for (phase = phases; *phase; phase++) {
-               struct i915_vma *vma, *vn;
-
-               list_for_each_entry_safe(vma, vn, *phase, vm_link)
-                       if (!i915_vma_is_closed(vma))
-                               i915_vma_close(vma);
-       }
-}
-
 static void context_close(struct i915_gem_context *ctx)
 {
        i915_gem_context_set_closed(ctx);
@@ -290,7 +269,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
                        goto err_out;
                }
 
-               vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
                if (IS_ERR(vma)) {
                        i915_gem_object_put(obj);
                        ret = PTR_ERR(vma);
@@ -341,7 +320,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
        if (HAS_GUC(dev_priv) && i915.enable_guc_loading)
                ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
        else
-               ctx->ggtt_offset_bias = 4096;
+               ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;
 
        return ctx;
 
@@ -456,7 +435,8 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
                dev_priv->hw_context_size = 0;
        } else if (HAS_HW_CONTEXTS(dev_priv)) {
                dev_priv->hw_context_size =
-                       round_up(get_context_size(dev_priv), 4096);
+                       round_up(get_context_size(dev_priv),
+                                I915_GTT_PAGE_SIZE);
                if (dev_priv->hw_context_size > (1<<20)) {
                        DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
                                         dev_priv->hw_context_size);
@@ -897,6 +877,26 @@ int i915_switch_context(struct drm_i915_gem_request *req)
        return do_rcs_switch(req);
 }
 
+static bool engine_has_kernel_context(struct intel_engine_cs *engine)
+{
+       struct i915_gem_timeline *timeline;
+
+       list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
+               struct intel_timeline *tl;
+
+               if (timeline == &engine->i915->gt.global_timeline)
+                       continue;
+
+               tl = &timeline->engine[engine->id];
+               if (i915_gem_active_peek(&tl->last_request,
+                                        &engine->i915->drm.struct_mutex))
+                       return false;
+       }
+
+       return (!engine->last_retired_context ||
+               i915_gem_context_is_kernel(engine->last_retired_context));
+}
+
 int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 {
        struct intel_engine_cs *engine;
@@ -905,10 +905,15 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
+       i915_gem_retire_requests(dev_priv);
+
        for_each_engine(engine, dev_priv, id) {
                struct drm_i915_gem_request *req;
                int ret;
 
+               if (engine_has_kernel_context(engine))
+                       continue;
+
                req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
                if (IS_ERR(req))
                        return PTR_ERR(req);
index 026ebc5a452a2d76d2a8c29b37f725c23cbd5002..a43e44e18042d4ada2c62d65d1969fffb2838628 100644 (file)
@@ -231,7 +231,8 @@ found:
 
 /**
  * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
- * @target: address space and range to evict for
+ * @vm: address space to evict from
+ * @target: range (and color) to evict for
  * @flags: additional flags to control the eviction algorithm
  *
  * This function will try to evict vmas that overlap the target node.
@@ -239,18 +240,20 @@ found:
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
+int i915_gem_evict_for_node(struct i915_address_space *vm,
+                           struct drm_mm_node *target,
+                           unsigned int flags)
 {
        LIST_HEAD(eviction_list);
        struct drm_mm_node *node;
-       u64 start = target->node.start;
-       u64 end = start + target->node.size;
+       u64 start = target->start;
+       u64 end = start + target->size;
        struct i915_vma *vma, *next;
        bool check_color;
        int ret = 0;
 
-       lockdep_assert_held(&target->vm->i915->drm.struct_mutex);
-       trace_i915_gem_evict_vma(target, flags);
+       lockdep_assert_held(&vm->i915->drm.struct_mutex);
+       trace_i915_gem_evict_node(vm, target, flags);
 
        /* Retire before we search the active list. Although we have
         * reasonable accuracy in our retirement lists, we may have
@@ -258,18 +261,18 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
         * retiring.
         */
        if (!(flags & PIN_NONBLOCK))
-               i915_gem_retire_requests(target->vm->i915);
+               i915_gem_retire_requests(vm->i915);
 
-       check_color = target->vm->mm.color_adjust;
+       check_color = vm->mm.color_adjust;
        if (check_color) {
                /* Expand search to cover neighbouring guard pages (or lack!) */
-               if (start > target->vm->start)
-                       start -= 4096;
-               if (end < target->vm->start + target->vm->total)
-                       end += 4096;
+               if (start > vm->start)
+                       start -= I915_GTT_PAGE_SIZE;
+               if (end < vm->start + vm->total)
+                       end += I915_GTT_PAGE_SIZE;
        }
 
-       drm_mm_for_each_node_in_range(node, &target->vm->mm, start, end) {
+       drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
                /* If we find any non-objects (!vma), we cannot evict them */
                if (node->color == I915_COLOR_UNEVICTABLE) {
                        ret = -ENOSPC;
@@ -285,12 +288,12 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
                 * those as well to make room for our guard pages.
                 */
                if (check_color) {
-                       if (vma->node.start + vma->node.size == target->node.start) {
-                               if (vma->node.color == target->node.color)
+                       if (vma->node.start + vma->node.size == node->start) {
+                               if (vma->node.color == node->color)
                                        continue;
                        }
-                       if (vma->node.start == target->node.start + target->node.size) {
-                               if (vma->node.color == target->node.color)
+                       if (vma->node.start == node->start + node->size) {
+                               if (vma->node.color == node->color)
                                        continue;
                        }
                }
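
With colour adjustment enabled, the scan above widens the target range by one GTT page on each side so that differently-coloured neighbours, which would need a guard page, fall inside the walk. A minimal stand-alone model of that widening (the constant mirrors I915_GTT_PAGE_SIZE; the addresses are arbitrary):

#include <assert.h>
#include <stdint.h>

#define GTT_PAGE 4096ull	/* stand-in for I915_GTT_PAGE_SIZE */

int main(void)
{
	uint64_t vm_start = 0, vm_total = 1ull << 30;
	uint64_t start = 0x10000, end = 0x20000;

	/* widen by one page each side, clamped to the address space */
	if (start > vm_start)
		start -= GTT_PAGE;
	if (end < vm_start + vm_total)
		end += GTT_PAGE;

	assert(start == 0xf000 && end == 0x21000);
	return 0;
}
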
@@ -302,7 +305,7 @@ int i915_gem_evict_for_vma(struct i915_vma *target, unsigned int flags)
                }
 
                /* Overlap of objects in the same batch? */
-               if (i915_vma_is_pinned(vma)) {
+               if (i915_vma_is_pinned(vma) || !list_empty(&vma->exec_list)) {
                        ret = -ENOSPC;
                        if (vma->exec_entry &&
                            vma->exec_entry->flags & EXEC_OBJECT_PINNED)
index a5fe299da1d366252d100b2829a1b854d784c37f..c66e90571031de456df9303818f10fcbf3e210c9 100644 (file)
@@ -184,7 +184,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
-               vma = i915_gem_obj_lookup_or_create_vma(obj, vm, NULL);
+               vma = i915_vma_instance(obj, vm, NULL);
                if (unlikely(IS_ERR(vma))) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
@@ -438,7 +438,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                        memset(&cache->node, 0, sizeof(cache->node));
                        ret = drm_mm_insert_node_in_range_generic
                                (&ggtt->base.mm, &cache->node,
-                                4096, 0, I915_COLOR_UNEVICTABLE,
+                                PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
                                 0, ggtt->mappable_end,
                                 DRM_MM_SEARCH_DEFAULT,
                                 DRM_MM_CREATE_DEFAULT);
@@ -851,8 +851,7 @@ eb_vma_misplaced(struct i915_vma *vma)
        WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
                !i915_vma_is_ggtt(vma));
 
-       if (entry->alignment &&
-           vma->node.start & (entry->alignment - 1))
+       if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
                return true;
 
        if (vma->node.size < entry->pad_to_size)
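
The IS_ALIGNED() form above is equivalent to the open-coded mask test it replaces whenever the alignment is a power of two. A tiny user-space check of that equivalence (ALIGNED is a stand-in for the kernel's IS_ALIGNED; the values are hypothetical):

#include <assert.h>
#include <stdint.h>

/* Stand-in for the kernel's IS_ALIGNED(); valid only for power-of-two a. */
#define ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	uint64_t node_start = 0x10000;	/* hypothetical vma->node.start */
	uint64_t alignment = 4096;	/* hypothetical entry->alignment */

	assert(ALIGNED(node_start, alignment));
	assert(!ALIGNED(node_start + 256, alignment));
	/* same answer as the old test: start & (alignment - 1) */
	assert((node_start & (alignment - 1)) == 0);
	return 0;
}
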
index 775059e19ab9d2b72fa42f09f3741f9a91cb7aad..fadbe8f4c74553363370b3dca6425429bf50bca6 100644 (file)
@@ -77,16 +77,17 @@ static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
 
        val = 0;
        if (vma) {
-               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
-               bool is_y_tiled = tiling == I915_TILING_Y;
                unsigned int stride = i915_gem_object_get_stride(vma->obj);
-               u32 row_size = stride * (is_y_tiled ? 32 : 8);
-               u32 size = rounddown((u32)vma->node.size, row_size);
 
-               val = ((vma->node.start + size - 4096) & 0xfffff000) << 32;
-               val |= vma->node.start & 0xfffff000;
+               GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
+               GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
+               GEM_BUG_ON(!IS_ALIGNED(stride, 128));
+
+               val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
+               val |= vma->node.start;
                val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
-               if (is_y_tiled)
+               if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
                        val |= BIT(I965_FENCE_TILING_Y_SHIFT);
                val |= I965_FENCE_REG_VALID;
        }
@@ -122,31 +123,24 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
                unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
                bool is_y_tiled = tiling == I915_TILING_Y;
                unsigned int stride = i915_gem_object_get_stride(vma->obj);
-               int pitch_val;
-               int tile_width;
 
-               WARN((vma->node.start & ~I915_FENCE_START_MASK) ||
-                    !is_power_of_2(vma->node.size) ||
-                    (vma->node.start & (vma->node.size - 1)),
-                    "object 0x%08llx [fenceable? %d] not 1M or pot-size (0x%08llx) aligned\n",
-                    vma->node.start,
-                    i915_vma_is_map_and_fenceable(vma),
-                    vma->node.size);
+               GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+               GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
+               GEM_BUG_ON(!is_power_of_2(vma->fence_size));
+               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
                if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
-                       tile_width = 128;
+                       stride /= 128;
                else
-                       tile_width = 512;
-
-               /* Note: pitch better be a power of two tile widths */
-               pitch_val = stride / tile_width;
-               pitch_val = ffs(pitch_val) - 1;
+                       stride /= 512;
+               GEM_BUG_ON(!is_power_of_2(stride));
 
                val = vma->node.start;
                if (is_y_tiled)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
-               val |= I915_FENCE_SIZE_BITS(vma->node.size);
-               val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+               val |= I915_FENCE_SIZE_BITS(vma->fence_size);
+               val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;
+
                val |= I830_FENCE_REG_VALID;
        }
 
@@ -166,25 +160,19 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
 
        val = 0;
        if (vma) {
-               unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
-               bool is_y_tiled = tiling == I915_TILING_Y;
                unsigned int stride = i915_gem_object_get_stride(vma->obj);
-               u32 pitch_val;
 
-               WARN((vma->node.start & ~I830_FENCE_START_MASK) ||
-                    !is_power_of_2(vma->node.size) ||
-                    (vma->node.start & (vma->node.size - 1)),
-                    "object 0x%08llx not 512K or pot-size 0x%08llx aligned\n",
-                    vma->node.start, vma->node.size);
-
-               pitch_val = stride / 128;
-               pitch_val = ffs(pitch_val) - 1;
+               GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+               GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
+               GEM_BUG_ON(!is_power_of_2(vma->fence_size));
+               GEM_BUG_ON(!is_power_of_2(stride / 128));
+               GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));
 
                val = vma->node.start;
-               if (is_y_tiled)
+               if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
-               val |= I830_FENCE_SIZE_BITS(vma->node.size);
-               val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+               val |= I830_FENCE_SIZE_BITS(vma->fence_size);
+               val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
        }
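
Both legacy fence writers now derive the pitch field as log2 of the stride expressed in tile widths (128 bytes here, 128 or 512 in the i915 path), replacing the ffs() - 1 dance. A user-space sketch of the same computation (log2u stands in for the kernel's ilog2):

#include <assert.h>
#include <stdint.h>

/* log2 of a power-of-two value; mirrors ilog2() for this narrow use */
static unsigned int log2u(uint32_t v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int stride = 512;		/* bytes */
	unsigned int pitch = log2u(stride / 128);

	assert(pitch == 2);	/* 512 / 128 = 4 tile widths -> log2 = 2 */
	return 0;
}
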
 
index 22c4a2d01adfdaa5e66ff71e6533503000119ec1..99a31ded4dfdfcc8eac68c390be31a76df27feee 100644 (file)
@@ -30,6 +30,8 @@
 struct drm_i915_private;
 struct i915_vma;
 
+#define I965_FENCE_PAGE 4096UL
+
 struct drm_i915_fence_reg {
        struct list_head link;
        struct drm_i915_private *i915;
index f698006fe8836abe5f3537e6b0690bb3c0f51673..e808aad203d82f7e0f48fc8667d0fa6b456a21db 100644 (file)
  *
  */
 
+#include <linux/log2.h>
+#include <linux/random.h>
 #include <linux/seq_file.h>
 #include <linux/stop_machine.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-const struct i915_ggtt_view i915_ggtt_view_normal = {
-       .type = I915_GGTT_VIEW_NORMAL,
-};
-const struct i915_ggtt_view i915_ggtt_view_rotated = {
-       .type = I915_GGTT_VIEW_ROTATED,
-};
+static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+       /* Note that as an uncached mmio write, this should flush the
+        * write-combining buffer (WCB) of pending PTE writes into the GGTT
+        * before it triggers the invalidate.
+        */
+       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+}
+
+static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+       gen6_ggtt_invalidate(dev_priv);
+       I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+}
+
+static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
+{
+       intel_gtt_chipset_flush();
+}
+
+static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
+{
+       i915->ggtt.invalidate(i915);
+}
 
 int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
                                int enable_ppgtt)
@@ -329,7 +350,7 @@ static int __setup_page_dma(struct drm_i915_private *dev_priv,
                return -ENOMEM;
 
        p->daddr = dma_map_page(kdev,
-                               p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
+                               p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
        if (dma_mapping_error(kdev, p->daddr)) {
                __free_page(p->page);
@@ -353,7 +374,7 @@ static void cleanup_page_dma(struct drm_i915_private *dev_priv,
        if (WARN_ON(!p->page))
                return;
 
-       dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(p->page);
        memset(p, 0, sizeof(*p));
 }
@@ -626,10 +647,10 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
 }
 
 static void
-gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
-                         struct i915_page_directory_pointer *pdp,
-                         struct i915_page_directory *pd,
-                         int index)
+gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
+               struct i915_page_directory_pointer *pdp,
+               struct i915_page_directory *pd,
+               int index)
 {
        gen8_ppgtt_pdpe_t *page_directorypo;
 
@@ -642,10 +663,10 @@ gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
 }
 
 static void
-gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
-                                 struct i915_pml4 *pml4,
-                                 struct i915_page_directory_pointer *pdp,
-                                 int index)
+gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
+                struct i915_pml4 *pml4,
+                struct i915_page_directory_pointer *pdp,
+                int index)
 {
        gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
 
@@ -793,9 +814,6 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_directory *pd;
        uint64_t pdpe;
-       gen8_ppgtt_pdpe_t *pdpe_vaddr;
-       gen8_ppgtt_pdpe_t scratch_pdpe =
-               gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
 
        gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
                if (WARN_ON(!pdp->page_directory[pdpe]))
@@ -803,11 +821,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
 
                if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
                        __clear_bit(pdpe, pdp->used_pdpes);
-                       if (USES_FULL_48BIT_PPGTT(dev_priv)) {
-                               pdpe_vaddr = kmap_px(pdp);
-                               pdpe_vaddr[pdpe] = scratch_pdpe;
-                               kunmap_px(ppgtt, pdpe_vaddr);
-                       }
+                       gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
                        free_pd(vm->i915, pd);
                }
        }
@@ -832,9 +846,6 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
        struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
        struct i915_page_directory_pointer *pdp;
        uint64_t pml4e;
-       gen8_ppgtt_pml4e_t *pml4e_vaddr;
-       gen8_ppgtt_pml4e_t scratch_pml4e =
-               gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);
 
        GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
 
@@ -844,9 +855,7 @@ static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
 
                if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
                        __clear_bit(pml4e, pml4->used_pml4es);
-                       pml4e_vaddr = kmap_px(pml4);
-                       pml4e_vaddr[pml4e] = scratch_pml4e;
-                       kunmap_px(ppgtt, pml4e_vaddr);
+                       gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
                        free_pdp(vm->i915, pdp);
                }
        }
@@ -1366,7 +1375,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 
                kunmap_px(ppgtt, page_directory);
                __set_bit(pdpe, pdp->used_pdpes);
-               gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
+               gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
        }
 
        free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
@@ -1425,7 +1434,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
                if (ret)
                        goto err_out;
 
-               gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
+               gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
        }
 
        bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
@@ -2044,7 +2053,6 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
        struct i915_address_space *vm = &ppgtt->base;
        struct drm_i915_private *dev_priv = ppgtt->base.i915;
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       bool retried = false;
        int ret;
 
        /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
@@ -2057,29 +2065,14 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
        if (ret)
                return ret;
 
-alloc:
-       ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node,
-                                                 GEN6_PD_SIZE, GEN6_PD_ALIGN,
-                                                 I915_COLOR_UNEVICTABLE,
-                                                 0, ggtt->base.total,
-                                                 DRM_MM_TOPDOWN);
-       if (ret == -ENOSPC && !retried) {
-               ret = i915_gem_evict_something(&ggtt->base,
-                                              GEN6_PD_SIZE, GEN6_PD_ALIGN,
-                                              I915_COLOR_UNEVICTABLE,
-                                              0, ggtt->base.total,
-                                              0);
-               if (ret)
-                       goto err_out;
-
-               retried = true;
-               goto alloc;
-       }
-
+       ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
+                                 GEN6_PD_SIZE, GEN6_PD_ALIGN,
+                                 I915_COLOR_UNEVICTABLE,
+                                 0, ggtt->base.total,
+                                 PIN_HIGH);
        if (ret)
                goto err_out;
 
-
        if (ppgtt->node.start < ggtt->mappable_end)
                DRM_DEBUG("Forced to use aperture for PDEs\n");
 
@@ -2267,6 +2260,27 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
        return ppgtt;
 }
 
+void i915_ppgtt_close(struct i915_address_space *vm)
+{
+       struct list_head *phases[] = {
+               &vm->active_list,
+               &vm->inactive_list,
+               &vm->unbound_list,
+               NULL,
+       }, **phase;
+
+       GEM_BUG_ON(vm->closed);
+       vm->closed = true;
+
+       for (phase = phases; *phase; phase++) {
+               struct i915_vma *vma, *vn;
+
+               list_for_each_entry_safe(vma, vn, *phase, vm_link)
+                       if (!i915_vma_is_closed(vma))
+                               i915_vma_close(vma);
+       }
+}
+
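
i915_ppgtt_close() uses the NULL-terminated phases[] idiom to run one loop body over all three vma lists. A minimal stand-alone model of the idiom (the data is arbitrary):

#include <assert.h>
#include <stddef.h>

int main(void)
{
	int a[] = { 1, 2 }, b[] = { 3 };
	struct { int *v; size_t n; } phases[] = {
		{ a, 2 }, { b, 1 }, { NULL, 0 },
	}, *phase;
	int sum = 0;

	/* one body visits every element of every phase */
	for (phase = phases; phase->v; phase++)
		for (size_t i = 0; i < phase->n; i++)
			sum += phase->v[i];

	assert(sum == 6);
	return 0;
}
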
 void i915_ppgtt_release(struct kref *kref)
 {
        struct i915_hw_ppgtt *ppgtt =
@@ -2331,16 +2345,6 @@ void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
                POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
 }
 
-static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
-{
-       if (INTEL_INFO(dev_priv)->gen < 6) {
-               intel_gtt_chipset_flush();
-       } else {
-               I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-               POSTING_READ(GFX_FLSH_CNTL_GEN6);
-       }
-}
-
 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -2355,7 +2359,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
 
        ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
 
-       i915_ggtt_flush(dev_priv);
+       i915_ggtt_invalidate(dev_priv);
 }
 
 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
@@ -2394,15 +2398,13 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
                                  enum i915_cache_level level,
                                  u32 unused)
 {
-       struct drm_i915_private *dev_priv = vm->i915;
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen8_pte_t __iomem *pte =
-               (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
-               (offset >> PAGE_SHIFT);
+               (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
 
        gen8_set_pte(pte, gen8_pte_encode(addr, level));
 
-       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+       ggtt->invalidate(vm->i915);
 }
 
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2410,7 +2412,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 unused)
 {
-       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen8_pte_t __iomem *gtt_entries;
@@ -2439,8 +2440,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
-       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+       ggtt->invalidate(vm->i915);
 }
 
 struct insert_entries {
@@ -2475,15 +2475,13 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
                                  enum i915_cache_level level,
                                  u32 flags)
 {
-       struct drm_i915_private *dev_priv = vm->i915;
+       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        gen6_pte_t __iomem *pte =
-               (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
-               (offset >> PAGE_SHIFT);
+               (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
 
        iowrite32(vm->pte_encode(addr, level, flags), pte);
 
-       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+       ggtt->invalidate(vm->i915);
 }
 
 /*
@@ -2497,7 +2495,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
                                     uint64_t start,
                                     enum i915_cache_level level, u32 flags)
 {
-       struct drm_i915_private *dev_priv = vm->i915;
        struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
        struct sgt_iter sgt_iter;
        gen6_pte_t __iomem *gtt_entries;
@@ -2525,8 +2522,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
         * want to flush the TLBs only after we're certain all the PTE updates
         * have finished.
         */
-       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+       ggtt->invalidate(vm->i915);
 }
 
 static void nop_clear_range(struct i915_address_space *vm,
@@ -2723,11 +2719,11 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
                                  u64 *end)
 {
        if (node->color != color)
-               *start += 4096;
+               *start += I915_GTT_PAGE_SIZE;
 
        node = list_next_entry(node, node_list);
        if (node->allocated && node->color != color)
-               *end -= 4096;
+               *end -= I915_GTT_PAGE_SIZE;
 }
 
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
@@ -2754,7 +2750,7 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
        /* Reserve a mappable slot for our lockless error capture */
        ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
                                                  &ggtt->error_capture,
-                                                 4096, 0,
+                                                 PAGE_SIZE, 0,
                                                  I915_COLOR_UNEVICTABLE,
                                                  0, ggtt->mappable_end,
                                                  0, 0);
@@ -3086,6 +3082,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        if (IS_CHERRYVIEW(dev_priv))
                ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
 
+       ggtt->invalidate = gen6_ggtt_invalidate;
+
        return ggtt_probe_common(ggtt, size);
 }
 
@@ -3123,6 +3121,8 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.unbind_vma = ggtt_unbind_vma;
        ggtt->base.cleanup = gen6_gmch_remove;
 
+       ggtt->invalidate = gen6_ggtt_invalidate;
+
        if (HAS_EDRAM(dev_priv))
                ggtt->base.pte_encode = iris_pte_encode;
        else if (IS_HASWELL(dev_priv))
@@ -3166,6 +3166,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->base.unbind_vma = ggtt_unbind_vma;
        ggtt->base.cleanup = i915_gmch_remove;
 
+       ggtt->invalidate = gmch_ggtt_invalidate;
+
        if (unlikely(ggtt->do_idle_maps))
                DRM_INFO("applying Ironlake quirks for intel_iommu\n");
 
@@ -3284,6 +3286,16 @@ int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
        return 0;
 }
 
+void i915_ggtt_enable_guc(struct drm_i915_private *i915)
+{
+       i915->ggtt.invalidate = guc_ggtt_invalidate;
+}
+
+void i915_ggtt_disable_guc(struct drm_i915_private *i915)
+{
+       i915->ggtt.invalidate = gen6_ggtt_invalidate;
+}
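
These helpers replace the old gen-based if/else flush with a per-GGTT function pointer, so enabling or disabling the GuC is just a hook swap. A minimal user-space model of the pattern (all names here are illustrative, not driver API):

#include <assert.h>

struct ggtt_model {
	void (*invalidate)(struct ggtt_model *ggtt);
	int guc_writes;	/* counts the extra GuC TLB invalidates */
};

static void gen6_invalidate(struct ggtt_model *ggtt)
{
	(void)ggtt;	/* would be the uncached GFX_FLSH_CNTL_EN write */
}

static void guc_invalidate(struct ggtt_model *ggtt)
{
	gen6_invalidate(ggtt);
	ggtt->guc_writes++;	/* would also write GEN8_GTCR */
}

int main(void)
{
	struct ggtt_model ggtt = { .invalidate = gen6_invalidate };

	ggtt.invalidate(&ggtt);			/* plain gen6 path */
	ggtt.invalidate = guc_invalidate;	/* i915_ggtt_enable_guc() */
	ggtt.invalidate(&ggtt);
	assert(ggtt.guc_writes == 1);
	return 0;
}
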
+
 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
 {
        struct i915_ggtt *ggtt = &dev_priv->ggtt;
@@ -3347,52 +3359,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
                }
        }
 
-       i915_ggtt_flush(dev_priv);
-}
-
-struct i915_vma *
-i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
-                   struct i915_address_space *vm,
-                   const struct i915_ggtt_view *view)
-{
-       struct rb_node *rb;
-
-       rb = obj->vma_tree.rb_node;
-       while (rb) {
-               struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
-               long cmp;
-
-               cmp = i915_vma_compare(vma, vm, view);
-               if (cmp == 0)
-                       return vma;
-
-               if (cmp < 0)
-                       rb = rb->rb_right;
-               else
-                       rb = rb->rb_left;
-       }
-
-       return NULL;
-}
-
-struct i915_vma *
-i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
-                                 struct i915_address_space *vm,
-                                 const struct i915_ggtt_view *view)
-{
-       struct i915_vma *vma;
-
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-       GEM_BUG_ON(view && !i915_is_ggtt(vm));
-
-       vma = i915_gem_obj_to_vma(obj, vm, view);
-       if (!vma) {
-               vma = i915_vma_create(obj, vm, view);
-               GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
-       }
-
-       GEM_BUG_ON(i915_vma_is_closed(vma));
-       return vma;
+       i915_ggtt_invalidate(dev_priv);
 }
 
 static struct scatterlist *
@@ -3492,7 +3459,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
 {
        struct sg_table *st;
        struct scatterlist *sg, *iter;
-       unsigned int count = view->params.partial.size;
+       unsigned int count = view->partial.size;
        unsigned int offset;
        int ret = -ENOMEM;
 
@@ -3504,9 +3471,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
        if (ret)
                goto err_sg_alloc;
 
-       iter = i915_gem_object_get_sg(obj,
-                                     view->params.partial.offset,
-                                     &offset);
+       iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
        GEM_BUG_ON(!iter);
 
        sg = st->sgl;
@@ -3558,7 +3523,8 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
                vma->pages = vma->obj->mm.pages;
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                vma->pages =
-                       intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
+                       intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
+                                                 vma->obj);
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
                vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
        else
@@ -3579,3 +3545,207 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
        return ret;
 }
 
+/**
+ * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ *        must be #I915_GTT_PAGE_SIZE aligned
+ * @offset: where to insert inside the GTT,
+ *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
+ *          (@offset + @size) must fit within the address space
+ * @color: color to apply to node, if this node is not from a VMA,
+ *         color must be #I915_COLOR_UNEVICTABLE
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
+ * the address space (using @size and @color). If the @node does not fit, it
+ * tries to evict any overlapping nodes from the GTT, including any
+ * neighbouring nodes if the colors do not match (to ensure guard pages between
+ * differing domains). See i915_gem_evict_for_node() for the gory details
+ * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
+ * evicting active overlapping objects, and any overlapping node that is pinned
+ * or marked as unevictable will also result in failure.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+                        struct drm_mm_node *node,
+                        u64 size, u64 offset, unsigned long color,
+                        unsigned int flags)
+{
+       int err;
+
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
+       GEM_BUG_ON(range_overflows(offset, size, vm->total));
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(drm_mm_node_allocated(node));
+
+       node->size = size;
+       node->start = offset;
+       node->color = color;
+
+       err = drm_mm_reserve_node(&vm->mm, node);
+       if (err != -ENOSPC)
+               return err;
+
+       err = i915_gem_evict_for_node(vm, node, flags);
+       if (err == 0)
+               err = drm_mm_reserve_node(&vm->mm, node);
+
+       return err;
+}
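
A hedged sketch of a caller pinning a node at a fixed GGTT offset with the new helper, modelled on the stolen-memory usage further down in this diff (pin_at_fixed_offset itself is hypothetical; struct_mutex held, vma->node unallocated):

/* Hypothetical caller: place vma->node at a fixed offset in the GGTT. */
static int pin_at_fixed_offset(struct i915_ggtt *ggtt,
			       struct i915_vma *vma, u64 offset)
{
	return i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				    vma->size,	/* page aligned */
				    offset,	/* min-alignment aligned */
				    vma->obj->cache_level,
				    0 /* flags: allow blocking eviction */);
}
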
+
+static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
+{
+       u64 range, addr;
+
+       GEM_BUG_ON(range_overflows(start, len, end));
+       GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
+
+       range = round_down(end - len, align) - round_up(start, align);
+       if (range) {
+               if (sizeof(unsigned long) == sizeof(u64)) {
+                       addr = get_random_long();
+               } else {
+                       addr = get_random_int();
+                       if (range > U32_MAX) {
+                               addr <<= 32;
+                               addr |= get_random_int();
+                       }
+               }
+               div64_u64_rem(addr, range, &addr);
+               start += addr;
+       }
+
+       return round_up(start, align);
+}
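
To make the arithmetic concrete, here is the candidate range random_offset() draws from for a 64 KiB allocation in a 1 MiB window at 4 KiB alignment (a stand-alone recomputation, not the driver code):

#include <assert.h>
#include <stdint.h>

#define ROUND_UP(x, a)   ((((x) + (a) - 1) / (a)) * (a))
#define ROUND_DOWN(x, a) (((x) / (a)) * (a))

int main(void)
{
	uint64_t start = 0, end = 1ull << 20;		/* 1 MiB window */
	uint64_t len = 64 << 10, align = 4 << 10;	/* 64 KiB object */
	uint64_t range = ROUND_DOWN(end - len, align) - ROUND_UP(start, align);

	/* 960 KiB of candidate offsets; the result is realigned upwards */
	assert(range == 983040);
	return 0;
}
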
+
+/**
+ * i915_gem_gtt_insert - insert a node into an address_space (GTT)
+ * @vm: the &struct i915_address_space
+ * @node: the &struct drm_mm_node (typically i915_vma.node)
+ * @size: how much space to allocate inside the GTT,
+ *        must be #I915_GTT_PAGE_SIZE aligned
+ * @alignment: required alignment of starting offset, may be 0 but
+ *             if specified, this must be a power-of-two and at least
+ *             #I915_GTT_MIN_ALIGNMENT
+ * @color: color to apply to node
+ * @start: start of any range restriction inside GTT (0 for all),
+ *         must be #I915_GTT_PAGE_SIZE aligned
+ * @end: end of any range restriction inside GTT (U64_MAX for all),
+ *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
+ * @flags: control search and eviction behaviour
+ *
+ * i915_gem_gtt_insert() first searches for an available hole into which
+ * it can insert the node. The hole address is aligned to @alignment and
+ * its @size must then fit entirely within the [@start, @end] bounds. The
+ * nodes on either side of the hole must match @color, or else a guard page
+ * will be inserted between the two nodes (or the node evicted). If no
+ * suitable hole is found, first a victim is randomly selected and tested
+ * for eviction; failing that, the LRU list of objects within the GTT
+ * is scanned to find the first set of replacement nodes to create the hole.
+ * Those old overlapping nodes are evicted from the GTT (and so must be
+ * rebound before any future use). Any node that is currently pinned cannot
+ * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
+ * active and #PIN_NONBLOCK is specified, that node is also skipped when
+ * searching for an eviction candidate. See i915_gem_evict_something() for
+ * the gory details on the eviction algorithm.
+ *
+ * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
+ * asked to wait for eviction and interrupted.
+ */
+int i915_gem_gtt_insert(struct i915_address_space *vm,
+                       struct drm_mm_node *node,
+                       u64 size, u64 alignment, unsigned long color,
+                       u64 start, u64 end, unsigned int flags)
+{
+       u32 search_flag, alloc_flag;
+       u64 offset;
+       int err;
+
+       lockdep_assert_held(&vm->i915->drm.struct_mutex);
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+       GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+       GEM_BUG_ON(start >= end);
+       GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+       GEM_BUG_ON(drm_mm_node_allocated(node));
+
+       if (unlikely(range_overflows(start, size, end)))
+               return -ENOSPC;
+
+       if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
+               return -ENOSPC;
+
+       if (flags & PIN_HIGH) {
+               search_flag = DRM_MM_SEARCH_BELOW;
+               alloc_flag = DRM_MM_CREATE_TOP;
+       } else {
+               search_flag = DRM_MM_SEARCH_DEFAULT;
+               alloc_flag = DRM_MM_CREATE_DEFAULT;
+       }
+
+       /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+        * so we know that we always have a minimum alignment of 4096.
+        * The drm_mm range manager is optimised to return results
+        * with zero alignment, so where possible use the optimal
+        * path.
+        */
+       BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
+       if (alignment <= I915_GTT_MIN_ALIGNMENT)
+               alignment = 0;
+
+       err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
+                                                 size, alignment, color,
+                                                 start, end,
+                                                 search_flag, alloc_flag);
+       if (err != -ENOSPC)
+               return err;
+
+       /* No free space, pick a slot at random.
+        *
+        * There is a pathological case here using a GTT shared between
+        * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
+        *
+        *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
+        *         (64k objects)             (448k objects)
+        *
+        * Now imagine that the eviction LRU is ordered top-down (just because
+        * pathology meets real life), and that we need to evict an object to
+        * make room inside the aperture. The eviction scan then has to walk
+        * the 448k list before it finds one within range. And now imagine that
+        * it has to search for a new hole between every byte inside the memcpy,
+        * for several simultaneous clients.
+        *
+        * On a full-ppgtt system, if we have run out of available space, there
+        * will be lots and lots of objects in the eviction list! Again,
+        * searching that LRU list may be slow if we are also applying any
+        * range restrictions (e.g. restriction to low 4GiB) and so, for
+        * simplicity and similarity between different GTTs, try the single
+        * random replacement first.
+        */
+       offset = random_offset(start, end,
+                              size, alignment ?: I915_GTT_MIN_ALIGNMENT);
+       err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
+       if (err != -ENOSPC)
+               return err;
+
+       /* Randomly selected placement is pinned, do a search */
+       err = i915_gem_evict_something(vm, size, alignment, color,
+                                      start, end, flags);
+       if (err)
+               return err;
+
+       search_flag = DRM_MM_SEARCH_DEFAULT;
+       return drm_mm_insert_node_in_range_generic(&vm->mm, node,
+                                                  size, alignment, color,
+                                                  start, end,
+                                                  search_flag, alloc_flag);
+}
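
The gen6 page-directory allocation converted earlier in this patch is the first caller; a hedged sketch of the general shape (alloc_low_node is hypothetical):

/* Hypothetical caller: grab one GTT page anywhere in the low 4 GiB,
 * falling back to random replacement and then eviction as above.
 */
static int alloc_low_node(struct i915_address_space *vm,
			  struct drm_mm_node *node)
{
	return i915_gem_gtt_insert(vm, node,
				   I915_GTT_PAGE_SIZE, 0,
				   I915_COLOR_UNEVICTABLE,
				   0, 1ull << 32,
				   PIN_NONBLOCK);
}
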
index 9e91d7e6149c7874898ed70b16e272b8a0149487..3c5ef5358cef7f943cd2acf8e14c35e8c8d1fb3e 100644 (file)
@@ -40,6 +40,9 @@
 #include "i915_gem_timeline.h"
 #include "i915_gem_request.h"
 
+#define I915_GTT_PAGE_SIZE 4096UL
+#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
+
 #define I915_FENCE_REG_NONE -1
 #define I915_MAX_NUM_FENCES 32
 /* 32 fences + sign bit for FENCE_REG_NONE */
@@ -142,34 +145,57 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
 
 struct sg_table;
 
-enum i915_ggtt_view_type {
-       I915_GGTT_VIEW_NORMAL = 0,
-       I915_GGTT_VIEW_ROTATED,
-       I915_GGTT_VIEW_PARTIAL,
-};
-
 struct intel_rotation_info {
-       struct {
+       struct intel_rotation_plane_info {
                /* tiles */
                unsigned int width, height, stride, offset;
        } plane[2];
+} __packed;
+
+static inline void assert_intel_rotation_info_is_packed(void)
+{
+       BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
+}
+
+struct intel_partial_info {
+       u64 offset;
+       unsigned int size;
+} __packed;
+
+static inline void assert_intel_partial_info_is_packed(void)
+{
+       BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
+}
+
+enum i915_ggtt_view_type {
+       I915_GGTT_VIEW_NORMAL = 0,
+       I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
+       I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
 };
 
+static inline void assert_i915_ggtt_view_type_is_unique(void)
+{
+       /* As we encode the size of each union branch into its type value,
+        * we have to be careful that each branch has a unique size.
+        */
+       switch ((enum i915_ggtt_view_type)0) {
+       case I915_GGTT_VIEW_NORMAL:
+       case I915_GGTT_VIEW_PARTIAL:
+       case I915_GGTT_VIEW_ROTATED:
+               /* gcc complains if these are identical cases */
+               break;
+       }
+}
+
 struct i915_ggtt_view {
        enum i915_ggtt_view_type type;
-
        union {
-               struct {
-                       u64 offset;
-                       unsigned int size;
-               } partial;
+               /* Members need to contain no holes/padding */
+               struct intel_partial_info partial;
                struct intel_rotation_info rotated;
-       } params;
+       };
 };
 
-extern const struct i915_ggtt_view i915_ggtt_view_normal;
-extern const struct i915_ggtt_view i915_ggtt_view_rotated;
-
 enum i915_cache_level;
 
 struct i915_vma;
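
Because each view type's enum value equals the size of its union branch, two views can be compared with a single memcmp of type-many bytes. A user-space model of that invariant (the struct layouts below only mimic the real ones):

#include <assert.h>
#include <string.h>

struct partial { unsigned long long offset; unsigned int size; } __attribute__((packed));
struct rotated { unsigned int plane[8]; } __attribute__((packed));

enum view_type {
	VIEW_NORMAL = 0,			/* nothing to compare */
	VIEW_ROTATED = sizeof(struct rotated),	/* 32 bytes */
	VIEW_PARTIAL = sizeof(struct partial),	/* 12 bytes */
};

struct view {
	enum view_type type;
	union {
		struct partial partial;
		struct rotated rotated;
	};
};

static int compare_view(const struct view *a, const struct view *b)
{
	if (a->type != b->type)
		return a->type - b->type;
	/* a->type bytes cover exactly the active union member */
	return memcmp(&a->partial, &b->partial, a->type);
}

int main(void)
{
	struct view a = { .type = VIEW_PARTIAL, .partial = { 4096, 1 } },
		    b = a;

	assert(compare_view(&a, &b) == 0);
	b.partial.size = 2;
	assert(compare_view(&a, &b) != 0);
	return 0;
}
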
@@ -333,6 +359,7 @@ struct i915_ggtt {
 
        /** "Graphics Stolen Memory" holds the global PTEs */
        void __iomem *gsm;
+       void (*invalidate)(struct drm_i915_private *dev_priv);
 
        bool do_idle_maps;
 
@@ -501,6 +528,8 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
+void i915_ggtt_enable_guc(struct drm_i915_private *i915);
+void i915_ggtt_disable_guc(struct drm_i915_private *i915);
 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
 
@@ -509,6 +538,7 @@ void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
                                        struct drm_i915_file_private *fpriv,
                                        const char *name);
+void i915_ppgtt_close(struct i915_address_space *vm);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
        if (ppgtt)
@@ -529,6 +559,16 @@ int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages);
 
+int i915_gem_gtt_reserve(struct i915_address_space *vm,
+                        struct drm_mm_node *node,
+                        u64 size, u64 offset, unsigned long color,
+                        unsigned int flags);
+
+int i915_gem_gtt_insert(struct i915_address_space *vm,
+                       struct drm_mm_node *node,
+                       u64 size, u64 alignment, unsigned long color,
+                       u64 start, u64 end, unsigned int flags);
+
 /* Flags used by pin/bind&friends. */
 #define PIN_NONBLOCK           BIT(0)
 #define PIN_MAPPABLE           BIT(1)
@@ -543,6 +583,6 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
 #define PIN_HIGH               BIT(9)
 #define PIN_OFFSET_BIAS                BIT(10)
 #define PIN_OFFSET_FIXED       BIT(11)
-#define PIN_OFFSET_MASK                (~4095)
+#define PIN_OFFSET_MASK                (-I915_GTT_PAGE_SIZE)
 
 #endif
index 2222863e505fd67b970e2a70005565ef01673498..17ce53d0d092fbeb60a8a52d7839887613490de2 100644 (file)
@@ -151,10 +151,16 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
  */
 struct drm_i915_gem_object *
 i915_gem_object_create_internal(struct drm_i915_private *i915,
-                               unsigned int size)
+                               phys_addr_t size)
 {
        struct drm_i915_gem_object *obj;
 
+       GEM_BUG_ON(!size);
+       GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+       if (overflows_type(size, obj->base.size))
+               return ERR_PTR(-E2BIG);
+
        obj = i915_gem_object_alloc(i915);
        if (!obj)
                return ERR_PTR(-ENOMEM);
index 6a368de9d81e7be2d6e775b3cca7c7187e6a4c88..290eaa7fc9eb9243524b4be6818242fe5a47715c 100644 (file)
@@ -317,6 +317,29 @@ i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
        return obj->tiling_and_stride & STRIDE_MASK;
 }
 
+static inline unsigned int
+i915_gem_tile_height(unsigned int tiling)
+{
+       GEM_BUG_ON(!tiling);
+       return tiling == I915_TILING_Y ? 32 : 8;
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
+{
+       return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
+}
+
+static inline unsigned int
+i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
+{
+       return (i915_gem_object_get_stride(obj) *
+               i915_gem_object_get_tile_height(obj));
+}
+
+int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+                              unsigned int tiling, unsigned int stride);
+
 static inline struct intel_engine_cs *
 i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
 {
index 5af19b0bf71366e7471fa5805104a5e47839c629..b42c81b42487aabe43961770772219f6bf0e31f8 100644 (file)
@@ -187,20 +187,20 @@ int i915_gem_render_state_init(struct intel_engine_cs *engine)
        if (!rodata)
                return 0;
 
-       if (rodata->batch_items * 4 > 4096)
+       if (rodata->batch_items * 4 > PAGE_SIZE)
                return -EINVAL;
 
        so = kmalloc(sizeof(*so), GFP_KERNEL);
        if (!so)
                return -ENOMEM;
 
-       obj = i915_gem_object_create_internal(engine->i915, 4096);
+       obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                ret = PTR_ERR(obj);
                goto err_free;
        }
 
-       so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       so->vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
        if (IS_ERR(so->vma)) {
                ret = PTR_ERR(so->vma);
                goto err_obj;
index 99056b948edabe5d1945573a192727fd56af5454..72b7f7d9461def216a4283267de74d32384bef43 100644 (file)
@@ -307,26 +307,6 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
        } while (tmp != req);
 }
 
-static int i915_gem_check_wedge(struct drm_i915_private *dev_priv)
-{
-       struct i915_gpu_error *error = &dev_priv->gpu_error;
-
-       if (i915_terminally_wedged(error))
-               return -EIO;
-
-       if (i915_reset_in_progress(error)) {
-               /* Non-interruptible callers can't handle -EAGAIN, hence return
-                * -EIO unconditionally for these.
-                */
-               if (!dev_priv->mm.interruptible)
-                       return -EIO;
-
-               return -EAGAIN;
-       }
-
-       return 0;
-}
-
 static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
 {
        struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
@@ -521,12 +501,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
-        * and restart.
+        * EIO if the GPU is already wedged.
         */
-       ret = i915_gem_check_wedge(dev_priv);
-       if (ret)
-               return ERR_PTR(ret);
+       if (i915_terminally_wedged(&dev_priv->gpu_error))
+               return ERR_PTR(-EIO);
 
        /* Pinning the contexts may generate requests in order to acquire
         * GGTT space, so do this first before we reserve a seqno for
@@ -851,6 +829,13 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
        lockdep_assert_held(&request->i915->drm.struct_mutex);
        trace_i915_gem_request_add(request);
 
+       /* Make sure that no request gazumped us - if it was allocated after
+        * our i915_gem_request_alloc() and called __i915_add_request() before
+        * us, the timeline will hold its seqno which is later than ours.
+        */
+       GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
+                                    request->fence.seqno));
+
        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
index f1a1d33febcdf56f08c75a6e325ee9e012c3a8ba..127d698e7c848fb9deca09670d42df8724aa1f93 100644 (file)
@@ -647,8 +647,9 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
                        stolen_offset, gtt_offset, size);
 
        /* KISS and expect everything to be page-aligned */
-       if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
-           WARN_ON(stolen_offset & 4095))
+       if (WARN_ON(size == 0) ||
+           WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+           WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
                return NULL;
 
        stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -682,7 +683,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
        if (ret)
                goto err;
 
-       vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, NULL);
+       vma = i915_vma_instance(obj, &ggtt->base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_pages;
@@ -693,15 +694,16 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
         * setting up the GTT space. The actual reservation will occur
         * later.
         */
-       vma->node.start = gtt_offset;
-       vma->node.size = size;
-
-       ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+       ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
+                                  size, gtt_offset, obj->cache_level,
+                                  0);
        if (ret) {
                DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
                goto err_pages;
        }
 
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
        vma->pages = obj->mm.pages;
        vma->flags |= I915_VMA_GLOBAL_BIND;
        __i915_vma_set_map_and_fenceable(vma);
index 62ad375de6cac9eeb098355939405350e406364a..b1361cfd4c5c2e0d6fb7cb1b34c33e87036e0a24 100644 (file)
@@ -34,8 +34,8 @@
 /**
  * DOC: buffer object tiling
  *
- * i915_gem_set_tiling() and i915_gem_get_tiling() is the userspace interface to
- * declare fence register requirements.
+ * i915_gem_set_tiling_ioctl() and i915_gem_get_tiling_ioctl() are the
+ * userspace interface to declare fence register requirements.
  *
  * In principle GEM doesn't care at all about the internal data layout of an
  * object, and hence it also doesn't care about tiling or swizzling. There's two
  * involvement.
  */
 
+/**
+ * i915_gem_fence_size - required global GTT size for a fence
+ * @i915: i915 device
+ * @size: object size
+ * @tiling: tiling mode
+ * @stride: tiling stride
+ *
+ * Return the required global GTT size for a fence (view of a tiled object),
+ * taking into account potential fence register mapping.
+ */
+u32 i915_gem_fence_size(struct drm_i915_private *i915,
+                       u32 size, unsigned int tiling, unsigned int stride)
+{
+       u32 ggtt_size;
+
+       GEM_BUG_ON(!size);
+
+       if (tiling == I915_TILING_NONE)
+               return size;
+
+       GEM_BUG_ON(!stride);
+
+       if (INTEL_GEN(i915) >= 4) {
+               stride *= i915_gem_tile_height(tiling);
+               GEM_BUG_ON(!IS_ALIGNED(stride, I965_FENCE_PAGE));
+               return roundup(size, stride);
+       }
+
+       /* Previous chips need a power-of-two fence region when tiling */
+       if (IS_GEN3(i915))
+               ggtt_size = 1024*1024;
+       else
+               ggtt_size = 512*1024;
+
+       while (ggtt_size < size)
+               ggtt_size <<= 1;
+
+       return ggtt_size;
+}
+
+/**
+ * i915_gem_fence_alignment - required global GTT alignment for a fence
+ * @i915: i915 device
+ * @size: object size
+ * @tiling: tiling mode
+ * @stride: tiling stride
+ *
+ * Return the required global GTT alignment for a fence (a view of a tiled
+ * object), taking into account potential fence register mapping.
+ */
+u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size,
+                            unsigned int tiling, unsigned int stride)
+{
+       GEM_BUG_ON(!size);
+
+       /*
+        * Minimum alignment is 4k (GTT page size), but might be greater
+        * if a fence register is needed for the object.
+        */
+       if (tiling == I915_TILING_NONE)
+               return I915_GTT_MIN_ALIGNMENT;
+
+       if (INTEL_GEN(i915) >= 4)
+               return I965_FENCE_PAGE;
+
+       /*
+        * Previous chips need to be aligned to the size of the smallest
+        * fence register that can contain the object.
+        */
+       return i915_gem_fence_size(i915, size, tiling, stride);
+}
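
A worked check of the two sizing rules above, for a 150 KiB Y-tiled object at 512-byte stride (stand-alone arithmetic, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* gen4+: round up to a whole tile row (stride * tile height) */
	uint32_t stride = 512, height = 32;	/* Y tiling -> 32-row tiles */
	uint32_t size = 150 << 10;
	uint32_t row = stride * height;		/* 16 KiB per row */
	uint32_t fence4 = (size + row - 1) / row * row;

	assert(fence4 == 160 << 10);	/* 150 KiB rounds up to 10 rows */

	/* gen3: power-of-two region of at least 1 MiB */
	uint32_t fence3 = 1 << 20;
	while (fence3 < size)
		fence3 <<= 1;
	assert(fence3 == 1 << 20);
	return 0;
}
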
+
 /* Check pitch constraints for all chips & tiling formats */
 static bool
-i915_tiling_ok(struct drm_i915_private *dev_priv,
-              int stride, int size, int tiling_mode)
+i915_tiling_ok(struct drm_i915_gem_object *obj,
+              unsigned int tiling, unsigned int stride)
 {
-       int tile_width;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned int tile_width;
 
        /* Linear is always fine */
-       if (tiling_mode == I915_TILING_NONE)
+       if (tiling == I915_TILING_NONE)
                return true;
 
-       if (tiling_mode > I915_TILING_LAST)
+       if (tiling > I915_TILING_LAST)
                return false;
 
-       if (IS_GEN2(dev_priv) ||
-           (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev_priv)))
-               tile_width = 128;
-       else
-               tile_width = 512;
-
        /* check maximum stride & object size */
        /* i965+ stores the end address of the gtt mapping in the fence
         * reg, so don't bother to check the size */
-       if (INTEL_GEN(dev_priv) >= 7) {
+       if (INTEL_GEN(i915) >= 7) {
                if (stride / 128 > GEN7_FENCE_MAX_PITCH_VAL)
                        return false;
-       } else if (INTEL_GEN(dev_priv) >= 4) {
+       } else if (INTEL_GEN(i915) >= 4) {
                if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
                        return false;
        } else {
                if (stride > 8192)
                        return false;
 
-               if (IS_GEN3(dev_priv)) {
-                       if (size > I830_FENCE_MAX_SIZE_VAL << 20)
+               if (IS_GEN3(i915)) {
+                       if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 20)
                                return false;
                } else {
-                       if (size > I830_FENCE_MAX_SIZE_VAL << 19)
+                       if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 19)
                                return false;
                }
        }
 
-       if (stride < tile_width)
+       if (IS_GEN2(i915) ||
+           (tiling == I915_TILING_Y && HAS_128_BYTE_Y_TILING(i915)))
+               tile_width = 128;
+       else
+               tile_width = 512;
+
+       if (!IS_ALIGNED(stride, tile_width))
                return false;
 
        /* 965+ just needs multiples of tile width */
-       if (INTEL_GEN(dev_priv) >= 4) {
-               if (stride & (tile_width - 1))
-                       return false;
+       if (INTEL_GEN(i915) >= 4)
                return true;
-       }
 
        /* Pre-965 needs power of two tile widths */
-       if (stride & (stride - 1))
-               return false;
-
-       return true;
+       return is_power_of_2(stride);
 }
 
-static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
+static bool i915_vma_fence_prepare(struct i915_vma *vma,
+                                  int tiling_mode, unsigned int stride)
 {
-       struct drm_i915_private *dev_priv = vma->vm->i915;
-       u32 size;
+       struct drm_i915_private *i915 = vma->vm->i915;
+       u32 size, alignment;
 
        if (!i915_vma_is_map_and_fenceable(vma))
                return true;
 
-       if (INTEL_GEN(dev_priv) == 3) {
-               if (vma->node.start & ~I915_FENCE_START_MASK)
-                       return false;
-       } else {
-               if (vma->node.start & ~I830_FENCE_START_MASK)
-                       return false;
-       }
-
-       size = i915_gem_get_ggtt_size(dev_priv, vma->size, tiling_mode);
+       size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
        if (vma->node.size < size)
                return false;
 
-       if (vma->node.start & (size - 1))
+       alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
+       if (!IS_ALIGNED(vma->node.start, alignment))
                return false;
 
        return true;
@@ -145,20 +206,20 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma, int tiling_mode)
 
 /* Make the current GTT allocation valid for the change in tiling. */
 static int
-i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj,
+                             int tiling_mode, unsigned int stride)
 {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_vma *vma;
        int ret;
 
        if (tiling_mode == I915_TILING_NONE)
                return 0;
 
-       if (INTEL_GEN(dev_priv) >= 4)
-               return 0;
-
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
-               if (i915_vma_fence_prepare(vma, tiling_mode))
+               if (!i915_vma_is_ggtt(vma))
+                       break;
+
+               if (i915_vma_fence_prepare(vma, tiling_mode, stride))
                        continue;
 
                ret = i915_vma_unbind(vma);
@@ -169,8 +230,100 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
        return 0;
 }
 
+int
+i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
+                          unsigned int tiling, unsigned int stride)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct i915_vma *vma;
+       int err;
+
+       /* Make sure we don't cross-contaminate obj->tiling_and_stride */
+       BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+
+       GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride));
+       GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE));
+       lockdep_assert_held(&i915->drm.struct_mutex);
+
+       if ((tiling | stride) == obj->tiling_and_stride)
+               return 0;
+
+       if (obj->framebuffer_references)
+               return -EBUSY;
+
+       /* We need to rebind the object if its current allocation
+        * no longer meets the alignment restrictions for its new
+        * tiling mode. Otherwise we can just leave it alone, but
+        * need to ensure that any fence register is updated before
+        * the next fenced (either through the GTT or by the BLT unit
+        * on older GPUs) access.
+        *
+        * After updating the tiling parameters, we then flag whether
+        * we need to update an associated fence register. Note this
+        * has to also include the unfenced register the GPU uses
+        * whilst executing a fenced command for an untiled object.
+        */
+
+       err = i915_gem_object_fence_prepare(obj, tiling, stride);
+       if (err)
+               return err;
+
+       /* If the memory has unknown (i.e. varying) swizzling, we pin the
+        * pages to prevent them being swapped out and causing corruption
+        * due to the change in swizzling.
+        */
+       mutex_lock(&obj->mm.lock);
+       if (obj->mm.pages &&
+           obj->mm.madv == I915_MADV_WILLNEED &&
+           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+               if (tiling == I915_TILING_NONE) {
+                       GEM_BUG_ON(!obj->mm.quirked);
+                       __i915_gem_object_unpin_pages(obj);
+                       obj->mm.quirked = false;
+               }
+               if (!i915_gem_object_is_tiled(obj)) {
+                       GEM_BUG_ON(!obj->mm.quirked);
+                       __i915_gem_object_pin_pages(obj);
+                       obj->mm.quirked = true;
+               }
+       }
+       mutex_unlock(&obj->mm.lock);
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!i915_vma_is_ggtt(vma))
+                       break;
+
+               vma->fence_size =
+                       i915_gem_fence_size(i915, vma->size, tiling, stride);
+               vma->fence_alignment =
+                       i915_gem_fence_alignment(i915,
+                                                vma->size, tiling, stride);
+
+               if (vma->fence)
+                       vma->fence->dirty = true;
+       }
+
+       obj->tiling_and_stride = tiling | stride;
+
+       /* Force the fence to be reacquired for GTT access */
+       i915_gem_release_mmap(obj);
+
+       /* Try to preallocate memory required to save swizzling on put-pages */
+       if (i915_gem_object_needs_bit17_swizzle(obj)) {
+               if (!obj->bit_17) {
+                       obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
+                                             sizeof(long), GFP_KERNEL);
+               }
+       } else {
+               kfree(obj->bit_17);
+               obj->bit_17 = NULL;
+       }
+
+       return 0;
+}
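
With the ioctl body split out, other kernel code can change tiling without going through the uAPI path. A hypothetical in-file caller, sketched from the ioctl below; the function name and the X-tiling choice are illustrative only:

/* Hypothetical caller: arguments must already satisfy i915_tiling_ok(),
 * and the helper itself must run under struct_mutex.
 */
static int example_make_x_tiled(struct drm_i915_gem_object *obj,
				unsigned int stride)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	if (!i915_tiling_ok(obj, I915_TILING_X, stride))
		return -EINVAL;

	err = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (err)
		return err;

	err = i915_gem_object_set_tiling(obj, I915_TILING_X, stride);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
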
+
 /**
- * i915_gem_set_tiling - IOCTL handler to set tiling mode
+ * i915_gem_set_tiling_ioctl - IOCTL handler to set tiling mode
  * @dev: DRM device
  * @data: data pointer for the ioctl
  * @file: DRM file for the ioctl call
@@ -184,30 +337,19 @@ i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode)
  * Zero on success, negative errno on failure.
  */
 int
-i915_gem_set_tiling(struct drm_device *dev, void *data,
-                  struct drm_file *file)
+i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file)
 {
        struct drm_i915_gem_set_tiling *args = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
-       int err = 0;
-
-       /* Make sure we don't cross-contaminate obj->tiling_and_stride */
-       BUILD_BUG_ON(I915_TILING_LAST & STRIDE_MASK);
+       int err;
 
        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;
 
-       if (!i915_tiling_ok(dev_priv,
-                           args->stride, obj->base.size, args->tiling_mode)) {
-               i915_gem_object_put(obj);
-               return -EINVAL;
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       if (obj->pin_display || obj->framebuffer_references) {
-               err = -EBUSY;
+       if (!i915_tiling_ok(obj, args->tiling_mode, args->stride)) {
+               err = -EINVAL;
                goto err;
        }
 
@@ -216,9 +358,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                args->stride = 0;
        } else {
                if (args->tiling_mode == I915_TILING_X)
-                       args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+                       args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x;
                else
-                       args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+                       args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y;
 
                /* Hide bit 17 swizzling from the user.  This prevents old Mesa
                 * from aborting the application on sw fallbacks to bit 17,
@@ -240,79 +382,24 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                }
        }
 
-       if (args->tiling_mode != i915_gem_object_get_tiling(obj) ||
-           args->stride != i915_gem_object_get_stride(obj)) {
-               /* We need to rebind the object if its current allocation
-                * no longer meets the alignment restrictions for its new
-                * tiling mode. Otherwise we can just leave it alone, but
-                * need to ensure that any fence register is updated before
-                * the next fenced (either through the GTT or by the BLT unit
-                * on older GPUs) access.
-                *
-                * After updating the tiling parameters, we then flag whether
-                * we need to update an associated fence register. Note this
-                * has to also include the unfenced register the GPU uses
-                * whilst executing a fenced command for an untiled object.
-                */
+       err = mutex_lock_interruptible(&dev->struct_mutex);
+       if (err)
+               goto err;
 
-               err = i915_gem_object_fence_prepare(obj, args->tiling_mode);
-               if (!err) {
-                       struct i915_vma *vma;
-
-                       mutex_lock(&obj->mm.lock);
-                       if (obj->mm.pages &&
-                           obj->mm.madv == I915_MADV_WILLNEED &&
-                           dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
-                               if (args->tiling_mode == I915_TILING_NONE) {
-                                       GEM_BUG_ON(!obj->mm.quirked);
-                                       __i915_gem_object_unpin_pages(obj);
-                                       obj->mm.quirked = false;
-                               }
-                               if (!i915_gem_object_is_tiled(obj)) {
-                                       GEM_BUG_ON(!obj->mm.quirked);
-                                       __i915_gem_object_pin_pages(obj);
-                                       obj->mm.quirked = true;
-                               }
-                       }
-                       mutex_unlock(&obj->mm.lock);
-
-                       list_for_each_entry(vma, &obj->vma_list, obj_link) {
-                               if (!vma->fence)
-                                       continue;
-
-                               vma->fence->dirty = true;
-                       }
-                       obj->tiling_and_stride =
-                               args->stride | args->tiling_mode;
-
-                       /* Force the fence to be reacquired for GTT access */
-                       i915_gem_release_mmap(obj);
-               }
-       }
-       /* we have to maintain this existing ABI... */
+       err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride);
+       mutex_unlock(&dev->struct_mutex);
+
+       /* We have to maintain this existing ABI... */
        args->stride = i915_gem_object_get_stride(obj);
        args->tiling_mode = i915_gem_object_get_tiling(obj);
 
-       /* Try to preallocate memory required to save swizzling on put-pages */
-       if (i915_gem_object_needs_bit17_swizzle(obj)) {
-               if (obj->bit_17 == NULL) {
-                       obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
-                                             sizeof(long), GFP_KERNEL);
-               }
-       } else {
-               kfree(obj->bit_17);
-               obj->bit_17 = NULL;
-       }
-
 err:
        i915_gem_object_put(obj);
-       mutex_unlock(&dev->struct_mutex);
-
        return err;
 }
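
The BUILD_BUG_ON moved into i915_gem_object_set_tiling() guarantees that tiling bits and stride bits never overlap, so obj->tiling_and_stride can hold both losslessly. A hedged user-space sketch of that packing; the mask values are illustrative, while the real driver derives them from the minimum fence stride:

#include <assert.h>

#define EX_TILING_MASK	0x7fu		/* assumed: tiling modes below bit 7 */
#define EX_STRIDE_MASK	(~EX_TILING_MASK)

int main(void)
{
	unsigned int tiling = 1;		/* e.g. I915_TILING_X */
	unsigned int stride = 4096;		/* multiple of the tile row size */
	unsigned int packed = tiling | stride;	/* as in obj->tiling_and_stride */

	assert((packed & EX_TILING_MASK) == tiling);
	assert((packed & EX_STRIDE_MASK) == stride);
	return 0;
}
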
 
 /**
- * i915_gem_get_tiling - IOCTL handler to get tiling mode
+ * i915_gem_get_tiling_ioctl - IOCTL handler to get tiling mode
  * @dev: DRM device
  * @data: data pointer for the ioctl
  * @file: DRM file for the ioctl call
@@ -325,8 +412,8 @@ err:
  * Zero on success, negative errno on failure.
  */
 int
-i915_gem_get_tiling(struct drm_device *dev, void *data,
-                  struct drm_file *file)
+i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
+                         struct drm_file *file)
 {
        struct drm_i915_gem_get_tiling *args = data;
        struct drm_i915_private *dev_priv = to_i915(dev);
index 396c6f0fd0334e5274529b1b633090f12c0cc5b3..9cd22cda17afbc0d24b5def0d50e04c56c81101a 100644 (file)
@@ -121,6 +121,7 @@ static void __i915_error_advance(struct drm_i915_error_state_buf *e,
        e->pos += len;
 }
 
+__printf(2, 0)
 static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
                               const char *f, va_list args)
 {
index 6a0adafe05234c89bb936212f4144ddf3217144a..35cf9918d09a44abf759c267665ce837bec270e8 100644 (file)
 #define   DMA_ADDRESS_SPACE_GTT                  (8 << 16)
 #define DMA_COPY_SIZE                  _MMIO(0xc310)
 #define DMA_CTRL                       _MMIO(0xc314)
+#define   HUC_UKERNEL                    (1<<9)
 #define   UOS_MOVE                       (1<<4)
 #define   START_DMA                      (1<<0)
 #define DMA_GUC_WOPCM_OFFSET           _MMIO(0xc340)
+#define   HUC_LOADING_AGENT_VCR                  (0<<1)
+#define   HUC_LOADING_AGENT_GUC                  (1<<1)
 #define   GUC_WOPCM_OFFSET_VALUE         0x80000       /* 512KB */
 #define GUC_MAX_IDLE_COUNT             _MMIO(0xC3E4)
 
+#define HUC_STATUS2             _MMIO(0xD3B0)
+#define   HUC_FW_VERIFIED       (1<<7)
+
 /* Defines WOPCM space available to GuC firmware */
 #define GUC_WOPCM_SIZE                 _MMIO(0xc050)
 /* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
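
The hunk above only introduces the HuC register definitions; a hedged sketch of how loading code might consult the verification bit (the helper name is hypothetical, and the real check lives elsewhere in the HuC series):

/* Hypothetical, assuming the usual i915_drv.h context for I915_READ(). */
static bool example_huc_is_verified(struct drm_i915_private *dev_priv)
{
	return (I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED) != 0;
}
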
index 710fbb9fc63fe567ab10ada8c6423205f7dbf59f..8ced9e26f0758d061bb1ab307a8a6f541cc7e3f6 100644 (file)
@@ -22,8 +22,6 @@
  *
  */
 #include <linux/circ_buf.h>
-#include <linux/debugfs.h>
-#include <linux/relay.h>
 #include "i915_drv.h"
 #include "intel_uc.h"
 
@@ -545,7 +543,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
  */
 
 /**
- * guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
+ * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
  * @guc:       the guc
  * @size:      size of area to allocate (both virtual space and memory)
  *
@@ -557,7 +555,7 @@ static void i915_guc_submit(struct drm_i915_gem_request *rq)
  *
  * Return:     A i915_vma if successful, otherwise an ERR_PTR.
  */
-static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
 {
        struct drm_i915_private *dev_priv = guc_to_i915(guc);
        struct drm_i915_gem_object *obj;
@@ -568,7 +566,7 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
        if (IS_ERR(obj))
                return ERR_CAST(obj);
 
-       vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
        if (IS_ERR(vma))
                goto err;
 
@@ -579,9 +577,6 @@ static struct i915_vma *guc_allocate_vma(struct intel_guc *guc, u32 size)
                goto err;
        }
 
-       /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
-       I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-
        return vma;
 
 err:
@@ -721,7 +716,7 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
        }
 
        /* The first page is the doorbell/proc_desc; the following two pages are the wq. */
-       vma = guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
+       vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
        if (IS_ERR(vma))
                goto err;
 
@@ -777,488 +772,7 @@ err:
        return NULL;
 }
 
-/*
- * Sub buffer switch callback. Called whenever relay has to switch to a new
- * sub buffer, relay stays on the same sub buffer if 0 is returned.
- */
-static int subbuf_start_callback(struct rchan_buf *buf,
-                                void *subbuf,
-                                void *prev_subbuf,
-                                size_t prev_padding)
-{
-       /* Use no-overwrite mode by default, where relay will stop accepting
-        * new data if there are no empty sub buffers left.
-        * There is no strict synchronization enforced by relay between Consumer
-        * and Producer. In overwrite mode, there is a possibility of getting
-        * inconsistent/garbled data, as the producer could be writing to the
-        * same sub buffer from which Consumer is reading. This can't be avoided
-        * unless Consumer is fast enough and can always run in tandem with
-        * Producer.
-        */
-       if (relay_buf_full(buf))
-               return 0;
-
-       return 1;
-}
-
-/*
- * file_create() callback. Creates relay file in debugfs.
- */
-static struct dentry *create_buf_file_callback(const char *filename,
-                                              struct dentry *parent,
-                                              umode_t mode,
-                                              struct rchan_buf *buf,
-                                              int *is_global)
-{
-       struct dentry *buf_file;
-
-       /* This is to enable the use of a single buffer for the relay channel and
-        * correspondingly have a single file exposed to User, through which
-        * it can collect the logs in order without any post-processing.
-        * Need to set 'is_global' even if parent is NULL for early logging.
-        */
-       *is_global = 1;
-
-       if (!parent)
-               return NULL;
-
-       /* Not using the channel filename passed as an argument, since for each
-        * channel relay appends the corresponding CPU number to the filename
-        * passed in relay_open(). This should be fine as relay just needs a
-        * dentry of the file associated with the channel buffer and that file's
-        * name need not be the same as the filename passed as an argument.
-        */
-       buf_file = debugfs_create_file("guc_log", mode,
-                                      parent, buf, &relay_file_operations);
-       return buf_file;
-}
-
-/*
- * file_remove() default callback. Removes relay file in debugfs.
- */
-static int remove_buf_file_callback(struct dentry *dentry)
-{
-       debugfs_remove(dentry);
-       return 0;
-}
-
-/* relay channel callbacks */
-static struct rchan_callbacks relay_callbacks = {
-       .subbuf_start = subbuf_start_callback,
-       .create_buf_file = create_buf_file_callback,
-       .remove_buf_file = remove_buf_file_callback,
-};
-
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
-       relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct rchan *guc_log_relay_chan;
-       size_t n_subbufs, subbuf_size;
-
-       /* Keep the size of sub buffers same as shared log buffer */
-       subbuf_size = guc->log.vma->obj->base.size;
-
-       /* Store up to 8 snapshots, which is large enough to buffer sufficient
-        * boot time logs and provides enough leeway to User, in terms of
-        * latency, for consuming the logs from relay. Also doesn't take
-        * up too much memory.
-        */
-       n_subbufs = 8;
-
-       guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
-                                       n_subbufs, &relay_callbacks, dev_priv);
-       if (!guc_log_relay_chan) {
-               DRM_ERROR("Couldn't create relay chan for GuC logging\n");
-               return -ENOMEM;
-       }
-
-       GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
-       guc->log.relay_chan = guc_log_relay_chan;
-       return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       struct dentry *log_dir;
-       int ret;
-
-       /* For now create the log file in /sys/kernel/debug/dri/0 dir */
-       log_dir = dev_priv->drm.primary->debugfs_root;
-
-       /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
-        * is not mounted and so we can't create the relay file.
-        * The relay API seems to fit well with debugfs only; to make use of
-        * relay there are three requirements, which can be met in a
-        * straightforward/clean manner only with a debugfs file :-
-        * i)   Need the associated dentry pointer of the file, while opening the
-        *      relay channel.
-        * ii)  Should be able to use 'relay_file_operations' fops for the file.
-        * iii) Set the 'i_private' field of file's inode to the pointer of
-        *      relay channel buffer.
-        */
-       if (!log_dir) {
-               DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
-               return -ENODEV;
-       }
-
-       ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
-       if (ret) {
-               DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
-               return ret;
-       }
-
-       return 0;
-}
-
-static void guc_move_to_next_buf(struct intel_guc *guc)
-{
-       /* Make sure the updates made in the sub buffer are visible when
-        * Consumer sees the following update to offset inside the sub buffer.
-        */
-       smp_wmb();
-
-       /* All data has been written, so now move the offset of sub buffer. */
-       relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
-
-       /* Switch to the next sub buffer */
-       relay_flush(guc->log.relay_chan);
-}
-
-static void *guc_get_write_buffer(struct intel_guc *guc)
-{
-       if (!guc->log.relay_chan)
-               return NULL;
-
-       /* Just get the base address of a new sub buffer and copy data into it
-        * ourselves. NULL will be returned in no-overwrite mode if all sub
-        * buffers are full. We could have used relay_write() to indirectly
-        * copy the data, but that would have been a bit convoluted, as we need
-        * to write to only certain locations inside a sub buffer, which cannot
-        * be done without using relay_reserve() along with relay_write(). So
-        * it's better to use relay_reserve() alone.
-        */
-       return relay_reserve(guc->log.relay_chan, 0);
-}
-
-static bool
-guc_check_log_buf_overflow(struct intel_guc *guc,
-                          enum guc_log_buffer_type type, unsigned int full_cnt)
-{
-       unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
-       bool overflow = false;
-
-       if (full_cnt != prev_full_cnt) {
-               overflow = true;
-
-               guc->log.prev_overflow_count[type] = full_cnt;
-               guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
-
-               if (full_cnt < prev_full_cnt) {
-                       /* buffer_full_cnt is a 4 bit counter */
-                       guc->log.total_overflow_count[type] += 16;
-               }
-               DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
-       }
-
-       return overflow;
-}
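
buffer_full_cnt in the helper above is a 4-bit hardware counter, so a sampled value below the previous one means the counter wrapped and 16 must be added to the running total. A stand-alone sketch of that accounting (unsigned wraparound makes the two-step kernel version equivalent to this):

#include <assert.h>

static unsigned int overflow_delta(unsigned int prev, unsigned int cur)
{
	unsigned int delta = cur - prev;

	if (cur < prev)
		delta += 16;	/* the 4-bit counter wrapped */
	return delta;
}

int main(void)
{
	assert(overflow_delta(3, 5) == 2);
	assert(overflow_delta(15, 1) == 2);	/* wrapped: 15 -> 0 -> 1 */
	return 0;
}
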
-
-static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
-{
-       switch (type) {
-       case GUC_ISR_LOG_BUFFER:
-               return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
-       case GUC_DPC_LOG_BUFFER:
-               return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
-       case GUC_CRASH_DUMP_LOG_BUFFER:
-               return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
-       default:
-               MISSING_CASE(type);
-       }
-
-       return 0;
-}
-
-static void guc_read_update_log_buffer(struct intel_guc *guc)
-{
-       unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
-       struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
-       struct guc_log_buffer_state log_buf_state_local;
-       enum guc_log_buffer_type type;
-       void *src_data, *dst_data;
-       bool new_overflow;
-
-       if (WARN_ON(!guc->log.buf_addr))
-               return;
-
-       /* Get the pointer to shared GuC log buffer */
-       log_buf_state = src_data = guc->log.buf_addr;
-
-       /* Get the pointer to local buffer to store the logs */
-       log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
-
-       /* Actual logs are present from the 2nd page */
-       src_data += PAGE_SIZE;
-       dst_data += PAGE_SIZE;
-
-       for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
-               /* Make a copy of the state structure, inside GuC log buffer
-                * (which is uncached mapped), on the stack to avoid reading
-                * from it multiple times.
-                */
-               memcpy(&log_buf_state_local, log_buf_state,
-                      sizeof(struct guc_log_buffer_state));
-               buffer_size = guc_get_log_buffer_size(type);
-               read_offset = log_buf_state_local.read_ptr;
-               write_offset = log_buf_state_local.sampled_write_ptr;
-               full_cnt = log_buf_state_local.buffer_full_cnt;
-
-               /* Bookkeeping stuff */
-               guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
-               new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
-
-               /* Update the state of shared log buffer */
-               log_buf_state->read_ptr = write_offset;
-               log_buf_state->flush_to_file = 0;
-               log_buf_state++;
-
-               if (unlikely(!log_buf_snapshot_state))
-                       continue;
-
-               /* First copy the state structure in snapshot buffer */
-               memcpy(log_buf_snapshot_state, &log_buf_state_local,
-                      sizeof(struct guc_log_buffer_state));
-
-               /* The write pointer could have been updated by the GuC firmware
-                * after sending the flush interrupt to the Host; for consistency,
-                * set the write pointer in the snapshot buffer to the same value
-                * as sampled_write_ptr.
-                */
-               log_buf_snapshot_state->write_ptr = write_offset;
-               log_buf_snapshot_state++;
-
-               /* Now copy the actual logs. */
-               if (unlikely(new_overflow)) {
-                       /* copy the whole buffer in case of overflow */
-                       read_offset = 0;
-                       write_offset = buffer_size;
-               } else if (unlikely((read_offset > buffer_size) ||
-                                   (write_offset > buffer_size))) {
-                       DRM_ERROR("invalid log buffer state\n");
-                       /* copy whole buffer as offsets are unreliable */
-                       read_offset = 0;
-                       write_offset = buffer_size;
-               }
-
-               /* Just copy the newly written data */
-               if (read_offset > write_offset) {
-                       i915_memcpy_from_wc(dst_data, src_data, write_offset);
-                       bytes_to_copy = buffer_size - read_offset;
-               } else {
-                       bytes_to_copy = write_offset - read_offset;
-               }
-               i915_memcpy_from_wc(dst_data + read_offset,
-                                   src_data + read_offset, bytes_to_copy);
-
-               src_data += buffer_size;
-               dst_data += buffer_size;
-       }
-
-       if (log_buf_snapshot_state)
-               guc_move_to_next_buf(guc);
-       else {
-               /* Use a rate-limited error to avoid a deluge of messages; logs
-                * might be getting consumed by the User at a slow rate.
-                */
-               DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
-               guc->log.capture_miss_count++;
-       }
-}
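
guc_read_update_log_buffer() treats each log section as a circular buffer: when read_offset > write_offset the new data spans the end of the buffer and its beginning, so two copies are needed. A minimal sketch of that copy logic, with plain memcpy() standing in for i915_memcpy_from_wc():

#include <stddef.h>
#include <string.h>

static void copy_new_log_data(void *dst, const void *src, size_t buf_size,
			      size_t read_off, size_t write_off)
{
	if (read_off > write_off) {
		/* Wrapped: copy [0, write_off) and then [read_off, buf_size). */
		memcpy(dst, src, write_off);
		memcpy((char *)dst + read_off, (const char *)src + read_off,
		       buf_size - read_off);
	} else {
		memcpy((char *)dst + read_off, (const char *)src + read_off,
		       write_off - read_off);
	}
}
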
-
-static void guc_capture_logs_work(struct work_struct *work)
-{
-       struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, guc.log.flush_work);
-
-       i915_guc_capture_logs(dev_priv);
-}
-
-static void guc_log_cleanup(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       /* First disable the flush interrupt */
-       gen9_disable_guc_interrupts(dev_priv);
 
-       if (guc->log.flush_wq)
-               destroy_workqueue(guc->log.flush_wq);
-
-       guc->log.flush_wq = NULL;
-
-       if (guc->log.relay_chan)
-               guc_log_remove_relay_file(guc);
-
-       guc->log.relay_chan = NULL;
-
-       if (guc->log.buf_addr)
-               i915_gem_object_unpin_map(guc->log.vma->obj);
-
-       guc->log.buf_addr = NULL;
-}
-
-static int guc_log_create_extras(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       void *vaddr;
-       int ret;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       /* Nothing to do */
-       if (i915.guc_log_level < 0)
-               return 0;
-
-       if (!guc->log.buf_addr) {
-               /* Create a WC (Uncached for read) vmalloc mapping of log
-                * buffer pages, so that we can directly get the data
-                * (up-to-date) from memory.
-                */
-               vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
-               if (IS_ERR(vaddr)) {
-                       ret = PTR_ERR(vaddr);
-                       DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
-                       return ret;
-               }
-
-               guc->log.buf_addr = vaddr;
-       }
-
-       if (!guc->log.relay_chan) {
-               /* Create a relay channel, so that we have buffers for storing
-                * the GuC firmware logs; the channel will be linked with a file
-                * later on when debugfs is registered.
-                */
-               ret = guc_log_create_relay_channel(guc);
-               if (ret)
-                       return ret;
-       }
-
-       if (!guc->log.flush_wq) {
-               INIT_WORK(&guc->log.flush_work, guc_capture_logs_work);
-
-                /*
-                * The GuC log buffer flush work item has to do register access
-                * to send the ack to GuC, and this work item, if not synced
-                * before suspend, can potentially get executed after the GFX
-                * device is suspended.
-                * By marking the WQ as freezable, we don't have to bother about
-                * flushing this work item from the suspend hooks; the pending
-                * work item, if any, will either be executed before the suspend
-                * or scheduled later on resume. This way the handling of the
-                * work item can be kept the same between system suspend & rpm
-                * suspend.
-                */
-               guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
-                                                           WQ_HIGHPRI | WQ_FREEZABLE);
-               if (guc->log.flush_wq == NULL) {
-                       DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-}
-
-static void guc_log_create(struct intel_guc *guc)
-{
-       struct i915_vma *vma;
-       unsigned long offset;
-       uint32_t size, flags;
-
-       if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
-               i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
-
-       /* The first page is to save log buffer state. Allocate one
-        * extra page for each in case of overlap */
-       size = (1 + GUC_LOG_DPC_PAGES + 1 +
-               GUC_LOG_ISR_PAGES + 1 +
-               GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
-
-       vma = guc->log.vma;
-       if (!vma) {
-               /* We require SSE 4.1 for fast reads from the GuC log buffer and
-                * it should be present on the chipsets supporting GuC based
-                * submissions.
-                */
-               if (WARN_ON(!i915_has_memcpy_from_wc())) {
-                       /* logging will not be enabled */
-                       i915.guc_log_level = -1;
-                       return;
-               }
-
-               vma = guc_allocate_vma(guc, size);
-               if (IS_ERR(vma)) {
-                       /* logging will be off */
-                       i915.guc_log_level = -1;
-                       return;
-               }
-
-               guc->log.vma = vma;
-
-               if (guc_log_create_extras(guc)) {
-                       guc_log_cleanup(guc);
-                       i915_vma_unpin_and_release(&guc->log.vma);
-                       i915.guc_log_level = -1;
-                       return;
-               }
-       }
-
-       /* each allocated unit is a page */
-       flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
-               (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
-               (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
-               (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
-
-       offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
-       guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
-}
-
-static int guc_log_late_setup(struct intel_guc *guc)
-{
-       struct drm_i915_private *dev_priv = guc_to_i915(guc);
-       int ret;
-
-       lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-       if (i915.guc_log_level < 0)
-               return -EINVAL;
-
-       /* If log_level was set to -1 at boot time, then the setup needed to
-        * handle log buffer flush interrupts would not have been done yet,
-        * so do that now.
-        */
-       ret = guc_log_create_extras(guc);
-       if (ret)
-               goto err;
-
-       ret = guc_log_create_relay_file(guc);
-       if (ret)
-               goto err;
-
-       return 0;
-err:
-       guc_log_cleanup(guc);
-       /* logging will remain off */
-       i915.guc_log_level = -1;
-       return ret;
-}
 
 static void guc_policies_init(struct guc_policies *policies)
 {
@@ -1301,7 +815,7 @@ static void guc_addon_create(struct intel_guc *guc)
 
        vma = guc->ads_vma;
        if (!vma) {
-               vma = guc_allocate_vma(guc, PAGE_ALIGN(size));
+               vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
                if (IS_ERR(vma))
                        return;
 
@@ -1376,13 +890,13 @@ int i915_guc_submission_init(struct drm_i915_private *dev_priv)
        if (guc->ctx_pool_vma)
                return 0; /* already allocated */
 
-       vma = guc_allocate_vma(guc, gemsize);
+       vma = intel_guc_allocate_vma(guc, gemsize);
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
        guc->ctx_pool_vma = vma;
        ida_init(&guc->ctx_ids);
-       guc_log_create(guc);
+       intel_guc_log_create(guc);
        guc_addon_create(guc);
 
        guc->execbuf_client = guc_client_alloc(dev_priv,
@@ -1484,7 +998,7 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
        struct i915_gem_context *ctx;
        u32 data[3];
 
-       if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+       if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
                return 0;
 
        gen9_disable_guc_interrupts(dev_priv);
@@ -1511,7 +1025,7 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
        struct i915_gem_context *ctx;
        u32 data[3];
 
-       if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
+       if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
                return 0;
 
        if (i915.guc_log_level >= 0)
@@ -1527,103 +1041,4 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
        return intel_guc_send(guc, data, ARRAY_SIZE(data));
 }
 
-void i915_guc_capture_logs(struct drm_i915_private *dev_priv)
-{
-       guc_read_update_log_buffer(&dev_priv->guc);
-
-       /* Generally the device is expected to be active only at this
-        * time, so get/put should be really quick.
-        */
-       intel_runtime_pm_get(dev_priv);
-       intel_guc_log_flush_complete(&dev_priv->guc);
-       intel_runtime_pm_put(dev_priv);
-}
-
-void i915_guc_flush_logs(struct drm_i915_private *dev_priv)
-{
-       if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
-               return;
-
-       /* First disable the interrupts; they will be re-enabled afterwards */
-       gen9_disable_guc_interrupts(dev_priv);
-
-       /* Before initiating the forceful flush, wait for any pending/ongoing
-        * flush to complete, otherwise the forceful flush may not actually happen.
-        */
-       flush_work(&dev_priv->guc.log.flush_work);
-
-       /* Ask GuC to update the log buffer state */
-       intel_guc_log_flush(&dev_priv->guc);
-
-       /* GuC would have updated log buffer by now, so capture it */
-       i915_guc_capture_logs(dev_priv);
-}
-
-void i915_guc_unregister(struct drm_i915_private *dev_priv)
-{
-       if (!i915.enable_guc_submission)
-               return;
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       guc_log_cleanup(&dev_priv->guc);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-void i915_guc_register(struct drm_i915_private *dev_priv)
-{
-       if (!i915.enable_guc_submission)
-               return;
-
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       guc_log_late_setup(&dev_priv->guc);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-}
-
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
-{
-       union guc_log_control log_param;
-       int ret;
-
-       log_param.value = control_val;
-
-       if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
-           log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
-               return -EINVAL;
-
-       /* This combination doesn't make sense & won't have any effect */
-       if (!log_param.logging_enabled && (i915.guc_log_level < 0))
-               return 0;
-
-       ret = intel_guc_log_control(&dev_priv->guc, log_param.value);
-       if (ret < 0) {
-               DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
-               return ret;
-       }
-
-       i915.guc_log_level = log_param.verbosity;
-
-       /* If log_level was set to -1 at boot time, then the relay channel file
-        * wouldn't have been created by now and interrupts also would not have
-        * been enabled.
-        */
-       if (!dev_priv->guc.log.relay_chan) {
-               ret = guc_log_late_setup(&dev_priv->guc);
-               if (!ret)
-                       gen9_enable_guc_interrupts(dev_priv);
-       } else if (!log_param.logging_enabled) {
-               /* Once logging is disabled, GuC won't generate logs or send an
-                * interrupt. But there could be some data in the log buffer
-                * which is yet to be captured. So request GuC to update the log
-                * buffer state and then collect the left over logs.
-                */
-               i915_guc_flush_logs(dev_priv);
-
-               /* As logging is disabled, update log level to reflect that */
-               i915.guc_log_level = -1;
-       } else {
-               /* In case interrupts were disabled, enable them now */
-               gen9_enable_guc_interrupts(dev_priv);
-       }
-
-       return ret;
-}
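
union guc_log_control overlays bitfields on the raw control value; the decode above relies on that. A hedged sketch of the idea, with field widths assumed (a 1-bit enable at bit 0, a 4-bit verbosity at bits 4-7); the authoritative layout is in intel_guc_fwif.h and the exact widths may differ:

#include <assert.h>
#include <stdint.h>

union guc_log_control_sketch {
	struct {
		uint32_t logging_enabled:1;	/* assumed bit 0 */
		uint32_t reserved1:3;
		uint32_t verbosity:4;		/* assumed bits 4-7 */
		uint32_t reserved2:24;
	};
	uint32_t value;
};

int main(void)
{
	/* Bitfield order is ABI-dependent; this matches common LE layouts. */
	union guc_log_control_sketch p = { .value = (2u << 4) | 1u };

	assert(p.logging_enabled == 1);
	assert(p.verbosity == 2);
	return 0;
}
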
index a0e70f5b3aad8e096ca0d191076887a505aed258..6fefc34ef602e9ee758c980bb37e2b1d63e6746c 100644 (file)
@@ -1170,6 +1170,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
+
+               if (new_delay >= dev_priv->rps.max_freq_softlimit)
+                       adj = 0;
                /*
                 * For better performance, jump directly
                 * to RPe if we're below it.
@@ -1191,6 +1194,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
                        adj *= 2;
                else /* CHV needs even encode values */
                        adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
+
+               if (new_delay <= dev_priv->rps.min_freq_softlimit)
+                       adj = 0;
        } else { /* unknown event */
                adj = 0;
        }
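
The two hunks above stop the exponential growth of the RPS step once the proposed frequency reaches a soft limit, so the step does not keep doubling while pinned at the boundary. A small stand-alone sketch of that guard:

#include <assert.h>

static int clamp_adj(int new_delay, int adj, int min_soft, int max_soft)
{
	if (adj > 0 && new_delay >= max_soft)
		return 0;
	if (adj < 0 && new_delay <= min_soft)
		return 0;
	return adj;
}

int main(void)
{
	assert(clamp_adj(1200, 4, 300, 1200) == 0);	/* pinned at max */
	assert(clamp_adj(600, 4, 300, 1200) == 4);	/* headroom remains */
	assert(clamp_adj(300, -2, 300, 1200) == 0);	/* pinned at min */
	return 0;
}
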
@@ -1553,41 +1559,68 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 {
        struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
        struct intel_pipe_crc_entry *entry;
+       struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+       struct drm_driver *driver = dev_priv->drm.driver;
+       uint32_t crcs[5];
        int head, tail;
 
        spin_lock(&pipe_crc->lock);
+       if (pipe_crc->source) {
+               if (!pipe_crc->entries) {
+                       spin_unlock(&pipe_crc->lock);
+                       DRM_DEBUG_KMS("spurious interrupt\n");
+                       return;
+               }
 
-       if (!pipe_crc->entries) {
-               spin_unlock(&pipe_crc->lock);
-               DRM_DEBUG_KMS("spurious interrupt\n");
-               return;
-       }
-
-       head = pipe_crc->head;
-       tail = pipe_crc->tail;
+               head = pipe_crc->head;
+               tail = pipe_crc->tail;
 
-       if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
-               spin_unlock(&pipe_crc->lock);
-               DRM_ERROR("CRC buffer overflowing\n");
-               return;
-       }
+               if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
+                       spin_unlock(&pipe_crc->lock);
+                       DRM_ERROR("CRC buffer overflowing\n");
+                       return;
+               }
 
-       entry = &pipe_crc->entries[head];
+               entry = &pipe_crc->entries[head];
 
-       entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
-                                                                pipe);
-       entry->crc[0] = crc0;
-       entry->crc[1] = crc1;
-       entry->crc[2] = crc2;
-       entry->crc[3] = crc3;
-       entry->crc[4] = crc4;
+               entry->frame = driver->get_vblank_counter(&dev_priv->drm, pipe);
+               entry->crc[0] = crc0;
+               entry->crc[1] = crc1;
+               entry->crc[2] = crc2;
+               entry->crc[3] = crc3;
+               entry->crc[4] = crc4;
 
-       head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
-       pipe_crc->head = head;
+               head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
+               pipe_crc->head = head;
 
-       spin_unlock(&pipe_crc->lock);
+               spin_unlock(&pipe_crc->lock);
 
-       wake_up_interruptible(&pipe_crc->wq);
+               wake_up_interruptible(&pipe_crc->wq);
+       } else {
+               /*
+                * For some not yet identified reason, the first CRC is
+                * bonkers. So let's just wait for the next vblank and read
+                * out the buggy result.
+                *
+                * On CHV sometimes the second CRC is bonkers as well, so
+                * don't trust that one either.
+                */
+               if (pipe_crc->skipped == 0 ||
+                   (IS_CHERRYVIEW(dev_priv) && pipe_crc->skipped == 1)) {
+                       pipe_crc->skipped++;
+                       spin_unlock(&pipe_crc->lock);
+                       return;
+               }
+               spin_unlock(&pipe_crc->lock);
+               crcs[0] = crc0;
+               crcs[1] = crc1;
+               crcs[2] = crc2;
+               crcs[3] = crc3;
+               crcs[4] = crc4;
+               drm_crtc_add_crc_entry(&crtc->base, true,
+                                      drm_accurate_vblank_count(&crtc->base),
+                                      crcs);
+       }
 }
 #else
 static inline void
@@ -2703,12 +2736,13 @@ static void i915_clear_error_registers(struct drm_i915_private *dev_priv)
  * i915_handle_error - handle a gpu error
  * @dev_priv: i915 device private
  * @engine_mask: mask representing engines that are hung
+ * @fmt: Error message format string
+ *
  * Do some basic checking of register state at error time and
  * dump it to the syslog.  Also call i915_capture_error_state() to make
  * sure we get a record and make it available in debugfs.  Fire a uevent
  * so userspace knows something bad happened (should trigger collection
  * of a ring dump etc.).
- * @fmt: Error message format string
  */
 void i915_handle_error(struct drm_i915_private *dev_priv,
                       u32 engine_mask,
index 00970aa77afa1fa5940da9aaca95dc09c32cac6c..72f9f36ae5ce57c22edc1f31064433d635dd9a8a 100644 (file)
@@ -3597,9 +3597,12 @@ enum {
 #define   EDP_PSR_PERF_CNT_MASK                0xffffff
 
 #define EDP_PSR_DEBUG_CTL              _MMIO(dev_priv->psr_mmio_base + 0x60)
-#define   EDP_PSR_DEBUG_MASK_LPSP      (1<<27)
-#define   EDP_PSR_DEBUG_MASK_MEMUP     (1<<26)
-#define   EDP_PSR_DEBUG_MASK_HPD       (1<<25)
+#define   EDP_PSR_DEBUG_MASK_MAX_SLEEP         (1<<28)
+#define   EDP_PSR_DEBUG_MASK_LPSP              (1<<27)
+#define   EDP_PSR_DEBUG_MASK_MEMUP             (1<<26)
+#define   EDP_PSR_DEBUG_MASK_HPD               (1<<25)
+#define   EDP_PSR_DEBUG_MASK_DISP_REG_WRITE    (1<<16)
+#define   EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
 
 #define EDP_PSR2_CTL                   _MMIO(0x6f900)
 #define   EDP_PSR2_ENABLE              (1<<31)
@@ -3614,6 +3617,11 @@ enum {
 #define   EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
 #define   EDP_PSR2_FRAME_BEFORE_SU_MASK        (0xf<<4)
 #define   EDP_PSR2_IDLE_MASK           0xf
+#define   EDP_FRAMES_BEFORE_SU_ENTRY   (1<<4)
+
+#define EDP_PSR2_STATUS_CTL            _MMIO(0x6f940)
+#define EDP_PSR2_STATUS_STATE_MASK     (0xf<<28)
+#define EDP_PSR2_STATUS_STATE_SHIFT    28
 
 /* VGA port control */
 #define ADPA                   _MMIO(0x61100)
@@ -6446,6 +6454,12 @@ enum {
 #define  BDW_DPRS_MASK_VBLANK_SRD      (1 << 0)
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
+#define CHICKEN_TRANS_A         0x420c0
+#define CHICKEN_TRANS_B         0x420c4
+#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define PSR2_VSC_ENABLE_PROG_HEADER    (1<<12)
+#define PSR2_ADD_VERTICAL_LINE_COUNT   (1<<15)
+
 #define DISP_ARB_CTL   _MMIO(0x45000)
 #define  DISP_FBC_MEMORY_WAKE          (1<<31)
 #define  DISP_TILE_SURFACE_SWIZZLING   (1<<13)
index f5a88092dacfa2523ce0e2161f8ec47dcacd4580..40f4e5efaf837e52ad299bf66790383e424975d8 100644 (file)
@@ -63,6 +63,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
 static inline void debug_fence_free(struct i915_sw_fence *fence)
 {
        debug_object_free(fence, &i915_sw_fence_debug_descr);
+       smp_wmb(); /* flush the change in state before reallocation */
 }
 
 static inline void debug_fence_assert(struct i915_sw_fence *fence)
index 40c0ac70d79d8b6aa3fc653af7311e3b478334bb..376ac957cd1c8fa528edc83b5682581ce7f50e36 100644 (file)
@@ -58,7 +58,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv,
 
                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
-       } else if (IS_BROXTON(dev_priv)) {
+       } else if (IS_GEN9_LP(dev_priv)) {
                units = 1;
                div = 1200;             /* 833.33ns */
        }
index 18ae37c411fd4226b13bb2192323543d77b76c65..4461df5a94feac71e65e8c4d1cc6d326492019a6 100644 (file)
@@ -450,9 +450,9 @@ TRACE_EVENT(i915_gem_evict_vm,
            TP_printk("dev=%d, vm=%p", __entry->dev, __entry->vm)
 );
 
-TRACE_EVENT(i915_gem_evict_vma,
-           TP_PROTO(struct i915_vma *vma, unsigned int flags),
-           TP_ARGS(vma, flags),
+TRACE_EVENT(i915_gem_evict_node,
+           TP_PROTO(struct i915_address_space *vm, struct drm_mm_node *node, unsigned int flags),
+           TP_ARGS(vm, node, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -464,11 +464,11 @@ TRACE_EVENT(i915_gem_evict_vma,
                            ),
 
            TP_fast_assign(
-                          __entry->dev = vma->vm->i915->drm.primary->index;
-                          __entry->vm = vma->vm;
-                          __entry->start = vma->node.start;
-                          __entry->size = vma->node.size;
-                          __entry->color = vma->node.color;
+                          __entry->dev = vm->i915->drm.primary->index;
+                          __entry->vm = vm;
+                          __entry->start = node->start;
+                          __entry->size = node->size;
+                          __entry->color = node->color;
                           __entry->flags = flags;
                          ),
 
index dae340cfc6c76f617795e248b74550fcc54bb0ae..d0abfd08a01c38e221f15301da8c8130ff5d8d0a 100644 (file)
@@ -116,22 +116,20 @@ void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
        memset(&bl_info, 0, sizeof(bl_info));
 }
 
-static int vgt_balloon_space(struct drm_mm *mm,
+static int vgt_balloon_space(struct i915_ggtt *ggtt,
                             struct drm_mm_node *node,
                             unsigned long start, unsigned long end)
 {
        unsigned long size = end - start;
 
-       if (start == end)
+       if (start >= end)
                return -EINVAL;
 
        DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
                 start, end, size / 1024);
-
-       node->start = start;
-       node->size = size;
-
-       return drm_mm_reserve_node(mm, node);
+       return i915_gem_gtt_reserve(&ggtt->base, node,
+                                   size, start, I915_COLOR_UNEVICTABLE,
+                                   0);
 }
 
 /**
@@ -214,10 +212,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
 
        /* Unmappable graphic memory ballooning */
        if (unmappable_base > ggtt->mappable_end) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[2],
-                                       ggtt->mappable_end,
-                                       unmappable_base);
+               ret = vgt_balloon_space(ggtt, &bl_info.space[2],
+                                       ggtt->mappable_end, unmappable_base);
 
                if (ret)
                        goto err;
@@ -228,18 +224,15 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
         * because it is reserved to the guard page.
         */
        if (unmappable_end < ggtt_end - PAGE_SIZE) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[3],
-                                       unmappable_end,
-                                       ggtt_end - PAGE_SIZE);
+               ret = vgt_balloon_space(ggtt, &bl_info.space[3],
+                                       unmappable_end, ggtt_end - PAGE_SIZE);
                if (ret)
                        goto err;
        }
 
        /* Mappable graphic memory ballooning */
        if (mappable_base > ggtt->base.start) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[0],
+               ret = vgt_balloon_space(ggtt, &bl_info.space[0],
                                        ggtt->base.start, mappable_base);
 
                if (ret)
@@ -247,10 +240,8 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
        }
 
        if (mappable_end < ggtt->mappable_end) {
-               ret = vgt_balloon_space(&ggtt->base.mm,
-                                       &bl_info.space[1],
-                                       mappable_end,
-                                       ggtt->mappable_end);
+               ret = vgt_balloon_space(ggtt, &bl_info.space[1],
+                                       mappable_end, ggtt->mappable_end);
 
                if (ret)
                        goto err;
index 58f2483362ad5f226b5d6bfe0ff39a6e0717a9cb..155906e848120ae2e1de533d81658080c546888d 100644 (file)
@@ -45,6 +45,7 @@ i915_vma_retire(struct i915_gem_active *active,
        if (i915_vma_is_active(vma))
                return;
 
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
        if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
                WARN_ON(i915_vma_unbind(vma));
@@ -69,17 +70,15 @@ i915_vma_retire(struct i915_gem_active *active,
 }
 
 static struct i915_vma *
-__i915_vma_create(struct drm_i915_gem_object *obj,
-                 struct i915_address_space *vm,
-                 const struct i915_ggtt_view *view)
+vma_create(struct drm_i915_gem_object *obj,
+          struct i915_address_space *vm,
+          const struct i915_ggtt_view *view)
 {
        struct i915_vma *vma;
        struct rb_node *rb, **p;
        int i;
 
-       GEM_BUG_ON(vm->closed);
-
-       vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
+       vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -87,29 +86,50 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
        for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
                init_request_active(&vma->last_read[i], i915_vma_retire);
        init_request_active(&vma->last_fence, NULL);
-       list_add(&vma->vm_link, &vm->unbound_list);
        vma->vm = vm;
        vma->obj = obj;
        vma->size = obj->base.size;
+       vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
-       if (view) {
+       if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
-                                                    view->params.partial.offset,
-                                                    view->params.partial.size,
+                                                    view->partial.offset,
+                                                    view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
-                       vma->size = view->params.partial.size;
+                       vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size >= obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
-                       vma->size =
-                               intel_rotation_info_size(&view->params.rotated);
+                       vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                }
        }
 
+       if (unlikely(vma->size > vm->total))
+               goto err_vma;
+
+       GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
+
        if (i915_is_ggtt(vm)) {
+               if (unlikely(overflows_type(vma->size, u32)))
+                       goto err_vma;
+
+               vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
+                                                     i915_gem_object_get_tiling(obj),
+                                                     i915_gem_object_get_stride(obj));
+               if (unlikely(vma->fence_size < vma->size || /* overflow */
+                            vma->fence_size > vm->total))
+                       goto err_vma;
+
+               GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
+
+               vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
+                                                               i915_gem_object_get_tiling(obj),
+                                                               i915_gem_object_get_stride(obj));
+               GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
+
                vma->flags |= I915_VMA_GGTT;
                list_add(&vma->obj_link, &obj->vma_list);
        } else {
@@ -131,20 +151,74 @@ __i915_vma_create(struct drm_i915_gem_object *obj,
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma_tree);
+       list_add(&vma->vm_link, &vm->unbound_list);
 
        return vma;
+
+err_vma:
+       kmem_cache_free(vm->i915->vmas, vma);
+       return ERR_PTR(-E2BIG);
 }
 
+static struct i915_vma *
+vma_lookup(struct drm_i915_gem_object *obj,
+          struct i915_address_space *vm,
+          const struct i915_ggtt_view *view)
+{
+       struct rb_node *rb;
+
+       rb = obj->vma_tree.rb_node;
+       while (rb) {
+               struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
+               long cmp;
+
+               cmp = i915_vma_compare(vma, vm, view);
+               if (cmp == 0)
+                       return vma;
+
+               if (cmp < 0)
+                       rb = rb->rb_right;
+               else
+                       rb = rb->rb_left;
+       }
+
+       return NULL;
+}
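
vma_lookup() is a standard binary-search descend over the per-object rb-tree, with i915_vma_compare() supplying the three-way ordering. The generic pattern, sketched with a plain binary tree and an integer key in place of the (vm, view) tuple:

#include <stddef.h>

struct node {
	struct node *left, *right;
	long key;
};

static struct node *tree_lookup(struct node *root, long key)
{
	while (root) {
		long cmp = root->key - key;	/* three-way comparison */

		if (cmp == 0)
			return root;
		root = cmp < 0 ? root->right : root->left;
	}
	return NULL;
}
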
+
+/**
+ * i915_vma_instance - return the singleton instance of the VMA
+ * @obj: parent &struct drm_i915_gem_object to be mapped
+ * @vm: address space in which the mapping is located
+ * @view: additional mapping requirements
+ *
+ * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
+ * the same @view characteristics. If a match is not found, one is created.
+ * Once created, the VMA is kept until either the object is freed, or the
+ * address space is closed.
+ *
+ * Must be called with struct_mutex held.
+ *
+ * Returns the vma, or an error pointer.
+ */
 struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
-               struct i915_address_space *vm,
-               const struct i915_ggtt_view *view)
+i915_vma_instance(struct drm_i915_gem_object *obj,
+                 struct i915_address_space *vm,
+                 const struct i915_ggtt_view *view)
 {
+       struct i915_vma *vma;
+
        lockdep_assert_held(&obj->base.dev->struct_mutex);
        GEM_BUG_ON(view && !i915_is_ggtt(vm));
-       GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
+       GEM_BUG_ON(vm->closed);
+
+       vma = vma_lookup(obj, vm, view);
+       if (!vma)
+               vma = vma_create(obj, vm, view);
 
-       return __i915_vma_create(obj, vm, view);
+       GEM_BUG_ON(!IS_ERR(vma) && i915_vma_is_closed(vma));
+       GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
+       GEM_BUG_ON(!IS_ERR(vma) && vma_lookup(obj, vm, view) != vma);
+       return vma;
 }
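
A hypothetical caller of the new lookup-or-create interface, following the intel_guc_allocate_vma() pattern shown earlier in this patch; the pin parameters are illustrative:

/* Hypothetical: must be called with struct_mutex held, per the kerneldoc. */
static struct i915_vma *example_pin_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;
	int ret;

	vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		return vma;

	ret = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
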
 
 /**
@@ -195,6 +269,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                        return ret;
        }
 
+       trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
@@ -258,7 +333,8 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        if (vma->node.size < size)
                return true;
 
-       if (alignment && vma->node.start & (alignment - 1))
+       GEM_BUG_ON(alignment && !is_power_of_2(alignment));
+       if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;
 
        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
@@ -277,31 +353,24 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = vma->obj;
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        bool mappable, fenceable;
-       u32 fence_size, fence_alignment;
-
-       fence_size = i915_gem_get_ggtt_size(dev_priv,
-                                           vma->size,
-                                           i915_gem_object_get_tiling(obj));
-       fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
-                                                     vma->size,
-                                                     i915_gem_object_get_tiling(obj),
-                                                     true);
-
-       fenceable = (vma->node.size == fence_size &&
-                    (vma->node.start & (fence_alignment - 1)) == 0);
 
-       mappable = (vma->node.start + fence_size <=
-                   dev_priv->ggtt.mappable_end);
+       GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+       GEM_BUG_ON(!vma->fence_size);
 
        /*
         * Explicitly disable for rotated VMA since the display does not
         * need the fence and the VMA is not accessible to other users.
         */
-       if (mappable && fenceable &&
-           vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
+       if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+               return;
+
+       fenceable = (vma->node.size >= vma->fence_size &&
+                    IS_ALIGNED(vma->node.start, vma->fence_alignment));
+
+       mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+
+       if (mappable && fenceable)
                vma->flags |= I915_VMA_CAN_FENCE;
        else
                vma->flags &= ~I915_VMA_CAN_FENCE;
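
A worked example of the new predicate, with invented numbers: with fence_size = 1 MiB and fence_alignment = 512 KiB (0x80000), a 1 MiB node at offset 0x140000 is not fenceable, since 0x140000 is not a multiple of 0x80000; the same node at 0x100000 is fenceable, and also mappable provided 0x100000 + fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end.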
@@ -368,22 +437,26 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
        size = max(size, vma->size);
-       if (flags & PIN_MAPPABLE)
-               size = i915_gem_get_ggtt_size(dev_priv, size,
-                                             i915_gem_object_get_tiling(obj));
+       alignment = max(alignment, vma->display_alignment);
+       if (flags & PIN_MAPPABLE) {
+               size = max_t(typeof(size), size, vma->fence_size);
+               alignment = max_t(typeof(alignment),
+                                 alignment, vma->fence_alignment);
+       }
 
-       alignment = max(max(alignment, vma->display_alignment),
-                       i915_gem_get_ggtt_alignment(dev_priv, size,
-                                                   i915_gem_object_get_tiling(obj),
-                                                   flags & PIN_MAPPABLE));
+       GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+       GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
+       GEM_BUG_ON(!is_power_of_2(alignment));
 
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
+       GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 
        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, dev_priv->ggtt.mappable_end);
        if (flags & PIN_ZONE_4G)
-               end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
+               end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
+       GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
 
        /* If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
@@ -403,65 +476,28 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 
        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;
-               if (offset & (alignment - 1) ||
+               if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end)) {
                        ret = -EINVAL;
                        goto err_unpin;
                }
 
-               vma->node.start = offset;
-               vma->node.size = size;
-               vma->node.color = obj->cache_level;
-               ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-               if (ret) {
-                       ret = i915_gem_evict_for_vma(vma, flags);
-                       if (ret == 0)
-                               ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
-                       if (ret)
-                               goto err_unpin;
-               }
+               ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
+                                          size, offset, obj->cache_level,
+                                          flags);
+               if (ret)
+                       goto err_unpin;
        } else {
-               u32 search_flag, alloc_flag;
-
-               if (flags & PIN_HIGH) {
-                       search_flag = DRM_MM_SEARCH_BELOW;
-                       alloc_flag = DRM_MM_CREATE_TOP;
-               } else {
-                       search_flag = DRM_MM_SEARCH_DEFAULT;
-                       alloc_flag = DRM_MM_CREATE_DEFAULT;
-               }
-
-               /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
-                * so we know that we always have a minimum alignment of 4096.
-                * The drm_mm range manager is optimised to return results
-                * with zero alignment, so where possible use the optimal
-                * path.
-                */
-               if (alignment <= 4096)
-                       alignment = 0;
-
-search_free:
-               ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
-                                                         &vma->node,
-                                                         size, alignment,
-                                                         obj->cache_level,
-                                                         start, end,
-                                                         search_flag,
-                                                         alloc_flag);
-               if (ret) {
-                       ret = i915_gem_evict_something(vma->vm, size, alignment,
-                                                      obj->cache_level,
-                                                      start, end,
-                                                      flags);
-                       if (ret == 0)
-                               goto search_free;
-
+               ret = i915_gem_gtt_insert(vma->vm, &vma->node,
+                                         size, alignment, obj->cache_level,
+                                         start, end, flags);
+               if (ret)
                        goto err_unpin;
-               }
 
                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
        list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
@@ -504,6 +540,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
        if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
                __i915_vma_set_map_and_fenceable(vma);
 
+       GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
        return 0;
 
index e3b2b3b1e0568dd7c00b3309520cad6311c0b28c..e39d922cfb6fd1292741b1684226dcb0b22b40e6 100644 (file)
@@ -55,6 +55,9 @@ struct i915_vma {
        u64 size;
        u64 display_alignment;
 
+       u32 fence_size;
+       u32 fence_alignment;
+
        unsigned int flags;
        /**
         * How many users have pinned this object in GTT space. The following
@@ -109,9 +112,9 @@ struct i915_vma {
 };
 
 struct i915_vma *
-i915_vma_create(struct drm_i915_gem_object *obj,
-               struct i915_address_space *vm,
-               const struct i915_ggtt_view *view);
+i915_vma_instance(struct drm_i915_gem_object *obj,
+                 struct i915_address_space *vm,
+                 const struct i915_ggtt_view *view);
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma);
 
@@ -196,15 +199,30 @@ i915_vma_compare(struct i915_vma *vma,
        if (cmp)
                return cmp;
 
+       BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL != 0);
+       cmp = vma->ggtt_view.type;
        if (!view)
-               return vma->ggtt_view.type;
+               return cmp;
 
-       if (vma->ggtt_view.type != view->type)
-               return vma->ggtt_view.type - view->type;
+       cmp -= view->type;
+       if (cmp)
+               return cmp;
 
-       return memcmp(&vma->ggtt_view.params,
-                     &view->params,
-                     sizeof(view->params));
+       /* ggtt_view.type also encodes its size, so that we can both
+        * distinguish different views using it as a "type" and use a
+        * compact memcmp (no accessing of uninitialised padding bytes)
+        * without storing an extra parameter or adding more code.
+        *
+        * To ensure that the memcmp is valid for all branches of the
+        * union, even though the code looks like it is just comparing one
+        * branch, we assert below that all branches have the same address,
+        * and that each branch has a unique type/size.
+        */
+       BUILD_BUG_ON(I915_GGTT_VIEW_NORMAL >= I915_GGTT_VIEW_PARTIAL);
+       BUILD_BUG_ON(I915_GGTT_VIEW_PARTIAL >= I915_GGTT_VIEW_ROTATED);
+       BUILD_BUG_ON(offsetof(typeof(*view), rotated) !=
+                    offsetof(typeof(*view), partial));
+       return memcmp(&vma->ggtt_view.partial, &view->partial, view->type);
 }
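
The type-doubles-as-size trick described in the comment above, as a standalone sketch (illustrative C, not i915 code):

        #include <string.h>

        struct partial { unsigned int offset, size; }; /* 8 bytes  */
        struct rotated { unsigned int plane[4]; };     /* 16 bytes */

        enum view_type {
                VIEW_NORMAL  = 0,                      /* nothing beyond the type */
                VIEW_PARTIAL = sizeof(struct partial),
                VIEW_ROTATED = sizeof(struct rotated),
        };

        struct view {
                enum view_type type;
                union {
                        struct partial partial;
                        struct rotated rotated;
                };
        };

        static int view_compare(const struct view *a, const struct view *b)
        {
                if (a->type != b->type)
                        return (int)a->type - (int)b->type;
                /* type == size of the active branch: one memcmp fits all */
                return memcmp(&a->partial, &b->partial, a->type);
        }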
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
@@ -229,8 +247,11 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
        /* Pin early to prevent the shrinker/eviction logic from destroying
         * our vma as we insert and bind.
         */
-       if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+       if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) {
+               GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+               GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
                return 0;
+       }
 
        return __i915_vma_do_pin(vma, size, alignment, flags);
 }
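
A note on the fast path above, assuming the bit layout in i915_vma.h where the pin count occupies the low bits of vma->flags, below the bind bits covered by I915_VMA_BIND_MASK:

        /* ++vma->flags              takes a pin reference up front;
         * (vma->flags ^ flags)      leaves a bind bit set for any bind type
         *                           requested but not yet bound (or vice versa);
         * & I915_VMA_BIND_MASK      discards the pin-count bits.
         *
         * A zero result means every requested binding already exists, so the
         * __i915_vma_do_pin() slow path can be skipped.
         */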
index 4612ffd555a790e2323e741d59edd0d77ef8c800..41fd94e62d3cbdab8eec3543a25ac1fe297d03f9 100644 (file)
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
 
        __drm_atomic_helper_plane_duplicate_state(plane, state);
 
+       intel_state->vma = NULL;
+
        return state;
 }
 
@@ -100,6 +102,24 @@ void
 intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
 {
+       struct i915_vma *vma;
+
+       vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+       /*
+        * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+        * We currently don't clear all planes during driver unload, so we have
+        * to be able to unpin vma here for now.
+        *
+        * Normally this can only happen during unload when kmscon is disabled
+        * and userspace doesn't attempt to set a framebuffer at all.
+        */
+       if (vma) {
+               mutex_lock(&plane->dev->struct_mutex);
+               intel_unpin_fb_vma(vma);
+               mutex_unlock(&plane->dev->struct_mutex);
+       }
+
        drm_atomic_helper_plane_destroy_state(plane, state);
 }
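
fetch_and_zero() is an existing i915 helper, a GNU C statement expression that reads a variable and clears it in one step; its definition, as carried in the driver headers of this era (reproduced here for reference):

        #define fetch_and_zero(ptr) ({                                  \
                typeof(*ptr) __T = *(ptr);                              \
                *(ptr) = (typeof(*ptr))0;                               \
                __T;                                                    \
        })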
 
index 9cbb8d8363b4486cdb57d05f0c3e3c4b8db5d103..0085bc745f6aa5256cf21c1ae76dd2cbce80e387 100644 (file)
  * low-power state and comes back to normal.
  */
 
+#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
+#define GLK_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
+
 #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
 MODULE_FIRMWARE(I915_CSR_KBL);
 #define KBL_CSR_VERSION_REQUIRED       CSR_VERSION(1, 1)
@@ -286,7 +290,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        csr->version = css_header->version;
 
-       if (IS_KABYLAKE(dev_priv)) {
+       if (IS_GEMINILAKE(dev_priv)) {
+               required_version = GLK_CSR_VERSION_REQUIRED;
+       } else if (IS_KABYLAKE(dev_priv)) {
                required_version = KBL_CSR_VERSION_REQUIRED;
        } else if (IS_SKYLAKE(dev_priv)) {
                required_version = SKL_CSR_VERSION_REQUIRED;
@@ -435,7 +441,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;
 
-       if (IS_KABYLAKE(dev_priv))
+       if (IS_GEMINILAKE(dev_priv))
+               csr->fw_path = I915_CSR_GLK;
+       else if (IS_KABYLAKE(dev_priv))
                csr->fw_path = I915_CSR_KBL;
        else if (IS_SKYLAKE(dev_priv))
                csr->fw_path = I915_CSR_SKL;
index f642f6ded4ae7496f7fa5177a519664e607cb51d..fcf81815daff3abb53ecbb95c46a06701e3f526b 100644 (file)
@@ -192,7 +192,7 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
                (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
                hweight8(sseu->slice_mask) > 1;
        sseu->has_subslice_pg =
-               IS_BROXTON(dev_priv) && sseu_subslice_total(sseu) > 1;
+               IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
        sseu->has_eu_pg = sseu->eu_per_subslice > 2;
 
        if (IS_BROXTON(dev_priv)) {
index 1eb63699e115257db3cc205d58b9c2471d0d459f..b3e773c9f87240c15795d2073abfffd7ed302ba8 100644 (file)
@@ -2137,11 +2137,10 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
                        const struct drm_framebuffer *fb,
                        unsigned int rotation)
 {
+       view->type = I915_GGTT_VIEW_NORMAL;
        if (drm_rotation_90_or_270(rotation)) {
-               *view = i915_ggtt_view_rotated;
-               view->params.rotated = to_intel_framebuffer(fb)->rot_info;
-       } else {
-               *view = i915_ggtt_view_normal;
+               view->type = I915_GGTT_VIEW_ROTATED;
+               view->rotated = to_intel_framebuffer(fb)->rot_info;
        }
 }
 
@@ -2235,24 +2234,19 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
                        i915_vma_pin_fence(vma);
        }
 
+       i915_vma_get(vma);
 err:
        intel_runtime_pm_put(dev_priv);
        return vma;
 }
 
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
 {
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-       vma = i915_gem_object_to_ggtt(obj, &view);
+       lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
        i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
+       i915_vma_put(vma);
 }
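
The i915_vma_get() added to intel_pin_and_fence_fb_obj() pairs with the i915_vma_put() here, so the pinned VMA now carries a reference owned by whoever holds the return value; the assumed usage pattern:

        /* Assumed pairing, struct_mutex held around both calls:
         *
         *   vma = intel_pin_and_fence_fb_obj(fb, rotation);  pins + takes a ref
         *   ...
         *   intel_unpin_fb_vma(vma);                         unpins + drops it
         */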
 
 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2745,7 +2739,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_device *dev = intel_crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_crtc *c;
-       struct intel_crtc *i;
        struct drm_i915_gem_object *obj;
        struct drm_plane *primary = intel_crtc->base.primary;
        struct drm_plane_state *plane_state = primary->state;
@@ -2770,20 +2763,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * an fb with another CRTC instead
         */
        for_each_crtc(dev, c) {
-               i = to_intel_crtc(c);
+               struct intel_plane_state *state;
 
                if (c == &intel_crtc->base)
                        continue;
 
-               if (!i->active)
+               if (!to_intel_crtc(c)->active)
                        continue;
 
-               fb = c->primary->fb;
-               if (!fb)
+               state = to_intel_plane_state(c->primary->state);
+               if (!state->vma)
                        continue;
 
-               obj = intel_fb_obj(fb);
-               if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+               if (intel_plane_ggtt_offset(state) == plane_config->base) {
+                       fb = c->primary->fb;
                        drm_framebuffer_reference(fb);
                        goto valid_fb;
                }
@@ -2796,7 +2789,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
-       to_intel_plane_state(plane_state)->base.visible = false;
+       plane_state->visible = false;
        crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
        intel_pre_disable_primary_noatomic(&intel_crtc->base);
        intel_plane->disable_plane(primary, &intel_crtc->base);
@@ -2804,6 +2797,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        return;
 
 valid_fb:
+       mutex_lock(&dev->struct_mutex);
+       intel_state->vma =
+               intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(intel_state->vma)) {
+               DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+                         intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+               intel_state->vma = NULL;
+               drm_framebuffer_unreference(fb);
+               return;
+       }
+
        plane_state->src_x = 0;
        plane_state->src_y = 0;
        plane_state->src_w = fb->width << 16;
@@ -2966,6 +2972,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
        unsigned int rotation = plane_state->base.rotation;
        int ret;
 
+       if (!plane_state->base.visible)
+               return 0;
+
        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
@@ -3096,13 +3105,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE(DSPSURF(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
                I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE(DSPLINOFF(plane), linear_offset);
        } else {
                I915_WRITE(DSPADDR(plane),
-                          intel_fb_gtt_offset(fb, rotation) +
+                          intel_plane_ggtt_offset(plane_state) +
                           intel_crtc->dspaddr_offset);
        }
        POSTING_READ(reg);
@@ -3199,7 +3208,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
 
        I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
        I915_WRITE(DSPSURF(plane),
-                  intel_fb_gtt_offset(fb, rotation) +
+                  intel_plane_ggtt_offset(plane_state) +
                   intel_crtc->dspaddr_offset);
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3222,23 +3231,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
        }
 }
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
-                       unsigned int rotation)
-{
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       struct i915_ggtt_view view;
-       struct i915_vma *vma;
-
-       intel_fill_fb_ggtt_view(&view, fb, rotation);
-
-       vma = i915_gem_object_to_ggtt(obj, &view);
-       if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
-                view.type))
-               return -1;
-
-       return i915_ggtt_offset(vma);
-}
-
 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
 {
        struct drm_device *dev = intel_crtc->base.dev;
@@ -3434,7 +3426,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
        }
 
        I915_WRITE(PLANE_SURF(pipe, plane_id),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
 
        POSTING_READ(PLANE_SURF(pipe, plane_id));
 }
@@ -3558,23 +3550,19 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
        state = drm_atomic_helper_duplicate_state(dev, ctx);
        if (IS_ERR(state)) {
                ret = PTR_ERR(state);
-               state = NULL;
                DRM_ERROR("Duplicating state failed with %i\n", ret);
-               goto err;
+               return;
        }
 
        ret = drm_atomic_helper_disable_all(dev, ctx);
        if (ret) {
                DRM_ERROR("Suspending crtc's failed with %i\n", ret);
-               goto err;
+               drm_atomic_state_put(state);
+               return;
        }
 
        dev_priv->modeset_restore_state = state;
        state->acquire_ctx = ctx;
-       return;
-
-err:
-       drm_atomic_state_put(state);
 }
 
 void intel_finish_reset(struct drm_i915_private *dev_priv)
@@ -6883,13 +6871,13 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        if (!intel_crtc->active)
                return;
 
-       if (to_intel_plane_state(crtc->primary->state)->base.visible) {
+       if (crtc->primary->state->visible) {
                WARN_ON(intel_crtc->flip_work);
 
                intel_pre_disable_primary_noatomic(crtc);
 
                intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
-               to_intel_plane_state(crtc->primary->state)->base.visible = false;
+               crtc->primary->state->visible = false;
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
@@ -8403,7 +8391,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
        mode->type = DRM_MODE_TYPE_DRIVER;
 
        mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
-       mode->flags |= pipe_config->base.adjusted_mode.flags;
 
        mode->hsync = drm_mode_hsync(mode);
        mode->vrefresh = drm_mode_vrefresh(mode);
@@ -11288,6 +11275,7 @@ found:
        }
 
        old->restore_state = restore_state;
+       drm_atomic_state_put(state);
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -11567,7 +11555,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
                flush_work(&work->mmio_work);
 
        mutex_lock(&dev->struct_mutex);
-       intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+       intel_unpin_fb_vma(work->old_vma);
        i915_gem_object_put(work->pending_flip_obj);
        mutex_unlock(&dev->struct_mutex);
 
@@ -12277,8 +12265,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup_pending;
        }
 
-       work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
-       work->gtt_offset += intel_crtc->dspaddr_offset;
+       work->old_vma = to_intel_plane_state(primary->state)->vma;
+       to_intel_plane_state(primary->state)->vma = vma;
+
+       work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
        work->rotation = crtc->primary->state->rotation;
 
        /*
@@ -12333,7 +12323,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_request:
        i915_add_request_no_flush(request);
 cleanup_unpin:
-       intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+       to_intel_plane_state(primary->state)->vma = work->old_vma;
+       intel_unpin_fb_vma(vma);
 cleanup_pending:
        atomic_dec(&intel_crtc->unpin_work_count);
 unlock:
@@ -12463,7 +12454,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
        }
 
        was_visible = old_plane_state->base.visible;
-       visible = to_intel_plane_state(plane_state)->base.visible;
+       visible = plane_state->visible;
 
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;
@@ -12479,7 +12470,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
         * only combine the results from all planes in the current place?
         */
        if (!is_crtc_enabled)
-               to_intel_plane_state(plane_state)->base.visible = visible = false;
+               plane_state->visible = visible = false;
 
        if (!was_visible && !visible)
                return 0;
@@ -14737,6 +14728,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
        .page_flip = intel_crtc_page_flip,
        .atomic_duplicate_state = intel_crtc_duplicate_state,
        .atomic_destroy_state = intel_crtc_destroy_state,
+       .set_crc_source = intel_crtc_set_crc_source,
 };
 
 /**
@@ -14833,6 +14825,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
                        DRM_DEBUG_KMS("failed to pin object\n");
                        return PTR_ERR(vma);
                }
+
+               to_intel_plane_state(new_state)->vma = vma;
        }
 
        return 0;
@@ -14851,19 +14845,12 @@ void
 intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_plane_state *old_state)
 {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct intel_plane_state *old_intel_state;
-       struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
-       struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
-       old_intel_state = to_intel_plane_state(old_state);
-
-       if (!obj && !old_obj)
-               return;
+       struct i915_vma *vma;
 
-       if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
-           !INTEL_INFO(dev_priv)->cursor_needs_physical))
-               intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+       /* Should only be called after a successful intel_prepare_plane_fb()! */
+       vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+       if (vma)
+               intel_unpin_fb_vma(vma);
 }
 
 int
@@ -15015,6 +15002,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct drm_framebuffer *old_fb;
        struct drm_crtc_state *crtc_state = crtc->state;
+       struct i915_vma *old_vma;
 
        /*
         * When crtc is inactive or there is a modeset pending,
@@ -15086,9 +15074,12 @@ intel_legacy_cursor_update(struct drm_plane *plane,
                        ret = PTR_ERR(vma);
                        goto out_unlock;
                }
+
+               to_intel_plane_state(new_plane_state)->vma = vma;
        }
 
        old_fb = old_plane_state->fb;
+       old_vma = to_intel_plane_state(old_plane_state)->vma;
 
        i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
                          intel_plane->frontbuffer_bit);
@@ -15098,6 +15089,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
        *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state);
        new_plane_state->fence = NULL;
        new_plane_state->fb = old_fb;
+       to_intel_plane_state(new_plane_state)->vma = old_vma;
 
        intel_plane->update_plane(plane,
                                  to_intel_crtc_state(crtc->state),
@@ -15336,7 +15328,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-               addr = i915_gem_object_ggtt_offset(obj, NULL);
+               addr = intel_plane_ggtt_offset(state);
        else
                addr = obj->phys_handle->busaddr;
 
@@ -16840,7 +16832,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * Temporarily change the plane mapping and disable everything
                 * ...  */
                plane = crtc->plane;
-               to_intel_plane_state(crtc->base.primary->state)->base.visible = true;
+               crtc->base.primary->state->visible = true;
                crtc->plane = !plane;
                intel_crtc_disable_noatomic(&crtc->base);
                crtc->plane = plane;
@@ -16992,7 +16984,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        dev_priv->active_crtcs = 0;
 
        for_each_intel_crtc(dev, crtc) {
-               struct intel_crtc_state *crtc_state = crtc->config;
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
 
                __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
                memset(crtc_state, 0, sizeof(*crtc_state));
@@ -17011,7 +17004,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
                DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
                              crtc->base.base.id, crtc->base.name,
-                             enableddisabled(crtc->active));
+                             enableddisabled(crtc_state->base.active));
        }
 
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
@@ -17021,7 +17014,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                                                  &pll->state.hw_state);
                pll->state.crtc_mask = 0;
                for_each_intel_crtc(dev, crtc) {
-                       if (crtc->active && crtc->config->shared_dpll == pll)
+                       struct intel_crtc_state *crtc_state =
+                               to_intel_crtc_state(crtc->base.state);
+
+                       if (crtc_state->base.active &&
+                           crtc_state->shared_dpll == pll)
                                pll->state.crtc_mask |= 1 << crtc->pipe;
                }
                pll->active_mask = pll->state.crtc_mask;
@@ -17034,11 +17031,14 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                pipe = 0;
 
                if (encoder->get_hw_state(encoder, &pipe)) {
+                       struct intel_crtc_state *crtc_state;
+
                        crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+                       crtc_state = to_intel_crtc_state(crtc->base.state);
 
                        encoder->base.crtc = &crtc->base;
-                       crtc->config->output_types |= 1 << encoder->type;
-                       encoder->get_config(encoder, crtc->config);
+                       crtc_state->output_types |= 1 << encoder->type;
+                       encoder->get_config(encoder, crtc_state);
                } else {
                        encoder->base.crtc = NULL;
                }
@@ -17079,14 +17079,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        }
 
        for_each_intel_crtc(dev, crtc) {
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
                int pixclk = 0;
 
-               crtc->base.hwmode = crtc->config->base.adjusted_mode;
+               crtc->base.hwmode = crtc_state->base.adjusted_mode;
 
                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
-               if (crtc->base.state->active) {
-                       intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
-                       intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
+               if (crtc_state->base.active) {
+                       intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
+                       intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
                        WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
 
                        /*
@@ -17098,17 +17100,17 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                         * set a flag to indicate that a full recalculation is
                         * needed on the next commit.
                         */
-                       crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
+                       crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
 
                        if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
-                               pixclk = ilk_pipe_pixel_rate(crtc->config);
+                               pixclk = ilk_pipe_pixel_rate(crtc_state);
                        else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-                               pixclk = crtc->config->base.adjusted_mode.crtc_clock;
+                               pixclk = crtc_state->base.adjusted_mode.crtc_clock;
                        else
                                WARN_ON(dev_priv->display.modeset_calc_cdclk);
 
                        /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-                       if (IS_BROADWELL(dev_priv) && crtc->config->ips_enabled)
+                       if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
                                pixclk = DIV_ROUND_UP(pixclk * 100, 95);
 
                        drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
@@ -17117,7 +17119,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 
                dev_priv->min_pixclk[crtc->pipe] = pixclk;
 
-               intel_pipe_config_sanity_check(dev_priv, crtc->config);
+               intel_pipe_config_sanity_check(dev_priv, crtc_state);
        }
 }
 
@@ -17218,47 +17220,19 @@ void intel_display_resume(struct drm_device *dev)
 
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
-       drm_atomic_state_put(state);
+       if (state)
+               drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_i915_gem_object *obj;
 
        intel_init_gt_powersave(dev_priv);
 
        intel_modeset_init_hw(dev);
 
        intel_setup_overlay(dev_priv);
-
-       /*
-        * Make sure any fbs we allocated at startup are properly
-        * pinned & fenced.  When we do the allocation it's too early
-        * for this.
-        */
-       for_each_crtc(dev, c) {
-               struct i915_vma *vma;
-
-               obj = intel_fb_obj(c->primary->fb);
-               if (obj == NULL)
-                       continue;
-
-               mutex_lock(&dev->struct_mutex);
-               vma = intel_pin_and_fence_fb_obj(c->primary->fb,
-                                                c->primary->state->rotation);
-               mutex_unlock(&dev->struct_mutex);
-               if (IS_ERR(vma)) {
-                       DRM_ERROR("failed to pin boot fb on pipe %d\n",
-                                 to_intel_crtc(c)->pipe);
-                       drm_framebuffer_unreference(c->primary->fb);
-                       c->primary->fb = NULL;
-                       c->primary->crtc = c->primary->state->crtc = NULL;
-                       update_state_fb(c->primary);
-                       c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
-               }
-       }
 }
 
 int intel_connector_register(struct drm_connector *connector)
index be89aca0dbe8f971793d2c508e1ee26fcca61b6c..3d8ac8aa721432426e8a86b21d82319829a5b8f4 100644 (file)
@@ -3044,6 +3044,32 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
                                DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
 }
 
+static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
+{
+       uint8_t psr_caps = 0;
+
+       drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
+       return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+       uint8_t dprx = 0;
+
+       drm_dp_dpcd_readb(&intel_dp->aux,
+                       DP_DPRX_FEATURE_ENUMERATION_LIST,
+                       &dprx);
+       return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+       uint8_t alpm_caps = 0;
+
+       drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
+       return alpm_caps & DP_ALPM_CAP;
+}
+
 /* These are source-specific values. */
 uint8_t
 intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -3414,7 +3440,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
        if (HAS_DDI(dev_priv)) {
                signal_levels = ddi_signal_levels(intel_dp);
 
-               if (IS_BROXTON(dev_priv))
+               if (IS_GEN9_LP(dev_priv))
                        signal_levels = 0;
                else
                        mask = DDI_BUF_EMP_MASK;
@@ -3622,6 +3648,16 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
                dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
                DRM_DEBUG_KMS("PSR2 %s on sink",
                              dev_priv->psr.psr2_support ? "supported" : "not supported");
+
+               if (dev_priv->psr.psr2_support) {
+                       dev_priv->psr.y_cord_support =
+                               intel_dp_get_y_cord_status(intel_dp);
+                       dev_priv->psr.colorimetry_support =
+                               intel_dp_get_colorimetry_status(intel_dp);
+                       dev_priv->psr.alpm =
+                               intel_dp_get_alpm_status(intel_dp);
+               }
+
        }
 
        /* Read the eDP Display control capabilities registers */
index 6b02dac6ea26bb9f524a2a6b2925cab3ad77d661..0cec0013ace044b4fb891038314fef695241dbde 100644 (file)
@@ -376,6 +376,7 @@ struct intel_atomic_state {
 struct intel_plane_state {
        struct drm_plane_state base;
        struct drm_rect clip;
+       struct i915_vma *vma;
 
        struct {
                u32 offset;
@@ -1067,6 +1068,7 @@ struct intel_flip_work {
        struct work_struct mmio_work;
 
        struct drm_crtc *crtc;
+       struct i915_vma *old_vma;
        struct drm_framebuffer *old_fb;
        struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
@@ -1302,7 +1304,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                                    struct drm_modeset_acquire_ctx *ctx);
 struct i915_vma *
 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
 struct drm_framebuffer *
 __intel_framebuffer_create(struct drm_device *dev,
                           struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1391,7 +1393,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
 
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+       return i915_ggtt_offset(state->vma);
+}
 
 u32 skl_plane_ctl_format(uint32_t pixel_format);
 u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
@@ -1880,5 +1885,11 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
 /* intel_pipe_crc.c */
 int intel_pipe_crc_create(struct drm_minor *minor);
 void intel_pipe_crc_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
+                             size_t *values_cnt);
+#else
+#define intel_crtc_set_crc_source NULL
+#endif
 extern const struct file_operations i915_display_crc_ctl_fops;
 #endif /* __INTEL_DRV_H__ */
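
Defining intel_crtc_set_crc_source to NULL in the !CONFIG_DEBUG_FS branch, rather than supplying a stub function, lets the .set_crc_source assignment in intel_crtc_funcs compile unchanged while the core is assumed to guard the call; a hypothetical consumer sketch:

        /* Hypothetical core-side consumer: with CONFIG_DEBUG_FS=n the funcs
         * entry is a plain NULL, so the call site checks before invoking.
         */
        if (crtc->funcs->set_crc_source)
                ret = crtc->funcs->set_crc_source(crtc, source_name, &values_cnt);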
index 97bbbc3d6aa8c3407ff76a60e498a723dd13a03d..371acf109e343295ae060c4cf49908bf5607118e 100644 (file)
@@ -264,7 +264,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
                return PTR_ERR(obj);
        }
 
-       vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
index 26a81a9e9c1d1beb805c764c1a51c30e5a65482b..89fe5c8464df761600ae6ed4f18fc5a1a1e9893c 100644 (file)
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= params->fb.fence_reg;
+       fbc_ctl |= params->vma->fence->id;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
-               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+       if (params->vma->fence) {
+               dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
                I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN;
                if (IS_GEN5(dev_priv))
-                       dpfc_ctl |= params->fb.fence_reg;
+                       dpfc_ctl |= params->vma->fence->id;
                if (IS_GEN6(dev_priv)) {
                        I915_WRITE(SNB_DPFC_CTL_SA,
-                                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                                  SNB_CPU_FENCE_ENABLE |
+                                  params->vma->fence->id);
                        I915_WRITE(DPFC_CPU_FENCE_OFFSET,
                                   params->crtc.fence_y_offset);
                }
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
        }
 
        I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE,
+                  i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
                break;
        }
 
-       if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+       if (params->vma->fence) {
                dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
                I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+                          SNB_CPU_FENCE_ENABLE |
+                          params->vma->fence->id);
                I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        } else {
                I915_WRITE(SNB_DPFC_CTL_SA, 0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
-       struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
-       return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
                                         struct intel_crtc_state *crtc_state,
                                         struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        struct intel_fbc *fbc = &dev_priv->fbc;
        struct intel_fbc_state_cache *cache = &fbc->state_cache;
        struct drm_framebuffer *fb = plane_state->base.fb;
-       struct drm_i915_gem_object *obj;
+
+       cache->vma = NULL;
 
        cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
        if (!cache->plane.visible)
                return;
 
-       obj = intel_fb_obj(fb);
-
-       /* FIXME: We lack the proper locking here, so only run this on the
-        * platforms that need. */
-       if (IS_GEN(dev_priv, 5, 6))
-               cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
        cache->fb.format = fb->format;
        cache->fb.stride = fb->pitches[0];
-       cache->fb.fence_reg = get_fence_id(fb);
-       cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+       cache->vma = plane_state->vma;
 }
 
 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
                return false;
        }
 
-       if (!cache->plane.visible) {
+       if (!cache->vma) {
                fbc->no_fbc_reason = "primary plane not visible";
                return false;
        }
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
         * so have no fence associated with it) due to aperture constraints
         * at the time of pinning.
         */
-       if (cache->fb.tiling_mode != I915_TILING_X ||
-           cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+       if (!cache->vma->fence) {
                fbc->no_fbc_reason = "framebuffer not tiled or fenced";
                return false;
        }
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
         * zero. */
        memset(params, 0, sizeof(*params));
 
+       params->vma = cache->vma;
+
        params->crtc.pipe = crtc->pipe;
        params->crtc.plane = crtc->plane;
        params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
 
        params->fb.format = cache->fb.format;
        params->fb.stride = cache->fb.stride;
-       params->fb.fence_reg = cache->fb.fence_reg;
 
        params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
-       params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
 }
 
 static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
@@ -1296,7 +1284,7 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
 
        for_each_intel_crtc(&dev_priv->drm, crtc)
                if (intel_crtc_active(crtc) &&
-                   to_intel_plane_state(crtc->base.primary->state)->base.visible)
+                   crtc->base.primary->state->visible)
                        dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
 
index bdefa61f2e6057cc4f7165374e7ee42b56589ea0..e0d9e72cf3d1dfee344896c120a9937f4a8b5824 100644 (file)
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 out_destroy_fbi:
        drm_fb_helper_release_fbi(helper);
 out_unpin:
-       intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+       intel_unpin_fb_vma(vma);
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
 
        if (ifbdev->fb) {
                mutex_lock(&ifbdev->helper.dev->struct_mutex);
-               intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+               intel_unpin_fb_vma(ifbdev->vma);
                mutex_unlock(&ifbdev->helper.dev->struct_mutex);
 
                drm_framebuffer_remove(&ifbdev->fb->base);
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+       if (!ifbdev)
+               return;
+
        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
index 3202b32b56383f9eccd32180e7d95591e65be49a..25691f0e4c50df6ff8389f7accf1ece317277bd3 100644 (file)
  * The GuC firmware layout looks like this:
  *
  *     +-------------------------------+
- *     |        guc_css_header         |
+ *     |         uc_css_header         |
  *     |                               |
  *     | contains major/minor version  |
  *     +-------------------------------+
  * 3. The length of each component can be found in the header, in dwords.
  * 4. The modulus and exponent keys are not required by the driver; they may
  *    be absent from the fw, in which case the driver loads a truncated firmware.
+ *
+ * The HuC firmware layout is the same as the GuC firmware layout.
+ *
+ * The HuC css header differs only in where the version information is
+ * stored, so uc_css_header is unified to support both. The driver should
+ * read the HuC version from uc_css_header.huc_sw_version and the GuC
+ * version from uc_css_header.guc_sw_version.
  */
 
-struct guc_css_header {
+struct uc_css_header {
        uint32_t module_type;
        /* header_size includes all non-uCode bits, including css_header, rsa
         * key, modulus key and exponent data. */
@@ -205,8 +212,16 @@ struct guc_css_header {
 
        char username[8];
        char buildnumber[12];
-       uint32_t device_id;
-       uint32_t guc_sw_version;
+       union {
+               struct {
+                       uint32_t branch_client_version;
+                       uint32_t sw_version;
+               } guc;
+               struct {
+                       uint32_t sw_version;
+                       uint32_t reserved;
+               } huc;
+       };
        uint32_t prod_preprod_fw;
        uint32_t reserved[12];
        uint32_t header_info;
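
What the unified header buys: one structure serves both firmwares, with only the union branch differing. A sketch using the field names from the struct above (the helper itself is hypothetical):

        /* Hypothetical helper: select the version field per firmware type. */
        static u32 uc_fw_version(const struct uc_css_header *css, bool is_huc)
        {
                return is_huc ? css->huc.sw_version : css->guc.sw_version;
        }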
@@ -490,6 +505,7 @@ enum intel_guc_action {
        INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
        INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
        INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+       INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
        INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000,
        INTEL_GUC_ACTION_LIMIT
 };
index 35d5690f47a2e3ab87dc1e8e136eb64b328bac13..2f1cf9aea04ed779f2d25b4f7464c29ecac0d90e 100644 (file)
  * 512K. In order to exclude 0-512K address space from GGTT, all gfx objects
  * used by GuC are pinned with PIN_OFFSET_BIAS set to the size of the WOPCM.
  *
- * Firmware log:
- * Firmware log is enabled by setting i915.guc_log_level to non-negative level.
- * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from
- * i915_guc_load_status will print out firmware loading status and scratch
- * registers value.
- *
  */
 
 #define SKL_FW_MAJOR 6
@@ -81,16 +75,16 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
 MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
 
 /* User-friendly representation of an enum */
-const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
 {
        switch (status) {
-       case GUC_FIRMWARE_FAIL:
+       case INTEL_UC_FIRMWARE_FAIL:
                return "FAIL";
-       case GUC_FIRMWARE_NONE:
+       case INTEL_UC_FIRMWARE_NONE:
                return "NONE";
-       case GUC_FIRMWARE_PENDING:
+       case INTEL_UC_FIRMWARE_PENDING:
                return "PENDING";
-       case GUC_FIRMWARE_SUCCESS:
+       case INTEL_UC_FIRMWARE_SUCCESS:
                return "SUCCESS";
        default:
                return "UNKNOWN!";
@@ -278,7 +272,7 @@ static inline bool guc_ucode_response(struct drm_i915_private *dev_priv,
 static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
                              struct i915_vma *vma)
 {
-       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
        unsigned long offset;
        struct sg_table *sg = vma->pages;
        u32 status, rsa[UOS_RSA_SCRATCH_MAX_COUNT];
@@ -334,12 +328,12 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv)
 {
        u32 wopcm_size = GUC_WOPCM_TOP;
 
        /* On BXT, the top of WOPCM is reserved for RC6 context */
-       if (IS_BROXTON(dev_priv))
+       if (IS_GEN9_LP(dev_priv))
                wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
 
        return wopcm_size;
@@ -350,29 +344,27 @@ static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
  */
 static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 {
-       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
        struct i915_vma *vma;
        int ret;
 
-       ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
+       ret = i915_gem_object_set_to_gtt_domain(guc_fw->obj, false);
        if (ret) {
                DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
                return ret;
        }
 
-       vma = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
+       vma = i915_gem_object_ggtt_pin(guc_fw->obj, NULL, 0, 0,
+                                      PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
        if (IS_ERR(vma)) {
                DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
                return PTR_ERR(vma);
        }
 
-       /* Invalidate GuC TLB to let GuC take the latest updates to GTT. */
-       I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
        /* init WOPCM */
-       I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
+       I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
        I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
 
        /* Enable MIA caching. GuC clock gating is disabled. */
@@ -388,7 +380,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
                I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);
 
-       if (IS_BROXTON(dev_priv))
+       if (IS_GEN9_LP(dev_priv))
                I915_WRITE(GEN9LP_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
        else
                I915_WRITE(GEN9_GT_PM_CONFIG, GT_DOORBELL_ENABLE);
@@ -450,14 +442,14 @@ static int guc_hw_reset(struct drm_i915_private *dev_priv)
  */
 int intel_guc_setup(struct drm_i915_private *dev_priv)
 {
-       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-       const char *fw_path = guc_fw->guc_fw_path;
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+       const char *fw_path = guc_fw->path;
        int retries, ret, err;
 
        DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
                fw_path,
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
-               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+               intel_uc_fw_status_repr(guc_fw->fetch_status),
+               intel_uc_fw_status_repr(guc_fw->load_status));
 
        /* Loading forbidden, or no firmware to load? */
        if (!i915.enable_guc_loading) {
@@ -475,10 +467,10 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
        }
 
        /* Fetch failed, or already fetched but failed to load? */
-       if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
+       if (guc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) {
                err = -EIO;
                goto fail;
-       } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
+       } else if (guc_fw->load_status == INTEL_UC_FIRMWARE_FAIL) {
                err = -ENOEXEC;
                goto fail;
        }
@@ -486,11 +478,14 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
        guc_interrupts_release(dev_priv);
        gen9_reset_guc_interrupts(dev_priv);
 
-       guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
+       /* We need to notify the guc whenever we change the GGTT */
+       i915_ggtt_enable_guc(dev_priv);
+
+       guc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
 
        DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
-               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+               intel_uc_fw_status_repr(guc_fw->fetch_status),
+               intel_uc_fw_status_repr(guc_fw->load_status));
 
        err = i915_guc_submission_init(dev_priv);
        if (err)
@@ -511,6 +506,7 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
                if (err)
                        goto fail;
 
+               intel_huc_load(dev_priv);
                err = guc_ucode_xfer(dev_priv);
                if (!err)
                        break;
@@ -522,11 +518,13 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
                         "retry %d more time(s)\n", err, retries);
        }
 
-       guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
+       guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
 
        DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
-               intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
+               intel_uc_fw_status_repr(guc_fw->fetch_status),
+               intel_uc_fw_status_repr(guc_fw->load_status));
+
+       intel_guc_auth_huc(dev_priv);
 
        if (i915.enable_guc_submission) {
                if (i915.guc_log_level >= 0)
@@ -541,12 +539,13 @@ int intel_guc_setup(struct drm_i915_private *dev_priv)
        return 0;
 
 fail:
-       if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
-               guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
+       if (guc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
+               guc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
 
        guc_interrupts_release(dev_priv);
        i915_guc_submission_disable(dev_priv);
        i915_guc_submission_fini(dev_priv);
+       i915_ggtt_disable_guc(dev_priv);
 
        /*
         * We've failed to load the firmware :(
@@ -587,93 +586,108 @@ fail:
        return ret;
 }
 
-static void guc_fw_fetch(struct drm_i915_private *dev_priv,
-                        struct intel_guc_fw *guc_fw)
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+                        struct intel_uc_fw *uc_fw)
 {
        struct pci_dev *pdev = dev_priv->drm.pdev;
        struct drm_i915_gem_object *obj;
        const struct firmware *fw = NULL;
-       struct guc_css_header *css;
+       struct uc_css_header *css;
        size_t size;
        int err;
 
-       DRM_DEBUG_DRIVER("before requesting firmware: GuC fw fetch status %s\n",
-               intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
+       DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+               intel_uc_fw_status_repr(uc_fw->fetch_status));
 
-       err = request_firmware(&fw, guc_fw->guc_fw_path, &pdev->dev);
+       err = request_firmware(&fw, uc_fw->path, &pdev->dev);
        if (err)
                goto fail;
        if (!fw)
                goto fail;
 
-       DRM_DEBUG_DRIVER("fetch GuC fw from %s succeeded, fw %p\n",
-               guc_fw->guc_fw_path, fw);
+       DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+               uc_fw->path, fw);
 
        /* Check the size of the blob before examining buffer contents */
-       if (fw->size < sizeof(struct guc_css_header)) {
+       if (fw->size < sizeof(struct uc_css_header)) {
                DRM_NOTE("Firmware header is missing\n");
                goto fail;
        }
 
-       css = (struct guc_css_header *)fw->data;
+       css = (struct uc_css_header *)fw->data;
 
        /* Firmware bits always start from header */
-       guc_fw->header_offset = 0;
-       guc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+       uc_fw->header_offset = 0;
+       uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
                css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
 
-       if (guc_fw->header_size != sizeof(struct guc_css_header)) {
+       if (uc_fw->header_size != sizeof(struct uc_css_header)) {
                DRM_NOTE("CSS header definition mismatch\n");
                goto fail;
        }
 
        /* then, uCode */
-       guc_fw->ucode_offset = guc_fw->header_offset + guc_fw->header_size;
-       guc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+       uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+       uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
 
        /* now RSA */
        if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
                DRM_NOTE("RSA key size is bad\n");
                goto fail;
        }
-       guc_fw->rsa_offset = guc_fw->ucode_offset + guc_fw->ucode_size;
-       guc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+       uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+       uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
 
        /* At least, it should have header, uCode and RSA. Size of all three. */
-       size = guc_fw->header_size + guc_fw->ucode_size + guc_fw->rsa_size;
+       size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
        if (fw->size < size) {
                DRM_NOTE("Missing firmware components\n");
                goto fail;
        }
 
-       /* Header and uCode will be loaded to WOPCM. Size of the two. */
-       size = guc_fw->header_size + guc_fw->ucode_size;
-       if (size > guc_wopcm_size(dev_priv)) {
-               DRM_NOTE("Firmware is too large to fit in WOPCM\n");
-               goto fail;
-       }
-
        /*
         * The GuC firmware image has the version number embedded at a well-known
         * offset within the firmware blob; note that major / minor versions are
         * TWO bytes each (i.e. u16), although all pointers and offsets are defined
         * in terms of bytes (u8).
         */
-       guc_fw->guc_fw_major_found = css->guc_sw_version >> 16;
-       guc_fw->guc_fw_minor_found = css->guc_sw_version & 0xFFFF;
-
-       if (guc_fw->guc_fw_major_found != guc_fw->guc_fw_major_wanted ||
-           guc_fw->guc_fw_minor_found < guc_fw->guc_fw_minor_wanted) {
-               DRM_NOTE("GuC firmware version %d.%d, required %d.%d\n",
-                       guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
-                       guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+       switch (uc_fw->fw) {
+       case INTEL_UC_FW_TYPE_GUC:
+               /* Header and uCode will be loaded to WOPCM. Size of the two. */
+               size = uc_fw->header_size + uc_fw->ucode_size;
+
+               /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+               if (size > intel_guc_wopcm_size(dev_priv)) {
+                       DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+                       goto fail;
+               }
+               uc_fw->major_ver_found = css->guc.sw_version >> 16;
+               uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+               break;
+
+       case INTEL_UC_FW_TYPE_HUC:
+               uc_fw->major_ver_found = css->huc.sw_version >> 16;
+               uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+               break;
+
+       default:
+               DRM_ERROR("Unknown firmware type %d\n", uc_fw->fw);
+               err = -ENOEXEC;
+               goto fail;
+       }
+
+       if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+           uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+               DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
+                       uc_fw->major_ver_found, uc_fw->minor_ver_found,
+                       uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
                err = -ENOEXEC;
                goto fail;
        }
 
        DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
-                       guc_fw->guc_fw_major_found, guc_fw->guc_fw_minor_found,
-                       guc_fw->guc_fw_major_wanted, guc_fw->guc_fw_minor_wanted);
+                       uc_fw->major_ver_found, uc_fw->minor_ver_found,
+                       uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
        obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
@@ -683,31 +697,31 @@ static void guc_fw_fetch(struct drm_i915_private *dev_priv,
                goto fail;
        }
 
-       guc_fw->guc_fw_obj = obj;
-       guc_fw->guc_fw_size = fw->size;
+       uc_fw->obj = obj;
+       uc_fw->size = fw->size;
 
-       DRM_DEBUG_DRIVER("GuC fw fetch status SUCCESS, obj %p\n",
-                       guc_fw->guc_fw_obj);
+       DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+                       uc_fw->obj);
 
        release_firmware(fw);
-       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_SUCCESS;
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
        return;
 
 fail:
-       DRM_WARN("Failed to fetch valid GuC firmware from %s (error %d)\n",
-                guc_fw->guc_fw_path, err);
-       DRM_DEBUG_DRIVER("GuC fw fetch status FAIL; err %d, fw %p, obj %p\n",
-               err, fw, guc_fw->guc_fw_obj);
+       DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+                uc_fw->path, err);
+       DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+               err, fw, uc_fw->obj);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
-       obj = guc_fw->guc_fw_obj;
+       obj = uc_fw->obj;
        if (obj)
                i915_gem_object_put(obj);
-       guc_fw->guc_fw_obj = NULL;
+       uc_fw->obj = NULL;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
        release_firmware(fw);           /* OK even if fw is NULL */
-       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
+       uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
 }
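
The fetch path above carves the blob into header, uCode and RSA segments from the CSS header fields, then pulls the packed version out of css->guc.sw_version or css->huc.sw_version depending on the firmware type. A standalone illustration of the version unpacking; the 16/16-bit split is as in the patch, the sample value is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical packed value; the layout matches css->*.sw_version. */
	uint32_t sw_version = (9u << 16) | 14u;		/* firmware "9.14" */
	uint16_t major_ver_found = sw_version >> 16;
	uint16_t minor_ver_found = sw_version & 0xFFFF;

	printf("version %u.%u\n", major_ver_found, minor_ver_found);
	return 0;
}
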
 
 /**
@@ -721,7 +735,7 @@ fail:
  */
 void intel_guc_init(struct drm_i915_private *dev_priv)
 {
-       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
        const char *fw_path;
 
        if (!HAS_GUC(dev_priv)) {
@@ -739,23 +753,23 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
                fw_path = NULL;
        } else if (IS_SKYLAKE(dev_priv)) {
                fw_path = I915_SKL_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = SKL_FW_MAJOR;
-               guc_fw->guc_fw_minor_wanted = SKL_FW_MINOR;
+               guc_fw->major_ver_wanted = SKL_FW_MAJOR;
+               guc_fw->minor_ver_wanted = SKL_FW_MINOR;
        } else if (IS_BROXTON(dev_priv)) {
                fw_path = I915_BXT_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = BXT_FW_MAJOR;
-               guc_fw->guc_fw_minor_wanted = BXT_FW_MINOR;
+               guc_fw->major_ver_wanted = BXT_FW_MAJOR;
+               guc_fw->minor_ver_wanted = BXT_FW_MINOR;
        } else if (IS_KABYLAKE(dev_priv)) {
                fw_path = I915_KBL_GUC_UCODE;
-               guc_fw->guc_fw_major_wanted = KBL_FW_MAJOR;
-               guc_fw->guc_fw_minor_wanted = KBL_FW_MINOR;
+               guc_fw->major_ver_wanted = KBL_FW_MAJOR;
+               guc_fw->minor_ver_wanted = KBL_FW_MINOR;
        } else {
                fw_path = "";   /* unknown device */
        }
 
-       guc_fw->guc_fw_path = fw_path;
-       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
-       guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
+       guc_fw->path = fw_path;
+       guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+       guc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
 
        /* Early (and silent) return if GuC loading is disabled */
        if (!i915.enable_guc_loading)
@@ -765,9 +779,9 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
        if (*fw_path == '\0')
                return;
 
-       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
+       guc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
        DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
-       guc_fw_fetch(dev_priv, guc_fw);
+       intel_uc_fw_fetch(dev_priv, guc_fw);
        /* status must now be FAIL or SUCCESS */
 }
 
@@ -777,17 +791,17 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
  */
 void intel_guc_fini(struct drm_i915_private *dev_priv)
 {
-       struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
+       struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
 
        mutex_lock(&dev_priv->drm.struct_mutex);
        guc_interrupts_release(dev_priv);
        i915_guc_submission_disable(dev_priv);
        i915_guc_submission_fini(dev_priv);
 
-       if (guc_fw->guc_fw_obj)
-               i915_gem_object_put(guc_fw->guc_fw_obj);
-       guc_fw->guc_fw_obj = NULL;
+       if (guc_fw->obj)
+               i915_gem_object_put(guc_fw->obj);
+       guc_fw->obj = NULL;
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
-       guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
+       guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
new file mode 100644 (file)
index 0000000..5c0f9a4
--- /dev/null
@@ -0,0 +1,658 @@
+/*
+ * Copyright © 2014-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/relay.h>
+#include "i915_drv.h"
+
+static void guc_log_capture_logs(struct intel_guc *guc);
+
+/**
+ * DOC: GuC firmware log
+ *
+ * The firmware log is enabled by setting i915.guc_log_level to a
+ * non-negative level. Log data is read out via the debugfs file
+ * i915_guc_log_dump; reading i915_guc_load_status prints the firmware
+ * loading status and the scratch register values.
+ *
+ */
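
As a companion to the DOC block above, a minimal user-space reader for the dump file; the path assumes the default debugfs mount point and DRM minor 0, and debugfs is typically readable by root only:

#include <stdio.h>

int main(void)
{
	/* Path assumes the default debugfs mount point and DRM minor 0. */
	const char *path = "/sys/kernel/debug/dri/0/i915_guc_log_dump";
	char buf[4096];
	size_t n;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}
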
+
+static int guc_log_flush_complete(struct intel_guc *guc)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE
+       };
+
+       return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_log_flush(struct intel_guc *guc)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
+               0
+       };
+
+       return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+static int guc_log_control(struct intel_guc *guc, u32 control_val)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
+               control_val
+       };
+
+       return intel_guc_send(guc, action, ARRAY_SIZE(action));
+}
+
+
+/*
+ * Sub buffer switch callback. Called whenever relay has to switch to a new
+ * sub buffer, relay stays on the same sub buffer if 0 is returned.
+ */
+static int subbuf_start_callback(struct rchan_buf *buf,
+                                void *subbuf,
+                                void *prev_subbuf,
+                                size_t prev_padding)
+{
+       /* Use no-overwrite mode by default, where relay will stop accepting
+        * new data if there are no empty sub buffers left.
+        * Relay does not enforce strict synchronization between consumer and
+        * producer. In overwrite mode the data can come out inconsistent or
+        * garbled, because the producer may be writing to the very sub buffer
+        * the consumer is reading from. That can only be avoided if the
+        * consumer is fast enough to always run in tandem with the producer.
+        */
+       if (relay_buf_full(buf))
+               return 0;
+
+       return 1;
+}
+
+/*
+ * file_create() callback. Creates relay file in debugfs.
+ */
+static struct dentry *create_buf_file_callback(const char *filename,
+                                              struct dentry *parent,
+                                              umode_t mode,
+                                              struct rchan_buf *buf,
+                                              int *is_global)
+{
+       struct dentry *buf_file;
+
+       /* This is to enable the use of a single buffer for the relay channel
+        * and correspondingly have a single file exposed to userspace, through
+        * which it can collect the logs in order without any post-processing.
+        * 'is_global' needs to be set even if parent is NULL, for early logging.
+        */
+       *is_global = 1;
+
+       if (!parent)
+               return NULL;
+
+       /* Not using the channel filename passed as an argument, since for each
+        * channel relay appends the corresponding CPU number to the filename
+        * passed in relay_open(). This should be fine as relay just needs a
+        * dentry of the file associated with the channel buffer and that file's
+        * name need not be the same as the filename passed as an argument.
+        */
+       buf_file = debugfs_create_file("guc_log", mode,
+                                      parent, buf, &relay_file_operations);
+       return buf_file;
+}
+
+/*
+ * file_remove() default callback. Removes relay file in debugfs.
+ */
+static int remove_buf_file_callback(struct dentry *dentry)
+{
+       debugfs_remove(dentry);
+       return 0;
+}
+
+/* relay channel callbacks */
+static struct rchan_callbacks relay_callbacks = {
+       .subbuf_start = subbuf_start_callback,
+       .create_buf_file = create_buf_file_callback,
+       .remove_buf_file = remove_buf_file_callback,
+};
+
+static void guc_log_remove_relay_file(struct intel_guc *guc)
+{
+       relay_close(guc->log.relay_chan);
+}
+
+static int guc_log_create_relay_channel(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct rchan *guc_log_relay_chan;
+       size_t n_subbufs, subbuf_size;
+
+       /* Keep the size of sub buffers same as shared log buffer */
+       subbuf_size = guc->log.vma->obj->base.size;
+
+       /* Store up to 8 snapshots, which is large enough to buffer sufficient
+        * boot-time logs and gives userspace enough leeway, in terms of
+        * latency, for consuming the logs from relay. It also doesn't take
+        * up too much memory.
+        */
+       n_subbufs = 8;
+
+       guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+                                       n_subbufs, &relay_callbacks, dev_priv);
+       if (!guc_log_relay_chan) {
+               DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+               return -ENOMEM;
+       }
+
+       GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+       guc->log.relay_chan = guc_log_relay_chan;
+       return 0;
+}
+
+static int guc_log_create_relay_file(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct dentry *log_dir;
+       int ret;
+
+       /* For now create the log file in /sys/kernel/debug/dri/0 dir */
+       log_dir = dev_priv->drm.primary->debugfs_root;
+
+       /* If the /sys/kernel/debug/dri/0 location does not exist, then debugfs
+        * is not mounted and so the relay file can't be created.
+        * The relay API really only fits debugfs: using relay has three
+        * requirements that can be met for a debugfs file only in a
+        * straightforward/clean manner:
+        * i)   The dentry pointer of the file is needed when opening the
+        *      relay channel.
+        * ii)  The file must be able to use the 'relay_file_operations' fops.
+        * iii) The 'i_private' field of the file's inode must be set to the
+        *      pointer of the relay channel buffer.
+        */
+       if (!log_dir) {
+               DRM_ERROR("Debugfs dir not available yet for GuC log file\n");
+               return -ENODEV;
+       }
+
+       ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
+       if (ret) {
+               DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void guc_move_to_next_buf(struct intel_guc *guc)
+{
+       /* Make sure the updates made in the sub buffer are visible before
+        * the consumer sees the following update to the offset inside the
+        * sub buffer.
+        */
+       smp_wmb();
+
+       /* All data has been written, so now move the offset of sub buffer. */
+       relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+
+       /* Switch to the next sub buffer */
+       relay_flush(guc->log.relay_chan);
+}
+
+static void *guc_get_write_buffer(struct intel_guc *guc)
+{
+       if (!guc->log.relay_chan)
+               return NULL;
+
+       /* Just get the base address of a new sub buffer and copy data into it
+        * ourselves. NULL will be returned in no-overwrite mode, if all sub
+        * buffers are full. We could have used relay_write() to indirectly
+        * copy the data, but that would have been a bit convoluted, as we need
+        * to write only to certain locations inside a sub buffer, which cannot
+        * be done without using relay_reserve() along with relay_write(). So
+        * it's better to use relay_reserve() alone.
+        */
+       return relay_reserve(guc->log.relay_chan, 0);
+}
+
+static bool guc_check_log_buf_overflow(struct intel_guc *guc,
+                                      enum guc_log_buffer_type type,
+                                      unsigned int full_cnt)
+{
+       unsigned int prev_full_cnt = guc->log.prev_overflow_count[type];
+       bool overflow = false;
+
+       if (full_cnt != prev_full_cnt) {
+               overflow = true;
+
+               guc->log.prev_overflow_count[type] = full_cnt;
+               guc->log.total_overflow_count[type] += full_cnt - prev_full_cnt;
+
+               if (full_cnt < prev_full_cnt) {
+                       /* buffer_full_cnt is a 4 bit counter */
+                       guc->log.total_overflow_count[type] += 16;
+               }
+               DRM_ERROR_RATELIMITED("GuC log buffer overflow\n");
+       }
+
+       return overflow;
+}
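
buffer_full_cnt is a 4-bit hardware counter, so a sample smaller than the previous one means the counter wrapped and 16 must be added back, exactly as the function above does. A standalone model of that arithmetic; unsigned wraparound makes the two-step form come out right:

#include <stdio.h>

/* Model of the accounting in guc_check_log_buf_overflow(): the delta is
 * computed with unsigned wraparound, then corrected by 16 when the 4-bit
 * hardware counter wrapped. */
static unsigned int overflow_delta(unsigned int prev, unsigned int cur)
{
	unsigned int delta = cur - prev;

	if (cur < prev)
		delta += 16;	/* 4-bit buffer_full_cnt wrapped */
	return delta;
}

int main(void)
{
	/* 14 -> 2 means the counter ticked 15, 0, 1, 2: four overflows. */
	printf("%u\n", overflow_delta(14, 2));
	return 0;
}
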
+
+static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
+{
+       switch (type) {
+       case GUC_ISR_LOG_BUFFER:
+               return (GUC_LOG_ISR_PAGES + 1) * PAGE_SIZE;
+       case GUC_DPC_LOG_BUFFER:
+               return (GUC_LOG_DPC_PAGES + 1) * PAGE_SIZE;
+       case GUC_CRASH_DUMP_LOG_BUFFER:
+               return (GUC_LOG_CRASH_PAGES + 1) * PAGE_SIZE;
+       default:
+               MISSING_CASE(type);
+       }
+
+       return 0;
+}
+
+static void guc_read_update_log_buffer(struct intel_guc *guc)
+{
+       unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
+       struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
+       struct guc_log_buffer_state log_buf_state_local;
+       enum guc_log_buffer_type type;
+       void *src_data, *dst_data;
+       bool new_overflow;
+
+       if (WARN_ON(!guc->log.buf_addr))
+               return;
+
+       /* Get the pointer to shared GuC log buffer */
+       log_buf_state = src_data = guc->log.buf_addr;
+
+       /* Get the pointer to local buffer to store the logs */
+       log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
+
+       /* Actual logs are present from the 2nd page */
+       src_data += PAGE_SIZE;
+       dst_data += PAGE_SIZE;
+
+       for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+               /* Make a stack copy of the state structure that lives inside
+                * the GuC log buffer (which is mapped uncached), to avoid
+                * reading from it multiple times.
+                */
+               memcpy(&log_buf_state_local, log_buf_state,
+                      sizeof(struct guc_log_buffer_state));
+               buffer_size = guc_get_log_buffer_size(type);
+               read_offset = log_buf_state_local.read_ptr;
+               write_offset = log_buf_state_local.sampled_write_ptr;
+               full_cnt = log_buf_state_local.buffer_full_cnt;
+
+               /* Bookkeeping stuff */
+               guc->log.flush_count[type] += log_buf_state_local.flush_to_file;
+               new_overflow = guc_check_log_buf_overflow(guc, type, full_cnt);
+
+               /* Update the state of shared log buffer */
+               log_buf_state->read_ptr = write_offset;
+               log_buf_state->flush_to_file = 0;
+               log_buf_state++;
+
+               if (unlikely(!log_buf_snapshot_state))
+                       continue;
+
+               /* First copy the state structure in snapshot buffer */
+               memcpy(log_buf_snapshot_state, &log_buf_state_local,
+                      sizeof(struct guc_log_buffer_state));
+
+               /* The write pointer could have been updated by the GuC firmware
+                * after it sent the flush interrupt to the host; for consistency,
+                * set the write pointer in the snapshot buffer to the same value
+                * as sampled_write_ptr.
+                */
+               log_buf_snapshot_state->write_ptr = write_offset;
+               log_buf_snapshot_state++;
+
+               /* Now copy the actual logs. */
+               if (unlikely(new_overflow)) {
+                       /* copy the whole buffer in case of overflow */
+                       read_offset = 0;
+                       write_offset = buffer_size;
+               } else if (unlikely((read_offset > buffer_size) ||
+                                   (write_offset > buffer_size))) {
+                       DRM_ERROR("invalid log buffer state\n");
+                       /* copy whole buffer as offsets are unreliable */
+                       read_offset = 0;
+                       write_offset = buffer_size;
+               }
+
+               /* Just copy the newly written data */
+               if (read_offset > write_offset) {
+                       i915_memcpy_from_wc(dst_data, src_data, write_offset);
+                       bytes_to_copy = buffer_size - read_offset;
+               } else {
+                       bytes_to_copy = write_offset - read_offset;
+               }
+               i915_memcpy_from_wc(dst_data + read_offset,
+                                   src_data + read_offset, bytes_to_copy);
+
+               src_data += buffer_size;
+               dst_data += buffer_size;
+       }
+
+       if (log_buf_snapshot_state)
+               guc_move_to_next_buf(guc);
+       else {
+               /* Rate-limited to avoid a deluge of messages; the logs might be
+                * getting consumed by userspace at a slow rate.
+                */
+               DRM_ERROR_RATELIMITED("no sub-buffer to capture logs\n");
+               guc->log.capture_miss_count++;
+       }
+}
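
The copy above treats the log as a ring: when read_offset > write_offset the valid data wraps past the end, so it is copied as two segments. A standalone model of just that decision, assuming byte offsets into a buffer of the given size:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* When read_off > write_off the valid data wraps past the end of the ring,
 * so it is copied as two segments; otherwise one contiguous copy suffices. */
static void ring_copy(uint8_t *dst, const uint8_t *src, size_t size,
		      size_t read_off, size_t write_off)
{
	if (read_off > write_off) {
		memcpy(dst, src, write_off);		/* wrapped head */
		memcpy(dst + read_off, src + read_off,
		       size - read_off);		/* tail */
	} else {
		memcpy(dst + read_off, src + read_off,
		       write_off - read_off);		/* one segment */
	}
}

int main(void)
{
	const uint8_t src[8] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
	uint8_t dst[8] = { 0 };

	ring_copy(dst, src, sizeof(src), 6, 2);	/* valid data: G H, then A B */
	printf("%c%c..%c%c\n", dst[0], dst[1], dst[6], dst[7]);	/* AB..GH */
	return 0;
}
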
+
+static void guc_log_cleanup(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       /* First disable the flush interrupt */
+       gen9_disable_guc_interrupts(dev_priv);
+
+       if (guc->log.flush_wq)
+               destroy_workqueue(guc->log.flush_wq);
+
+       guc->log.flush_wq = NULL;
+
+       if (guc->log.relay_chan)
+               guc_log_remove_relay_file(guc);
+
+       guc->log.relay_chan = NULL;
+
+       if (guc->log.buf_addr)
+               i915_gem_object_unpin_map(guc->log.vma->obj);
+
+       guc->log.buf_addr = NULL;
+}
+
+static void capture_logs_work(struct work_struct *work)
+{
+       struct intel_guc *guc =
+               container_of(work, struct intel_guc, log.flush_work);
+
+       guc_log_capture_logs(guc);
+}
+
+static int guc_log_create_extras(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       void *vaddr;
+       int ret;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       /* Nothing to do */
+       if (i915.guc_log_level < 0)
+               return 0;
+
+       if (!guc->log.buf_addr) {
+               /* Create a WC (Uncached for read) vmalloc mapping of log
+                * buffer pages, so that we can directly get the data
+                * (up-to-date) from memory.
+                */
+               vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+               if (IS_ERR(vaddr)) {
+                       ret = PTR_ERR(vaddr);
+                       DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+                       return ret;
+               }
+
+               guc->log.buf_addr = vaddr;
+       }
+
+       if (!guc->log.relay_chan) {
+               /* Create a relay channel, so that we have buffers for storing
+                * the GuC firmware logs; the channel will be linked with a file
+                * later on when debugfs is registered.
+                */
+               ret = guc_log_create_relay_channel(guc);
+               if (ret)
+                       return ret;
+       }
+
+       if (!guc->log.flush_wq) {
+               INIT_WORK(&guc->log.flush_work, capture_logs_work);
+
+                /*
+                * The GuC log buffer flush work item has to access registers to
+                * send the ack to GuC, and if it is not synced before suspend it
+                * can potentially get executed after the GFX device has been
+                * suspended.
+                * By marking the WQ as freezable, we don't have to bother with
+                * flushing this work item from the suspend hooks: the pending
+                * work item, if any, will either be executed before the suspend
+                * or scheduled later on resume. This way the handling of the
+                * work item is kept the same between system suspend & rpm suspend.
+                */
+               guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+                                                           WQ_HIGHPRI | WQ_FREEZABLE);
+               if (guc->log.flush_wq == NULL) {
+                       DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+                       return -ENOMEM;
+               }
+       }
+
+       return 0;
+}
+
+void intel_guc_log_create(struct intel_guc *guc)
+{
+       struct i915_vma *vma;
+       unsigned long offset;
+       uint32_t size, flags;
+
+       if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+               i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+       /* The first page is to save log buffer state. Allocate one
+        * extra page for the others in case of overlap. */
+       size = (1 + GUC_LOG_DPC_PAGES + 1 +
+               GUC_LOG_ISR_PAGES + 1 +
+               GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+       vma = guc->log.vma;
+       if (!vma) {
+               /* We require SSE 4.1 for fast reads from the GuC log buffer and
+                * it should be present on the chipsets supporting GuC based
+                * submissions.
+                */
+               if (WARN_ON(!i915_has_memcpy_from_wc())) {
+                       /* logging will not be enabled */
+                       i915.guc_log_level = -1;
+                       return;
+               }
+
+               vma = intel_guc_allocate_vma(guc, size);
+               if (IS_ERR(vma)) {
+                       /* logging will be off */
+                       i915.guc_log_level = -1;
+                       return;
+               }
+
+               guc->log.vma = vma;
+
+               if (guc_log_create_extras(guc)) {
+                       guc_log_cleanup(guc);
+                       i915_vma_unpin_and_release(&guc->log.vma);
+                       i915.guc_log_level = -1;
+                       return;
+               }
+       }
+
+       /* each allocated unit is a page */
+       flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+               (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+               (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+               (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+       offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+       guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+}
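
intel_guc_log_create() above packs the per-region page counts and the GGTT page offset into the single control word handed to the firmware. A standalone sketch of that packing; the shift and flag values here are placeholders, the authoritative GUC_LOG_* definitions live in the GuC firmware interface header:

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real GUC_LOG_* constants are defined in the
 * GuC firmware interface header. */
#define LOG_VALID			(1u << 0)
#define LOG_NOTIFY_ON_HALF_FULL		(1u << 1)
#define LOG_CRASH_SHIFT			4
#define LOG_DPC_SHIFT			6
#define LOG_ISR_SHIFT			9
#define LOG_BUF_ADDR_SHIFT		12

int main(void)
{
	uint32_t dpc_pages = 7, isr_pages = 7, crash_pages = 3;
	uint32_t offset = 0x100;	/* GGTT offset of the buffer, in pages */
	uint32_t flags;

	flags = LOG_VALID | LOG_NOTIFY_ON_HALF_FULL |
		(dpc_pages << LOG_DPC_SHIFT) |
		(isr_pages << LOG_ISR_SHIFT) |
		(crash_pages << LOG_CRASH_SHIFT);

	printf("guc log flags word: 0x%08x\n",
	       (offset << LOG_BUF_ADDR_SHIFT) | flags);
	return 0;
}
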
+
+static int guc_log_late_setup(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       int ret;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       if (i915.guc_log_level < 0)
+               return -EINVAL;
+
+       /* If log_level was set to -1 at boot time, then the setup needed to
+        * handle log buffer flush interrupts has not been done yet,
+        * so do that now.
+        */
+       ret = guc_log_create_extras(guc);
+       if (ret)
+               goto err;
+
+       ret = guc_log_create_relay_file(guc);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       guc_log_cleanup(guc);
+       /* logging will remain off */
+       i915.guc_log_level = -1;
+       return ret;
+}
+
+static void guc_log_capture_logs(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+       guc_read_update_log_buffer(guc);
+
+       /* Generally the device is expected to be active at this
+        * time, so get/put should be really quick.
+        */
+       intel_runtime_pm_get(dev_priv);
+       guc_log_flush_complete(guc);
+       intel_runtime_pm_put(dev_priv);
+}
+
+static void guc_flush_logs(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+
+       if (!i915.enable_guc_submission || (i915.guc_log_level < 0))
+               return;
+
+       /* First disable the interrupts; they will be re-enabled afterwards */
+       gen9_disable_guc_interrupts(dev_priv);
+
+       /* Before initiating the forceful flush, wait for any pending/ongoing
+        * flush to complete; otherwise the forceful flush may not actually happen.
+        */
+       flush_work(&guc->log.flush_work);
+
+       /* Ask GuC to update the log buffer state */
+       guc_log_flush(guc);
+
+       /* GuC would have updated log buffer by now, so capture it */
+       guc_log_capture_logs(guc);
+}
+
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
+{
+       struct intel_guc *guc = &dev_priv->guc;
+
+       union guc_log_control log_param;
+       int ret;
+
+       log_param.value = control_val;
+
+       if (log_param.verbosity < GUC_LOG_VERBOSITY_MIN ||
+           log_param.verbosity > GUC_LOG_VERBOSITY_MAX)
+               return -EINVAL;
+
+       /* This combination doesn't make sense & won't have any effect */
+       if (!log_param.logging_enabled && (i915.guc_log_level < 0))
+               return 0;
+
+       ret = guc_log_control(guc, log_param.value);
+       if (ret < 0) {
+               DRM_DEBUG_DRIVER("guc_logging_control action failed %d\n", ret);
+               return ret;
+       }
+
+       i915.guc_log_level = log_param.verbosity;
+
+       /* If log_level was set to -1 at boot time, then the relay channel file
+        * wouldn't have been created by now and interrupts also would not have
+        * been enabled.
+        */
+       if (!dev_priv->guc.log.relay_chan) {
+               ret = guc_log_late_setup(guc);
+               if (!ret)
+                       gen9_enable_guc_interrupts(dev_priv);
+       } else if (!log_param.logging_enabled) {
+               /* Once logging is disabled, GuC won't generate logs or send an
+                * interrupt. But there could be some data in the log buffer
+                * which is yet to be captured. So request GuC to update the log
+                * buffer state and then collect the leftover logs.
+                */
+               guc_flush_logs(guc);
+
+               /* As logging is disabled, update log level to reflect that */
+               i915.guc_log_level = -1;
+       } else {
+               /* In case interrupts were disabled, enable them now */
+               gen9_enable_guc_interrupts(dev_priv);
+       }
+
+       return ret;
+}
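
i915_guc_log_control() above views the u64 control value through union guc_log_control to pull out verbosity and logging_enabled. A standalone sketch of that union-over-bitfields pattern; the field widths and positions are assumptions here, not the authoritative layout from the GuC interface header:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration only; the real union guc_log_control is
 * defined in the GuC firmware interface header. */
union log_control {
	struct {
		uint32_t logging_enabled:1;
		uint32_t reserved:3;
		uint32_t verbosity:4;
	};
	uint64_t value;
};

int main(void)
{
	union log_control param = { .value = 0 };

	param.logging_enabled = 1;
	param.verbosity = 2;

	printf("value 0x%llx -> enabled %u, verbosity %u\n",
	       (unsigned long long)param.value,
	       param.logging_enabled, param.verbosity);
	return 0;
}
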
+
+void i915_guc_log_register(struct drm_i915_private *dev_priv)
+{
+       if (!i915.enable_guc_submission)
+               return;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       guc_log_late_setup(&dev_priv->guc);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+}
+
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
+{
+       if (!i915.enable_guc_submission)
+               return;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       guc_log_cleanup(&dev_priv->guc);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+}
index 3d546c019de058c4a925d2a689f2bf4b9ff2c6f7..b62e3f8ad415f6173470c90a3cb1b35b04f91c4b 100644 (file)
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
 
        /* Enable polling and queue hotplug re-enabling. */
        if (hpd_disabled) {
-               drm_kms_helper_poll_enable_locked(dev);
+               drm_kms_helper_poll_enable(dev);
                mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
                                 msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
        }
@@ -511,7 +511,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
        }
 
        if (enabled)
-               drm_kms_helper_poll_enable_locked(dev);
+               drm_kms_helper_poll_enable(dev);
 
        mutex_unlock(&dev->mode_config.mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
new file mode 100644 (file)
index 0000000..c144609
--- /dev/null
@@ -0,0 +1,338 @@
+/*
+ * Copyright © 2016-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "intel_uc.h"
+
+/**
+ * DOC: HuC Firmware
+ *
+ * Motivation:
+ * GEN9 introduces a new dedicated firmware for usage in media HEVC (High
+ * Efficiency Video Coding) operations. Userspace can use the firmware
+ * capabilities by adding HuC specific commands to batch buffers.
+ *
+ * Implementation:
+ * The same firmware loader is used as for the GuC. However, the actual
+ * loading to HW is deferred until GEM initialization is done.
+ *
+ * Note that HuC firmware loading must be done before GuC loading.
+ */
+
+#define BXT_HUC_FW_MAJOR 01
+#define BXT_HUC_FW_MINOR 07
+#define BXT_BLD_NUM 1398
+
+#define SKL_HUC_FW_MAJOR 01
+#define SKL_HUC_FW_MINOR 07
+#define SKL_BLD_NUM 1398
+
+#define KBL_HUC_FW_MAJOR 02
+#define KBL_HUC_FW_MINOR 00
+#define KBL_BLD_NUM 1810
+
+#define HUC_FW_PATH(platform, major, minor, bld_num) \
+       "i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
+       __stringify(minor) "_" __stringify(bld_num) ".bin"
+
+#define I915_SKL_HUC_UCODE HUC_FW_PATH(skl, SKL_HUC_FW_MAJOR, \
+       SKL_HUC_FW_MINOR, SKL_BLD_NUM)
+MODULE_FIRMWARE(I915_SKL_HUC_UCODE);
+
+#define I915_BXT_HUC_UCODE HUC_FW_PATH(bxt, BXT_HUC_FW_MAJOR, \
+       BXT_HUC_FW_MINOR, BXT_BLD_NUM)
+MODULE_FIRMWARE(I915_BXT_HUC_UCODE);
+
+#define I915_KBL_HUC_UCODE HUC_FW_PATH(kbl, KBL_HUC_FW_MAJOR, \
+       KBL_HUC_FW_MINOR, KBL_BLD_NUM)
+MODULE_FIRMWARE(I915_KBL_HUC_UCODE);
+
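
The firmware path macro above leans on __stringify() to splice the platform token and the zero-padded version numbers into the file name. A standalone demonstration; __stringify is re-created locally since it comes from kernel headers:

#include <stdio.h>

/* Local re-creation of the kernel's __stringify() helper. */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define HUC_FW_PATH(platform, major, minor, bld_num) \
	"i915/" __stringify(platform) "_huc_ver" __stringify(major) "_" \
	__stringify(minor) "_" __stringify(bld_num) ".bin"

int main(void)
{
	/* Prints: i915/skl_huc_ver01_07_1398.bin */
	puts(HUC_FW_PATH(skl, 01, 07, 1398));
	return 0;
}
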
+/**
+ * huc_ucode_xfer() - DMA's the firmware
+ * @dev_priv: the drm_i915_private device
+ *
+ * Transfer the firmware image to RAM for execution by the microcontroller.
+ *
+ * Return: 0 on success, non-zero on failure
+ */
+static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
+{
+       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       struct i915_vma *vma;
+       unsigned long offset = 0;
+       u32 size;
+       int ret;
+
+       ret = i915_gem_object_set_to_gtt_domain(huc_fw->obj, false);
+       if (ret) {
+               DRM_DEBUG_DRIVER("set-domain failed %d\n", ret);
+               return ret;
+       }
+
+       vma = i915_gem_object_ggtt_pin(huc_fw->obj, NULL, 0, 0,
+                               PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       if (IS_ERR(vma)) {
+               DRM_DEBUG_DRIVER("pin failed %d\n", (int)PTR_ERR(vma));
+               return PTR_ERR(vma);
+       }
+
+       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+       /* init WOPCM */
+       I915_WRITE(GUC_WOPCM_SIZE, intel_guc_wopcm_size(dev_priv));
+       I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE |
+                       HUC_LOADING_AGENT_GUC);
+
+       /* Set the source address for the uCode */
+       offset = guc_ggtt_offset(vma) + huc_fw->header_offset;
+       I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
+       I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+
+       /* Hardware doesn't look at destination address for HuC. Set it to 0,
+        * but still program the correct address space.
+        */
+       I915_WRITE(DMA_ADDR_1_LOW, 0);
+       I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+
+       size = huc_fw->header_size + huc_fw->ucode_size;
+       I915_WRITE(DMA_COPY_SIZE, size);
+
+       /* Start the DMA */
+       I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+
+       /* Wait for DMA to finish */
+       ret = wait_for((I915_READ(DMA_CTRL) & START_DMA) == 0, 100);
+
+       DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
+
+       /* Disable the bits once DMA is over */
+       I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+       /*
+        * We keep the object pages for reuse during resume. But we can unpin it
+        * now that DMA has completed, so it doesn't continue to take up space.
+        */
+       i915_vma_unpin(vma);
+
+       return ret;
+}
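
huc_ucode_xfer() above programs the DMA source address as a split pair: the low 32 bits go to DMA_ADDR_0_LOW and only 16 bits of the upper half to DMA_ADDR_0_HIGH. A standalone model of that split; the sample offset is arbitrary:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t offset = 0x12345ABCDEFull;	/* arbitrary sample offset */
	uint32_t lo = (uint32_t)offset;			  /* lower_32_bits() */
	uint32_t hi = (uint32_t)(offset >> 32) & 0xFFFF;  /* upper_32_bits() & 0xFFFF */

	printf("DMA_ADDR_0_LOW  = 0x%08x\n", lo);
	printf("DMA_ADDR_0_HIGH = 0x%04x\n", hi);
	return 0;
}
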
+
+/**
+ * intel_huc_init() - initiate HuC firmware loading request
+ * @dev_priv: the drm_i915_private device
+ *
+ * Called early during driver load, but after GEM is initialised. Loading
+ * continues only if the driver explicitly specifies a firmware name and
+ * version. All other cases are considered INTEL_UC_FIRMWARE_NONE, either
+ * because the HW is not capable or because the driver does not yet support
+ * it; no error message is printed in the INTEL_UC_FIRMWARE_NONE cases.
+ *
+ * The DMA-copying to HW is done later when intel_huc_load() is called.
+ */
+void intel_huc_init(struct drm_i915_private *dev_priv)
+{
+       struct intel_huc *huc = &dev_priv->huc;
+       struct intel_uc_fw *huc_fw = &huc->fw;
+       const char *fw_path = NULL;
+
+       huc_fw->path = NULL;
+       huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+       huc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
+       huc_fw->fw = INTEL_UC_FW_TYPE_HUC;
+
+       if (!HAS_HUC_UCODE(dev_priv))
+               return;
+
+       if (IS_SKYLAKE(dev_priv)) {
+               fw_path = I915_SKL_HUC_UCODE;
+               huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+       } else if (IS_BROXTON(dev_priv)) {
+               fw_path = I915_BXT_HUC_UCODE;
+               huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+       } else if (IS_KABYLAKE(dev_priv)) {
+               fw_path = I915_KBL_HUC_UCODE;
+               huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
+               huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+       }
+
+       huc_fw->path = fw_path;
+       huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+       DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
+
+       WARN(huc_fw->path == NULL, "HuC present but no fw path\n");
+
+       intel_uc_fw_fetch(dev_priv, huc_fw);
+}
+
+/**
+ * intel_huc_load() - load HuC uCode to device
+ * @dev_priv: the drm_i915_private device
+ *
+ * Called from guc_setup() during driver loading and also after a GPU reset.
+ * Note that HuC loading must be done before GuC loading.
+ *
+ * The firmware image should have already been fetched into memory by the
+ * earlier call to intel_huc_init(), so here we need only check that the
+ * fetch succeeded, and then transfer the image to the h/w.
+ *
+ * Return:     non-zero code on error
+ */
+int intel_huc_load(struct drm_i915_private *dev_priv)
+{
+       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+       int err;
+
+       if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_NONE)
+               return 0;
+
+       DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
+               huc_fw->path,
+               intel_uc_fw_status_repr(huc_fw->fetch_status),
+               intel_uc_fw_status_repr(huc_fw->load_status));
+
+       if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
+           huc_fw->load_status == INTEL_UC_FIRMWARE_FAIL)
+               return -ENOEXEC;
+
+       huc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+
+       switch (huc_fw->fetch_status) {
+       case INTEL_UC_FIRMWARE_FAIL:
+               /* something went wrong :( */
+               err = -EIO;
+               goto fail;
+
+       case INTEL_UC_FIRMWARE_NONE:
+       case INTEL_UC_FIRMWARE_PENDING:
+       default:
+               /* "can't happen" */
+               WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
+                       huc_fw->path,
+                       intel_uc_fw_status_repr(huc_fw->fetch_status),
+                       huc_fw->fetch_status);
+               err = -ENXIO;
+               goto fail;
+
+       case INTEL_UC_FIRMWARE_SUCCESS:
+               break;
+       }
+
+       err = huc_ucode_xfer(dev_priv);
+       if (err)
+               goto fail;
+
+       huc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+
+       DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
+               huc_fw->path,
+               intel_uc_fw_status_repr(huc_fw->fetch_status),
+               intel_uc_fw_status_repr(huc_fw->load_status));
+
+       return 0;
+
+fail:
+       if (huc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
+               huc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+
+       DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
+
+       return err;
+}
+
+/**
+ * intel_huc_fini() - clean up resources allocated for HuC
+ * @dev_priv: the drm_i915_private device
+ *
+ * Cleans up by releasing the huc firmware GEM obj.
+ */
+void intel_huc_fini(struct drm_i915_private *dev_priv)
+{
+       struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       if (huc_fw->obj)
+               i915_gem_object_put(huc_fw->obj);
+       huc_fw->obj = NULL;
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
+       huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
+/**
+ * intel_guc_auth_huc() - authenticate ucode
+ * @dev_priv: the drm_i915_device
+ *
+ * Triggers a HuC firmware authentication request to the GuC via the
+ * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface.
+ */
+void intel_guc_auth_huc(struct drm_i915_private *dev_priv)
+{
+       struct intel_guc *guc = &dev_priv->guc;
+       struct intel_huc *huc = &dev_priv->huc;
+       struct i915_vma *vma;
+       int ret;
+       u32 data[2];
+
+       if (huc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
+               return;
+
+       vma = i915_gem_object_ggtt_pin(huc->fw.obj, NULL, 0, 0,
+                               PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+       if (IS_ERR(vma)) {
+               DRM_ERROR("failed to pin huc fw object %d\n",
+                               (int)PTR_ERR(vma));
+               return;
+       }
+
+       /* Specify auth action and where public signature is. */
+       data[0] = INTEL_GUC_ACTION_AUTHENTICATE_HUC;
+       data[1] = guc_ggtt_offset(vma) + huc->fw.rsa_offset;
+
+       ret = intel_guc_send(guc, data, ARRAY_SIZE(data));
+       if (ret) {
+               DRM_ERROR("HuC: GuC did not ack Auth request %d\n", ret);
+               goto out;
+       }
+
+       /* Check authentication status, it should be done by now */
+       ret = intel_wait_for_register(dev_priv,
+                               HUC_STATUS2,
+                               HUC_FW_VERIFIED,
+                               HUC_FW_VERIFIED,
+                               50);
+
+       if (ret) {
+               DRM_ERROR("HuC: Authentication failed %d\n", ret);
+               goto out;
+       }
+
+out:
+       i915_vma_unpin(vma);
+}
+
index 6db246ad2f13c5ab0d5b52fd73adb495d8158d19..432ee495dec2288a33e47b893476fbaaf3d76a28 100644 (file)
@@ -811,12 +811,6 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
 
        ce->state->obj->mm.dirty = true;
 
-       /* Invalidate GuC TLB. */
-       if (i915.enable_guc_submission) {
-               struct drm_i915_private *dev_priv = ctx->i915;
-               I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
-       }
-
        i915_gem_context_get(ctx);
        return 0;
 
@@ -970,18 +964,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *batch,
                                                uint32_t index)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
-       /*
-        * WaDisableLSQCROPERFforOCL:kbl
-        * This WA is implemented in skl_init_clock_gating() but since
-        * this batch updates GEN8_L3SQCREG4 with default value we need to
-        * set this bit here to retain the WA during flush.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
@@ -1241,7 +1225,7 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
        if (IS_ERR(obj))
                return PTR_ERR(obj);
 
-       vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err;
@@ -1927,7 +1911,7 @@ int logical_render_ring_init(struct intel_engine_cs *engine)
        engine->emit_breadcrumb = gen8_emit_breadcrumb_render;
        engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_render_sz;
 
-       ret = intel_engine_create_scratch(engine, 4096);
+       ret = intel_engine_create_scratch(engine, PAGE_SIZE);
        if (ret)
                return ret;
 
@@ -2103,19 +2087,12 @@ static void execlists_init_reg_state(u32 *reg_state,
        ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
                       0);
 
-       if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+       if (ppgtt && USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
                /* 64b PPGTT (48bit canonical)
                 * PDP0_DESCRIPTOR contains the base address to PML4 and
                 * other PDP Descriptors are ignored.
                 */
                ASSIGN_CTX_PML4(ppgtt, reg_state);
-       } else {
-               /* 32b PPGTT
-                * PDP*_DESCRIPTOR contains the base address of space supported.
-                * With dynamic page allocation, PDPs may not be allocated at
-                * this point. Point the unallocated PDPs to the scratch page
-                */
-               execlists_update_context_pdps(ppgtt, reg_state);
        }
 
        if (engine->id == RCS) {
@@ -2209,7 +2186,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
        WARN_ON(ce->state);
 
-       context_size = round_up(intel_lr_context_size(engine), 4096);
+       context_size = round_up(intel_lr_context_size(engine),
+                               I915_GTT_PAGE_SIZE);
 
        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2220,7 +2198,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
                return PTR_ERR(ctx_obj);
        }
 
-       vma = i915_vma_create(ctx_obj, &ctx->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto error_deref_obj;
index 01ba36ea125e439a7d457c3a74d94c843d34cf49..0c852c024227d67c3037b6784c3394cecbeb6546 100644 (file)
@@ -26,7 +26,7 @@
 
 #include "intel_ringbuffer.h"
 
-#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_LR_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
 
 /* Execlists regs */
 #define RING_ELSP(engine)                      _MMIO((engine)->mmio_base + 0x230)
index 4473a611c664b5511d9de55a2e4f05c665a69e6c..0608fad7f593e18849ccf3d64c1ce60dc786dfdc 100644 (file)
@@ -811,8 +811,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        if (ret != 0)
                return ret;
 
-       vma = i915_gem_object_pin_to_display_plane(new_bo, 0,
-                                                  &i915_ggtt_view_normal);
+       vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
index ef0c0e195164d7f4f756029097498775767af75f..c0b1f99da37b34c914fe180c856bec51dc4c333f 100644 (file)
@@ -560,14 +560,14 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
        state = drm_atomic_state_alloc(dev);
        if (!state) {
                ret = -ENOMEM;
-               goto out;
+               goto unlock;
        }
 
        state->acquire_ctx = drm_modeset_legacy_acquire_ctx(&crtc->base);
        pipe_config = intel_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(pipe_config)) {
                ret = PTR_ERR(pipe_config);
-               goto out;
+               goto put_state;
        }
 
        pipe_config->pch_pfit.force_thru = enable;
@@ -576,10 +576,12 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
                pipe_config->base.connectors_changed = true;
 
        ret = drm_atomic_commit(state);
-out:
+
+put_state:
+       drm_atomic_state_put(state);
+unlock:
        WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
        drm_modeset_unlock_all(dev);
-       drm_atomic_state_put(state);
 }
 
 static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -613,6 +615,22 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
        return 0;
 }
 
+static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
+                              enum pipe pipe,
+                              enum intel_pipe_crc_source *source, u32 *val)
+{
+       if (IS_GEN2(dev_priv))
+               return i8xx_pipe_crc_ctl_reg(source, val);
+       else if (INTEL_GEN(dev_priv) < 5)
+               return i9xx_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+               return ilk_pipe_crc_ctl_reg(source, val);
+       else
+               return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
+}
+
 static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
                               enum pipe pipe,
                               enum intel_pipe_crc_source source)
@@ -636,17 +654,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
                return -EIO;
        }
 
-       if (IS_GEN2(dev_priv))
-               ret = i8xx_pipe_crc_ctl_reg(&source, &val);
-       else if (INTEL_GEN(dev_priv) < 5)
-               ret = i9xx_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
-       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               ret = vlv_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
-       else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
-               ret = ilk_pipe_crc_ctl_reg(&source, &val);
-       else
-               ret = ivb_pipe_crc_ctl_reg(dev_priv, pipe, &source, &val);
-
+       ret = get_new_crc_ctl_reg(dev_priv, pipe, &source, &val);
        if (ret != 0)
                goto out;
 
@@ -687,7 +695,7 @@ static int pipe_crc_set_source(struct drm_i915_private *dev_priv,
        POSTING_READ(PIPE_CRC_CTL(pipe));
 
        /* real source -> none transition */
-       if (source == INTEL_PIPE_CRC_SOURCE_NONE) {
+       if (!source) {
                struct intel_pipe_crc_entry *entries;
                struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
                                                                  pipe);
@@ -809,6 +817,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
 {
        int i;
 
+       if (!buf) {
+               *s = INTEL_PIPE_CRC_SOURCE_NONE;
+               return 0;
+       }
+
        for (i = 0; i < ARRAY_SIZE(pipe_crc_sources); i++)
                if (!strcmp(buf, pipe_crc_sources[i])) {
                        *s = i;
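
The NULL check added above gives the parser a defined meaning for a missing buffer: the generic CRC API passes NULL to request the "none" source. A self-contained sketch of that contract (the source names are illustrative):

        #include <stdio.h>
        #include <string.h>

        enum crc_source { CRC_SOURCE_NONE, CRC_SOURCE_PLANE1, CRC_SOURCE_PIPE };

        static const char * const crc_sources[] = { "none", "plane1", "pipe" };

        static int parse_source(const char *buf, enum crc_source *s)
        {
                size_t i;

                if (!buf) {                     /* NULL means "disable CRCs" */
                        *s = CRC_SOURCE_NONE;
                        return 0;
                }
                for (i = 0; i < sizeof(crc_sources) / sizeof(crc_sources[0]); i++)
                        if (!strcmp(buf, crc_sources[i])) {
                                *s = (enum crc_source)i;
                                return 0;
                        }
                return -1;                      /* unknown source name */
        }

        int main(void)
        {
                enum crc_source s;

                printf("%d %d\n", parse_source(NULL, &s), s);    /* 0 0 */
                printf("%d %d\n", parse_source("pipe", &s), s);  /* 0 2 */
                printf("%d\n", parse_source("bogus", &s));       /* -1 */
                return 0;
        }
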
@@ -937,3 +950,62 @@ void intel_pipe_crc_cleanup(struct drm_minor *minor)
                drm_debugfs_remove_files(info_list, 1, minor);
        }
 }
+
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
+                             size_t *values_cnt)
+{
+       struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+       struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       enum intel_display_power_domain power_domain;
+       enum intel_pipe_crc_source source;
+       u32 val = 0; /* shut up gcc */
+       int ret = 0;
+
+       if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+               DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+               return -EINVAL;
+       }
+
+       power_domain = POWER_DOMAIN_PIPE(crtc->index);
+       if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+               DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+               return -EIO;
+       }
+
+       ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
+       if (ret != 0)
+               goto out;
+
+       if (source) {
+               /*
+                * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+                * enabled and disabled dynamically based on package C states,
+                * user space can't make reliable use of the CRCs, so let's just
+                * completely disable it.
+                */
+               hsw_disable_ips(intel_crtc);
+       }
+
+       I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
+       POSTING_READ(PIPE_CRC_CTL(crtc->index));
+
+       if (!source) {
+               if (IS_G4X(dev_priv))
+                       g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
+               else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+                       vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
+               else if (IS_HASWELL(dev_priv) && crtc->index == PIPE_A)
+                       hsw_trans_edp_pipe_A_crc_wa(dev_priv, false);
+
+               hsw_enable_ips(intel_crtc);
+       }
+
+       pipe_crc->skipped = 0;
+       *values_cnt = 5;
+
+out:
+       intel_display_power_put(dev_priv, power_domain);
+
+       return ret;
+}
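
intel_crtc_set_crc_source() brackets all hardware access between a conditional power-domain get and an unconditional put on every exit path past it. A userspace sketch of that bracket, with invented names and plain error numbers standing in for -EIO and -EINVAL:

        #include <stdio.h>

        static int power_refs;
        static int domain_enabled = 1;

        static int power_get_if_enabled(void)
        {
                if (!domain_enabled)
                        return 0;       /* caller must not touch the hw */
                power_refs++;
                return 1;
        }

        static void power_put(void) { power_refs--; }

        static int set_crc_source(int fail_ctl)
        {
                int ret = 0;

                if (!power_get_if_enabled())
                        return -5;      /* pipe is off: no reference taken */

                if (fail_ctl) {
                        ret = -22;
                        goto out;       /* still owns a reference: put it */
                }
                /* ... program the CRC control register here ... */
        out:
                power_put();
                return ret;
        }

        int main(void)
        {
                printf("%d refs=%d\n", set_crc_source(0), power_refs);
                printf("%d refs=%d\n", set_crc_source(1), power_refs);
                domain_enabled = 0;
                printf("%d refs=%d\n", set_crc_source(0), power_refs);
                return 0;
        }
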
index 6aca8ff1498943009d8983e03c15249471c3d170..c3780d0d2baf752ce9d590b6f6c8db67674ec745 100644 (file)
@@ -122,13 +122,26 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
 static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
 {
        struct edp_vsc_psr psr_vsc;
+       struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
        memset(&psr_vsc, 0, sizeof(psr_vsc));
        psr_vsc.sdp_header.HB0 = 0;
        psr_vsc.sdp_header.HB1 = 0x7;
-       psr_vsc.sdp_header.HB2 = 0x3;
-       psr_vsc.sdp_header.HB3 = 0xb;
+       if (dev_priv->psr.colorimetry_support &&
+               dev_priv->psr.y_cord_support) {
+               psr_vsc.sdp_header.HB2 = 0x5;
+               psr_vsc.sdp_header.HB3 = 0x13;
+       } else if (dev_priv->psr.y_cord_support) {
+               psr_vsc.sdp_header.HB2 = 0x4;
+               psr_vsc.sdp_header.HB3 = 0xe;
+       } else {
+               psr_vsc.sdp_header.HB2 = 0x3;
+               psr_vsc.sdp_header.HB3 = 0xc;
+       }
+
        intel_psr_write_vsc(intel_dp, &psr_vsc);
 }
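
The HB2/HB3 selection above keys the VSC SDP header off two sink capabilities. A small sketch of the same decision tree; the byte values mirror the hunk, while the struct and helper names are hypothetical:

        #include <stdint.h>
        #include <stdio.h>

        struct sdp_header { uint8_t hb0, hb1, hb2, hb3; };

        static struct sdp_header psr2_vsc_header(int colorimetry, int y_coord)
        {
                struct sdp_header h = { .hb0 = 0, .hb1 = 0x7 };

                if (colorimetry && y_coord) {
                        h.hb2 = 0x5;
                        h.hb3 = 0x13;
                } else if (y_coord) {
                        h.hb2 = 0x4;
                        h.hb3 = 0xe;
                } else {
                        h.hb2 = 0x3;
                        h.hb3 = 0xc;
                }
                return h;
        }

        int main(void)
        {
                struct sdp_header h = psr2_vsc_header(1, 1);

                printf("HB2=0x%x HB3=0x%x\n", h.hb2, h.hb3);
                return 0;
        }
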
 
@@ -196,7 +209,11 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
                drm_dp_dpcd_writeb(&intel_dp->aux,
                                DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
                                DP_AUX_FRAME_SYNC_ENABLE);
-
+       /* Enable ALPM at sink for psr2 */
+       if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
+               drm_dp_dpcd_writeb(&intel_dp->aux,
+                               DP_RECEIVER_ALPM_CONFIG,
+                               DP_ALPM_ENABLE);
        if (dev_priv->psr.link_standby)
                drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
                                   DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
@@ -248,7 +265,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
                   VLV_EDP_PSR_ACTIVE_ENTRY);
 }
 
-static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+static void intel_enable_source_psr1(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = dig_port->base.base.dev;
@@ -299,14 +316,31 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
                val |= EDP_PSR_TP1_TP2_SEL;
 
        I915_WRITE(EDP_PSR_CTL, val);
+}
 
-       if (!dev_priv->psr.psr2_support)
-               return;
+static void intel_enable_source_psr2(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       /*
+        * Respect the VBT in case it asks for a higher idle_frame value.
+        * Use 6 as the minimum to cover all known cases, including the
+        * off-by-one issue that the HW has in some cases. There are also
+        * cases where the sink should be able to train with 5 or 6 idle
+        * patterns.
+        */
+       uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+       uint32_t val;
+
+       val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
 
        /* FIXME: selective update is probably totally broken because it doesn't
         * mesh at all with our frontbuffer tracking. And the hw alone isn't
         * good enough. */
-       val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+       val |= EDP_PSR2_ENABLE |
+               EDP_SU_TRACK_ENABLE |
+               EDP_FRAMES_BEFORE_SU_ENTRY;
 
        if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
                val |= EDP_PSR2_TP2_TIME_2500;
@@ -320,6 +354,19 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
        I915_WRITE(EDP_PSR2_CTL, val);
 }
 
+static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+{
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       struct drm_device *dev = dig_port->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       /* PSR1 and PSR2 are mutually exclusive. */
+       if (dev_priv->psr.psr2_support)
+               intel_enable_source_psr2(intel_dp);
+       else
+               intel_enable_source_psr1(intel_dp);
+}
+
 static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -387,6 +434,22 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
+       /* PSR2 is restricted to work with panel resolutions up to 3200x2000 */
+       if (intel_crtc->config->pipe_src_w > 3200 ||
+           intel_crtc->config->pipe_src_h > 2000) {
+               dev_priv->psr.psr2_support = false;
+               return false;
+       }
+
+       /*
+        * FIXME: enable PSR2 only for Y-coordinate PSR2 panels.
+        * After GTC implementation, remove this restriction.
+        */
+       if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
+               DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
+               return false;
+       }
+
        dev_priv->psr.source_ok = true;
        return true;
 }
@@ -397,7 +460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 
-       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+       if (dev_priv->psr.psr2_support)
+               WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+       else
+               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        WARN_ON(dev_priv->psr.active);
        lockdep_assert_held(&dev_priv->psr.lock);
 
@@ -426,6 +492,8 @@ void intel_psr_enable(struct intel_dp *intel_dp)
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
+       enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
+       u32 chicken;
 
        if (!HAS_PSR(dev_priv)) {
                DRM_DEBUG_KMS("PSR not supported on this platform\n");
@@ -449,26 +517,34 @@ void intel_psr_enable(struct intel_dp *intel_dp)
        dev_priv->psr.busy_frontbuffer_bits = 0;
 
        if (HAS_DDI(dev_priv)) {
-               hsw_psr_setup_vsc(intel_dp);
-
                if (dev_priv->psr.psr2_support) {
-                       /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
-                       if (crtc->config->pipe_src_w > 3200 ||
-                               crtc->config->pipe_src_h > 2000)
-                               dev_priv->psr.psr2_support = false;
-                       else
-                               skl_psr_setup_su_vsc(intel_dp);
+                       skl_psr_setup_su_vsc(intel_dp);
+                       chicken = PSR2_VSC_ENABLE_PROG_HEADER;
+                       if (dev_priv->psr.y_cord_support)
+                               chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
+                       I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
+                       I915_WRITE(EDP_PSR_DEBUG_CTL,
+                                  EDP_PSR_DEBUG_MASK_MEMUP |
+                                  EDP_PSR_DEBUG_MASK_HPD |
+                                  EDP_PSR_DEBUG_MASK_LPSP |
+                                  EDP_PSR_DEBUG_MASK_MAX_SLEEP |
+                                  EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+               } else {
+                       /* set up vsc header for psr1 */
+                       hsw_psr_setup_vsc(intel_dp);
+                       /*
+                        * Per spec: avoid continuous PSR exit by masking MEMUP
+                        * and HPD. Also mask LPSP to avoid dependency on other
+                        * drivers that might block runtime_pm, besides
+                        * preventing other HW tracking issues, now that we can
+                        * rely on frontbuffer tracking.
+                        */
+                       I915_WRITE(EDP_PSR_DEBUG_CTL,
+                                  EDP_PSR_DEBUG_MASK_MEMUP |
+                                  EDP_PSR_DEBUG_MASK_HPD |
+                                  EDP_PSR_DEBUG_MASK_LPSP);
                }
 
-               /*
-                * Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD.
-                * Also mask LPSP to avoid dependency on other drivers that
-                * might block runtime_pm besides preventing other hw tracking
-                * issues now we can rely on frontbuffer tracking.
-                */
-               I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
-                          EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
-
                /* Enable PSR on the panel */
                hsw_psr_enable_sink(intel_dp);
 
@@ -544,20 +620,42 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
        struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (dev_priv->psr.active) {
-               I915_WRITE(EDP_PSR_CTL,
-                          I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+               i915_reg_t psr_ctl;
+               u32 psr_status_mask;
+
+               if (dev_priv->psr.aux_frame_sync)
+                       drm_dp_dpcd_writeb(&intel_dp->aux,
+                                       DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+                                       0);
+
+               if (dev_priv->psr.psr2_support) {
+                       psr_ctl = EDP_PSR2_CTL;
+                       psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
+
+                       I915_WRITE(psr_ctl,
+                                  I915_READ(psr_ctl) &
+                                  ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
+
+               } else {
+                       psr_ctl = EDP_PSR_STATUS_CTL;
+                       psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+
+                       I915_WRITE(psr_ctl,
+                                  I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+               }
 
                /* Wait till PSR is idle */
                if (intel_wait_for_register(dev_priv,
-                                           EDP_PSR_STATUS_CTL,
-                                           EDP_PSR_STATUS_STATE_MASK,
-                                           0,
+                                           psr_ctl, psr_status_mask, 0,
                                            2000))
                        DRM_ERROR("Timed out waiting for PSR Idle State\n");
 
                dev_priv->psr.active = false;
        } else {
-               WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+               if (dev_priv->psr.psr2_support)
+                       WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
+               else
+                       WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
        }
 }
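
hsw_psr_disable() now picks a (register, mask) pair up front so PSR1 and PSR2 can share the same timed idle wait. A sketch of that funneling; the masks are placeholders, not the real register layouts:

        #include <stdio.h>

        struct regs { unsigned psr_status, psr2_status; };

        static int wait_for_clear(const unsigned *reg, unsigned mask, int tries)
        {
                while (tries--) {
                        if (!(*reg & mask))
                                return 0;
                        /* the kernel would sleep between polls and time out */
                }
                return -1;
        }

        static int psr_disable(struct regs *r, int psr2)
        {
                const unsigned *status = psr2 ? &r->psr2_status : &r->psr_status;
                unsigned mask = psr2 ? 0x3f : 0x7;   /* placeholder masks */

                /* ... the matching enable bit would be cleared here first ... */
                return wait_for_clear(status, mask, 1000);
        }

        int main(void)
        {
                struct regs r = { 0, 0 };

                printf("psr1: %d, psr2: %d\n", psr_disable(&r, 0), psr_disable(&r, 1));
                return 0;
        }
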
 
@@ -608,13 +706,24 @@ static void intel_psr_work(struct work_struct *work)
         * and be ready for re-enable.
         */
        if (HAS_DDI(dev_priv)) {
-               if (intel_wait_for_register(dev_priv,
-                                           EDP_PSR_STATUS_CTL,
-                                           EDP_PSR_STATUS_STATE_MASK,
-                                           0,
-                                           50)) {
-                       DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
-                       return;
+               if (dev_priv->psr.psr2_support) {
+                       if (intel_wait_for_register(dev_priv,
+                                               EDP_PSR2_STATUS_CTL,
+                                               EDP_PSR2_STATUS_STATE_MASK,
+                                               0,
+                                               50)) {
+                               DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
+                               return;
+                       }
+               } else {
+                       if (intel_wait_for_register(dev_priv,
+                                               EDP_PSR_STATUS_CTL,
+                                               EDP_PSR_STATUS_STATE_MASK,
+                                               0,
+                                               50)) {
+                               DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+                               return;
+                       }
                }
        } else {
                if (intel_wait_for_register(dev_priv,
@@ -656,11 +765,19 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
                return;
 
        if (HAS_DDI(dev_priv)) {
-               val = I915_READ(EDP_PSR_CTL);
-
-               WARN_ON(!(val & EDP_PSR_ENABLE));
-
-               I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+               if (dev_priv->psr.aux_frame_sync)
+                       drm_dp_dpcd_writeb(&intel_dp->aux,
+                                       DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
+                                       0);
+               if (dev_priv->psr.psr2_support) {
+                       val = I915_READ(EDP_PSR2_CTL);
+                       WARN_ON(!(val & EDP_PSR2_ENABLE));
+                       I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
+               } else {
+                       val = I915_READ(EDP_PSR_CTL);
+                       WARN_ON(!(val & EDP_PSR_ENABLE));
+                       I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
+               }
        } else {
                val = I915_READ(VLV_PSRCTL(pipe));
 
index 0971ac396b6081756ac8bcc11f67845f76b4652e..69035e4f9b3b76a06c4705c9429cbe44c2093ba3 100644 (file)
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);
 
-       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-        * involving this register should also be added to WA batch as required.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               /* WaDisableLSQCROPERFforOCL:kbl */
-               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                          GEN8_LQSC_RO_PERF_DIS);
-
        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
@@ -1736,7 +1728,7 @@ static int init_status_page(struct intel_engine_cs *engine)
        void *vaddr;
        int ret;
 
-       obj = i915_gem_object_create_internal(engine->i915, 4096);
+       obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
        if (IS_ERR(obj)) {
                DRM_ERROR("Failed to allocate status page\n");
                return PTR_ERR(obj);
@@ -1746,7 +1738,7 @@ static int init_status_page(struct intel_engine_cs *engine)
        if (ret)
                goto err;
 
-       vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
@@ -1777,7 +1769,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 
        engine->status_page.vma = vma;
        engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
-       engine->status_page.page_addr = memset(vaddr, 0, 4096);
+       engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);
 
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         engine->name, i915_ggtt_offset(vma));
@@ -1880,7 +1872,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
        /* mark ring buffers as read-only from GPU side by default */
        obj->gt_ro = 1;
 
-       vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+       vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
        if (IS_ERR(vma))
                goto err;
 
@@ -2049,7 +2041,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
        }
 
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-       ret = intel_ring_pin(ring, 4096);
+       ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
        if (ret) {
                intel_ring_free(ring);
                goto error;
@@ -2466,11 +2458,11 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
        if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore) {
                struct i915_vma *vma;
 
-               obj = i915_gem_object_create(dev_priv, 4096);
+               obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
                if (IS_ERR(obj))
                        goto err;
 
-               vma = i915_vma_create(obj, &dev_priv->ggtt.base, NULL);
+               vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL);
                if (IS_ERR(vma))
                        goto err_obj;
 
@@ -2683,7 +2675,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
                return ret;
 
        if (INTEL_GEN(dev_priv) >= 6) {
-               ret = intel_engine_create_scratch(engine, 4096);
+               ret = intel_engine_create_scratch(engine, PAGE_SIZE);
                if (ret)
                        return ret;
        } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
index 7031bc733d971e4f70d32fe9abc2b50aa9a51bc7..9ef54688872a86a70ab020a64b7209e040de70e0 100644 (file)
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
 
        I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
        I915_WRITE(PLANE_SURF(pipe, plane_id),
-                  intel_fb_gtt_offset(fb, rotation) + surf_addr);
+                  intel_plane_ggtt_offset(plane_state) + surf_addr);
        POSTING_READ(PLANE_SURF(pipe, plane_id));
 }
 
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
        I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
        I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
        I915_WRITE(SPSURF(pipe, plane_id),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPSURF(pipe, plane_id));
 }
 
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
                I915_WRITE(SPRSCALE(pipe), sprscale);
        I915_WRITE(SPRCTL(pipe), sprctl);
        I915_WRITE(SPRSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
        POSTING_READ(SPRSURF(pipe));
 }
 
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
        I915_WRITE(DVSSCALE(pipe), dvsscale);
        I915_WRITE(DVSCNTR(pipe), dvscntr);
        I915_WRITE(DVSSURF(pipe),
-                  intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+                  intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
        POSTING_READ(DVSSURF(pipe));
 }
 
index c6be352209556a0da074b9129f4a8e7a2fbb1a61..c46bc8594f22c1f15d75301483441594670b6a8f 100644 (file)
@@ -114,29 +114,3 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
        return intel_guc_send(guc, action, ARRAY_SIZE(action));
 }
 
-int intel_guc_log_flush_complete(struct intel_guc *guc)
-{
-       u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE };
-
-       return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-int intel_guc_log_flush(struct intel_guc *guc)
-{
-       u32 action[] = {
-               INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH,
-               0
-       };
-
-       return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
-
-int intel_guc_log_control(struct intel_guc *guc, u32 control_val)
-{
-       u32 action[] = {
-               INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING,
-               control_val
-       };
-
-       return intel_guc_send(guc, action, ARRAY_SIZE(action));
-}
index 9490a8e049c37053deeb3ba327c738a4d2353d6e..d74f4d3ad8dccf2d025cb38a6096315a2f1055cd 100644 (file)
@@ -93,29 +93,35 @@ struct i915_guc_client {
        uint64_t submissions[I915_NUM_ENGINES];
 };
 
-enum intel_guc_fw_status {
-       GUC_FIRMWARE_FAIL = -1,
-       GUC_FIRMWARE_NONE = 0,
-       GUC_FIRMWARE_PENDING,
-       GUC_FIRMWARE_SUCCESS
+enum intel_uc_fw_status {
+       INTEL_UC_FIRMWARE_FAIL = -1,
+       INTEL_UC_FIRMWARE_NONE = 0,
+       INTEL_UC_FIRMWARE_PENDING,
+       INTEL_UC_FIRMWARE_SUCCESS
+};
+
+enum intel_uc_fw_type {
+       INTEL_UC_FW_TYPE_GUC,
+       INTEL_UC_FW_TYPE_HUC
 };
 
 /*
  * This structure encapsulates all the data needed during the process
  * of fetching, caching, and loading the firmware image into the GuC.
  */
-struct intel_guc_fw {
-       const char *                    guc_fw_path;
-       size_t                          guc_fw_size;
-       struct drm_i915_gem_object *    guc_fw_obj;
-       enum intel_guc_fw_status        guc_fw_fetch_status;
-       enum intel_guc_fw_status        guc_fw_load_status;
-
-       uint16_t                        guc_fw_major_wanted;
-       uint16_t                        guc_fw_minor_wanted;
-       uint16_t                        guc_fw_major_found;
-       uint16_t                        guc_fw_minor_found;
-
+struct intel_uc_fw {
+       const char *path;
+       size_t size;
+       struct drm_i915_gem_object *obj;
+       enum intel_uc_fw_status fetch_status;
+       enum intel_uc_fw_status load_status;
+
+       uint16_t major_ver_wanted;
+       uint16_t minor_ver_wanted;
+       uint16_t major_ver_found;
+       uint16_t minor_ver_found;
+
+       enum intel_uc_fw_type fw;
        uint32_t header_size;
        uint32_t header_offset;
        uint32_t rsa_size;
@@ -141,7 +147,7 @@ struct intel_guc_log {
 };
 
 struct intel_guc {
-       struct intel_guc_fw guc_fw;
+       struct intel_uc_fw fw;
        struct intel_guc_log log;
 
        /* intel_guc_recv interrupt related state */
@@ -170,21 +176,28 @@ struct intel_guc {
        struct mutex send_mutex;
 };
 
+struct intel_huc {
+       /* Generic uC firmware management */
+       struct intel_uc_fw fw;
+
+       /* HuC-specific additions */
+};
+
 /* intel_uc.c */
 void intel_uc_init_early(struct drm_i915_private *dev_priv);
 int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
 int intel_guc_sample_forcewake(struct intel_guc *guc);
-int intel_guc_log_flush_complete(struct intel_guc *guc);
-int intel_guc_log_flush(struct intel_guc *guc);
-int intel_guc_log_control(struct intel_guc *guc, u32 control_val);
 
 /* intel_guc_loader.c */
 extern void intel_guc_init(struct drm_i915_private *dev_priv);
 extern int intel_guc_setup(struct drm_i915_private *dev_priv);
 extern void intel_guc_fini(struct drm_i915_private *dev_priv);
-extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
+extern const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
 extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
 extern int intel_guc_resume(struct drm_i915_private *dev_priv);
+void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
+       struct intel_uc_fw *uc_fw);
+u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
 
 /* i915_guc_submission.c */
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
@@ -193,10 +206,12 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *rq);
 void i915_guc_wq_unreserve(struct drm_i915_gem_request *request);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
-void i915_guc_capture_logs(struct drm_i915_private *dev_priv);
-void i915_guc_flush_logs(struct drm_i915_private *dev_priv);
-void i915_guc_register(struct drm_i915_private *dev_priv);
-void i915_guc_unregister(struct drm_i915_private *dev_priv);
+struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
+
+/* intel_guc_log.c */
+void intel_guc_log_create(struct intel_guc *guc);
+void i915_guc_log_register(struct drm_i915_private *dev_priv);
+void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
 int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
 
 static inline u32 guc_ggtt_offset(struct i915_vma *vma)
@@ -207,4 +222,10 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
        return offset;
 }
 
+/* intel_huc.c */
+void intel_huc_init(struct drm_i915_private *dev_priv);
+void intel_huc_fini(struct drm_i915_private  *dev_priv);
+int intel_huc_load(struct drm_i915_private *dev_priv);
+void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
+
 #endif
index 642b2fab42ff45341e7820b3765bdd5fa210c08c..a32d3b6e2e12e482f16d4f23eedea11cb8f5da18 100644 (file)
@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
        struct drm_crtc_state *crtc_state;
        struct drm_rect clip = { 0, };
 
+       if (!state->crtc)
+               return 0;
+
        crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
        if (IS_ERR(crtc_state))
                return PTR_ERR(crtc_state);
index d836b2274531f83fcd49ea86d649ca49fb405489..f7c87017222048eff2bd17be8eb4f6c83f52b4a7 100644 (file)
  * - TV Panel encoding via ENCT
  */
 
+/* HHI Registers */
+#define HHI_VDAC_CNTL0         0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1         0x2F8 /* 0xbe offset in data sheet */
+#define HHI_HDMI_PHY_CNTL0     0x3a0 /* 0xe8 offset in data sheet */
+
 struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
        .mode_tag = MESON_VENC_MODE_CVBS_PAL,
        .hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
 
 void meson_venc_init(struct meson_drm *priv)
 {
+       /* Disable CVBS VDAC */
+       regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+       regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+
+       /* Power down the DACs */
+       writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+       /* Disable HDMI PHY */
+       regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+
+       /* Disable HDMI */
+       writel_bits_relaxed(0x3, 0,
+                           priv->io_base + _REG(VPU_HDMI_SETTING));
+
        /* Disable all encoders */
        writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
        writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
index c809c085fd78abd61f15f3c9d2a19f14ff173d00..a2bcc70a03efaaac6ee32058f747e57161222b23 100644 (file)
@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
 
        /* Disable CVBS VDAC */
        regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
-       regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+       regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
 }
 
 static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
index b0b874264f9d84fad2ab4a183fd9f8be41bb4dfd..9ac007880328b7659130b81b196ae92d7230929c 100644 (file)
@@ -36,6 +36,7 @@ static const struct pci_device_id pciidlist[] = {
        { PCI_VENDOR_ID_MATROX, 0x533, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH },
        { PCI_VENDOR_ID_MATROX, 0x534, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_ER },
        { PCI_VENDOR_ID_MATROX, 0x536, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EW3 },
+       { PCI_VENDOR_ID_MATROX, 0x538, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_EH3 },
        {0,}
 };
 
index 0d6e998d63e6e761c109264f73e0e30a49eea046..c88b6ec88dd2b2ee56d68f383079559a4cbec626 100644 (file)
@@ -180,6 +180,7 @@ enum mga_type {
        G200_WB,
        G200_EV,
        G200_EH,
+       G200_EH3,
        G200_ER,
        G200_EW3,
 };
index 10535e3b75f2ff3118a57bbceed65f74d278c62a..77d1c4771786fc0ec18c02bd9351c9a89f4b17d8 100644 (file)
@@ -106,6 +106,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
                clock = 2;
                break;
        case G200_EH:
+       case G200_EH3:
        case G200_ER:
                data = 2;
                clock = 1;
index 067dfbc91b1c179288254579b7f00ff8509b8c41..3938120e505126b86c45cfc5db181f60eb94efb6 100644 (file)
@@ -497,34 +497,70 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
        bool pll_locked = false;
 
        m = n = p = 0;
-       vcomax = 800000;
-       vcomin = 400000;
-       pllreffreq = 33333;
 
-       delta = 0xffffffff;
+       if (mdev->type == G200_EH3) {
+               vcomax = 3000000;
+               vcomin = 1500000;
+               pllreffreq = 25000;
 
-       for (testp = 16; testp > 0; testp >>= 1) {
-               if (clock * testp > vcomax)
-                       continue;
-               if (clock * testp < vcomin)
-                       continue;
+               delta = 0xffffffff;
 
-               for (testm = 1; testm < 33; testm++) {
-                       for (testn = 17; testn < 257; testn++) {
-                               computed = (pllreffreq * testn) /
-                                       (testm * testp);
+               testp = 0;
+
+               for (testm = 150; testm >= 6; testm--) {
+                       if (clock * testm > vcomax)
+                               continue;
+                       if (clock * testm < vcomin)
+                               continue;
+                       for (testn = 120; testn >= 60; testn--) {
+                               computed = (pllreffreq * testn) / testm;
                                if (computed > clock)
                                        tmpdelta = computed - clock;
                                else
                                        tmpdelta = clock - computed;
                                if (tmpdelta < delta) {
                                        delta = tmpdelta;
-                                       n = testn - 1;
-                                       m = (testm - 1);
-                                       p = testp - 1;
+                                       n = testn;
+                                       m = testm;
+                                       p = testp;
+                               }
+                               if (delta == 0)
+                                       break;
+                       }
+                       if (delta == 0)
+                               break;
+               }
+       } else {
+
+               vcomax = 800000;
+               vcomin = 400000;
+               pllreffreq = 33333;
+
+               delta = 0xffffffff;
+
+               for (testp = 16; testp > 0; testp >>= 1) {
+                       if (clock * testp > vcomax)
+                               continue;
+                       if (clock * testp < vcomin)
+                               continue;
+
+                       for (testm = 1; testm < 33; testm++) {
+                               for (testn = 17; testn < 257; testn++) {
+                                       computed = (pllreffreq * testn) /
+                                               (testm * testp);
+                                       if (computed > clock)
+                                               tmpdelta = computed - clock;
+                                       else
+                                               tmpdelta = clock - computed;
+                                       if (tmpdelta < delta) {
+                                               delta = tmpdelta;
+                                               n = testn - 1;
+                                               m = (testm - 1);
+                                               p = testp - 1;
+                                       }
+                                       if ((clock * testp) >= 600000)
+                                               p |= 0x80;
                                }
-                               if ((clock * testp) >= 600000)
-                                       p |= 0x80;
                        }
                }
        }
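
The new G200_EH3 branch is an exhaustive divider search: walk the legal m and n ranges, keep the pair whose computed frequency lands closest to the requested clock, and stop early on an exact hit. A standalone sketch with the same constants; treating the clock unit as kHz is an assumption:

        #include <stdio.h>
        #include <stdlib.h>

        static void g200eh3_pll_search(long clock, unsigned *m_out, unsigned *n_out)
        {
                const long vcomax = 3000000, vcomin = 1500000, pllreffreq = 25000;
                unsigned long delta = ~0ul;
                unsigned testm, testn;

                for (testm = 150; testm >= 6; testm--) {
                        /* keep the VCO inside its legal window */
                        if (clock * testm > vcomax || clock * testm < vcomin)
                                continue;
                        for (testn = 120; testn >= 60; testn--) {
                                long computed = (pllreffreq * (long)testn) / testm;
                                unsigned long d = labs(computed - clock);

                                if (d < delta) {
                                        delta = d;
                                        *m_out = testm;
                                        *n_out = testn;
                                }
                                if (delta == 0)
                                        return;   /* exact match: stop early */
                        }
                }
        }

        int main(void)
        {
                unsigned m = 0, n = 0;

                g200eh3_pll_search(25000, &m, &n);   /* expect n/m == 1 */
                printf("m=%u n=%u\n", m, n);
                return 0;
        }
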
@@ -674,6 +710,7 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
                return mga_g200ev_set_plls(mdev, clock);
                break;
        case G200_EH:
+       case G200_EH3:
                return mga_g200eh_set_plls(mdev, clock);
                break;
        case G200_ER:
@@ -933,6 +970,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                option2 = 0x0000b000;
                break;
        case G200_EH:
+       case G200_EH3:
                dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
                                             MGA1064_MISC_CTL_DAC_RAM_CS;
                option = 0x00000120;
@@ -979,7 +1017,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
                if ((mdev->type == G200_EV ||
                    mdev->type == G200_WB ||
                    mdev->type == G200_EH ||
-                   mdev->type == G200_EW3) &&
+                   mdev->type == G200_EW3 ||
+                   mdev->type == G200_EH3) &&
                    (i >= 0x44) && (i <= 0x4e))
                        continue;
 
index a18126150e1136ea380dcdddc89aabe293d7fcbb..686a580c711a99bfe61b5867e5adc75891e5e0a0 100644 (file)
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 void adreno_flush(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       uint32_t wptr = get_wptr(gpu->rb);
+       uint32_t wptr;
+
+       /*
+        * Mask the wptr value we calculate so it fits in the HW range. This
+        * accounts for the possibility that the last command fit exactly into
+        * the ringbuffer and rb->next hasn't wrapped to zero yet.
+        */
+       wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
 
        /* ensure writes to ringbuffer have hit system memory: */
        mb();
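
The masking above keeps a "ring exactly full" write pointer in range: when the last command ends precisely at the ring's end, the unmasked dword index equals the ring size instead of wrapping to zero. A tiny sketch of the arithmetic, which also shows why the ring size must be a power of two (enforced in the msm_ringbuffer hunk further down):

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t ring_wptr(uint32_t next_byte_off, uint32_t ring_bytes)
        {
                uint32_t dwords = ring_bytes / 4;

                /* next_byte_off == ring_bytes when the last command fit
                 * exactly; the mask folds that back to 0. Only valid when
                 * ring_bytes is a power of two. */
                return (next_byte_off / 4) & (dwords - 1);
        }

        int main(void)
        {
                printf("%u\n", ring_wptr(4096, 4096)); /* 0, not 1024 */
                printf("%u\n", ring_wptr(64, 4096));   /* 16 */
                return 0;
        }
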
@@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
        struct adreno_platform_config *config = pdev->dev.platform_data;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       struct msm_mmu *mmu;
        int ret;
 
        adreno_gpu->funcs = funcs;
@@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       mmu = gpu->aspace->mmu;
-       if (mmu) {
+       if (gpu->aspace && gpu->aspace->mmu) {
+               struct msm_mmu *mmu = gpu->aspace->mmu;
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
index 5f6cd8745dbce78d7fec38bb6fc4b37e89c57457..c396d459a9d062769471fdc84d01120c8d1e8525 100644 (file)
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-       int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
-
-       for_each_plane_in_state(state, plane, plane_state, i)
-               mdp5_plane_complete_commit(plane, plane_state);
 
        if (mdp5_kms->smp)
                mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
index 17b0cc10117109bbc25a27de6a5e74d9b6f6c479..cdfc63d90c7b4bf4b7f1f116b410c347560663ef 100644 (file)
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
 
        /* assigned by crtc blender */
        enum mdp_mixer_stage_id stage;
-
-       bool pending : 1;
 };
 #define to_mdp5_plane_state(x) \
                container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
 
index 75247ea4335b5963567adf5c652bd9e45bc68639..b9fb111d34280169bdca69b1db0bbead6d7a6454 100644 (file)
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
        drm_printf(p, "\tzpos=%u\n", pstate->zpos);
        drm_printf(p, "\talpha=%u\n", pstate->alpha);
        drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
-       drm_printf(p, "\tpending=%u\n", pstate->pending);
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
        if (mdp5_state && mdp5_state->base.fb)
                drm_framebuffer_reference(mdp5_state->base.fb);
 
-       mdp5_state->pending = false;
-
        return &mdp5_state->base;
 }
 
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
        DBG("%s: check (%d -> %d)", plane->name,
                        plane_enabled(old_state), plane_enabled(state));
 
-       /* We don't allow faster-than-vblank updates.. if we did add this
-        * some day, we would need to disallow in cases where hwpipe
-        * changes
-        */
-       if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
-               return -EBUSY;
-
        max_width = config->hw->lm.max_width << 16;
        max_height = config->hw->lm.max_height << 16;
 
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
        struct drm_plane_state *state = plane->state;
-       struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
 
        DBG("%s: update", plane->name);
 
-       mdp5_state->pending = true;
-
        if (plane_enabled(state)) {
                int ret;
 
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
        return pstate->hwpipe->flush_mask;
 }
 
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state)
-{
-       struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
-       pstate->pending = false;
-}
-
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 {
index d8bc59c7e26142b377623a6736c575b30f6eeea2..8098677a39167f51f5582893adf3694023c85768 100644 (file)
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+               if (!priv->aspace[id])
+                       continue;
                msm_gem_unmap_vma(priv->aspace[id],
                                &msm_obj->domain[id], msm_obj->sgt);
        }
index 166e84e4f0d48f80eacb992c3ccc8964af89b28d..489676568a10d15ac959093e6a09ee3f133abe45 100644 (file)
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
                        pagefault_disable();
                }
 
-               if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+               if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+                       !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
                        DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
                        ret = -EINVAL;
                        goto out_unlock;
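
The tightened check rejects two classes of bad submissions in one expression: any unknown flag bit set, and no known access bit set at all. A sketch with illustrative flag values (not the real uapi constants):

        #include <stdio.h>

        #define BO_READ   0x1
        #define BO_WRITE  0x2
        #define BO_FLAGS  (BO_READ | BO_WRITE)

        static int check_bo_flags(unsigned flags)
        {
                /* unknown bits, or none of the required access bits */
                if ((flags & ~BO_FLAGS) || !(flags & BO_FLAGS))
                        return -22;
                return 0;
        }

        int main(void)
        {
                printf("%d\n", check_bo_flags(BO_READ));          /* 0 */
                printf("%d\n", check_bo_flags(0));                /* -22: no access bits */
                printf("%d\n", check_bo_flags(BO_READ | 0x80));   /* -22: unknown bit */
                return 0;
        }
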
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 {
        uint32_t i, last_offset = 0;
        uint32_t *ptr;
-       int ret;
+       int ret = 0;
 
        if (offset % 4) {
                DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 
                ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
                if (ret)
-                       return -EFAULT;
+                       goto out;
 
                if (submit_reloc.submit_offset % 4) {
                        DRM_ERROR("non-aligned reloc offset: %u\n",
                                        submit_reloc.submit_offset);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                /* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                if ((off >= (obj->base.size / 4)) ||
                                (off < last_offset)) {
                        DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
 
                ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
                if (ret)
-                       return ret;
+                       goto out;
 
                if (valid)
                        continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
                last_offset = off;
        }
 
+out:
        msm_gem_put_vaddr_locked(&obj->base);
 
-       return 0;
+       return ret;
 }
 
 static void submit_cleanup(struct msm_gem_submit *submit)
index f326cf6a32e64473461e0b7859101f1a404c0318..67b34e069abf383d4c533ab7d348fd5d576ad525 100644 (file)
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
        struct msm_ringbuffer *ring;
        int ret;
 
-       size = ALIGN(size, 4);   /* size should be dword aligned */
+       if (WARN_ON(!is_power_of_2(size)))
+               return ERR_PTR(-EINVAL);
 
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring) {
index dc026a8437123f44df161e57eb8ec7045439c25f..a2bb855a2851f7c8df668a9669ac4b50afc36273 100644 (file)
@@ -1253,7 +1253,7 @@ static int dsicm_probe(struct platform_device *pdev)
        dsicm_hw_reset(ddata);
 
        if (ddata->use_dsi_backlight) {
-               memset(&props, 0, sizeof(struct backlight_properties));
+               memset(&props, 0, sizeof(props));
                props.max_brightness = 255;
 
                props.type = BACKLIGHT_RAW;
index 746cb8d9cba1f580945a48bf593e6ea9fb3feb2a..5ab39e0060f2ae9b24c986282be795aa5ec9c755 100644 (file)
@@ -909,6 +909,7 @@ static struct spi_driver acx565akm_driver = {
 
 module_spi_driver(acx565akm_driver);
 
+MODULE_ALIAS("spi:sony,acx565akm");
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_DESCRIPTION("acx565akm LCD Driver");
 MODULE_LICENSE("GPL");
index c839f6456db2f8a9508e67931b7b80c27e69c9e7..5554b72cf56a78d4b469ffe651df697b7034ddf0 100644 (file)
@@ -620,6 +620,19 @@ u32 dispc_wb_get_framedone_irq(void)
        return DISPC_IRQ_FRAMEDONEWB;
 }
 
+void dispc_mgr_enable(enum omap_channel channel, bool enable)
+{
+       mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
+       /* flush posted write */
+       mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+}
+EXPORT_SYMBOL(dispc_mgr_enable);
+
+static bool dispc_mgr_is_enabled(enum omap_channel channel)
+{
+       return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+}
+
 bool dispc_mgr_go_busy(enum omap_channel channel)
 {
        return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
@@ -2901,20 +2914,6 @@ enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channe
 }
 EXPORT_SYMBOL(dispc_mgr_get_supported_outputs);
 
-void dispc_mgr_enable(enum omap_channel channel, bool enable)
-{
-       mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
-       /* flush posted write */
-       mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
-}
-EXPORT_SYMBOL(dispc_mgr_enable);
-
-bool dispc_mgr_is_enabled(enum omap_channel channel)
-{
-       return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
-}
-EXPORT_SYMBOL(dispc_mgr_is_enabled);
-
 void dispc_wb_enable(bool enable)
 {
        dispc_ovl_enable(OMAP_DSS_WB, enable);
index f060bda31235fb4e9443639f2620f47fb5dbb3ef..f74615d005a8fe29dabda24c406389a832f0050f 100644 (file)
@@ -4336,7 +4336,7 @@ static void print_dsi_vm(const char *str,
 
        wc = DIV_ROUND_UP(t->hact * t->bitspp, 8);
        pps = DIV_ROUND_UP(wc + 6, t->ndl); /* pixel packet size */
-       bl = t->hss + t->hsa + t->hse + t->hbp + t->hfront_porch;
+       bl = t->hss + t->hsa + t->hse + t->hbp + t->hfp;
        tot = bl + pps;
 
 #define TO_DSI_T(x) ((u32)div64_u64((u64)x * 1000000000llu, byteclk))
@@ -4345,14 +4345,14 @@ static void print_dsi_vm(const char *str,
                        "%u/%u/%u/%u/%u/%u = %u + %u = %u\n",
                        str,
                        byteclk,
-                       t->hss, t->hsa, t->hse, t->hbp, pps, t->hfront_porch,
+                       t->hss, t->hsa, t->hse, t->hbp, pps, t->hfp,
                        bl, pps, tot,
                        TO_DSI_T(t->hss),
                        TO_DSI_T(t->hsa),
                        TO_DSI_T(t->hse),
                        TO_DSI_T(t->hbp),
                        TO_DSI_T(pps),
-                       TO_DSI_T(t->hfront_porch),
+                       TO_DSI_T(t->hfp),
 
                        TO_DSI_T(bl),
                        TO_DSI_T(pps),
@@ -4367,7 +4367,7 @@ static void print_dispc_vm(const char *str, const struct videomode *vm)
        int hact, bl, tot;
 
        hact = vm->hactive;
-       bl = vm->hsync_len + vm->hbp + vm->hfront_porch;
+       bl = vm->hsync_len + vm->hback_porch + vm->hfront_porch;
        tot = hact + bl;
 
 #define TO_DISPC_T(x) ((u32)div64_u64((u64)x * 1000000000llu, pck))
@@ -4376,10 +4376,10 @@ static void print_dispc_vm(const char *str, const struct videomode *vm)
                        "%u/%u/%u/%u = %u + %u = %u\n",
                        str,
                        pck,
-                       vm->hsync_len, vm->hbp, hact, vm->hfront_porch,
+                       vm->hsync_len, vm->hback_porch, hact, vm->hfront_porch,
                        bl, hact, tot,
                        TO_DISPC_T(vm->hsync_len),
-                       TO_DISPC_T(vm->hbp),
+                       TO_DISPC_T(vm->hback_porch),
                        TO_DISPC_T(hact),
                        TO_DISPC_T(vm->hfront_porch),
                        TO_DISPC_T(bl),
@@ -4401,12 +4401,12 @@ static void print_dsi_dispc_vm(const char *str,
        dsi_tput = (u64)byteclk * t->ndl * 8;
        pck = (u32)div64_u64(dsi_tput, t->bitspp);
        dsi_hact = DIV_ROUND_UP(DIV_ROUND_UP(t->hact * t->bitspp, 8) + 6, t->ndl);
-       dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfront_porch;
+       dsi_htot = t->hss + t->hsa + t->hse + t->hbp + dsi_hact + t->hfp;
 
        vm.pixelclock = pck;
        vm.hsync_len = div64_u64((u64)(t->hsa + t->hse) * pck, byteclk);
-       vm.hbp = div64_u64((u64)t->hbp * pck, byteclk);
-       vm.hfront_porch = div64_u64((u64)t->hfront_porch * pck, byteclk);
+       vm.hback_porch = div64_u64((u64)t->hbp * pck, byteclk);
+       vm.hfront_porch = div64_u64((u64)t->hfp * pck, byteclk);
        vm.hactive = t->hact;
 
        print_dispc_vm(str, &vm);
index 136d30484d02345430a1875d49acffa836c8a059..bf626acae2712b3a7eff53ec148a210f910f5c6d 100644 (file)
@@ -119,8 +119,7 @@ static void __init omapdss_omapify_node(struct device_node *node)
 
 static void __init omapdss_add_to_list(struct device_node *node, bool root)
 {
-       struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node),
-               GFP_KERNEL);
+       struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL);
        if (n) {
                n->node = node;
                n->root = root;
index b420dde8c0fba14b2c63b71acc89d21317543666..5b3b961127bd2db756297eca3d0ba3f8a9bf2e59 100644 (file)
@@ -856,7 +856,6 @@ int dispc_runtime_get(void);
 void dispc_runtime_put(void);
 
 void dispc_mgr_enable(enum omap_channel channel, bool enable);
-bool dispc_mgr_is_enabled(enum omap_channel channel);
 u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
 u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
 u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel);
index 2580e8673908a8b72847aefe4e17954b71315c9a..f90e2d22c5ecaf09640eec2793185dd31888be71 100644 (file)
@@ -162,7 +162,7 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
 
                dssdrv->get_timings(dssdev, &t);
 
-               if (memcmp(&vm, &t, sizeof(struct videomode)))
+               if (memcmp(&vm, &t, sizeof(vm)))
                        r = -EINVAL;
                else
                        r = 0;
@@ -217,7 +217,7 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
 
        omap_dss_get_device(dssdev);
 
-       omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+       omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
        if (!omap_connector)
                goto fail;
 
@@ -240,8 +240,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
        connector->interlace_allowed = 1;
        connector->doublescan_allowed = 0;
 
-       drm_connector_register(connector);
-
        return connector;
 
 fail:
index 8dea89030e66e879949c88ce20c28aa7dd8cbc0f..dd47dc191e6b1ce10f41818156cb532e857c2ba2 100644 (file)
@@ -36,26 +36,18 @@ struct omap_crtc {
 
        struct videomode vm;
 
-       struct omap_drm_irq vblank_irq;
-       struct omap_drm_irq error_irq;
-
        bool ignore_digit_sync_lost;
 
+       bool enabled;
        bool pending;
        wait_queue_head_t pending_wait;
+       struct drm_pending_vblank_event *event;
 };
 
 /* -----------------------------------------------------------------------------
  * Helper Functions
  */
 
-uint32_t pipe2vbl(struct drm_crtc *crtc)
-{
-       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
-       return dispc_mgr_get_vsync_irq(omap_crtc->channel);
-}
-
 struct videomode *omap_crtc_timings(struct drm_crtc *crtc)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -68,6 +60,19 @@ enum omap_channel omap_crtc_channel(struct drm_crtc *crtc)
        return omap_crtc->channel;
 }
 
+static bool omap_crtc_is_pending(struct drm_crtc *crtc)
+{
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       unsigned long flags;
+       bool pending;
+
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+       pending = omap_crtc->pending;
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+       return pending;
+}
+
 int omap_crtc_wait_pending(struct drm_crtc *crtc)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -77,7 +82,7 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
         * a single frame refresh even on slower displays.
         */
        return wait_event_timeout(omap_crtc->pending_wait,
-                                 !omap_crtc->pending,
+                                 !omap_crtc_is_pending(crtc),
                                  msecs_to_jiffies(250));
 }
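
omap_crtc_is_pending() exists so the wait predicate samples the flag under the same event lock the vblank handler uses to clear it, instead of relying on bare memory barriers. A userspace pthreads sketch of that discipline (the kernel uses a wait queue and a spinlock instead):

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_cond_t pending_wait = PTHREAD_COND_INITIALIZER;
        static bool pending = true;

        /* For callers that do not already hold event_lock. */
        static bool crtc_is_pending(void)
        {
                bool p;

                pthread_mutex_lock(&event_lock);
                p = pending;
                pthread_mutex_unlock(&event_lock);
                return p;
        }

        static void *vblank_irq(void *arg)
        {
                (void)arg;
                pthread_mutex_lock(&event_lock);
                pending = false;              /* cleared under the same lock */
                pthread_cond_broadcast(&pending_wait);
                pthread_mutex_unlock(&event_lock);
                return NULL;
        }

        int main(void)
        {
                pthread_t t;

                pthread_create(&t, NULL, vblank_irq, NULL);

                pthread_mutex_lock(&event_lock);
                while (pending)               /* predicate read under event_lock */
                        pthread_cond_wait(&pending_wait, &event_lock);
                pthread_mutex_unlock(&event_lock);

                pthread_join(t, NULL);
                printf("pending now: %d\n", crtc_is_pending());
                return 0;
        }
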
 
@@ -135,14 +140,15 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
        u32 framedone_irq, vsync_irq;
        int ret;
 
+       if (WARN_ON(omap_crtc->enabled == enable))
+               return;
+
        if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
                dispc_mgr_enable(channel, enable);
+               omap_crtc->enabled = enable;
                return;
        }
 
-       if (dispc_mgr_is_enabled(channel) == enable)
-               return;
-
        if (omap_crtc->channel == OMAP_DSS_CHANNEL_DIGIT) {
                /*
                 * Digit output produces some sync lost interrupts during the
@@ -173,6 +179,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
        }
 
        dispc_mgr_enable(channel, enable);
+       omap_crtc->enabled = enable;
 
        ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
        if (ret) {
@@ -259,26 +266,9 @@ static const struct dss_mgr_ops mgr_ops = {
  * Setup, Flush and Page Flip
  */
 
-static void omap_crtc_complete_page_flip(struct drm_crtc *crtc)
+void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus)
 {
-       struct drm_pending_vblank_event *event;
-       struct drm_device *dev = crtc->dev;
-       unsigned long flags;
-
-       event = crtc->state->event;
-
-       if (!event)
-               return;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       drm_crtc_send_vblank_event(crtc, event);
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
-       struct omap_crtc *omap_crtc =
-                       container_of(irq, struct omap_crtc, error_irq);
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
 
        if (omap_crtc->ignore_digit_sync_lost) {
                irqstatus &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
@@ -289,29 +279,38 @@ static void omap_crtc_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
        DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_crtc->name, irqstatus);
 }
 
-static void omap_crtc_vblank_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
+void omap_crtc_vblank_irq(struct drm_crtc *crtc)
 {
-       struct omap_crtc *omap_crtc =
-                       container_of(irq, struct omap_crtc, vblank_irq);
-       struct drm_device *dev = omap_crtc->base.dev;
+       struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       bool pending;
 
-       if (dispc_mgr_go_busy(omap_crtc->channel))
+       spin_lock(&crtc->dev->event_lock);
+       /*
+        * If the dispc is busy we're racing the flush operation. Try again on
+        * the next vblank interrupt.
+        */
+       if (dispc_mgr_go_busy(omap_crtc->channel)) {
+               spin_unlock(&crtc->dev->event_lock);
                return;
+       }
 
-       DBG("%s: apply done", omap_crtc->name);
-
-       __omap_irq_unregister(dev, &omap_crtc->vblank_irq);
+       /* Send the vblank event if one has been requested. */
+       if (omap_crtc->event) {
+               drm_crtc_send_vblank_event(crtc, omap_crtc->event);
+               omap_crtc->event = NULL;
+       }
 
-       rmb();
-       WARN_ON(!omap_crtc->pending);
+       pending = omap_crtc->pending;
        omap_crtc->pending = false;
-       wmb();
+       spin_unlock(&crtc->dev->event_lock);
 
-       /* wake up userspace */
-       omap_crtc_complete_page_flip(&omap_crtc->base);
+       if (pending)
+               drm_crtc_vblank_put(crtc);
 
-       /* wake up omap_atomic_complete */
+       /* Wake up omap_atomic_complete. */
        wake_up(&omap_crtc->pending_wait);
+
+       DBG("%s: apply done", omap_crtc->name);
 }
 
 /* -----------------------------------------------------------------------------
@@ -324,9 +323,6 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
 
        DBG("%s", omap_crtc->name);
 
-       WARN_ON(omap_crtc->vblank_irq.registered);
-       omap_irq_unregister(crtc->dev, &omap_crtc->error_irq);
-
        drm_crtc_cleanup(crtc);
 
        kfree(omap_crtc);
@@ -335,17 +331,18 @@ static void omap_crtc_destroy(struct drm_crtc *crtc)
 static void omap_crtc_enable(struct drm_crtc *crtc)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+       int ret;
 
        DBG("%s", omap_crtc->name);
 
-       rmb();
+       spin_lock_irq(&crtc->dev->event_lock);
+       drm_crtc_vblank_on(crtc);
+       ret = drm_crtc_vblank_get(crtc);
+       WARN_ON(ret != 0);
+
        WARN_ON(omap_crtc->pending);
        omap_crtc->pending = true;
-       wmb();
-
-       omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
-
-       drm_crtc_vblank_on(crtc);
+       spin_unlock_irq(&crtc->dev->event_lock);
 }
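
With this change the vblank reference counting brackets each outstanding update: omap_crtc_enable() (and omap_crtc_atomic_flush() below) call drm_crtc_vblank_get(), and omap_crtc_vblank_irq() calls drm_crtc_vblank_put() once the pending update has landed, so the vblank interrupt stays enabled exactly as long as a flip is in flight. A toy userspace model of that get/put hand-off (hypothetical names, not driver code):

#include <assert.h>
#include <stdio.h>

static int vblank_refs;	/* models the drm_crtc_vblank_get/put balance */

static void vblank_get(void) { vblank_refs++; }
static void vblank_put(void) { assert(vblank_refs > 0); vblank_refs--; }

static void flush(int *pending)
{
	vblank_get();		/* keep the vblank irq alive for the flip */
	*pending = 1;
}

static void vblank_irq(int *pending)
{
	if (*pending) {
		*pending = 0;
		vblank_put();	/* flip landed: drop the reference */
	}
}

int main(void)
{
	int pending = 0;

	flush(&pending);
	vblank_irq(&pending);
	printf("refs after flip: %d\n", vblank_refs);	/* 0: balanced */
	return 0;
}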
 
 static void omap_crtc_disable(struct drm_crtc *crtc)
@@ -390,16 +387,15 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_crtc_state)
+                                  struct drm_crtc_state *old_crtc_state)
 {
 }
 
 static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
-                                  struct drm_crtc_state *old_crtc_state)
+                                  struct drm_crtc_state *old_crtc_state)
 {
        struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
-
-       WARN_ON(omap_crtc->vblank_irq.registered);
+       int ret;
 
        if (crtc->state->color_mgmt_changed) {
                struct drm_color_lut *lut = NULL;
@@ -414,18 +410,30 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
                dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
        }
 
-       if (dispc_mgr_is_enabled(omap_crtc->channel)) {
+       /*
+        * Only flush the CRTC if it is currently enabled. CRTCs that require a
+        * mode set are disabled prior to plane updates and enabled afterwards.
+        * They are thus not active (regardless of what their CRTC core state
+        * reports), so the DRM core could call this function even though the
+        * CRTC is currently disabled. Do nothing in that case.
+        */
+       if (!omap_crtc->enabled)
+               return;
+
+       DBG("%s: GO", omap_crtc->name);
 
-               DBG("%s: GO", omap_crtc->name);
+       ret = drm_crtc_vblank_get(crtc);
+       WARN_ON(ret != 0);
 
-               rmb();
-               WARN_ON(omap_crtc->pending);
-               omap_crtc->pending = true;
-               wmb();
+       spin_lock_irq(&crtc->dev->event_lock);
+       dispc_mgr_go(omap_crtc->channel);
 
-               dispc_mgr_go(omap_crtc->channel);
-               omap_irq_register(crtc->dev, &omap_crtc->vblank_irq);
-       }
+       WARN_ON(omap_crtc->pending);
+       omap_crtc->pending = true;
+
+       if (crtc->state->event)
+               omap_crtc->event = crtc->state->event;
+       spin_unlock_irq(&crtc->dev->event_lock);
 }
 
 static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
@@ -546,14 +554,6 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
        omap_crtc->channel = channel;
        omap_crtc->name = channel_names[channel];
 
-       omap_crtc->vblank_irq.irqmask = pipe2vbl(crtc);
-       omap_crtc->vblank_irq.irq = omap_crtc_vblank_irq;
-
-       omap_crtc->error_irq.irqmask =
-                       dispc_mgr_get_sync_lost_irq(channel);
-       omap_crtc->error_irq.irq = omap_crtc_error_irq;
-       omap_irq_register(dev, &omap_crtc->error_irq);
-
        ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
                                        &omap_crtc_funcs, NULL);
        if (ret < 0) {
index 4ceed7a9762f1a8309f84441154550878a8e4bbc..3cab06661a0830f802f89b70b9162cc2eb5b4859 100644
@@ -224,7 +224,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
        int rows = (1 + area->y1 - area->y0);
        int i = columns*rows;
 
-       pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+       pat = alloc_dma(txn, sizeof(*pat), &pat_pa);
 
        if (txn->last_pat)
                txn->last_pat->next_pa = (uint32_t)pat_pa;
@@ -735,7 +735,7 @@ static int omap_dmm_probe(struct platform_device *dev)
 
        /* alloc engines */
        omap_dmm->engines = kcalloc(omap_dmm->num_engines,
-                                   sizeof(struct refill_engine), GFP_KERNEL);
+                                   sizeof(*omap_dmm->engines), GFP_KERNEL);
        if (!omap_dmm->engines) {
                ret = -ENOMEM;
                goto fail;
index 42330e0c3324d04946a723220ec62dcac724eba4..afe8f05b927b2c43170a58f5564be44c01c0fc60 100644
@@ -96,7 +96,8 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
        dispc_runtime_get();
 
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
-       drm_atomic_helper_commit_planes(dev, old_state, 0);
+       drm_atomic_helper_commit_planes(dev, old_state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
        omap_atomic_wait_for_completion(dev, old_state);
@@ -315,8 +316,6 @@ static int omap_modeset_init(struct drm_device *dev)
 
        drm_mode_config_init(dev);
 
-       omap_drm_irq_install(dev);
-
        ret = omap_modeset_init_properties(dev);
        if (ret < 0)
                return ret;
@@ -489,12 +488,9 @@ static int omap_modeset_init(struct drm_device *dev)
 
        drm_mode_config_reset(dev);
 
-       return 0;
-}
+       omap_drm_irq_install(dev);
 
-static void omap_modeset_free(struct drm_device *dev)
-{
-       drm_mode_config_cleanup(dev);
+       return 0;
 }
 
 /*
@@ -632,93 +628,6 @@ static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] =
  * drm driver funcs
  */
 
-/**
- * load - setup chip and create an initial config
- * @dev: DRM device
- * @flags: startup flags
- *
- * The driver load routine has to do several things:
- *   - initialize the memory manager
- *   - allocate initial config memory
- *   - setup the DRM framebuffer with the allocated memory
- */
-static int dev_load(struct drm_device *dev, unsigned long flags)
-{
-       struct omap_drm_platform_data *pdata = dev->dev->platform_data;
-       struct omap_drm_private *priv;
-       unsigned int i;
-       int ret;
-
-       DBG("load: dev=%p", dev);
-
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv)
-               return -ENOMEM;
-
-       priv->omaprev = pdata->omaprev;
-
-       dev->dev_private = priv;
-
-       priv->wq = alloc_ordered_workqueue("omapdrm", 0);
-       init_waitqueue_head(&priv->commit.wait);
-       spin_lock_init(&priv->commit.lock);
-
-       spin_lock_init(&priv->list_lock);
-       INIT_LIST_HEAD(&priv->obj_list);
-
-       omap_gem_init(dev);
-
-       ret = omap_modeset_init(dev);
-       if (ret) {
-               dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
-               dev->dev_private = NULL;
-               kfree(priv);
-               return ret;
-       }
-
-       /* Initialize vblank handling, start with all CRTCs disabled. */
-       ret = drm_vblank_init(dev, priv->num_crtcs);
-       if (ret)
-               dev_warn(dev->dev, "could not init vblank\n");
-
-       for (i = 0; i < priv->num_crtcs; i++)
-               drm_crtc_vblank_off(priv->crtcs[i]);
-
-       priv->fbdev = omap_fbdev_init(dev);
-
-       /* store off drm_device for use in pm ops */
-       dev_set_drvdata(dev->dev, dev);
-
-       drm_kms_helper_poll_init(dev);
-
-       return 0;
-}
-
-static void dev_unload(struct drm_device *dev)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-
-       DBG("unload: dev=%p", dev);
-
-       drm_kms_helper_poll_fini(dev);
-
-       if (priv->fbdev)
-               omap_fbdev_free(dev);
-
-       omap_modeset_free(dev);
-       omap_gem_deinit(dev);
-
-       destroy_workqueue(priv->wq);
-
-       drm_vblank_cleanup(dev);
-       omap_drm_irq_uninstall(dev);
-
-       kfree(dev->dev_private);
-       dev->dev_private = NULL;
-
-       dev_set_drvdata(dev->dev, NULL);
-}
-
 static int dev_open(struct drm_device *dev, struct drm_file *file)
 {
        file->driver_priv = NULL;
@@ -803,8 +712,6 @@ static const struct file_operations omapdriver_fops = {
 static struct drm_driver omap_drm_driver = {
        .driver_features = DRIVER_MODESET | DRIVER_GEM  | DRIVER_PRIME |
                DRIVER_ATOMIC,
-       .load = dev_load,
-       .unload = dev_unload,
        .open = dev_open,
        .lastclose = dev_lastclose,
        .get_vblank_counter = drm_vblank_no_hw_counter,
@@ -833,30 +740,125 @@ static struct drm_driver omap_drm_driver = {
        .patchlevel = DRIVER_PATCHLEVEL,
 };
 
-static int pdev_probe(struct platform_device *device)
+static int pdev_probe(struct platform_device *pdev)
 {
-       int r;
+       struct omap_drm_platform_data *pdata = pdev->dev.platform_data;
+       struct omap_drm_private *priv;
+       struct drm_device *ddev;
+       unsigned int i;
+       int ret;
+
+       DBG("%s", pdev->name);
 
        if (omapdss_is_initialized() == false)
                return -EPROBE_DEFER;
 
        omap_crtc_pre_init();
 
-       r = omap_connect_dssdevs();
-       if (r) {
-               omap_crtc_pre_uninit();
-               return r;
+       ret = omap_connect_dssdevs();
+       if (ret)
+               goto err_crtc_uninit;
+
+       /* Allocate and initialize the driver private structure. */
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               ret = -ENOMEM;
+               goto err_disconnect_dssdevs;
        }
 
-       DBG("%s", device->name);
-       return drm_platform_init(&omap_drm_driver, device);
+       priv->omaprev = pdata->omaprev;
+       priv->wq = alloc_ordered_workqueue("omapdrm", 0);
+
+       init_waitqueue_head(&priv->commit.wait);
+       spin_lock_init(&priv->commit.lock);
+       spin_lock_init(&priv->list_lock);
+       INIT_LIST_HEAD(&priv->obj_list);
+
+       /* Allocate and initialize the DRM device. */
+       ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev);
+       if (IS_ERR(ddev)) {
+               ret = PTR_ERR(ddev);
+               goto err_free_priv;
+       }
+
+       ddev->dev_private = priv;
+       platform_set_drvdata(pdev, ddev);
+
+       omap_gem_init(ddev);
+
+       ret = omap_modeset_init(ddev);
+       if (ret) {
+               dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+               goto err_free_drm_dev;
+       }
+
+       /* Initialize vblank handling, start with all CRTCs disabled. */
+       ret = drm_vblank_init(ddev, priv->num_crtcs);
+       if (ret) {
+               dev_err(&pdev->dev, "could not init vblank\n");
+               goto err_cleanup_modeset;
+       }
+
+       for (i = 0; i < priv->num_crtcs; i++)
+               drm_crtc_vblank_off(priv->crtcs[i]);
+
+       priv->fbdev = omap_fbdev_init(ddev);
+
+       drm_kms_helper_poll_init(ddev);
+
+       /*
+        * Register the DRM device with the core and the connectors with
+        * sysfs.
+        */
+       ret = drm_dev_register(ddev, 0);
+       if (ret)
+               goto err_cleanup_helpers;
+
+       return 0;
+
+err_cleanup_helpers:
+       drm_kms_helper_poll_fini(ddev);
+       if (priv->fbdev)
+               omap_fbdev_free(ddev);
+err_cleanup_modeset:
+       drm_mode_config_cleanup(ddev);
+       omap_drm_irq_uninstall(ddev);
+err_free_drm_dev:
+       omap_gem_deinit(ddev);
+       drm_dev_unref(ddev);
+err_free_priv:
+       destroy_workqueue(priv->wq);
+       kfree(priv);
+err_disconnect_dssdevs:
+       omap_disconnect_dssdevs();
+err_crtc_uninit:
+       omap_crtc_pre_uninit();
+       return ret;
 }
 
-static int pdev_remove(struct platform_device *device)
+static int pdev_remove(struct platform_device *pdev)
 {
+       struct drm_device *ddev = platform_get_drvdata(pdev);
+       struct omap_drm_private *priv = ddev->dev_private;
+
        DBG("");
 
-       drm_put_dev(platform_get_drvdata(device));
+       drm_dev_unregister(ddev);
+
+       drm_kms_helper_poll_fini(ddev);
+
+       if (priv->fbdev)
+               omap_fbdev_free(ddev);
+
+       drm_mode_config_cleanup(ddev);
+
+       omap_drm_irq_uninstall(ddev);
+       omap_gem_deinit(ddev);
+
+       drm_dev_unref(ddev);
+
+       destroy_workqueue(priv->wq);
+       kfree(priv);
 
        omap_disconnect_dssdevs();
        omap_crtc_pre_uninit();
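
The rewritten pdev_probe() follows the standard goto-unwind idiom: each error label releases exactly what was acquired before the failing step, in reverse order, and pdev_remove() performs the same teardown on the success path. A compact, self-contained sketch of the idiom with made-up stage names:

#include <stdio.h>

static int stage_a(void) { puts("a up"); return 0; }
static int stage_b(void) { puts("b up"); return 0; /* return -1 to see the unwind */ }
static void undo_a(void) { puts("a down"); }

static int probe(void)
{
	int ret;

	ret = stage_a();
	if (ret)
		goto err_out;

	ret = stage_b();
	if (ret)
		goto err_undo_a;	/* unwind only what succeeded */

	return 0;

err_undo_a:
	undo_a();
err_out:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}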
index 8b113ba39da09fbe900b4ee7e0717d5056bfe21c..36d93ce84a294f867e94a63a87d2e5f799da589e 100644
@@ -48,19 +48,6 @@ struct omap_drm_window {
        uint32_t src_w, src_h;
 };
 
-/* For transiently registering for different DSS irqs that various parts
- * of the KMS code need during setup/configuration.  We these are not
- * necessarily the same as what drm_vblank_get/put() are requesting, and
- * the hysteresis in drm_vblank_put() is not necessarily desirable for
- * internal housekeeping related irq usage.
- */
-struct omap_drm_irq {
-       struct list_head node;
-       uint32_t irqmask;
-       bool registered;
-       void (*irq)(struct omap_drm_irq *irq, uint32_t irqstatus);
-};
-
 /* For KMS code that needs to wait for a certain # of IRQs:
  */
 struct omap_irq_wait;
@@ -101,9 +88,9 @@ struct omap_drm_private {
        struct drm_property *zorder_prop;
 
        /* irq handling: */
-       struct list_head irq_list;    /* list of omap_drm_irq */
-       uint32_t vblank_mask;         /* irq bits set for userspace vblank */
-       struct omap_drm_irq error_handler;
+       spinlock_t wait_lock;           /* protects the wait_list */
+       struct list_head wait_list;     /* list of omap_irq_wait */
+       uint32_t irq_mask;              /* enabled irqs in addition to wait_list */
 
        /* atomic commit */
        struct {
@@ -127,10 +114,6 @@ int omap_gem_resume(struct device *dev);
 
 int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe);
 void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe);
-void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
-void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq);
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq);
 void omap_drm_irq_uninstall(struct drm_device *dev);
 int omap_drm_irq_install(struct drm_device *dev);
 
@@ -154,6 +137,8 @@ void omap_crtc_pre_uninit(void);
 struct drm_crtc *omap_crtc_init(struct drm_device *dev,
                struct drm_plane *plane, enum omap_channel channel, int id);
 int omap_crtc_wait_pending(struct drm_crtc *crtc);
+void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
+void omap_crtc_vblank_irq(struct drm_crtc *crtc);
 
 struct drm_plane *omap_plane_init(struct drm_device *dev,
                int id, enum drm_plane_type type,
@@ -232,32 +217,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
                struct dma_buf *buffer);
 
 /* map crtc to vblank mask */
-uint32_t pipe2vbl(struct drm_crtc *crtc);
 struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
 
-/* should these be made into common util helpers?
- */
-
-static inline int objects_lookup(
-               struct drm_file *filp, uint32_t pixel_format,
-               struct drm_gem_object **bos, const uint32_t *handles)
-{
-       int i, n = drm_format_num_planes(pixel_format);
-
-       for (i = 0; i < n; i++) {
-               bos[i] = drm_gem_object_lookup(filp, handles[i]);
-               if (!bos[i])
-                       goto fail;
-
-       }
-
-       return 0;
-
-fail:
-       while (--i > 0)
-               drm_gem_object_unreference_unlocked(bos[i]);
-
-       return -ENOENT;
-}
-
 #endif /* __OMAP_DRV_H__ */
index a20f30039aee6c158c9ca7e89e6324a09a3ca667..86c977b7189a567afc2314c7bb4c140a40181a77 100644
@@ -117,7 +117,7 @@ static int omap_encoder_update(struct drm_encoder *encoder,
 
                dssdrv->get_timings(dssdev, &t);
 
-               if (memcmp(vm, &t, sizeof(struct videomode)))
+               if (memcmp(vm, &t, sizeof(*vm)))
                        ret = -EINVAL;
                else
                        ret = 0;
index bd6b94c38613f093ddbb2cf98835b35d552b83e9..29dc677dd4d3e74c5a369d619bfbfe4930108a54 100644
  * framebuffer funcs
  */
 
-/* per-format info: */
-struct format {
+/* DSS to DRM format mapping */
+static const struct {
        enum omap_color_mode dss_format;
        uint32_t pixel_format;
-       struct {
-               int stride_bpp;           /* this times width is stride */
-               int sub_y;                /* sub-sample in y dimension */
-       } planes[4];
-       bool yuv;
-};
-
-static const struct format formats[] = {
+} formats[] = {
        /* 16bpp [A]RGB: */
-       { OMAP_DSS_COLOR_RGB16,       DRM_FORMAT_RGB565,   {{2, 1}}, false }, /* RGB16-565 */
-       { OMAP_DSS_COLOR_RGB12U,      DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
-       { OMAP_DSS_COLOR_RGBX16,      DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
-       { OMAP_DSS_COLOR_RGBA16,      DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
-       { OMAP_DSS_COLOR_ARGB16,      DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
-       { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
-       { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
+       { OMAP_DSS_COLOR_RGB16,       DRM_FORMAT_RGB565 },   /* RGB16-565 */
+       { OMAP_DSS_COLOR_RGB12U,      DRM_FORMAT_RGBX4444 }, /* RGB12x-4444 */
+       { OMAP_DSS_COLOR_RGBX16,      DRM_FORMAT_XRGB4444 }, /* xRGB12-4444 */
+       { OMAP_DSS_COLOR_RGBA16,      DRM_FORMAT_RGBA4444 }, /* RGBA12-4444 */
+       { OMAP_DSS_COLOR_ARGB16,      DRM_FORMAT_ARGB4444 }, /* ARGB16-4444 */
+       { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555 }, /* xRGB15-1555 */
+       { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555 }, /* ARGB16-1555 */
        /* 24bpp RGB: */
-       { OMAP_DSS_COLOR_RGB24P,      DRM_FORMAT_RGB888,   {{3, 1}}, false }, /* RGB24-888 */
+       { OMAP_DSS_COLOR_RGB24P,      DRM_FORMAT_RGB888 },   /* RGB24-888 */
        /* 32bpp [A]RGB: */
-       { OMAP_DSS_COLOR_RGBX32,      DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
-       { OMAP_DSS_COLOR_RGB24U,      DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
-       { OMAP_DSS_COLOR_RGBA32,      DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
-       { OMAP_DSS_COLOR_ARGB32,      DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
+       { OMAP_DSS_COLOR_RGBX32,      DRM_FORMAT_RGBX8888 }, /* RGBx24-8888 */
+       { OMAP_DSS_COLOR_RGB24U,      DRM_FORMAT_XRGB8888 }, /* xRGB24-8888 */
+       { OMAP_DSS_COLOR_RGBA32,      DRM_FORMAT_RGBA8888 }, /* RGBA32-8888 */
+       { OMAP_DSS_COLOR_ARGB32,      DRM_FORMAT_ARGB8888 }, /* ARGB32-8888 */
        /* YUV: */
-       { OMAP_DSS_COLOR_NV12,        DRM_FORMAT_NV12,     {{1, 1}, {1, 2}}, true },
-       { OMAP_DSS_COLOR_YUV2,        DRM_FORMAT_YUYV,     {{2, 1}}, true },
-       { OMAP_DSS_COLOR_UYVY,        DRM_FORMAT_UYVY,     {{2, 1}}, true },
+       { OMAP_DSS_COLOR_NV12,        DRM_FORMAT_NV12 },
+       { OMAP_DSS_COLOR_YUV2,        DRM_FORMAT_YUYV },
+       { OMAP_DSS_COLOR_UYVY,        DRM_FORMAT_UYVY },
 };
 
 /* convert from overlay's pixel formats bitmask to an array of fourcc's */
@@ -89,8 +82,9 @@ struct plane {
 struct omap_framebuffer {
        struct drm_framebuffer base;
        int pin_count;
-       const struct format *format;
-       struct plane planes[4];
+       const struct drm_format_info *format;
+       enum omap_color_mode dss_format;
+       struct plane planes[2];
        /* lock for pinning (pin_count and planes.paddr) */
        struct mutex lock;
 };
@@ -128,13 +122,13 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
 };
 
 static uint32_t get_linear_addr(struct plane *plane,
-               const struct format *format, int n, int x, int y)
+               const struct drm_format_info *format, int n, int x, int y)
 {
        uint32_t offset;
 
-       offset = plane->offset +
-                       (x * format->planes[n].stride_bpp) +
-                       (y * plane->pitch / format->planes[n].sub_y);
+       offset = plane->offset
+              + (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
+              + (y * plane->pitch / (n == 0 ? 1 : format->vsub));
 
        return plane->paddr + offset;
 }
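
get_linear_addr() now takes its per-plane arithmetic from drm_format_info: cpp[n] bytes per sample plus the hsub/vsub subsampling factors for the chroma plane. A standalone sketch that reproduces the computation for NV12, using the values the DRM format table defines for that format (cpp = {1, 2}, hsub = vsub = 2):

#include <stdio.h>

/* Mirrors the get_linear_addr() arithmetic for NV12 (DRM format table
 * values: cpp = { 1, 2 }, hsub = vsub = 2). */
struct fmt {
	unsigned int cpp[2];
	unsigned int hsub, vsub;
};

static unsigned int linear_offset(const struct fmt *f, unsigned int n,
				  unsigned int pitch, unsigned int x,
				  unsigned int y)
{
	/* Plane 0 is never subsampled; in the chroma plane x advances by
	 * cpp[1] bytes per hsub pixels and y by one line per vsub lines. */
	return x * f->cpp[n] / (n == 0 ? 1 : f->hsub)
	     + y * pitch / (n == 0 ? 1 : f->vsub);
}

int main(void)
{
	const struct fmt nv12 = { .cpp = { 1, 2 }, .hsub = 2, .vsub = 2 };
	const unsigned int pitch = 1920;

	/* Byte offsets of pixel (64, 32) within each plane. */
	printf("Y:  %u\n", linear_offset(&nv12, 0, pitch, 64, 32)); /* 61504 */
	printf("UV: %u\n", linear_offset(&nv12, 1, pitch, 64, 32)); /* 30784 */
	return 0;
}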
@@ -153,11 +147,11 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
                struct omap_drm_window *win, struct omap_overlay_info *info)
 {
        struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
-       const struct format *format = omap_fb->format;
+       const struct drm_format_info *format = omap_fb->format;
        struct plane *plane = &omap_fb->planes[0];
        uint32_t x, y, orient = 0;
 
-       info->color_mode = format->dss_format;
+       info->color_mode = omap_fb->dss_format;
 
        info->pos_x      = win->crtc_x;
        info->pos_y      = win->crtc_y;
@@ -231,9 +225,9 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
        }
 
        /* convert to pixels: */
-       info->screen_width /= format->planes[0].stride_bpp;
+       info->screen_width /= format->cpp[0];
 
-       if (format->dss_format == OMAP_DSS_COLOR_NV12) {
+       if (omap_fb->dss_format == OMAP_DSS_COLOR_NV12) {
                plane = &omap_fb->planes[1];
 
                if (info->rotation_type == OMAP_DSS_ROT_TILER) {
@@ -360,47 +354,58 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
                struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+       unsigned int num_planes = drm_format_num_planes(mode_cmd->pixel_format);
        struct drm_gem_object *bos[4];
        struct drm_framebuffer *fb;
-       int ret;
+       int i;
 
-       ret = objects_lookup(file, mode_cmd->pixel_format,
-                       bos, mode_cmd->handles);
-       if (ret)
-               return ERR_PTR(ret);
+       for (i = 0; i < num_planes; i++) {
+               bos[i] = drm_gem_object_lookup(file, mode_cmd->handles[i]);
+               if (!bos[i]) {
+                       fb = ERR_PTR(-ENOENT);
+                       goto error;
+               }
+       }
 
        fb = omap_framebuffer_init(dev, mode_cmd, bos);
-       if (IS_ERR(fb)) {
-               int i, n = drm_format_num_planes(mode_cmd->pixel_format);
-               for (i = 0; i < n; i++)
-                       drm_gem_object_unreference_unlocked(bos[i]);
-               return fb;
-       }
+       if (IS_ERR(fb))
+               goto error;
+
+       return fb;
+
+error:
+       while (--i >= 0)
+               drm_gem_object_unreference_unlocked(bos[i]);
+
        return fb;
 }
 
 struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
                const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
 {
+       const struct drm_format_info *format = NULL;
        struct omap_framebuffer *omap_fb = NULL;
        struct drm_framebuffer *fb = NULL;
-       const struct format *format = NULL;
-       int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+       enum omap_color_mode dss_format = 0;
+       unsigned int pitch = mode_cmd->pitches[0];
+       int ret, i;
 
        DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
                        dev, mode_cmd, mode_cmd->width, mode_cmd->height,
                        (char *)&mode_cmd->pixel_format);
 
+       format = drm_format_info(mode_cmd->pixel_format);
+
        for (i = 0; i < ARRAY_SIZE(formats); i++) {
                if (formats[i].pixel_format == mode_cmd->pixel_format) {
-                       format = &formats[i];
+                       dss_format = formats[i].dss_format;
                        break;
                }
        }
 
-       if (!format) {
-               dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
-                               (char *)&mode_cmd->pixel_format);
+       if (!format || !dss_format) {
+               dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n",
+                       (char *)&mode_cmd->pixel_format);
                ret = -EINVAL;
                goto fail;
        }
@@ -413,40 +418,39 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
 
        fb = &omap_fb->base;
        omap_fb->format = format;
+       omap_fb->dss_format = dss_format;
        mutex_init(&omap_fb->lock);
 
-       for (i = 0; i < n; i++) {
-               struct plane *plane = &omap_fb->planes[i];
-               int size, pitch = mode_cmd->pitches[i];
-
-               if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
-                       dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
-                                       pitch, mode_cmd->width * format->planes[i].stride_bpp);
-                       ret = -EINVAL;
-                       goto fail;
-               }
+       /*
+        * The code below assumes that no format uses more than two planes, and
+        * that the two planes of multi-plane formats use the same number of
+        * bytes per pixel.
+        */
+       if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) {
+               dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n");
+               ret = -EINVAL;
+               goto fail;
+       }
 
-               if (pitch % format->planes[i].stride_bpp != 0) {
-                       dev_err(dev->dev,
-                               "buffer pitch (%d bytes) is not a multiple of pixel size (%d bytes)\n",
-                               pitch, format->planes[i].stride_bpp);
-                       ret = -EINVAL;
-                       goto fail;
-               }
+       if (pitch % format->cpp[0]) {
+               dev_dbg(dev->dev,
+                       "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n",
+                       pitch, format->cpp[0]);
+               ret = -EINVAL;
+               goto fail;
+       }
 
-               size = pitch * mode_cmd->height / format->planes[i].sub_y;
+       for (i = 0; i < format->num_planes; i++) {
+               struct plane *plane = &omap_fb->planes[i];
+               unsigned int vsub = i == 0 ? 1 : format->vsub;
+               unsigned int size;
 
-               if (size > (omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i])) {
-                       dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
-                                       bos[i]->size - mode_cmd->offsets[i], size);
-                       ret = -EINVAL;
-                       goto fail;
-               }
+               size = pitch * mode_cmd->height / vsub;
 
-               if (i > 0 && pitch != mode_cmd->pitches[i - 1]) {
-                       dev_err(dev->dev,
-                               "pitches are not the same between framebuffer planes %d != %d\n",
-                               pitch, mode_cmd->pitches[i - 1]);
+               if (size > omap_gem_mmap_size(bos[i]) - mode_cmd->offsets[i]) {
+                       dev_dbg(dev->dev,
+                               "provided buffer object is too small! %d < %d\n",
+                               bos[i]->size - mode_cmd->offsets[i], size);
                        ret = -EINVAL;
                        goto fail;
                }
index 60e1e8016708ec2f03fd09e8fb6e51e559b7b3a4..9adfa7c99695f320e4bf8e0063e2605d2f0717c6 100644
 
 #include "omap_drv.h"
 
-static DEFINE_SPINLOCK(list_lock);
-
-static void omap_irq_error_handler(struct omap_drm_irq *irq,
-               uint32_t irqstatus)
-{
-       DRM_ERROR("errors: %08x\n", irqstatus);
-}
+struct omap_irq_wait {
+       struct list_head node;
+       wait_queue_head_t wq;
+       uint32_t irqmask;
+       int count;
+};
 
-/* call with list_lock and dispc runtime held */
+/* call with wait_lock and dispc runtime held */
 static void omap_irq_update(struct drm_device *dev)
 {
        struct omap_drm_private *priv = dev->dev_private;
-       struct omap_drm_irq *irq;
-       uint32_t irqmask = priv->vblank_mask;
+       struct omap_irq_wait *wait;
+       uint32_t irqmask = priv->irq_mask;
 
-       assert_spin_locked(&list_lock);
+       assert_spin_locked(&priv->wait_lock);
 
-       list_for_each_entry(irq, &priv->irq_list, node)
-               irqmask |= irq->irqmask;
+       list_for_each_entry(wait, &priv->wait_list, node)
+               irqmask |= wait->irqmask;
 
        DBG("irqmask=%08x", irqmask);
 
@@ -45,90 +44,48 @@ static void omap_irq_update(struct drm_device *dev)
        dispc_read_irqenable();        /* flush posted write */
 }
 
-void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       unsigned long flags;
-
-       spin_lock_irqsave(&list_lock, flags);
-
-       if (!WARN_ON(irq->registered)) {
-               irq->registered = true;
-               list_add(&irq->node, &priv->irq_list);
-               omap_irq_update(dev);
-       }
-
-       spin_unlock_irqrestore(&list_lock, flags);
-}
-
-void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
-{
-       dispc_runtime_get();
-
-       __omap_irq_register(dev, irq);
-
-       dispc_runtime_put();
-}
-
-void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
+static void omap_irq_wait_handler(struct omap_irq_wait *wait)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&list_lock, flags);
-
-       if (!WARN_ON(!irq->registered)) {
-               irq->registered = false;
-               list_del(&irq->node);
-               omap_irq_update(dev);
-       }
-
-       spin_unlock_irqrestore(&list_lock, flags);
-}
-
-void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
-{
-       dispc_runtime_get();
-
-       __omap_irq_unregister(dev, irq);
-
-       dispc_runtime_put();
-}
-
-struct omap_irq_wait {
-       struct omap_drm_irq irq;
-       int count;
-};
-
-static DECLARE_WAIT_QUEUE_HEAD(wait_event);
-
-static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
-       struct omap_irq_wait *wait =
-                       container_of(irq, struct omap_irq_wait, irq);
        wait->count--;
-       wake_up_all(&wait_event);
+       wake_up(&wait->wq);
 }
 
 struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
                uint32_t irqmask, int count)
 {
+       struct omap_drm_private *priv = dev->dev_private;
        struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
-       wait->irq.irq = wait_irq;
-       wait->irq.irqmask = irqmask;
+       unsigned long flags;
+
+       init_waitqueue_head(&wait->wq);
+       wait->irqmask = irqmask;
        wait->count = count;
-       omap_irq_register(dev, &wait->irq);
+
+       spin_lock_irqsave(&priv->wait_lock, flags);
+       list_add(&wait->node, &priv->wait_list);
+       omap_irq_update(dev);
+       spin_unlock_irqrestore(&priv->wait_lock, flags);
+
        return wait;
 }
 
 int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
                unsigned long timeout)
 {
-       int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
-       omap_irq_unregister(dev, &wait->irq);
+       struct omap_drm_private *priv = dev->dev_private;
+       unsigned long flags;
+       int ret;
+
+       ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);
+
+       spin_lock_irqsave(&priv->wait_lock, flags);
+       list_del(&wait->node);
+       omap_irq_update(dev);
+       spin_unlock_irqrestore(&priv->wait_lock, flags);
+
        kfree(wait);
-       if (ret == 0)
-               return -1;
-       return 0;
+
+       return ret == 0 ? -1 : 0;
 }
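
An omap_irq_wait object now carries its own waitqueue and sits on priv->wait_list: omap_irq_wait_init() registers it (so omap_irq_update() enables the requested IRQs), omap_irq_wait() sleeps until the handler has decremented count, then unregisters and frees it. A userspace analogue of that wait-object pattern, with a condition variable standing in for the waitqueue (hypothetical names, illustration only):

#include <pthread.h>
#include <stdio.h>

/* The "handler" decrements count and signals; the waiter sleeps until
 * count has reached zero. */
struct wait_obj {
	pthread_mutex_t lock;
	pthread_cond_t cond;	/* stands in for the waitqueue */
	int count;		/* events still expected */
};

static void *fake_irq_handler(void *arg)
{
	struct wait_obj *w = arg;

	pthread_mutex_lock(&w->lock);
	w->count--;		/* one event delivered */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

int main(void)
{
	struct wait_obj w = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
		.count = 1,
	};
	pthread_t t;

	pthread_create(&t, NULL, fake_irq_handler, &w);

	pthread_mutex_lock(&w.lock);
	while (w.count > 0)	/* wait_event(): sleep until the condition holds */
		pthread_cond_wait(&w.cond, &w.lock);
	pthread_mutex_unlock(&w.lock);

	pthread_join(t, NULL);
	puts("wait complete");
	return 0;
}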
 
 /**
@@ -152,10 +109,10 @@ int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe)
 
        DBG("dev=%p, crtc=%u", dev, pipe);
 
-       spin_lock_irqsave(&list_lock, flags);
-       priv->vblank_mask |= pipe2vbl(crtc);
+       spin_lock_irqsave(&priv->wait_lock, flags);
+       priv->irq_mask |= dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc));
        omap_irq_update(dev);
-       spin_unlock_irqrestore(&list_lock, flags);
+       spin_unlock_irqrestore(&priv->wait_lock, flags);
 
        return 0;
 }
@@ -177,17 +134,66 @@ void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe)
 
        DBG("dev=%p, crtc=%u", dev, pipe);
 
-       spin_lock_irqsave(&list_lock, flags);
-       priv->vblank_mask &= ~pipe2vbl(crtc);
+       spin_lock_irqsave(&priv->wait_lock, flags);
+       priv->irq_mask &= ~dispc_mgr_get_vsync_irq(omap_crtc_channel(crtc));
        omap_irq_update(dev);
-       spin_unlock_irqrestore(&list_lock, flags);
+       spin_unlock_irqrestore(&priv->wait_lock, flags);
+}
+
+static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
+                                   u32 irqstatus)
+{
+       static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+                                     DEFAULT_RATELIMIT_BURST);
+       static const struct {
+               const char *name;
+               u32 mask;
+       } sources[] = {
+               { "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
+               { "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
+               { "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
+               { "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
+       };
+
+       const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
+                      | DISPC_IRQ_VID1_FIFO_UNDERFLOW
+                      | DISPC_IRQ_VID2_FIFO_UNDERFLOW
+                      | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
+       unsigned int i;
+
+       spin_lock(&priv->wait_lock);
+       irqstatus &= priv->irq_mask & mask;
+       spin_unlock(&priv->wait_lock);
+
+       if (!irqstatus)
+               return;
+
+       if (!__ratelimit(&_rs))
+               return;
+
+       DRM_ERROR("FIFO underflow on ");
+
+       for (i = 0; i < ARRAY_SIZE(sources); ++i) {
+               if (sources[i].mask & irqstatus)
+                       pr_cont("%s ", sources[i].name);
+       }
+
+       pr_cont("(0x%08x)\n", irqstatus);
+}
+
+static void omap_irq_ocp_error_handler(u32 irqstatus)
+{
+       if (!(irqstatus & DISPC_IRQ_OCP_ERR))
+               return;
+
+       DRM_ERROR("OCP error\n");
 }
 
 static irqreturn_t omap_irq_handler(int irq, void *arg)
 {
        struct drm_device *dev = (struct drm_device *) arg;
        struct omap_drm_private *priv = dev->dev_private;
-       struct omap_drm_irq *handler, *n;
+       struct omap_irq_wait *wait, *n;
        unsigned long flags;
        unsigned int id;
        u32 irqstatus;
@@ -200,24 +206,37 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
 
        for (id = 0; id < priv->num_crtcs; id++) {
                struct drm_crtc *crtc = priv->crtcs[id];
+               enum omap_channel channel = omap_crtc_channel(crtc);
 
-               if (irqstatus & pipe2vbl(crtc))
+               if (irqstatus & dispc_mgr_get_vsync_irq(channel)) {
                        drm_handle_vblank(dev, id);
+                       omap_crtc_vblank_irq(crtc);
+               }
+
+               if (irqstatus & dispc_mgr_get_sync_lost_irq(channel))
+                       omap_crtc_error_irq(crtc, irqstatus);
        }
 
-       spin_lock_irqsave(&list_lock, flags);
-       list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
-               if (handler->irqmask & irqstatus) {
-                       spin_unlock_irqrestore(&list_lock, flags);
-                       handler->irq(handler, handler->irqmask & irqstatus);
-                       spin_lock_irqsave(&list_lock, flags);
-               }
+       omap_irq_ocp_error_handler(irqstatus);
+       omap_irq_fifo_underflow(priv, irqstatus);
+
+       spin_lock_irqsave(&priv->wait_lock, flags);
+       list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
+               if (wait->irqmask & irqstatus)
+                       omap_irq_wait_handler(wait);
        }
-       spin_unlock_irqrestore(&list_lock, flags);
+       spin_unlock_irqrestore(&priv->wait_lock, flags);
 
        return IRQ_HANDLED;
 }
 
+static const u32 omap_underflow_irqs[] = {
+       [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
+       [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
+       [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
+       [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
+};
+
 /*
  * We need a special version, instead of just using drm_irq_install(),
  * because we need to register the irq via omapdss.  Once omapdss and
@@ -228,10 +247,25 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
 int omap_drm_irq_install(struct drm_device *dev)
 {
        struct omap_drm_private *priv = dev->dev_private;
-       struct omap_drm_irq *error_handler = &priv->error_handler;
+       unsigned int num_mgrs = dss_feat_get_num_mgrs();
+       unsigned int max_planes;
+       unsigned int i;
        int ret;
 
-       INIT_LIST_HEAD(&priv->irq_list);
+       spin_lock_init(&priv->wait_lock);
+       INIT_LIST_HEAD(&priv->wait_list);
+
+       priv->irq_mask = DISPC_IRQ_OCP_ERR;
+
+       max_planes = min(ARRAY_SIZE(priv->planes),
+                        ARRAY_SIZE(omap_underflow_irqs));
+       for (i = 0; i < max_planes; ++i) {
+               if (priv->planes[i])
+                       priv->irq_mask |= omap_underflow_irqs[i];
+       }
+
+       for (i = 0; i < num_mgrs; ++i)
+               priv->irq_mask |= dispc_mgr_get_sync_lost_irq(i);
 
        dispc_runtime_get();
        dispc_clear_irqstatus(0xffffffff);
@@ -241,16 +275,6 @@ int omap_drm_irq_install(struct drm_device *dev)
        if (ret < 0)
                return ret;
 
-       error_handler->irq = omap_irq_error_handler;
-       error_handler->irqmask = DISPC_IRQ_OCP_ERR;
-
-       /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think
-        * we just need to ignore it while enabling tv-out
-        */
-       error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
-
-       omap_irq_register(dev, error_handler);
-
        dev->irq_enabled = true;
 
        return 0;
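
omap_drm_irq_install() now precomputes a static interrupt mask instead of keeping a list of per-consumer handlers: OCP errors, one FIFO-underflow bit for each plane that exists, and one sync-lost bit per channel; vblank bits are ORed in dynamically by omap_irq_enable_vblank(). A small sketch of the mask assembly, with made-up bit values:

#include <stdio.h>
#include <stdint.h>

/* Made-up IRQ bits, only to illustrate how the static mask is built. */
#define IRQ_OCP_ERR	(1u << 0)
static const uint32_t underflow_irqs[] = { 1u << 1, 1u << 2, 1u << 3, 1u << 4 };
static const uint32_t sync_lost_irqs[] = { 1u << 5, 1u << 6 };

int main(void)
{
	/* Planes 0 and 2 exist in this pretend configuration. */
	const int have_plane[] = { 1, 0, 1, 0 };
	uint32_t mask = IRQ_OCP_ERR;
	unsigned int i;

	for (i = 0; i < sizeof(have_plane) / sizeof(have_plane[0]); i++)
		if (have_plane[i])
			mask |= underflow_irqs[i];	/* underflow per plane */

	for (i = 0; i < sizeof(sync_lost_irqs) / sizeof(sync_lost_irqs[0]); i++)
		mask |= sync_lost_irqs[i];		/* sync lost per channel */

	printf("irq_mask = 0x%08x\n", mask);	/* prints 0x0000006b */
	return 0;
}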
index 82b2c23d67692d5bddd1d1bf392da385f89a8a5d..386d90af70f7bf06b871bfd00ff5928dd40d345c 100644
@@ -43,8 +43,6 @@ struct omap_plane {
 
        uint32_t nformats;
        uint32_t formats[32];
-
-       struct omap_drm_irq error_irq;
 };
 
 struct omap_plane_state {
@@ -204,8 +202,6 @@ static void omap_plane_destroy(struct drm_plane *plane)
 
        DBG("%s", omap_plane->name);
 
-       omap_irq_unregister(plane->dev, &omap_plane->error_irq);
-
        drm_plane_cleanup(plane);
 
        kfree(omap_plane);
@@ -332,14 +328,6 @@ static const struct drm_plane_funcs omap_plane_funcs = {
        .atomic_get_property = omap_plane_atomic_get_property,
 };
 
-static void omap_plane_error_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
-{
-       struct omap_plane *omap_plane =
-                       container_of(irq, struct omap_plane, error_irq);
-       DRM_ERROR_RATELIMITED("%s: errors: %08x\n", omap_plane->name,
-               irqstatus);
-}
-
 static const char *plane_names[] = {
        [OMAP_DSS_GFX] = "gfx",
        [OMAP_DSS_VIDEO1] = "vid1",
@@ -347,13 +335,6 @@ static const char *plane_names[] = {
        [OMAP_DSS_VIDEO3] = "vid3",
 };
 
-static const uint32_t error_irqs[] = {
-       [OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
-       [OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
-       [OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
-       [OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
-};
-
 /* initialize plane */
 struct drm_plane *omap_plane_init(struct drm_device *dev,
                int id, enum drm_plane_type type,
@@ -377,10 +358,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 
        plane = &omap_plane->base;
 
-       omap_plane->error_irq.irqmask = error_irqs[id];
-       omap_plane->error_irq.irq = omap_plane_error_irq;
-       omap_irq_register(dev, &omap_plane->error_irq);
-
        ret = drm_universal_plane_init(dev, plane, possible_crtcs,
                                       &omap_plane_funcs, omap_plane->formats,
                                       omap_plane->nformats, type, NULL);
@@ -394,7 +371,6 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
        return plane;
 
 error:
-       omap_irq_unregister(plane->dev, &omap_plane->error_irq);
        kfree(omap_plane);
        return NULL;
 }
index ad4d7b8b832271a8e28a1b59dc496fa19049ed34..414776811e71e0a4e9f7c97309b6c173c8f502a3 100644
@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
 MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
 
 MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
 MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev)
        int err;
        int new_fw = 0;
        bool new_smc = false;
+       bool si58_fw = false;
+       bool banks2_fw = false;
 
        DRM_DEBUG("\n");
 
        switch (rdev->family) {
        case CHIP_TAHITI:
                chip_name = "TAHITI";
-               /* XXX: figure out which Tahitis need the new ucode */
-               if (0)
-                       new_smc = true;
                new_chip_name = "tahiti";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_PITCAIRN:
                chip_name = "PITCAIRN";
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->device == 0x6810) ||
-                   (rdev->pdev->device == 0x6811) ||
-                   (rdev->pdev->device == 0x6816) ||
-                   (rdev->pdev->device == 0x6817) ||
-                   (rdev->pdev->device == 0x6806))
+               if ((rdev->pdev->revision == 0x81) &&
+                   ((rdev->pdev->device == 0x6810) ||
+                    (rdev->pdev->device == 0x6811)))
                        new_smc = true;
                new_chip_name = "pitcairn";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_VERDE:
                chip_name = "VERDE";
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6820) ||
-                   (rdev->pdev->device == 0x6821) ||
-                   (rdev->pdev->device == 0x6822) ||
-                   (rdev->pdev->device == 0x6823) ||
-                   (rdev->pdev->device == 0x682A) ||
-                   (rdev->pdev->device == 0x682B))
+               if (((rdev->pdev->device == 0x6820) &&
+                    ((rdev->pdev->revision == 0x81) ||
+                     (rdev->pdev->revision == 0x83))) ||
+                   ((rdev->pdev->device == 0x6821) &&
+                    ((rdev->pdev->revision == 0x83) ||
+                     (rdev->pdev->revision == 0x87))) ||
+                   ((rdev->pdev->revision == 0x87) &&
+                    ((rdev->pdev->device == 0x6823) ||
+                     (rdev->pdev->device == 0x682b))))
                        new_smc = true;
                new_chip_name = "verde";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_OLAND:
                chip_name = "OLAND";
-               if ((rdev->pdev->revision == 0xC7) ||
-                   (rdev->pdev->revision == 0x80) ||
-                   (rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6604) ||
-                   (rdev->pdev->device == 0x6605))
+               if (((rdev->pdev->revision == 0x81) &&
+                    ((rdev->pdev->device == 0x6600) ||
+                     (rdev->pdev->device == 0x6604) ||
+                     (rdev->pdev->device == 0x6605) ||
+                     (rdev->pdev->device == 0x6610))) ||
+                   ((rdev->pdev->revision == 0x83) &&
+                    (rdev->pdev->device == 0x6610)))
                        new_smc = true;
                new_chip_name = "oland";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev)
                break;
        case CHIP_HAINAN:
                chip_name = "HAINAN";
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0xC3) ||
-                   (rdev->pdev->device == 0x6664) ||
-                   (rdev->pdev->device == 0x6665) ||
-                   (rdev->pdev->device == 0x6667))
+               if (((rdev->pdev->revision == 0x81) &&
+                    (rdev->pdev->device == 0x6660)) ||
+                   ((rdev->pdev->revision == 0x83) &&
+                    ((rdev->pdev->device == 0x6660) ||
+                     (rdev->pdev->device == 0x6663) ||
+                     (rdev->pdev->device == 0x6665) ||
+                     (rdev->pdev->device == 0x6667))))
                        new_smc = true;
+               else if ((rdev->pdev->revision == 0xc3) &&
+                        (rdev->pdev->device == 0x6665))
+                       banks2_fw = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
        default: BUG();
        }
 
+       /* this memory configuration requires special firmware */
+       if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               si58_fw = true;
+
        DRM_INFO("Loading %s Microcode\n", new_chip_name);
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+       if (si58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
        err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
        if (err) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       if (new_smc)
+       if (banks2_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+       else if (new_smc)
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
index 8b5e697f2549ee3f81dff2cb8cc8a05e7543c856..2944916f7102ae0395d11157f366c199d398076d 100644
@@ -3008,30 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6817) ||
                    (rdev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (rdev->family == CHIP_VERDE) {
-               if ((rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6820) ||
-                   (rdev->pdev->device == 0x6821) ||
-                   (rdev->pdev->device == 0x6822) ||
-                   (rdev->pdev->device == 0x6823) ||
-                   (rdev->pdev->device == 0x682A) ||
-                   (rdev->pdev->device == 0x682B)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
-       } else if (rdev->family == CHIP_OLAND) {
-               if ((rdev->pdev->revision == 0xC7) ||
-                   (rdev->pdev->revision == 0x80) ||
-                   (rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6604) ||
-                   (rdev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (rdev->family == CHIP_HAINAN) {
                if ((rdev->pdev->revision == 0x81) ||
                    (rdev->pdev->revision == 0x83) ||
@@ -3040,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6665) ||
                    (rdev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
index d20f7c0b4eac62488674b46ffa8cebf80fdc7f62..c35db12435c323f7444d4f78c29e9d8497437406 100644
@@ -13,7 +13,6 @@ sti-drm-y := \
        sti_dvo.o \
        sti_awg_utils.o \
        sti_vtg.o \
-       sti_vtac.o \
        sti_hda.o \
        sti_tvout.o \
        sti_hqvdp.o \
index d3db22488f96c6bd8a80832e1566a317064d7b7b..acc056644cd01f56f7ac9b9fefeebb10849b9f60 100644
@@ -217,19 +217,7 @@ static void sti_output_poll_changed(struct drm_device *ddev)
 {
        struct sti_private *private = ddev->dev_private;
 
-       if (!ddev->mode_config.num_connector)
-               return;
-
-       if (private->fbdev) {
-               drm_fbdev_cma_hotplug_event(private->fbdev);
-               return;
-       }
-
-       private->fbdev = drm_fbdev_cma_init(ddev, 32,
-                                           ddev->mode_config.num_crtc,
-                                           ddev->mode_config.num_connector);
-       if (IS_ERR(private->fbdev))
-               private->fbdev = NULL;
+       drm_fbdev_cma_hotplug_event(private->fbdev);
 }
 
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
@@ -346,6 +334,8 @@ static void sti_cleanup(struct drm_device *ddev)
 static int sti_bind(struct device *dev)
 {
        struct drm_device *ddev;
+       struct sti_private *private;
+       struct drm_fbdev_cma *fbdev;
        int ret;
 
        ddev = drm_dev_alloc(&sti_driver, dev);
@@ -368,6 +358,17 @@ static int sti_bind(struct device *dev)
 
        drm_mode_config_reset(ddev);
 
+       private = ddev->dev_private;
+       if (ddev->mode_config.num_connector) {
+               fbdev = drm_fbdev_cma_init(ddev, 32, ddev->mode_config.num_crtc,
+                                          ddev->mode_config.num_connector);
+               if (IS_ERR(fbdev)) {
+                       DRM_DEBUG_DRIVER("Warning: fails to create fbdev\n");
+                       fbdev = NULL;
+               }
+               private->fbdev = fbdev;
+       }
+
        return 0;
 
 err_register:
@@ -440,7 +441,6 @@ static struct platform_driver sti_platform_driver = {
 
 static struct platform_driver * const drivers[] = {
        &sti_tvout_driver,
-       &sti_vtac_driver,
        &sti_hqvdp_driver,
        &sti_hdmi_driver,
        &sti_hda_driver,
index 78ebe5e30f53ca782c6928266f08989a9fcdac76..4c75845cc9ab53750da814ee21540ba01f8ce547 100644
@@ -34,7 +34,6 @@ struct sti_private {
 };
 
 extern struct platform_driver sti_tvout_driver;
-extern struct platform_driver sti_vtac_driver;
 extern struct platform_driver sti_hqvdp_driver;
 extern struct platform_driver sti_hdmi_driver;
 extern struct platform_driver sti_hda_driver;
index 4a8bd620b90c51322b2491c187b7d440e0a22e1b..c9151849d604f89820058f344198b2a0b2e8904f 100644
@@ -781,6 +781,95 @@ static void sti_hdmi_disable(struct drm_bridge *bridge)
        hdmi->enabled = false;
 }
 
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. Non-coherent means that the audio and TMDS clocks do not share the
+ * same source (they drift relative to each other). In this case the
+ * assumption is that the CTS value is calculated automatically by the
+ * hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * The values computed are based on the table described in the HDMI 1.4b
+ * specification.
+ *
+ * Return: the N value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+       unsigned int n;
+
+       switch (audio_fs) {
+       case 32000:
+               n = 4096;
+               break;
+       case 44100:
+               n = 6272;
+               break;
+       case 48000:
+               n = 6144;
+               break;
+       case 88200:
+               n = 6272 * 2;
+               break;
+       case 96000:
+               n = 6144 * 2;
+               break;
+       case 176400:
+               n = 6272 * 4;
+               break;
+       case 192000:
+               n = 6144 * 4;
+               break;
+       default:
+               /* Not pre-defined, recommended value: 128 * fs / 1000 */
+               n = (audio_fs * 128) / 1000;
+       }
+
+       return n;
+}
+
+static int hdmi_audio_configure(struct sti_hdmi *hdmi)
+{
+       int audio_cfg, n;
+       struct hdmi_audio_params *params = &hdmi->audio;
+       struct hdmi_audio_infoframe *info = &params->cea;
+
+       DRM_DEBUG_DRIVER("\n");
+
+       if (!hdmi->enabled)
+               return 0;
+
+       /* update N parameter */
+       n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+       DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+                        params->sample_rate, hdmi->mode.clock * 1000, n);
+       hdmi_write(hdmi, n, HDMI_AUDN);
+
+       /* update HDMI registers according to configuration */
+       audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+                   HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+       switch (info->channels) {
+       case 8:
+               audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+               /* fall through */
+       case 6:
+               audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+               /* fall through */
+       case 4:
+               audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+               /* fall through */
+       case 2:
+               audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+               break;
+       default:
+               DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
+                         info->channels);
+               return -EINVAL;
+       }
+
+       hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+       return hdmi_audio_infoframe_config(hdmi);
+}
+
 static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
 {
        struct sti_hdmi *hdmi = bridge->driver_private;
@@ -819,9 +908,12 @@ static void sti_hdmi_pre_enable(struct drm_bridge *bridge)
        if (hdmi_avi_infoframe_config(hdmi))
                DRM_ERROR("Unable to configure AVI infoframe\n");
 
-       /* Program AUDIO infoframe */
-       if (hdmi_audio_infoframe_config(hdmi))
-               DRM_ERROR("Unable to configure AUDIO infoframe\n");
+       if (hdmi->audio.enabled) {
+               if (hdmi_audio_configure(hdmi))
+                       DRM_ERROR("Unable to configure audio\n");
+       } else {
+               hdmi_audio_infoframe_config(hdmi);
+       }
 
        /* Program VS infoframe */
        if (hdmi_vendor_infoframe_config(hdmi))
@@ -1071,97 +1163,6 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
        return NULL;
 }
 
-/**
- * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
- * clocks. None-coherent clocks means that audio and TMDS clocks have not the
- * same source (drifts between clocks). In this case assumption is that CTS is
- * automatically calculated by hardware.
- *
- * @audio_fs: audio frame clock frequency in Hz
- *
- * Values computed are based on table described in HDMI specification 1.4b
- *
- * Returns n value.
- */
-static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
-{
-       unsigned int n;
-
-       switch (audio_fs) {
-       case 32000:
-               n = 4096;
-               break;
-       case 44100:
-               n = 6272;
-               break;
-       case 48000:
-               n = 6144;
-               break;
-       case 88200:
-               n = 6272 * 2;
-               break;
-       case 96000:
-               n = 6144 * 2;
-               break;
-       case 176400:
-               n = 6272 * 4;
-               break;
-       case 192000:
-               n = 6144 * 4;
-               break;
-       default:
-               /* Not pre-defined, recommended value: 128 * fs / 1000 */
-               n = (audio_fs * 128) / 1000;
-       }
-
-       return n;
-}
-
-static int hdmi_audio_configure(struct sti_hdmi *hdmi,
-                               struct hdmi_audio_params *params)
-{
-       int audio_cfg, n;
-       struct hdmi_audio_infoframe *info = &params->cea;
-
-       DRM_DEBUG_DRIVER("\n");
-
-       if (!hdmi->enabled)
-               return 0;
-
-       /* update N parameter */
-       n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
-
-       DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
-                        params->sample_rate, hdmi->mode.clock * 1000, n);
-       hdmi_write(hdmi, n, HDMI_AUDN);
-
-       /* update HDMI registers according to configuration */
-       audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
-                   HDMI_AUD_CFG_ONE_BIT_INVALID;
-
-       switch (info->channels) {
-       case 8:
-               audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
-       case 6:
-               audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
-       case 4:
-               audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
-       case 2:
-               audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
-               break;
-       default:
-               DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
-                         info->channels);
-               return -EINVAL;
-       }
-
-       hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
-
-       hdmi->audio = *params;
-
-       return hdmi_audio_infoframe_config(hdmi);
-}
-
 static void hdmi_audio_shutdown(struct device *dev, void *data)
 {
        struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1185,17 +1186,9 @@ static int hdmi_audio_hw_params(struct device *dev,
 {
        struct sti_hdmi *hdmi = dev_get_drvdata(dev);
        int ret;
-       struct hdmi_audio_params audio = {
-               .sample_width = params->sample_width,
-               .sample_rate = params->sample_rate,
-               .cea = params->cea,
-       };
 
        DRM_DEBUG_DRIVER("\n");
 
-       if (!hdmi->enabled)
-               return 0;
-
        if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
            daifmt->frame_clk_inv || daifmt->bit_clk_master ||
            daifmt->frame_clk_master) {
@@ -1206,9 +1199,13 @@ static int hdmi_audio_hw_params(struct device *dev,
                return -EINVAL;
        }
 
-       audio.enabled = true;
+       hdmi->audio.sample_width = params->sample_width;
+       hdmi->audio.sample_rate = params->sample_rate;
+       hdmi->audio.cea = params->cea;
+
+       hdmi->audio.enabled = true;
 
-       ret = hdmi_audio_configure(hdmi, &audio);
+       ret = hdmi_audio_configure(hdmi);
        if (ret < 0)
                return ret;
 
index becf10d255c49634dca6cb95fb6c188fc33d69f5..4376fd8a8e529b17ecddffecb21fb6ce9ccf01f0 100644 (file)
@@ -332,6 +332,7 @@ struct sti_hqvdp_cmd {
  * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
  * @vtg:               vtg for main data path
  * @xp70_initialized:  true if xp70 is already initialized
+ * @vtg_registered:    true if registered to VTG
  */
 struct sti_hqvdp {
        struct device *dev;
@@ -347,6 +348,7 @@ struct sti_hqvdp {
        u32 hqvdp_cmd_paddr;
        struct sti_vtg *vtg;
        bool xp70_initialized;
+       bool vtg_registered;
 };
 
 #define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, plane)
@@ -771,7 +773,7 @@ static void sti_hqvdp_disable(struct sti_hqvdp *hqvdp)
                DRM_ERROR("XP70 could not revert to idle\n");
 
        hqvdp->plane.status = STI_PLANE_DISABLED;
-       hqvdp->xp70_initialized = false;
+       hqvdp->vtg_registered = false;
 }
 
 /**
@@ -1064,10 +1066,11 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
                return -EINVAL;
        }
 
-       if (!hqvdp->xp70_initialized) {
+       if (!hqvdp->xp70_initialized)
                /* Start HQVDP XP70 coprocessor */
                sti_hqvdp_start_xp70(hqvdp);
 
+       if (!hqvdp->vtg_registered) {
                /* Prevent VTG shutdown */
                if (clk_prepare_enable(hqvdp->clk_pix_main)) {
                        DRM_ERROR("Failed to prepare/enable pix main clk\n");
@@ -1081,6 +1084,7 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
                        DRM_ERROR("Cannot register VTG notifier\n");
                        return -EINVAL;
                }
+               hqvdp->vtg_registered = true;
        }
 
        DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
@@ -1113,6 +1117,21 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
        if (!crtc || !fb)
                return;
 
+       if ((oldstate->fb == state->fb) &&
+           (oldstate->crtc_x == state->crtc_x) &&
+           (oldstate->crtc_y == state->crtc_y) &&
+           (oldstate->crtc_w == state->crtc_w) &&
+           (oldstate->crtc_h == state->crtc_h) &&
+           (oldstate->src_x == state->src_x) &&
+           (oldstate->src_y == state->src_y) &&
+           (oldstate->src_w == state->src_w) &&
+           (oldstate->src_h == state->src_h)) {
+               /* No change since last update, do not post cmd */
+               DRM_DEBUG_DRIVER("No change, not posting cmd\n");
+               plane->status = STI_PLANE_UPDATED;
+               return;
+       }
+
        mode = &crtc->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
index ca4b3719a64a05699eaf85fffbfcb98b379d3af0..427d8f58c6b106164de2f3e8074a798613c71f24 100644 (file)
@@ -65,9 +65,18 @@ void sti_plane_update_fps(struct sti_plane *plane,
 
        fps->last_timestamp = now;
        fps->last_frame_counter = fps->curr_frame_counter;
-       fpks = (num_frames * 1000000) / ms_since_last;
-       snprintf(plane->fps_info.fps_str, FPS_LENGTH, "%-6s @ %d.%.3d fps",
-                sti_plane_to_str(plane), fpks / 1000, fpks % 1000);
+
+       if (plane->drm_plane.fb) {
+               fpks = (num_frames * 1000000) / ms_since_last;
+               snprintf(plane->fps_info.fps_str, FPS_LENGTH,
+                        "%-8s %4dx%-4d %.4s @ %3d.%-3.3d fps (%s)",
+                        plane->drm_plane.name,
+                        plane->drm_plane.fb->width,
+                        plane->drm_plane.fb->height,
+                        (char *)&plane->drm_plane.fb->format->format,
+                        fpks / 1000, fpks % 1000,
+                        sti_plane_to_str(plane));
+       }
 
        if (fps->curr_field_counter) {
                /* Compute number of field updates */
@@ -75,7 +84,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
                fps->last_field_counter = fps->curr_field_counter;
                fipks = (num_fields * 1000000) / ms_since_last;
                snprintf(plane->fps_info.fips_str,
-                        FPS_LENGTH, " - %d.%.3d field/sec",
+                        FPS_LENGTH, " - %3d.%-3.3d field/sec",
                         fipks / 1000, fipks % 1000);
        } else {
                plane->fps_info.fips_str[0] = '\0';
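
For reference: fpks and fipks are counts per 1000 seconds, which is why they
are printed as value / 1000 and value % 1000. For example, 60 frames over a
1000 ms window gives fpks = 60 * 1000000 / 1000 = 60000, shown as "60.000 fps".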
index ce3e8d6c88bbf7b13748be464a78d4a98bbdcbb3..c36c13faaa18f784b09ee1fc655762c5a932fb29 100644 (file)
@@ -48,7 +48,7 @@ enum sti_plane_status {
        STI_PLANE_DISABLED,
 };
 
-#define FPS_LENGTH 64
+#define FPS_LENGTH 128
 struct sti_fps_info {
        bool output;
        unsigned int curr_frame_counter;
diff --git a/drivers/gpu/drm/sti/sti_vtac.c b/drivers/gpu/drm/sti/sti_vtac.c
deleted file mode 100644 (file)
index cf7fe8a..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright (C) STMicroelectronics SA 2014
- * Author: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics.
- * License terms:  GNU General Public License (GPL), version 2
- */
-
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/platform_device.h>
-
-#include <drm/drmP.h>
-
-#include "sti_drv.h"
-
-/* registers offset */
-#define VTAC_CONFIG                     0x00
-#define VTAC_RX_FIFO_CONFIG             0x04
-#define VTAC_FIFO_CONFIG_VAL            0x04
-
-#define VTAC_SYS_CFG8521                0x824
-#define VTAC_SYS_CFG8522                0x828
-
-/* Number of phyts per pixel */
-#define VTAC_2_5_PPP                    0x0005
-#define VTAC_3_PPP                      0x0006
-#define VTAC_4_PPP                      0x0008
-#define VTAC_5_PPP                      0x000A
-#define VTAC_6_PPP                      0x000C
-#define VTAC_13_PPP                     0x001A
-#define VTAC_14_PPP                     0x001C
-#define VTAC_15_PPP                     0x001E
-#define VTAC_16_PPP                     0x0020
-#define VTAC_17_PPP                     0x0022
-#define VTAC_18_PPP                     0x0024
-
-/* enable bits */
-#define VTAC_ENABLE                     0x3003
-
-#define VTAC_TX_PHY_ENABLE_CLK_PHY      BIT(0)
-#define VTAC_TX_PHY_ENABLE_CLK_DLL      BIT(1)
-#define VTAC_TX_PHY_PLL_NOT_OSC_MODE    BIT(3)
-#define VTAC_TX_PHY_RST_N_DLL_SWITCH    BIT(4)
-#define VTAC_TX_PHY_PROG_N3             BIT(9)
-
-
-/**
- * VTAC mode structure
- *
- * @vid_in_width: Video Data Resolution
- * @phyts_width: Width of the phyt buses (phyt low and phyt high).
- * @phyts_per_pixel: Number of phyts sent per pixel
- */
-struct sti_vtac_mode {
-       u32 vid_in_width;
-       u32 phyts_width;
-       u32 phyts_per_pixel;
-};
-
-static const struct sti_vtac_mode vtac_mode_main = {
-       .vid_in_width = 0x2,
-       .phyts_width = 0x2,
-       .phyts_per_pixel = VTAC_5_PPP,
-};
-static const struct sti_vtac_mode vtac_mode_aux = {
-       .vid_in_width = 0x1,
-       .phyts_width = 0x0,
-       .phyts_per_pixel = VTAC_17_PPP,
-};
-
-/**
- * VTAC structure
- *
- * @dev: pointer to device structure
- * @regs: ioremapped registers for RX and TX devices
- * @phy_regs: phy registers for TX device
- * @clk: clock
- * @mode: main or auxiliary configuration mode
- */
-struct sti_vtac {
-       struct device *dev;
-       void __iomem *regs;
-       void __iomem *phy_regs;
-       struct clk *clk;
-       const struct sti_vtac_mode *mode;
-};
-
-static void sti_vtac_rx_set_config(struct sti_vtac *vtac)
-{
-       u32 config;
-
-       /* Enable VTAC clock */
-       if (clk_prepare_enable(vtac->clk))
-               DRM_ERROR("Failed to prepare/enable vtac_rx clock.\n");
-
-       writel(VTAC_FIFO_CONFIG_VAL, vtac->regs + VTAC_RX_FIFO_CONFIG);
-
-       config = VTAC_ENABLE;
-       config |= vtac->mode->vid_in_width << 4;
-       config |= vtac->mode->phyts_width << 16;
-       config |= vtac->mode->phyts_per_pixel << 23;
-       writel(config, vtac->regs + VTAC_CONFIG);
-}
-
-static void sti_vtac_tx_set_config(struct sti_vtac *vtac)
-{
-       u32 phy_config;
-       u32 config;
-
-       /* Enable VTAC clock */
-       if (clk_prepare_enable(vtac->clk))
-               DRM_ERROR("Failed to prepare/enable vtac_tx clock.\n");
-
-       /* Configure vtac phy */
-       phy_config = 0x00000000;
-       writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8522);
-       phy_config = VTAC_TX_PHY_ENABLE_CLK_PHY;
-       writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config |= VTAC_TX_PHY_PROG_N3;
-       writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config |= VTAC_TX_PHY_ENABLE_CLK_DLL;
-       writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config |= VTAC_TX_PHY_RST_N_DLL_SWITCH;
-       writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config = readl(vtac->phy_regs + VTAC_SYS_CFG8521);
-       phy_config |= VTAC_TX_PHY_PLL_NOT_OSC_MODE;
-       writel(phy_config, vtac->phy_regs + VTAC_SYS_CFG8521);
-
-       /* Configure vtac tx */
-       config = VTAC_ENABLE;
-       config |= vtac->mode->vid_in_width << 4;
-       config |= vtac->mode->phyts_width << 16;
-       config |= vtac->mode->phyts_per_pixel << 23;
-       writel(config, vtac->regs + VTAC_CONFIG);
-}
-
-static const struct of_device_id vtac_of_match[] = {
-       {
-               .compatible = "st,vtac-main",
-               .data = &vtac_mode_main,
-       }, {
-               .compatible = "st,vtac-aux",
-               .data = &vtac_mode_aux,
-       }, {
-               /* end node */
-       }
-};
-MODULE_DEVICE_TABLE(of, vtac_of_match);
-
-static int sti_vtac_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
-       const struct of_device_id *id;
-       struct sti_vtac *vtac;
-       struct resource *res;
-
-       vtac = devm_kzalloc(dev, sizeof(*vtac), GFP_KERNEL);
-       if (!vtac)
-               return -ENOMEM;
-
-       vtac->dev = dev;
-
-       id = of_match_node(vtac_of_match, np);
-       if (!id)
-               return -ENOMEM;
-
-       vtac->mode = id->data;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               DRM_ERROR("Invalid resource\n");
-               return -ENOMEM;
-       }
-       vtac->regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(vtac->regs))
-               return PTR_ERR(vtac->regs);
-
-
-       vtac->clk = devm_clk_get(dev, "vtac");
-       if (IS_ERR(vtac->clk)) {
-               DRM_ERROR("Cannot get vtac clock\n");
-               return PTR_ERR(vtac->clk);
-       }
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (res) {
-               vtac->phy_regs = devm_ioremap_nocache(dev, res->start,
-                                                resource_size(res));
-               sti_vtac_tx_set_config(vtac);
-       } else {
-
-               sti_vtac_rx_set_config(vtac);
-       }
-
-       platform_set_drvdata(pdev, vtac);
-       DRM_INFO("%s %s\n", __func__, dev_name(vtac->dev));
-
-       return 0;
-}
-
-static int sti_vtac_remove(struct platform_device *pdev)
-{
-       return 0;
-}
-
-struct platform_driver sti_vtac_driver = {
-       .driver = {
-               .name = "sti-vtac",
-               .owner = THIS_MODULE,
-               .of_match_table = vtac_of_match,
-       },
-       .probe = sti_vtac_probe,
-       .remove = sti_vtac_remove,
-};
-
-MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
-MODULE_LICENSE("GPL");
index a8882bdd0f8badf46d0921710dc359cee2f88cde..c3d9c8ae14afeb66b998a59a37b1bdbb60bb87d8 100644 (file)
@@ -429,6 +429,10 @@ static int vtg_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
        vtg->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
+       if (!vtg->regs) {
+               DRM_ERROR("failed to remap I/O memory\n");
+               return -ENOMEM;
+       }
 
        np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
        if (np) {
index 3a763f7cb7436b9303fdf5c88e1c3711a885a226..f80bf9385e412db766424bf00cacd76458a64a8e 100644 (file)
@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
        struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct tilcdc_drm_private *priv = dev->dev_private;
-       uint32_t stat;
+       uint32_t stat, reg;
 
        stat = tilcdc_read_irqstatus(dev);
        tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
                dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
                                    __func__, stat);
                tilcdc_crtc->frame_intact = false;
-               if (tilcdc_crtc->sync_lost_count++ >
-                   SYNC_LOST_COUNT_LIMIT) {
-                       dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
-                       queue_work(system_wq, &tilcdc_crtc->recover_work);
-                       if (priv->rev == 1)
+               if (priv->rev == 1) {
+                       reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
+                       if (reg & LCDC_RASTER_ENABLE) {
                                tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
-                                            LCDC_V1_SYNC_LOST_INT_ENA);
-                       else
+                                            LCDC_RASTER_ENABLE);
+                               tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+                                          LCDC_RASTER_ENABLE);
+                       }
+               } else {
+                       if (tilcdc_crtc->sync_lost_count++ >
+                           SYNC_LOST_COUNT_LIMIT) {
+                               dev_err(dev->dev,
+                                       "%s(0x%08x): Sync lost flood detected, recovering",
+                                       __func__, stat);
+                               queue_work(system_wq,
+                                          &tilcdc_crtc->recover_work);
                                tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
                                             LCDC_SYNC_LOST);
-                       tilcdc_crtc->sync_lost_count = 0;
+                               tilcdc_crtc->sync_lost_count = 0;
+                       }
                }
        }
 
index 61254b991265e5af488eac6717ad87e3c04eb6e3..24f99fc9d8a47a2fae3c29cef5de1f5e9861c674 100644 (file)
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
        info->fbops = &virtio_gpufb_ops;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-       info->screen_base = obj->vmap;
+       info->screen_buffer = obj->vmap;
        info->screen_size = obj->gem_base.size;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
        drm_fb_helper_fill_var(info, &vfbdev->helper,
index 4065b2840f1ce1018d78ace0f5274dc957d5548b..5b36421ef3e5590ac99e10d993d64e53f954d55a 100644 (file)
@@ -4,5 +4,7 @@ config DRM_ZTE
        select DRM_KMS_CMA_HELPER
        select DRM_KMS_FB_HELPER
        select DRM_KMS_HELPER
+       select SND_SOC_HDMI_CODEC if SND_SOC
+       select VIDEOMODE_HELPERS
        help
          Choose this option to enable DRM on ZTE ZX SoCs.
index 699180bfd57c3b40ea594926456f46ff732650ce..01352b56c41884f4f05417436735d8b48bda1f11 100644 (file)
@@ -2,6 +2,7 @@ zxdrm-y := \
        zx_drm_drv.o \
        zx_hdmi.o \
        zx_plane.o \
+       zx_tvenc.o \
        zx_vou.o
 
 obj-$(CONFIG_DRM_ZTE) += zxdrm.o
index 3e76f72c92fffc61c960f618934652ab46a3c727..13081fed902d9bd07b99c402a212ccb0e5e846fc 100644 (file)
@@ -247,6 +247,7 @@ static struct platform_driver zx_drm_platform_driver = {
 static struct platform_driver *drivers[] = {
        &zx_crtc_driver,
        &zx_hdmi_driver,
+       &zx_tvenc_driver,
        &zx_drm_platform_driver,
 };
 
index e65cd18a6cbaf18d8a9a606d7ad361c62b599226..5ca035b079c77e770ddb5742787a1600ba5f8a01 100644 (file)
@@ -13,6 +13,7 @@
 
 extern struct platform_driver zx_crtc_driver;
 extern struct platform_driver zx_hdmi_driver;
+extern struct platform_driver zx_tvenc_driver;
 
 static inline u32 zx_readl(void __iomem *reg)
 {
index 6bf6c364811ea40c3f29e67ace80886cf4f2f11c..c47b9cbfe270a3f28b72d65e21d0474a76aebfae 100644 (file)
@@ -25,6 +25,8 @@
 #include <drm/drm_of.h>
 #include <drm/drmP.h>
 
+#include <sound/hdmi-codec.h>
+
 #include "zx_hdmi_regs.h"
 #include "zx_vou.h"
 
@@ -49,17 +51,11 @@ struct zx_hdmi {
        bool sink_is_hdmi;
        bool sink_has_audio;
        const struct vou_inf *inf;
+       struct platform_device *audio_pdev;
 };
 
 #define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
 
-static const struct vou_inf vou_inf_hdmi = {
-       .id = VOU_HDMI,
-       .data_sel = VOU_YUV444,
-       .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
-       .clocks_sel_bits = BIT(13) | BIT(2),
-};
-
 static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
 {
        return readl_relaxed(hdmi->mmio + offset * 4);
@@ -238,14 +234,14 @@ static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
 
        zx_hdmi_hw_enable(hdmi);
 
-       vou_inf_enable(hdmi->inf, encoder->crtc);
+       vou_inf_enable(VOU_HDMI, encoder->crtc);
 }
 
 static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
 {
        struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
 
-       vou_inf_disable(hdmi->inf, encoder->crtc);
+       vou_inf_disable(VOU_HDMI, encoder->crtc);
 
        zx_hdmi_hw_disable(hdmi);
 
@@ -366,6 +362,142 @@ static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+static int zx_hdmi_audio_startup(struct device *dev, void *data)
+{
+       struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+       struct drm_encoder *encoder = &hdmi->encoder;
+
+       vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF);
+
+       return 0;
+}
+
+static void zx_hdmi_audio_shutdown(struct device *dev, void *data)
+{
+       struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+       /* Disable audio input */
+       hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0);
+}
+
+static inline int zx_hdmi_audio_get_n(unsigned int fs)
+{
+       unsigned int n;
+
+       if (fs && (fs % 44100) == 0)
+               n = 6272 * (fs / 44100);
+       else
+               n = fs * 128 / 1000;
+
+       return n;
+}
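
The closed form above reproduces the HDMI ACR table without a lookup: rates
in the 44.1 kHz family are scaled from 6272, and the 128 * fs / 1000 rule
yields the tabulated 4096/6144 family exactly for the remaining rates. A
small standalone check, illustrative only:

#include <assert.h>

/* Host-side re-implementation of zx_hdmi_audio_get_n() for checking */
static int get_n(unsigned int fs)
{
	if (fs && (fs % 44100) == 0)
		return 6272 * (fs / 44100);
	return fs * 128 / 1000;
}

int main(void)
{
	assert(get_n(32000) == 4096);
	assert(get_n(44100) == 6272);
	assert(get_n(48000) == 6144);
	assert(get_n(88200) == 6272 * 2);
	assert(get_n(96000) == 6144 * 2);
	return 0;
}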
+
+static int zx_hdmi_audio_hw_params(struct device *dev,
+                                  void *data,
+                                  struct hdmi_codec_daifmt *daifmt,
+                                  struct hdmi_codec_params *params)
+{
+       struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+       struct hdmi_audio_infoframe *cea = &params->cea;
+       union hdmi_infoframe frame;
+       int n;
+
+       /* We only support SPDIF for now */
+       if (daifmt->fmt != HDMI_SPDIF) {
+               DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt);
+               return -EINVAL;
+       }
+
+       switch (params->sample_width) {
+       case 16:
+               hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
+                                SPDIF_SAMPLE_SIZE_16BIT);
+               break;
+       case 20:
+               hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
+                                SPDIF_SAMPLE_SIZE_20BIT);
+               break;
+       case 24:
+               hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
+                                SPDIF_SAMPLE_SIZE_24BIT);
+               break;
+       default:
+               DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n",
+                             params->sample_width);
+               return -EINVAL;
+       }
+
+       /* CTS is calculated by hardware, and we only need to take care of N */
+       n = zx_hdmi_audio_get_n(params->sample_rate);
+       hdmi_writeb(hdmi, N_SVAL1, n & 0xff);
+       hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff);
+       hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf);
+
+       /* Enable spdif mode */
+       hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN);
+
+       /* Enable audio input */
+       hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN);
+
+       memcpy(&frame.audio, cea, sizeof(*cea));
+
+       return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO);
+}
+
+static int zx_hdmi_audio_digital_mute(struct device *dev, void *data,
+                                     bool enable)
+{
+       struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+
+       if (enable)
+               hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE,
+                                TPI_AUD_MUTE);
+       else
+               hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0);
+
+       return 0;
+}
+
+static int zx_hdmi_audio_get_eld(struct device *dev, void *data,
+                                uint8_t *buf, size_t len)
+{
+       struct zx_hdmi *hdmi = dev_get_drvdata(dev);
+       struct drm_connector *connector = &hdmi->connector;
+
+       memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+       return 0;
+}
+
+static const struct hdmi_codec_ops zx_hdmi_codec_ops = {
+       .audio_startup = zx_hdmi_audio_startup,
+       .hw_params = zx_hdmi_audio_hw_params,
+       .audio_shutdown = zx_hdmi_audio_shutdown,
+       .digital_mute = zx_hdmi_audio_digital_mute,
+       .get_eld = zx_hdmi_audio_get_eld,
+};
+
+static struct hdmi_codec_pdata zx_hdmi_codec_pdata = {
+       .ops = &zx_hdmi_codec_ops,
+       .spdif = 1,
+};
+
+static int zx_hdmi_audio_register(struct zx_hdmi *hdmi)
+{
+       struct platform_device *pdev;
+
+       pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME,
+                                            PLATFORM_DEVID_AUTO,
+                                            &zx_hdmi_codec_pdata,
+                                            sizeof(zx_hdmi_codec_pdata));
+       if (IS_ERR(pdev))
+               return PTR_ERR(pdev);
+
+       hdmi->audio_pdev = pdev;
+
+       return 0;
+}
+
 static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
 {
        int len = msg->len;
@@ -523,7 +655,6 @@ static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
 
        hdmi->dev = dev;
        hdmi->drm = drm;
-       hdmi->inf = &vou_inf_hdmi;
 
        dev_set_drvdata(dev, hdmi);
 
@@ -566,6 +697,12 @@ static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
+       ret = zx_hdmi_audio_register(hdmi);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret);
+               return ret;
+       }
+
        ret = zx_hdmi_register(drm, hdmi);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
@@ -590,6 +727,9 @@ static void zx_hdmi_unbind(struct device *dev, struct device *master,
 
        hdmi->connector.funcs->destroy(&hdmi->connector);
        hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+       if (hdmi->audio_pdev)
+               platform_device_unregister(hdmi->audio_pdev);
 }
 
 static const struct component_ops zx_hdmi_component_ops = {
index de911f66b65888c15685daa0542f5ec1fb62ca3b..c6d5d8211725c209db4958d17c611e275b6f4775 100644 (file)
 #define TPI_INFO_TRANS_RPT             BIT(6)
 #define TPI_DDC_MASTER_EN              0x06f8
 #define HW_DDC_MASTER                  BIT(7)
+#define N_SVAL1                                0xa03
+#define N_SVAL2                                0xa04
+#define N_SVAL3                                0xa05
+#define AUD_EN                         0xa13
+#define AUD_IN_EN                      BIT(0)
+#define AUD_MODE                       0xa14
+#define SPDIF_EN                       BIT(1)
+#define TPI_AUD_CONFIG                 0xa62
+#define SPDIF_SAMPLE_SIZE_SHIFT                6
+#define SPDIF_SAMPLE_SIZE_MASK         (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define SPDIF_SAMPLE_SIZE_16BIT                (0x1 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define SPDIF_SAMPLE_SIZE_20BIT                (0x2 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define SPDIF_SAMPLE_SIZE_24BIT                (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
+#define TPI_AUD_MUTE                   BIT(4)
 
 #endif /* __ZX_HDMI_REGS_H__ */
index b634b090cdc1bc7d79fbe7d6faf7885984658bfc..1d08ba381098e5f905be122a926ba5e8cf0b00fe 100644 (file)
 #include "zx_plane_regs.h"
 #include "zx_vou.h"
 
-struct zx_plane {
-       struct drm_plane plane;
-       void __iomem *layer;
-       void __iomem *csc;
-       void __iomem *hbsc;
-       void __iomem *rsz;
-};
-
-#define to_zx_plane(plane)     container_of(plane, struct zx_plane, plane)
-
 static const uint32_t gl_formats[] = {
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_XRGB8888,
@@ -40,6 +30,261 @@ static const uint32_t gl_formats[] = {
        DRM_FORMAT_ARGB4444,
 };
 
+static const uint32_t vl_formats[] = {
+       DRM_FORMAT_NV12,        /* Semi-planar YUV420 */
+       DRM_FORMAT_YUV420,      /* Planar YUV420 */
+       DRM_FORMAT_YUYV,        /* Packed YUV422 */
+       DRM_FORMAT_YVYU,
+       DRM_FORMAT_UYVY,
+       DRM_FORMAT_VYUY,
+       DRM_FORMAT_YUV444,      /* YUV444 8bit */
+       /*
+        * TODO: add formats below that HW supports:
+        *  - YUV420 P010
+        *  - YUV420 Hantro
+        *  - YUV444 10bit
+        */
+};
+
+#define FRAC_16_16(mult, div)    (((mult) << 16) / (div))
+
+static int zx_vl_plane_atomic_check(struct drm_plane *plane,
+                                   struct drm_plane_state *plane_state)
+{
+       struct drm_framebuffer *fb = plane_state->fb;
+       struct drm_crtc *crtc = plane_state->crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_rect clip;
+       int min_scale = FRAC_16_16(1, 8);
+       int max_scale = FRAC_16_16(8, 1);
+
+       if (!crtc || !fb)
+               return 0;
+
+       crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+                                                       crtc);
+       if (WARN_ON(!crtc_state))
+               return -EINVAL;
+
+       /* nothing to check when disabling or disabled */
+       if (!crtc_state->enable)
+               return 0;
+
+       /* plane must be enabled */
+       if (!plane_state->crtc)
+               return -EINVAL;
+
+       clip.x1 = 0;
+       clip.y1 = 0;
+       clip.x2 = crtc_state->adjusted_mode.hdisplay;
+       clip.y2 = crtc_state->adjusted_mode.vdisplay;
+
+       return drm_plane_helper_check_state(plane_state, &clip,
+                                           min_scale, max_scale,
+                                           true, true);
+}
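
FRAC_16_16() expresses scale factors in the 16.16 fixed-point format that
drm_plane_helper_check_state() expects for its src/dst ratio limits, so the
1/8 and 8/1 bounds above allow up to 8x upscaling and 8x downscaling. A quick
illustration of the encoding, using only the macro itself:

#include <assert.h>

#define FRAC_16_16(mult, div)	(((mult) << 16) / (div))

int main(void)
{
	assert(FRAC_16_16(1, 1) == 0x10000);	/* 1.0, identity scale */
	assert(FRAC_16_16(1, 8) == 0x2000);	/* 0.125, max upscale */
	assert(FRAC_16_16(8, 1) == 0x80000);	/* 8.0, max downscale */
	return 0;
}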
+
+static int zx_vl_get_fmt(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_NV12:
+               return VL_FMT_YUV420;
+       case DRM_FORMAT_YUV420:
+               return VL_YUV420_PLANAR | VL_FMT_YUV420;
+       case DRM_FORMAT_YUYV:
+               return VL_YUV422_YUYV | VL_FMT_YUV422;
+       case DRM_FORMAT_YVYU:
+               return VL_YUV422_YVYU | VL_FMT_YUV422;
+       case DRM_FORMAT_UYVY:
+               return VL_YUV422_UYVY | VL_FMT_YUV422;
+       case DRM_FORMAT_VYUY:
+               return VL_YUV422_VYUY | VL_FMT_YUV422;
+       case DRM_FORMAT_YUV444:
+               return VL_FMT_YUV444_8BIT;
+       default:
+               WARN_ONCE(1, "invalid pixel format %d\n", format);
+               return -EINVAL;
+       }
+}
+
+static inline void zx_vl_set_update(struct zx_plane *zplane)
+{
+       void __iomem *layer = zplane->layer;
+
+       zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE);
+}
+
+static inline void zx_vl_rsz_set_update(struct zx_plane *zplane)
+{
+       zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1);
+}
+
+static int zx_vl_rsz_get_fmt(uint32_t format)
+{
+       switch (format) {
+       case DRM_FORMAT_NV12:
+       case DRM_FORMAT_YUV420:
+               return RSZ_VL_FMT_YCBCR420;
+       case DRM_FORMAT_YUYV:
+       case DRM_FORMAT_YVYU:
+       case DRM_FORMAT_UYVY:
+       case DRM_FORMAT_VYUY:
+               return RSZ_VL_FMT_YCBCR422;
+       case DRM_FORMAT_YUV444:
+               return RSZ_VL_FMT_YCBCR444;
+       default:
+               WARN_ONCE(1, "invalid pixel format %d\n", format);
+               return -EINVAL;
+       }
+}
+
+static inline u32 rsz_step_value(u32 src, u32 dst)
+{
+       u32 val = 0;
+
+       if (src == dst)
+               val = 0;
+       else if (src < dst)
+               val = RSZ_PARA_STEP((src << 16) / dst);
+       else if (src > dst)
+               val = RSZ_DATA_STEP(src / dst) |
+                     RSZ_PARA_STEP(((src << 16) / dst) & 0xffff);
+
+       return val;
+}
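
rsz_step_value() packs the resampling ratio as a 16.16-style step: the
integer part goes into the DATA_STEP field (non-zero only when downscaling)
and the fractional part into PARA_STEP. A standalone sketch with simplified
stand-ins for the register macros:

#include <assert.h>

#define RSZ_DATA_STEP(x)	((x) << 16)	/* simplified */
#define RSZ_PARA_STEP(x)	((x) & 0xffff)	/* simplified */

static unsigned int step(unsigned int src, unsigned int dst)
{
	if (src == dst)
		return 0;
	if (src < dst)
		return RSZ_PARA_STEP((src << 16) / dst);
	return RSZ_DATA_STEP(src / dst) |
	       RSZ_PARA_STEP(((src << 16) / dst) & 0xffff);
}

int main(void)
{
	assert(step(720, 1440) == 0x8000);	/* 2x upscale: step 0.5 */
	assert(step(1920, 960) == 0x20000);	/* 2x downscale: step 2.0 */
	assert(step(1280, 1280) == 0);		/* 1:1, no resampling */
	return 0;
}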
+
+static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format,
+                           u32 src_w, u32 src_h, u32 dst_w, u32 dst_h)
+{
+       void __iomem *rsz = zplane->rsz;
+       u32 src_chroma_w = src_w;
+       u32 src_chroma_h = src_h;
+       int fmt;
+
+       /* Set up source and destination resolution */
+       zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
+       zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
+
+       /* Configure data format for VL RSZ */
+       fmt = zx_vl_rsz_get_fmt(format);
+       if (fmt >= 0)
+               zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt);
+
+       /* Calculate Chroma height and width */
+       if (fmt == RSZ_VL_FMT_YCBCR420) {
+               src_chroma_w = src_w >> 1;
+               src_chroma_h = src_h >> 1;
+       } else if (fmt == RSZ_VL_FMT_YCBCR422) {
+               src_chroma_w = src_w >> 1;
+       }
+
+       /* Set up Luma and Chroma step registers */
+       zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w));
+       zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h));
+       zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w));
+       zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h));
+
+       zx_vl_rsz_set_update(zplane);
+}
+
+static void zx_vl_plane_atomic_update(struct drm_plane *plane,
+                                     struct drm_plane_state *old_state)
+{
+       struct zx_plane *zplane = to_zx_plane(plane);
+       struct drm_plane_state *state = plane->state;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_rect *src = &state->src;
+       struct drm_rect *dst = &state->dst;
+       struct drm_gem_cma_object *cma_obj;
+       void __iomem *layer = zplane->layer;
+       void __iomem *hbsc = zplane->hbsc;
+       void __iomem *paddr_reg;
+       dma_addr_t paddr;
+       u32 src_x, src_y, src_w, src_h;
+       u32 dst_x, dst_y, dst_w, dst_h;
+       uint32_t format;
+       int fmt;
+       int num_planes;
+       int i;
+
+       if (!fb)
+               return;
+
+       format = fb->format->format;
+
+       src_x = src->x1 >> 16;
+       src_y = src->y1 >> 16;
+       src_w = drm_rect_width(src) >> 16;
+       src_h = drm_rect_height(src) >> 16;
+
+       dst_x = dst->x1;
+       dst_y = dst->y1;
+       dst_w = drm_rect_width(dst);
+       dst_h = drm_rect_height(dst);
+
+       /* Set up data address registers for Y, Cb and Cr planes */
+       num_planes = drm_format_num_planes(format);
+       paddr_reg = layer + VL_Y;
+       for (i = 0; i < num_planes; i++) {
+               cma_obj = drm_fb_cma_get_gem_obj(fb, i);
+               paddr = cma_obj->paddr + fb->offsets[i];
+               paddr += src_y * fb->pitches[i];
+               paddr += src_x * drm_format_plane_cpp(format, i);
+               zx_writel(paddr_reg, paddr);
+               paddr_reg += 4;
+       }
+
+       /* Set up source height/width register */
+       zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
+
+       /* Set up start position register */
+       zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
+
+       /* Set up end position register */
+       zx_writel(layer + VL_POS_END,
+                 GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
+
+       /* Strides of Cb and Cr planes should be identical */
+       zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) |
+                 CHROMA_STRIDE(fb->pitches[1]));
+
+       /* Set up video layer data format */
+       fmt = zx_vl_get_fmt(format);
+       if (fmt >= 0)
+               zx_writel(layer + VL_CTRL1, fmt);
+
+       /* Always use the scaler, as one is always present (set = no bypass) */
+       zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE,
+                      VL_SCALER_BYPASS_MODE);
+
+       zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h);
+
+       /* Enable HBSC block */
+       zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
+
+       zx_vou_layer_enable(plane);
+
+       zx_vl_set_update(zplane);
+}
+
+static void zx_plane_atomic_disable(struct drm_plane *plane,
+                                   struct drm_plane_state *old_state)
+{
+       struct zx_plane *zplane = to_zx_plane(plane);
+       void __iomem *hbsc = zplane->hbsc;
+
+       zx_vou_layer_disable(plane);
+
+       /* Disable HBSC block */
+       zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
+}
+
+static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = {
+       .atomic_check = zx_vl_plane_atomic_check,
+       .atomic_update = zx_vl_plane_atomic_update,
+       .atomic_disable = zx_plane_atomic_disable,
+};
+
 static int zx_gl_plane_atomic_check(struct drm_plane *plane,
                                    struct drm_plane_state *plane_state)
 {
@@ -107,14 +352,6 @@ static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
        zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
 }
 
-void zx_plane_set_update(struct drm_plane *plane)
-{
-       struct zx_plane *zplane = to_zx_plane(plane);
-
-       zx_gl_rsz_set_update(zplane);
-       zx_gl_set_update(zplane);
-}
-
 static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
                            u32 dst_w, u32 dst_h)
 {
@@ -207,12 +444,15 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane,
        /* Enable HBSC block */
        zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
 
+       zx_vou_layer_enable(plane);
+
        zx_gl_set_update(zplane);
 }
 
 static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
        .atomic_check = zx_gl_plane_atomic_check,
        .atomic_update = zx_gl_plane_atomic_update,
+       .atomic_disable = zx_plane_atomic_disable,
 };
 
 static void zx_plane_destroy(struct drm_plane *plane)
@@ -230,6 +470,28 @@ static const struct drm_plane_funcs zx_plane_funcs = {
        .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
 };
 
+void zx_plane_set_update(struct drm_plane *plane)
+{
+       struct zx_plane *zplane = to_zx_plane(plane);
+
+       /* Do nothing if the plane is not enabled */
+       if (!plane->state->crtc)
+               return;
+
+       switch (plane->type) {
+       case DRM_PLANE_TYPE_PRIMARY:
+               zx_gl_rsz_set_update(zplane);
+               zx_gl_set_update(zplane);
+               break;
+       case DRM_PLANE_TYPE_OVERLAY:
+               zx_vl_rsz_set_update(zplane);
+               zx_vl_set_update(zplane);
+               break;
+       default:
+               WARN_ONCE(1, "unsupported plane type %d\n", plane->type);
+       }
+}
+
 static void zx_plane_hbsc_init(struct zx_plane *zplane)
 {
        void __iomem *hbsc = zplane->hbsc;
@@ -248,28 +510,16 @@ static void zx_plane_hbsc_init(struct zx_plane *zplane)
        zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
 }
 
-struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
-                               struct zx_layer_data *data,
-                               enum drm_plane_type type)
+int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
+                 enum drm_plane_type type)
 {
        const struct drm_plane_helper_funcs *helper;
-       struct zx_plane *zplane;
-       struct drm_plane *plane;
+       struct drm_plane *plane = &zplane->plane;
+       struct device *dev = zplane->dev;
        const uint32_t *formats;
        unsigned int format_count;
        int ret;
 
-       zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
-       if (!zplane)
-               return ERR_PTR(-ENOMEM);
-
-       plane = &zplane->plane;
-
-       zplane->layer = data->layer;
-       zplane->hbsc = data->hbsc;
-       zplane->csc = data->csc;
-       zplane->rsz = data->rsz;
-
        zx_plane_hbsc_init(zplane);
 
        switch (type) {
@@ -279,10 +529,12 @@ struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
                format_count = ARRAY_SIZE(gl_formats);
                break;
        case DRM_PLANE_TYPE_OVERLAY:
-               /* TODO: add video layer (vl) support */
+               helper = &zx_vl_plane_helper_funcs;
+               formats = vl_formats;
+               format_count = ARRAY_SIZE(vl_formats);
                break;
        default:
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
        }
 
        ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
@@ -290,10 +542,10 @@ struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
                                       type, NULL);
        if (ret) {
                DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
-               return ERR_PTR(ret);
+               return ret;
        }
 
        drm_plane_helper_add(plane, helper);
 
-       return plane;
+       return 0;
 }
index 2b82cd558d9d9774fc12d5684f6515019e0b01cd..933611ddffd0d20412e5ba590a5f3332b259d047 100644 (file)
 #ifndef __ZX_PLANE_H__
 #define __ZX_PLANE_H__
 
-struct zx_layer_data {
+struct zx_plane {
+       struct drm_plane plane;
+       struct device *dev;
        void __iomem *layer;
        void __iomem *csc;
        void __iomem *hbsc;
        void __iomem *rsz;
+       const struct vou_layer_bits *bits;
 };
 
-struct drm_plane *zx_plane_init(struct drm_device *drm, struct device *dev,
-                               struct zx_layer_data *data,
-                               enum drm_plane_type type);
+#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
+
+int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
+                 enum drm_plane_type type);
 void zx_plane_set_update(struct drm_plane *plane);
 
 #endif /* __ZX_PLANE_H__ */
index 3dde6716a558f3798612b42ccd2cfc71ec3edcf1..65f271aeabed14e4deb8d0b605d5e4506ea5bf06 100644 (file)
 #define GL_POS_X(x)    (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
 #define GL_POS_Y(x)    (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
 
+/* VL registers */
+#define VL_CTRL0                       0x00
+#define VL_UPDATE                      BIT(3)
+#define VL_CTRL1                       0x04
+#define VL_YUV420_PLANAR               BIT(5)
+#define VL_YUV422_SHIFT                        3
+#define VL_YUV422_YUYV                 (0 << VL_YUV422_SHIFT)
+#define VL_YUV422_YVYU                 (1 << VL_YUV422_SHIFT)
+#define VL_YUV422_UYVY                 (2 << VL_YUV422_SHIFT)
+#define VL_YUV422_VYUY                 (3 << VL_YUV422_SHIFT)
+#define VL_FMT_YUV420                  0
+#define VL_FMT_YUV422                  1
+#define VL_FMT_YUV420_P010             2
+#define VL_FMT_YUV420_HANTRO           3
+#define VL_FMT_YUV444_8BIT             4
+#define VL_FMT_YUV444_10BIT            5
+#define VL_CTRL2                       0x08
+#define VL_SCALER_BYPASS_MODE          BIT(0)
+#define VL_STRIDE                      0x0c
+#define LUMA_STRIDE_SHIFT              16
+#define LUMA_STRIDE_MASK               (0xffff << LUMA_STRIDE_SHIFT)
+#define CHROMA_STRIDE_SHIFT            0
+#define CHROMA_STRIDE_MASK             (0xffff << CHROMA_STRIDE_SHIFT)
+#define VL_SRC_SIZE                    0x10
+#define VL_Y                           0x14
+#define VL_POS_START                   0x30
+#define VL_POS_END                     0x34
+
+#define LUMA_STRIDE(x)  (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK)
+#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK)
+
 /* CSC registers */
 #define CSC_CTRL0                      0x30
 #define CSC_COV_MODE_SHIFT             16
 #define RSZ_DEST_CFG                   0x04
 #define RSZ_ENABLE_CFG                 0x14
 
+#define RSZ_VL_LUMA_HOR                        0x08
+#define RSZ_VL_LUMA_VER                        0x0c
+#define RSZ_VL_CHROMA_HOR              0x10
+#define RSZ_VL_CHROMA_VER              0x14
+#define RSZ_VL_CTRL_CFG                        0x18
+#define RSZ_VL_FMT_SHIFT               3
+#define RSZ_VL_FMT_MASK                        (0x3 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_FMT_YCBCR420            (0x0 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_FMT_YCBCR422            (0x1 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_FMT_YCBCR444            (0x2 << RSZ_VL_FMT_SHIFT)
+#define RSZ_VL_ENABLE_CFG              0x1c
+
 #define RSZ_VER_SHIFT                  16
 #define RSZ_VER_MASK                   (0xffff << RSZ_VER_SHIFT)
 #define RSZ_HOR_SHIFT                  0
 #define RSZ_VER(x)     (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
 #define RSZ_HOR(x)     (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
 
+#define RSZ_DATA_STEP_SHIFT            16
+#define RSZ_DATA_STEP_MASK             (0xffff << RSZ_DATA_STEP_SHIFT)
+#define RSZ_PARA_STEP_SHIFT            0
+#define RSZ_PARA_STEP_MASK             (0xffff << RSZ_PARA_STEP_SHIFT)
+
+#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK)
+#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK)
+
 /* HBSC registers */
 #define HBSC_SATURATION                        0x00
 #define HBSC_HUE                       0x04
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
new file mode 100644 (file)
index 0000000..b56dc69
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * Copyright 2017 Linaro Ltd.
+ * Copyright 2017 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drmP.h>
+
+#include "zx_drm_drv.h"
+#include "zx_tvenc_regs.h"
+#include "zx_vou.h"
+
+struct zx_tvenc_pwrctrl {
+       struct regmap *regmap;
+       u32 reg;
+       u32 mask;
+};
+
+struct zx_tvenc {
+       struct drm_connector connector;
+       struct drm_encoder encoder;
+       struct device *dev;
+       void __iomem *mmio;
+       const struct vou_inf *inf;
+       struct zx_tvenc_pwrctrl pwrctrl;
+};
+
+#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x)
+
+struct zx_tvenc_mode {
+       struct drm_display_mode mode;
+       u32 video_info;
+       u32 video_res;
+       u32 field1_param;
+       u32 field2_param;
+       u32 burst_line_odd1;
+       u32 burst_line_even1;
+       u32 burst_line_odd2;
+       u32 burst_line_even2;
+       u32 line_timing_param;
+       u32 weight_value;
+       u32 blank_black_level;
+       u32 burst_level;
+       u32 control_param;
+       u32 sub_carrier_phase1;
+       u32 phase_line_incr_cvbs;
+};
+
+/*
+ * The CRM cannot directly provide a suitable frequency, so we have to
+ * ask the CRM for a multiplied rate and use the divider in the VOU to
+ * get the desired one.
+ */
+#define TVENC_CLOCK_MULTIPLIER 4
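
Concretely: PAL and NTSC both need a 13.5 MHz pixel clock, so the modes below
report clock = 13500 * 4 = 54000 kHz to the CRM, and zx_tvenc_encoder_mode_set()
then programs VOU_DIV_INF to VOU_DIV_4, dividing the 54 MHz back down to
13.5 MHz at the interface.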
+
+static const struct zx_tvenc_mode tvenc_mode_pal = {
+       .mode = {
+               .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
+               .hdisplay = 720,
+               .hsync_start = 720 + 12,
+               .hsync_end = 720 + 12 + 2,
+               .htotal = 720 + 12 + 2 + 130,
+               .vdisplay = 576,
+               .vsync_start = 576 + 2,
+               .vsync_end = 576 + 2 + 2,
+               .vtotal = 576 + 2 + 2 + 20,
+               .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                        DRM_MODE_FLAG_INTERLACE,
+       },
+       .video_info = 0x00040040,
+       .video_res = 0x05a9c760,
+       .field1_param = 0x0004d416,
+       .field2_param = 0x0009b94f,
+       .burst_line_odd1 = 0x0004d406,
+       .burst_line_even1 = 0x0009b53e,
+       .burst_line_odd2 = 0x0004d805,
+       .burst_line_even2 = 0x0009b93f,
+       .line_timing_param = 0x06a96fdf,
+       .weight_value = 0x00c188a0,
+       .blank_black_level = 0x0000fcfc,
+       .burst_level = 0x00001595,
+       .control_param = 0x00000001,
+       .sub_carrier_phase1 = 0x1504c566,
+       .phase_line_incr_cvbs = 0xc068db8c,
+};
+
+static const struct zx_tvenc_mode tvenc_mode_ntsc = {
+       .mode = {
+               .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
+               .hdisplay = 720,
+               .hsync_start = 720 + 16,
+               .hsync_end = 720 + 16 + 2,
+               .htotal = 720 + 16 + 2 + 120,
+               .vdisplay = 480,
+               .vsync_start = 480 + 3,
+               .vsync_end = 480 + 3 + 2,
+               .vtotal = 480 + 3 + 2 + 17,
+               .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+                        DRM_MODE_FLAG_INTERLACE,
+       },
+       .video_info = 0x00040080,
+       .video_res = 0x05a8375a,
+       .field1_param = 0x00041817,
+       .field2_param = 0x0008351e,
+       .burst_line_odd1 = 0x00041006,
+       .burst_line_even1 = 0x0008290d,
+       .burst_line_odd2 = 0x00000000,
+       .burst_line_even2 = 0x00000000,
+       .line_timing_param = 0x06a8ef9e,
+       .weight_value = 0x00b68197,
+       .blank_black_level = 0x0000f0f0,
+       .burst_level = 0x0000009c,
+       .control_param = 0x00000001,
+       .sub_carrier_phase1 = 0x10f83e10,
+       .phase_line_incr_cvbs = 0x80000000,
+};
+
+static const struct zx_tvenc_mode *tvenc_modes[] = {
+       &tvenc_mode_pal,
+       &tvenc_mode_ntsc,
+};
+
+static const struct zx_tvenc_mode *
+zx_tvenc_find_zmode(struct drm_display_mode *mode)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
+               const struct zx_tvenc_mode *zmode = tvenc_modes[i];
+
+               if (drm_mode_equal(mode, &zmode->mode))
+                       return zmode;
+       }
+
+       return NULL;
+}
+
+static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder,
+                                     struct drm_display_mode *mode,
+                                     struct drm_display_mode *adj_mode)
+{
+       struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
+       const struct zx_tvenc_mode *zmode;
+       struct vou_div_config configs[] = {
+               { VOU_DIV_INF,   VOU_DIV_4 },
+               { VOU_DIV_TVENC, VOU_DIV_1 },
+               { VOU_DIV_LAYER, VOU_DIV_2 },
+       };
+
+       zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs));
+
+       zmode = zx_tvenc_find_zmode(mode);
+       if (!zmode) {
+               DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n");
+               return;
+       }
+
+       zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info);
+       zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res);
+       zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param);
+       zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param);
+       zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1);
+       zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1);
+       zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2);
+       zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2);
+       zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM,
+                 zmode->line_timing_param);
+       zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value);
+       zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL,
+                 zmode->blank_black_level);
+       zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level);
+       zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param);
+       zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1,
+                 zmode->sub_carrier_phase1);
+       zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS,
+                 zmode->phase_line_incr_cvbs);
+}
+
+static void zx_tvenc_encoder_enable(struct drm_encoder *encoder)
+{
+       struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
+       struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
+
+       /* Set bit to power up TVENC DAC */
+       regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
+                          pwrctrl->mask);
+
+       vou_inf_enable(VOU_TV_ENC, encoder->crtc);
+
+       zx_writel(tvenc->mmio + VENC_ENABLE, 1);
+}
+
+static void zx_tvenc_encoder_disable(struct drm_encoder *encoder)
+{
+       struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
+       struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
+
+       zx_writel(tvenc->mmio + VENC_ENABLE, 0);
+
+       vou_inf_disable(VOU_TV_ENC, encoder->crtc);
+
+       /* Clear bit to power down TVENC DAC */
+       regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
+}
+
+static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
+       .enable = zx_tvenc_encoder_enable,
+       .disable = zx_tvenc_encoder_disable,
+       .mode_set = zx_tvenc_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
+       .destroy = drm_encoder_cleanup,
+};
+
+static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
+{
+       struct zx_tvenc *tvenc = to_zx_tvenc(connector);
+       struct device *dev = tvenc->dev;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
+               const struct zx_tvenc_mode *zmode = tvenc_modes[i];
+               struct drm_display_mode *mode;
+
+               mode = drm_mode_duplicate(connector->dev, &zmode->mode);
+               if (!mode) {
+                       DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n");
+                       continue;
+               }
+
+               drm_mode_set_name(mode);
+               drm_mode_probed_add(connector, mode);
+       }
+
+       return i;
+}
+
+static enum drm_mode_status
+zx_tvenc_connector_mode_valid(struct drm_connector *connector,
+                             struct drm_display_mode *mode)
+{
+       struct zx_tvenc *tvenc = to_zx_tvenc(connector);
+       const struct zx_tvenc_mode *zmode;
+
+       zmode = zx_tvenc_find_zmode(mode);
+       if (!zmode) {
+               DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name);
+               return MODE_NOMODE;
+       }
+
+       return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = {
+       .get_modes = zx_tvenc_connector_get_modes,
+       .mode_valid = zx_tvenc_connector_mode_valid,
+};
+
+static const struct drm_connector_funcs zx_tvenc_connector_funcs = {
+       .dpms = drm_atomic_helper_connector_dpms,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
+{
+       struct drm_encoder *encoder = &tvenc->encoder;
+       struct drm_connector *connector = &tvenc->connector;
+
+       /*
+        * The tvenc is designed to use the aux channel, as there is a
+        * deflicker block on that channel.
+        */
+       encoder->possible_crtcs = BIT(1);
+
+       drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
+                        DRM_MODE_ENCODER_TVDAC, NULL);
+       drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
+
+       connector->interlace_allowed = true;
+
+       drm_connector_init(drm, connector, &zx_tvenc_connector_funcs,
+                          DRM_MODE_CONNECTOR_Composite);
+       drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
+
+       drm_mode_connector_attach_encoder(connector, encoder);
+
+       return 0;
+}
+
+static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc)
+{
+       struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
+       struct device *dev = tvenc->dev;
+       struct of_phandle_args out_args;
+       struct regmap *regmap;
+       int ret;
+
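+       /*
+        * "zte,tvenc-power-control" carries a syscon phandle plus two cells:
+        * the register offset and the bit mask controlling the DAC power.
+        */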
+       ret = of_parse_phandle_with_fixed_args(dev->of_node,
+                               "zte,tvenc-power-control", 2, 0, &out_args);
+       if (ret)
+               return ret;
+
+       regmap = syscon_node_to_regmap(out_args.np);
+       if (IS_ERR(regmap)) {
+               ret = PTR_ERR(regmap);
+               goto out;
+       }
+
+       pwrctrl->regmap = regmap;
+       pwrctrl->reg = out_args.args[0];
+       pwrctrl->mask = out_args.args[1];
+
+out:
+       of_node_put(out_args.np);
+       return ret;
+}
+
+static int zx_tvenc_bind(struct device *dev, struct device *master, void *data)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct drm_device *drm = data;
+       struct resource *res;
+       struct zx_tvenc *tvenc;
+       int ret;
+
+       tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL);
+       if (!tvenc)
+               return -ENOMEM;
+
+       tvenc->dev = dev;
+       dev_set_drvdata(dev, tvenc);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       tvenc->mmio = devm_ioremap_resource(dev, res);
+       if (IS_ERR(tvenc->mmio)) {
+               ret = PTR_ERR(tvenc->mmio);
+               DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret);
+               return ret;
+       }
+
+       ret = zx_tvenc_pwrctrl_init(tvenc);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
+               return ret;
+       }
+
+       ret = zx_tvenc_register(drm, tvenc);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void zx_tvenc_unbind(struct device *dev, struct device *master,
+                           void *data)
+{
+       /* Nothing to do */
+}
+
+static const struct component_ops zx_tvenc_component_ops = {
+       .bind = zx_tvenc_bind,
+       .unbind = zx_tvenc_unbind,
+};
+
+static int zx_tvenc_probe(struct platform_device *pdev)
+{
+       return component_add(&pdev->dev, &zx_tvenc_component_ops);
+}
+
+static int zx_tvenc_remove(struct platform_device *pdev)
+{
+       component_del(&pdev->dev, &zx_tvenc_component_ops);
+       return 0;
+}
+
+static const struct of_device_id zx_tvenc_of_match[] = {
+       { .compatible = "zte,zx296718-tvenc", },
+       { /* end */ },
+};
+MODULE_DEVICE_TABLE(of, zx_tvenc_of_match);
+
+struct platform_driver zx_tvenc_driver = {
+       .probe = zx_tvenc_probe,
+       .remove = zx_tvenc_remove,
+       .driver = {
+               .name = "zx-tvenc",
+               .of_match_table = zx_tvenc_of_match,
+       },
+};
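
[Not part of the patch: a sketch for orientation.] zx_tvenc only registers a component above; the matching master side (in this driver that would be zx_drm_drv.c) collects such components from DT and binds them in one pass. A minimal sketch of that component-framework pattern, with error handling and of_node refcounting elided, and with the master passing NULL instead of a real drm_device, might look like:

#include <linux/component.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int compare_of(struct device *dev, void *data)
{
        /* match a registered component by its DT node */
        return dev->of_node == data;
}

static int zx_master_bind(struct device *dev)
{
        /*
         * Bind all matched components, i.e. call zx_tvenc_bind() et al.
         * A real master would pass its drm_device here instead of NULL.
         */
        return component_bind_all(dev, NULL);
}

static void zx_master_unbind(struct device *dev)
{
        component_unbind_all(dev, NULL);
}

static const struct component_master_ops zx_master_ops = {
        .bind = zx_master_bind,
        .unbind = zx_master_unbind,
};

static int zx_master_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;
        struct device_node *child;

        /* queue every child node (zx-tvenc, ...) for component matching */
        for_each_available_child_of_node(dev->of_node, child)
                component_match_add(dev, &match, compare_of, child);

        return component_master_add_with_match(dev, &zx_master_ops, match);
}
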
diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h
new file mode 100644 (file)
index 0000000..bd91f5d
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2017 Linaro Ltd.
+ * Copyright 2017 ZTE Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ZX_TVENC_REGS_H__
+#define __ZX_TVENC_REGS_H__
+
+#define VENC_VIDEO_INFO                        0x04
+#define VENC_VIDEO_RES                 0x08
+#define VENC_FIELD1_PARAM              0x10
+#define VENC_FIELD2_PARAM              0x14
+#define VENC_LINE_O_1                  0x18
+#define VENC_LINE_E_1                  0x1c
+#define VENC_LINE_O_2                  0x20
+#define VENC_LINE_E_2                  0x24
+#define VENC_LINE_TIMING_PARAM         0x28
+#define VENC_WEIGHT_VALUE              0x2c
+#define VENC_BLANK_BLACK_LEVEL         0x30
+#define VENC_BURST_LEVEL               0x34
+#define VENC_CONTROL_PARAM             0x3c
+#define VENC_SUB_CARRIER_PHASE1                0x40
+#define VENC_PHASE_LINE_INCR_CVBS      0x48
+#define VENC_ENABLE                    0xa8
+
+#endif /* __ZX_TVENC_REGS_H__ */
index a86e3a5852a2723358b8a7c09b83ffde64299a5d..cf92d675feaac903e394333aab4fe3a5b8ec6996 100644 (file)
@@ -40,6 +40,7 @@ struct zx_crtc_regs {
        u32 fir_active;
        u32 fir_htiming;
        u32 fir_vtiming;
+       u32 sec_vtiming;
        u32 timing_shift;
        u32 timing_pi_shift;
 };
@@ -48,6 +49,7 @@ static const struct zx_crtc_regs main_crtc_regs = {
        .fir_active = FIR_MAIN_ACTIVE,
        .fir_htiming = FIR_MAIN_H_TIMING,
        .fir_vtiming = FIR_MAIN_V_TIMING,
+       .sec_vtiming = SEC_MAIN_V_TIMING,
        .timing_shift = TIMING_MAIN_SHIFT,
        .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
 };
@@ -56,6 +58,7 @@ static const struct zx_crtc_regs aux_crtc_regs = {
        .fir_active = FIR_AUX_ACTIVE,
        .fir_htiming = FIR_AUX_H_TIMING,
        .fir_vtiming = FIR_AUX_V_TIMING,
+       .sec_vtiming = SEC_AUX_V_TIMING,
        .timing_shift = TIMING_AUX_SHIFT,
        .timing_pi_shift = TIMING_AUX_PI_SHIFT,
 };
@@ -65,7 +68,17 @@ struct zx_crtc_bits {
        u32 polarity_shift;
        u32 int_frame_mask;
        u32 tc_enable;
-       u32 gl_enable;
+       u32 sec_vactive_shift;
+       u32 sec_vactive_mask;
+       u32 interlace_select;
+       u32 pi_enable;
+       u32 div_vga_shift;
+       u32 div_pic_shift;
+       u32 div_tvenc_shift;
+       u32 div_hdmi_pnx_shift;
+       u32 div_hdmi_shift;
+       u32 div_inf_shift;
+       u32 div_layer_shift;
 };
 
 static const struct zx_crtc_bits main_crtc_bits = {
@@ -73,7 +86,17 @@ static const struct zx_crtc_bits main_crtc_bits = {
        .polarity_shift = MAIN_POL_SHIFT,
        .int_frame_mask = TIMING_INT_MAIN_FRAME,
        .tc_enable = MAIN_TC_EN,
-       .gl_enable = OSD_CTRL0_GL0_EN,
+       .sec_vactive_shift = SEC_VACT_MAIN_SHIFT,
+       .sec_vactive_mask = SEC_VACT_MAIN_MASK,
+       .interlace_select = MAIN_INTERLACE_SEL,
+       .pi_enable = MAIN_PI_EN,
+       .div_vga_shift = VGA_MAIN_DIV_SHIFT,
+       .div_pic_shift = PIC_MAIN_DIV_SHIFT,
+       .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT,
+       .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT,
+       .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT,
+       .div_inf_shift = INF_MAIN_DIV_SHIFT,
+       .div_layer_shift = LAYER_MAIN_DIV_SHIFT,
 };
 
 static const struct zx_crtc_bits aux_crtc_bits = {
@@ -81,7 +104,17 @@ static const struct zx_crtc_bits aux_crtc_bits = {
        .polarity_shift = AUX_POL_SHIFT,
        .int_frame_mask = TIMING_INT_AUX_FRAME,
        .tc_enable = AUX_TC_EN,
-       .gl_enable = OSD_CTRL0_GL1_EN,
+       .sec_vactive_shift = SEC_VACT_AUX_SHIFT,
+       .sec_vactive_mask = SEC_VACT_AUX_MASK,
+       .interlace_select = AUX_INTERLACE_SEL,
+       .pi_enable = AUX_PI_EN,
+       .div_vga_shift = VGA_AUX_DIV_SHIFT,
+       .div_pic_shift = PIC_AUX_DIV_SHIFT,
+       .div_tvenc_shift = TVENC_AUX_DIV_SHIFT,
+       .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT,
+       .div_hdmi_shift = HDMI_AUX_DIV_SHIFT,
+       .div_inf_shift = INF_AUX_DIV_SHIFT,
+       .div_layer_shift = LAYER_AUX_DIV_SHIFT,
 };
 
 struct zx_crtc {
@@ -97,6 +130,40 @@ struct zx_crtc {
 
 #define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
 
+struct vou_layer_bits {
+       u32 enable;
+       u32 chnsel;
+       u32 clksel;
+};
+
+static const struct vou_layer_bits zx_gl_bits[GL_NUM] = {
+       {
+               .enable = OSD_CTRL0_GL0_EN,
+               .chnsel = OSD_CTRL0_GL0_SEL,
+               .clksel = VOU_CLK_GL0_SEL,
+       }, {
+               .enable = OSD_CTRL0_GL1_EN,
+               .chnsel = OSD_CTRL0_GL1_SEL,
+               .clksel = VOU_CLK_GL1_SEL,
+       },
+};
+
+static const struct vou_layer_bits zx_vl_bits[VL_NUM] = {
+       {
+               .enable = OSD_CTRL0_VL0_EN,
+               .chnsel = OSD_CTRL0_VL0_SEL,
+               .clksel = VOU_CLK_VL0_SEL,
+       }, {
+               .enable = OSD_CTRL0_VL1_EN,
+               .chnsel = OSD_CTRL0_VL1_SEL,
+               .clksel = VOU_CLK_VL1_SEL,
+       }, {
+               .enable = OSD_CTRL0_VL2_EN,
+               .chnsel = OSD_CTRL0_VL2_SEL,
+               .clksel = VOU_CLK_VL2_SEL,
+       },
+};
+
 struct zx_vou_hw {
        struct device *dev;
        void __iomem *osd;
@@ -112,6 +179,33 @@ struct zx_vou_hw {
        struct zx_crtc *aux_crtc;
 };
 
+enum vou_inf_data_sel {
+       VOU_YUV444      = 0,
+       VOU_RGB_101010  = 1,
+       VOU_RGB_888     = 2,
+       VOU_RGB_666     = 3,
+};
+
+struct vou_inf {
+       enum vou_inf_id id;
+       enum vou_inf_data_sel data_sel;
+       u32 clocks_en_bits;
+       u32 clocks_sel_bits;
+};
+
+static struct vou_inf vou_infs[] = {
+       [VOU_HDMI] = {
+               .data_sel = VOU_YUV444,
+               .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
+               .clocks_sel_bits = BIT(13) | BIT(2),
+       },
+       [VOU_TV_ENC] = {
+               .data_sel = VOU_YUV444,
+               .clocks_en_bits = BIT(15),
+               .clocks_sel_bits = BIT(11) | BIT(0),
+       },
+};
+
 static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
 {
        struct zx_crtc *zcrtc = to_zx_crtc(crtc);
@@ -119,20 +213,30 @@ static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
        return zcrtc->vou;
 }
 
-void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
+void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
+                           enum vou_inf_hdmi_audio aud)
 {
        struct zx_crtc *zcrtc = to_zx_crtc(crtc);
        struct zx_vou_hw *vou = zcrtc->vou;
+
+       zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud);
+}
+
+void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc)
+{
+       struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+       struct zx_vou_hw *vou = zcrtc->vou;
+       struct vou_inf *inf = &vou_infs[id];
        bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
-       u32 data_sel_shift = inf->id << 1;
+       u32 data_sel_shift = id << 1;
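+       /* each interface owns a 2-bit data_sel field, hence 'id << 1' */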
 
        /* Select data format */
        zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
                       inf->data_sel << data_sel_shift);
 
        /* Select channel */
-       zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << inf->id,
-                      zcrtc->chn_type << inf->id);
+       zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id,
+                      zcrtc->chn_type << id);
 
        /* Select interface clocks */
        zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
@@ -143,20 +247,79 @@ void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc)
                       inf->clocks_en_bits);
 
        /* Enable the device */
-       zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 1 << inf->id);
+       zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id);
 }
 
-void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc)
+void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc)
 {
        struct zx_vou_hw *vou = crtc_to_vou(crtc);
+       struct vou_inf *inf = &vou_infs[id];
 
        /* Disable the device */
-       zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << inf->id, 0);
+       zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0);
 
        /* Disable interface clocks */
        zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
 }
 
+void zx_vou_config_dividers(struct drm_crtc *crtc,
+                           struct vou_div_config *configs, int num)
+{
+       struct zx_crtc *zcrtc = to_zx_crtc(crtc);
+       struct zx_vou_hw *vou = zcrtc->vou;
+       const struct zx_crtc_bits *bits = zcrtc->bits;
+       int i;
+
+       /* Clear update flag bit */
+       zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0);
+
+       for (i = 0; i < num; i++) {
+               struct vou_div_config *cfg = configs + i;
+               u32 reg, shift;
+
+               switch (cfg->id) {
+               case VOU_DIV_VGA:
+                       reg = VOU_CLK_SEL;
+                       shift = bits->div_vga_shift;
+                       break;
+               case VOU_DIV_PIC:
+                       reg = VOU_CLK_SEL;
+                       shift = bits->div_pic_shift;
+                       break;
+               case VOU_DIV_TVENC:
+                       reg = VOU_DIV_PARA;
+                       shift = bits->div_tvenc_shift;
+                       break;
+               case VOU_DIV_HDMI_PNX:
+                       reg = VOU_DIV_PARA;
+                       shift = bits->div_hdmi_pnx_shift;
+                       break;
+               case VOU_DIV_HDMI:
+                       reg = VOU_DIV_PARA;
+                       shift = bits->div_hdmi_shift;
+                       break;
+               case VOU_DIV_INF:
+                       reg = VOU_DIV_PARA;
+                       shift = bits->div_inf_shift;
+                       break;
+               case VOU_DIV_LAYER:
+                       reg = VOU_DIV_PARA;
+                       shift = bits->div_layer_shift;
+                       break;
+               default:
+                       continue;
+               }
+
+               /* Each divider occupies 3 bits */
+               zx_writel_mask(vou->vouctl + reg, 0x7 << shift,
+                              cfg->val << shift);
+       }
+
+       /* Set the update flag bit to make the dividers take effect */
+       zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE,
+                      DIV_PARA_UPDATE);
+}
+
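
[Not part of the patch.] As a usage sketch of the new helper, with purely illustrative divider values, an encoder driver could configure its clock dividers on a CRTC like this (assumes the declarations from zx_vou.h and <linux/kernel.h>):

/* Illustrative only: halve the TVENC and interface clocks, keep the
 * layer clock undivided; the UPDATE flag is committed inside
 * zx_vou_config_dividers(). */
static void example_set_dividers(struct drm_crtc *crtc)
{
        struct vou_div_config cfgs[] = {
                { .id = VOU_DIV_TVENC, .val = VOU_DIV_2 },
                { .id = VOU_DIV_INF,   .val = VOU_DIV_2 },
                { .id = VOU_DIV_LAYER, .val = VOU_DIV_1 },
        };

        zx_vou_config_dividers(crtc, cfgs, ARRAY_SIZE(cfgs));
}
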
 static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
 {
        zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
@@ -165,11 +328,13 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
 static void zx_crtc_enable(struct drm_crtc *crtc)
 {
        struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
        struct zx_crtc *zcrtc = to_zx_crtc(crtc);
        struct zx_vou_hw *vou = zcrtc->vou;
        const struct zx_crtc_regs *regs = zcrtc->regs;
        const struct zx_crtc_bits *bits = zcrtc->bits;
        struct videomode vm;
+       u32 scan_mask;
        u32 pol = 0;
        u32 val;
        int ret;
@@ -177,7 +342,7 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
        drm_display_mode_to_videomode(mode, &vm);
 
        /* Set up timing parameters */
-       val = V_ACTIVE(vm.vactive - 1);
+       val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1);
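+       /* e.g. 1080i: each field is programmed as V_ACTIVE(1080 / 2 - 1) */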
        val |= H_ACTIVE(vm.hactive - 1);
        zx_writel(vou->timing + regs->fir_active, val);
 
@@ -191,6 +356,25 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
        val |= FRONT_PORCH(vm.vfront_porch - 1);
        zx_writel(vou->timing + regs->fir_vtiming, val);
 
+       if (interlaced) {
+               u32 shift = bits->sec_vactive_shift;
+               u32 mask = bits->sec_vactive_mask;
+
+               val = zx_readl(vou->timing + SEC_V_ACTIVE);
+               val &= ~mask;
+               val |= ((vm.vactive / 2 - 1) << shift) & mask;
+               zx_writel(vou->timing + SEC_V_ACTIVE, val);
+
+               val = SYNC_WIDE(vm.vsync_len - 1);
+               /*
+                * The back porch of the second field needs to be one line
+                * longer than that of the first field, hence vm.vback_porch
+                * is used here without the usual '- 1'.
+                */
+               val |= BACK_PORCH(vm.vback_porch);
+               val |= FRONT_PORCH(vm.vfront_porch - 1);
+               zx_writel(vou->timing + regs->sec_vtiming, val);
+       }
+
        /* Set up polarities */
        if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
                pol |= 1 << POL_VSYNC_SHIFT;
@@ -201,9 +385,17 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
                       pol << bits->polarity_shift);
 
        /* Setup SHIFT register by following what ZTE BSP does */
-       zx_writel(vou->timing + regs->timing_shift, H_SHIFT_VAL);
+       val = H_SHIFT_VAL;
+       if (interlaced)
+               val |= V_SHIFT_VAL << 16;
+       zx_writel(vou->timing + regs->timing_shift, val);
        zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
 
+       /* Select progressive or interlaced scan */
+       scan_mask = bits->interlace_select | bits->pi_enable;
+       zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask,
+                      interlaced ? scan_mask : 0);
+
        /* Enable TIMING_CTRL */
        zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
                       bits->tc_enable);
@@ -214,16 +406,16 @@ static void zx_crtc_enable(struct drm_crtc *crtc)
        zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
                       vm.vactive << CHN_SCREEN_H_SHIFT);
 
+       /* Configure channel interlace buffer control */
+       zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN,
+                      interlaced ? CHN_INTERLACE_EN : 0);
+
        /* Update channel */
        vou_chn_set_update(zcrtc);
 
        /* Enable channel */
        zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
 
-       /* Enable Graphic Layer */
-       zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable,
-                      bits->gl_enable);
-
        drm_crtc_vblank_on(crtc);
 
        ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
@@ -247,9 +439,6 @@ static void zx_crtc_disable(struct drm_crtc *crtc)
 
        drm_crtc_vblank_off(crtc);
 
-       /* Disable Graphic Layer */
-       zx_writel_mask(vou->osd + OSD_CTRL0, bits->gl_enable, 0);
-
        /* Disable channel */
        zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
 
@@ -294,7 +483,7 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
                        enum vou_chn_type chn_type)
 {
        struct device *dev = vou->dev;
-       struct zx_layer_data data;
+       struct zx_plane *zplane;
        struct zx_crtc *zcrtc;
        int ret;
 
@@ -305,19 +494,27 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
        zcrtc->vou = vou;
        zcrtc->chn_type = chn_type;
 
+       zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+       if (!zplane)
+               return -ENOMEM;
+
+       zplane->dev = dev;
+
        if (chn_type == VOU_CHN_MAIN) {
-               data.layer = vou->osd + MAIN_GL_OFFSET;
-               data.csc = vou->osd + MAIN_CSC_OFFSET;
-               data.hbsc = vou->osd + MAIN_HBSC_OFFSET;
-               data.rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+               zplane->layer = vou->osd + MAIN_GL_OFFSET;
+               zplane->csc = vou->osd + MAIN_CSC_OFFSET;
+               zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET;
+               zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET;
+               zplane->bits = &zx_gl_bits[0];
                zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
                zcrtc->regs = &main_crtc_regs;
                zcrtc->bits = &main_crtc_bits;
        } else {
-               data.layer = vou->osd + AUX_GL_OFFSET;
-               data.csc = vou->osd + AUX_CSC_OFFSET;
-               data.hbsc = vou->osd + AUX_HBSC_OFFSET;
-               data.rsz = vou->otfppu + AUX_RSZ_OFFSET;
+               zplane->layer = vou->osd + AUX_GL_OFFSET;
+               zplane->csc = vou->osd + AUX_CSC_OFFSET;
+               zplane->hbsc = vou->osd + AUX_HBSC_OFFSET;
+               zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET;
+               zplane->bits = &zx_gl_bits[1];
                zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
                zcrtc->regs = &aux_crtc_regs;
                zcrtc->bits = &aux_crtc_bits;
@@ -331,13 +528,14 @@ static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
                return ret;
        }
 
-       zcrtc->primary = zx_plane_init(drm, dev, &data, DRM_PLANE_TYPE_PRIMARY);
-       if (IS_ERR(zcrtc->primary)) {
-               ret = PTR_ERR(zcrtc->primary);
+       ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY);
+       if (ret) {
                DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
                return ret;
        }
 
+       zcrtc->primary = &zplane->plane;
+
        ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
                                        &zx_crtc_funcs, NULL);
        if (ret) {
@@ -393,6 +591,78 @@ void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe)
                       zcrtc->bits->int_frame_mask, 0);
 }
 
+void zx_vou_layer_enable(struct drm_plane *plane)
+{
+       struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc);
+       struct zx_vou_hw *vou = zcrtc->vou;
+       struct zx_plane *zplane = to_zx_plane(plane);
+       const struct vou_layer_bits *bits = zplane->bits;
+
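+       /* chnsel/clksel cleared selects the main channel/clock, set selects aux */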
+       if (zcrtc->chn_type == VOU_CHN_MAIN) {
+               zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, 0);
+               zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0);
+       } else {
+               zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel,
+                              bits->chnsel);
+               zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel,
+                              bits->clksel);
+       }
+
+       zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
+}
+
+void zx_vou_layer_disable(struct drm_plane *plane)
+{
+       struct zx_crtc *zcrtc = to_zx_crtc(plane->crtc);
+       struct zx_vou_hw *vou = zcrtc->vou;
+       struct zx_plane *zplane = to_zx_plane(plane);
+       const struct vou_layer_bits *bits = zplane->bits;
+
+       zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0);
+}
+
+static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou)
+{
+       struct device *dev = vou->dev;
+       struct zx_plane *zplane;
+       int i;
+       int ret;
+
+       /*
+        * VL0 has some quirks in its scaling support which need special
+        * handling.  Let's leave it out for now.
+        */
+       for (i = 1; i < VL_NUM; i++) {
+               zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
+               if (!zplane) {
+                       DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i);
+                       return;
+               }
+
+               zplane->layer = vou->osd + OSD_VL_OFFSET(i);
+               zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i);
+               zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i);
+               zplane->bits = &zx_vl_bits[i];
+
+               ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY);
+               if (ret) {
+                       DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i);
+                       continue;
+               }
+       }
+}
+
+static inline void zx_osd_int_update(struct zx_crtc *zcrtc)
+{
+       struct drm_crtc *crtc = &zcrtc->crtc;
+       struct drm_plane *plane;
+
+       vou_chn_set_update(zcrtc);
+
+       drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
+               zx_plane_set_update(plane);
+}
+
 static irqreturn_t vou_irq_handler(int irq, void *dev_id)
 {
        struct zx_vou_hw *vou = dev_id;
@@ -412,15 +682,11 @@ static irqreturn_t vou_irq_handler(int irq, void *dev_id)
        state = zx_readl(vou->osd + OSD_INT_STA);
        zx_writel(vou->osd + OSD_INT_CLRSTA, state);
 
-       if (state & OSD_INT_MAIN_UPT) {
-               vou_chn_set_update(vou->main_crtc);
-               zx_plane_set_update(vou->main_crtc->primary);
-       }
+       if (state & OSD_INT_MAIN_UPT)
+               zx_osd_int_update(vou->main_crtc);
 
-       if (state & OSD_INT_AUX_UPT) {
-               vou_chn_set_update(vou->aux_crtc);
-               zx_plane_set_update(vou->aux_crtc->primary);
-       }
+       if (state & OSD_INT_AUX_UPT)
+               zx_osd_int_update(vou->aux_crtc);
 
        if (state & OSD_INT_ERROR)
                DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
@@ -451,19 +717,9 @@ static void vou_dtrc_init(struct zx_vou_hw *vou)
 
 static void vou_hw_init(struct zx_vou_hw *vou)
 {
-       /* Set GL0 to main channel and GL1 to aux channel */
-       zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL0_SEL, 0);
-       zx_writel_mask(vou->osd + OSD_CTRL0, OSD_CTRL0_GL1_SEL,
-                      OSD_CTRL0_GL1_SEL);
-
        /* Release reset for all VOU modules */
        zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
 
-       /* Select main clock for GL0 and aux clock for GL1 module */
-       zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL0_SEL, 0);
-       zx_writel_mask(vou->vouctl + VOU_CLK_SEL, VOU_CLK_GL1_SEL,
-                      VOU_CLK_GL1_SEL);
-
        /* Enable clock auto-gating for all VOU modules */
        zx_writel(vou->vouctl + VOU_CLK_REQEN, ~0);
 
@@ -600,6 +856,8 @@ static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
                goto disable_ppu_clk;
        }
 
+       zx_overlay_init(drm, vou);
+
        return 0;
 
 disable_ppu_clk:
index 349e06cd86f48517bdbe94af3a788acd1d1a5e9c..57e3c31ee6a5ab6aed1eff7bdb89d2d6897eb0a7 100644 (file)
@@ -23,24 +23,48 @@ enum vou_inf_id {
        VOU_VGA         = 5,
 };
 
-enum vou_inf_data_sel {
-       VOU_YUV444      = 0,
-       VOU_RGB_101010  = 1,
-       VOU_RGB_888     = 2,
-       VOU_RGB_666     = 3,
+enum vou_inf_hdmi_audio {
+       VOU_HDMI_AUD_SPDIF      = BIT(0),
+       VOU_HDMI_AUD_I2S        = BIT(1),
+       VOU_HDMI_AUD_DSD        = BIT(2),
+       VOU_HDMI_AUD_HBR        = BIT(3),
+       VOU_HDMI_AUD_PARALLEL   = BIT(4),
 };
 
-struct vou_inf {
-       enum vou_inf_id id;
-       enum vou_inf_data_sel data_sel;
-       u32 clocks_en_bits;
-       u32 clocks_sel_bits;
+void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
+                           enum vou_inf_hdmi_audio aud);
+void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc);
+void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc);
+
+enum vou_div_id {
+       VOU_DIV_VGA,
+       VOU_DIV_PIC,
+       VOU_DIV_TVENC,
+       VOU_DIV_HDMI_PNX,
+       VOU_DIV_HDMI,
+       VOU_DIV_INF,
+       VOU_DIV_LAYER,
+};
+
+enum vou_div_val {
+       VOU_DIV_1 = 0,
+       VOU_DIV_2 = 1,
+       VOU_DIV_4 = 3,
+       VOU_DIV_8 = 7,
 };
 
-void vou_inf_enable(const struct vou_inf *inf, struct drm_crtc *crtc);
-void vou_inf_disable(const struct vou_inf *inf, struct drm_crtc *crtc);
+struct vou_div_config {
+       enum vou_div_id id;
+       enum vou_div_val val;
+};
+
+void zx_vou_config_dividers(struct drm_crtc *crtc,
+                           struct vou_div_config *configs, int num);
 
 int zx_vou_enable_vblank(struct drm_device *drm, unsigned int pipe);
 void zx_vou_disable_vblank(struct drm_device *drm, unsigned int pipe);
 
+void zx_vou_layer_enable(struct drm_plane *plane);
+void zx_vou_layer_disable(struct drm_plane *plane);
+
 #endif /* __ZX_VOU_H__ */
index f44e7a4ae4417b58ea89b56c9d5ab3a27443c803..c066ef123434ee74f25fffe35ca63e114d6f1e95 100644 (file)
 #define AUX_HBSC_OFFSET                        0x860
 #define AUX_RSZ_OFFSET                 0x800
 
+#define OSD_VL0_OFFSET                 0x040
+#define OSD_VL_OFFSET(i)               (OSD_VL0_OFFSET + 0x050 * (i))
+
+#define HBSC_VL0_OFFSET                        0x760
+#define HBSC_VL_OFFSET(i)              (HBSC_VL0_OFFSET + 0x040 * (i))
+
+#define RSZ_VL1_U0                     0xa00
+#define RSZ_VL_OFFSET(i)               (RSZ_VL1_U0 + 0x200 * (i))
+
 /* OSD (GPC_GLOBAL) registers */
 #define OSD_INT_STA                    0x04
 #define OSD_INT_CLRSTA                 0x08
 )
 #define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
 #define OSD_CTRL0                      0x10
+#define OSD_CTRL0_VL0_EN               BIT(13)
+#define OSD_CTRL0_VL0_SEL              BIT(12)
+#define OSD_CTRL0_VL1_EN               BIT(11)
+#define OSD_CTRL0_VL1_SEL              BIT(10)
+#define OSD_CTRL0_VL2_EN               BIT(9)
+#define OSD_CTRL0_VL2_SEL              BIT(8)
 #define OSD_CTRL0_GL0_EN               BIT(7)
 #define OSD_CTRL0_GL0_SEL              BIT(6)
 #define OSD_CTRL0_GL1_EN               BIT(5)
@@ -60,6 +75,8 @@
 #define CHN_SCREEN_H_SHIFT             5
 #define CHN_SCREEN_H_MASK              (0x1fff << CHN_SCREEN_H_SHIFT)
 #define CHN_UPDATE                     0x08
+#define CHN_INTERLACE_BUF_CTRL         0x24
+#define CHN_INTERLACE_EN               BIT(2)
 
 /* TIMING_CTRL registers */
 #define TIMING_TC_ENABLE               0x04
 #define TIMING_MAIN_SHIFT              0x2c
 #define TIMING_AUX_SHIFT               0x30
 #define H_SHIFT_VAL                    0x0048
+#define V_SHIFT_VAL                    0x0001
+#define SCAN_CTRL                      0x34
+#define AUX_PI_EN                      BIT(19)
+#define MAIN_PI_EN                     BIT(18)
+#define AUX_INTERLACE_SEL              BIT(1)
+#define MAIN_INTERLACE_SEL             BIT(0)
+#define SEC_V_ACTIVE                   0x38
+#define SEC_VACT_MAIN_SHIFT            0
+#define SEC_VACT_MAIN_MASK             (0xffff << SEC_VACT_MAIN_SHIFT)
+#define SEC_VACT_AUX_SHIFT             16
+#define SEC_VACT_AUX_MASK              (0xffff << SEC_VACT_AUX_SHIFT)
+#define SEC_MAIN_V_TIMING              0x3c
+#define SEC_AUX_V_TIMING               0x40
 #define TIMING_MAIN_PI_SHIFT           0x68
 #define TIMING_AUX_PI_SHIFT            0x6c
 #define H_PI_SHIFT_VAL                 0x000f
 #define VOU_INF_DATA_SEL               0x08
 #define VOU_SOFT_RST                   0x14
 #define VOU_CLK_SEL                    0x18
+#define VGA_AUX_DIV_SHIFT              29
+#define VGA_MAIN_DIV_SHIFT             26
+#define PIC_MAIN_DIV_SHIFT             23
+#define PIC_AUX_DIV_SHIFT              20
+#define VOU_CLK_VL2_SEL                        BIT(8)
+#define VOU_CLK_VL1_SEL                        BIT(7)
+#define VOU_CLK_VL0_SEL                        BIT(6)
 #define VOU_CLK_GL1_SEL                        BIT(5)
 #define VOU_CLK_GL0_SEL                        BIT(4)
+#define VOU_DIV_PARA                   0x1c
+#define DIV_PARA_UPDATE                        BIT(31)
+#define TVENC_AUX_DIV_SHIFT            28
+#define HDMI_AUX_PNX_DIV_SHIFT         25
+#define HDMI_MAIN_PNX_DIV_SHIFT                22
+#define HDMI_AUX_DIV_SHIFT             19
+#define HDMI_MAIN_DIV_SHIFT            16
+#define TVENC_MAIN_DIV_SHIFT           13
+#define INF_AUX_DIV_SHIFT              9
+#define INF_MAIN_DIV_SHIFT             6
+#define LAYER_AUX_DIV_SHIFT            3
+#define LAYER_MAIN_DIV_SHIFT           0
 #define VOU_CLK_REQEN                  0x20
 #define VOU_CLK_EN                     0x24
+#define VOU_INF_HDMI_CTRL              0x30
+#define VOU_HDMI_AUD_MASK              0x1f
 
 /* OTFPPU_CTRL registers */
 #define OTFPPU_RSZ_DATA_SOURCE         0x04
index cff060b56da9d6cb8cb138657cf4efca7debef61..ea36b557d5eea87b27171937fe9bc05f680b15a8 100644 (file)
@@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
 #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
index 717704e9ae07bd97d64f38321d2d49f07de62b83..c0303f61c26a94f1998f6883d42a0fc8cb41f432 100644 (file)
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        int brightness;
-       char data[8];
+       char *data;
+
+       data = kmalloc(8, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 8,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 5) {
                dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        brightness = data[4];
        if (brightness < 0 || brightness > 3) {
                dev_warn(dev,
                         "Read invalid backlight brightness: %02hhx.\n",
                         data[4]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
-       return brightness;
+       ret = brightness;
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
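
[Not part of the patch.] The same heap-buffer dance repeats in k90_show_macro_mode() and k90_show_current_profile() below: buffers passed to usb_control_msg() may be DMA-mapped, so they must not live on the stack. A condensed sketch of the idiom — the helper is hypothetical, the driver open-codes it per handler:

/* Hypothetical helper (needs <linux/usb.h> and <linux/slab.h>): read a
 * vendor control request into a heap buffer and hand back the first
 * 'need' bytes; anything shorter than 'need' is treated as an error. */
static int k90_usb_read(struct usb_device *usbdev, u8 request,
                        void *out, int len, int need)
{
        char *data = kmalloc(len, GFP_KERNEL);
        int ret;

        if (!data)
                return -ENOMEM;

        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), request,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, data, len, USB_CTRL_SET_TIMEOUT);
        if (ret < need) {
                ret = -EIO;
        } else {
                memcpy(out, data, need);
                ret = 0;
        }

        kfree(data);
        return ret;
}
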
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        const char *macro_mode;
-       char data[8];
+       char *data;
+
+       data = kmalloc(2, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_GET_MODE,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 2,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 1) {
                dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
        default:
                dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
                         data[0]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+       ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        int current_profile;
-       char data[8];
+       char *data;
+
+       data = kmalloc(8, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 8,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 8) {
                dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        current_profile = data[7];
        if (current_profile < 1 || current_profile > 3) {
                dev_warn(dev, "Read invalid current profile: %02hhx.\n",
                         data[7]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+       ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static ssize_t k90_store_current_profile(struct device *dev,
index 1b764d1745f3daa693a17ee706c302ff31ae0f0e..1689568b597d4e5bb8824ccbe679f627525bf2a4 100644 (file)
@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
                return rdesc;
 
+       if (*rsize < 4)
+               return rdesc;
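+       /* without this, the unsigned '*rsize - 4' bound below would wrap */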
+
        for (i = 0; i < *rsize - 4; i++)
                if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
                        rdesc[i] = 0x19;
index 54bd22dc14110c308744f28f01a7ab4cff79ff95..f46f2c5117fae76a1c87105363e5c8db4c8673a3 100644 (file)
 #define USB_VENDOR_ID_PETALYNX         0x18b1
 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE   0x0037
 
+#define USB_VENDOR_ID_PETZL            0x2122
+#define USB_DEVICE_ID_PETZL_HEADLAMP   0x1234
+
 #define USB_VENDOR_ID_PHILIPS          0x0471
 #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
 
index 78fb32a7b103446136000c8ee9ac64cf7fba7054..ea3c3546cef7f81f0f1c82b996c3ed5ab9fb26ed 100644 (file)
@@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        if (ret)
                goto out_unlock;
 
+       /*
+        * The HID over I2C specification states that if a DEVICE needs time
+        * after the PWR_ON request, it should utilise CLOCK stretching.
+        * However, it has been observed that the Windows driver provides a
+        * 1ms sleep between the PWR_ON and RESET requests and that some devices
+        * rely on this.
+        */
+       usleep_range(1000, 5000);
+
        i2c_hid_dbg(ihid, "resetting...\n");
 
        ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
index b9779bcbd1403f00114f9565c543df79583baa38..8aeca038cc7331244eeeb5dc0468b22ef66e7d44 100644 (file)
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
                return retval;
        }
 
+       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+               wacom_wac->shared->touch = hdev;
+       else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+               wacom_wac->shared->pen = hdev;
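+       /* the pen/touch pairing is done under wacom_udev_list_lock, see below */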
+
 out:
        mutex_unlock(&wacom_udev_list_lock);
        return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
        if (error)
                goto fail;
 
-       error = wacom_add_shared_data(hdev);
-       if (error)
-               goto fail;
-
        /*
         * Bamboo Pad has a generic hid handling for the Pen, and we switch it
         * into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
        wacom_update_name(wacom, wireless ? " (WL)" : "");
 
-       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
-               wacom_wac->shared->touch = hdev;
-       else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
-               wacom_wac->shared->pen = hdev;
+       error = wacom_add_shared_data(hdev);
+       if (error)
+               goto fail;
 
        if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
             (features->quirks & WACOM_QUIRK_BATTERY)) {
index b1a9a3ca6d564c72d3f445e663b196af87743ed1..0884dc9554fdf632e684aa3689292368d5fb7e3b 100644 (file)
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
 
        wacom_report_events(hdev, report);
 
+       /*
+        * Non-input reports may be sent prior to the device being
+        * completely initialized. Since only their events need
+        * to be processed, exit after 'wacom_report_events' has
+        * been called to prevent potential crashes in the report-
+        * processing functions.
+        */
+       if (report->type != HID_INPUT_REPORT)
+               return;
+
        if (WACOM_PAD_FIELD(field)) {
                wacom_wac_pad_battery_report(hdev, report);
                if (wacom->wacom_wac.pad_input)
index c2268cdf38e82348ae883c99d1f140a7fc2d0ecc..e34d82e79b988a781010cad1e0f283617dfb8471 100644 (file)
@@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
                 u8 command, int size, union i2c_smbus_data *data)
 {
        struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
+       unsigned short piix4_smba = adapdata->smba;
+       int retries = MAX_TIMEOUT;
+       int smbslvcnt;
        u8 smba_en_lo;
        u8 port;
        int retval;
 
+       /* Request the SMBus semaphore to avoid conflicts with the IMC */
+       smbslvcnt  = inb_p(SMBSLVCNT);
+       do {
+               outb_p(smbslvcnt | 0x10, SMBSLVCNT);
+
+               /* Check the semaphore status */
+               smbslvcnt  = inb_p(SMBSLVCNT);
+               if (smbslvcnt & 0x10)
+                       break;
+
+               usleep_range(1000, 2000);
+       } while (--retries);
+       /* The SMBus is still owned by the IMC; give up */
+       if (!retries)
+               return -EBUSY;
+
        mutex_lock(&piix4_mutex_sb800);
 
        outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
@@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 
        mutex_unlock(&piix4_mutex_sb800);
 
+       /* Release the semaphore */
+       outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+
        return retval;
 }
 
index cf9e396d7702c6c769e2106a6938f90c6ddbb7a9..583e95042a21d86fe235d2e9784a1f6a6723205b 100644 (file)
@@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev)
        if (!client->irq) {
                int irq = -ENOENT;
 
-               if (dev->of_node) {
+               if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
+                       dev_dbg(dev, "Using Host Notify IRQ\n");
+                       irq = i2c_smbus_host_notify_to_irq(client);
+               } else if (dev->of_node) {
                        irq = of_irq_get_byname(dev->of_node, "irq");
                        if (irq == -EINVAL || irq == -ENODATA)
                                irq = of_irq_get(dev->of_node, 0);
@@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev)
                }
                if (irq == -EPROBE_DEFER)
                        return irq;
-               /*
-                * ACPI and OF did not find any useful IRQ, try to see
-                * if Host Notify can be used.
-                */
-               if (irq < 0) {
-                       dev_dbg(dev, "Using Host Notify IRQ\n");
-                       irq = i2c_smbus_host_notify_to_irq(client);
-               }
+
                if (irq < 0)
                        irq = 0;
 
@@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
 
        if (i2c_check_addr_validity(addr, info.flags)) {
                dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
-                       info.addr, node->full_name);
+                       addr, node->full_name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
        info.of_node = of_node_get(node);
        info.archdata = &dev_ad;
 
+       if (of_property_read_bool(node, "host-notify"))
+               info.flags |= I2C_CLIENT_HOST_NOTIFY;
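+       /* "host-notify" marks clients that interrupt via SMBus Host Notify */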
+
        if (of_get_property(node, "wakeup-source", NULL))
                info.flags |= I2C_CLIENT_WAKE;
 
@@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
        int ret;
 
        if (!client || !slave_cb) {
-               WARN(1, "insufficent data\n");
+               WARN(1, "insufficient data\n");
                return -EINVAL;
        }
 
index 66f323fd39826ebe9a738d55724ee2e124b45419..6f638bbc922db4fd366e1e3dcdd11c8655215622 100644 (file)
@@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
                unsigned long arg)
 {
        struct i2c_smbus_ioctl_data data_arg;
-       union i2c_smbus_data temp;
+       union i2c_smbus_data temp = {};
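+       /* zero-filled so failed reads can't leak stack data to userspace */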
        int datasize, res;
 
        if (copy_from_user(&data_arg,
index f3135ae22df429c743c771c40c29d049869091f5..abd18f31b24f68e27c28f1120e52aa96b9347887 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
-#include <linux/miscdevice.h>
 #include <linux/module.h>
 #include <linux/poll.h>
 #include <linux/init.h>
index 6d949965867190458abaa653636ed4ab6702a4aa..c7d5b2b643d10798eb7da917f16888a2cff2bc3b 100644 (file)
@@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
        input_dev->name = xpad->name;
        input_dev->phys = xpad->phys;
        usb_to_input_id(xpad->udev, &input_dev->id);
+
+       if (xpad->xtype == XTYPE_XBOX360W) {
+               /* x360w controllers and the receiver have different ids */
+               input_dev->id.product = 0x02a1;
+       }
+
        input_dev->dev.parent = &xpad->intf->dev;
 
        input_set_drvdata(input_dev, xpad);
index a8b0a2eec344e863bd292251095a4e84f3a0b98b..7fed92fb8cc137c28e9e54a08d5bea4f1233ba08 100644 (file)
@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {
 
 MODULE_DEVICE_TABLE(i2c, adxl34x_id);
 
-#ifdef CONFIG_OF
 static const struct of_device_id adxl34x_of_id[] = {
        /*
         * The ADXL346 is backward-compatible with the ADXL345. Differences are
@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
 };
 
 MODULE_DEVICE_TABLE(of, adxl34x_of_id);
-#endif
 
 static struct i2c_driver adxl34x_driver = {
        .driver = {
                .name = "adxl34x",
                .pm = &adxl34x_i2c_pm,
-               .of_match_table = of_match_ptr(adxl34x_of_id),
+               .of_match_table = adxl34x_of_id,
        },
        .probe    = adxl34x_i2c_probe,
        .remove   = adxl34x_i2c_remove,
index cde6f4bd8ea2e3f0f70fbccf3f684e5cd079ec04..6d279aa27cb9a10d70a2e732fe9599297883cbc0 100644 (file)
@@ -114,7 +114,7 @@ enum SS4_PACKET_ID {
                                 (_b[1] & 0x7F)         \
                                )
 
-#define SS4_TS_Y_V2(_b)                (s8)(                           \
+#define SS4_TS_Y_V2(_b)                -(s8)(                          \
                                 ((_b[3] & 0x01) << 7) |        \
                                 (_b[2] & 0x7F)         \
                                )
index aa7c5da608005cfb471279b484bcc46f39fe3c82..cb2bf203f4cabac152d8fedd13fe9ec55826f136 100644 (file)
@@ -29,7 +29,7 @@
  * after soft reset, we should wait for 1 ms
  * before the device becomes operational
  */
-#define SOFT_RESET_DELAY_MS    3
+#define SOFT_RESET_DELAY_US    3000
 /* and after hard reset, we should wait for max 500ms */
 #define HARD_RESET_DELAY_MS    500
 
@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
        if (ret) {
                dev_err(&client->dev, "Unable to reset device\n");
        } else {
-               msleep(SOFT_RESET_DELAY_MS);
+               usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
                ret = synaptics_i2c_config(client);
                if (ret)
                        dev_err(&client->dev, "Unable to config device\n");
index 30cc627a4f4531ff93014ea94bf9835b0b37e8bc..8993983e3fe4892b748ff5d77c1597880c63e2c9 100644 (file)
@@ -41,7 +41,8 @@ config RMI4_SMB
 
 config RMI4_F03
         bool "RMI4 Function 03 (PS2 Guest)"
-        depends on RMI4_CORE && SERIO
+       depends on RMI4_CORE
+       depends on SERIO=y || RMI4_CORE=SERIO
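+       # i.e. SERIO built-in, or both RMI4_CORE and SERIO modular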
         help
           Say Y here if you want to add support for RMI4 function 03.
 
index 77551f52220275c0589419ced9f7037cc11062b0..a7618776705ab929e42d4c3e870b4911f1a30d9c 100644 (file)
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
                },
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+               },
+       },
        { }
 };
 
index 02aec284decac37b1a93b16b609007c077db7f98..3e6003d32e565c748a43730574b9424eeb294d33 100644 (file)
@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
 
                case QUEUE_HEADER_NORMAL:
                        report_count = ts->buf[FW_HDR_COUNT];
-                       if (report_count > 3) {
+                       if (report_count == 0 || report_count > 3) {
                                dev_err(&client->dev,
-                                       "too large report count: %*ph\n",
+                                       "bad report count: %*ph\n",
                                        HEADER_SIZE, ts->buf);
                                break;
                        }
index e38936d05df18fa20564c0ed7e8093e3b377fd81..2a514036a83dc0da07c0966b7fe247c18356bbbf 100644 (file)
@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                int is_new);
 struct md_cluster_info;
 
+/* change UNSUPPORTED_MDDEV_FLAGS for each array type if a new flag is added */
 enum mddev_flags {
        MD_ARRAY_FIRST_USE,     /* First use of array, needs initialization */
        MD_CLOSING,             /* If set, we are closing the array, do not open
@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
 {
        return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
 }
+
+/* clear unsupported mddev_flags */
+static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
+       unsigned long unsupported_flags)
+{
+       mddev->flags &= ~unsupported_flags;
+}
 #endif /* _MD_MD_H */
index a162fedeb51a48ce31d18fbca5a51a0fe9204bf4..848365d474f3a3bca26168ac3619f2c52edd7618 100644 (file)
 #include "raid0.h"
 #include "raid5.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS                \
+       ((1L << MD_HAS_JOURNAL) |       \
+        (1L << MD_JOURNAL_CLEAN) |     \
+        (1L << MD_FAILFAST_SUPPORTED))
+
 static int raid0_congested(struct mddev *mddev, int bits)
 {
        struct r0conf *conf = mddev->private;
@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
        mddev->delta_disks = -1;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
-       clear_bit(MD_HAS_JOURNAL, &mddev->flags);
-       clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+       mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
 
        create_strip_zones(mddev, &priv_conf);
 
@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
        mddev->degraded = 0;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
-       clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+       mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
 
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
        mddev->raid_disks = 1;
        /* make sure it will be not marked as dirty */
        mddev->recovery_cp = MaxSector;
-       clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+       mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
 
        create_strip_zones(mddev, &priv_conf);
        return priv_conf;
index a1f3fbed91009c5aa3253109d82db8eed38a2720..7b0f647bcccb513b6650136cddd51bc0ebec796a 100644 (file)
 #include "raid1.h"
 #include "bitmap.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS                \
+       ((1L << MD_HAS_JOURNAL) |       \
+        (1L << MD_JOURNAL_CLEAN))
+
 /*
  * Number of guaranteed r1bios in case of extreme VM load:
  */
@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+                                struct r1bio *r1_bio)
 {
        struct r1conf *conf = mddev->private;
        struct raid1_info *mirror;
-       struct r1bio *r1_bio;
        struct bio *read_bio;
+       struct bitmap *bitmap = mddev->bitmap;
+       const int op = bio_op(bio);
+       const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+       int sectors_handled;
+       int max_sectors;
+       int rdisk;
+
+       wait_barrier(conf, bio);
+
+read_again:
+       rdisk = read_balance(conf, r1_bio, &max_sectors);
+
+       if (rdisk < 0) {
+               /* couldn't find anywhere to read from */
+               raid_end_bio_io(r1_bio);
+               return;
+       }
+       mirror = conf->mirrors + rdisk;
+
+       if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+           bitmap) {
+               /*
+                * Reading from a write-mostly device must take care not to
+                * over-take any writes that are 'behind'
+                */
+               raid1_log(mddev, "wait behind writes");
+               wait_event(bitmap->behind_wait,
+                          atomic_read(&bitmap->behind_writes) == 0);
+       }
+       r1_bio->read_disk = rdisk;
+       r1_bio->start_next_window = 0;
+
+       read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+       bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+                max_sectors);
+
+       r1_bio->bios[rdisk] = read_bio;
+
+       read_bio->bi_iter.bi_sector = r1_bio->sector +
+               mirror->rdev->data_offset;
+       read_bio->bi_bdev = mirror->rdev->bdev;
+       read_bio->bi_end_io = raid1_end_read_request;
+       bio_set_op_attrs(read_bio, op, do_sync);
+       if (test_bit(FailFast, &mirror->rdev->flags) &&
+           test_bit(R1BIO_FailFast, &r1_bio->state))
+               read_bio->bi_opf |= MD_FAILFAST;
+       read_bio->bi_private = r1_bio;
+
+       if (mddev->gendisk)
+               trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+                                     read_bio, disk_devt(mddev->gendisk),
+                                     r1_bio->sector);
+
+       if (max_sectors < r1_bio->sectors) {
+               /*
+                * could not read all from this device, so we will need another
+                * r1_bio.
+                */
+               sectors_handled = (r1_bio->sector + max_sectors
+                                  - bio->bi_iter.bi_sector);
+               r1_bio->sectors = max_sectors;
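+               /* bi_phys_segments counts the outstanding r1_bios for this bio */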
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+
+               /*
+                * Cannot call generic_make_request directly as that will be
+                * queued in __make_request and subsequent mempool_alloc might
+                * block waiting for it.  So hand bio over to raid1d.
+                */
+               reschedule_retry(r1_bio);
+
+               r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+               r1_bio->master_bio = bio;
+               r1_bio->sectors = bio_sectors(bio) - sectors_handled;
+               r1_bio->state = 0;
+               r1_bio->mddev = mddev;
+               r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+               goto read_again;
+       } else
+               generic_make_request(read_bio);
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+                               struct r1bio *r1_bio)
+{
+       struct r1conf *conf = mddev->private;
        int i, disks;
-       struct bitmap *bitmap;
+       struct bitmap *bitmap = mddev->bitmap;
        unsigned long flags;
        const int op = bio_op(bio);
-       const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
        const unsigned long do_flush_fua = (bio->bi_opf &
                                                (REQ_PREFLUSH | REQ_FUA));
@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 
        md_write_start(mddev, bio); /* wait on superblock update early */
 
-       if (bio_data_dir(bio) == WRITE &&
-           ((bio_end_sector(bio) > mddev->suspend_lo &&
+       if ((bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_iter.bi_sector < mddev->suspend_hi) ||
            (mddev_is_clustered(mddev) &&
             md_cluster_ops->area_resyncing(mddev, WRITE,
-                    bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
-               /* As the suspend_* range is controlled by
-                * userspace, we want an interruptible
-                * wait.
+                    bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+
+               /*
+                * As the suspend_* range is controlled by userspace, we want
+                * an interruptible wait.
                 */
                DEFINE_WAIT(w);
                for (;;) {
@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
                            bio->bi_iter.bi_sector >= mddev->suspend_hi ||
                            (mddev_is_clustered(mddev) &&
                             !md_cluster_ops->area_resyncing(mddev, WRITE,
-                                    bio->bi_iter.bi_sector, bio_end_sector(bio))))
+                                    bio->bi_iter.bi_sector,
+                                    bio_end_sector(bio))))
                                break;
                        schedule();
                }
                finish_wait(&conf->wait_barrier, &w);
        }
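The loop above is the open-coded interruptible-wait idiom: because suspend_lo/suspend_hi can be moved by userspace at any time, the condition is re-checked after every wakeup rather than trusted once. Stripped to its shape (a hedged sketch; wq and done() are placeholders, not md symbols):

	DEFINE_WAIT(w);
	for (;;) {
		prepare_to_wait(&wq, &w, TASK_INTERRUPTIBLE);
		if (done())		/* re-check the condition each pass */
			break;
		schedule();		/* sleep until woken or signalled */
	}
	finish_wait(&wq, &w);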
-
        start_next_window = wait_barrier(conf, bio);
 
-       bitmap = mddev->bitmap;
-
-       /*
-        * make_request() can abort the operation when read-ahead is being
-        * used and no empty request is available.
-        *
-        */
-       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-       r1_bio->master_bio = bio;
-       r1_bio->sectors = bio_sectors(bio);
-       r1_bio->state = 0;
-       r1_bio->mddev = mddev;
-       r1_bio->sector = bio->bi_iter.bi_sector;
-
-       /* We might need to issue multiple reads to different
-        * devices if there are bad blocks around, so we keep
-        * track of the number of reads in bio->bi_phys_segments.
-        * If this is 0, there is only one r1_bio and no locking
-        * will be needed when requests complete.  If it is
-        * non-zero, then it is the number of not-completed requests.
-        */
-       bio->bi_phys_segments = 0;
-       bio_clear_flag(bio, BIO_SEG_VALID);
-
-       if (rw == READ) {
-               /*
-                * read balancing logic:
-                */
-               int rdisk;
-
-read_again:
-               rdisk = read_balance(conf, r1_bio, &max_sectors);
-
-               if (rdisk < 0) {
-                       /* couldn't find anywhere to read from */
-                       raid_end_bio_io(r1_bio);
-                       return;
-               }
-               mirror = conf->mirrors + rdisk;
-
-               if (test_bit(WriteMostly, &mirror->rdev->flags) &&
-                   bitmap) {
-                       /* Reading from a write-mostly device must
-                        * take care not to over-take any writes
-                        * that are 'behind'
-                        */
-                       raid1_log(mddev, "wait behind writes");
-                       wait_event(bitmap->behind_wait,
-                                  atomic_read(&bitmap->behind_writes) == 0);
-               }
-               r1_bio->read_disk = rdisk;
-               r1_bio->start_next_window = 0;
-
-               read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
-                        max_sectors);
-
-               r1_bio->bios[rdisk] = read_bio;
-
-               read_bio->bi_iter.bi_sector = r1_bio->sector +
-                       mirror->rdev->data_offset;
-               read_bio->bi_bdev = mirror->rdev->bdev;
-               read_bio->bi_end_io = raid1_end_read_request;
-               bio_set_op_attrs(read_bio, op, do_sync);
-               if (test_bit(FailFast, &mirror->rdev->flags) &&
-                   test_bit(R1BIO_FailFast, &r1_bio->state))
-                       read_bio->bi_opf |= MD_FAILFAST;
-               read_bio->bi_private = r1_bio;
-
-               if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-                                             read_bio, disk_devt(mddev->gendisk),
-                                             r1_bio->sector);
-
-               if (max_sectors < r1_bio->sectors) {
-                       /* could not read all from this device, so we will
-                        * need another r1_bio.
-                        */
-
-                       sectors_handled = (r1_bio->sector + max_sectors
-                                          - bio->bi_iter.bi_sector);
-                       r1_bio->sectors = max_sectors;
-                       spin_lock_irq(&conf->device_lock);
-                       if (bio->bi_phys_segments == 0)
-                               bio->bi_phys_segments = 2;
-                       else
-                               bio->bi_phys_segments++;
-                       spin_unlock_irq(&conf->device_lock);
-                       /* Cannot call generic_make_request directly
-                        * as that will be queued in __make_request
-                        * and subsequent mempool_alloc might block waiting
-                        * for it.  So hand bio over to raid1d.
-                        */
-                       reschedule_retry(r1_bio);
-
-                       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
-                       r1_bio->master_bio = bio;
-                       r1_bio->sectors = bio_sectors(bio) - sectors_handled;
-                       r1_bio->state = 0;
-                       r1_bio->mddev = mddev;
-                       r1_bio->sector = bio->bi_iter.bi_sector +
-                               sectors_handled;
-                       goto read_again;
-               } else
-                       generic_make_request(read_bio);
-               return;
-       }
-
-       /*
-        * WRITE:
-        */
        if (conf->pending_count >= max_queued_requests) {
                md_wakeup_thread(mddev->thread);
                raid1_log(mddev, "wait queued");
@@ -1280,8 +1261,7 @@ read_again:
                        int bad_sectors;
                        int is_bad;
 
-                       is_bad = is_badblock(rdev, r1_bio->sector,
-                                            max_sectors,
+                       is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
                                             &first_bad, &bad_sectors);
                        if (is_bad < 0) {
                                /* mustn't write here until the bad block is
@@ -1370,7 +1350,8 @@ read_again:
                        continue;
 
                mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
+               bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
+                        max_sectors);
 
                if (first_clone) {
                        /* do behind I/O ?
@@ -1464,6 +1445,40 @@ read_again:
        wake_up(&conf->wait_barrier);
 }
 
+static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r1conf *conf = mddev->private;
+       struct r1bio *r1_bio;
+
+       /*
+        * make_request() can abort the operation when read-ahead is being
+        * used and no empty request is available.
+        */
+       r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+       r1_bio->master_bio = bio;
+       r1_bio->sectors = bio_sectors(bio);
+       r1_bio->state = 0;
+       r1_bio->mddev = mddev;
+       r1_bio->sector = bio->bi_iter.bi_sector;
+
+       /*
+        * We might need to issue multiple reads to different devices if there
+        * are bad blocks around, so we keep track of the number of reads in
+        * bio->bi_phys_segments.  If this is 0, there is only one r1_bio and
+        * no locking will be needed when requests complete.  If it is
+        * non-zero, then it is the number of not-completed requests.
+        */
+       bio->bi_phys_segments = 0;
+       bio_clear_flag(bio, BIO_SEG_VALID);
+
+       if (bio_data_dir(bio) == READ)
+               raid1_read_request(mddev, bio, r1_bio);
+       else
+               raid1_write_request(mddev, bio, r1_bio);
+}
+
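The bi_phys_segments convention in the comment above deserves a worked illustration: the field is repurposed as a count of outstanding sub-requests, where 0 means a single unsplit r1_bio and the first split jumps straight to 2 (the part already issued plus the new one). A sketch of the accounting, mirroring the locking in the hunks above:

	/* Called each time part of 'bio' is handed off as its own r1_bio. */
	static void note_split(struct bio *bio, spinlock_t *lock)
	{
		spin_lock_irq(lock);
		if (bio->bi_phys_segments == 0)
			bio->bi_phys_segments = 2;	/* first split */
		else
			bio->bi_phys_segments++;	/* each later split */
		spin_unlock_irq(lock);
	}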
 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r1conf *conf = mddev->private;
@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
                if (!IS_ERR(conf)) {
                        /* Array must appear to be quiesced */
                        conf->array_frozen = 1;
-                       clear_bit(MD_HAS_JOURNAL, &mddev->flags);
-                       clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+                       mddev_clear_unsupported_flags(mddev,
+                               UNSUPPORTED_MDDEV_FLAGS);
                }
                return conf;
        }
index ab5e86209322fc0d0ed3e2db18a468c6f5054f13..1920756828dfb3fd512e2cb5705f329f10324698 100644 (file)
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void __make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+                               struct r10bio *r10_bio)
 {
        struct r10conf *conf = mddev->private;
-       struct r10bio *r10_bio;
        struct bio *read_bio;
+       const int op = bio_op(bio);
+       const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+       int sectors_handled;
+       int max_sectors;
+       sector_t sectors;
+       struct md_rdev *rdev;
+       int slot;
+
+       /*
+        * Register the new request and wait if the reconstruction
+        * thread has put up a barrier for new requests.
+        * Continue immediately if no resync is active currently.
+        */
+       wait_barrier(conf);
+
+       sectors = bio_sectors(bio);
+       while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+           bio->bi_iter.bi_sector < conf->reshape_progress &&
+           bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+               /*
+                * IO spans the reshape position.  Need to wait for reshape to
+                * pass
+                */
+               raid10_log(conf->mddev, "wait reshape");
+               allow_barrier(conf);
+               wait_event(conf->wait_barrier,
+                          conf->reshape_progress <= bio->bi_iter.bi_sector ||
+                          conf->reshape_progress >= bio->bi_iter.bi_sector +
+                          sectors);
+               wait_barrier(conf);
+       }
+
+read_again:
+       rdev = read_balance(conf, r10_bio, &max_sectors);
+       if (!rdev) {
+               raid_end_bio_io(r10_bio);
+               return;
+       }
+       slot = r10_bio->read_slot;
+
+       read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+       bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+                max_sectors);
+
+       r10_bio->devs[slot].bio = read_bio;
+       r10_bio->devs[slot].rdev = rdev;
+
+       read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+               choose_data_offset(r10_bio, rdev);
+       read_bio->bi_bdev = rdev->bdev;
+       read_bio->bi_end_io = raid10_end_read_request;
+       bio_set_op_attrs(read_bio, op, do_sync);
+       if (test_bit(FailFast, &rdev->flags) &&
+           test_bit(R10BIO_FailFast, &r10_bio->state))
+               read_bio->bi_opf |= MD_FAILFAST;
+       read_bio->bi_private = r10_bio;
+
+       if (mddev->gendisk)
+               trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+                                     read_bio, disk_devt(mddev->gendisk),
+                                     r10_bio->sector);
+       if (max_sectors < r10_bio->sectors) {
+               /*
+                * Could not read all from this device, so we will need another
+                * r10_bio.
+                */
+               sectors_handled = (r10_bio->sector + max_sectors
+                                  - bio->bi_iter.bi_sector);
+               r10_bio->sectors = max_sectors;
+               spin_lock_irq(&conf->device_lock);
+               if (bio->bi_phys_segments == 0)
+                       bio->bi_phys_segments = 2;
+               else
+                       bio->bi_phys_segments++;
+               spin_unlock_irq(&conf->device_lock);
+               /*
+                * Cannot call generic_make_request directly as that will be
+                * queued in __generic_make_request and subsequent
+        * mempool_alloc might block waiting for it.  So hand bio over
+                * to raid10d.
+                */
+               reschedule_retry(r10_bio);
+
+               r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+               r10_bio->master_bio = bio;
+               r10_bio->sectors = bio_sectors(bio) - sectors_handled;
+               r10_bio->state = 0;
+               r10_bio->mddev = mddev;
+               r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+               goto read_again;
+       } else
+               generic_make_request(read_bio);
+       return;
+}
+
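The reshape stall above hinges on a half-open interval test: the bio covers [bi_sector, bi_sector + sectors) and must wait only while reshape_progress falls strictly inside that range. A self-contained model of the predicate (plain C, values illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	static bool spans(unsigned long start, unsigned long len,
			  unsigned long pos)
	{
		return start < pos && start + len > pos;
	}

	int main(void)
	{
		/* reshape_progress = 100: 90..110 spans it, 100..120 does not */
		printf("%d %d\n", spans(90, 20, 100), spans(100, 20, 100));
		return 0;
	}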
+static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+                                struct r10bio *r10_bio)
+{
+       struct r10conf *conf = mddev->private;
        int i;
        const int op = bio_op(bio);
-       const int rw = bio_data_dir(bio);
        const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
        const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
        unsigned long flags;
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid10_plug_cb *plug = NULL;
+       sector_t sectors;
        int sectors_handled;
        int max_sectors;
-       int sectors;
 
        md_write_start(mddev, bio);
 
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_iter.bi_sector < conf->reshape_progress &&
            bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
-               /* IO spans the reshape position.  Need to wait for
-                * reshape to pass
+               /*
+                * IO spans the reshape position.  Need to wait for reshape to
+                * pass
                 */
                raid10_log(conf->mddev, "wait reshape");
                allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
                           sectors);
                wait_barrier(conf);
        }
+
        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-           bio_data_dir(bio) == WRITE &&
            (mddev->reshape_backwards
             ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
                bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
                conf->reshape_safe = mddev->reshape_position;
        }
 
-       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
-       r10_bio->master_bio = bio;
-       r10_bio->sectors = sectors;
-
-       r10_bio->mddev = mddev;
-       r10_bio->sector = bio->bi_iter.bi_sector;
-       r10_bio->state = 0;
-
-       /* We might need to issue multiple reads to different
-        * devices if there are bad blocks around, so we keep
-        * track of the number of reads in bio->bi_phys_segments.
-        * If this is 0, there is only one r10_bio and no locking
-        * will be needed when the request completes.  If it is
-        * non-zero, then it is the number of not-completed requests.
-        */
-       bio->bi_phys_segments = 0;
-       bio_clear_flag(bio, BIO_SEG_VALID);
-
-       if (rw == READ) {
-               /*
-                * read balancing logic:
-                */
-               struct md_rdev *rdev;
-               int slot;
-
-read_again:
-               rdev = read_balance(conf, r10_bio, &max_sectors);
-               if (!rdev) {
-                       raid_end_bio_io(r10_bio);
-                       return;
-               }
-               slot = r10_bio->read_slot;
-
-               read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
-                        max_sectors);
-
-               r10_bio->devs[slot].bio = read_bio;
-               r10_bio->devs[slot].rdev = rdev;
-
-               read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
-                       choose_data_offset(r10_bio, rdev);
-               read_bio->bi_bdev = rdev->bdev;
-               read_bio->bi_end_io = raid10_end_read_request;
-               bio_set_op_attrs(read_bio, op, do_sync);
-               if (test_bit(FailFast, &rdev->flags) &&
-                   test_bit(R10BIO_FailFast, &r10_bio->state))
-                       read_bio->bi_opf |= MD_FAILFAST;
-               read_bio->bi_private = r10_bio;
-
-               if (mddev->gendisk)
-                       trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
-                                             read_bio, disk_devt(mddev->gendisk),
-                                             r10_bio->sector);
-               if (max_sectors < r10_bio->sectors) {
-                       /* Could not read all from this device, so we will
-                        * need another r10_bio.
-                        */
-                       sectors_handled = (r10_bio->sector + max_sectors
-                                          - bio->bi_iter.bi_sector);
-                       r10_bio->sectors = max_sectors;
-                       spin_lock_irq(&conf->device_lock);
-                       if (bio->bi_phys_segments == 0)
-                               bio->bi_phys_segments = 2;
-                       else
-                               bio->bi_phys_segments++;
-                       spin_unlock_irq(&conf->device_lock);
-                       /* Cannot call generic_make_request directly
-                        * as that will be queued in __generic_make_request
-                        * and subsequent mempool_alloc might block
-                        * waiting for it.  so hand bio over to raid10d.
-                        */
-                       reschedule_retry(r10_bio);
-
-                       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
-                       r10_bio->master_bio = bio;
-                       r10_bio->sectors = bio_sectors(bio) - sectors_handled;
-                       r10_bio->state = 0;
-                       r10_bio->mddev = mddev;
-                       r10_bio->sector = bio->bi_iter.bi_sector +
-                               sectors_handled;
-                       goto read_again;
-               } else
-                       generic_make_request(read_bio);
-               return;
-       }
-
-       /*
-        * WRITE:
-        */
        if (conf->pending_count >= max_queued_requests) {
                md_wakeup_thread(mddev->thread);
                raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
                        int bad_sectors;
                        int is_bad;
 
-                       is_bad = is_badblock(rdev, dev_sector,
-                                            max_sectors,
+                       is_bad = is_badblock(rdev, dev_sector, max_sectors,
                                             &first_bad, &bad_sectors);
                        if (is_bad < 0) {
                                /* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
                        r10_bio->devs[i].bio = mbio;
 
                        mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
-                                          choose_data_offset(r10_bio,
-                                                             rdev));
+                                          choose_data_offset(r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
                        bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
                        r10_bio->devs[i].repl_bio = mbio;
 
                        mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
-                                          choose_data_offset(
-                                                  r10_bio, rdev));
+                                          choose_data_offset(r10_bio, rdev));
                        mbio->bi_bdev = rdev->bdev;
                        mbio->bi_end_io = raid10_end_write_request;
                        bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
        one_write_done(r10_bio);
 }
 
+static void __make_request(struct mddev *mddev, struct bio *bio)
+{
+       struct r10conf *conf = mddev->private;
+       struct r10bio *r10_bio;
+
+       r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+       r10_bio->master_bio = bio;
+       r10_bio->sectors = bio_sectors(bio);
+
+       r10_bio->mddev = mddev;
+       r10_bio->sector = bio->bi_iter.bi_sector;
+       r10_bio->state = 0;
+
+       /*
+        * We might need to issue multiple reads to different devices if there
+        * are bad blocks around, so we keep track of the number of reads in
+        * bio->bi_phys_segments.  If this is 0, there is only one r10_bio and
+        * no locking will be needed when the request completes.  If it is
+        * non-zero, then it is the number of not-completed requests.
+        */
+       bio->bi_phys_segments = 0;
+       bio_clear_flag(bio, BIO_SEG_VALID);
+
+       if (bio_data_dir(bio) == READ)
+               raid10_read_request(mddev, bio, r10_bio);
+       else
+               raid10_write_request(mddev, bio, r10_bio);
+}
+
 static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
index d7bfb6fc8aef8808b143c024f823bab4e6bf640b..0e8ed2c327b07fd849c1720d7b272dd860b949b9 100644 (file)
@@ -1682,8 +1682,7 @@ out:
 
 static struct stripe_head *
 r5c_recovery_alloc_stripe(struct r5conf *conf,
-                         sector_t stripe_sect,
-                         sector_t log_start)
+                         sector_t stripe_sect)
 {
        struct stripe_head *sh;
 
@@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
                return NULL;  /* no more stripe available */
 
        r5l_recovery_reset_stripe(sh);
-       sh->log_start = log_start;
 
        return sh;
 }
@@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                                stripe_sect);
 
                if (!sh) {
-                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+                       sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
                        /*
                         * cannot get stripe from raid5_get_active_stripe
                         * try replay some stripes
@@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                r5c_recovery_replay_stripes(
                                        cached_stripe_list, ctx);
                                sh = r5c_recovery_alloc_stripe(
-                                       conf, stripe_sect, ctx->pos);
+                                       conf, stripe_sect);
                        }
                        if (!sh) {
                                pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
@@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                        conf->min_nr_stripes * 2);
                                raid5_set_cache_size(mddev,
                                                     conf->min_nr_stripes * 2);
-                               sh = r5c_recovery_alloc_stripe(
-                                       conf, stripe_sect, ctx->pos);
+                               sh = r5c_recovery_alloc_stripe(conf,
+                                                              stripe_sect);
                        }
                        if (!sh) {
                                pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
@@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                        if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
                            test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
                                r5l_recovery_replay_one_stripe(conf, sh, ctx);
-                               sh->log_start = ctx->pos;
                                list_move_tail(&sh->lru, cached_stripe_list);
                        }
                        r5l_recovery_load_data(log, sh, ctx, payload,
@@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
                        set_bit(R5_UPTODATE, &dev->flags);
                }
        }
-       list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
-       atomic_inc(&log->stripe_in_journal_count);
 }
 
 /*
@@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
        struct stripe_head *sh, *next;
        struct mddev *mddev = log->rdev->mddev;
        struct page *page;
+       sector_t next_checkpoint = MaxSector;
 
        page = alloc_page(GFP_KERNEL);
        if (!page) {
@@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                return -ENOMEM;
        }
 
+       WARN_ON(list_empty(&ctx->cached_list));
+
        list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
                struct r5l_meta_block *mb;
                int i;
@@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
                sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
                             REQ_OP_WRITE, REQ_FUA, false);
                sh->log_start = ctx->pos;
+               list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+               atomic_inc(&log->stripe_in_journal_count);
                ctx->pos = write_pos;
                ctx->seq += 1;
-
+               next_checkpoint = sh->log_start;
                list_del_init(&sh->lru);
                raid5_release_stripe(sh);
        }
+       log->next_checkpoint = next_checkpoint;
        __free_page(page);
        return 0;
 }
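The hunk above folds the next_checkpoint bookkeeping into the rewrite loop: each stripe records the log position it was rewritten at, and the position of the last stripe written becomes the resume point (MaxSector if the list was empty, which the new WARN_ON guards against). The shape of that last-writer-wins scan, sketched with illustrative types and an arbitrary block size:

	struct stripe { unsigned long long log_start; };

	static unsigned long long rewrite_all(struct stripe *s, int n,
					      unsigned long long pos)
	{
		unsigned long long next_checkpoint = ~0ULL; /* MaxSector stand-in */
		int i;

		for (i = 0; i < n; i++) {
			s[i].log_start = pos;		/* where stripe i landed */
			next_checkpoint = s[i].log_start;
			pos += 8;			/* advance past the block written */
		}
		return next_checkpoint;			/* recovery resumes here */
	}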
@@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log)
        struct r5l_recovery_ctx ctx;
        int ret;
        sector_t pos;
-       struct stripe_head *sh;
 
        ctx.pos = log->last_checkpoint;
        ctx.seq = log->last_cp_seq;
@@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log)
                log->next_checkpoint = ctx.pos;
                r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
                ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
-       } else {
-               sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
-               log->next_checkpoint = sh->log_start;
        }
 
        if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
                pr_debug("md/raid:%s: starting from clean shutdown\n",
                         mdname(mddev));
        else {
-               pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+               pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
                         mdname(mddev), ctx.data_only_stripes,
                         ctx.data_parity_stripes);
 
@@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
        if (do_wakeup)
                wake_up(&conf->wait_for_overlap);
 
-       if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
-               return;
-
        spin_lock_irq(&conf->log->stripe_in_journal_lock);
        list_del_init(&sh->r5c);
        spin_unlock_irq(&conf->log->stripe_in_journal_lock);
@@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
        spin_lock_init(&log->stripe_in_journal_lock);
        atomic_set(&log->stripe_in_journal_count, 0);
 
+       rcu_assign_pointer(conf->log, log);
+
        if (r5l_load_log(log))
                goto error;
 
-       rcu_assign_pointer(conf->log, log);
        set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
        return 0;
 
 error:
+       rcu_assign_pointer(conf->log, NULL);
        md_unregister_thread(&log->reclaim_thread);
 reclaim_thread:
        mempool_destroy(log->meta_pool);
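The reorder above publishes conf->log before r5l_load_log() runs, since recovery work performed during the load may dereference conf->log under RCU; the new line in the error path unpublishes it before teardown. The publish-then-rollback shape, condensed (a sketch, not the full function):

	rcu_assign_pointer(conf->log, log);		/* visible before load */

	if (r5l_load_log(log)) {
		rcu_assign_pointer(conf->log, NULL);	/* undo on failure */
		goto error;
	}
	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;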
index 06d7279bdd048e66369961194bc1d21567a08446..36c13e4be9c9e5d0cedacb59910e7b4482eb6ddd 100644 (file)
@@ -62,6 +62,8 @@
 #include "raid0.h"
 #include "bitmap.h"
 
+#define UNSUPPORTED_MDDEV_FLAGS        (1L << MD_FAILFAST_SUPPORTED)
+
 #define cpu_to_group(cpu) cpu_to_node(cpu)
 #define ANY_GROUP NUMA_NO_NODE
 
@@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
        mddev->new_chunk_sectors = chunksect;
 
        ret = setup_conf(mddev);
-       if (!IS_ERR_VALUE(ret))
-               clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+       if (!IS_ERR(ret))
+               mddev_clear_unsupported_flags(mddev,
+                       UNSUPPORTED_MDDEV_FLAGS);
        return ret;
 }
 
index a0547dbf980645104d862fdc39e2ad740107ad15..76382c858c35435b98e061a7dda49d5dacad9585 100644 (file)
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
        struct ms_id_register id_reg;
 
        if (!(*mrq)) {
-               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
                                  sizeof(struct ms_id_register));
                *mrq = &card->current_mrq;
                return 0;
index 18e05ca7584f9723ba12bf26a0aaf145b5903896..3600c9993a9830504d0cc5bcd61c4b8f19376762 100644 (file)
@@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
 {
        int ret;
 
+       if (!cldev->bus->hbm_f_os_supported)
+               return;
+
        ret = mei_cldev_enable(cldev);
        if (ret)
                return;
index c6c051b52f55faf1b5896164a4e5e3e251f86899..c6217a4993ad4da05094978e50cc3f1acb1b1fd5 100644 (file)
@@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
                                 dev->hbm_f_ev_supported);
                pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
                                 dev->hbm_f_fa_supported);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
+                                dev->hbm_f_os_supported);
        }
 
        pos += scnprintf(buf + pos, bufsz - pos, "pg:  %s, %s\n",
index dd7f15a65eeddbdf53bd2b6c094bfa98be969caf..25b4a1ba522df87fa60f0c8a9555607aa76df6d6 100644 (file)
@@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
        /* Fixed Address Client Support */
        if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
                dev->hbm_f_fa_supported = 1;
+
+       /* OS ver message Support */
+       if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
+               dev->hbm_f_os_supported = 1;
 }
 
 /**
index 9daf3f9aed2508e08070cb3ef96a53fc43f8d5f9..e1e4d47d4d7d21f5d62a7b514b2886277f9ae9bb 100644 (file)
 #define HBM_MINOR_VERSION_FA               0
 #define HBM_MAJOR_VERSION_FA               2
 
+/*
+ * MEI version with OS ver message support
+ */
+#define HBM_MINOR_VERSION_OS               0
+#define HBM_MAJOR_VERSION_OS               2
+
 /* Host bus message command opcode */
 #define MEI_HBM_CMD_OP_MSK                  0x7f
 /* Host bus message command RESPONSE */
index 699693cd8c59ddde7cd109c550d92f955b0f4c0e..8dadb98662a9e61da14ed8d95268bc9b4ec23d2b 100644 (file)
@@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
  * @hbm_f_ev_supported  : hbm feature event notification
  * @hbm_f_fa_supported  : hbm feature fixed address client
  * @hbm_f_ie_supported  : hbm feature immediate reply to enum request
+ * @hbm_f_os_supported  : hbm feature support OS ver message
  *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
@@ -487,6 +488,7 @@ struct mei_device {
        unsigned int hbm_f_ev_supported:1;
        unsigned int hbm_f_fa_supported:1;
        unsigned int hbm_f_ie_supported:1;
+       unsigned int hbm_f_os_supported:1;
 
        struct rw_semaphore me_clients_rwsem;
        struct list_head me_clients;
index b11c3455b040c5b03e8083e9b8e4f234b0752537..e6ea8503f40c8466643db34f6ee36ff19345aa89 100644 (file)
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
                }
        } while (busy);
 
-       if (host->ops->card_busy && send_status)
-               return mmc_switch_status(card);
-
        return 0;
 }
 
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
        if (!use_busy_signal)
                goto out;
 
-       /* Switch to new timing before poll and check switch status. */
-       if (timing)
-               mmc_set_timing(host, timing);
-
        /*If SPI or used HW busy detection above, then we don't need to poll. */
        if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
-               mmc_host_is_spi(host)) {
-               if (send_status)
-                       err = mmc_switch_status(card);
+               mmc_host_is_spi(host))
                goto out_tim;
-       }
 
        /* Let's try to poll to find out when the command is completed. */
        err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+       if (err)
+               goto out;
 
 out_tim:
-       if (err && timing)
-               mmc_set_timing(host, old_timing);
+       /* Switch to new timing before check switch status. */
+       if (timing)
+               mmc_set_timing(host, timing);
+
+       if (send_status) {
+               err = mmc_switch_status(card);
+               if (err && timing)
+                       mmc_set_timing(host, old_timing);
+       }
 out:
        mmc_retune_release(host);
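The restructured tail of __mmc_switch() above changes the post-CMD6 ordering: poll for busy while still at the old timing, switch the host to the new timing only once the card is idle, then verify with a status read at the new timing, reverting if that read fails. Condensed:

	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;			/* still at the old timing */

	if (timing)
		mmc_set_timing(host, timing);	/* card is idle now */

	if (send_status) {
		err = mmc_switch_status(card);	/* verify at the new timing */
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}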
 
index b352760c041ee5332be4eddc61a6f0cf0816bba6..09739352834c8253eedac3000b703e42c37eacfc 100644 (file)
@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
 {
        struct meson_host *host = dev_id;
        struct mmc_request *mrq;
-       struct mmc_command *cmd = host->cmd;
+       struct mmc_command *cmd;
        u32 irq_en, status, raw_status;
        irqreturn_t ret = IRQ_HANDLED;
 
        if (WARN_ON(!host))
                return IRQ_NONE;
 
+       cmd = host->cmd;
+
        mrq = host->mrq;
 
        if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
        int ret = IRQ_HANDLED;
 
        if (WARN_ON(!mrq))
-               ret = IRQ_NONE;
+               return IRQ_NONE;
 
        if (WARN_ON(!cmd))
-               ret = IRQ_NONE;
+               return IRQ_NONE;
 
        data = cmd->data;
        if (data) {
index 44ecebd1ea8c1834a5d311fbe36ddfc8383e3ecd..c8b8ac66ff7e3a4839ba4943c9069e540846c87e 100644 (file)
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
        cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
        cmd1 = cmd->arg;
 
+       if (cmd->opcode == MMC_STOP_TRANSMISSION)
+               cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
        if (host->sdio_irq_en) {
                ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
                cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
                       ssp->base + HW_SSP_BLOCK_SIZE);
        }
 
-       if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
-           (cmd->opcode == SD_IO_RW_EXTENDED))
+       if (cmd->opcode == SD_IO_RW_EXTENDED)
                cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
 
        cmd1 = cmd->arg;
index 160f695cc09c611cf4aabdeeff236d672c66b639..278a5a435ab76bc422dd748617c96611fe09b105 100644 (file)
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        /* Power on the SDHCI controller and its children */
        acpi_device_fix_up_power(device);
        list_for_each_entry(child, &device->children, node)
-               acpi_device_fix_up_power(child);
+               if (child->status.present && child->status.enabled)
+                       acpi_device_fix_up_power(child);
 
        if (acpi_bus_get_status(device) || !device->status.present)
                return -ENODEV;
index 353a9ddf6b975d48d522da646761660b35b44d4c..9ce5dcb4abd0f5026384ed4e0466d1ddfa44c617 100644 (file)
@@ -426,6 +426,7 @@ config MTD_NAND_ORION
 
 config MTD_NAND_OXNAS
        tristate "NAND Flash support for Oxford Semiconductor SoC"
+       depends on HAS_IOMEM
        help
          This enables the NAND flash controller on Oxford Semiconductor SoCs.
 
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC
          Flexible Static Memory Controller (FSMC)
 
 config MTD_NAND_XWAY
-       tristate "Support for NAND on Lantiq XWAY SoC"
+       bool "Support for NAND on Lantiq XWAY SoC"
        depends on LANTIQ && SOC_TYPE_XWAY
        help
          Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
index 5553a5d9efd1144b276a20290f5cb2be9fc302f0..846a66c1b1338f92e65a9548c6fa32e63d834294 100644 (file)
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        init_completion(&host->comp_controller);
 
        host->irq = platform_get_irq(pdev, 0);
-       if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+       if (host->irq < 0) {
                dev_err(&pdev->dev, "failed to get platform irq\n");
                res = -EINVAL;
                goto err_exit3;
index 28c7f474be77b8b7ccab3379d81bdaa6006594ae..4a5e948c62df1b7d29ff9fe78b50e7d0858b01d5 100644 (file)
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
        if (IS_ERR(nfc->pbus_base))
                return PTR_ERR(nfc->pbus_base);
 
+       writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
-       nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
+       nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
        if (IS_ERR(nfc->chan))
                return PTR_ERR(nfc->chan);
 
index 1f2948c0c458d60ec60e898bb1b4893ddba33ecf..895101a5e686457c0dc63237efcb33a79c43a559 100644 (file)
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
        { .compatible = "lantiq,nand-xway" },
        {},
 };
-MODULE_DEVICE_TABLE(of, xway_nand_match);
 
 static struct platform_driver xway_nand_driver = {
        .probe  = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
        },
 };
 
-module_platform_driver(xway_nand_driver);
-
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_nand_driver);
index b8c293373eccd8293ab50fea6a4ce25b1745076b..a306de4318d7d2301fdcb8f34c7045e884278ad4 100644 (file)
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
  */
 static int ipddp_create(struct ipddp_route *new_rt)
 {
-        struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+        struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
 
         if (rt == NULL)
                 return -ENOMEM;
index 9ec33b51a0edad879701bef79d6c8f250d778b91..2ce7ae97ac9148d39137eff66954dfc3b4cf239b 100644 (file)
@@ -393,7 +393,7 @@ static int bcm_sf2_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
        if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
                return bcm_sf2_sw_indir_rw(priv, 1, addr, regnum, 0);
        else
-               return mdiobus_read(priv->master_mii_bus, addr, regnum);
+               return mdiobus_read_nested(priv->master_mii_bus, addr, regnum);
 }
 
 static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
@@ -407,7 +407,7 @@ static int bcm_sf2_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
        if (addr == BRCM_PSEUDO_PHY_ADDR && priv->indir_phy_mask & BIT(addr))
                bcm_sf2_sw_indir_rw(priv, 0, addr, regnum, val);
        else
-               mdiobus_write(priv->master_mii_bus, addr, regnum, val);
+               mdiobus_write_nested(priv->master_mii_bus, addr, regnum, val);
 
        return 0;
 }
@@ -982,6 +982,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
        struct device_node *dn = pdev->dev.of_node;
        struct b53_platform_data *pdata;
+       struct dsa_switch_ops *ops;
        struct bcm_sf2_priv *priv;
        struct b53_device *dev;
        struct dsa_switch *ds;
@@ -995,6 +996,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
+       ops = devm_kzalloc(&pdev->dev, sizeof(*ops), GFP_KERNEL);
+       if (!ops)
+               return -ENOMEM;
+
        dev = b53_switch_alloc(&pdev->dev, &bcm_sf2_io_ops, priv);
        if (!dev)
                return -ENOMEM;
@@ -1014,6 +1019,8 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
        ds = dev->ds;
 
        /* Override the parts that are non-standard wrt. normal b53 devices */
+       memcpy(ops, ds->ops, sizeof(*ops));
+       ds->ops = ops;
        ds->ops->get_tag_protocol = bcm_sf2_sw_get_tag_protocol;
        ds->ops->setup = bcm_sf2_sw_setup;
        ds->ops->get_phy_flags = bcm_sf2_sw_get_phy_flags;
index 155190db682d29a6a97b2267550954fb4eba639d..9943629fcbf9ae14a9683e0b2eb0da459f83c0c6 100644 (file)
@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                }
        }
 
+isr_done:
        /* If there is not a separate AN irq, handle it here */
        if (pdata->dev_irq == pdata->an_irq)
                pdata->phy_if.an_isr(irq, pdata);
@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
        if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
                pdata->i2c_if.i2c_isr(irq, pdata);
 
-isr_done:
        return IRQ_HANDLED;
 }
 
index 7e8cf213fd813d8530f65c8439a77fd16ffeff9b..744ed6ddaf373964a2b3526a2b73613932c73c87 100644 (file)
@@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
        unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
        unsigned int pkts_compl = 0, bytes_compl = 0;
        struct bcm_sysport_cb *cb;
-       struct netdev_queue *txq;
        u32 hw_ind;
 
-       txq = netdev_get_tx_queue(ndev, ring->index);
-
        /* Compute how many descriptors have been processed since last call */
        hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
        c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 
        ring->c_index = c_index;
 
-       if (netif_tx_queue_stopped(txq) && pkts_compl)
-               netif_tx_wake_queue(txq);
-
        netif_dbg(priv, tx_done, ndev,
                  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
                  ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                           struct bcm_sysport_tx_ring *ring)
 {
+       struct netdev_queue *txq;
        unsigned int released;
        unsigned long flags;
 
+       txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
        spin_lock_irqsave(&ring->lock, flags);
        released = __bcm_sysport_tx_reclaim(priv, ring);
+       if (released)
+               netif_tx_wake_queue(txq);
+
        spin_unlock_irqrestore(&ring->lock, flags);
 
        return released;
 }
 
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+                                struct bcm_sysport_tx_ring *ring)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ring->lock, flags);
+       __bcm_sysport_tx_reclaim(priv, ring);
+       spin_unlock_irqrestore(&ring->lock, flags);
+}
+
 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 {
        struct bcm_sysport_tx_ring *ring =
@@ -1252,7 +1263,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
        napi_disable(&ring->napi);
        netif_napi_del(&ring->napi);
 
-       bcm_sysport_tx_reclaim(priv, ring);
+       bcm_sysport_tx_clean(priv, ring);
 
        kfree(ring->cbs);
        ring->cbs = NULL;
index 185e9e047aa9adda61ac602caea5f59c3efe2d7e..ae42de4fdddf6b77d2c2cf1606795e139d4b472b 100644 (file)
@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
        tg3_mem_rx_release(tp);
        tg3_mem_tx_release(tp);
 
+       /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+       tg3_full_lock(tp, 0);
        if (tp->hw_stats) {
                dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
                                  tp->hw_stats, tp->stats_mapping);
                tp->hw_stats = NULL;
        }
+       tg3_full_unlock(tp);
 }
 
 /*
index 9211c750e0642660bfdd79bb023f9058a89c18af..2f85b64f01fa06d708e52bde776708f96b55c30e 100644 (file)
@@ -47,8 +47,9 @@ struct lmac {
 struct bgx {
        u8                      bgx_id;
        struct  lmac            lmac[MAX_LMAC_PER_BGX];
-       int                     lmac_count;
+       u8                      lmac_count;
        u8                      max_lmac;
+       u8                      acpi_lmac_idx;
        void __iomem            *reg_base;
        struct pci_dev          *pdev;
        bool                    is_dlm;
@@ -1143,13 +1144,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
        if (acpi_bus_get_device(handle, &adev))
                goto out;
 
-       acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
+       acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
 
-       SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
+       SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
 
-       bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+       bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+       bgx->acpi_lmac_idx++; /* move to next LMAC */
 out:
-       bgx->lmac_count++;
        return AE_OK;
 }
 
index 0e74529a42095b311c97dc367c2ba3b20bdfc27c..30e855004c57592f9ab6c0cea2eb73f63b59b7ca 100644 (file)
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
 err:
        mutex_unlock(&adapter->mcc_lock);
 
-        if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+        if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
                status = -EPERM;
 
        return status;
index 225e9a4877d7b16e058cfa7c0dac9ccb5434bb5b..1a7f8ad7b9c6111ea2f8839a5d28c82af1ef13a8 100644 (file)
@@ -275,8 +275,7 @@ static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
 
        /* Check if mac has already been added as part of uc-list */
        for (i = 0; i < adapter->uc_macs; i++) {
-               if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
-                                    mac)) {
+               if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
                        /* mac already added, skip addition */
                        adapter->pmac_id[0] = adapter->pmac_id[i + 1];
                        return 0;
@@ -319,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
        if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
                return 0;
 
+       /* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
+        * address
+        */
+       if (BEx_chip(adapter) && be_virtfn(adapter) &&
+           !check_privilege(adapter, BE_PRIV_FILTMGMT))
+               return -EPERM;
+
        /* if device is not running, copy MAC to netdev->dev_addr */
        if (!netif_running(netdev))
                goto done;
@@ -1655,14 +1661,12 @@ static void be_clear_mc_list(struct be_adapter *adapter)
 
 static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
 {
-       if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
-                            adapter->dev_mac)) {
+       if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
                adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
                return 0;
        }
 
-       return be_cmd_pmac_add(adapter,
-                              (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
+       return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
                               adapter->if_handle,
                               &adapter->pmac_id[uc_idx + 1], 0);
 }
@@ -1698,9 +1702,8 @@ static void be_set_uc_list(struct be_adapter *adapter)
        }
 
        if (adapter->update_uc_list) {
-               i = 1; /* First slot is claimed by the Primary MAC */
-
                /* cache the uc-list in adapter array */
+               i = 0;
                netdev_for_each_uc_addr(ha, netdev) {
                        ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
                        i++;
@@ -3613,7 +3616,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
 
 static void be_disable_if_filters(struct be_adapter *adapter)
 {
-       be_dev_mac_del(adapter, adapter->pmac_id[0]);
+       /* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
+       if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+           check_privilege(adapter, BE_PRIV_FILTMGMT))
+               be_dev_mac_del(adapter, adapter->pmac_id[0]);
+
        be_clear_uc_list(adapter);
        be_clear_mc_list(adapter);
 
@@ -3766,8 +3773,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
        if (status)
                return status;
 
-       /* For BE3 VFs, the PF programs the initial MAC address */
-       if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+       /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
+       if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+           check_privilege(adapter, BE_PRIV_FILTMGMT)) {
                status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
                if (status)
                        return status;
index a761001308dcc190d8c9bad57dad40f09dc49caa..1515abaa5ac9cab53ef4aab0ee05104c5bd2a61f 100644 (file)
@@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
                                     PAGE_SIZE,
                                     DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_drain(buffer_info->page, 0,
-                                 buffer_info->pagecnt_bias);
+               __page_frag_cache_drain(buffer_info->page,
+                                       buffer_info->pagecnt_bias);
 
                buffer_info->page = NULL;
        }
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                     PAGE_SIZE, DMA_FROM_DEVICE,
                                     DMA_ATTR_SKIP_CPU_SYNC);
-               __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
+               __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
        }
 
        /* clear contents of rx_buffer */
index a849da92f857e5c22cd1ec93158cecbff9c75d90..6b8635378f1fcb2aae4e8ac390bcd09d552c2256 100644 (file)
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
 {
        struct mlx4_cq *cq;
 
+       rcu_read_lock();
        cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
                               cqn & (dev->caps.num_cqs - 1));
+       rcu_read_unlock();
+
        if (!cq) {
                mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
                return;
        }
 
+       /* Accessing the CQ outside of rcu_read_lock is safe, because
+        * the CQ is freed only after interrupt handling is completed.
+        */
        ++cq->arm_sn;
 
        cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
        struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
        struct mlx4_cq *cq;
 
-       spin_lock(&cq_table->lock);
-
+       rcu_read_lock();
        cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
-       if (cq)
-               atomic_inc(&cq->refcount);
-
-       spin_unlock(&cq_table->lock);
+       rcu_read_unlock();
 
        if (!cq) {
-               mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+               mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
                return;
        }
 
+       /* Accessing the CQ outside of rcu_read_lock is safe, because
+        * the CQ is freed only after interrupt handling is completed.
+        */
        cq->event(cq, event_type);
-
-       if (atomic_dec_and_test(&cq->refcount))
-               complete(&cq->free);
 }
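The two hunks above replace the refcount-under-spinlock lookup with a bare RCU-protected walk: rcu_read_lock() needs to cover only the radix tree lookup itself, because (as the mlx4_cq_free hunk below enforces) a CQ is unpublished from the tree and interrupt handling is drained before it can be freed. The lookup half of the pattern:

	rcu_read_lock();
	cq = radix_tree_lookup(&cq_table->tree, cqn & (num_cqs - 1));
	rcu_read_unlock();

	if (!cq)
		return;		/* raced with removal */

	/* No reference held: teardown cannot complete while this
	 * handler runs, so the dereference below is safe. */
	cq->event(cq, event_type);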
 
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        if (err)
                return err;
 
-       spin_lock_irq(&cq_table->lock);
+       spin_lock(&cq_table->lock);
        err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
-       spin_unlock_irq(&cq_table->lock);
+       spin_unlock(&cq_table->lock);
        if (err)
                goto err_icm;
 
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        return 0;
 
 err_radix:
-       spin_lock_irq(&cq_table->lock);
+       spin_lock(&cq_table->lock);
        radix_tree_delete(&cq_table->tree, cq->cqn);
-       spin_unlock_irq(&cq_table->lock);
+       spin_unlock(&cq_table->lock);
 
 err_icm:
        mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
        if (err)
                mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
+       spin_lock(&cq_table->lock);
+       radix_tree_delete(&cq_table->tree, cq->cqn);
+       spin_unlock(&cq_table->lock);
+
        synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
        if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
            priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
                synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
 
-       spin_lock_irq(&cq_table->lock);
-       radix_tree_delete(&cq_table->tree, cq->cqn);
-       spin_unlock_irq(&cq_table->lock);
-
        if (atomic_dec_and_test(&cq->refcount))
                complete(&cq->free);
        wait_for_completion(&cq->free);
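And the teardown half, whose ordering the lockless lookup depends on: unpublish from the tree first, then synchronize_irq() so any handler that already found the CQ finishes, and only then release the final reference (eq_irq stands in for the per-EQ irq lines above):

	spin_lock(&cq_table->lock);
	radix_tree_delete(&cq_table->tree, cq->cqn);	/* 1: unpublish */
	spin_unlock(&cq_table->lock);

	synchronize_irq(eq_irq);			/* 2: drain handlers */

	if (atomic_dec_and_test(&cq->refcount))		/* 3: drop last ref */
		complete(&cq->free);
	wait_for_completion(&cq->free);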
index edbe200ac2fa4a11ad30bb8f08d8f7e3cb910708..761f8b12399cab245abccc0f7d7f84fde742c14d 100644 (file)
@@ -1748,8 +1748,11 @@ int mlx4_en_start_port(struct net_device *dev)
        /* Process all completions if exist to prevent
         * the queues freezing if they are full
         */
-       for (i = 0; i < priv->rx_ring_num; i++)
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               local_bh_disable();
                napi_schedule(&priv->rx_cq[i]->napi);
+               local_bh_enable();
+       }
 
        netif_tx_start_all_queues(dev);
        netif_device_attach(dev);
@@ -2277,7 +2280,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 
        if (priv->tx_ring_num[TX_XDP] &&
            !mlx4_en_check_xdp_mtu(dev, new_mtu))
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
 
        dev->mtu = new_mtu;
 
index cd3638e6fe25b2f8db4ea5e771535df51652faae..0509996957d9664b612358dd805359f4bc67b8dc 100644 (file)
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                        break;
 
                case MLX4_EVENT_TYPE_SRQ_LIMIT:
-                       mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
-                                __func__);
+                       mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+                                __func__, be32_to_cpu(eqe->event.srq.srqn),
+                                eq->eqn);
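+                       /* fall through */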
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        if (mlx4_is_master(dev)) {
                                /* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                                                  eq->eqn, eq->cons_index, ret);
                                        break;
                                }
-                               mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
-                                         __func__, slave,
-                                         be32_to_cpu(eqe->event.srq.srqn),
-                                         eqe->type, eqe->subtype);
+                               if (eqe->type ==
+                                   MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+                                       mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+                                                 __func__, slave,
+                                                 be32_to_cpu(eqe->event.srq.srqn),
+                                                 eqe->type, eqe->subtype);
 
                                if (!ret && slave != dev->caps.function) {
-                                       mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
-                                                 __func__, eqe->type,
-                                                 eqe->subtype, slave);
+                                       if (eqe->type ==
+                                           MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+                                               mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+                                                         __func__, eqe->type,
+                                                         eqe->subtype, slave);
                                        mlx4_slave_event(dev, slave, eqe);
                                        break;
                                }
index 56185a0b827df6394a1edba70cf6925683ffb403..1822382212eed5d77fb290598fbe2d0834480aa9 100644 (file)
@@ -2980,6 +2980,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
                put_res(dev, slave, srqn, RES_SRQ);
                qp->srq = srq;
        }
+
+       /* Save param3 for dynamic changes from VST back to VGT */
+       qp->param3 = qpc->param3;
        put_res(dev, slave, rcqn, RES_CQ);
        put_res(dev, slave, mtt_base, RES_MTT);
        res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3775,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
        int qpn = vhcr->in_modifier & 0x7fffff;
        struct res_qp *qp;
        u8 orig_sched_queue;
-       __be32  orig_param3 = qpc->param3;
        u8 orig_vlan_control = qpc->pri_path.vlan_control;
        u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
        u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3816,6 @@ out:
         */
        if (!err) {
                qp->sched_queue = orig_sched_queue;
-               qp->param3      = orig_param3;
                qp->vlan_control = orig_vlan_control;
                qp->fvl_rx      =  orig_fvl_rx;
                qp->pri_path_fl = orig_pri_path_fl;
index 1236b27b149386ab61f1c25ab26c84d43d561ab9..2b7dd315020cd9e1a21b28643621122695cd06dd 100644 (file)
@@ -3675,14 +3675,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
 static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
 {
-       struct mlx5_core_dev *mdev = priv->mdev;
-       struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
        mlx5e_vxlan_cleanup(priv);
 
-       if (MLX5_CAP_GEN(mdev, vport_group_manager))
-               mlx5_eswitch_unregister_vport_rep(esw, 0);
-
        if (priv->xdp_prog)
                bpf_prog_put(priv->xdp_prog);
 }
@@ -3807,9 +3801,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 {
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
        queue_work(priv->wq, &priv->set_rx_mode_work);
+       if (MLX5_CAP_GEN(mdev, vport_group_manager))
+               mlx5_eswitch_unregister_vport_rep(esw, 0);
        mlx5e_disable_async_events(priv);
-       mlx5_lag_remove(priv->mdev);
+       mlx5_lag_remove(mdev);
 }
 
 static const struct mlx5e_profile mlx5e_nic_profile = {
index 1fffe48a93cc35d55d03eba9ba352590924548d2..cbfac06b7ffd1d5140226ccb87331db57d4880d8 100644 (file)
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
        switch (am->tune_state) {
        case MLX5E_AM_PARKING_ON_TOP:
        case MLX5E_AM_PARKING_TIRED:
-               WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
                return true;
        case MLX5E_AM_GOING_RIGHT:
                return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
        switch (am->tune_state) {
        case MLX5E_AM_PARKING_ON_TOP:
        case MLX5E_AM_PARKING_TIRED:
-               WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
                break;
        case MLX5E_AM_GOING_RIGHT:
                am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
        switch (am->tune_state) {
        case MLX5E_AM_PARKING_ON_TOP:
        case MLX5E_AM_PARKING_TIRED:
-               WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
                break;
        case MLX5E_AM_GOING_RIGHT:
                if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
        u32 delta_us = ktime_us_delta(end->time, start->time);
        unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
 
-       if (!delta_us) {
-               WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+       if (!delta_us)
                return;
-       }
 
        curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
        curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
index f8829b5171560ed2f51cd05b8c82d2a076ce8cbb..46bef6a26a8cdbebf268b6275271367c4109a77d 100644 (file)
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
        }
 }
 
+/* We also get here when setting the rule in the FW failed, etc. In that case
+ * the flow rule itself might not exist, but some offloading related to the
+ * actions should still be cleaned up.
+ */
 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;
 
-       counter = mlx5_flow_rule_counter(flow->rule);
-
-       mlx5_del_flow_rules(flow->rule);
+       if (!IS_ERR(flow->rule)) {
+               counter = mlx5_flow_rule_counter(flow->rule);
+               mlx5_del_flow_rules(flow->rule);
+               mlx5_fc_destroy(priv->mdev, counter);
+       }
 
        if (esw && esw->mode == SRIOV_OFFLOADS) {
                mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                        mlx5e_detach_encap(priv, flow);
        }
 
-       mlx5_fc_destroy(priv->mdev, counter);
-
        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
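
Relocating the counter/rule teardown makes mlx5e_tc_del_flow() safe to call even when rule creation failed and flow->rule holds an ERR_PTR; the configure path later reuses it from its error label instead of open-coding partial cleanup. The shape of that tolerant-teardown idea, as a hedged sketch with placeholder names (flow, destroy_rule, undo_actions):

    static void del_flow(struct flow *flow)
    {
            /* Skip only what never came into existence... */
            if (!IS_ERR_OR_NULL(flow->rule))
                    destroy_rule(flow->rule);
            /* ...but always undo the action-related offloads. */
            undo_actions(flow);
    }
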
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
 
+       struct flow_dissector_key_control *enc_control =
+               skb_flow_dissector_target(f->dissector,
+                                         FLOW_DISSECTOR_KEY_ENC_CONTROL,
+                                         f->key);
+
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 
                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
-                       return -EOPNOTSUPP;
-
-               /* udp src port isn't supported */
-               if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
-                       return -EOPNOTSUPP;
+                       goto vxlan_match_offload_err;
 
                if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
-               else
+               else {
+                       netdev_warn(priv->netdev,
+                                   "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
+               }
 
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));
 
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+                        udp_sport, ntohs(mask->src));
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+                        udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
-                       return -EOPNOTSUPP;
+vxlan_match_offload_err:
+               netdev_warn(priv->netdev,
+                           "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+               return -EOPNOTSUPP;
        }
 
-       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+       if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));
-       }
 
-       MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-       MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+       }
 
        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
@@ -346,6 +361,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
+               case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+                       netdev_warn(priv->netdev,
+                                   "IPv6 tunnel decap offload isn't supported\n");
                default:
                        return -EOPNOTSUPP;
                }
@@ -375,6 +393,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);
+
+                       /* the HW doesn't need L3 inline to match on frag=no */
+                       if (key->flags & FLOW_DIS_IS_FRAGMENT)
+                               *min_inline = MLX5_INLINE_MODE_IP;
                }
        }
 
@@ -646,18 +668,18 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        int ttl;
 
 #if IS_ENABLED(CONFIG_INET)
+       int ret;
+
        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
-       if (IS_ERR(rt)) {
-               pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
-               return -EOPNOTSUPP;
-       }
+       ret = PTR_ERR_OR_ZERO(rt);
+       if (ret)
+               return ret;
 #else
        return -EOPNOTSUPP;
 #endif
 
        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-               pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
-                       __func__);
+               pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
                ip_rt_put(rt);
                return -EOPNOTSUPP;
        }
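
The route lookup above also swaps an explicit IS_ERR() branch for PTR_ERR_OR_ZERO(), which yields 0 for a valid pointer and the encoded errno for an ERR_PTR; the caller now propagates the real routing error instead of a blanket -EOPNOTSUPP. The two forms are equivalent:

    rt = ip_route_output_key(net, fl4);
    if (IS_ERR(rt))                 /* old shape */
            return PTR_ERR(rt);

    ret = PTR_ERR_OR_ZERO(rt);      /* new shape: 0 or -errno */
    if (ret)
            return ret;
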
@@ -718,8 +740,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device **out_dev)
 {
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+       struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
-       struct neighbour *n;
        char *encap_header;
        int encap_size;
        __be32 saddr;
@@ -750,7 +772,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        e->out_dev = *out_dev;
 
        if (!(n->nud_state & NUD_VALID)) {
-               err = -ENOTSUPP;
+               pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+               err = -EOPNOTSUPP;
                goto out;
        }
 
@@ -772,6 +795,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
 out:
+       if (err && n)
+               neigh_release(n);
        kfree(encap_header);
        return err;
 }
@@ -792,9 +817,17 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        int tunnel_type;
        int err;
 
-       /* udp dst port must be given */
+       /* udp dst port must be set */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+               goto vxlan_encap_offload_err;
+
+       /* setting udp src port isn't supported */
+       if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+               netdev_warn(priv->netdev,
+                           "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
+       }
 
        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
@@ -802,6 +835,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                info.tun_id = tunnel_id_to_key32(key->tun_id);
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
+               netdev_warn(priv->netdev,
+                           "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
        }
 
@@ -809,6 +844,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        case AF_INET:
                info.daddr = key->u.ipv4.dst;
                break;
+       case AF_INET6:
+               netdev_warn(priv->netdev,
+                           "IPv6 tunnel encap offload isn't supported\n");
        default:
                return -EOPNOTSUPP;
        }
@@ -986,7 +1024,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
 
        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
-               goto err_free;
+               goto err_del_rule;
        }
 
        err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1035,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
        goto out;
 
 err_del_rule:
-       mlx5_del_flow_rules(flow->rule);
+       mlx5e_tc_del_flow(priv, flow);
 
 err_free:
        kfree(flow);
index 6547f22e6b9b919010eada133093654348dff671..d01e9f21d4691ea497aa7ea0666c83e330c078bb 100644 (file)
@@ -1195,7 +1195,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
        int err = 0;
 
-       mlx5_drain_health_wq(dev);
+       if (cleanup)
+               mlx5_drain_health_wq(dev);
 
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
@@ -1359,9 +1360,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
        mlx5_enter_error_state(dev);
        mlx5_unload_one(dev, priv, false);
-       /* In case of kernel call save the pci state */
+       /* In case of kernel call save the pci state and drain the health wq */
        if (state) {
                pci_save_state(pdev);
+               mlx5_drain_health_wq(dev);
                mlx5_pci_disable_device(dev);
        }
 
index d147ddd97997e3c2018f6ae9613a04474990b37e..0af3338bfcb4fbe4681afccaba3a6ca41e59a4e0 100644 (file)
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
 /* pci_eqe_cmd_token
  * Command completion event - token
  */
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
 
 /* pci_eqe_cmd_status
  * Command completion event - status
  */
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
 
 /* pci_eqe_cmd_out_param_h
  * Command completion event - output parameter - higher part
  */
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
 
 /* pci_eqe_cmd_out_param_l
  * Command completion event - output parameter - lower part
  */
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
 
 #endif
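
For context on why only the byte offsets change above: as used in this driver, MLXSW_ITEM32(_type, _cname, _iname, offset, shift, width) generates get/set helpers for a width-bit field that lives at bit shift of the big-endian 32-bit word at byte offset. A hand-expanded illustration of what the corrected cmd_token getter amounts to (a sketch, not the real generated code):

    static inline u16 eqe_cmd_token(const char *eqe)
    {
            u32 dword = be32_to_cpu(*(const __be32 *)(eqe + 0x00));

            return (dword >> 16) & 0xffff;  /* bits [31:16] of dword 0 */
    }
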
index d768c7b6c6d6688c46077c48e25df45cdf9ad7dc..003093abb1707f0a6cdefe465e073c005d2ee3f9 100644 (file)
@@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
                        dev_kfree_skb_any(skb_orig);
                        return NETDEV_TX_OK;
                }
+               dev_consume_skb_any(skb_orig);
        }
 
        if (eth_skb_pad(skb)) {
index 150ccf5192a9895d8e6dc1d41c3510c207d870e5..2e88115e87359777279872efaad3581078aaa59e 100644 (file)
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
                        dev_kfree_skb_any(skb_orig);
                        return NETDEV_TX_OK;
                }
+               dev_consume_skb_any(skb_orig);
        }
        mlxsw_sx_txhdr_construct(skb, &tx_info);
        /* TX header is consumed by HW on the way so we shouldn't count its
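
Both one-line additions draw the same distinction: dev_kfree_skb_any() is accounted as a drop (and is visible to drop monitors), whereas dev_consume_skb_any() marks the normal release of an skb that served its purpose. Here the original skb has been replaced by a padded copy, so freeing it is not a drop. A hedged sketch of the pattern, with skb_realloc_headroom() standing in for whatever copy the driver performs:

    skb_orig = skb;
    skb = skb_realloc_headroom(skb, hdr_len);
    if (!skb) {
            dev_kfree_skb_any(skb_orig);   /* genuine drop: copy failed */
            return NETDEV_TX_OK;
    }
    dev_consume_skb_any(skb_orig);         /* normal path: copy succeeded */
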
index 99a14df28b9634dd8e6fdeb6bc9bce2d219a3e1a..2851b4c5657049600c6df2c9c9fa90649cbb832a 100644 (file)
@@ -201,6 +201,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
                else
                        adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
 
+               /* of_phy_find_device() claims a reference to the phydev,
+                * so we do that here manually as well. When the driver
+                * later unloads, it can unilaterally drop the reference
+                * without worrying about ACPI vs DT.
+                */
+               if (adpt->phydev)
+                       get_device(&adpt->phydev->mdio.dev);
        } else {
                struct device_node *phy_np;
 
index 422289c232bc77b2c08c49e5646967b94aeedff8..f46d300bd58597ce64cc4cefe3be2b1aa4b03afa 100644 (file)
@@ -719,8 +719,7 @@ static int emac_probe(struct platform_device *pdev)
 err_undo_napi:
        netif_napi_del(&adpt->rx_q.napi);
 err_undo_mdiobus:
-       if (!has_acpi_companion(&pdev->dev))
-               put_device(&adpt->phydev->mdio.dev);
+       put_device(&adpt->phydev->mdio.dev);
        mdiobus_unregister(adpt->mii_bus);
 err_undo_clocks:
        emac_clks_teardown(adpt);
@@ -740,8 +739,7 @@ static int emac_remove(struct platform_device *pdev)
 
        emac_clks_teardown(adpt);
 
-       if (!has_acpi_companion(&pdev->dev))
-               put_device(&adpt->phydev->mdio.dev);
+       put_device(&adpt->phydev->mdio.dev);
        mdiobus_unregister(adpt->mii_bus);
        free_netdev(netdev);
 
index 44389c90056a0f197a97f5d36478ec597f266004..8f1623bf2134700498198a98cb6aca9dddd2a6cd 100644 (file)
@@ -696,7 +696,7 @@ enum rtl_tx_desc_bit_1 {
 enum rtl_rx_desc_bit {
        /* Rx private */
        PID1            = (1 << 18), /* Protocol ID bit 1/2 */
-       PID0            = (1 << 17), /* Protocol ID bit 2/2 */
+       PID0            = (1 << 17), /* Protocol ID bit 0/2 */
 
 #define RxProtoUDP     (PID1)
 #define RxProtoTCP     (PID0)
index 92d7692c840dbc67e33ca9fe61d3496893def71d..89ac1e3f617599238d35fa444ff66c20abd48dc6 100644 (file)
@@ -926,14 +926,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
        /* Receive error message handling */
        priv->rx_over_errors =  priv->stats[RAVB_BE].rx_over_errors;
        priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
-       if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+       if (priv->rx_over_errors != ndev->stats.rx_over_errors)
                ndev->stats.rx_over_errors = priv->rx_over_errors;
-               netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
-       }
-       if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+       if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
                ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
-               netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
-       }
 out:
        return budget - quota;
 }
@@ -1508,6 +1504,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
                 entry / NUM_TX_DESC * DPTR_ALIGN;
        len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+       /* Zero length DMA descriptors are problematic as they seem to
+        * terminate DMA transfers. Avoid them by simply using a length of
+        * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+        *
+        * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+        * data by the call to skb_put_padto() above this is safe with
+        * respect to both the length of the first DMA descriptor (len)
+        * overflowing the available data and the length of the second DMA
+        * descriptor (skb->len - len) being negative.
+        */
+       if (len == 0)
+               len = DPTR_ALIGN;
+
        memcpy(buffer, skb->data, len);
        dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
        if (dma_mapping_error(ndev->dev.parent, dma_addr))
index 00fafabab1d08ed505130ceab5306255c9910087..f729a6b43958cc82a1b2d38293cb50baf767f39a 100644 (file)
@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
+       .hw_crc         = 1,
        .tsu            = 1,
        .select_mii     = 1,
        .shift_rd0      = 1,
@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -832,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
index 39eb7a65bb9f6a6137ffe13c4c2e776720311db1..e3f6389e1b01c5bba89c9cf3b632113fa9b4db42 100644 (file)
@@ -3319,8 +3319,16 @@ int stmmac_dvr_probe(struct device *device,
                ndev->max_mtu = JUMBO_LEN;
        else
                ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
-       if (priv->plat->maxmtu < ndev->max_mtu)
+       /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
+        * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
+        */
+       if ((priv->plat->maxmtu < ndev->max_mtu) &&
+           (priv->plat->maxmtu >= ndev->min_mtu))
                ndev->max_mtu = priv->plat->maxmtu;
+       else if (priv->plat->maxmtu < ndev->min_mtu)
+               dev_warn(priv->device,
+                        "%s: warning: maxmtu has an invalid value (%d)\n",
+                        __func__, priv->plat->maxmtu);
 
        if (flow_ctrl)
                priv->flow_ctrl = FLOW_AUTO;    /* RX/TX pause on */
@@ -3332,7 +3340,8 @@ int stmmac_dvr_probe(struct device *device,
         */
        if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
                priv->use_riwt = 1;
-               netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
+               dev_info(priv->device,
+                        "Enable RX Mitigation via HW Watchdog Timer\n");
        }
 
        netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
@@ -3358,17 +3367,17 @@ int stmmac_dvr_probe(struct device *device,
                /* MDIO bus Registration */
                ret = stmmac_mdio_register(ndev);
                if (ret < 0) {
-                       netdev_err(priv->dev,
-                                  "%s: MDIO bus (id: %d) registration failed",
-                                  __func__, priv->plat->bus_id);
+                       dev_err(priv->device,
+                               "%s: MDIO bus (id: %d) registration failed",
+                               __func__, priv->plat->bus_id);
                        goto error_mdio_register;
                }
        }
 
        ret = register_netdev(ndev);
        if (ret) {
-               netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
-                          __func__, ret);
+               dev_err(priv->device, "%s: ERROR %i registering the device\n",
+                       __func__, ret);
                goto error_netdev_register;
        }
 
index a2831773431a6c19d185b2e82e50897aaf7ef7d8..3da4737620cb3fbf50e16c2cf61de3dbfd3e2355 100644 (file)
@@ -89,6 +89,9 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 
        /* Set default value for unicast filter entries */
        plat->unicast_filter_entries = 1;
+
+       /* Set the maxmtu to a default of JUMBO_LEN */
+       plat->maxmtu = JUMBO_LEN;
 }
 
 static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -126,6 +129,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
        /* Set default value for unicast filter entries */
        plat->unicast_filter_entries = 1;
 
+       /* Set the maxmtu to a default of JUMBO_LEN */
+       plat->maxmtu = JUMBO_LEN;
+
        return 0;
 }
 
index 77c88fcf2b86f610d8797aec75fba1ac0ba83561..9b8a30bf939bf946baed5f93228178e5fa686037 100644 (file)
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
                goto fail_alloc;
        }
 
-#warning FIXME: unhardcode gpio&reset bits
+       /* FIXME: unhardcode gpio&reset bits */
        ar7_gpio_disable(26);
        ar7_gpio_disable(27);
        ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
index c9414c0548526657e78f77eb792f14df278eb376..fcab8019dda08ad430d126f9f044f39291a7a154 100644 (file)
@@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
         * policy filters on the host). Deliver these via the VF
         * interface in the guest.
         */
+       rcu_read_lock();
        vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
        if (vf_netdev && (vf_netdev->flags & IFF_UP))
                net = vf_netdev;
@@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
        skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
+               rcu_read_unlock();
                return NVSP_STAT_FAIL;
        }
 
@@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
         * TODO - use NAPI?
         */
        netif_rx(skb);
+       rcu_read_unlock();
 
        return 0;
 }
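
The fix restores the basic RCU contract: rcu_dereference() and every use of the pointer it returns must sit inside an rcu_read_lock()/rcu_read_unlock() section, including all early-return paths (note the unlock added before the NVSP_STAT_FAIL return). Reduced to a skeleton (vf and use_netdev are placeholders):

    rcu_read_lock();
    vf = rcu_dereference(ctx->vf_netdev);
    if (vf && (vf->flags & IFF_UP))
            use_netdev(vf);   /* must happen while still protected */
    rcu_read_unlock();
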
index 46d53a6c8cf868a18ab82a27e730c36e1bc6fb0b..76ba7ecfe14290c7800879c0f80b284f54299ca7 100644 (file)
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
        /* Reset */
        if (gpio_is_valid(rstn)) {
                udelay(1);
-               gpio_set_value(rstn, 0);
+               gpio_set_value_cansleep(rstn, 0);
                udelay(1);
-               gpio_set_value(rstn, 1);
+               gpio_set_value_cansleep(rstn, 1);
                usleep_range(120, 240);
        }
 
index 1253f864737ae3cfb10a95ee8c916b5c04dd94b6..ef688518ad77d7ec004e265c3ee9a0086228271a 100644 (file)
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
 {
        struct usb_device *usb_dev = atusb->usb_dev;
        int ret;
+       uint8_t *buffer;
        uint8_t value;
 
+       buffer = kmalloc(1, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
        dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
        ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
                                ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
-                               0, reg, &value, 1, 1000);
-       return ret >= 0 ? value : ret;
+                               0, reg, buffer, 1, 1000);
+
+       if (ret >= 0) {
+               value = buffer[0];
+               kfree(buffer);
+               return value;
+       } else {
+               kfree(buffer);
+               return ret;
+       }
 }
 
 static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@@ -549,13 +562,6 @@ static int
 atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
 {
        struct atusb *atusb = hw->priv;
-       struct device *dev = &atusb->usb_dev->dev;
-
-       if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
-               dev_info(dev, "Automatic frame retransmission is only available from "
-                       "firmware version 0.3. Please update if you want this feature.");
-               return -EINVAL;
-       }
 
        return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
 }
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
 static int atusb_get_and_show_revision(struct atusb *atusb)
 {
        struct usb_device *usb_dev = atusb->usb_dev;
-       unsigned char buffer[3];
+       unsigned char *buffer;
        int ret;
 
+       buffer = kmalloc(3, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
        /* Get a couple of the ATMega Firmware values */
        ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
                                ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
                dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
        }
 
+       kfree(buffer);
        return ret;
 }
 
 static int atusb_get_and_show_build(struct atusb *atusb)
 {
        struct usb_device *usb_dev = atusb->usb_dev;
-       char build[ATUSB_BUILD_SIZE + 1];
+       char *build;
        int ret;
 
+       build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
+       if (!build)
+               return -ENOMEM;
+
        ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
                                ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
                                build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
                dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
        }
 
+       kfree(build);
        return ret;
 }
 
@@ -698,7 +714,7 @@ fail:
 static int atusb_set_extended_addr(struct atusb *atusb)
 {
        struct usb_device *usb_dev = atusb->usb_dev;
-       unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
+       unsigned char *buffer;
        __le64 extended_addr;
        u64 addr;
        int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
                return 0;
        }
 
+       buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
        /* Firmware is new enough so we fetch the address from EEPROM */
        ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
                                ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
                                buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
-       if (ret < 0)
-               dev_err(&usb_dev->dev, "failed to fetch extended address\n");
+       if (ret < 0) {
+               dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
+               ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+               kfree(buffer);
+               return ret;
+       }
 
        memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
        /* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
                        &addr);
        }
 
+       kfree(buffer);
        return ret;
 }
 
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
 
        hw->parent = &usb_dev->dev;
        hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
-                   IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
-                   IEEE802154_HW_FRAME_RETRIES;
+                   IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
 
        hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
                         WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
        atusb_get_and_show_build(atusb);
        atusb_set_extended_addr(atusb);
 
+       if (atusb->fw_ver_maj > 0 || atusb->fw_ver_min >= 3)
+               hw->flags |= IEEE802154_HW_FRAME_RETRIES;
+
        ret = atusb_get_and_clear_error(atusb);
        if (ret) {
                dev_err(&atusb->usb_dev->dev,
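
The kmalloc() conversions in this file all follow from one rule: the USB core DMA-maps the transfer buffer, so it must be heap memory. With CONFIG_VMAP_STACK the stack can live in vmalloc space, which is not DMA-able, so the old on-stack buffers could corrupt data or oops. A hedged sketch of the required shape (plain usb_control_msg() shown; atusb wraps it):

    u8 *buf = kmalloc(len, GFP_KERNEL);   /* heap, hence DMA-able */
    if (!buf)
            return -ENOMEM;
    ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                          USB_DIR_IN | USB_TYPE_VENDOR, 0, 0,
                          buf, len, 1000);
    /* ... copy the result out of buf ... */
    kfree(buf);
    return ret;
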
index d361835b315dd6b9ed542a48515e85c8024192d3..8dbd59baa34d5ed9eda97396f38b5ab38e96416a 100644 (file)
@@ -279,6 +279,7 @@ config MARVELL_PHY
 
 config MESON_GXL_PHY
        tristate "Amlogic Meson GXL Internal PHY"
+       depends on ARCH_MESON || COMPILE_TEST
        ---help---
          Currently has a driver for the Amlogic Meson GXL Internal PHY
 
index 1b639242f9e23170e69f7b7669ba82d8d4263fb5..ca1b462bf7b2782412014b63a0578aa328d9d4e1 100644 (file)
@@ -29,6 +29,7 @@
 #define MII_DP83867_MICR       0x12
 #define MII_DP83867_ISR                0x13
 #define DP83867_CTRL           0x1f
+#define DP83867_CFG3           0x1e
 
 /* Extended Registers */
 #define DP83867_RGMIICTL       0x0032
@@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
                micr_status |=
                        (MII_DP83867_MICR_AN_ERR_INT_EN |
                        MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+                       MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+                       MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
                        MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
                        MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
 
@@ -129,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
 
        ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
                                   &dp83867->rx_id_delay);
-       if (ret)
+       if (ret &&
+           (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+            phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
                return ret;
 
        ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
                                   &dp83867->tx_id_delay);
-       if (ret)
+       if (ret &&
+           (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+            phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
                return ret;
 
        return of_property_read_u32(of_node, "ti,fifo-depth",
@@ -214,6 +221,13 @@ static int dp83867_config_init(struct phy_device *phydev)
                }
        }
 
+       /* Enable Interrupt output INT_OE in CFG3 register */
+       if (phy_interrupt_is_valid(phydev)) {
+               val = phy_read(phydev, DP83867_CFG3);
+               val |= BIT(7);
+               phy_write(phydev, DP83867_CFG3, val);
+       }
+
        return 0;
 }
 
index e269262471a44fdcbfc9ef3c5833bf3ceb78624e..0b78210c0fa74e88b2ef5e27d4a7e6c78e9fa45c 100644 (file)
@@ -1192,7 +1192,8 @@ static int marvell_read_status(struct phy_device *phydev)
        int err;
 
        /* Check the fiber mode first */
-       if (phydev->supported & SUPPORTED_FIBRE) {
+       if (phydev->supported & SUPPORTED_FIBRE &&
+           phydev->interface != PHY_INTERFACE_MODE_SGMII) {
                err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
                if (err < 0)
                        goto error;
index 25f93a98863b79be76ac72b814d5dd4ff75c89a9..48da6e93c3f783e07f61ae24151e3114ac8dc1ae 100644 (file)
@@ -1065,6 +1065,15 @@ void phy_state_machine(struct work_struct *work)
                        if (old_link != phydev->link)
                                phydev->state = PHY_CHANGELINK;
                }
+               /*
+                * Failsafe: check that nobody set phydev->link=0 between two
+                * poll cycles, otherwise we won't leave RUNNING state as long
+                * as link remains down.
+                */
+               if (!phydev->link && phydev->state == PHY_RUNNING) {
+                       phydev->state = PHY_CHANGELINK;
+                       phydev_err(phydev, "no link in PHY_RUNNING\n");
+               }
                break;
        case PHY_CHANGELINK:
                err = phy_read_status(phydev);
index 7dc61228c55b8af26f0623fa4cfb9dd512cd97ac..f3b48ad90865d036845b34ae0fa326dcb4fea297 100644 (file)
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
        u8 checksum = CHECKSUM_NONE;
        u32 opts2, opts3;
 
-       if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
+       if (!(tp->netdev->features & NETIF_F_RXCSUM))
                goto return_result;
 
        opts2 = le32_to_cpu(rx_desc->opts2);
@@ -3576,39 +3576,87 @@ static bool delay_autosuspend(struct r8152 *tp)
                return false;
 }
 
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_runtime_suspend(struct r8152 *tp)
 {
-       struct r8152 *tp = usb_get_intfdata(intf);
        struct net_device *netdev = tp->netdev;
        int ret = 0;
 
-       mutex_lock(&tp->control);
+       if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+               u32 rcr = 0;
 
-       if (PMSG_IS_AUTO(message)) {
-               if (netif_running(netdev) && delay_autosuspend(tp)) {
+               if (delay_autosuspend(tp)) {
                        ret = -EBUSY;
                        goto out1;
                }
 
-               set_bit(SELECTIVE_SUSPEND, &tp->flags);
-       } else {
-               netif_device_detach(netdev);
+               if (netif_carrier_ok(netdev)) {
+                       u32 ocp_data;
+
+                       rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+                       ocp_data = rcr & ~RCR_ACPT_ALL;
+                       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+                       rxdy_gated_en(tp, true);
+                       ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+                                                PLA_OOB_CTRL);
+                       if (!(ocp_data & RXFIFO_EMPTY)) {
+                               rxdy_gated_en(tp, false);
+                               ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+                               ret = -EBUSY;
+                               goto out1;
+                       }
+               }
+
+               clear_bit(WORK_ENABLE, &tp->flags);
+               usb_kill_urb(tp->intr_urb);
+
+               tp->rtl_ops.autosuspend_en(tp, true);
+
+               if (netif_carrier_ok(netdev)) {
+                       napi_disable(&tp->napi);
+                       rtl_stop_rx(tp);
+                       rxdy_gated_en(tp, false);
+                       ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+                       napi_enable(&tp->napi);
+               }
        }
 
+       set_bit(SELECTIVE_SUSPEND, &tp->flags);
+
+out1:
+       return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+       struct net_device *netdev = tp->netdev;
+       int ret = 0;
+
+       netif_device_detach(netdev);
+
        if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
                clear_bit(WORK_ENABLE, &tp->flags);
                usb_kill_urb(tp->intr_urb);
                napi_disable(&tp->napi);
-               if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
-                       rtl_stop_rx(tp);
-                       tp->rtl_ops.autosuspend_en(tp, true);
-               } else {
-                       cancel_delayed_work_sync(&tp->schedule);
-                       tp->rtl_ops.down(tp);
-               }
+               cancel_delayed_work_sync(&tp->schedule);
+               tp->rtl_ops.down(tp);
                napi_enable(&tp->napi);
        }
-out1:
+
+       return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+       struct r8152 *tp = usb_get_intfdata(intf);
+       int ret;
+
+       mutex_lock(&tp->control);
+
+       if (PMSG_IS_AUTO(message))
+               ret = rtl8152_runtime_suspend(tp);
+       else
+               ret = rtl8152_system_suspend(tp);
+
        mutex_unlock(&tp->control);
 
        return ret;
@@ -4308,6 +4356,11 @@ static int rtl8152_probe(struct usb_interface *intf,
                                NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
                                NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
 
+       if (tp->version == RTL_VER_01) {
+               netdev->features &= ~NETIF_F_RXCSUM;
+               netdev->hw_features &= ~NETIF_F_RXCSUM;
+       }
+
        netdev->ethtool_ops = &ops;
        netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
 
index 23dfb0eac0981704f2770bc3abe9bb32a80e3fc9..454f907d419a7f87cc0ae1813f40c054726be7e8 100644 (file)
@@ -263,7 +263,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
                .flowi4_iif = LOOPBACK_IFINDEX,
                .flowi4_tos = RT_TOS(ip4h->tos),
                .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+               .flowi4_proto = ip4h->protocol,
                .daddr = ip4h->daddr,
+               .saddr = ip4h->saddr,
        };
        struct net *net = dev_net(vrf_dev);
        struct rtable *rt;
@@ -1250,6 +1252,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
                return -EINVAL;
 
        vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+       if (vrf->tb_id == RT_TABLE_UNSPEC)
+               return -EINVAL;
 
        dev->priv_flags |= IFF_L3MDEV_MASTER;
 
index bb70dd5723b587c7d886446547dcec3659a583e4..ca7196c400609b8cfe6fe9da5efae19770258de8 100644 (file)
@@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
 static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
                                      struct vxlan_sock *sock4,
                                      struct sk_buff *skb, int oif, u8 tos,
-                                     __be32 daddr, __be32 *saddr,
+                                     __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
                                      struct dst_cache *dst_cache,
                                      const struct ip_tunnel_info *info)
 {
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
        fl4.flowi4_proto = IPPROTO_UDP;
        fl4.daddr = daddr;
        fl4.saddr = *saddr;
+       fl4.fl4_dport = dport;
+       fl4.fl4_sport = sport;
 
        rt = ip_route_output_key(vxlan->net, &fl4);
        if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
                                          __be32 label,
                                          const struct in6_addr *daddr,
                                          struct in6_addr *saddr,
+                                         __be16 dport, __be16 sport,
                                          struct dst_cache *dst_cache,
                                          const struct ip_tunnel_info *info)
 {
@@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
        fl6.flowi6_mark = skb->mark;
        fl6.flowi6_proto = IPPROTO_UDP;
+       fl6.fl6_dport = dport;
+       fl6.fl6_sport = sport;
 
        err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
                                         sock6->sock->sk,
@@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                     rdst ? rdst->remote_ifindex : 0, tos,
                                     dst->sin.sin_addr.s_addr,
                                     &src->sin.sin_addr.s_addr,
+                                    dst_port, src_port,
                                     dst_cache, info);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
@@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                        rdst ? rdst->remote_ifindex : 0, tos,
                                        label, &dst->sin6.sin6_addr,
                                        &src->sin6.sin6_addr,
+                                       dst_port, src_port,
                                        dst_cache, info);
                if (IS_ERR(ndst)) {
                        err = PTR_ERR(ndst);
@@ -2430,7 +2437,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 
                rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
                                     info->key.u.ipv4.dst,
-                                    &info->key.u.ipv4.src, NULL, info);
+                                    &info->key.u.ipv4.src, dport, sport, NULL, info);
                if (IS_ERR(rt))
                        return PTR_ERR(rt);
                ip_rt_put(rt);
@@ -2441,7 +2448,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 
                ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
                                        info->key.label, &info->key.u.ipv6.dst,
-                                       &info->key.u.ipv6.src, NULL, info);
+                                       &info->key.u.ipv6.src, dport, sport, NULL, info);
                if (IS_ERR(ndst))
                        return PTR_ERR(ndst);
                dst_release(ndst);
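
Threading the UDP ports through to the flow key matters because route lookup can be policy-driven (netfilter marks, multipath hashing); with the ports zeroed, the lookup could resolve a different route than the one the emitted packet would actually take. Roughly, the IPv4 key is now built like this (a sketch using the real struct flowi4 field names):

    struct flowi4 fl4 = {
            .flowi4_oif   = oif,
            .flowi4_tos   = RT_TOS(tos),
            .flowi4_proto = IPPROTO_UDP,
            .daddr        = remote_ip,
            .saddr        = local_ip,
            .fl4_dport    = dst_port,   /* VXLAN UDP port */
            .fl4_sport    = src_port,   /* hashed source port */
    };
    rt = ip_route_output_key(net, &fl4);
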
index bc7397d709d3ac5ff85b9a1057e43a93500587fc..08bc7822f8209d0a9136357edb18683092c86c18 100644 (file)
@@ -16,7 +16,7 @@
 /********************************************************************/
 int orinoco_mic_init(struct orinoco_private *priv)
 {
-       priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+       priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
                                              CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -25,7 +25,7 @@ int orinoco_mic_init(struct orinoco_private *priv)
                return -ENOMEM;
        }
 
-       priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+       priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0,
                                              CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
@@ -40,17 +40,16 @@ int orinoco_mic_init(struct orinoco_private *priv)
 void orinoco_mic_free(struct orinoco_private *priv)
 {
        if (priv->tx_tfm_mic)
-               crypto_free_ahash(priv->tx_tfm_mic);
+               crypto_free_shash(priv->tx_tfm_mic);
        if (priv->rx_tfm_mic)
-               crypto_free_ahash(priv->rx_tfm_mic);
+               crypto_free_shash(priv->rx_tfm_mic);
 }
 
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic)
 {
-       AHASH_REQUEST_ON_STACK(req, tfm_michael);
-       struct scatterlist sg[2];
+       SHASH_DESC_ON_STACK(desc, tfm_michael);
        u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
        int err;
 
@@ -67,18 +66,27 @@ int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
        hdr[ETH_ALEN * 2 + 2] = 0;
        hdr[ETH_ALEN * 2 + 3] = 0;
 
-       /* Use scatter gather to MIC header and data in one go */
-       sg_init_table(sg, 2);
-       sg_set_buf(&sg[0], hdr, sizeof(hdr));
-       sg_set_buf(&sg[1], data, data_len);
+       desc->tfm = tfm_michael;
+       desc->flags = 0;
 
-       if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
-               return -1;
+       err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN);
+       if (err)
+               return err;
+
+       err = crypto_shash_init(desc);
+       if (err)
+               return err;
+
+       err = crypto_shash_update(desc, hdr, sizeof(hdr));
+       if (err)
+               return err;
+
+       err = crypto_shash_update(desc, data, data_len);
+       if (err)
+               return err;
+
+       err = crypto_shash_final(desc, mic);
+       shash_desc_zero(desc);
 
-       ahash_request_set_tfm(req, tfm_michael);
-       ahash_request_set_callback(req, 0, NULL, NULL);
-       ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
-       err = crypto_ahash_digest(req);
-       ahash_request_zero(req);
        return err;
 }
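
The ahash-to-shash move fits michael_mic, which is synchronous: shash needs no scatterlists and no request object, only a descriptor on the stack. When the input is a single contiguous buffer, the init/update/final sequence above collapses into one call; a hedged sketch (orinoco itself needs two updates because header and payload are separate buffers):

    SHASH_DESC_ON_STACK(desc, tfm);

    desc->tfm = tfm;
    desc->flags = 0;
    err = crypto_shash_setkey(tfm, key, MIC_KEYLEN);
    if (!err)
            err = crypto_shash_digest(desc, data, data_len, mic);
    shash_desc_zero(desc);
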
index ce731d05cc98cd2d0b6415f66868d3ad517b4753..e8724e8892194bb3e732078b3561ab031071a061 100644 (file)
@@ -6,6 +6,7 @@
 #define _ORINOCO_MIC_H_
 
 #include <linux/types.h>
+#include <crypto/hash.h>
 
 #define MICHAEL_MIC_LEN 8
 
@@ -15,7 +16,7 @@ struct crypto_ahash;
 
 int orinoco_mic_init(struct orinoco_private *priv);
 void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic);
 
index 2f0c84b1c440cd1160bdebc3cabcb13be110a73e..5fa1c3e3713f835387353ba781cbb24ce95df6dc 100644 (file)
@@ -152,8 +152,8 @@ struct orinoco_private {
        u8 *wpa_ie;
        int wpa_ie_len;
 
-       struct crypto_ahash *rx_tfm_mic;
-       struct crypto_ahash *tx_tfm_mic;
+       struct crypto_shash *rx_tfm_mic;
+       struct crypto_shash *tx_tfm_mic;
 
        unsigned int wpa_enabled:1;
        unsigned int tkip_cm_active:1;
index 0a508649903d73fe4b9a643a5e7e43577abc22c9..49015b05f3d1a048ee6e3ac00426ea76cd5bab44 100644 (file)
@@ -1063,6 +1063,7 @@ int rtl_usb_probe(struct usb_interface *intf,
                return -ENOMEM;
        }
        rtlpriv = hw->priv;
+       rtlpriv->hw = hw;
        rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
                                    GFP_KERNEL);
        if (!rtlpriv->usb_data)
index 6307088b375f2d899002f9fc9fae16c387357598..a518cb1b59d4238b675fccd695f45003af380296 100644 (file)
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 {
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
+       struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
        u8 *uuid = NULL;
        int rc, i;
 
-       if (dev->driver || to_ndns(dev)->claim)
+       if (dev->driver || ndns->claim)
                return -EBUSY;
 
        if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
                nd_namespace_pmem_set_resource(nd_region, nspm,
                                val * nd_region->ndr_mappings);
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               /*
-                * Try to delete the namespace if we deleted all of its
-                * allocation, this is not the seed device for the
-                * region, and it is not actively claimed by a btt
-                * instance.
-                */
-               if (val == 0 && nd_region->ns_seed != dev
-                               && !nsblk->common.claim)
-                       nd_device_unregister(dev, ND_ASYNC);
        }
 
+       /*
+        * Try to delete the namespace if we deleted all of its
+        * allocation, this is not the seed device for the region, and
+        * it is not actively claimed by a btt instance.
+        */
+       if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+               nd_device_unregister(dev, ND_ASYNC);
+
        return rc;
 }
 
index 7282d7495bf1f0a1bf6685012dafb1d9cc60bfa4..5b536be5a12eb97023745a59f65283280b7b3675 100644 (file)
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
 
        rc = memcpy_from_pmem(mem + off, pmem_addr, len);
        kunmap_atomic(mem);
-       return rc;
+       if (rc)
+               return -EIO;
+       return 0;
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
index 2fc86dc7a8df3e487c8222fa84310e7832c9c0a8..8a3c3e32a704b3e359a78ed06e95c6e977f322bc 100644 (file)
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
        if (ret)
                return ret;
 
-       /* Checking for ctrl->tagset is a trick to avoid sleeping on module
-        * load, since we only need the quirk on reset_controller. Notice
-        * that the HGST device needs this delay only in firmware activation
-        * procedure; unfortunately we have no (easy) way to verify this.
-        */
-       if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+       if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
                msleep(NVME_QUIRK_DELAY_AMOUNT);
 
        return nvme_wait_ready(ctrl, cap, false);
index aa0bc60810a74ff93cf05b294b2a9d4968ecf397..fcc9dcfdf67517d1352bef388a50d2dbf6c9a129 100644 (file)
@@ -1654,13 +1654,12 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                struct nvme_fc_fcp_op *op)
 {
        struct nvmefc_fcp_req *freq = &op->fcp_req;
-       u32 map_len = nvme_map_len(rq);
        enum dma_data_direction dir;
        int ret;
 
        freq->sg_cnt = 0;
 
-       if (!map_len)
+       if (!blk_rq_payload_bytes(rq))
                return 0;
 
        freq->sg_table.sgl = freq->first_sgl;
@@ -1854,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret)
                return ret;
 
-       data_len = nvme_map_len(rq);
+       data_len = blk_rq_payload_bytes(rq);
        if (data_len)
                io_dir = ((rq_data_dir(rq) == WRITE) ?
                                        NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
index 6377e14586dc5c837749049cf3dafc7b210a3026..aead6d08ed2c83b4f67e9087b0ca446247b57d18 100644 (file)
@@ -225,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
        return (sector >> (ns->lba_shift - 9));
 }
 
-static inline unsigned nvme_map_len(struct request *rq)
-{
-       if (req_op(rq) == REQ_OP_DISCARD)
-               return sizeof(struct nvme_dsm_range);
-       else
-               return blk_rq_bytes(rq);
-}
-
 static inline void nvme_cleanup_cmd(struct request *req)
 {
        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
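Note: every nvme_map_len() user in this series switches to blk_rq_payload_bytes(), which folds the discard special case into the block layer instead of open-coding it per driver. Simplified sketch of that helper:

        static inline unsigned int blk_rq_payload_bytes(struct request *rq)
        {
                /* special-payload requests (e.g. DSM for discard) carry their
                 * data in rq->special_vec, not in the regular bio payload */
                if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                        return rq->special_vec.bv_len;
                return blk_rq_bytes(rq);
        }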
index 19beeb7b2ac26a5bf0f81bf4e8b995bf29dba195..3faefabf339c98d221373437e411512543d29263 100644 (file)
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
        return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }
 
-static int nvme_init_iod(struct request *rq, unsigned size,
-               struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
        int nseg = blk_rq_nr_phys_segments(rq);
+       unsigned int size = blk_rq_payload_bytes(rq);
 
        if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
 }
 #endif
 
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
-               int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct dma_pool *pool;
-       int length = total_len;
+       int length = blk_rq_payload_bytes(req);
        struct scatterlist *sg = iod->sg;
        int dma_len = sg_dma_len(sg);
        u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
 }
 
 static int nvme_map_data(struct nvme_dev *dev, struct request *req,
-               unsigned size, struct nvme_command *cmnd)
+               struct nvme_command *cmnd)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
        struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
                                DMA_ATTR_NO_WARN))
                goto out;
 
-       if (!nvme_setup_prps(dev, req, size))
+       if (!nvme_setup_prps(dev, req))
                goto out_unmap;
 
        ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_dev *dev = nvmeq->dev;
        struct request *req = bd->rq;
        struct nvme_command cmnd;
-       unsigned map_len;
        int ret = BLK_MQ_RQ_QUEUE_OK;
 
        /*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                return ret;
 
-       map_len = nvme_map_len(req);
-       ret = nvme_init_iod(req, map_len, dev);
+       ret = nvme_init_iod(req, dev);
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out_free_cmd;
 
        if (blk_rq_nr_phys_segments(req))
-               ret = nvme_map_data(dev, req, map_len, &cmnd);
+               ret = nvme_map_data(dev, req, &cmnd);
 
        if (ret != BLK_MQ_RQ_QUEUE_OK)
                goto out_cleanup_iod;
index f587af345889eb1b32a1f4f87dbe0c9a3a30ae42..557f29b1f1bb23e4e5cded156db21b4f981b585a 100644 (file)
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 }
 
 static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
-               struct request *rq, unsigned int map_len,
-               struct nvme_command *c)
+               struct request *rq, struct nvme_command *c)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
        }
 
        if (count == 1) {
-               if (rq_data_dir(rq) == WRITE &&
-                   map_len <= nvme_rdma_inline_data_size(queue) &&
-                   nvme_rdma_queue_idx(queue))
+               if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+                   blk_rq_payload_bytes(rq) <=
+                               nvme_rdma_inline_data_size(queue))
                        return nvme_rdma_map_sg_inline(queue, req, c);
 
                if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
                struct request *rq)
 {
        if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
-               struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+               struct nvme_command *cmd = nvme_req(rq)->cmd;
 
                if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
                    cmd->common.opcode != nvme_fabrics_command ||
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_command *c = sqe->data;
        bool flush = false;
        struct ib_device *dev;
-       unsigned int map_len;
        int ret;
 
        WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        blk_mq_start_request(rq);
 
-       map_len = nvme_map_len(rq);
-       ret = nvme_rdma_map_data(queue, rq, map_len, c);
+       ret = nvme_rdma_map_data(queue, rq, c);
        if (ret < 0) {
                dev_err(queue->ctrl->ctrl.device,
                             "Failed to map data (%d)\n", ret);
index 1f38d0836751af9f823d848218d7dc17d9dad43f..f1b633bce525f9752bcb2cba15a5e1e770c89c0f 100644 (file)
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
 
        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
                               xgene_msi_hwirq_alloc, NULL);
-       if (rc)
+       if (rc < 0)
                goto err_cpuhp;
        pci_xgene_online = rc;
        rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
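Note: background for the `rc < 0` change. cpuhp_setup_state() with CPUHP_AP_ONLINE_DYN returns the dynamically allocated state number (a positive value) on success, so `if (rc)` treated every successful setup as an error. The corrected idiom:

        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
                               xgene_msi_hwirq_alloc, NULL);
        if (rc < 0)                     /* only negative values are errors */
                goto err_cpuhp;
        pci_xgene_online = rc;          /* keep the state for removal later */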
index bed19994c1e94d4e32c134e58133c4acd8b8bd88..af8f6e92e8851ca84b459e4587508960b225d336 100644 (file)
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 {
        u32 val;
 
-       /* get iATU unroll support */
-       pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-       dev_dbg(pp->dev, "iATU unroll: %s\n",
-               pp->iatu_unroll_enabled ? "enabled" : "disabled");
-
        /* set the number of lanes */
        val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
        val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
         * we should not program the ATU here.
         */
        if (!pp->ops->rd_other_conf) {
+               /* get iATU unroll support */
+               pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+               dev_dbg(pp->dev, "iATU unroll: %s\n",
+                       pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
                dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
                                          pp->mem_bus_addr, pp->mem_size);
index e164b5c9f0f03d953e825150977fe0c571b40690..204960e70333f1dbe49d03ffffac7393f06a37c4 100644 (file)
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (!pos)
                return;
+
        pdev->pcie_cap = pos;
        pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
        pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
 
        /*
-        * A Root Port is always the upstream end of a Link.  No PCIe
-        * component has two Links.  Two Links are connected by a Switch
-        * that has a Port on each Link and internal logic to connect the
-        * two Ports.
+        * A Root Port or a PCI-to-PCIe bridge is always the upstream end
+        * of a Link.  No PCIe component has two Links.  Two Links are
+        * connected by a Switch that has a Port on each Link and internal
+        * logic to connect the two Ports.
         */
        type = pci_pcie_type(pdev);
-       if (type == PCI_EXP_TYPE_ROOT_PORT)
+       if (type == PCI_EXP_TYPE_ROOT_PORT ||
+           type == PCI_EXP_TYPE_PCIE_BRIDGE)
                pdev->has_secondary_link = 1;
        else if (type == PCI_EXP_TYPE_UPSTREAM ||
                 type == PCI_EXP_TYPE_DOWNSTREAM) {
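Note: pci_pcie_type() just decodes the capability flags cached a few lines earlier; the hunk widens the set of types treated as the upstream end of a link. Sketch of the macro (essentially as defined in include/linux/pci.h):

        #define pci_pcie_type(dev)  (((dev)->pcie_flags_reg & PCI_EXP_FLAGS_TYPE) >> 4)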
index 37300634b7d2c853a05f5dcecce6b9d46d187b59..c123488266ce74883ed8ba972b43103d136bb66e 100644 (file)
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        enum pin_config_param param = pinconf_to_config_param(*config);
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, pull, val, debounce;
        u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                        return -EINVAL;
 
                raw_spin_lock_irqsave(&vg->lock, flags);
-               debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+               debounce = readl(db_reg);
                raw_spin_unlock_irqrestore(&vg->lock, flags);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        unsigned int param, arg;
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, val, debounce;
        int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       debounce = readl(byt_gpio_reg(vg, offset,
-                                                     BYT_DEBOUNCE_REG));
-                       conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+                       debounce = readl(db_reg);
+                       debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
                        switch (arg) {
+                       case 0:
+                               conf &= ~BYT_DEBOUNCE_EN;
+                               break;
                        case 375:
-                               conf |= BYT_DEBOUNCE_PULSE_375US;
+                               debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
                        case 750:
-                               conf |= BYT_DEBOUNCE_PULSE_750US;
+                               debounce |= BYT_DEBOUNCE_PULSE_750US;
                                break;
                        case 1500:
-                               conf |= BYT_DEBOUNCE_PULSE_1500US;
+                               debounce |= BYT_DEBOUNCE_PULSE_1500US;
                                break;
                        case 3000:
-                               conf |= BYT_DEBOUNCE_PULSE_3MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_3MS;
                                break;
                        case 6000:
-                               conf |= BYT_DEBOUNCE_PULSE_6MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_6MS;
                                break;
                        case 12000:
-                               conf |= BYT_DEBOUNCE_PULSE_12MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_12MS;
                                break;
                        case 24000:
-                               conf |= BYT_DEBOUNCE_PULSE_24MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
                                ret = -EINVAL;
                        }
 
+                       if (!ret)
+                               writel(debounce, db_reg);
                        break;
                default:
                        ret = -ENOTSUPP;
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
+       struct gpio_chip *gc = &vg->chip;
+       struct device *dev = &vg->pdev->dev;
        void __iomem *reg;
        u32 base, value;
        int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
                }
 
                value = readl(reg);
-               if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
-                   !(value & BYT_DIRECT_IRQ_EN)) {
+               if (value & BYT_DIRECT_IRQ_EN) {
+                       clear_bit(i, gc->irq_valid_mask);
+                       dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+               } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
                        byt_gpio_clear_triggering(vg, i);
-                       dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+                       dev_dbg(dev, "disabling GPIO %d\n", i);
                }
        }
 
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->can_sleep   = false;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
+       gc->irq_need_valid_mask = true;
 
 #ifdef CONFIG_PM_SLEEP
        vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
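Note: the two baytrail hunks cooperate through gpiolib's optional IRQ valid mask. Setting irq_need_valid_mask before registration makes gpiolib allocate gc->irq_valid_mask with all bits set, and byt_gpio_irq_init_hw() then clears the bit for any pin wired as a direct IRQ so it never enters the GPIO IRQ domain. In outline:

        gc->irq_need_valid_mask = true;         /* before gpiochip_add_data() */
        /* ... later, for each pin found with BYT_DIRECT_IRQ_EN set: */
        clear_bit(i, gc->irq_valid_mask);       /* exclude from the IRQ domain */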
index 59cb7a6fc5bef316d042f93da72792edca2ea8d9..901b356b09d71679a2b4a03f7cd57b30a22fa6f4 100644 (file)
@@ -19,7 +19,7 @@
 
 #define BXT_PAD_OWN    0x020
 #define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
 #define BXT_GPI_IE     0x110
 
 #define BXT_COMMUNITY(s, e)                            \
index 1e139672f1af9da0fa7ff4af1a919395e2ea6957..6df35dcb29aea68c0ddec6cbd29bb1c9a3abd56c 100644 (file)
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
        return 0;
 }
 
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+       u32 value;
+
+       value = readl(padcfg0);
+       if (input) {
+               value &= ~PADCFG0_GPIORXDIS;
+               value |= PADCFG0_GPIOTXDIS;
+       } else {
+               value &= ~PADCFG0_GPIOTXDIS;
+               value |= PADCFG0_GPIORXDIS;
+       }
+       writel(value, padcfg0);
+}
+
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
                                     struct pinctrl_gpio_range *range,
                                     unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
        value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-       /* Disable TX buffer and enable RX (this will be input) */
-       value &= ~PADCFG0_GPIORXDIS;
-       value |= PADCFG0_GPIOTXDIS;
        writel(value, padcfg0);
 
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(padcfg0, true);
+
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
        return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
        struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *padcfg0;
        unsigned long flags;
-       u32 value;
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
-       value = readl(padcfg0);
-       if (input)
-               value |= PADCFG0_GPIOTXDIS;
-       else
-               value &= ~PADCFG0_GPIOTXDIS;
-       writel(value, padcfg0);
+       __intel_gpio_set_direction(padcfg0, input);
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
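Note: the new helper exists because RX and TX paths have independent disable bits in PADCFG0, and the old intel_gpio_set_direction() only toggled TX, which could leave the receiver disabled on a pin switched to input. The truth table __intel_gpio_set_direction() implements:

        /*
         * direction | GPIORXDIS (RX off) | GPIOTXDIS (TX off)
         * ----------+--------------------+-------------------
         * input     |         0          |         1
         * output    |         1          |         0
         */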
 
index c3928aa3fefa9a1d24b0214e877bbac2bc15f67e..e0bca4df2a2f3188da0d559a29013893a5bea528 100644 (file)
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_13, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index 25694f7094c714bbf35eee2ae7b51e2b4ce5b1e9..b69743b07a1d591ace36d410583231319234d4f0 100644 (file)
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_9, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index c9a146948192dba19ca5da1587791c25b315d628..537b52055756645a8f225dd7e96b191d7d841e96 100644 (file)
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
                        i = 128;
                        pin_num = AMD_GPIO_PINS_BANK2 + i;
                        break;
+               default:
+                       return;
                }
 
                for (; i < pin_num; i++) {
index aa8bd9794683b715013c82aa9220d11cfb0ea595..96686336e3a396254b9473f01f1776e0297301ce 100644 (file)
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 0, 0};
 static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
                                           41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
 static const unsigned i2c0_pins[] = {63, 64};
 static const int i2c0_muxvals[] = {0, 0};
 static const unsigned i2c1_pins[] = {65, 66};
index 410741acb3c92dabe36417800f564a943c5d42ec..f46ece2ce3c4d48086c73b0e2d0c63ee1fe35893 100644 (file)
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                        case 8:
                        case 7:
                        case 6:
+                       case 1:
                                ideapad_input_report(priv, vpc_bit);
                                break;
                        case 5:
index 1fc0de870ff826e8b90956ab557cc83008e1ce68..361770568ad03a6e7a3bc7e6d579ccacae1725b7 100644 (file)
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+       error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
                                     DRIVER_NAME, input);
        if (error) {
                dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
index 97b4c3a219c0c79f3a3ed9359bc30bb13a439ce6..25f15df5c2d7b3c37b82e099f301831c403caa7d 100644 (file)
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
        return 0;
 
 fail_platform_mux_register:
-       for (i--; i > 0 ; i--)
+       while (--i >= 0)
                platform_device_unregister(priv->pdev_mux[i]);
        platform_device_unregister(priv->pdev_i2c);
 fail_alloc:
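Note: the old unwind loop `for (i--; i > 0; i--)` never reached index 0, leaking the first mux device on failure. The replacement is the standard reverse-unwind idiom:

        while (--i >= 0)        /* visits i-1 down to 0; no-op when i == 0 */
                platform_device_unregister(priv->pdev_mux[i]);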
index cbf4d83a727106ee0f7e42ca1b868616d994c0c3..25b176996cb793a789214a1a1237910b01cd3673 100644 (file)
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
 
 static int s3_wmi_check_platform_device(struct device *dev, void *data)
 {
-       struct acpi_device *adev, *ts_adev;
+       struct acpi_device *adev, *ts_adev = NULL;
        acpi_handle handle;
        acpi_status status;
 
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
 {
        s3_wmi_send_lid_state();
        return 0;
 }
-#endif
 static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
 
 static struct platform_driver s3_wmi_driver = {
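Note: why __maybe_unused can replace the #ifdef here. SIMPLE_DEV_PM_OPS() only references the resume callback when CONFIG_PM_SLEEP is set, so on !PM builds an unguarded function would trigger -Wunused-function; the attribute silences that warning and the compiler drops the dead code. The pattern, with illustrative names:

        static int __maybe_unused foo_resume(struct device *dev)
        {
                return 0;       /* referenced only via SET_SYSTEM_SLEEP_PM_OPS() */
        }
        static SIMPLE_DEV_PM_OPS(foo_pm, NULL, foo_resume);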
index 9a507e77eced18cc433792f651761dd137b75d62..90b05c72186c4f9e9c37a812ec469a738d253e95 100644 (file)
@@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
                        goto unwind_vring_allocations;
        }
 
-       /* track the rvdevs list reference */
-       kref_get(&rvdev->refcount);
-
        list_add_tail(&rvdev->node, &rproc->rvdevs);
 
        rproc_add_subdev(rproc, &rvdev->subdev,
@@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
        /*
         * Create a copy of the resource table. When a virtio device starts
         * and calls vring_new_virtqueue() the address of the allocated vring
-        * will be stored in the table_ptr. Before the device is started,
-        * table_ptr will be copied into device memory.
+        * will be stored in the cached_table. Before the device is started,
+        * cached_table will be copied into device memory.
         */
-       rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL);
-       if (!rproc->table_ptr)
+       rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+       if (!rproc->cached_table)
                goto clean_up;
 
+       rproc->table_ptr = rproc->cached_table;
+
        /* reset max_notifyid */
        rproc->max_notifyid = -1;
 
@@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
        }
 
        /*
-        * The starting device has been given the rproc->table_ptr as the
+        * The starting device has been given the rproc->cached_table as the
         * resource table. The address of the vring along with the other
-        * allocated resources (carveouts etc) is stored in table_ptr.
+        * allocated resources (carveouts etc) is stored in cached_table.
         * In order to pass this information to the remote device we must copy
         * this information to device memory. We also update the table_ptr so
         * that any subsequent changes will be applied to the loaded version.
         */
        loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
-       if (loaded_table)
-               memcpy(loaded_table, rproc->table_ptr, tablesz);
+       if (loaded_table) {
+               memcpy(loaded_table, rproc->cached_table, tablesz);
+               rproc->table_ptr = loaded_table;
+       }
 
        /* power up the remote processor */
        ret = rproc->ops->start(rproc);
@@ -951,7 +952,8 @@ stop_rproc:
 clean_up_resources:
        rproc_resource_cleanup(rproc);
 clean_up:
-       kfree(rproc->table_ptr);
+       kfree(rproc->cached_table);
+       rproc->cached_table = NULL;
        rproc->table_ptr = NULL;
 
        rproc_disable_iommu(rproc);
@@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc)
        rproc_disable_iommu(rproc);
 
        /* Free the copy of the resource table */
-       kfree(rproc->table_ptr);
+       kfree(rproc->cached_table);
+       rproc->cached_table = NULL;
        rproc->table_ptr = NULL;
 
        /* if in crash state, unlock crash handler */
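Note: after the rename, table_ptr is a moving pointer to whichever copy of the resource table is currently authoritative, while cached_table always names the kernel-side copy. The lifecycle these hunks implement, in outline (fields of struct rproc; sketch only):

        /* boot */
        rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
        rproc->table_ptr = rproc->cached_table;

        /* once the firmware's table is located in device memory */
        memcpy(loaded_table, rproc->cached_table, tablesz);
        rproc->table_ptr = loaded_table;        /* later edits hit the live copy */

        /* cleanup / shutdown */
        kfree(rproc->cached_table);
        rproc->cached_table = NULL;
        rproc->table_ptr = NULL;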
index a79cb5a9e5f22963ed56214421abc099c5949c37..1cfb775e8e82b8b391108c700abba57f92867066 100644 (file)
@@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
        struct device *dev = &rpdev->dev;
        int ret;
 
-       dev_set_name(&rpdev->dev, "%s:%s",
-                    dev_name(dev->parent), rpdev->id.name);
+       dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+                    rpdev->id.name, rpdev->src, rpdev->dst);
 
        rpdev->dev.bus = &rpmsg_bus;
        rpdev->dev.release = rpmsg_release_device;
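Note: folding the source and destination addresses into the bus id keeps device names unique when several channels share the same name string. A hypothetical before/after (parent, channel name, and addresses are illustrative):

        /* old: "virtio0:rpmsg-client-sample"          (collides if the name repeats)
         * new: "virtio0.rpmsg-client-sample.53.1024"  (src = 53, dst = 1024) */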
index 639ed4e6afd19b46d1e8fb3ec5669b217da8d3c4..070c4da95f48c0e9b0dbb7b6bcf008f8d6e5972e 100644 (file)
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
 #define CCW_CMD_WRITE_CONF 0x21
 #define CCW_CMD_WRITE_STATUS 0x31
 #define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
 #define CCW_CMD_SET_IND_ADAPTER 0x73
 #define CCW_CMD_SET_VIRTIO_REV 0x83
 
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
 #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000
 
 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
-               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
                         ret, index);
 
        vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
 static u8 virtio_ccw_get_status(struct virtio_device *vdev)
 {
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       u8 old_status = *vcdev->status;
+       struct ccw1 *ccw;
+
+       if (vcdev->revision < 1)
+               return *vcdev->status;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return old_status;
+
+       ccw->cmd_code = CCW_CMD_READ_STATUS;
+       ccw->flags = 0;
+       ccw->count = sizeof(*vcdev->status);
+       ccw->cda = (__u32)(unsigned long)vcdev->status;
+       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+       /*
+        * If the channel program failed (should only happen if the device
+        * was hotunplugged, and then we clean up via the machine check
+        * handler anyway), vcdev->status was not overwritten and we just
+        * return the old status, which is fine.
+        */
+       kfree(ccw);
 
        return *vcdev->status;
 }
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
        kfree(ccw);
 }
 
-static struct virtio_config_ops virtio_ccw_config_ops = {
+static const struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
+               case VIRTIO_CCW_DOING_READ_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
index d9e15210b110efcd3147d5a02f0e06c0673d4ad3..5caf5f3ff642282ee13776e9df9ca9a18f494536 100644 (file)
@@ -64,9 +64,9 @@ int           max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
 u32    bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
 u32    *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
 
-#define BFAD_FW_FILE_CB                "cbfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT                "ctfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT2       "ct2fw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CB                "cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT                "ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2       "ct2fw-3.2.5.1.bin"
 
 static u32 *bfad_load_fwimg(struct pci_dev *pdev);
 static void bfad_free_fwimg(void);
index a9a00169ad91960798c35114c69b4d17f13d1039..b2e8c0dfc79cb247a30e2ec826bc9a64da489e64 100644 (file)
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
        struct bfad_fcxp    *drv_fcxp;
        struct bfa_fcs_lport_s *fcs_port;
        struct bfa_fcs_rport_s *fcs_rport;
-       struct fc_bsg_request *bsg_request = bsg_request;
+       struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        uint32_t command_type = bsg_request->msgcode;
        unsigned long flags;
index f9e862093a25935e601c45b4854035e98e035f03..cfcfff48e8e16e3fb2b66c0d491c0d612a8cdd38 100644 (file)
@@ -58,7 +58,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "3.2.25.0"
+#define BFAD_DRIVER_VERSION    "3.2.25.1"
 #endif
 
 #define BFAD_PROTO_NAME FCPI_NAME
index 9ddc9200e0a48b00632bbc441c492679786ec570..9e4b7709043e0c49620755cafce2f80b3e99df89 100644 (file)
@@ -248,6 +248,7 @@ struct fnic {
        struct completion *remove_wait; /* device remove thread blocks */
 
        atomic_t in_flight;             /* io counter */
+       bool internal_reset_inprogress;
        u32 _reserved;                  /* fill hole */
        unsigned long state_flags;      /* protected by host lock */
        enum fnic_state state;
index 2544a37ece0afdc19742d01954487f87dd4c3c5c..adb3d5871e743442e04014cd2f86eae15ab27912 100644 (file)
@@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
        unsigned long wait_host_tmo;
        struct Scsi_Host *shost = sc->device->host;
        struct fc_lport *lp = shost_priv(shost);
+       struct fnic *fnic = lport_priv(lp);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       if (fnic->internal_reset_inprogress == 0) {
+               fnic->internal_reset_inprogress = 1;
+       } else {
+               spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                       "host reset in progress skipping another host reset\n");
+               return SUCCESS;
+       }
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
        /*
         * If fnic_reset is successful, wait for fabric login to complete
@@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
                }
        }
 
+       spin_lock_irqsave(&fnic->fnic_lock, flags);
+       fnic->internal_reset_inprogress = 0;
+       spin_unlock_irqrestore(&fnic->fnic_lock, flags);
        return ret;
 }
 
index 3d3768aaab4f2bb79ccbaf40f1f569114e4f9b30..99b747cedbebc517a78714db321743f0837834b6 100644 (file)
@@ -46,6 +46,7 @@
 
 #define        INITIAL_SRP_LIMIT       800
 #define        DEFAULT_MAX_SECTORS     256
+#define MAX_TXU                        (1024 * 1024)
 
 static uint max_vdma_size = MAX_H_COPY_RDMA;
 
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
        }
 
        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
-                                 GFP_KERNEL);
+                                 GFP_ATOMIC);
        if (!info) {
                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
                        iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
        info->mad_version = cpu_to_be32(MAD_VERSION_1);
        info->os_type = cpu_to_be32(LINUX);
        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
-       info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+       info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
 
        dma_wmb();
        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
        }
 
        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
-                                GFP_KERNEL);
+                                GFP_ATOMIC);
        if (!cap) {
                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
                        iue->target);
@@ -3585,7 +3586,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
                               1, 1);
        if (rc) {
                pr_err("srp_transfer_data() failed: %d\n", rc);
-               return -EAGAIN;
+               return -EIO;
        }
        /*
         * We now tell TCM to add this WRITE CDB directly into the TCM storage
index 236e4e51d1617243d279d09089d95bb50c0a09d6..7b6bd8ed0d0bd6fc8b056052dffbe60595015cbe 100644 (file)
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
                } else {
                        buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
                        lpfc_els_free_data(phba, buf_ptr1);
+                       elsiocb->context2 = NULL;
                }
        }
 
        if (elsiocb->context3) {
                buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
                lpfc_els_free_bpl(phba, buf_ptr);
+               elsiocb->context3 = NULL;
        }
        lpfc_sli_release_iocbq(phba, elsiocb);
        return 0;
index 4faa7672fc1d80add7e603e7bda066e5b98fd34b..a78a3df68f679659eb9d05b24133312a663ab812 100644 (file)
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
  free_vfi_bmask:
        kfree(phba->sli4_hba.vfi_bmask);
+       phba->sli4_hba.vfi_bmask = NULL;
  free_xri_ids:
        kfree(phba->sli4_hba.xri_ids);
+       phba->sli4_hba.xri_ids = NULL;
  free_xri_bmask:
        kfree(phba->sli4_hba.xri_bmask);
+       phba->sli4_hba.xri_bmask = NULL;
  free_vpi_ids:
        kfree(phba->vpi_ids);
+       phba->vpi_ids = NULL;
  free_vpi_bmask:
        kfree(phba->vpi_bmask);
+       phba->vpi_bmask = NULL;
  free_rpi_ids:
        kfree(phba->sli4_hba.rpi_ids);
+       phba->sli4_hba.rpi_ids = NULL;
  free_rpi_bmask:
        kfree(phba->sli4_hba.rpi_bmask);
+       phba->sli4_hba.rpi_bmask = NULL;
  err_exit:
        return rc;
 }
index 394fe1338d0976a42f183e328dfaed02f540560f..dcb33f4fa68720624945f4bdc3f5c932e530c86f 100644 (file)
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
  * @eedp_enable: eedp support enable bit
  * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
  * @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
  */
 struct MPT3SAS_DEVICE {
        struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
        u8      ignore_delay_remove;
        /* Iopriority Command Handling */
        u8      ncq_prio_enable;
+       /*
+        * Bug workaround for SATL handling: while a SATL passthrough
+        * is in operation, the mpt2/3sas firmware doesn't return BUSY
+        * or TASK_SET_FULL for subsequent commands as the spec
+        * requires; it simply does nothing with them until the
+        * passthrough completes, possibly causing them to time out if
+        * the passthrough is a long-running command (like format or
+        * secure erase).  This variable allows us to do the right
+        * thing while a SATL command is pending.
+        */
+       unsigned long ata_command_pending;
 
 };
 
index b5c966e319d315474b94703b93ab0343013dd973..75f3fce1c86773299704347fc0960fb5148ea53f 100644 (file)
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
        }
 }
 
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
 {
-       return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+       struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+       if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+               return 0;
+
+       if (pending)
+               return test_and_set_bit(0, &priv->ata_command_pending);
+
+       clear_bit(0, &priv->ata_command_pending);
+       return 0;
 }
 
 /**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
                if (!scmd)
                        continue;
                count++;
-               if (ata_12_16_cmd(scmd))
-                       scsi_internal_device_unblock(scmd->device,
-                                                       SDEV_RUNNING);
+               _scsih_set_satl_pending(scmd, false);
                mpt3sas_base_free_smid(ioc, smid);
                scsi_dma_unmap(scmd);
                if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        if (ioc->logging_level & MPT_DEBUG_SCSI)
                scsi_print_command(scmd);
 
-       /*
-        * Lock the device for any subsequent command until command is
-        * done.
-        */
-       if (ata_12_16_cmd(scmd))
-               scsi_internal_device_block(scmd->device);
-
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
                return 0;
        }
 
+       /*
+        * Bug workaround for firmware SATL handling.  The loop is
+        * based on atomic operations and ensures consistency, since
+        * we're lockless at this point.
+        */
+       do {
+               if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+                       scmd->result = SAM_STAT_BUSY;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+       } while (_scsih_set_satl_pending(scmd, true));
+
        sas_target_priv_data = sas_device_priv_data->sas_target;
 
        /* invalid device handle */
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        if (scmd == NULL)
                return 1;
 
-       if (ata_12_16_cmd(scmd))
-               scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+       _scsih_set_satl_pending(scmd, false);
 
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
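Note: the queuecommand hunk pairs with _scsih_set_satl_pending() to form a lockless gate around ATA_12/ATA_16 commands. test_and_set_bit() checks and claims the flag in one atomic step, and the loop closes the window where another CPU sets the bit between the test_bit() fast path and the claim. The bare pattern (illustrative names; busy_response() stands in for the SAM_STAT_BUSY completion):

        do {
                if (test_bit(0, &gate))         /* fast path: already claimed */
                        return busy_response(); /* hypothetical helper */
        } while (test_and_set_bit(0, &gate));   /* atomically try to claim */
        /* ... issue the passthrough; clear_bit(0, &gate) on completion */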
 
index 23ca8a274586746e19204f291a844e6cbc43a07f..21331453db7bd29a0c2bc8cac430b8c8bb60b9c2 100644 (file)
@@ -1,6 +1,6 @@
 config QEDI
        tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
-       depends on PCI && SCSI
+       depends on PCI && SCSI && UIO
        depends on QED
        select SCSI_ISCSI_ATTRS
        select QED_LL2
index 47eb4d545d13c5f9b80149f162b04756108cd654..f201f40996205c1f522cde8b53c6e0a4d02aaec9 100644 (file)
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        ssize_t rval = 0;
 
+       mutex_lock(&ha->optrom_mutex);
+
        if (ha->optrom_state != QLA_SREADING)
-               return 0;
+               goto out;
 
-       mutex_lock(&ha->optrom_mutex);
        rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
            ha->optrom_region_size);
+
+out:
        mutex_unlock(&ha->optrom_mutex);
 
        return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
            struct device, kobj)));
        struct qla_hw_data *ha = vha->hw;
 
-       if (ha->optrom_state != QLA_SWRITING)
+       mutex_lock(&ha->optrom_mutex);
+
+       if (ha->optrom_state != QLA_SWRITING) {
+               mutex_unlock(&ha->optrom_mutex);
                return -EINVAL;
-       if (off > ha->optrom_region_size)
+       }
+       if (off > ha->optrom_region_size) {
+               mutex_unlock(&ha->optrom_mutex);
                return -ERANGE;
+       }
        if (off + count > ha->optrom_region_size)
                count = ha->optrom_region_size - off;
 
-       mutex_lock(&ha->optrom_mutex);
        memcpy(&ha->optrom_buffer[off], buf, count);
        mutex_unlock(&ha->optrom_mutex);
 
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
        struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
            struct device, kobj)));
        int type;
-       int rval = 0;
        port_id_t did;
 
        type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
 
        ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
 
-       rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+       qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
        return count;
 }
 
index f7df01b76714e09dc919cbb9660b66bed603d6bc..5b1287a63c494b6edf8fcf7e8ec75a17530032db 100644 (file)
@@ -1556,7 +1556,8 @@ typedef struct {
 struct atio {
        uint8_t         entry_type;             /* Entry type. */
        uint8_t         entry_count;            /* Entry count. */
-       uint8_t         data[58];
+       __le16          attr_n_length;
+       uint8_t         data[56];
        uint32_t        signature;
 #define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
 };
@@ -2732,7 +2733,7 @@ struct isp_operations {
 #define QLA_MSIX_FW_MODE(m)    (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
 #define QLA_MSIX_FW_MODE_1(m)  (QLA_MSIX_FW_MODE(m) == 1)
 
-#define QLA_MSIX_DEFAULT               0x00
+#define QLA_BASE_VECTORS       2 /* default + RSP */
 #define QLA_MSIX_RSP_Q                 0x01
 #define QLA_ATIO_VECTOR                0x02
 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q    0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
        uint16_t entry;
        char name[30];
        void *handle;
-       struct irq_affinity_notify irq_notify;
        int cpuid;
 };
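Note: size check for the struct atio hunk earlier in this file: 1 + 1 + 2 + 56 + 4 = 64 bytes, matching the old 1 + 1 + 58 + 4, so the firmware-visible layout is unchanged; the first two bytes of the old data[] simply become addressable as the little-endian attr_n_length field. A compile-time guard one could add (not part of this series):

        BUILD_BUG_ON(sizeof(struct atio) != 64);        /* 1 + 1 + 2 + 56 + 4 */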
 
index 632d5f30386ab0ae529036c292f3c1c8e64162ca..7b6317c8c2e93bef3509c7e3d15fbae080922788 100644 (file)
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
 
        /* Wait for soft-reset to complete. */
        RD_REG_DWORD(&reg->ctrl_status);
-       for (cnt = 0; cnt < 6000000; cnt++) {
+       for (cnt = 0; cnt < 60; cnt++) {
                barrier();
                if ((RD_REG_DWORD(&reg->ctrl_status) &
                    CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
        RD_REG_DWORD(&reg->hccr);
 
        RD_REG_WORD(&reg->mailbox0);
-       for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+       for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
            rval == QLA_SUCCESS; cnt--) {
                barrier();
                if (cnt)
index 5093ca9b02ec52c8e70674f88205941cc0967d9f..dc88a09f9043c9359cba9c276e523571235c1b50 100644 (file)
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
        sts_entry_t *);
-static void qla_irq_affinity_notify(struct irq_affinity_notify *,
-    const cpumask_t *);
-static void qla_irq_affinity_release(struct kref *);
-
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
        if (pkt->entry_status & RF_BUSY)
                res = DID_BUS_BUSY << 16;
 
+       if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+           pkt->handle == QLA_TGT_SKIP_HANDLE)
+               return;
+
        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (sp) {
                sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!vha->flags.online)
                return;
 
-       if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
-               /* if kernel does not notify qla of IRQ's CPU change,
-                * then set it here.
-                */
-               rsp->msix->cpuid = smp_processor_id();
-               ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
-       }
-
        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
                pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
 
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
 static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
-#define MIN_MSIX_COUNT 2
        int i, ret;
        struct qla_msix_entry *qentry;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+       struct irq_affinity desc = {
+               .pre_vectors = QLA_BASE_VECTORS,
+       };
+
+       if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+               desc.pre_vectors++;
+
+       ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+                       ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+                       &desc);
 
-       ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
-                                   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (ret < 0) {
                ql_log(ql_log_fatal, vha, 0x00c7,
                    "MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                qentry->have_irq = 0;
                qentry->in_use = 0;
                qentry->handle = NULL;
-               qentry->irq_notify.notify  = qla_irq_affinity_notify;
-               qentry->irq_notify.release = qla_irq_affinity_release;
-               qentry->cpuid = -1;
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
+       for (i = 0; i < QLA_BASE_VECTORS; i++) {
                qentry = &ha->msix_entries[i];
                qentry->handle = rsp;
                rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                        goto msix_register_fail;
                qentry->have_irq = 1;
                qentry->in_use = 1;
-
-               /* Register for CPU affinity notification. */
-               irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
-
-               /* Schedule work (ie. trigger a notification) to read cpu
-                * mask for this specific irq.
-                * kref_get is required because
-               * irq_affinity_notify() will do
-               * kref_put().
-               */
-               kref_get(&qentry->irq_notify.kref);
-               schedule_work(&qentry->irq_notify.work);
        }
 
        /*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
        msix->handle = qpair;
        return ret;
 }
-
-
-/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
-static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
-       const cpumask_t *mask)
-{
-       struct qla_msix_entry *e =
-               container_of(notify, struct qla_msix_entry, irq_notify);
-       struct qla_hw_data *ha;
-       struct scsi_qla_host *base_vha;
-       struct rsp_que *rsp = e->handle;
-
-       /* user is recommended to set mask to just 1 cpu */
-       e->cpuid = cpumask_first(mask);
-
-       ha = rsp->hw;
-       base_vha = pci_get_drvdata(ha->pdev);
-
-       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-           "%s: host %ld : vector %d cpu %d \n", __func__,
-           base_vha->host_no, e->vector, e->cpuid);
-
-       if (e->have_irq) {
-               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
-                   (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
-                       ha->tgt.rspq_vector_cpuid = e->cpuid;
-                       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-                           "%s: host%ld: rspq vector %d cpu %d  runtime change\n",
-                           __func__, base_vha->host_no, e->vector, e->cpuid);
-               }
-       }
-}
-
-static void qla_irq_affinity_release(struct kref *ref)
-{
-       struct irq_affinity_notify *notify =
-               container_of(ref, struct irq_affinity_notify, kref);
-       struct qla_msix_entry *e =
-               container_of(notify, struct qla_msix_entry, irq_notify);
-       struct rsp_que *rsp = e->handle;
-       struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
-
-       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-               "%s: host%ld: vector %d cpu %d\n", __func__,
-           base_vha->host_no, e->vector, e->cpuid);
-}
index 2819ceb96041e5b97b234f115c9b35d4b4251ffe..67f64db390b0cd43e2ff6166d30903712ef80938 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
-struct rom_cmd {
+static struct rom_cmd {
        uint16_t cmd;
 } rom_cmds[] = {
        { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
 
-        /* if PCI error, then avoid mbx processing.*/
-        if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+       /* if PCI error, then avoid mbx processing.*/
+       if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
                ql_log(ql_log_warn, vha, 0x1191,
                    "PCI error, exiting.\n");
                return QLA_FUNCTION_TIMEOUT;
-        }
+       }
 
        reg = ha->iobase;
        io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                }
        } else {
 
-               uint16_t mb0;
-               uint32_t ictrl;
+               uint16_t mb[8];
+               uint32_t ictrl, host_status, hccr;
                uint16_t        w;
 
                if (IS_FWI2_CAPABLE(ha)) {
-                       mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+                       mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+                       mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+                       mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+                       mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+                       mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
                        ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+                       host_status = RD_REG_DWORD(&reg->isp24.host_status);
+                       hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+                       ql_log(ql_log_warn, vha, 0x1119,
+                           "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                           "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+                           command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+                           mb[7], host_status, hccr);
+
                } else {
-                       mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+                       mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
                        ictrl = RD_REG_WORD(&reg->isp.ictrl);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+                           "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                           "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
                }
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-                   "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
-                   "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
                ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 
                /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
        mbx_cmd_t       mc;
        mbx_cmd_t       *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
-       int configured_count;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
            "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
        } else {
-               configured_count = mcp->mb[11];
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
                    "Done %s.\n", __func__);
        }
index 54380b434b304eddde918a94ee833e18be6a35f6..0a1723cc08cfc4cbc626f988e58601eff70db5ff 100644 (file)
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
        (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
        QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
 
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+       0x410000A8, 0x410000AC,
+       0x410000B8, 0x410000BC
+};
+
 static void qla82xx_crb_addr_transform_setup(void)
 {
        qla82xx_crb_addr_transform(XDMA);
index 6201dce3553bf951b5b1f770842e289ced553e95..77624eac95a4741a4e475a29a012f1217a75a72f 100644 (file)
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
 #define MD_MIU_TEST_AGT_ADDR_LO                0x41000094
 #define MD_MIU_TEST_AGT_ADDR_HI                0x41000098
 
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
-       0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
 
 #define CRB_NIU_XG_PAUSE_CTL_P0        0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
index 007192d7bad85fae9711198b15f31924da9f0e3e..dc1ec9b610273956c6d7848e23882961326ecadf 100644 (file)
 
 #define TIMEOUT_100_MS 100
 
+static const uint32_t qla8044_reg_tbl[] = {
+       QLA8044_PEG_HALT_STATUS1,
+       QLA8044_PEG_HALT_STATUS2,
+       QLA8044_PEG_ALIVE_COUNTER,
+       QLA8044_CRB_DRV_ACTIVE,
+       QLA8044_CRB_DEV_STATE,
+       QLA8044_CRB_DRV_STATE,
+       QLA8044_CRB_DRV_SCRATCH,
+       QLA8044_CRB_DEV_PART_INFO1,
+       QLA8044_CRB_IDC_VER_MAJOR,
+       QLA8044_FW_VER_MAJOR,
+       QLA8044_FW_VER_MINOR,
+       QLA8044_FW_VER_SUB,
+       QLA8044_CMDPEG_STATE,
+       QLA8044_ASIC_TEMP,
+};
+
 /* 8044 Flash Read/Write functions */
 uint32_t
 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
index 02fe3c4cdf5577f275d3a898e7d07d6e3e89592c..83c1b7e17c80f1affa2a5857118f44a6549d832a 100644 (file)
@@ -535,23 +535,6 @@ enum qla_regs {
 #define CRB_CMDPEG_CHECK_RETRY_COUNT    60
 #define CRB_CMDPEG_CHECK_DELAY          500
 
-static const uint32_t qla8044_reg_tbl[] = {
-       QLA8044_PEG_HALT_STATUS1,
-       QLA8044_PEG_HALT_STATUS2,
-       QLA8044_PEG_ALIVE_COUNTER,
-       QLA8044_CRB_DRV_ACTIVE,
-       QLA8044_CRB_DEV_STATE,
-       QLA8044_CRB_DRV_STATE,
-       QLA8044_CRB_DRV_SCRATCH,
-       QLA8044_CRB_DEV_PART_INFO1,
-       QLA8044_CRB_IDC_VER_MAJOR,
-       QLA8044_FW_VER_MAJOR,
-       QLA8044_FW_VER_MINOR,
-       QLA8044_FW_VER_SUB,
-       QLA8044_CMDPEG_STATE,
-       QLA8044_ASIC_TEMP,
-};
-
 /* MiniDump Structures */
 
 /* Driver_code is for driver to write some info about the entry
index 8521cfe302e9e3e72c7aaf1a4753ca75f953b972..0a000ecf0881411d4c01c1a95245d1eb9d9da771 100644 (file)
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
                        continue;
 
                rsp = ha->rsp_q_map[cnt];
-               clear_bit(cnt, ha->req_qid_map);
+               clear_bit(cnt, ha->rsp_qid_map);
                ha->rsp_q_map[cnt] =  NULL;
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                                sizeof(struct ct6_dsd), 0,
                                SLAB_HWCACHE_ALIGN, NULL);
                        if (!ctx_cachep)
-                               goto fail_free_gid_list;
+                               goto fail_free_srb_mempool;
                }
                ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
                        ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
            GFP_KERNEL);
        if (!ha->loop_id_map)
-               goto fail_async_pd;
+               goto fail_loop_id_map;
        else {
                qla2x00_set_reserved_loop_ids(ha);
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 
        return 0;
 
+fail_loop_id_map:
+       dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
        dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
 fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
        ha->ms_iocb = NULL;
        ha->ms_iocb_dma = 0;
+
+       if (ha->sns_cmd)
+               dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+                   ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
        if (IS_QLA82XX(ha) || ql2xenabledif) {
                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
        kfree(ha->nvram);
        ha->nvram = NULL;
 fail_free_ctx_mempool:
-       mempool_destroy(ha->ctx_mempool);
+       if (ha->ctx_mempool)
+               mempool_destroy(ha->ctx_mempool);
        ha->ctx_mempool = NULL;
 fail_free_srb_mempool:
-       mempool_destroy(ha->srb_mempool);
+       if (ha->srb_mempool)
+               mempool_destroy(ha->srb_mempool);
        ha->srb_mempool = NULL;
 fail_free_gid_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
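
The new fail_loop_id_map label, the sns_cmd release, and the retargeted ctx_cachep goto above all serve one invariant of the goto-unwind idiom: a failure jumps to the label that frees exactly what was allocated before it, in reverse order, never the resource that just failed. A compilable miniature with hypothetical allocations:

#include <stdlib.h>

struct ctx { void *a; void *b; };

/*
 * LIFO goto-unwind sketch: each failure path releases only what has
 * already succeeded, so inserting a new allocation means inserting
 * one new label directly above the previous one.
 */
static int ctx_setup(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto fail;

	c->b = malloc(64);
	if (!c->b)
		goto fail_free_a;	/* b failed: free a, not b */

	return 0;

fail_free_a:
	free(c->a);
fail:
	return -1;
}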
index bff9689f5ca94f56be0a9739af6bf769885f73bf..e4fda84b959eca2d52aa009c353cbe7f5ffc94bd 100644 (file)
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
-       uint32_t unpacked_lun, lun = 0;
        uint16_t loop_id;
        int res = 0;
        struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
-       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
        unsigned long flags;
 
        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
            "loop_id %d)\n", vha->host_no, sess, sess->port_name,
            mcmd, loop_id);
 
-       lun = a->u.isp24.fcp_cmnd.lun;
-       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-       return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
-           iocb, QLA24XX_MGMT_SEND_NACK);
+       return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 
        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;
-       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+       pkt->handle = QLA_TGT_SKIP_HANDLE;
 
        nack = (struct nack_to_isp *)pkt;
        nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 #if 0  /* Todo  */
                if (rc == -ENOMEM)
                        qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+               if (rc) {
+               }
 #endif
                goto done;
        }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
        if (!vha->flags.online)
                return;
 
-       while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+       while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+           fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
                pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
                cnt = pkt->u.raw.entry_count;
 
-               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
-                   ha_locked);
+               if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+                       /*
+                        * This packet is corrupted. The header + payload
+                        * can not be trusted. There is no point in passing
+                        * it further up.
+                        */
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+                           pkt->u.isp24.fcp_hdr.s_id,
+                           be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+                           le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+                       adjust_corrupted_atio(pkt);
+                       qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+               } else {
+                       qlt_24xx_atio_pkt_all_vps(vha,
+                           (struct atio_from_isp *)pkt, ha_locked);
+               }
 
                for (i = 0; i < cnt; i++) {
                        ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 
                /* Disable Full Login after LIP */
                nv->host_p &= cpu_to_le32(~BIT_10);
+
+               /*
+                * clear BIT 15 explicitly as we have seen at least
+                * a couple of instances where this was set and this
+                * was causing the firmware to not be initialized.
+                */
+               nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
                /* Enable target PRLI control */
                nv->firmware_options_2 |= cpu_to_le32(BIT_14);
        } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
                return;
        }
 
-       /* out-of-order frames reassembly */
-       nv->firmware_options_3 |= BIT_6|BIT_9;
-
        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                /* Disable ini mode, if requested */
                if (!qla_ini_mode_enabled(vha))
                        nv->firmware_options_1 |= cpu_to_le32(BIT_5);
-
                /* Disable Full Login after LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+               /*
+                * clear BIT 15 explicitly as we have seen at
+                * least a couple of instances where this was set
+                * and this was causing the firmware to not be
+                * initialized.
+                */
+               nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
                if (ql2xtgt_tape_enable)
                        /* Enable FC tape support */
                        nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                return;
        }
 
-       /* out-of-order frames reassembly */
-       nv->firmware_options_3 |= BIT_6|BIT_9;
-
        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
index f26c5f60eedd27f6dcd36a835266f5924f32a6fd..0824a8164a2494361ef12892851f020ab40de6f4 100644 (file)
@@ -427,13 +427,33 @@ struct atio_from_isp {
                struct {
                        uint8_t  entry_type;    /* Entry type. */
                        uint8_t  entry_count;   /* Entry count. */
-                       uint8_t  data[58];
+                       __le16   attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN  0x38
+                       uint8_t  data[56];
                        uint32_t signature;
 #define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
                } raw;
        } u;
 } __packed;
 
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+       if (atio->entry_type == ATIO_TYPE7 &&
+           ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+           FCP_CMD_LENGTH_MIN))
+               return 1;
+       else
+               return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+       atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+       atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 
 /*
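
For ATIO_TYPE7 entries the low 12 bits of attr_n_length carry the received FCP frame length, and 0x38 (56) bytes is the shortest frame that can hold a complete FCP_CMND, so anything smaller is treated as corrupted. A standalone sketch of the test, using host byte order for brevity (the driver converts attr_n_length from little endian first):

#include <stdint.h>
#include <stdio.h>

#define FCP_CMD_LENGTH_MASK 0x0fff
#define FCP_CMD_LENGTH_MIN  0x38

static int frame_too_short(uint16_t attr_n_length)
{
	/* attribute bits live above bit 11 and are masked off */
	return (attr_n_length & FCP_CMD_LENGTH_MASK) < FCP_CMD_LENGTH_MIN;
}

int main(void)
{
	printf("%d\n", frame_too_short(0x0020)); /* 1: truncated frame */
	printf("%d\n", frame_too_short(0x2038)); /* 0: long enough, attrs ignored */
	return 0;
}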
index 36935c9ed669513195a01ba26fd1e302773b593f..8a58ef3adab4425ba69a992dd2f51bd9357f44c9 100644 (file)
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                                count++;
                        }
                }
+       } else if (QLA_TGT_MODE_ENABLED() &&
+           ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+               struct qla_hw_data *ha = vha->hw;
+               struct atio *atr = ha->tgt.atio_ring;
+
+               if (atr || !buf) {
+                       length = ha->tgt.atio_q_length;
+                       qla27xx_insert16(0, buf, len);
+                       qla27xx_insert16(length, buf, len);
+                       qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+                       count++;
+               }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd026,
                    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                                count++;
                        }
                }
+       } else if (QLA_TGT_MODE_ENABLED() &&
+           ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+               struct qla_hw_data *ha = vha->hw;
+               struct atio *atr = ha->tgt.atio_ring_ptr;
+
+               if (atr || !buf) {
+                       qla27xx_insert16(0, buf, len);
+                       qla27xx_insert16(1, buf, len);
+                       qla27xx_insert32(ha->tgt.atio_q_in ?
+                           readl(ha->tgt.atio_q_in) : 0, buf, len);
+                       count++;
+               }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd02f,
                    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
index 6643f6fc7795bcc09fc0c3ffd79a85b907ce6d94..d925910be761dfcdc61c5c3e97bc98c4372a6cb7 100644 (file)
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
 {
        return sprintf(page,
            "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
-           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
            utsname()->machine);
 }
 
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
        int ret;
 
        pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
-           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
            utsname()->machine);
 
        ret = target_register_template(&tcm_qla2xxx_ops);
index 37e026a4823d6fdeb1048dd7f419f7ca253417c0..cf8430be183b6b529d33382386c54c2acbcd828a 100644 (file)
@@ -1,7 +1,6 @@
 #include <target/target_core_base.h>
 #include <linux/btree.h>
 
-#define TCM_QLA2XXX_VERSION    "v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN    32
 /*
index c35b6de4ca643297d1908341421c865c2cb93e84..e9e1e141af9cd287bcca730d05a7a62d58fb644a 100644 (file)
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
-       sdb->length = blk_rq_bytes(req);
+       sdb->length = blk_rq_payload_bytes(req);
        return BLKPREP_OK;
 }
 
@@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
         * request queue. 
         */
        if (q->mq_ops) {
-               blk_mq_stop_hw_queues(q);
+               blk_mq_quiesce_queue(q);
        } else {
                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
index b1933041da39d414f9c6e127052ba8cbaa25e65e..0b09638fa39be80768701f991cceccc1bee9988f 100644 (file)
@@ -836,7 +836,6 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        struct bio *bio = rq->bio;
        sector_t sector = blk_rq_pos(rq);
        unsigned int nr_sectors = blk_rq_sectors(rq);
-       unsigned int nr_bytes = blk_rq_bytes(rq);
        int ret;
 
        if (sdkp->device->no_write_same)
@@ -869,21 +868,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
        cmd->transfersize = sdp->sector_size;
        cmd->allowed = SD_MAX_RETRIES;
-
-       /*
-        * For WRITE_SAME the data transferred in the DATA IN buffer is
-        * different from the amount of data actually written to the target.
-        *
-        * We set up __data_len to the amount of data transferred from the
-        * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
-        * to transfer a single sector of data first, but then reset it to
-        * the amount of data to be written right after so that the I/O path
-        * knows how much to actually write.
-        */
-       rq->__data_len = sdp->sector_size;
-       ret = scsi_init_io(cmd);
-       rq->__data_len = nr_bytes;
-       return ret;
+       return scsi_init_io(cmd);
 }
 
 static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
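
The deleted __data_len juggling existed because blk_rq_bytes() reports the logical size of the whole WRITE SAME range while the device receives a single pattern sector; with scsi_init_sgtable() switched to blk_rq_payload_bytes() in the hunk further above, the scatterlist is sized from the bytes actually transferred. Illustrative arithmetic with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	unsigned int sector_size = 512, nr_sectors = 1024;

	/* WRITE SAME: range described vs. bytes actually moved */
	printf("range:   %u bytes\n", sector_size * nr_sectors); /* 524288 */
	printf("payload: %u bytes\n", sector_size);              /* 512 */
	return 0;
}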
@@ -2600,7 +2585,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
                if (sdp->broken_fua) {
                        sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
                        sdkp->DPOFUA = 0;
-               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+                          !sdkp->device->use_16_for_rw) {
                        sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
@@ -2783,13 +2769,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
-       sdkp->zoned = (buffer[8] >> 4) & 3;
-       if (sdkp->zoned == 1)
-               q->limits.zoned = BLK_ZONED_HA;
-       else if (sdkp->device->type == TYPE_ZBC)
+       if (sdkp->device->type == TYPE_ZBC) {
+               /* Host-managed */
                q->limits.zoned = BLK_ZONED_HM;
-       else
-               q->limits.zoned = BLK_ZONED_NONE;
+       } else {
+               sdkp->zoned = (buffer[8] >> 4) & 3;
+               if (sdkp->zoned == 1)
+                       /* Host-aware */
+                       q->limits.zoned = BLK_ZONED_HA;
+               else
+                       /*
+                        * Treat drive-managed devices as
+                        * regular block devices.
+                        */
+                       q->limits.zoned = BLK_ZONED_NONE;
+       }
        if (blk_queue_is_zoned(q) && sdkp->first_scan)
                sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
                      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
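
The reordering above makes the SCSI device type authoritative: a host-managed (TYPE_ZBC) device is classified before the ZONED field of the Block Device Characteristics VPD page is consulted, since host-managed drives need not advertise anything there. A standalone sketch of the decision, with the field layout taken from the hunk (bits 5:4 of byte 8):

#include <stdio.h>

enum zoned_model { ZONED_NONE, ZONED_HA, ZONED_HM };

static enum zoned_model classify(int is_type_zbc, unsigned char vpd_byte8)
{
	if (is_type_zbc)
		return ZONED_HM;	/* host-managed */
	switch ((vpd_byte8 >> 4) & 3) {
	case 1:
		return ZONED_HA;	/* host-aware */
	default:
		return ZONED_NONE;	/* drive-managed or not zoned */
	}
}

int main(void)
{
	printf("%d %d %d\n", classify(1, 0), classify(0, 0x10),
	       classify(0, 0));		/* 2 1 0 */
	return 0;
}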
index 8c9a35c91705e42fcbc07e3721d0522f96d496dc..50adabbb5808902aea6abfd64a39c183678a6a6f 100644 (file)
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
        ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-       if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+       if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
                efd.addr = sas_get_address(sdev);
 
        if (efd.addr) {
index 396b32dca07464dddbf575a37fd13ad1e3f92579..7cf70aaec0ba32ce9c506a84f21fb9be39bf9e64 100644 (file)
@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pool) {
                SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
 
+               ret = -ENOMEM;
                goto err_free_res;
        }
 
@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pool) {
                SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
 
+               ret = -ENOMEM;
                goto err_free_dflt_sgl_pool;
        }
 
@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (!pool) {
                SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
 
+               ret = -ENOMEM;
                goto err_free_max_sgl_pool;
        }
 
index 8823cc81ae45345bd0d632436eb4ce387d456e0b..5bb376009d98b78bd0dbf6da3e8f0853f9e6528e 100644 (file)
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
 
        if (IS_ERR(task)) {
                dev_err(dev, "can't create rproc_boot thread\n");
+               ret = PTR_ERR(task);
                goto err_put_rproc;
        }
 
index ec4aa252d6e8c1c761a47246851ad2645382516e..2922a9908302d84781f63d1b091ca6e4ddb2eba8 100644 (file)
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
 config SPI_FSL_DSPI
        tristate "Freescale DSPI controller"
        select REGMAP_MMIO
+       depends on HAS_DMA
        depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
        help
          This enables support for the Freescale DSPI controller in master
index e89da0af45d2518ef26670f7bbd50b875692872b..0314c6b9e04415b0cb792d8e9a4048a6311fd97d 100644 (file)
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct a3700_spi *spi;
        u32 num_cs = 0;
-       int ret = 0;
+       int irq, ret = 0;
 
        master = spi_alloc_master(dev, sizeof(*spi));
        if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        master->unprepare_message = a3700_spi_unprepare_message;
        master->set_cs = a3700_spi_set_cs;
        master->flags = SPI_MASTER_HALF_DUPLEX;
-       master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL |
+       master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
                              SPI_RX_QUAD | SPI_TX_QUAD);
 
        platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
                goto error;
        }
 
-       spi->irq = platform_get_irq(pdev, 0);
-       if (spi->irq < 0) {
-               dev_err(dev, "could not get irq: %d\n", spi->irq);
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "could not get irq: %d\n", irq);
                ret = -ENXIO;
                goto error;
        }
+       spi->irq = irq;
 
        init_completion(&spi->done);
 
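
The signed local exists because platform_get_irq() reports failure as a negative errno, while the driver's irq field is unsigned (assumed from the pattern this hunk works around), so assigning first and testing afterwards loses the sign and the error branch can never fire. A compilable illustration:

#include <stdio.h>

struct dev { unsigned int irq; };	/* unsigned, as assumed above */

static int fake_get_irq(void) { return -6; /* -ENXIO */ }

int main(void)
{
	struct dev d;
	int irq;

	d.irq = fake_get_irq();		/* sign silently discarded */
	printf("buggy check fires: %d\n", d.irq < 0 ? 1 : 0);	/* 0 */

	irq = fake_get_irq();		/* test in a signed local first */
	printf("fixed check fires: %d\n", irq < 0 ? 1 : 0);	/* 1 */
	if (irq >= 0)
		d.irq = irq;
	return 0;
}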
index 319225d7e761b0066a062b017c6e6f2104447859..6ab4c770022882eacc338a31b345c0d80bc1b541 100644 (file)
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
                        SPI_ENGINE_VERSION_MAJOR(version),
                        SPI_ENGINE_VERSION_MINOR(version),
                        SPI_ENGINE_VERSION_PATCH(version));
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_put_master;
        }
 
        spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
index d36c11b73a35ca656ab04e9c5ef0492f96950b32..02fb96797ac8b9ec52f41c8a13f93b290db0fc1c 100644 (file)
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                        buf = t->rx_buf;
                t->rx_dma = dma_map_single(&spi->dev, buf,
                                t->len, DMA_FROM_DEVICE);
-               if (!t->rx_dma) {
+               if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                        ret = -EFAULT;
                        goto err_rx_map;
                }
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                        buf = (void *)t->tx_buf;
                t->tx_dma = dma_map_single(&spi->dev, buf,
                                t->len, DMA_TO_DEVICE);
-               if (!t->tx_dma) {
+               if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        ret = -EFAULT;
                        goto err_tx_map;
                }
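
Both hunks replace truth-tests of the DMA handle: a handle of 0 can be a perfectly valid bus address on some platforms, so dma_mapping_error() is the only portable failure check. A minimal kernel-context sketch of the pattern (helper name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Map a buffer for device TX and report failure through the return
 * code; the handle itself must never be used as the error sentinel.
 */
static int map_tx(struct device *dev, void *buf, size_t len,
		  dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -EFAULT;
	return 0;
}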
index e31971f91475b1b3d9f1b2011e0b6e4e2ae4697b..837cb8d0bac6c9a1bd9f866192cd96950b13a68b 100644 (file)
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
 static void mid_spi_dma_stop(struct dw_spi *dws)
 {
        if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
-               dmaengine_terminate_all(dws->txchan);
+               dmaengine_terminate_sync(dws->txchan);
                clear_bit(TX_BUSY, &dws->dma_chan_busy);
        }
        if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
-               dmaengine_terminate_all(dws->rxchan);
+               dmaengine_terminate_sync(dws->rxchan);
                clear_bit(RX_BUSY, &dws->dma_chan_busy);
        }
 }
index b715a26a91484fb695088459d9249b367b334fe1..054012f875671b995141f8c549021389f21f454f 100644 (file)
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
 
 static int dw_spi_debugfs_init(struct dw_spi *dws)
 {
-       dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+       char name[128];
+
+       snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+       dws->debugfs = debugfs_create_dir(name, NULL);
        if (!dws->debugfs)
                return -ENOMEM;
 
index dd7b5b47291d551890da8e8dfc324ca74895d66b..d6239fa718be9e251f577b9d9dd792a0e5c5ead5 100644 (file)
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
                pxa2xx_spi_write(drv_data, SSCR1, tmp);
                tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
                pxa2xx_spi_write(drv_data, SSCR0, tmp);
+               break;
        default:
                tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
                      SSCR1_TxTresh(TX_THRESH_DFLT);
index 0012ad02e5696d35b547a3a698682f51a2d02819..1f00eeb0b5a3fb93ae838978dbf7815d94897378 100644 (file)
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
 };
 
 static const struct of_device_id sh_msiof_match[] = {
-       { .compatible = "renesas,sh-msiof",        .data = &sh_data },
        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
        { .compatible = "renesas,msiof-r8a7790",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7791",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7792",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7793",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7794",   .data = &r8a779x_data },
+       { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7796",   .data = &r8a779x_data },
+       { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
+       { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
        {},
 };
 MODULE_DEVICE_TABLE(of, sh_msiof_match);
index 7dfefd66df93874b1359824890b4b760275ff2c6..1cadc9eefa21a47e783160b874dbd2ce02f8f05f 100644 (file)
@@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
        case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
        case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
        case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+       case TCM_TOO_MANY_TARGET_DESCS:
+       case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+       case TCM_TOO_MANY_SEGMENT_DESCS:
+       case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
                break;
        case TCM_OUT_OF_RESOURCES:
                sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
        },
+       [TCM_TOO_MANY_TARGET_DESCS] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+       },
+       [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+       },
+       [TCM_TOO_MANY_SEGMENT_DESCS] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+       },
+       [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+               .key = ILLEGAL_REQUEST,
+               .asc = 0x26,
+               .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+       },
        [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
index 37d5caebffa6b593025a28b703a54a71e7d940d3..d828b3b5000bf421826b9823efcbac8d6b2d58a3 100644 (file)
@@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
        return 0;
 }
 
-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
-                                       bool src)
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+                                       struct se_device **found_dev)
 {
        struct se_device *se_dev;
-       unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+       unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
        int rc;
 
-       if (src)
-               dev_wwn = &xop->dst_tid_wwn[0];
-       else
-               dev_wwn = &xop->src_tid_wwn[0];
-
        mutex_lock(&g_device_mutex);
        list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
 
@@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                if (rc != 0)
                        continue;
 
-               if (src) {
-                       xop->dst_dev = se_dev;
-                       pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
-                               " se_dev\n", xop->dst_dev);
-               } else {
-                       xop->src_dev = se_dev;
-                       pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
-                               " se_dev\n", xop->src_dev);
-               }
+               *found_dev = se_dev;
+               pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
 
                rc = target_depend_item(&se_dev->dev_group.cg_item);
                if (rc != 0) {
@@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
 }
 
 static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
-                               unsigned char *p, bool src)
+                               unsigned char *p, unsigned short cscd_index)
 {
        unsigned char *desc = p;
        unsigned short ript;
@@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
                return -EINVAL;
        }
 
-       if (src) {
+       if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
+               pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
+                        "dest\n", cscd_index);
+               return 0;
+       }
+
+       if (cscd_index == xop->stdi) {
                memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
                /*
                 * Determine if the source designator matches the local device
@@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
                        pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
                                        " received xop\n", xop->src_dev);
                }
-       } else {
+       }
+
+       if (cscd_index == xop->dtdi) {
                memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
                /*
-                * Determine if the destination designator matches the local device
+                * Determine if the destination designator matches the local
+                * device. If @cscd_index corresponds to both source (stdi) and
+                * destination (dtdi), or dtdi comes after stdi, then
+                * XCOL_DEST_RECV_OP wins.
                 */
                if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
                                XCOPY_NAA_IEEE_REGEX_LEN)) {
@@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 {
        struct se_device *local_dev = se_cmd->se_dev;
        unsigned char *desc = p;
-       int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+       int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
+       unsigned short cscd_index = 0;
        unsigned short start = 0;
-       bool src = true;
 
        *sense_ret = TCM_INVALID_PARAMETER_LIST;
 
        if (offset != 0) {
                pr_err("XCOPY target descriptor list length is not"
                        " multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+               *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
                return -EINVAL;
        }
-       if (tdll > 64) {
+       if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
                pr_err("XCOPY target descriptor supports a maximum"
                        " two src/dest descriptors, tdll: %hu too large..\n", tdll);
+               /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
+               *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
                return -EINVAL;
        }
        /*
@@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
 
        while (start < tdll) {
                /*
-                * Check target descriptor identification with 0xE4 type with
-                * use VPD 0x83 WWPN matching ..
+                * Check target descriptor identification with 0xE4 type, and
+                * compare the current index with the CSCD descriptor IDs in
+                * the segment descriptor. Use VPD 0x83 WWPN matching ..
                 */
                switch (desc[0]) {
                case 0xe4:
                        rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
-                                                       &desc[0], src);
+                                                       &desc[0], cscd_index);
                        if (rc != 0)
                                goto out;
-                       /*
-                        * Assume target descriptors are in source -> destination order..
-                        */
-                       if (src)
-                               src = false;
-                       else
-                               src = true;
                        start += XCOPY_TARGET_DESC_LEN;
                        desc += XCOPY_TARGET_DESC_LEN;
-                       ret++;
+                       cscd_index++;
                        break;
                default:
                        pr_err("XCOPY unsupported descriptor type code:"
                                        " 0x%02x\n", desc[0]);
+                       *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
                        goto out;
                }
        }
 
-       if (xop->op_origin == XCOL_SOURCE_RECV_OP)
-               rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
-       else
-               rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+       switch (xop->op_origin) {
+       case XCOL_SOURCE_RECV_OP:
+               rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
+                                               &xop->dst_dev);
+               break;
+       case XCOL_DEST_RECV_OP:
+               rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
+                                               &xop->src_dev);
+               break;
+       default:
+               pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+                       "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
+               rc = -EINVAL;
+               break;
+       }
        /*
         * If a matching IEEE NAA 0x83 descriptor for the requested device
         * is not located on this node, return COPY_ABORTED with ASQ/ASQC
@@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
        pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
                 xop->dst_dev, &xop->dst_tid_wwn[0]);
 
-       return ret;
+       return cscd_index;
 
 out:
        return -EINVAL;
@@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
 
        xop->stdi = get_unaligned_be16(&desc[4]);
        xop->dtdi = get_unaligned_be16(&desc[6]);
+
+       if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
+           xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
+               pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
+                       XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
+               return -EINVAL;
+       }
+
        pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
                desc_len, xop->stdi, xop->dtdi, dc);
 
@@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
 
 static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
                                struct xcopy_op *xop, unsigned char *p,
-                               unsigned int sdll)
+                               unsigned int sdll, sense_reason_t *sense_ret)
 {
        unsigned char *desc = p;
        unsigned int start = 0;
        int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
 
+       *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
        if (offset != 0) {
                pr_err("XCOPY segment descriptor list length is not"
                        " multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+               *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+               return -EINVAL;
+       }
+       if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
+               pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
+                       " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
+               /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
+               *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
                return -EINVAL;
        }
 
@@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
                default:
                        pr_err("XCOPY unsupported segment descriptor"
                                " type: 0x%02x\n", desc[0]);
+                       *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
                        goto out;
                }
        }
@@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }
 
+       if (se_cmd->data_length == 0) {
+               target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+               return TCM_NO_SENSE;
+       }
+       if (se_cmd->data_length < XCOPY_HDR_LEN) {
+               pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+                               se_cmd->data_length, XCOPY_HDR_LEN);
+               return TCM_PARAMETER_LIST_LENGTH_ERROR;
+       }
+
        xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
        if (!xop) {
                pr_err("Unable to allocate xcopy_op\n");
@@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
         */
        tdll = get_unaligned_be16(&p[2]);
        sdll = get_unaligned_be32(&p[8]);
+       if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
+               pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
+                      tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
+               ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+               goto out;
+       }
 
        inline_dl = get_unaligned_be32(&p[12]);
        if (inline_dl != 0) {
@@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
                goto out;
        }
 
+       if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
+               pr_err("XCOPY parameter truncation: data length %u too small "
+                       "for tdll: %hu sdll: %u inline_dl: %u\n",
+                       se_cmd->data_length, tdll, sdll, inline_dl);
+               ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+               goto out;
+       }
+
        pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
                " tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
                tdll, sdll, inline_dl);
 
+       /*
+        * skip over the target descriptors until segment descriptors
+        * have been passed - CSCD ids are needed to determine src and dest.
+        */
+       seg_desc = &p[16] + tdll;
+
+       rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
+                                                   sdll, &ret);
+       if (rc <= 0)
+               goto out;
+
+       pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+                               rc * XCOPY_SEGMENT_DESC_LEN);
+
        rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
        if (rc <= 0)
                goto out;
@@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
 
        pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
                                rc * XCOPY_TARGET_DESC_LEN);
-       seg_desc = &p[16];
-       seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
-
-       rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
-       if (rc <= 0) {
-               xcopy_pt_undepend_remotedev(xop);
-               goto out;
-       }
        transport_kunmap_data_sg(se_cmd);
 
-       pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
-                               rc * XCOPY_SEGMENT_DESC_LEN);
        INIT_WORK(&xop->xop_work, target_xcopy_do_work);
        queue_work(xcopy_wq, &xop->xop_work);
        return TCM_NO_SENSE;
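
Two things drive the restructuring above: segment descriptors are now parsed before target descriptors, because they carry the stdi/dtdi CSCD indices that tell the target parser which entries are source and destination, and every advertised length is bounded before any descriptor is dereferenced. A hedged sketch of those bounds checks (helper name hypothetical, constants as defined in the xcopy.h hunk below):

#include <linux/errno.h>
#include <linux/types.h>

#define XCOPY_HDR_LEN		16
#define XCOPY_TARGET_DESC_LEN	32
#define XCOPY_SEGMENT_DESC_LEN	28

static int xcopy_lengths_ok(u32 data_length, u16 tdll, u32 sdll,
			    u32 inline_dl)
{
	if (data_length < XCOPY_HDR_LEN)
		return -EINVAL;
	if (tdll % XCOPY_TARGET_DESC_LEN || sdll % XCOPY_SEGMENT_DESC_LEN)
		return -EINVAL;
	/* descriptors must fit inside the transferred parameter list */
	if ((u64)XCOPY_HDR_LEN + tdll + sdll + inline_dl > data_length)
		return -EINVAL;
	return 0;
}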
index 4d3d4dd060f28366ebd069abb603472ae0275d5b..7c0b105cbe1b486062c40d97280059a286f59bf8 100644 (file)
@@ -1,10 +1,17 @@
 #include <target/target_core_base.h>
 
+#define XCOPY_HDR_LEN                  16
 #define XCOPY_TARGET_DESC_LEN          32
 #define XCOPY_SEGMENT_DESC_LEN         28
 #define XCOPY_NAA_IEEE_REGEX_LEN       16
 #define XCOPY_MAX_SECTORS              1024
 
+/*
+ * SPC4r37 6.4.6.1
+ * Table 150 - CSCD descriptor ID values
+ */
+#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX        0x07FF
+
 enum xcopy_origin_list {
        XCOL_SOURCE_RECV_OP = 0x01,
        XCOL_DEST_RECV_OP = 0x02,
index b811b0fb61b1381fba45440f963030aba9ee509a..4c779651245351ea488cf20dec87199d755eb135 100644 (file)
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
        void (*control)(void __iomem *reg, bool on);
 
        /* Per-sensor methods */
-       int (*get_temp)(struct chip_tsadc_table table,
+       int (*get_temp)(const struct chip_tsadc_table *table,
                        int chn, void __iomem *reg, int *temp);
-       void (*set_alarm_temp)(struct chip_tsadc_table table,
-                              int chn, void __iomem *reg, int temp);
-       void (*set_tshut_temp)(struct chip_tsadc_table table,
-                              int chn, void __iomem *reg, int temp);
+       int (*set_alarm_temp)(const struct chip_tsadc_table *table,
+                             int chn, void __iomem *reg, int temp);
+       int (*set_tshut_temp)(const struct chip_tsadc_table *table,
+                             int chn, void __iomem *reg, int temp);
        void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
 
        /* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
        {3452, 115000},
        {3437, 120000},
        {3421, 125000},
+       {0, 125000},
 };
 
 static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
        {TSADCV3_DATA_MASK, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
                                   int temp)
 {
        int high, low, mid;
-       u32 error = 0;
+       unsigned long num;
+       unsigned int denom;
+       u32 error = table->data_mask;
 
        low = 0;
-       high = table.length - 1;
+       high = (table->length - 1) - 1; /* ignore the last entry; it only terminates the table */
        mid = (high + low) / 2;
 
        /* Return mask code data when the temp is over table range */
-       if (temp < table.id[low].temp || temp > table.id[high].temp) {
-               error = table.data_mask;
+       if (temp < table->id[low].temp || temp > table->id[high].temp)
                goto exit;
-       }
 
        while (low <= high) {
-               if (temp == table.id[mid].temp)
-                       return table.id[mid].code;
-               else if (temp < table.id[mid].temp)
+               if (temp == table->id[mid].temp)
+                       return table->id[mid].code;
+               else if (temp < table->id[mid].temp)
                        high = mid - 1;
                else
                        low = mid + 1;
                mid = (low + high) / 2;
        }
 
+       /*
+        * The conversion code granularity provided by the table. Let's
+        * assume that the relationship between temperature and
+        * analog value between 2 table entries is linear and interpolate
+        * to produce less granular result.
+        */
+       num = abs(table->id[mid + 1].code - table->id[mid].code);
+       num *= temp - table->id[mid].temp;
+       denom = table->id[mid + 1].temp - table->id[mid].temp;
+
+       switch (table->mode) {
+       case ADC_DECREMENT:
+               return table->id[mid].code - (num / denom);
+       case ADC_INCREMENT:
+               return table->id[mid].code + (num / denom);
+       default:
+               pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+               return error;
+       }
+
 exit:
-       pr_err("Invalid the conversion, error=%d\n", error);
+       pr_err("%s: invalid temperature, temp=%d error=%d\n",
+              __func__, temp, error);
        return error;
 }
 
-static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
-                                  int *temp)
+static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
+                                  u32 code, int *temp)
 {
        unsigned int low = 1;
-       unsigned int high = table.length - 1;
+       unsigned int high = table->length - 1;
        unsigned int mid = (low + high) / 2;
        unsigned int num;
        unsigned long denom;
 
-       WARN_ON(table.length < 2);
+       WARN_ON(table->length < 2);
 
-       switch (table.mode) {
+       switch (table->mode) {
        case ADC_DECREMENT:
-               code &= table.data_mask;
-               if (code < table.id[high].code)
+               code &= table->data_mask;
+               if (code <= table->id[high].code)
                        return -EAGAIN;         /* Incorrect reading */
 
                while (low <= high) {
-                       if (code >= table.id[mid].code &&
-                           code < table.id[mid - 1].code)
+                       if (code >= table->id[mid].code &&
+                           code < table->id[mid - 1].code)
                                break;
-                       else if (code < table.id[mid].code)
+                       else if (code < table->id[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
                }
                break;
        case ADC_INCREMENT:
-               code &= table.data_mask;
-               if (code < table.id[low].code)
+               code &= table->data_mask;
+               if (code < table->id[low].code)
                        return -EAGAIN;         /* Incorrect reading */
 
                while (low <= high) {
-                       if (code <= table.id[mid].code &&
-                           code > table.id[mid - 1].code)
+                       if (code <= table->id[mid].code &&
+                           code > table->id[mid - 1].code)
                                break;
-                       else if (code > table.id[mid].code)
+                       else if (code > table->id[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
                }
                break;
        default:
-               pr_err("Invalid the conversion table\n");
+               pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+               return -EINVAL;
        }
 
        /*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
         * temperature between 2 table entries is linear and interpolate
         * to produce less granular result.
         */
-       num = table.id[mid].temp - table.id[mid - 1].temp;
-       num *= abs(table.id[mid - 1].code - code);
-       denom = abs(table.id[mid - 1].code - table.id[mid].code);
-       *temp = table.id[mid - 1].temp + (num / denom);
+       num = table->id[mid].temp - table->id[mid - 1].temp;
+       num *= abs(table->id[mid - 1].code - code);
+       denom = abs(table->id[mid - 1].code - table->id[mid].code);
+       *temp = table->id[mid - 1].temp + (num / denom);
 
        return 0;
 }
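
Both conversion directions binary-search the calibration table for the bracketing pair of entries and then interpolate linearly between them. A standalone sketch of the code-to-temperature direction for an ADC_INCREMENT table (values illustrative, not a real calibration):

#include <stdio.h>

struct entry { unsigned int code; int temp; /* millicelsius */ };

static int code_to_temp(const struct entry *t, int len, unsigned int code,
			int *temp)
{
	int low = 1, high = len - 1, mid = (low + high) / 2;

	if (len < 2 || code < t[1].code || code > t[len - 1].code)
		return -1;	/* outside the calibrated range */

	while (low <= high) {
		if (code <= t[mid].code && code > t[mid - 1].code)
			break;	/* bracketed by t[mid-1] .. t[mid] */
		else if (code > t[mid].code)
			low = mid + 1;
		else
			high = mid - 1;
		mid = (low + high) / 2;
	}

	/* linear interpolation between the two bracketing entries */
	*temp = t[mid - 1].temp +
		(t[mid].temp - t[mid - 1].temp) *
		(int)(code - t[mid - 1].code) /
		(int)(t[mid].code - t[mid - 1].code);
	return 0;
}

int main(void)
{
	static const struct entry tbl[] = {
		{ 100, 0 }, { 200, 25000 }, { 300, 50000 },
	};
	int temp;

	if (!code_to_temp(tbl, 3, 250, &temp))
		printf("%d\n", temp);	/* 37500 */
	return 0;
}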
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
        writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
                               int chn, void __iomem *regs, int *temp)
 {
        u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
        return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
-                                 int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
+                                int chn, void __iomem *regs, int temp)
 {
-       u32 alarm_value, int_en;
+       u32 alarm_value;
+       u32 int_en, int_clr;
+
+       /*
+        * Some sensors do not need trip points; for those, set_trips()
+        * is called with {-INT_MAX, INT_MAX}. Ignore that case and
+        * disable the high temperature interrupt.
+        */
+       if (temp == INT_MAX) {
+               int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
+               int_clr &= ~TSADCV2_INT_SRC_EN(chn);
+               writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
+               return 0;
+       }
 
        /* Make sure the value is valid */
        alarm_value = rk_tsadcv2_temp_to_code(table, temp);
-       if (alarm_value == table.data_mask)
-               return;
+       if (alarm_value == table->data_mask)
+               return -ERANGE;
 
-       writel_relaxed(alarm_value & table.data_mask,
+       writel_relaxed(alarm_value & table->data_mask,
                       regs + TSADCV2_COMP_INT(chn));
 
        int_en = readl_relaxed(regs + TSADCV2_INT_EN);
        int_en |= TSADCV2_INT_SRC_EN(chn);
        writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+
+       return 0;
 }
 
-static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
-                                 int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
+                                int chn, void __iomem *regs, int temp)
 {
        u32 tshut_value, val;
 
        /* Make sure the value is valid */
        tshut_value = rk_tsadcv2_temp_to_code(table, temp);
-       if (tshut_value == table.data_mask)
-               return;
+       if (tshut_value == table->data_mask)
+               return -ERANGE;
 
        writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
        /* TSHUT will be valid */
        val = readl_relaxed(regs + TSADCV2_AUTO_CON);
        writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+
+       return 0;
 }
 
 static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
        dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
                __func__, sensor->id, low, high);
 
-       tsadc->set_alarm_temp(tsadc->table,
-                             sensor->id, thermal->regs, high);
-
-       return 0;
+       return tsadc->set_alarm_temp(&tsadc->table,
+                                    sensor->id, thermal->regs, high);
 }
 
 static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
        const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
        int retval;
 
-       retval = tsadc->get_temp(tsadc->table,
+       retval = tsadc->get_temp(&tsadc->table,
                                 sensor->id, thermal->regs, out_temp);
        dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
                sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
        int error;
 
        tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
-       tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+
+       error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
                              thermal->tshut_temp);
+       if (error)
+               dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+                       __func__, thermal->tshut_temp, error);
 
        sensor->thermal = thermal;
        sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
 
                thermal->chip->set_tshut_mode(id, thermal->regs,
                                              thermal->tshut_mode);
-               thermal->chip->set_tshut_temp(thermal->chip->table,
+
+               error = thermal->chip->set_tshut_temp(&thermal->chip->table,
                                              id, thermal->regs,
                                              thermal->tshut_temp);
+               if (error)
+                       dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+                               __func__, thermal->tshut_temp, error);
        }
 
        thermal->chip->control(thermal->regs, true);
index 641faab6e24b50fef4d70d3334edfc49e0ab0ce3..655591316a881274a6d152801ffd19138a0dc34b 100644 (file)
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
        if (!strncmp(dev_name(dev), "thermal_zone",
                     sizeof("thermal_zone") - 1)) {
                tz = to_thermal_zone(dev);
+               kfree(tz->trip_type_attrs);
+               kfree(tz->trip_temp_attrs);
+               kfree(tz->trip_hyst_attrs);
+               kfree(tz->trips_attribute_group.attrs);
+               kfree(tz->device.groups);
                kfree(tz);
        } else if (!strncmp(dev_name(dev), "cooling_device",
                            sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
        thermal_zone_device_set_polling(tz, 0);
 
-       kfree(tz->trip_type_attrs);
-       kfree(tz->trip_temp_attrs);
-       kfree(tz->trip_hyst_attrs);
-       kfree(tz->trips_attribute_group.attrs);
        thermal_set_governor(tz, NULL);
 
        thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
        idr_destroy(&tz->idr);
        mutex_destroy(&tz->lock);
        device_unregister(&tz->device);
-       kfree(tz->device.groups);
 }
 EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
 
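
Moving the kfree() calls into thermal_release() closes a window in which the attribute arrays were freed while another holder could still drop the last reference to the device and touch them. The rule being restored, as a kernel-context sketch: memory reachable through a refcounted struct device is freed only in its release callback, after the final put.

#include <linux/device.h>
#include <linux/slab.h>

struct tz {
	struct device dev;
	struct attribute **trip_attrs;
};

static void tz_release(struct device *dev)
{
	struct tz *tz = container_of(dev, struct tz, dev);

	kfree(tz->trip_attrs);	/* safe: the last reference is gone */
	kfree(tz);
}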
index 541af5946203bf08e9a2c38a269661e1df81d417..c4a508a124dc2b9dca9e7147c3d8dedc69c5016d 100644 (file)
@@ -58,14 +58,6 @@ static LIST_HEAD(thermal_hwmon_list);
 
 static DEFINE_MUTEX(thermal_hwmon_list_lock);
 
-static ssize_t
-name_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct thermal_hwmon_device *hwmon = dev_get_drvdata(dev);
-       return sprintf(buf, "%s\n", hwmon->type);
-}
-static DEVICE_ATTR_RO(name);
-
 static ssize_t
 temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
@@ -165,15 +157,12 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&hwmon->tz_list);
        strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
-       hwmon->device = hwmon_device_register(NULL);
+       hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
+                                                       hwmon, NULL, NULL);
        if (IS_ERR(hwmon->device)) {
                result = PTR_ERR(hwmon->device);
                goto free_mem;
        }
-       dev_set_drvdata(hwmon->device, hwmon);
-       result = device_create_file(hwmon->device, &dev_attr_name);
-       if (result)
-               goto free_mem;
 
  register_sys_interface:
        temp = kzalloc(sizeof(*temp), GFP_KERNEL);
@@ -222,10 +211,8 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
  free_temp_mem:
        kfree(temp);
  unregister_name:
-       if (new_hwmon_device) {
-               device_remove_file(hwmon->device, &dev_attr_name);
+       if (new_hwmon_device)
                hwmon_device_unregister(hwmon->device);
-       }
  free_mem:
        if (new_hwmon_device)
                kfree(hwmon);
@@ -267,7 +254,6 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        list_del(&hwmon->node);
        mutex_unlock(&thermal_hwmon_list_lock);
 
-       device_remove_file(hwmon->device, &dev_attr_name);
        hwmon_device_unregister(hwmon->device);
        kfree(hwmon);
 }
index 61569a765d9ee17213052db6f0d3a1c41d4d9293..76e03a7de9cc3d790a230948f599ef2db4b93249 100644 (file)
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
        .device         = uart_console_device,
        .setup          = univ8250_console_setup,
        .match          = univ8250_console_match,
-       .flags          = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
+       .flags          = CON_PRINTBUFFER | CON_ANYTIME,
        .index          = -1,
        .data           = &serial8250_reg,
 };
index aa0166b6d450dcadc85b89d3939c7ae3dd4f0cbc..116436b7fa52a96c740e025d7b5c613e1d365fa8 100644 (file)
@@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
 static void serial8250_io_resume(struct pci_dev *dev)
 {
        struct serial_private *priv = pci_get_drvdata(dev);
-       const struct pciserial_board *board;
+       struct serial_private *new;
 
        if (!priv)
                return;
 
-       board = priv->board;
-       kfree(priv);
-       priv = pciserial_init_ports(dev, board);
-
-       if (!IS_ERR(priv)) {
-               pci_set_drvdata(dev, priv);
+       new = pciserial_init_ports(dev, priv->board);
+       if (!IS_ERR(new)) {
+               pci_set_drvdata(dev, new);
+               kfree(priv);
        }
 }
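
The io_resume fix above only frees the old serial_private after pciserial_init_ports() has produced a valid replacement, so a failed resume no longer leaves a freed pointer in drvdata. The same allocate-before-free swap in a self-contained, hedged form (all names here are illustrative):

    #include <stdlib.h>

    struct state { int ports; };

    /* Illustrative stand-in for pciserial_init_ports(); may fail. */
    static struct state *state_create(void)
    {
            return calloc(1, sizeof(struct state));
    }

    /* Install new state; on failure the old, still-valid state is kept. */
    static void resume(struct state **slot)
    {
            struct state *new = state_create();

            if (!new)
                    return;         /* nothing freed, nothing dangling */

            struct state *old = *slot;

            *slot = new;
            free(old);              /* release old only after the swap */
    }

    int main(void)
    {
            struct state *s = state_create();

            resume(&s);
            free(s);
            return 0;
    }
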
 
index fe4399b41df6cea4904edcee4749bb67532f7e18..c13fec451d03db3bfc382d77ea3b1fdf7b3d04e1 100644 (file)
@@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
         * Enable previously disabled RX interrupts.
         */
        if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
-               serial8250_clear_fifos(p);
+               serial8250_clear_and_reinit_fifos(p);
 
                p->ier |= UART_IER_RLSI | UART_IER_RDI;
                serial_port_out(&p->port, UART_IER, p->ier);
index 168b10cad47b5437c2152313fcad026e2747300a..fabbe76203bb76a541775792828d3a8662c78348 100644 (file)
@@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port)
                /* disable PDC transmit */
                atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
        }
+
+       /*
+        * Disable the transmitter.
+        * This is mandatory when DMA is used; otherwise, the DMA
+        * transfer would keep running and the buffer would still be
+        * fully transmitted.
+        */
+       atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+
        /* Disable interrupts */
        atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
@@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port)
 
        /* Enable interrupts */
        atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+
+       /* re-enable the transmitter */
+       atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
 }
 
 /*
@@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg)
         */
        if (!uart_circ_empty(xmit))
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
+       else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+                !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+               /* DMA done, stop TX, start RX for RS485 */
+               atmel_start_rx(port);
+       }
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port)
                desc->callback = atmel_complete_tx_dma;
                desc->callback_param = atmel_port;
                atmel_port->cookie_tx = dmaengine_submit(desc);
-
-       } else {
-               if (port->rs485.flags & SER_RS485_ENABLED) {
-                       /* DMA done, stop TX, start RX for RS485 */
-                       atmel_start_rx(port);
-               }
        }
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
index 52bbd27e93ae7a83af92489b6d27fe0309437afe..701c085bb19b8e03f67316133fa5a4be7e0b05fe 100644 (file)
@@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
                                INPUT_DEVICE_ID_MATCH_KEYBIT,
-               .evbit = { BIT_MASK(EV_KEY) },
-               .keybit = { BIT_MASK(KEY_LEFTALT) },
+               .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+               .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
        },
        { },
 };
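
The sysrq change above matters because evbit/keybit are arrays of longs: BIT_MASK() only yields the bit position within one word, while BIT_WORD() selects which word. KEY_LEFTALT is 56, so on 32-bit kernels the mask belongs in word 1 and the old initializer silently put it in word 0. A small runnable illustration of the same macros (userspace re-definitions for clarity):

    #include <stdio.h>

    #define BITS_PER_LONG   (8 * sizeof(long))
    #define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
    #define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
    #define KEY_LEFTALT     56

    int main(void)
    {
            unsigned long keybit[2] = {
                    /* Without the designator this lands in word 0. */
                    [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT),
            };

            printf("keybit[%zu] = %#lx\n",
                   (size_t)BIT_WORD(KEY_LEFTALT),
                   keybit[BIT_WORD(KEY_LEFTALT)]);
            return 0;
    }
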
index 9548d3e03453db062042600668840d5b4676e649..302b8f5f7d27d26b264bbaeb0eb9a2756362c482 100644 (file)
@@ -513,8 +513,8 @@ struct dwc2_core_params {
        /* Gadget parameters */
        bool g_dma;
        bool g_dma_desc;
-       u16 g_rx_fifo_size;
-       u16 g_np_tx_fifo_size;
+       u32 g_rx_fifo_size;
+       u32 g_np_tx_fifo_size;
        u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
index c55db4aa54d677c77fb2a0c9bcff7d38ac7d1b9e..77c5fcf3a5bf7f101c51225f3c21a90fc7e59dcf 100644 (file)
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        /* keep other bits untouched (so e.g. forced modes are not lost) */
        usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
        usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
-               GUSBCFG_HNPCAP);
+               GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
 
        if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
            (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                __func__, epctrl, epctrl_reg);
 
        /* Allocate DMA descriptor chain for non-ctrl endpoints */
-       if (using_desc_dma(hsotg)) {
-               hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+       if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+               hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
                        MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
                        &hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
 
 error2:
        if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
-               dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+               dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
                        hs_ep->desc_list, hs_ep->desc_list_dma);
                hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
                return -EINVAL;
        }
 
-       /* Remove DMA memory allocated for non-control Endpoints */
-       if (using_desc_dma(hsotg)) {
-               dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
-                                 sizeof(struct dwc2_dma_desc),
-                                 hs_ep->desc_list, hs_ep->desc_list_dma);
-               hs_ep->desc_list = NULL;
-       }
-
        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 
        spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
        /* keep other bits untouched (so e.g. forced modes are not lost) */
        usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
        usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
-               GUSBCFG_HNPCAP);
+               GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
 
        /* set the PLL on, remove the HNP/SRP and set the PHY */
        trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
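
Two related dwc2 changes above: dwc2_hsotg_ep_enable() now allocates the descriptor chain only once and through the managed dmam_alloc_coherent(), and ep_disable no longer frees it, leaving the release to devres when the device goes away. A hedged sketch of that allocation pattern; the demo structure is hypothetical:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    struct demo_desc { u32 status; u32 buf; };

    static int demo_alloc_ring(struct device *dev, struct demo_desc **ring,
                               dma_addr_t *ring_dma, unsigned int n)
    {
            if (*ring)      /* already allocated on an earlier enable */
                    return 0;

            /*
             * devres-managed: freed automatically when the driver
             * detaches, so the disable path needs no explicit free.
             */
            *ring = dmam_alloc_coherent(dev, n * sizeof(**ring),
                                        ring_dma, GFP_ATOMIC);
            return *ring ? 0 : -ENOMEM;
    }
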
index 911c3b36ac067028acdaa5d53bd0c9e8702f8942..46d0ad5105e40e5818e590a957072a48066c5ae0 100644 (file)
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
        if (!HCD_HW_ACCESSIBLE(hcd))
                goto unlock;
 
+       if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+               goto unlock;
+
        if (!hsotg->params.hibernation)
                goto skip_power_saving;
 
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
 {
 #ifdef VERBOSE_DEBUG
        struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
-       char *pipetype;
-       char *speed;
+       char *pipetype = NULL;
+       char *speed = NULL;
 
        dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
        dev_vdbg(hsotg->dev, "  Device address: %d\n",
index 11fe68a4627bd315c8e82dbd3398b2dfde9bde8c..bcd1e19b40768679a0cee673be38587c8fdd11b0 100644 (file)
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
 }
 
 /**
- * dwc2_set_param_u16() - Set a u16 parameter
+ * dwc2_set_param_u32() - Set a u32 parameter
  *
  * See dwc2_set_param().
  */
-static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
                               bool lookup, char *property, u16 legacy,
                               u16 def, u16 min, u16 max)
 {
        dwc2_set_param(hsotg, param, lookup, property,
-                      legacy, def, min, max, 2);
+                      legacy, def, min, max, 4);
 }
 
 /**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
                 * auto-detect if the hardware does not support the
                 * default.
                 */
-               dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+               dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
                                   true, "g-rx-fifo-size", 2048,
                                   hw->rx_fifo_size,
                                   16, hw->rx_fifo_size);
 
-               dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+               dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
                                   true, "g-np-tx-fifo-size", 1024,
                                   hw->dev_nperio_tx_fifo_size,
                                   16, hw->dev_nperio_tx_fifo_size);
index e27899bb57064b94811fae851587b06b743128ff..e956306d9b0f834e52632f09fe2af5f2ae913bc0 100644 (file)
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
                exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
                if (IS_ERR(exynos->axius_clk)) {
                        dev_err(dev, "no AXI UpScaler clk specified\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto axius_clk_err;
                }
                clk_prepare_enable(exynos->axius_clk);
        } else {
@@ -196,6 +197,7 @@ err3:
        regulator_disable(exynos->vdd33);
 err2:
        clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
        clk_disable_unprepare(exynos->susp_clk);
        clk_disable_unprepare(exynos->clk);
        return ret;
index 002822d98fda207505581ca75cb47373db81fb78..49d685ad0da90d1a1282dd9d25f31ad64db22087 100644 (file)
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
        cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
        if (!cdev->os_desc_req->buf) {
                ret = -ENOMEM;
-               kfree(cdev->os_desc_req);
+               usb_ep_free_request(ep0, cdev->os_desc_req);
                goto end;
        }
        cdev->os_desc_req->context = cdev;
index 5e746adc8a2d5416b7e1bcbeb8c41559716599b4..5490fc51638ede3c565eff9036ff3beaf884d3a9 100644 (file)
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
        unsigned long flags;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                /* pending requests get nuked */
                if (likely(ep->ep))
                        usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
                        __ffs_epfile_read_buffer_free(epfile);
                        ++epfile;
                }
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 }
 
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
        int ret = 0;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                struct usb_endpoint_descriptor *ds;
                int desc_idx;
 
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
 
                ++ep;
                ++epfile;
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
        return ret;
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
 
        /* cleanup after autoconfig */
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                if (ep->ep && ep->req)
                        usb_ep_free_request(ep->ep, ep->req);
                ep->req = NULL;
                ++ep;
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
        kfree(func->eps);
        func->eps = NULL;
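
All three f_fs loops above move from do { } while (--count) to while (count--) because count is unsigned: the do/while form runs its body at least once, and when count starts at 0 the --count wraps around. A tiny runnable demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int count = 0;
            unsigned int runs = 0;

            /* Safe form: the test happens before the first pass, so the
             * body never runs for count == 0 (count itself wraps, but it
             * is not used afterwards). */
            while (count--)
                    runs++;

            printf("body ran %u times\n", runs);    /* prints 0 */

            /*
             * The old "do { runs++; } while (--count);" would have run
             * once and then kept looping: --count turns 0 into UINT_MAX.
             */
            return 0;
    }
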
index f3212db9bc37bf1889c482e86c9c679720766b1f..12c7687216e62f3b88dc47546ec12d5d52efdff5 100644 (file)
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
                        dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
                        goto err;
                }
-               ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+               sprintf(ep->name, "ep%d", ep->index);
+               ep->ep.name = ep->name;
 
                ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
                ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
index 3e1c9d589dfa34ade18fffa7e300a4c9dfcc1563..b03b2ebfc53a3cdc2e5e7c1731beb9221ea7ea4c 100644 (file)
@@ -280,6 +280,7 @@ struct usba_ep {
        void __iomem                            *ep_regs;
        void __iomem                            *dma_regs;
        void __iomem                            *fifo;
+       char                                    name[8];
        struct usb_ep                           ep;
        struct usba_udc                         *udc;
 
index ddfab301e36658adccde76b9a47141b39a3508c7..e5834dd9bcdedb246a51be1b35fa70c9ed920b61 100644 (file)
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
                return -ENODEV;
 
        /* Try to set 64-bit DMA first */
-       if (WARN_ON(!pdev->dev.dma_mask))
+       if (!pdev->dev.dma_mask)
                /* Platform did not initialize dma_mask */
                ret = dma_coerce_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(64));
index 25f522b09dd9746938168e6f675464db9f2e6740..e32029a31ca4d2ed5856c18e6e10b4b40de1132a 100644 (file)
@@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
        spin_lock_irqsave(&xhci->lock, flags);
 
        ep->stop_cmds_pending--;
-       if (xhci->xhc_state & XHCI_STATE_REMOVING) {
-               spin_unlock_irqrestore(&xhci->lock, flags);
-               return;
-       }
-       if (xhci->xhc_state & XHCI_STATE_DYING) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-                               "Stop EP timer ran, but another timer marked "
-                               "xHCI as DYING, exiting.");
-               spin_unlock_irqrestore(&xhci->lock, flags);
-               return;
-       }
        if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "Stop EP timer ran, but no command pending, "
index 0c8deb9ed42def112efc8ad1aaf95526315c164f..9a0ec116654acd27740d497c76ba1a6bb6ab6dd9 100644 (file)
@@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                xhci_urb_free_priv(urb_priv);
                return ret;
        }
-       if ((xhci->xhc_state & XHCI_STATE_DYING) ||
-                       (xhci->xhc_state & XHCI_STATE_HALTED)) {
-               xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-                               "Ep 0x%x: URB %p to be canceled on "
-                               "non-responsive xHCI host.",
-                               urb->ep->desc.bEndpointAddress, urb);
-               /* Let the stop endpoint command watchdog timer (which set this
-                * state) finish cleaning up the endpoint TD lists.  We must
-                * have caught it in the middle of dropping a lock and giving
-                * back an URB.
-                */
-               goto done;
-       }
 
        ep_index = xhci_get_endpoint_index(&urb->ep->desc);
        ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
index 4fef50e5c8c1295364aca0a28ba9cf8da005ac21..dd70c88419d20798b738c3da2df009887fdfec7f 100644 (file)
@@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
        unsigned                i;
 
        seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+       pm_runtime_get_sync(musb->controller);
 
        for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
                switch (musb_regmap[i].size) {
@@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
                }
        }
 
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
        return 0;
 }
 
@@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
        struct musb             *musb = s->private;
        unsigned                test;
 
+       pm_runtime_get_sync(musb->controller);
        test = musb_readb(musb->mregs, MUSB_TESTMODE);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        if (test & MUSB_TEST_FORCE_HOST)
                seq_printf(s, "force host\n");
@@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file,
        u8                      test;
        char                    buf[18];
 
+       pm_runtime_get_sync(musb->controller);
        test = musb_readb(musb->mregs, MUSB_TESTMODE);
        if (test) {
                dev_err(musb->controller, "Error: test mode is already set. "
                        "Please do USB Bus Reset to start a new test.\n");
-               return count;
+               goto ret;
        }
 
        memset(buf, 0x00, sizeof(buf));
@@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file,
 
        musb_writeb(musb->mregs, MUSB_TESTMODE, test);
 
+ret:
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
        return count;
 }
 
@@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
        switch (musb->xceiv->otg->state) {
        case OTG_STATE_A_HOST:
        case OTG_STATE_A_WAIT_BCON:
+               pm_runtime_get_sync(musb->controller);
+
                reg = musb_readb(musb->mregs, MUSB_DEVCTL);
                connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
+
+               pm_runtime_mark_last_busy(musb->controller);
+               pm_runtime_put_autosuspend(musb->controller);
                break;
        default:
                connect = -1;
@@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file,
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
 
+       pm_runtime_get_sync(musb->controller);
        if (!strncmp(buf, "0", 1)) {
                switch (musb->xceiv->otg->state) {
                case OTG_STATE_A_HOST:
@@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file,
                }
        }
 
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
        return count;
 }
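
Each musb debugfs handler above now wakes the controller before touching registers and drops its reference through the autosuspend pair afterwards. The canonical bracket, sketched for a hypothetical device (the return value of pm_runtime_get_sync() is ignored here, as in the patch):

    #include <linux/io.h>
    #include <linux/pm_runtime.h>

    static u8 demo_read_reg(struct device *dev, void __iomem *reg)
    {
            u8 val;

            pm_runtime_get_sync(dev);       /* resume if suspended */

            val = readb(reg);               /* hardware access window */

            pm_runtime_mark_last_busy(dev); /* restart autosuspend timer */
            pm_runtime_put_autosuspend(dev);

            return val;
    }
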
 
index 2597b83a8ae254d0eff28b2d2d124ff51c196248..95aa5233726cf86166ae5aad2ea88c446e894d13 100644 (file)
@@ -95,6 +95,7 @@ struct ch341_private {
        unsigned baud_rate; /* set baud rate */
        u8 line_control; /* set line control value RTS/DTR */
        u8 line_status; /* active status of modem control inputs */
+       u8 lcr;
 };
 
 static void ch341_set_termios(struct tty_struct *tty,
@@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
        r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
                            USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
                            value, index, NULL, 0, DEFAULT_TIMEOUT);
+       if (r < 0)
+               dev_err(&dev->dev, "failed to send control message: %d\n", r);
 
        return r;
 }
@@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev,
        r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
                            USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                            value, index, buf, bufsize, DEFAULT_TIMEOUT);
-       return r;
+       if (r < (int)bufsize) {
+               if (r >= 0) {
+                       dev_err(&dev->dev,
+                               "short control message received (%d < %u)\n",
+                               r, bufsize);
+                       r = -EIO;
+               }
+
+               dev_err(&dev->dev, "failed to receive control message: %d\n",
+                       r);
+               return r;
+       }
+
+       return 0;
 }
 
-static int ch341_init_set_baudrate(struct usb_device *dev,
-                                  struct ch341_private *priv, unsigned ctrl)
+static int ch341_set_baudrate_lcr(struct usb_device *dev,
+                                 struct ch341_private *priv, u8 lcr)
 {
        short a;
        int r;
@@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev,
        factor = 0x10000 - factor;
        a = (factor & 0xff00) | divisor;
 
-       /* 0x9c is "enable SFR_UART Control register and timer" */
-       r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
-                             0x9c | (ctrl << 8), a | 0x80);
+       /*
+        * CH341A buffers data until a full endpoint-size packet (32 bytes)
+        * has been received unless bit 7 is set.
+        */
+       a |= BIT(7);
+
+       r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+       if (r)
+               return r;
+
+       r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
+       if (r)
+               return r;
 
        return r;
 }
@@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
 
 static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
 {
+       const unsigned int size = 2;
        char *buffer;
        int r;
-       const unsigned size = 8;
        unsigned long flags;
 
        buffer = kmalloc(size, GFP_KERNEL);
@@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
        if (r < 0)
                goto out;
 
-       /* setup the private status if available */
-       if (r == 2) {
-               r = 0;
-               spin_lock_irqsave(&priv->lock, flags);
-               priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
-               spin_unlock_irqrestore(&priv->lock, flags);
-       } else
-               r = -EPROTO;
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+       spin_unlock_irqrestore(&priv->lock, flags);
 
 out:   kfree(buffer);
        return r;
@@ -200,9 +221,9 @@ out:        kfree(buffer);
 
 static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
 {
+       const unsigned int size = 2;
        char *buffer;
        int r;
-       const unsigned size = 8;
 
        buffer = kmalloc(size, GFP_KERNEL);
        if (!buffer)
@@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
        if (r < 0)
                goto out;
 
-       r = ch341_init_set_baudrate(dev, priv, 0);
+       r = ch341_set_baudrate_lcr(dev, priv, priv->lcr);
        if (r < 0)
                goto out;
 
@@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
 
        spin_lock_init(&priv->lock);
        priv->baud_rate = DEFAULT_BAUD_RATE;
-       priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
 
        r = ch341_configure(port->serial->dev, priv);
        if (r < 0)
@@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
 
        r = ch341_configure(serial->dev, priv);
        if (r)
-               goto out;
+               return r;
 
        if (tty)
                ch341_set_termios(tty, port, NULL);
@@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
        if (r) {
                dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
                        __func__, r);
-               goto out;
+               return r;
        }
 
        r = usb_serial_generic_open(tty, port);
+       if (r)
+               goto err_kill_interrupt_urb;
+
+       return 0;
+
+err_kill_interrupt_urb:
+       usb_kill_urb(port->interrupt_in_urb);
 
-out:   return r;
+       return r;
 }
 
 /* Old_termios contains the original termios settings and
@@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty,
 
        baud_rate = tty_get_baud_rate(tty);
 
-       priv->baud_rate = baud_rate;
        ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
 
        switch (C_CSIZE(tty)) {
@@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty,
                ctrl |= CH341_LCR_STOP_BITS_2;
 
        if (baud_rate) {
-               spin_lock_irqsave(&priv->lock, flags);
-               priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
-               spin_unlock_irqrestore(&priv->lock, flags);
-               r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+               priv->baud_rate = baud_rate;
+
+               r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl);
                if (r < 0 && old_termios) {
                        priv->baud_rate = tty_termios_baud_rate(old_termios);
                        tty_termios_copy_hw(&tty->termios, old_termios);
+               } else if (r == 0) {
+                       priv->lcr = ctrl;
                }
-       } else {
-               spin_lock_irqsave(&priv->lock, flags);
-               priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
-               spin_unlock_irqrestore(&priv->lock, flags);
        }
 
-       ch341_set_handshake(port->serial->dev, priv->line_control);
+       spin_lock_irqsave(&priv->lock, flags);
+       if (C_BAUD(tty) == B0)
+               priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+       else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+               priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+       spin_unlock_irqrestore(&priv->lock, flags);
 
+       ch341_set_handshake(port->serial->dev, priv->line_control);
 }
 
 static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
 
 static int ch341_reset_resume(struct usb_serial *serial)
 {
-       struct ch341_private *priv;
-
-       priv = usb_get_serial_port_data(serial->port[0]);
+       struct usb_serial_port *port = serial->port[0];
+       struct ch341_private *priv = usb_get_serial_port_data(port);
+       int ret;
 
        /* reconfigure ch341 serial port after bus-reset */
        ch341_configure(serial->dev, priv);
 
-       return 0;
+       if (tty_port_initialized(&port->port)) {
+               ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+               if (ret) {
+                       dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+
+       return usb_serial_generic_resume(serial);
 }
 
 static struct usb_serial_driver ch341_device = {
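
The ch341_control_in() rework above treats a short read as a failure: usb_control_msg() may transfer fewer bytes than requested without reporting an error, so callers that parse a fixed-size reply must check the length themselves. The pattern in a hedged sketch (request number and size are illustrative):

    #include <linux/usb.h>

    static int demo_read_status(struct usb_device *udev, void *buf,
                                unsigned int size)
    {
            int r;

            r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
                                0x95 /* illustrative vendor request */,
                                USB_TYPE_VENDOR | USB_RECIP_DEVICE |
                                USB_DIR_IN,
                                0, 0, buf, size, 1000);
            if (r < 0)
                    return r;       /* transfer failed outright */
            if (r < (int)size)
                    return -EIO;    /* short read: reply incomplete */

            return 0;
    }
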
index 0ee190fc1bf8ce7154a4a4eac7023a377aa03365..6cb45757818fae2222383358b4d38d7c87b2dbf4 100644 (file)
@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
                             status_buf, KLSI_STATUSBUF_LEN,
                             10000
                             );
-       if (rc < 0)
-               dev_err(&port->dev, "Reading line status failed (error = %d)\n",
-                       rc);
-       else {
+       if (rc != KLSI_STATUSBUF_LEN) {
+               dev_err(&port->dev, "reading line status failed: %d\n", rc);
+               if (rc >= 0)
+                       rc = -EIO;
+       } else {
                status = get_unaligned_le16(status_buf);
 
                dev_info(&port->serial->dev->dev, "read status %x %x\n",
index 79451f7ef1b76301cc881379ad28ce12e4b4dc33..062c205f00469faf1f7226e152f61d920d71f27b 100644 (file)
@@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
        struct scatterlist sg[4], sg_dst;
        void *dst_buf;
        size_t dst_size;
-       const u8 bzero[16] = { 0 };
        u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
        size_t zero_padding;
 
@@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
        sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
        sg_set_buf(&sg[2], b, blen);
        /* 0 if well behaved :) */
-       sg_set_buf(&sg[3], bzero, zero_padding);
+       sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
        sg_init_one(&sg_dst, dst_buf, dst_size);
 
        skcipher_request_set_tfm(req, tfm_cbc);
index 9266271a787a70b61325d1e92ae9a8d710465f0c..b3cc33fa6d26a69f44dc013b7bee55f6bd7682ab 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/workqueue.h>
-#include <linux/pid_namespace.h>
 #include <linux/mdev.h>
 #include <linux/notifier.h>
 
@@ -495,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
                                  unsigned long *pfn_base, bool do_accounting)
 {
        unsigned long limit;
-       bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
-                                  CAP_IPC_LOCK);
+       bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
        struct mm_struct *mm;
        int ret;
        bool rsvd;
index 253310cdaacabc25d67aa997e45033bf7137b3b9..fd6c8b66f06fd97734bfdad5917c7f65de70265d 100644 (file)
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
-       unsigned out, in;
+       unsigned int out = 0, in = 0;
        int head, ret, prot_bytes;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
        NULL,
 };
 
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
        .module                         = THIS_MODULE,
        .name                           = "vhost",
        .get_fabric_name                = vhost_scsi_get_fabric_name,
index bbbf588540ed71d82ed63deb355b77736a2a9628..ce5e63d2c66aac7d019c422ec294cab025e94e5e 100644 (file)
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+       struct vhost_virtqueue *vq;
        size_t i;
        int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
 
                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
-                       mutex_unlock(&vq->mutex);
                        goto err_vq;
                }
 
                if (!vq->private_data) {
                        vq->private_data = vsock;
-                       vhost_vq_init_access(vq);
+                       ret = vhost_vq_init_access(vq);
+                       if (ret)
+                               goto err_vq;
                }
 
                mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
        return 0;
 
 err_vq:
+       vq->private_data = NULL;
+       mutex_unlock(&vq->mutex);
+
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
index f89245b8ba8e9a28483c4ff5edb03b80a1a9b2e3..68a113594808f220aa818424cd6e342897806a74 100644 (file)
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
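
Both cmap helpers above replace signed arithmetic, where to->len - tooff could go negative and then be compared as an int, with explicit range checks followed by an unsigned minimum. The same computation in a standalone, hedged form:

    #include <stdint.h>
    #include <stdio.h>

    /* Entries copyable between two ranges; 0 for disjoint ranges
     * (where the kernel helpers return -EINVAL). */
    static uint32_t copy_window(uint32_t to_start, uint32_t to_len,
                                uint32_t from_start, uint32_t from_len)
    {
            uint32_t tooff = 0, fromoff = 0;

            if (to_start > from_start)
                    fromoff = to_start - from_start;
            else
                    tooff = from_start - to_start;

            /* Reject before subtracting: unsigned underflow would
             * otherwise yield a huge "size". */
            if (fromoff >= from_len || tooff >= to_len)
                    return 0;

            uint32_t a = to_len - tooff, b = from_len - fromoff;

            return a < b ? a : b;
    }

    int main(void)
    {
            printf("%u\n", copy_window(16, 16, 0, 256));  /* 16 */
            printf("%u\n", copy_window(0, 16, 256, 16));  /* 0  */
            return 0;
    }
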
 
index d47a2fcef818f3cea1ce7f09160061b3c8d2f0a6..c71fde5fe835c48d1ce4611b29108f8cf7fb44f3 100644 (file)
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        struct virtio_mmio_device *vm_dev;
        struct resource *mem;
        unsigned long magic;
+       int rc;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-       if (vm_dev->version == 1)
+       if (vm_dev->version == 1) {
                writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+               rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+               /*
+                * In the legacy case, ensure our coherently-allocated virtio
+                * ring will be at an address expressible as a 32-bit PFN.
+                */
+               if (!rc)
+                       dma_set_coherent_mask(&pdev->dev,
+                                             DMA_BIT_MASK(32 + PAGE_SHIFT));
+       } else {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       }
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");
+
        platform_set_drvdata(pdev, vm_dev);
 
        return register_virtio_device(&vm_dev->vdev);
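
The probe change above builds a DMA-mask fallback chain: try 64-bit, narrow the coherent mask for legacy devices whose ring-PFN register is only 32 bits wide, and drop back to a full 32-bit mask if that fails. The basic fallback idiom, sketched:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    static int demo_setup_dma(struct device *dev)
    {
            int rc;

            rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
            if (rc)
                    rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
            if (rc)
                    dev_warn(dev, "no usable DMA mask, continuing\n");

            /* The virtio-mmio probe above likewise continues on failure. */
            return 0;
    }
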
index 409aeaa49246a0edd7c6da07ca38b58c3f876109..7e38ed79c3fc0f2c095164d480f75b31630a6694 100644 (file)
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
+       /*
+        * On ARM-based machines, the DMA ops will do the right thing,
+        * so always use them with legacy devices.
+        */
+       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
        return false;
 }
 
index 6b5ee896af6318e9b50c384fcf3e062d24d89c47..7cc51223db1cbe8e905307850b7bd42b6c739279 100644 (file)
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
        vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
        pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
 
-       *pci_base = (dma_addr_t)vme_base + pci_offset;
+       *pci_base = (dma_addr_t)*vme_base + pci_offset;
        *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
 
        *enabled = 0;
index 112ce422dc2268f25d7266e42797d6ddfa1a8fe4..2a165cc8a43cd6768529ffe48126c172e5b9f7df 100644 (file)
@@ -42,6 +42,7 @@
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
+static uint64_t callback_via;
 
 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
        return addr;
 }
 
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+       u8 pin;
+       int irq;
+
+       irq = pdev->irq;
+       if (irq < 16)
+               return irq; /* ISA IRQ */
+
+       pin = pdev->pin;
+
+       /* We don't know the GSI. Specify the PCI INTx line instead. */
+       return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+               ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+               ((uint64_t)pdev->bus->number << 16) |
+               ((uint64_t)(pdev->devfn & 0xff) << 8) |
+               ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+       xen_hvm_evtchn_do_upcall();
+       return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+       return request_irq(pdev->irq, do_hvm_evtchn_intr,
+                       IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+                       "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+       int err;
+       if (!xen_pv_domain())
+               return 0;
+       err = xen_set_callback_via(callback_via);
+       if (err) {
+               dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+               return err;
+       }
+       return 0;
+}
+
 static int platform_pci_probe(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
 {
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
        platform_mmio = mmio_addr;
        platform_mmiolen = mmio_len;
 
+       /* 
+        * Xen HVM guests always use the vector callback mechanism.
+        * L1 Dom0 in a nested Xen environment is a PV guest inside an
+        * HVM environment. It needs the platform-pci driver to get
+        * notifications from L0 Xen, but it cannot use the vector callback
+        * as it is not exported by L1 Xen.
+        */
+       if (xen_pv_domain()) {
+               ret = xen_allocate_irq(pdev);
+               if (ret) {
+                       dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+                       goto out;
+               }
+               callback_via = get_callback_via(pdev);
+               ret = xen_set_callback_via(callback_via);
+               if (ret) {
+                       dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+                                        "err=%d\n", ret);
+                       goto out;
+               }
+       }
+
        max_nr_gframes = gnttab_max_grant_frames();
        grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
        ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
        .name =           DRV_NAME,
        .probe =          platform_pci_probe,
        .id_table =       platform_pci_tbl,
+#ifdef CONFIG_PM
+       .resume_early =   platform_pci_resume,
+#endif
 };
 
 builtin_pci_driver(platform_driver);
index c2a377cdda2b03d6efe8768e4ef7894a06ebe853..83eab52fb3f69a75aa06a9f2a31760a384508f41 100644 (file)
@@ -38,6 +38,7 @@ config FS_DAX
        bool "Direct Access (DAX) support"
        depends on MMU
        depends on !(ARM || MIPS || SPARC)
+       select FS_IOMAP
        help
          Direct Access (DAX) can be used on memory-backed block devices.
          If the block device supports DAX and the filesystem supports DAX,
index 4ab67e8cb776ac18fd12bfec2fa7816800f3f5fa..873b4ca82ccbcde4c108a2592fbd7f46b88273a0 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
                 * Tell lockdep we inherited freeze protection from submission
                 * thread.
                 */
-               __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+               if (S_ISREG(file_inode(file)->i_mode))
+                       __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
                file_end_write(file);
        }
 
@@ -1525,7 +1526,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
                 * by telling it the lock got released so that it doesn't
                 * complain about held lock when we return to userspace.
                 */
-               __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+               if (S_ISREG(file_inode(file)->i_mode))
+                       __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
        }
        kfree(iovec);
        return ret;
index 29a02daf08a97896c600e88317e646dbf3ab3c9b..422370293cfd8cfc9b0f527f73498ce7c28244be 100644 (file)
@@ -2298,6 +2298,7 @@ static int elf_core_dump(struct coredump_params *cprm)
                                goto end_coredump;
                }
        }
+       dump_truncate(cprm);
 
        if (!elf_core_write_extra_data(cprm))
                goto end_coredump;
index 63d197724519b30d8392546f9a8ba9855feaf5aa..ff0b0be92d6122402f02c039eed8e4599fdce4df 100644 (file)
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
        unsigned long flags;
 
        while (1) {
+               void *wtag;
+
                spin_lock_irqsave(lock, flags);
                if (list_empty(list))
                        break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
                spin_unlock_irqrestore(lock, flags);
 
                /*
-                * we don't want to call the ordered free functions
-                * with the lock held though
+                * We don't want to call the ordered free functions with the
+                * lock held though. Save the work pointer as a tag for the
+                * trace event, because the callback could free the structure.
                 */
+               wtag = work;
                work->ordered_free(work);
-               trace_btrfs_all_work_done(work);
+               trace_btrfs_all_work_done(wq->fs_info, wtag);
        }
        spin_unlock_irqrestore(lock, flags);
 }
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 static void normal_work_helper(struct btrfs_work *work)
 {
        struct __btrfs_workqueue *wq;
+       void *wtag;
        int need_order = 0;
 
        /*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
        if (work->ordered_func)
                need_order = 1;
        wq = work->wq;
+       /* Safe for tracepoints in case work gets freed by the callback */
+       wtag = work;
 
        trace_btrfs_work_sched(work);
        thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
                run_ordered_work(wq);
        }
        if (!need_order)
-               trace_btrfs_all_work_done(work);
+               trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
 void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
index e97302f437a16db24db7d2aa6ae202cde3309fb6..dcd2e798767e57a80e7f50ff791a675e394a62c0 100644 (file)
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                if (ref && ref->seq &&
                    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
                        spin_unlock(&locked_ref->lock);
-                       btrfs_delayed_ref_unlock(locked_ref);
                        spin_lock(&delayed_refs->lock);
                        locked_ref->processing = 0;
                        delayed_refs->num_heads_ready++;
                        spin_unlock(&delayed_refs->lock);
+                       btrfs_delayed_ref_unlock(locked_ref);
                        locked_ref = NULL;
                        cond_resched();
                        count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                                         */
                                        if (must_insert_reserved)
                                                locked_ref->must_insert_reserved = 1;
+                                       spin_lock(&delayed_refs->lock);
                                        locked_ref->processing = 0;
+                                       delayed_refs->num_heads_ready++;
+                                       spin_unlock(&delayed_refs->lock);
                                        btrfs_debug(fs_info,
                                                    "run_delayed_extent_op returned %d",
                                                    ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
 
                spin_unlock(&cluster->refill_lock);
 
-               down_read(&used_bg->data_rwsem);
+               /* We should only have one-level nested. */
+               down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
 
                spin_lock(&cluster->refill_lock);
                if (used_bg == cluster->block_group)
index f2b281ad7af6b9db26b48c6d4f072a850c19d58a..4e024260ad713ffc583d8f2ffaeb7ba6a025014d 100644 (file)
@@ -7059,7 +7059,7 @@ insert:
        write_unlock(&em_tree->lock);
 out:
 
-       trace_btrfs_get_extent(root, em);
+       trace_btrfs_get_extent(root, inode, em);
 
        btrfs_free_path(path);
        if (trans) {
@@ -7623,11 +7623,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
         * within our reservation, otherwise we need to adjust our inode
         * counter appropriately.
         */
-       if (dio_data->outstanding_extents) {
+       if (dio_data->outstanding_extents >= num_extents) {
                dio_data->outstanding_extents -= num_extents;
        } else {
+               /*
+                * If the dio write has been split because a large enough
+                * contiguous extent was not available, we need to bump the
+                * inode counter by the shortfall.
+                */
+               u64 num_needed = num_extents - dio_data->outstanding_extents;
+
                spin_lock(&BTRFS_I(inode)->lock);
-               BTRFS_I(inode)->outstanding_extents += num_extents;
+               BTRFS_I(inode)->outstanding_extents += num_needed;
                spin_unlock(&BTRFS_I(inode)->lock);
        }
 }
index f10bf5213ed8a48b95a8cf3cf51dc3c2c81b8463..eeffff84f280958cfd117e860055d7b9fc683377 100644 (file)
@@ -37,6 +37,7 @@
  */
 #define LOG_INODE_ALL 0
 #define LOG_INODE_EXISTS 1
+#define LOG_OTHER_INODE 2
 
 /*
  * directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        if (S_ISDIR(inode->i_mode) ||
            (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                       &BTRFS_I(inode)->runtime_flags) &&
-            inode_only == LOG_INODE_EXISTS))
+            inode_only >= LOG_INODE_EXISTS))
                max_key.type = BTRFS_XATTR_ITEM_KEY;
        else
                max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                return ret;
        }
 
-       mutex_lock(&BTRFS_I(inode)->log_mutex);
+       if (inode_only == LOG_OTHER_INODE) {
+               inode_only = LOG_INODE_EXISTS;
+               mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
+                                 SINGLE_DEPTH_NESTING);
+       } else {
+               mutex_lock(&BTRFS_I(inode)->log_mutex);
+       }
 
        /*
         * a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ again:
                                 * unpin it.
                                 */
                                err = btrfs_log_inode(trans, root, other_inode,
-                                                     LOG_INODE_EXISTS,
+                                                     LOG_OTHER_INODE,
                                                      0, LLONG_MAX, ctx);
                                iput(other_inode);
                                if (err)
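
The tree-log change above takes the log_mutex of a second inode while already holding the first one; without an annotation lockdep would report a deadlock on the shared lock class, which is what the LOG_OTHER_INODE marker and mutex_lock_nested() avoid. A minimal sketch of the annotation (the struct and the ordering rule are illustrative):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    struct demo_inode { struct mutex log_mutex; };

    /*
     * The caller guarantees a stable order (outer before inner), so two
     * locks of the same class cannot deadlock; the annotation only tells
     * lockdep about the single extra nesting level. Unlock in reverse.
     */
    static void demo_lock_pair(struct demo_inode *outer,
                               struct demo_inode *inner)
    {
            mutex_lock(&outer->log_mutex);
            mutex_lock_nested(&inner->log_mutex, SINGLE_DEPTH_NESTING);
    }
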
index 161342b73ce50256bd2cba63b0d1c5ece503f599..726f928238d0c86a931cd58613799e59cc35fdf8 100644 (file)
@@ -352,7 +352,5 @@ skip:
 
 out:
        btrfs_free_path(path);
-       if (ret)
-               btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
-       return 0;
+       return ret;
 }
index 9cd0c0ea7cdbd5b683ab035927636f9f56b751d3..e4b066cd912ad9ea249c3c88f81026a68186095c 100644 (file)
@@ -502,9 +502,9 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
                if (truncate_size)
-                       *truncate_size = capsnap->truncate_size;
+                       *truncate_size = ci->i_truncate_size;
                if (truncate_seq)
-                       *truncate_seq = capsnap->truncate_seq;
+                       *truncate_seq = ci->i_truncate_seq;
        }
        spin_unlock(&ci->i_ceph_lock);
        return snapc;
index baea866a6751facf4c1f18dda23e71582978dffe..94fd76d04683d88103b42ff71a02490201a9783f 100644 (file)
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                        add_wait_queue(&ci->i_cap_wq, &wait);
 
                        while (!try_get_cap_refs(ci, need, want, endoff,
-                                                true, &_got, &err))
+                                                true, &_got, &err)) {
+                               if (signal_pending(current)) {
+                                       ret = -ERESTARTSYS;
+                                       break;
+                               }
                                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+                       }
 
                        remove_wait_queue(&ci->i_cap_wq, &wait);
 
index d7a93696663b66b9183a9ae1559c2119b3fc3f98..8ab1fdf0bd49b74f380a578aea92ce738393403d 100644 (file)
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(dir->i_sb)->mdsc;
                struct ceph_mds_request *req;
-               int op, mask, err;
+               int op, err;
+               u32 mask;
 
                if (flags & LOOKUP_RCU)
                        return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
                        if (ceph_security_xattr_wanted(dir))
                                mask |= CEPH_CAP_XATTR_SHARED;
-                       req->r_args.getattr.mask = mask;
+                       req->r_args.getattr.mask = cpu_to_le32(mask);
 
                        err = ceph_mdsc_do_request(mdsc, NULL, req);
                        switch (err) {
index 398e5328b30952410cc503e7e4d20918d3bdc671..5e659d054b40ae6faac23af26c5321c5af6ff69b 100644 (file)
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
 {
        struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
        struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
-       return ceph_frag_compare(ls->frag, rs->frag);
+       return ceph_frag_compare(le32_to_cpu(ls->frag),
+                                le32_to_cpu(rs->frag));
 }
 
 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
index 4f49253387a0a59d238a041b1ec8fc9fd7185c40..c9d2e553a6c487f01bd11ed4c7a2c15ddfcd058d 100644 (file)
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
 {
-       if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
+       u32 op = le32_to_cpu(info->head->op);
+
+       if (op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
-       else if (info->head->op == CEPH_MDS_OP_READDIR ||
-                info->head->op == CEPH_MDS_OP_LSSNAP)
+       else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
                return parse_reply_info_dir(p, end, info, features);
-       else if (info->head->op == CEPH_MDS_OP_CREATE)
+       else if (op == CEPH_MDS_OP_CREATE)
                return parse_reply_info_create(p, end, info, features);
        else
                return -EIO;
@@ -2106,6 +2107,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
                        dout("do_request mdsmap err %d\n", err);
                        goto finish;
                }
+               if (mdsc->mdsmap->m_epoch == 0) {
+                       dout("do_request no mdsmap, waiting for map\n");
+                       list_add(&req->r_wait, &mdsc->waiting_for_map);
+                       goto finish;
+               }
                if (!(mdsc->fsc->mount_options->flags &
                      CEPH_MOUNT_OPT_MOUNTWAIT) &&
                    !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
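
A common thread in the ceph fixes above: on-wire little-endian fields must go through le32_to_cpu() before being compared, otherwise the comparison is wrong on big-endian hosts (and sparse flags the mixed types). The rule in miniature, sketched for a hypothetical wire structure:

    #include <asm/byteorder.h>
    #include <linux/types.h>

    struct demo_wire { __le32 frag; };

    static int demo_cmp(const void *l, const void *r)
    {
            const struct demo_wire *ls = l, *rs = r;
            u32 a = le32_to_cpu(ls->frag);  /* convert first ... */
            u32 b = le32_to_cpu(rs->frag);

            return a < b ? -1 : a > b;      /* ... then compare */
    }
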
index e525b6017cdf863fd811425fb603070be40420b2..ae6b05629ca174d714bbdb3db477bee8f0d8bff8 100644 (file)
@@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align)
        return mod ? dump_skip(cprm, align - mod) : 1;
 }
 EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that the file size is big enough to contain the current file
+ * position. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+       struct file *file = cprm->file;
+       loff_t offset;
+
+       if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+               offset = file->f_op->llseek(file, 0, SEEK_CUR);
+               if (i_size_read(file->f_mapping->host) < offset)
+                       do_truncate(file->f_path.dentry, offset, 0, file);
+       }
+}
+EXPORT_SYMBOL(dump_truncate);
index 5c74f60d0a5094dc0a27f27ae0acd41667414332..3af2da5e64ce77fa8ae4b3f294c82882d350120f 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -691,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
                                      pgoff_t index, unsigned long pfn)
 {
        struct vm_area_struct *vma;
-       pte_t *ptep;
-       pte_t pte;
+       pte_t pte, *ptep = NULL;
+       pmd_t *pmdp = NULL;
        spinlock_t *ptl;
        bool changed;
 
@@ -707,21 +707,42 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 
                address = pgoff_address(index, vma);
                changed = false;
-               if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
                        continue;
-               if (pfn != pte_pfn(*ptep))
-                       goto unlock;
-               if (!pte_dirty(*ptep) && !pte_write(*ptep))
-                       goto unlock;
 
-               flush_cache_page(vma, address, pfn);
-               pte = ptep_clear_flush(vma, address, ptep);
-               pte = pte_wrprotect(pte);
-               pte = pte_mkclean(pte);
-               set_pte_at(vma->vm_mm, address, ptep, pte);
-               changed = true;
-unlock:
-               pte_unmap_unlock(ptep, ptl);
+               if (pmdp) {
+#ifdef CONFIG_FS_DAX_PMD
+                       pmd_t pmd;
+
+                       if (pfn != pmd_pfn(*pmdp))
+                               goto unlock_pmd;
+                       if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+                               goto unlock_pmd;
+
+                       flush_cache_page(vma, address, pfn);
+                       pmd = pmdp_huge_clear_flush(vma, address, pmdp);
+                       pmd = pmd_wrprotect(pmd);
+                       pmd = pmd_mkclean(pmd);
+                       set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+                       changed = true;
+unlock_pmd:
+                       spin_unlock(ptl);
+#endif
+               } else {
+                       if (pfn != pte_pfn(*ptep))
+                               goto unlock_pte;
+                       if (!pte_dirty(*ptep) && !pte_write(*ptep))
+                               goto unlock_pte;
+
+                       flush_cache_page(vma, address, pfn);
+                       pte = ptep_clear_flush(vma, address, ptep);
+                       pte = pte_wrprotect(pte);
+                       pte = pte_mkclean(pte);
+                       set_pte_at(vma->vm_mm, address, ptep, pte);
+                       changed = true;
+unlock_pte:
+                       pte_unmap_unlock(ptep, ptl);
+               }
 
                if (changed)
                        mmu_notifier_invalidate_page(vma->vm_mm, address);
@@ -969,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1407,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
index 769903dbc19df53199b4aca2e79607145e179c98..95d71eda81420a506c5a1f4a4f5283b31310d5e4 100644 (file)
@@ -1336,8 +1336,11 @@ int d_set_mounted(struct dentry *dentry)
        }
        spin_lock(&dentry->d_lock);
        if (!d_unlinked(dentry)) {
-               dentry->d_flags |= DCACHE_MOUNTED;
-               ret = 0;
+               ret = -EBUSY;
+               if (!d_mountpoint(dentry)) {
+                       dentry->d_flags |= DCACHE_MOUNTED;
+                       ret = 0;
+               }
        }
        spin_unlock(&dentry->d_lock);
 out:
index aeae8c06345155e35e6f9d1567d004c6d073ea66..c87bae4376b848f4204c06016eeb223f675dc7db 100644 (file)
@@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
                        struct buffer_head *map_bh)
 {
        const unsigned blkbits = sdio->blkbits;
+       const unsigned i_blkbits = blkbits + sdio->blkfactor;
        int ret = 0;
 
        while (sdio->block_in_file < sdio->final_block_in_request) {
@@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
                                        clean_bdev_aliases(
                                                map_bh->b_bdev,
                                                map_bh->b_blocknr,
-                                               map_bh->b_size >> blkbits);
+                                               map_bh->b_size >> i_blkbits);
                                }
 
                                if (!sdio->blkfactor)
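
The fix matters because map_bh->b_blocknr is in filesystem-block units while blkbits alone describes the finer sub-block granularity used when blkfactor is nonzero; shifting the byte count by the smaller value over-counts the blocks handed to clean_bdev_aliases(). Worked with illustrative numbers:

    #include <stdio.h>

    /* Illustrative arithmetic only: 4K fs blocks over 512-byte sub-blocks. */
    int main(void)
    {
            unsigned blkbits   = 9;                    /* sub-block: 512B */
            unsigned blkfactor = 3;
            unsigned i_blkbits = blkbits + blkfactor;  /* fs block: 4096B */
            unsigned long b_size = 8ul << i_blkbits;   /* 8 fs blocks mapped */

            printf("%lu\n", b_size >> blkbits);    /* 64: 8x too many      */
            printf("%lu\n", b_size >> i_blkbits);  /* 8: matches b_blocknr */
            return 0;
    }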
index 36bea5adcabaa735056b20290ce23cdc7dfc0851..c634874e12d969fbd0b00ad8da745553168876e6 100644 (file)
@@ -1,6 +1,5 @@
 config EXT2_FS
        tristate "Second extended fs support"
-       select FS_IOMAP if FS_DAX
        help
          Ext2 is a standard Linux file system for hard disks.
 
index 7b90691e98c4f5fdd3b2d162285b00e71ac51b63..e38039fd96ff59ab59ce17407abcf26de4c5a950 100644 (file)
@@ -37,7 +37,6 @@ config EXT4_FS
        select CRC16
        select CRYPTO
        select CRYPTO_CRC32C
-       select FS_IOMAP if FS_DAX
        help
          This is the next generation of the ext3 filesystem.
 
index 0738f48293ccbfe0fd528ededfd6a4a08fe4251d..0d880245375890058602437d3c709bfedd3945b8 100644 (file)
@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
        }
        sector = SECTOR_FROM_BLOCK(blkstart);
 
-       if (sector & (bdev_zone_size(bdev) - 1) ||
-                               nr_sects != bdev_zone_size(bdev)) {
+       if (sector & (bdev_zone_sectors(bdev) - 1) ||
+           nr_sects != bdev_zone_sectors(bdev)) {
                f2fs_msg(sbi->sb, KERN_INFO,
                        "(%d) %s: Unaligned discard attempted (block %x + %x)",
                        devi, sbi->s_ndevs ? FDEV(devi).path: "",
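
bdev_zone_sectors() reports the zone length in 512-byte sectors, and zone sizes are powers of two, so the mask test above is the cheap alignment check. A self-contained sketch of the same test:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the test above: sector must sit on a zone boundary and the
     * discard must cover exactly one zone; zone_sectors is a power of two,
     * so the mask replaces a division. */
    static bool zone_aligned(uint64_t sector, uint64_t nr_sects,
                             uint64_t zone_sectors)
    {
            return !(sector & (zone_sectors - 1)) &&
                   nr_sects == zone_sectors;
    }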
index 702638e21c7621a4804a8c84fefc7635785ff42d..46fd30d8af7763c93244092b6673e3cc2ff5f05f 100644 (file)
@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
                return 0;
 
        if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
-                               SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+                               SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
                return -EINVAL;
-       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+       sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
        if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
                                __ilog2_u32(sbi->blocks_per_blkz))
                return -EINVAL;
        sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
        FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
                                        sbi->log_blocks_per_blkz;
-       if (nr_sectors & (bdev_zone_size(bdev) - 1))
+       if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
                FDEV(devi).nr_blkz++;
 
        FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
index 70ea57c7b6bb2b48ecf47d8cc9bd211cfc430a61..4e06a27ed7f80d4d0472e3d6c9e9fe3f0f1d7da5 100644 (file)
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
-               clear_bit(FR_PENDING, &req->flags);
                clear_bit(FR_SENT, &req->flags);
                list_del_init(&req->list);
                request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
                spin_lock(&fiq->waitq.lock);
                fiq->connected = 0;
                list_splice_init(&fiq->pending, &to_end2);
+               list_for_each_entry(req, &to_end2, list)
+                       clear_bit(FR_PENDING, &req->flags);
                while (forget_pending(fiq))
                        kfree(dequeue_forget(fiq, 1, NULL));
                wake_up_all_locked(&fiq->waitq);
index 1f7c732f32b07f1bab9e4961f16cb52ee9f09f70..811fd8929a18c1e330316202fd40ac58857ec3c7 100644 (file)
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
        if (sec || nsec) {
                struct timespec64 ts = {
                        sec,
-                       max_t(u32, nsec, NSEC_PER_SEC - 1)
+                       min_t(u32, nsec, NSEC_PER_SEC - 1)
                };
 
                return get_jiffies_64() + timespec64_to_jiffies(&ts);
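
The bug: max_t(u32, nsec, NSEC_PER_SEC - 1) rounded any smaller nsec up to almost a full second, inflating every cached-attribute timeout; min_t() clamps it down as intended. A small standalone demonstration:

    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000u

    int main(void)
    {
            unsigned int nsec = 250000000u;            /* 0.25s requested */
            unsigned int cap  = NSEC_PER_SEC - 1;

            unsigned int with_max = nsec > cap ? nsec : cap;  /* old bug */
            unsigned int with_min = nsec < cap ? nsec : cap;  /* the fix */

            /* with_max = 999999999 (~1s, inflated); with_min = 250000000 */
            printf("max: %u  min: %u\n", with_max, with_min);
            return 0;
    }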
index e973cd51f12632eff2c17929c85d1e8b2e955730..28d6f35feed62855e26ede0073b3bcaf6b6564a9 100644 (file)
@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
        struct inode *root;
        struct qstr d_name = QSTR_INIT(name, strlen(name));
 
-       s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
+       s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+                       &init_user_ns, NULL);
        if (IS_ERR(s))
                return ERR_CAST(s);
 
index b5b1259e064f8d9661110ba1f1f73d1a0ff19d51..487ba30bb5c67a8f66bdaadb5b13e8240c937f7a 100644 (file)
@@ -742,26 +742,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
        return NULL;
 }
 
-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+static struct mountpoint *get_mountpoint(struct dentry *dentry)
 {
-       struct hlist_head *chain = mp_hash(dentry);
-       struct mountpoint *mp;
+       struct mountpoint *mp, *new = NULL;
        int ret;
 
-       mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
-       if (!mp)
+       if (d_mountpoint(dentry)) {
+mountpoint:
+               read_seqlock_excl(&mount_lock);
+               mp = lookup_mountpoint(dentry);
+               read_sequnlock_excl(&mount_lock);
+               if (mp)
+                       goto done;
+       }
+
+       if (!new)
+               new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+       if (!new)
                return ERR_PTR(-ENOMEM);
 
+
+       /* Exactly one process may set d_mounted */
        ret = d_set_mounted(dentry);
-       if (ret) {
-               kfree(mp);
-               return ERR_PTR(ret);
-       }
 
-       mp->m_dentry = dentry;
-       mp->m_count = 1;
-       hlist_add_head(&mp->m_hash, chain);
-       INIT_HLIST_HEAD(&mp->m_list);
+       /* Someone else set d_mounted? */
+       if (ret == -EBUSY)
+               goto mountpoint;
+
+       /* The dentry is not available as a mountpoint? */
+       mp = ERR_PTR(ret);
+       if (ret)
+               goto done;
+
+       /* Add the new mountpoint to the hash table */
+       read_seqlock_excl(&mount_lock);
+       new->m_dentry = dentry;
+       new->m_count = 1;
+       hlist_add_head(&new->m_hash, mp_hash(dentry));
+       INIT_HLIST_HEAD(&new->m_list);
+       read_sequnlock_excl(&mount_lock);
+
+       mp = new;
+       new = NULL;
+done:
+       kfree(new);
        return mp;
 }
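
get_mountpoint() is a lock-dropping lookup-or-create: look up under the lock, allocate outside it, claim the dentry with d_set_mounted(), and on -EBUSY (another task claimed it first, per the d_set_mounted() change above) loop back to the lookup; the unconditional kfree(new) at done frees a losing allocation, since kfree(NULL) is a no-op. A hedged generic sketch of the same race-safe shape; lookup_locked(), try_claim(), insert_locked() and alloc_obj() are illustrative stand-ins, not kernel APIs:

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>

    extern pthread_mutex_t table_lock;
    struct obj;
    extern struct obj *lookup_locked(int key);
    extern int try_claim(int key);              /* -EBUSY if raced */
    extern void insert_locked(int key, struct obj *o);
    extern struct obj *alloc_obj(void);

    struct obj *get_or_create(int key)
    {
            struct obj *o, *new = NULL;

            for (;;) {
                    pthread_mutex_lock(&table_lock);
                    o = lookup_locked(key);
                    pthread_mutex_unlock(&table_lock);
                    if (o)
                            break;              /* someone else won */

                    if (!new && !(new = alloc_obj()))
                            return NULL;

                    if (try_claim(key) == -EBUSY)
                            continue;           /* lost the race: re-lookup */

                    pthread_mutex_lock(&table_lock);
                    insert_locked(key, new);
                    pthread_mutex_unlock(&table_lock);
                    o = new;
                    new = NULL;
                    break;
            }
            free(new);              /* free(NULL) is a no-op, like kfree */
            return o;
    }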
 
@@ -1595,11 +1619,11 @@ void __detach_mounts(struct dentry *dentry)
        struct mount *mnt;
 
        namespace_lock();
+       lock_mount_hash();
        mp = lookup_mountpoint(dentry);
        if (IS_ERR_OR_NULL(mp))
                goto out_unlock;
 
-       lock_mount_hash();
        event++;
        while (!hlist_empty(&mp->m_list)) {
                mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@@ -1609,9 +1633,9 @@ void __detach_mounts(struct dentry *dentry)
                }
                else umount_tree(mnt, UMOUNT_CONNECTED);
        }
-       unlock_mount_hash();
        put_mountpoint(mp);
 out_unlock:
+       unlock_mount_hash();
        namespace_unlock();
 }
 
@@ -2038,9 +2062,7 @@ retry:
        namespace_lock();
        mnt = lookup_mnt(path);
        if (likely(!mnt)) {
-               struct mountpoint *mp = lookup_mountpoint(dentry);
-               if (!mp)
-                       mp = new_mountpoint(dentry);
+               struct mountpoint *mp = get_mountpoint(dentry);
                if (IS_ERR(mp)) {
                        namespace_unlock();
                        inode_unlock(dentry->d_inode);
@@ -2059,7 +2081,11 @@ retry:
 static void unlock_mount(struct mountpoint *where)
 {
        struct dentry *dentry = where->m_dentry;
+
+       read_seqlock_excl(&mount_lock);
        put_mountpoint(where);
+       read_sequnlock_excl(&mount_lock);
+
        namespace_unlock();
        inode_unlock(dentry->d_inode);
 }
@@ -3135,9 +3161,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
        touch_mnt_namespace(current->nsproxy->mnt_ns);
        /* A moved mount should not expire automatically */
        list_del_init(&new_mnt->mnt_expire);
+       put_mountpoint(root_mp);
        unlock_mount_hash();
        chroot_fs_refs(&root, &new);
-       put_mountpoint(root_mp);
        error = 0;
 out4:
        unlock_mount(old_mp);
index 6dcbc5defb7a8dd670b63995eb553c379e47a0d4..ecc151697fd4bd81288941848a4b6a76e7563a24 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
-#include <linux/file.h>
 #include <linux/string.h>
 #include <linux/ratelimit.h>
 #include <linux/printk.h>
@@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
        return nfs4_call_sync_sequence(clnt, server, msg, args, res);
 }
 
-static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
+static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
+               unsigned long timestamp)
 {
        struct nfs_inode *nfsi = NFS_I(dir);
 
@@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
                                NFS_INO_INVALID_ACL;
        }
        dir->i_version = cinfo->after;
+       nfsi->read_cache_jiffies = timestamp;
        nfsi->attr_gencount = nfs_inc_attr_generation_counter();
        nfs_fscache_invalidate(dir);
        spin_unlock(&dir->i_lock);
@@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
        nfs_fattr_map_and_free_names(server, &data->f_attr);
 
        if (o_arg->open_flags & O_CREAT) {
-               update_changeattr(dir, &o_res->cinfo);
                if (o_arg->open_flags & O_EXCL)
                        data->file_created = 1;
                else if (o_res->cinfo.before != o_res->cinfo.after)
                        data->file_created = 1;
+               if (data->file_created || dir->i_version != o_res->cinfo.after)
+                       update_changeattr(dir, &o_res->cinfo,
+                                       o_res->f_attr->time_start);
        }
        if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
                server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -4073,11 +4076,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
+       unsigned long timestamp = jiffies;
        int status;
 
        status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
        if (status == 0)
-               update_changeattr(dir, &res.cinfo);
+               update_changeattr(dir, &res.cinfo, timestamp);
        return status;
 }
 
@@ -4125,7 +4129,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
        if (nfs4_async_handle_error(task, res->server, NULL,
                                    &data->timeout) == -EAGAIN)
                return 0;
-       update_changeattr(dir, &res->cinfo);
+       if (task->tk_status == 0)
+               update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
        return 1;
 }
 
@@ -4159,8 +4164,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
        if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
                return 0;
 
-       update_changeattr(old_dir, &res->old_cinfo);
-       update_changeattr(new_dir, &res->new_cinfo);
+       if (task->tk_status == 0) {
+               update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
+               if (new_dir != old_dir)
+                       update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
+       }
        return 1;
 }
 
@@ -4197,7 +4205,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
 
        status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
        if (!status) {
-               update_changeattr(dir, &res.cinfo);
+               update_changeattr(dir, &res.cinfo, res.fattr->time_start);
                status = nfs_post_op_update_inode(inode, res.fattr);
                if (!status)
                        nfs_setsecurity(inode, res.fattr, res.label);
@@ -4272,7 +4280,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
        int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
                                    &data->arg.seq_args, &data->res.seq_res, 1);
        if (status == 0) {
-               update_changeattr(dir, &data->res.dir_cinfo);
+               update_changeattr(dir, &data->res.dir_cinfo,
+                               data->res.fattr->time_start);
                status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
        }
        return status;
@@ -6127,7 +6136,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->server = server;
        atomic_inc(&lsp->ls_count);
        p->ctx = get_nfs_open_context(ctx);
-       get_file(fl->fl_file);
        memcpy(&p->fl, fl, sizeof(p->fl));
        return p;
 out_free_seqid:
@@ -6240,7 +6248,6 @@ static void nfs4_lock_release(void *calldata)
                nfs_free_seqid(data->arg.lock_seqid);
        nfs4_put_lock_state(data->lsp);
        put_nfs_open_context(data->ctx);
-       fput(data->fl.fl_file);
        kfree(data);
        dprintk("%s: done!\n", __func__);
 }
index 1d152f4470cd6f6b0bcc6f73fd572e331b18778b..90e6193ce6bed300ddadb1aa1a9aca64a8c2aafc 100644 (file)
@@ -1729,7 +1729,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
                        break;
                case -NFS4ERR_STALE_CLIENTID:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
-                       nfs4_state_clear_reclaim_reboot(clp);
                        nfs4_state_start_reclaim_reboot(clp);
                        break;
                case -NFS4ERR_EXPIRED:
index 7ecf16be4a444ec250678e89ad210c7f19cbbc18..8fae53ce21d16c8406ff01425d924eb044edee34 100644 (file)
@@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
        p++;                /* to be backfilled later */
 
        if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
-               u32 *supp = nfsd_suppattrs[minorversion];
+               u32 supp[3];
+
+               memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
 
                if (!IS_POSIXACL(dentry->d_inode))
                        supp[0] &= ~FATTR4_WORD0_ACL;
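
The old code took a pointer into the shared nfsd_suppattrs[] table and cleared bits through it in place, corrupting the supported-attributes mask advertised to every later client; copying into a stack array keeps the edit private. A minimal illustration of the bug class, with illustrative names:

    #include <string.h>

    /* A shared capability table, analogous to nfsd_suppattrs[]. */
    static unsigned int shared_caps[3] = { 0xffffu, 0xffu, 0xfu };

    /* Pre-fix shape (buggy): masking through the returned pointer
     * permanently alters the table for every later caller. */
    static unsigned int *caps_shared(void)
    {
            return shared_caps;
    }

    /* Post-fix shape: edit a private stack copy instead. */
    static void caps_copy(unsigned int out[3], int drop_acl)
    {
            memcpy(out, shared_caps, sizeof(shared_caps));
            if (drop_acl)
                    out[0] &= ~0x1u;   /* only the copy changes */
    }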
index 83d576f6a287b13939a7dd70622ae4d89b5031f8..77d1632e905d8b1ec249cf86c7ece183c38f2746 100644 (file)
@@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
        mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
             lockres->l_level, new_level);
 
+       /*
+        * fsdlm handles DLM_LKF_VALBLK differently from o2cb: it always
+        * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
+        * we can recover correctly from node failure. Otherwise we may get
+        * an invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set.
+        */
+       if (!ocfs2_is_o2cb_active() &&
+           lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+               lvb = 1;
+
        if (lvb)
                dlm_flags |= DLM_LKF_VALBLK;
 
index 52c07346bea3f8960399184c0e6aead7545a25f2..820359096c7aa8df5b76e2a0c1404ce7f9d0337d 100644 (file)
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
  */
 static struct ocfs2_stack_plugin *active_stack;
 
+inline int ocfs2_is_o2cb_active(void)
+{
+       return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
 static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
 {
        struct ocfs2_stack_plugin *p;
index f2dce10fae543c254dcb4e6628d357b60a3ac16c..e3036e1790e86da7b4e13dcb0b8c88e3f19b6d50 100644 (file)
@@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
 int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
 void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
 
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
 extern struct kset *ocfs2_kset;
 
 #endif  /* STACKGLUE_H */
index 9ad48d9202a99a6c63ccb07b9b31476f5ee524af..023bb0b03352f4440d893b4e713d7d9ed937a771 100644 (file)
@@ -154,29 +154,38 @@ out_err:
 static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
                            struct dentry **ret)
 {
-       const char *s = d->name.name;
+       /* Counting down from the end, since the prefix can change */
+       size_t rem = d->name.len - 1;
        struct dentry *dentry = NULL;
        int err;
 
-       if (*s != '/')
+       if (d->name.name[0] != '/')
                return ovl_lookup_single(base, d, d->name.name, d->name.len,
                                         0, "", ret);
 
-       while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+       while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+               const char *s = d->name.name + d->name.len - rem;
                const char *next = strchrnul(s, '/');
-               size_t slen = strlen(s);
+               size_t thislen = next - s;
+               bool end = !next[0];
 
-               if (WARN_ON(slen > d->name.len) ||
-                   WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+               /* Verify we did not go off the rails */
+               if (WARN_ON(s[-1] != '/'))
                        return -EIO;
 
-               err = ovl_lookup_single(base, d, s, next - s,
-                                       d->name.len - slen, next, &base);
+               err = ovl_lookup_single(base, d, s, thislen,
+                                       d->name.len - rem, next, &base);
                dput(dentry);
                if (err)
                        return err;
                dentry = base;
-               s = next;
+               if (end)
+                       break;
+
+               rem -= thislen + 1;
+
+               if (WARN_ON(rem >= d->name.len))
+                       return -EIO;
        }
        *ret = dentry;
        return 0;
index 595522022aca04beeeabd3d2ed4e73992f268296..c9d48dc784953fa4af62f9af58dfbb56ed2faa70 100644 (file)
@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
        int error;
 
        if (type == ACL_TYPE_ACCESS) {
-               error = posix_acl_equiv_mode(acl, &inode->i_mode);
-               if (error < 0)
-                       return 0;
-               if (error == 0)
-                       acl = NULL;
+               error = posix_acl_update_mode(inode,
+                               &inode->i_mode, &acl);
+               if (error)
+                       return error;
        }
 
        inode->i_ctime = current_time(inode);
index 8e7e61b28f31c037961c081d09a7be5f818013ef..87c9a9aacda3601e2686e239243f447728137943 100644 (file)
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
                char name[PROC_NUMBUF];
                int len;
+
+               cond_resched();
                if (!has_pid_permissions(ns, iter.task, 2))
                        continue;
 
index 55313d9948954bbe4bff5769e487eb78012776db..d4e37acd48217dcb1090a1ee68bb4ec4cb7226e5 100644 (file)
@@ -709,7 +709,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
        ctl_dir = container_of(head, struct ctl_dir, header);
 
        if (!dir_emit_dots(file, ctx))
-               return 0;
+               goto out;
 
        pos = 2;
 
@@ -719,6 +719,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
                        break;
                }
        }
+out:
        sysctl_head_finish(head);
        return 0;
 }
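
The early return 0 skipped sysctl_head_finish(), leaking the reference taken on the sysctl table header; routing the early exit through the shared out label restores the acquire/release pairing. The idiom, sketched with illustrative grab()/release() helpers:

    struct res;
    extern void grab(struct res *r);
    extern void release(struct res *r);
    extern int emit_header(struct res *r);
    extern int emit_body(struct res *r);

    int do_work(struct res *r)
    {
            int err = 0;

            grab(r);                 /* reference taken */
            if (!emit_header(r))
                    goto out;        /* early exit still releases */
            err = emit_body(r);
    out:
            release(r);              /* single release point */
            return err;
    }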
index d0f8a38dfafacd8f3d524d1ff69ae8f621eea278..0186fe6d39f3b4d2e77497d4d34a7691204ae9fa 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
-       u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+       u64 id = 0;
+
+       /* When calling huge_encode_dev():
+        * use sb->s_bdev->bd_dev when
+        *   - CONFIG_ROMFS_ON_BLOCK is defined;
+        * use sb->s_dev when
+        *   - CONFIG_ROMFS_ON_BLOCK is undefined and
+        *   - CONFIG_ROMFS_ON_MTD is defined;
+        * leave id as 0 when
+        *   - CONFIG_ROMFS_ON_BLOCK is undefined and
+        *   - CONFIG_ROMFS_ON_MTD is undefined.
+        */
+       if (sb->s_bdev)
+               id = huge_encode_dev(sb->s_bdev->bd_dev);
+       else if (sb->s_dev)
+               id = huge_encode_dev(sb->s_dev);
 
        buf->f_type = ROMFS_MAGIC;
        buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_flags |= MS_RDONLY | MS_NOATIME;
        sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+       /* Use same dev ID from the underlying mtdblock device */
+       if (sb->s_mtd)
+               sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
        /* read the image superblock and check it */
        rsb = kmalloc(512, GFP_KERNEL);
        if (!rsb)
index 0a908ae7af1382d46efa5c8073300db4680ce7e2..b0d0623c83ed88eae3afa31f079dfd2486767d07 100644 (file)
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
 
 config UBIFS_FS_ENCRYPTION
        bool "UBIFS Encryption"
-       depends on UBIFS_FS
+       depends on UBIFS_FS && BLOCK
        select FS_ENCRYPTION
        default n
        help
index 1c5331ac9614040016019aca78c1338133ea4e11..528369f3e472087fe39e0b9e716cb4a70016cee7 100644 (file)
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
        dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
                dentry, mode, dir->i_ino);
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       return err;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       return -EPERM;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
        ubifs_assert(inode_is_locked(dir));
        ubifs_assert(inode_is_locked(inode));
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               if (!fscrypt_has_permitted_context(dir, inode))
-                       return -EPERM;
-
-               err = fscrypt_get_encryption_info(inode);
-               if (err)
-                       return err;
-
-               if (!fscrypt_has_encryption_key(inode))
-                       return -EPERM;
-       }
+       if (ubifs_crypt_is_encrypted(dir) &&
+           !fscrypt_has_permitted_context(dir, inode))
+               return -EPERM;
 
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (err)
                return err;
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       goto out_budg;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       err = -EPERM;
-                       goto out_budg;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
                return err;
        }
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       goto out_budg;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       err = -EPERM;
-                       goto out_budg;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
                        goto out_inode;
                }
 
-               err = fscrypt_get_encryption_info(inode);
-               if (err) {
-                       kfree(sd);
-                       goto out_inode;
-               }
-
-               if (!fscrypt_has_encryption_key(inode)) {
-                       kfree(sd);
-                       err = -EPERM;
-                       goto out_inode;
-               }
-
                ostr.name = sd->encrypted_path;
                ostr.len = disk_link.len;
 
index 78d713644df3c00cf5f3be6ae51b742a47345417..da519ba205f614fb7a5ea9d3eb72b4c58b4504e9 100644 (file)
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case FS_IOC32_SETFLAGS:
                cmd = FS_IOC_SETFLAGS;
                break;
+       case FS_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index a459211a1c21059ff8739566d873b2093285b495..294519b98874058ef7ac7e089361733adb9f26de 100644 (file)
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 
        } else {
                data->compr_size = 0;
+               out_len = compr_len;
        }
 
        dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
        dn->compr_type = cpu_to_le16(compr_type);
        dn->size = cpu_to_le32(*new_len);
        *new_len = UBIFS_DATA_NODE_SZ + out_len;
+       err = 0;
 out:
        kfree(buf);
        return err;
index 74ae2de949df68b5918a3656840eb5ab22c8cda1..709aa098dd46e48e34a9627cd0af85d136737fab 100644 (file)
 #include <linux/slab.h>
 #include "ubifs.h"
 
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+                        int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+                             struct ubifs_zbranch *zbr, void *node);
+
 /*
  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
  * @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                return 0;
        }
 
-       err = ubifs_tnc_read_node(c, zbr, node);
+       if (c->replaying) {
+               err = fallible_read_node(c, &zbr->key, zbr, node);
+               /*
+                * Map the fallible result: 0 (not found) becomes -ENOENT,
+                * 1 (found) becomes 0; negative error codes stay as-is.
+                */
+               if (err == 0)
+                       err = -ENOENT;
+               else if (err == 1)
+                       err = 0;
+       } else {
+               err = ubifs_tnc_read_node(c, zbr, node);
+       }
        if (err)
                return err;
 
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
        if (fname_len(nm) > 0) {
                if (err) {
                        /* Handle collisions */
-                       err = resolve_collision(c, key, &znode, &n, nm);
+                       if (c->replaying)
+                               err = fallible_resolve_collision(c, key, &znode, &n,
+                                                        nm, 0);
+                       else
+                               err = resolve_collision(c, key, &znode, &n, nm);
                        dbg_tnc("rc returned %d, znode %p, n %d",
                                err, znode, n);
                        if (unlikely(err < 0))
index d96e2f30084bcfab552ffe3005af090abfe319c9..43953e03c35682723c6658dfe9b8cceed9de22ef 100644 (file)
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_t wq;
        struct userfaultfd_ctx *ctx;
+       bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
+       WRITE_ONCE(uwq->waken, true);
+       /*
+        * The implicit smp_mb__before_spinlock in try_to_wake_up()
+        * renders uwq->waken visible to other CPUs before the task is
+        * waken.
+        */
        ret = wake_up_state(wq->private, mode);
        if (ret)
                /*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        struct userfaultfd_wait_queue uwq;
        int ret;
        bool must_wait, return_to_userland;
+       long blocking_state;
 
        BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
        uwq.ctx = ctx;
+       uwq.waken = false;
 
        return_to_userland =
                (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+       blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+                        TASK_KILLABLE;
 
        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
-       set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-                         TASK_KILLABLE);
+       set_current_state(blocking_state);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
        must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                wake_up_poll(&ctx->fd_wqh, POLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
+
+               /*
+                * False wakeups can originate even from rwsem before
+                * up_read() however userfaults will wait either for a
+                * targeted wakeup on the specific uwq waitqueue from
+                * wake_userfault() or for signals or for uffd
+                * release.
+                */
+               while (!READ_ONCE(uwq.waken)) {
+                       /*
+                        * This needs the full smp_store_mb()
+                        * guarantee as the state write must be
+                        * visible to other CPUs before reading
+                        * uwq.waken from other CPUs.
+                        */
+                       set_current_state(blocking_state);
+                       if (READ_ONCE(uwq.waken) ||
+                           READ_ONCE(ctx->released) ||
+                           (return_to_userland ? signal_pending(current) :
+                            fatal_signal_pending(current)))
+                               break;
+                       schedule();
+               }
        }
 
        __set_current_state(TASK_RUNNING);
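
The loop tolerates spurious wakeups (which, per the comment, can come from mmap_sem even before up_read()): the task re-arms its sleep state, then re-checks uwq.waken, the release flag and pending signals before sleeping again. The same recheck-the-predicate discipline is what POSIX requires around condition variables; a portable sketch assuming pthreads:

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool waken;               /* the wakeup predicate */

    void wait_for_wakeup(void)
    {
            pthread_mutex_lock(&lock);
            while (!waken)                    /* re-check after every wakeup */
                    pthread_cond_wait(&cond, &lock);
            pthread_mutex_unlock(&lock);
    }

    void wake(void)
    {
            pthread_mutex_lock(&lock);
            waken = true;                     /* publish before waking */
            pthread_cond_signal(&cond);
            pthread_mutex_unlock(&lock);
    }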
index 5050056a0b06445a93845987a52ec8356c499f80..9f06a211e1570549cb8df3cdba2de2d01a79f890 100644 (file)
@@ -95,10 +95,7 @@ unsigned int
 xfs_alloc_set_aside(
        struct xfs_mount        *mp)
 {
-       unsigned int            blocks;
-
-       blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
-       return blocks;
+       return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
 }
 
 /*
@@ -365,35 +362,11 @@ xfs_alloc_fix_len(
                return;
        ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
        ASSERT(rlen % args->prod == args->mod);
+       ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
+               rlen + args->minleft);
        args->len = rlen;
 }
 
-/*
- * Fix up length if there is too little space left in the a.g.
- * Return 1 if ok, 0 if too little, should give up.
- */
-STATIC int
-xfs_alloc_fix_minleft(
-       xfs_alloc_arg_t *args)          /* allocation argument structure */
-{
-       xfs_agf_t       *agf;           /* a.g. freelist header */
-       int             diff;           /* free space difference */
-
-       if (args->minleft == 0)
-               return 1;
-       agf = XFS_BUF_TO_AGF(args->agbp);
-       diff = be32_to_cpu(agf->agf_freeblks)
-               - args->len - args->minleft;
-       if (diff >= 0)
-               return 1;
-       args->len += diff;              /* shrink the allocated space */
-       /* casts to (int) catch length underflows */
-       if ((int)args->len >= (int)args->minlen)
-               return 1;
-       args->agbno = NULLAGBLOCK;
-       return 0;
-}
-
 /*
  * Update the two btrees, logically removing from freespace the extent
  * starting at rbno, rlen blocks.  The extent is contained within the
@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
        xfs_alloc_arg_t *args)  /* argument structure for allocation */
 {
        int             error=0;
-       xfs_extlen_t    reservation;
-       xfs_extlen_t    oldmax;
 
        ASSERT(args->minlen > 0);
        ASSERT(args->maxlen > 0);
@@ -698,20 +669,6 @@ xfs_alloc_ag_vextent(
        ASSERT(args->mod < args->prod);
        ASSERT(args->alignment > 0);
 
-       /*
-        * Clamp maxlen to the amount of free space minus any reservations
-        * that have been made.
-        */
-       oldmax = args->maxlen;
-       reservation = xfs_ag_resv_needed(args->pag, args->resv);
-       if (args->maxlen > args->pag->pagf_freeblks - reservation)
-               args->maxlen = args->pag->pagf_freeblks - reservation;
-       if (args->maxlen == 0) {
-               args->agbno = NULLAGBLOCK;
-               args->maxlen = oldmax;
-               return 0;
-       }
-
        /*
         * Branch to correct routine based on the type.
         */
@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
                /* NOTREACHED */
        }
 
-       args->maxlen = oldmax;
-
        if (error || args->agbno == NULLAGBLOCK)
                return error;
 
@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
        args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
                                                - args->agbno;
        xfs_alloc_fix_len(args);
-       if (!xfs_alloc_fix_minleft(args))
-               goto not_found;
-
        ASSERT(args->agbno + args->len <= tend);
 
        /*
@@ -1149,12 +1101,7 @@ restart:
                XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
                ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
                args->len = blen;
-               if (!xfs_alloc_fix_minleft(args)) {
-                       xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
-                       trace_xfs_alloc_near_nominleft(args);
-                       return 0;
-               }
-               blen = args->len;
+
                /*
                 * We are allocating starting at bnew for blen blocks.
                 */
@@ -1346,12 +1293,6 @@ restart:
         */
        args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
        xfs_alloc_fix_len(args);
-       if (!xfs_alloc_fix_minleft(args)) {
-               trace_xfs_alloc_near_nominleft(args);
-               xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
-               xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
-               return 0;
-       }
        rlen = args->len;
        (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
                                     args->datatype, ltbnoa, ltlena, &ltnew);
@@ -1553,8 +1494,6 @@ restart:
        }
        xfs_alloc_fix_len(args);
 
-       if (!xfs_alloc_fix_minleft(args))
-               goto out_nominleft;
        rlen = args->len;
        XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
        /*
@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
        int                     flags)
 {
        struct xfs_perag        *pag = args->pag;
-       xfs_extlen_t            longest;
+       xfs_extlen_t            alloc_len, longest;
        xfs_extlen_t            reservation; /* blocks that are still reserved */
        int                     available;
 
@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
        reservation = xfs_ag_resv_needed(pag, args->resv);
 
        /* do we have enough contiguous free space for the allocation? */
+       alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
        longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
                        reservation);
-       if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
+       if (longest < alloc_len)
                return false;
 
        /* do we have enough free space remaining for the allocation? */
        available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
-                         reservation - min_free - args->total);
-       if (available < (int)args->minleft || available <= 0)
+                         reservation - min_free - args->minleft);
+       if (available < (int)max(args->total, alloc_len))
                return false;
 
+       /*
+        * Clamp maxlen to the amount of free space available for the actual
+        * extent allocation.
+        */
+       if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
+               args->maxlen = available;
+               ASSERT(args->maxlen > 0);
+               ASSERT(args->maxlen >= args->minlen);
+       }
+
        return true;
 }
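
After the rework the function asks three questions in order: is there a contiguous extent of minlen plus worst-case alignment padding, is there enough overall free space once reservations, the free-list minimum and minleft are set aside, and should maxlen be clamped to what is really available. Worked with illustrative numbers:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative numbers only. */
            unsigned minlen = 16, alignment = 8, minalignslop = 4;
            unsigned alloc_len = minlen + (alignment - 1) + minalignslop;

            int freeblks = 200, flcount = 10, reservation = 50,
                min_free = 20, minleft = 30, maxlen = 150, total = 40;
            int available = freeblks + flcount - reservation
                            - min_free - minleft;
            int need = total > (int)alloc_len ? total : (int)alloc_len;

            /* alloc_len = 27, available = 110 >= need = 40, so the
             * allocation proceeds; maxlen is clamped from 150 to 110. */
            if (available >= need && available < maxlen)
                    maxlen = available;
            printf("alloc_len=%u available=%d maxlen=%d\n",
                   alloc_len, available, maxlen);
            return 0;
    }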
 
@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
        }
 
        need = xfs_alloc_min_freelist(mp, pag);
-       if (!xfs_alloc_space_available(args, need, flags))
+       if (!xfs_alloc_space_available(args, need, flags |
+                       XFS_ALLOC_FLAG_CHECK))
                goto out_agbp_relse;
 
        /*
@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
        xfs_agblock_t   agsize; /* allocation group size */
        int             error;
        int             flags;  /* XFS_ALLOC_FLAG_... locking flags */
-       xfs_extlen_t    minleft;/* minimum left value, temp copy */
        xfs_mount_t     *mp;    /* mount structure pointer */
        xfs_agnumber_t  sagno;  /* starting allocation group number */
        xfs_alloctype_t type;   /* input allocation type */
        int             bump_rotor = 0;
-       int             no_min = 0;
        xfs_agnumber_t  rotorstep = xfs_rotorstep; /* inode32 agf stepper */
 
        mp = args->mp;
@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
                trace_xfs_alloc_vextent_badargs(args);
                return 0;
        }
-       minleft = args->minleft;
 
        switch (type) {
        case XFS_ALLOCTYPE_THIS_AG:
@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
                 */
                args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
                args->pag = xfs_perag_get(mp, args->agno);
-               args->minleft = 0;
                error = xfs_alloc_fix_freelist(args, 0);
-               args->minleft = minleft;
                if (error) {
                        trace_xfs_alloc_vextent_nofix(args);
                        goto error0;
@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
                 */
                for (;;) {
                        args->pag = xfs_perag_get(mp, args->agno);
-                       if (no_min) args->minleft = 0;
                        error = xfs_alloc_fix_freelist(args, flags);
-                       args->minleft = minleft;
                        if (error) {
                                trace_xfs_alloc_vextent_nofix(args);
                                goto error0;
@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
                         * or switch to non-trylock mode.
                         */
                        if (args->agno == sagno) {
-                               if (no_min == 1) {
+                               if (flags == 0) {
                                        args->agbno = NULLAGBLOCK;
                                        trace_xfs_alloc_vextent_allfailed(args);
                                        break;
                                }
-                               if (flags == 0) {
-                                       no_min = 1;
-                               } else {
-                                       flags = 0;
-                                       if (type == XFS_ALLOCTYPE_START_BNO) {
-                                               args->agbno = XFS_FSB_TO_AGBNO(mp,
-                                                       args->fsbno);
-                                               args->type = XFS_ALLOCTYPE_NEAR_BNO;
-                                       }
+
+                               flags = 0;
+                               if (type == XFS_ALLOCTYPE_START_BNO) {
+                                       args->agbno = XFS_FSB_TO_AGBNO(mp,
+                                               args->fsbno);
+                                       args->type = XFS_ALLOCTYPE_NEAR_BNO;
                                }
                        }
                        xfs_perag_put(args->pag);
index 7c404a6b0ae32292cf9a6eb39efd15026e8e76ed..1d0f48a501a3d6e9575b634ac022f30a91a2f309 100644 (file)
@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
 #define        XFS_ALLOC_FLAG_FREEING  0x00000002  /* indicate caller is freeing extents*/
 #define        XFS_ALLOC_FLAG_NORMAP   0x00000004  /* don't modify the rmapbt */
 #define        XFS_ALLOC_FLAG_NOSHRINK 0x00000008  /* don't shrink the freelist */
-
+#define        XFS_ALLOC_FLAG_CHECK    0x00000010  /* test only, don't modify args */
 
 /*
  * Argument structure for xfs_alloc routines.
index 2760bc3b2536c46eedd63c66e127e953dab9ce55..44773c9eb957dd8f6c5904ef9d41c3de880d14be 100644 (file)
@@ -3812,7 +3812,6 @@ xfs_bmap_btalloc(
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
                args.total = ap->minlen;
-               args.minleft = 0;
                if ((error = xfs_alloc_vextent(&args)))
                        return error;
                ap->dfops->dop_low = true;
@@ -4344,8 +4343,6 @@ xfs_bmapi_allocate(
        if (error)
                return error;
 
-       if (bma->dfops->dop_low)
-               bma->minleft = 0;
        if (bma->cur)
                bma->cur->bc_private.b.firstblock = *bma->firstblock;
        if (bma->blkno == NULLFSBLOCK)
index d6330c297ca0a4858666403f513f43ce4e16d1a0..d9be241fc86fb39207e725d28bdcdccf943a393d 100644 (file)
@@ -502,12 +502,11 @@ try_another_ag:
        if (args.fsbno == NULLFSBLOCK && args.minleft) {
                /*
                 * Could not find an AG with enough free space to satisfy
-                * a full btree split.  Try again without minleft and if
+                * a full btree split.  Try again and if
                 * successful activate the lowspace algorithm.
                 */
                args.fsbno = 0;
                args.type = XFS_ALLOCTYPE_FIRST_AG;
-               args.minleft = 0;
                error = xfs_alloc_vextent(&args);
                if (error)
                        goto error0;
index c58d72c220f58593cd05b90b3227f1ecb4f2d06f..2f389d366e93324c3f481d8c7b0773475b423d83 100644 (file)
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
 /*
- * @mode, if set, indicates that the type field needs to be set up.
- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
- * for file type specification. This will be propagated into the directory
- * structure if appropriate for the given operation and filesystem config.
+ * Convert inode mode to directory entry filetype
  */
-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
-       [0]                     = XFS_DIR3_FT_UNKNOWN,
-       [S_IFREG >> S_SHIFT]    = XFS_DIR3_FT_REG_FILE,
-       [S_IFDIR >> S_SHIFT]    = XFS_DIR3_FT_DIR,
-       [S_IFCHR >> S_SHIFT]    = XFS_DIR3_FT_CHRDEV,
-       [S_IFBLK >> S_SHIFT]    = XFS_DIR3_FT_BLKDEV,
-       [S_IFIFO >> S_SHIFT]    = XFS_DIR3_FT_FIFO,
-       [S_IFSOCK >> S_SHIFT]   = XFS_DIR3_FT_SOCK,
-       [S_IFLNK >> S_SHIFT]    = XFS_DIR3_FT_SYMLINK,
-};
+unsigned char xfs_mode_to_ftype(int mode)
+{
+       switch (mode & S_IFMT) {
+       case S_IFREG:
+               return XFS_DIR3_FT_REG_FILE;
+       case S_IFDIR:
+               return XFS_DIR3_FT_DIR;
+       case S_IFCHR:
+               return XFS_DIR3_FT_CHRDEV;
+       case S_IFBLK:
+               return XFS_DIR3_FT_BLKDEV;
+       case S_IFIFO:
+               return XFS_DIR3_FT_FIFO;
+       case S_IFSOCK:
+               return XFS_DIR3_FT_SOCK;
+       case S_IFLNK:
+               return XFS_DIR3_FT_SYMLINK;
+       default:
+               return XFS_DIR3_FT_UNKNOWN;
+       }
+}
 
 /*
  * ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
        if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
                return rval;
        rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
-       ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
+       if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+               return -EFSCORRUPTED;
        *vp = rval;
        return 0;
 }
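
Replacing the S_SHIFT-indexed table with the switch in xfs_mode_to_ftype() above means a mode whose S_IFMT bits match no defined file type maps to XFS_DIR3_FT_UNKNOWN instead of indexing the old array with a garbage value; the new checks in xfs_dinode_verify() and xfs_dentry_mode_to_name() below then reject such modes as corruption. A standalone illustration of the safe shape:

    #include <stdio.h>
    #include <sys/stat.h>

    enum ftype { FT_UNKNOWN, FT_REG, FT_DIR };

    /* Any S_IFMT pattern not listed falls to FT_UNKNOWN, unlike table
     * indexing, which would read past the table for a garbage mode. */
    static enum ftype mode_to_ftype(int mode)
    {
            switch (mode & S_IFMT) {
            case S_IFREG: return FT_REG;
            case S_IFDIR: return FT_DIR;
            default:      return FT_UNKNOWN;
            }
    }

    int main(void)
    {
            printf("%d\n", mode_to_ftype(S_IFDIR | 0755));  /* FT_DIR */
            printf("%d\n", mode_to_ftype(0xf000));          /* FT_UNKNOWN */
            return 0;
    }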
index 0197590fa7d7c0a3d97d68dccdcfb3bd0709964b..d6e6d9d16f6c30d90e88f742c077782bc678b86f 100644 (file)
@@ -18,6 +18,9 @@
 #ifndef __XFS_DIR2_H__
 #define __XFS_DIR2_H__
 
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+
 struct xfs_defer_ops;
 struct xfs_da_args;
 struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
 extern struct xfs_name xfs_name_dotdot;
 
 /*
- * directory filetype conversion tables.
+ * Convert inode mode to directory entry filetype
  */
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
+extern unsigned char xfs_mode_to_ftype(int mode);
 
 /*
  * directory operations vector for encode/decode routines
index dd483e2767f7a38a4dfef5c063148430364b4aaf..d93f9d918cfc11ada2ded50d21e0eb9226381f93 100644 (file)
@@ -29,6 +29,7 @@
 #include "xfs_icache.h"
 #include "xfs_trans.h"
 #include "xfs_ialloc.h"
+#include "xfs_dir2.h"
 
 /*
  * Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
        xfs_ino_t               ino,
        struct xfs_dinode       *dip)
 {
+       uint16_t                mode;
        uint16_t                flags;
        uint64_t                flags2;
 
@@ -396,8 +398,12 @@ xfs_dinode_verify(
        if (be64_to_cpu(dip->di_size) & (1ULL << 63))
                return false;
 
-       /* No zero-length symlinks. */
-       if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+       mode = be16_to_cpu(dip->di_mode);
+       if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
+               return false;
+
+       /* No zero-length symlinks/dirs. */
+       if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
                return false;
 
        /* only version 3 or greater inodes are extensively verified here */
index 0f56fcd3a5d51517b93c391bb3d97a58f205a544..631e7c0e0a29ae51eba5f7e25d8dfc96dc93c61e 100644 (file)
@@ -1152,19 +1152,22 @@ xfs_vm_releasepage(
         * block_invalidatepage() can send pages that are still marked dirty
         * but otherwise have invalidated buffers.
         *
-        * We've historically freed buffers on the latter. Instead, quietly
-        * filter out all dirty pages to avoid spurious buffer state warnings.
-        * This can likely be removed once shrink_active_list() is fixed.
+        * We want to release the latter to avoid unnecessary buildup of the
+        * LRU. Skip pages with delalloc or unwritten buffers, warning if
+        * such a page is not dirty; otherwise try to release the buffers.
         */
-       if (PageDirty(page))
-               return 0;
-
        xfs_count_page_state(page, &delalloc, &unwritten);
 
-       if (WARN_ON_ONCE(delalloc))
+       if (delalloc) {
+               WARN_ON_ONCE(!PageDirty(page));
                return 0;
-       if (WARN_ON_ONCE(unwritten))
+       }
+       if (unwritten) {
+               WARN_ON_ONCE(!PageDirty(page));
                return 0;
+       }
 
        return try_to_free_buffers(page);
 }
index 7a30b8f11db7a26f8a82ded531e8a5170ea03ad5..9d06cc30e875e147a5560bad24e5a55aedb65cf0 100644 (file)
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
        /* Simple advance */
        next_id = *id + 1;
 
+       /* If we'd wrap past the max ID, stop */
+       if (next_id < *id)
+               return -ENOENT;
+
        /* If new ID is within the current chunk, advancing it sufficed */
        if (next_id % mp->m_quotainfo->qi_dqperchunk) {
                *id = next_id;
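
Quota IDs are unsigned, so at the maximum ID the increment wraps to zero and iteration would restart forever; next_id < *id is the standard overflow probe for an unsigned increment. Minimal illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t id = UINT32_MAX;      /* last possible quota ID */
            uint32_t next = id + 1;        /* wraps to 0 */

            if (next < id)
                    puts("wrapped: stop iterating");   /* branch taken */
            return 0;
    }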
index 308bebb6dfd266f85ae225ef0c235128bb7b36ba..22c16155f1b42a1380f85935fa43706cd60e2b40 100644 (file)
@@ -97,13 +97,28 @@ xfs_init_security(
 
 static void
 xfs_dentry_to_name(
+       struct xfs_name *namep,
+       struct dentry   *dentry)
+{
+       namep->name = dentry->d_name.name;
+       namep->len = dentry->d_name.len;
+       namep->type = XFS_DIR3_FT_UNKNOWN;
+}
+
+static int
+xfs_dentry_mode_to_name(
        struct xfs_name *namep,
        struct dentry   *dentry,
        int             mode)
 {
        namep->name = dentry->d_name.name;
        namep->len = dentry->d_name.len;
-       namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
+       namep->type = xfs_mode_to_ftype(mode);
+
+       if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
+               return -EFSCORRUPTED;
+
+       return 0;
 }
 
 STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
         * xfs_init_security we must back out.
         * ENOSPC can hit here, among other things.
         */
-       xfs_dentry_to_name(&teardown, dentry, 0);
+       xfs_dentry_to_name(&teardown, dentry);
 
        xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
 }
@@ -154,8 +169,12 @@ xfs_generic_create(
        if (error)
                return error;
 
+       /* Verify mode is valid also for tmpfile case */
+       error = xfs_dentry_mode_to_name(&name, dentry, mode);
+       if (unlikely(error))
+               goto out_free_acl;
+
        if (!tmpfile) {
-               xfs_dentry_to_name(&name, dentry, mode);
                error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
        } else {
                error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
        if (dentry->d_name.len >= MAXNAMELEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       xfs_dentry_to_name(&name, dentry, 0);
+       xfs_dentry_to_name(&name, dentry);
        error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
        if (unlikely(error)) {
                if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
        if (dentry->d_name.len >= MAXNAMELEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       xfs_dentry_to_name(&xname, dentry, 0);
+       xfs_dentry_to_name(&xname, dentry);
        error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
        if (unlikely(error)) {
                if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
        struct xfs_name name;
        int             error;
 
-       xfs_dentry_to_name(&name, dentry, inode->i_mode);
+       error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
+       if (unlikely(error))
+               return error;
 
        error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
        if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
        struct xfs_name name;
        int             error;
 
-       xfs_dentry_to_name(&name, dentry, 0);
+       xfs_dentry_to_name(&name, dentry);
 
        error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
        if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
 
        mode = S_IFLNK |
                (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
-       xfs_dentry_to_name(&name, dentry, mode);
+       error = xfs_dentry_mode_to_name(&name, dentry, mode);
+       if (unlikely(error))
+               goto out;
 
        error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
        if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
 {
        struct inode    *new_inode = d_inode(ndentry);
        int             omode = 0;
+       int             error;
        struct xfs_name oname;
        struct xfs_name nname;
 
@@ -405,8 +429,14 @@ xfs_vn_rename(
        if (flags & RENAME_EXCHANGE)
                omode = d_inode(ndentry)->i_mode;
 
-       xfs_dentry_to_name(&oname, odentry, omode);
-       xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
+       error = xfs_dentry_mode_to_name(&oname, odentry, omode);
+       if (omode && unlikely(error))
+               return error;
+
+       error = xfs_dentry_mode_to_name(&nname, ndentry,
+                                       d_inode(odentry)->i_mode);
+       if (unlikely(error))
+               return error;
 
        return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
                          XFS_I(ndir), &nname,
index e467218c0098323d41e55caf4a660862d39463a8..7a989de224f4b77477e88e74e23d1be4272682be 100644 (file)
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 }
 
 #define ASSERT_ALWAYS(expr)    \
-       (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifdef DEBUG
 #define ASSERT(expr)   \
-       (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 #ifdef XFS_WARN
 
 #define ASSERT(expr)   \
-       (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC static noinline
index c39ac14ff54009ba33a4a7313612bd2afb95117c..b1469f0a91a6c04329cb2fff2dc6e9953485a071 100644 (file)
@@ -3317,12 +3317,8 @@ xfs_log_force(
        xfs_mount_t     *mp,
        uint            flags)
 {
-       int     error;
-
        trace_xfs_log_force(mp, 0, _RET_IP_);
-       error = _xfs_log_force(mp, flags, NULL);
-       if (error)
-               xfs_warn(mp, "%s: error %d returned.", __func__, error);
+       _xfs_log_force(mp, flags, NULL);
 }
 
 /*
@@ -3466,12 +3462,8 @@ xfs_log_force_lsn(
        xfs_lsn_t       lsn,
        uint            flags)
 {
-       int     error;
-
        trace_xfs_log_force(mp, lsn, _RET_IP_);
-       error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
-       if (error)
-               xfs_warn(mp, "%s: error %d returned.", __func__, error);
+       _xfs_log_force_lsn(mp, lsn, flags, NULL);
 }
 
 /*
index 982c299e435a09703de12d32b4deec08a54d44e0..d026f5017c33cea2c9766b28b5ccb67d635fbe85 100644 (file)
@@ -73,6 +73,5 @@ extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
 
 extern void drm_kms_helper_poll_disable(struct drm_device *dev);
 extern void drm_kms_helper_poll_enable(struct drm_device *dev);
-extern void drm_kms_helper_poll_enable_locked(struct drm_device *dev);
 
 #endif
index 55bbeb0ff594581a2448adcf7fb26b7eb883243a..04681359a6f51d051103f184969440a48b18d3a1 100644 (file)
 # define DP_PSR_SETUP_TIME_0                (6 << 1)
 # define DP_PSR_SETUP_TIME_MASK             (7 << 1)
 # define DP_PSR_SETUP_TIME_SHIFT            1
-
+# define DP_PSR2_SU_Y_COORDINATE_REQUIRED   (1 << 4)  /* eDP 1.4a */
+# define DP_PSR2_SU_GRANULARITY_REQUIRED    (1 << 5)  /* eDP 1.4b */
 /*
  * 0x80-0x8f describe downstream port capabilities, but there are two layouts
  * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set.  If it was not,
 #define DP_RECEIVER_ALPM_STATUS                    0x200b  /* eDP 1.4 */
 # define DP_ALPM_LOCK_TIMEOUT_ERROR        (1 << 0)
 
+#define DP_DPRX_FEATURE_ENUMERATION_LIST    0x2210  /* DP 1.3 */
+# define DP_GTC_CAP                                    (1 << 0)  /* DP 1.3 */
+# define DP_SST_SPLIT_SDP_CAP                          (1 << 1)  /* DP 1.4 */
+# define DP_AV_SYNC_CAP                                        (1 << 2)  /* DP 1.3 */
+# define DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED      (1 << 3)  /* DP 1.3 */
+# define DP_VSC_EXT_VESA_SDP_SUPPORTED                 (1 << 4)  /* DP 1.4 */
+# define DP_VSC_EXT_VESA_SDP_CHAINING_SUPPORTED                (1 << 5)  /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_SUPPORTED                  (1 << 6)  /* DP 1.4 */
+# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED         (1 << 7)  /* DP 1.4 */
+
 /* DP 1.2 Sideband message defines */
 /* peer device type - DP 1.2a Table 2-92 */
 #define DP_PEER_DEVICE_NONE            0x0
index b717ed9d2b755255cfafa76553b3823449f594c2..5c970ce6794977a73e8c69181b5fe715191927b5 100644 (file)
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vhe(void);
 #endif
index 83695641bd5ec272551857c448cc9b4f354898b8..1ca8e8fd10789d0fff1aa0b21ede64b72075e094 100644 (file)
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
        }
 }
 
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 {
        return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
        return blk_rq_cur_bytes(rq) >> 9;
 }
 
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request.  Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+       if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+               return rq->special_vec.bv_len;
+       return blk_rq_bytes(rq);
+}
+
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
                                                     int op)
 {
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
        return false;
 }
 
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
 {
        struct request_queue *q = bdev_get_queue(bdev);
 
        if (q)
-               return blk_queue_zone_size(q);
+               return blk_queue_zone_sectors(q);
 
        return 0;
 }
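
The rename from *_zone_size to *_zone_sectors makes the unit explicit. A minimal sketch (helper name hypothetical) of deriving the zone length in bytes from the sector count:

    #include <linux/blkdev.h>

    /* Hypothetical helper: zone length in bytes. bdev_zone_sectors()
     * counts 512-byte sectors and returns 0 for a non-zoned device. */
    static inline u64 my_zone_bytes(struct block_device *bdev)
    {
            return (u64)bdev_zone_sectors(bdev) << 9;
    }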
index f74ae68086dc69b3c02ced58a13f6daed0b4cd77..05cf951df3fedd12b742b9478af13aeace86749c 100644 (file)
@@ -216,7 +216,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-int bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_tag(struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
index d016a121a8c46492bd6feea5b093dcea9bf27933..28ffa94aed6b85d10531e7dc11864f9ebc8701f4 100644 (file)
@@ -14,6 +14,7 @@ struct coredump_params;
 extern int dump_skip(struct coredump_params *cprm, size_t nr);
 extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
 extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
 #ifdef CONFIG_COREDUMP
 extern void do_coredump(const siginfo_t *siginfo);
 #else
index 20bfefbe75941627c25c30621d4d9b09606c720c..d936a0021839cca651e19ec43e71b8f21cb69cf0 100644 (file)
@@ -74,6 +74,8 @@ enum cpuhp_state {
        CPUHP_ZCOMP_PREPARE,
        CPUHP_TIMERS_DEAD,
        CPUHP_MIPS_SOC_PREPARE,
+       CPUHP_BP_PREPARE_DYN,
+       CPUHP_BP_PREPARE_DYN_END                = CPUHP_BP_PREPARE_DYN + 20,
        CPUHP_BRINGUP_CPU,
        CPUHP_AP_IDLE_DEAD,
        CPUHP_AP_OFFLINE,
index a07a476178cd1b4c59295ba2c1ee420c1bbb696e..5b1af30ece55828e655b2bf02a149fd71354f3eb 100644 (file)
@@ -103,6 +103,7 @@ typedef     struct {
 
 #define EFI_PAGE_SHIFT         12
 #define EFI_PAGE_SIZE          (1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX          (U64_MAX >> EFI_PAGE_SHIFT)
 
 typedef struct {
        u32 type;
@@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
 extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
 extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
 extern void __init efi_memmap_unmap(void);
index a0934e6c9babf8436a18b28f4dea285894e5dca5..e4eb2546339afbf2764f5ba335e1e66947fc5af3 100644 (file)
@@ -57,6 +57,8 @@ struct bpf_prog_aux;
 /* BPF program can access up to 512 bytes of stack space. */
 #define MAX_BPF_STACK  512
 
+#define BPF_TAG_SIZE   8
+
 /* Helper macros for filter block array initializers. */
 
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +410,7 @@ struct bpf_prog {
        kmemcheck_bitfield_end(meta);
        enum bpf_prog_type      type;           /* Type of BPF program */
        u32                     len;            /* Number of filter blocks */
-       u32                     digest[SHA_DIGEST_WORDS]; /* Program digest */
+       u8                      tag[BPF_TAG_SIZE];
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
        unsigned int            (*bpf_func)(const void *ctx,
@@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
        return prog->len * sizeof(struct bpf_insn);
 }
 
-static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
 {
        return round_up(bpf_prog_insn_size(prog) +
                        sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
index 4175dca4ac39dd7848e3abcda5eb163865ab2734..0fe0b6295ab58edfe6745467487fe19beba0d723 100644 (file)
@@ -38,9 +38,8 @@ struct vm_area_struct;
 #define ___GFP_ACCOUNT         0x100000u
 #define ___GFP_NOTRACK         0x200000u
 #define ___GFP_DIRECT_RECLAIM  0x400000u
-#define ___GFP_OTHER_NODE      0x800000u
-#define ___GFP_WRITE           0x1000000u
-#define ___GFP_KSWAPD_RECLAIM  0x2000000u
+#define ___GFP_WRITE           0x800000u
+#define ___GFP_KSWAPD_RECLAIM  0x1000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -172,11 +171,6 @@ struct vm_area_struct;
  * __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
  *   distinguishing in the source between false positives and allocations that
  *   cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- *   should not be accounted for as a remote allocation in vmstat. A
- *   typical user would be khugepaged collapsing a huge page on a remote
- *   node.
  */
 #define __GFP_COLD     ((__force gfp_t)___GFP_COLD)
 #define __GFP_NOWARN   ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
 #define __GFP_ZERO     ((__force gfp_t)___GFP_ZERO)
 #define __GFP_NOTRACK  ((__force gfp_t)___GFP_NOTRACK)
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-                             unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
-                              unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+                            unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
 
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
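
A minimal sketch of the renamed page-fragment API (cache and function names here are hypothetical; real users typically keep the cache per-CPU):

    #include <linux/gfp.h>

    static struct page_frag_cache my_cache;     /* hypothetical cache */

    static void *my_alloc_frag(void)
    {
            /* Carves 256 bytes out of the cache's current page and takes
             * a page reference that page_frag_free() drops later. */
            return page_frag_alloc(&my_cache, 256, GFP_ATOMIC);
    }

    static void my_free_frag(void *buf)
    {
            page_frag_free(buf);
    }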
index c2748accea71aa006268afebe65898fcc9f6d033..e973faba69dc5c90586aa97511d860178f31ff2e 100644 (file)
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
                struct irq_chip *irqchip,
                int parent_irq);
 
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. This upfront
+ * boilerplate static inlines provides such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key);
+                         unsigned int type)
+{
+
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, NULL);
+}
 
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
                          unsigned int type)
 {
-       return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
-                                    handler, type, true, NULL);
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, NULL);
 }
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...)                              \
-(                                                              \
-       ({                                                      \
-               static struct lock_class_key _key;              \
-               _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
-       })                                                      \
-)
-#else
-#define gpiochip_irqchip_add(...)                              \
-       _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
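
A hedged sketch of a call site (driver names hypothetical): because gpiochip_irqchip_add() is now a static inline carrying its own static lock_class_key, each driver that calls it gets a distinct lockdep class rather than sharing one global key:

    #include <linux/gpio/driver.h>
    #include <linux/irq.h>

    /* Hypothetical probe fragment; the static key inside the inline
     * above gives this driver its own lockdep class. */
    static int my_setup_irqchip(struct gpio_chip *gc, struct irq_chip *ic)
    {
            return gpiochip_irqchip_add(gc, ic, 0, handle_simple_irq,
                                        IRQ_TYPE_NONE);
    }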
 
index b2109c522dec0726d6e6e390358a79be4bcdf891..4b45ec46161fd66d6e7fd32f53dd71b8ac1cab42 100644 (file)
@@ -665,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
 #define I2C_CLIENT_TEN         0x10    /* we have a ten bit chip address */
                                        /* Must equal I2C_M_TEN below */
 #define I2C_CLIENT_SLAVE       0x20    /* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY 0x40    /* We want to use I2C host notify */
 #define I2C_CLIENT_WAKE                0x80    /* for board_info; true iff can wake */
 #define I2C_CLIENT_SCCB                0x9000  /* Use Omnivision SCCB protocol */
                                        /* Must match I2C_M_STOP|IGNORE_NAK */
index 089f70f83e97c9a1adf1087f10d095a02bd3e152..23da3af459fe7af7b5e5c2eabccbda350122e88b 100644 (file)
@@ -14,6 +14,7 @@ struct static_key_deferred {
 
 #ifdef HAVE_JUMP_LABEL
 extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
 extern void
 jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
 
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
        STATIC_KEY_CHECK_USE();
        static_key_slow_dec(&key->key);
 }
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+       STATIC_KEY_CHECK_USE();
+}
 static inline void
 jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
index 56aec84237ad5b7b55c3a43eb04a882f9eb24d5e..cb09238f6d32be355a9b7e2347e48746de7eae77 100644 (file)
@@ -514,8 +514,8 @@ extern enum system_states {
 #define TAINT_FLAGS_COUNT              16
 
 struct taint_flag {
-       char true;      /* character printed when tainted */
-       char false;     /* character printed when not tainted */
+       char c_true;    /* character printed when tainted */
+       char c_false;   /* character printed when not tainted */
        bool module;    /* also show as a per-module taint flag */
 };
 
index 61d20c17f3b7e40ba706a49da463c2f9eabd7d93..254698856b8fc2723dae61e3dd20b6c8176129e7 100644 (file)
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
  */
 struct mem_cgroup_per_node {
        struct lruvec           lruvec;
-       unsigned long           lru_size[NR_LRU_LISTS];
+       unsigned long           lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
        struct mem_cgroup_reclaim_iter  iter[DEF_PRIORITY + 1];
 
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
 
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-               int nr_pages);
+               int zid, int nr_pages);
 
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                           int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
 unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        struct mem_cgroup_per_node *mz;
+       unsigned long nr_pages = 0;
+       int zid;
 
        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       return mz->lru_size[lru];
+       for (zid = 0; zid < MAX_NR_ZONES; zid++)
+               nr_pages += mz->lru_zone_size[zid][lru];
+       return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+               enum lru_list lru, int zone_idx)
+{
+       struct mem_cgroup_per_node *mz;
+
+       mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+       return mz->lru_zone_size[zone_idx][lru];
 }
 
 void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
 {
        return 0;
 }
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+               enum lru_list lru, int zone_idx)
+{
+       return 0;
+}
 
 static inline unsigned long
 mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
index 01033fadea4766d5e6efddc78ca595d68c021464..c1784c0b4f3585e0d20ca8253813c31d47f11c04 100644 (file)
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                         enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                         enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
index fe6b4036664a9a7c82fe4a22be93288928eceac1..b84615b0f64c294eebb72095cc8dbc874b4ae22b 100644 (file)
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                        struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
                loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-              spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
        unsigned long *pfn);
 int follow_phys(struct vm_area_struct *vma, unsigned long address,
index 71613e8a720f99b6870f1e3f8957d2761a6ac1a0..41d376e7116dccae22d9881312cfb660cd9fd58e 100644 (file)
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
 {
        __update_lru_size(lruvec, lru, zid, nr_pages);
 #ifdef CONFIG_MEMCG
-       mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+       mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
 #endif
 }
 
index 36d9896fbc1eb0d12e60682f15e96648c13ebf98..f4aac87adcc3555014f6215d6599b604c70388e6 100644 (file)
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to a concurrent
+ * nodemask update caused by a cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
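
A minimal caller sketch of the contract documented above (function name hypothetical): the zoneref returned is never NULL, so the NULL check belongs on its ->zone member:

    #include <linux/mmzone.h>

    /* Hypothetical caller: returns NULL when no eligible zone exists,
     * e.g. after a racing cpuset nodemask update. */
    static struct zone *my_pick_zone(struct zonelist *zonelist,
                                     enum zone_type highest_zoneidx,
                                     nodemask_t *nodes)
    {
            struct zoneref *z = first_zones_zonelist(zonelist,
                                                     highest_zoneidx, nodes);
            return z->zone;         /* may be NULL; z itself never is */
    }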
index 994f7423a74bd622884c3b646f4123d28697b8ad..9bde9558b59672a866bd763039d326bde2af0f81 100644 (file)
@@ -2477,14 +2477,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
        return NAPI_GRO_CB(skb)->frag0_len < hlen;
 }
 
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+       NAPI_GRO_CB(skb)->frag0 = NULL;
+       NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
                                        unsigned int offset)
 {
        if (!pskb_may_pull(skb, hlen))
                return NULL;
 
-       NAPI_GRO_CB(skb)->frag0 = NULL;
-       NAPI_GRO_CB(skb)->frag0_len = 0;
+       skb_gro_frag0_invalidate(skb);
        return skb->data + offset;
 }
 
index aacca824a6aef4fcc4d2480aa9eeefd2fe82d6f9..0a3fadc32693a9cf869693f4c406eee5d168e36b 100644 (file)
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
index 4741ecdb981743151b70afff63b10740dfaa4132..78ed8105e64d1ae0341104c8d129cfeba9402800 100644 (file)
@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
 extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_disable_inatomic(struct perf_event *event);
 extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
 perf_aux_output_begin(struct perf_output_handle *handle,
index 321f9ed552a995f396696b71343f66cbd2e01d9e..01f71e1d2e941e359fc5fdd07f0645813ef8f845 100644 (file)
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
+#define RCU_SCHEDULER_INACTIVE 0
+#define RCU_SCHEDULER_INIT     1
+#define RCU_SCHEDULER_RUNNING  2
+
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
index e2f3a3281d8fd29de861f4d46ccf7da54ce6eea8..8265d351c9f0e6bc7814fd59ca1efff531e4798f 100644 (file)
@@ -408,7 +408,8 @@ enum rproc_crash_type {
  * @crash_comp: completion used to sync crash handler and the rproc reload
  * @recovery_disabled: flag that state if recovery was disabled
  * @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
  * @has_iommu: flag to indicate if remote processor is behind an MMU
  */
 struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
        bool recovery_disabled;
        int max_notifyid;
        struct resource_table *table_ptr;
+       struct resource_table *cached_table;
        bool has_iommu;
        bool auto_boot;
 };
index 4d1905245c7aa50df56acf0f77c77f3347c28c04..ad3ec9ec61f7b6de743b5d6de4225defcc18be99 100644 (file)
@@ -854,6 +854,16 @@ struct signal_struct {
 
 #define SIGNAL_UNKILLABLE      0x00000040 /* for init: ignore fatal signals */
 
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+                         SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+                                        unsigned int flags)
+{
+       WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+       sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
 {
index b53c0cfd417e3bec1c5cfab76787901bafa5884c..a410715bbef8889d148a6b3f8dbd6afc4ae6f4d0 100644 (file)
@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
 
 static inline void skb_free_frag(void *addr)
 {
-       __free_page_frag(addr);
+       page_frag_free(addr);
 }
 
 void *napi_alloc_frag(unsigned int fragsz);
index 084b12bad198232426c6beb27cd348d72797b344..4c536356681543d8749bc5013d619f6c5c7a5bd3 100644 (file)
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * (PAGE_SIZE*2).  Larger requests are passed to the page allocator.
  */
 #define KMALLOC_SHIFT_HIGH     (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX      (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX      (MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW      3
 #endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
  * be allocated from the same page.
  */
 #define KMALLOC_SHIFT_HIGH     PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX      30
+#define KMALLOC_SHIFT_MAX      (MAX_ORDER + PAGE_SHIFT - 1)
 #ifndef KMALLOC_SHIFT_LOW
 #define KMALLOC_SHIFT_LOW      3
 #endif
index e5d19344037491651c6c80f6b310842ee5715126..7440290f64acd3694dfc5c17618c55f6253aae01 100644 (file)
@@ -66,6 +66,7 @@ struct svc_xprt {
 #define XPT_LISTENER   10              /* listening endpoint */
 #define XPT_CACHE_AUTH 11              /* cache auth info */
 #define XPT_LOCAL      12              /* connection from loopback interface */
+#define XPT_KILL_TEMP   13             /* call xpo_kill_temp_xprt before closing */
 
        struct svc_serv         *xpt_server;    /* service for transport */
        atomic_t                xpt_reserved;   /* space on outq that is rsvd */
index 09f4be179ff304ed0c5e921c8b9bd6e33112c886..7f47b7098b1b9b9cfb6aacfca126826160eaaa8c 100644 (file)
@@ -150,8 +150,9 @@ enum {
        SWP_FILE        = (1 << 7),     /* set after swap_activate success */
        SWP_AREA_DISCARD = (1 << 8),    /* single-time swap area discards */
        SWP_PAGE_DISCARD = (1 << 9),    /* freed swap page-cluster discards */
+       SWP_STABLE_WRITES = (1 << 10),  /* no overwrite PG_writeback pages */
                                        /* add others here before... */
-       SWP_SCANNING    = (1 << 10),    /* refcount in scan_swap_map */
+       SWP_SCANNING    = (1 << 11),    /* refcount in scan_swap_map */
 };
 
 #define SWAP_CLUSTER_MAX 32UL
index fc5848dad7a43216b3f124c4afdaa6b64b23910c..c93f4b3a59cb7a9f578ec18dd2f6c56014195a90 100644 (file)
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
+       union {
+               u8      val[TCP_FASTOPEN_COOKIE_MAX];
+#if IS_ENABLED(CONFIG_IPV6)
+               struct in6_addr addr;
+#endif
+       };
        s8      len;
-       u8      val[TCP_FASTOPEN_COOKIE_MAX];
        bool    exp;    /* In RFC6994 experimental option format */
 };
 
index bd36ce431e32cbb57a1ec4e97964b78774787a2e..bab0b1ad0613eb7de475b88546cecd7a52645e90 100644 (file)
@@ -8,23 +8,7 @@
 #ifndef _LINUX_TIMERFD_H
 #define _LINUX_TIMERFD_H
 
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
 
 #define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
 /* Flags for timerfd_create.  */
@@ -32,6 +16,4 @@
 /* Flags for timerfd_settime.  */
 #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
 
-#define TFD_IOC_SET_TICKS      _IOW('T', 0, u64)
-
 #endif /* _LINUX_TIMERFD_H */
index 96dd0b3f70d75ba5362eccecdd8c8371627f22a7..da5033dd8cbcab5bff1557452b7a55e888d46c30 100644 (file)
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
 /**
  * fc_set_wwpn() - Set the World Wide Port Name of a local port
  * @lport: The local port whose WWPN is to be set
- * @wwnn:  The new WWPN
+ * @wwpn:  The new WWPN
  */
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
 {
-       lport->wwpn = wwnn;
+       lport->wwpn = wwpn;
 }
 
 /**
index 530c57bdefa06567c3bcaba9249845af1d4e535a..915c4357945c27992aa3b7561ab705dedff082ad 100644 (file)
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
                HDMI_AC97,
                HDMI_SPDIF,
        } fmt;
-       int bit_clk_inv:1;
-       int frame_clk_inv:1;
-       int bit_clk_master:1;
-       int frame_clk_master:1;
+       unsigned int bit_clk_inv:1;
+       unsigned int frame_clk_inv:1;
+       unsigned int bit_clk_master:1;
+       unsigned int frame_clk_master:1;
 };
 
 /*
index 2b502f6cc6d036be6e25c6834671be1cde2fd8da..b86168a21d56c7fff1d59dfacbef93049bde888e 100644 (file)
@@ -813,6 +813,7 @@ struct snd_soc_component {
        unsigned int suspended:1; /* is in suspend PM state */
 
        struct list_head list;
+       struct list_head card_aux_list; /* for auxiliary bound components */
        struct list_head card_list;
 
        struct snd_soc_dai_driver *dai_drv;
@@ -1152,6 +1153,7 @@ struct snd_soc_card {
         */
        struct snd_soc_aux_dev *aux_dev;
        int num_aux_devs;
+       struct list_head aux_comp_list;
 
        const struct snd_kcontrol_new *controls;
        int num_controls;
@@ -1547,6 +1549,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
        INIT_LIST_HEAD(&card->widgets);
        INIT_LIST_HEAD(&card->paths);
        INIT_LIST_HEAD(&card->dapm_list);
+       INIT_LIST_HEAD(&card->aux_comp_list);
        INIT_LIST_HEAD(&card->component_dev_list);
 }
 
index 29e6858bb1648b636dcce48072f9fe43e5a8a884..43edf82e54fffce7b3d8bfed8c832aebfc60c79b 100644 (file)
@@ -174,6 +174,10 @@ enum tcm_sense_reason_table {
        TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED  = R(0x16),
        TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED  = R(0x17),
        TCM_COPY_TARGET_DEVICE_NOT_REACHABLE    = R(0x18),
+       TCM_TOO_MANY_TARGET_DESCS               = R(0x19),
+       TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE   = R(0x1a),
+       TCM_TOO_MANY_SEGMENT_DESCS              = R(0x1b),
+       TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE  = R(0x1c),
 #undef R
 };
 
index c14bed4ab0977fc1d7d06b38051644f8e67e2349..88d18a8ceb59f9c6e3c5630a491cf6d3c91cf73c 100644 (file)
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
                                BTRFS_I(inode)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
-                 "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+       TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+                 "disk_i_size=%llu last_trans=%llu logged_trans=%llu",
                  show_root_type(__entry->root_objectid),
                  (unsigned long long)__entry->generation,
                  (unsigned long)__entry->ino,
@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
 
 TRACE_EVENT_CONDITION(btrfs_get_extent,
 
-       TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+       TP_PROTO(struct btrfs_root *root, struct inode *inode,
+                struct extent_map *map),
 
-       TP_ARGS(root, map),
+       TP_ARGS(root, inode, map),
 
        TP_CONDITION(map),
 
        TP_STRUCT__entry_btrfs(
                __field(        u64,  root_objectid     )
+               __field(        u64,  ino               )
                __field(        u64,  start             )
                __field(        u64,  len               )
                __field(        u64,  orig_start        )
@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
 
        TP_fast_assign_btrfs(root->fs_info,
                __entry->root_objectid  = root->root_key.objectid;
-               __entry->start          = map->start;
+               __entry->ino            = btrfs_ino(inode);
+               __entry->start          = map->start;
                __entry->len            = map->len;
                __entry->orig_start     = map->orig_start;
                __entry->block_start    = map->block_start;
@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
                __entry->compress_type  = map->compress_type;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
-                 "orig_start = %llu, block_start = %llu(%s), "
-                 "block_len = %llu, flags = %s, refs = %u, "
-                 "compress_type = %u",
+       TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
+                 "orig_start=%llu block_start=%llu(%s) "
+                 "block_len=%llu flags=%s refs=%u "
+                 "compress_type=%u",
                  show_root_type(__entry->root_objectid),
+                 (unsigned long long)__entry->ino,
                  (unsigned long long)__entry->start,
                  (unsigned long long)__entry->len,
                  (unsigned long long)__entry->orig_start,
@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
                __field(        int,  compress_type     )
                __field(        int,  refs              )
                __field(        u64,  root_objectid     )
+               __field(        u64,  truncated_len     )
        ),
 
        TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
                __entry->refs           = atomic_read(&ordered->refs);
                __entry->root_objectid  =
                                BTRFS_I(inode)->root->root_key.objectid;
+               __entry->truncated_len  = ordered->truncated_len;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
-                 "start = %llu, len = %llu, disk_len = %llu, "
-                 "bytes_left = %llu, flags = %s, compress_type = %d, "
-                 "refs = %d",
+       TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
+                 "start=%llu len=%llu disk_len=%llu "
+                 "truncated_len=%llu "
+                 "bytes_left=%llu flags=%s compress_type=%d "
+                 "refs=%d",
                  show_root_type(__entry->root_objectid),
                  (unsigned long long)__entry->ino,
                  (unsigned long long)__entry->file_offset,
                  (unsigned long long)__entry->start,
                  (unsigned long long)__entry->len,
                  (unsigned long long)__entry->disk_len,
+                 (unsigned long long)__entry->truncated_len,
                  (unsigned long long)__entry->bytes_left,
                  show_ordered_flags(__entry->flags),
                  __entry->compress_type, __entry->refs)
@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
                                 BTRFS_I(inode)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
-                 "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
-                 "range_end = %llu, for_kupdate = %d, "
-                 "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+       TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+                 "nr_to_write=%ld pages_skipped=%ld range_start=%llu "
+                 "range_end=%llu for_kupdate=%d "
+                 "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
                  show_root_type(__entry->root_objectid),
                  (unsigned long)__entry->ino, __entry->index,
                  __entry->nr_to_write, __entry->pages_skipped,
@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
                         BTRFS_I(page->mapping->host)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
-                 "end = %llu, uptodate = %d",
+       TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
+                 "end=%llu uptodate=%d",
                  show_root_type(__entry->root_objectid),
                  (unsigned long)__entry->ino, (unsigned long)__entry->index,
                  (unsigned long long)__entry->start,
@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
                                 BTRFS_I(inode)->root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+       TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
                  show_root_type(__entry->root_objectid),
                  (unsigned long)__entry->ino, (unsigned long)__entry->parent,
                  __entry->datasync)
@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
                __entry->create         = create;
        ),
 
-       TP_printk("%pU: block_group offset = %llu, size = %llu, "
-                 "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
-                 "create = %d", __entry->fsid,
+       TP_printk("%pU: block_group offset=%llu size=%llu "
+                 "flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
+                 "create=%d", __entry->fsid,
                  (unsigned long long)__entry->offset,
                  (unsigned long long)__entry->size,
                  (unsigned long long)__entry->flags,
@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
                __entry->seq            = ref->seq;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
-                 "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
-                 "type = %s, seq = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+                 "parent=%llu(%s) ref_root=%llu(%s) level=%d "
+                 "type=%s seq=%llu",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes,
                  show_ref_action(__entry->action),
@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
                __entry->seq            = ref->seq;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
-                 "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
-                 "offset = %llu, type = %s, seq = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+                 "parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
+                 "offset=%llu type=%s seq=%llu",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes,
                  show_ref_action(__entry->action),
@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
                __entry->is_data        = head_ref->is_data;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes,
                  show_ref_action(__entry->action),
@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
                __entry->root_objectid  = fs_info->chunk_root->root_key.objectid;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
-                 "num_stripes = %d, sub_stripes = %d, type = %s",
+       TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
+                 "num_stripes=%d sub_stripes=%d type=%s",
                  show_root_type(__entry->root_objectid),
                  (unsigned long long)__entry->offset,
                  (unsigned long long)__entry->size,
@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
                __entry->cow_level      = btrfs_header_level(cow);
        ),
 
-       TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
-                 "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+       TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
+                 "(orig_level=%d) cow_buf=%llu (cow_level=%d)",
                  show_root_type(__entry->root_objectid),
                  __entry->refs,
                  (unsigned long long)__entry->buf_start,
@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
                __assign_str(reason, reason)
        ),
 
-       TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
+       TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
                  __entry->fsid, __get_str(reason), __entry->flush,
                  show_flush_action(__entry->flush),
                  (unsigned long long)__entry->flags,
@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
                __entry->ret            =       ret;
        ),
 
-       TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
-                 "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
+       TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
+                 "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
                  show_flush_state(__entry->state),
                  (unsigned long long)__entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
                __entry->len            = len;
        ),
 
-       TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
+       TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
                  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
                  (unsigned long long)__entry->start,
                  (unsigned long long)__entry->len)
@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
                __entry->data           = data;
        ),
 
-       TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+       TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
                  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
                  __entry->num_bytes, __entry->empty_size, __entry->data,
                  __print_flags((unsigned long)__entry->data, "|",
@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
                __entry->len            = len;
        ),
 
-       TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
-                 "start = %Lu, len = %Lu",
+       TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
+                 "start=%Lu len=%Lu",
                  show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
                  __entry->bg_objectid,
                  __entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
                __entry->min_bytes      = min_bytes;
        ),
 
-       TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
-                 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+       TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
+                 "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
                  __entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
                                BTRFS_GROUP_FLAGS), __entry->start,
@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
                __entry->bg_objectid    = block_group->key.objectid;
        ),
 
-       TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
+       TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
 );
 
 TRACE_EVENT(btrfs_setup_cluster,
@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
                __entry->bitmap         = bitmap;
        ),
 
-       TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
-                 "size = %Lu, max_size = %Lu, bitmap = %d",
+       TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
+                 "size=%Lu max_size=%Lu bitmap=%d",
                  __entry->bg_objectid,
                  __entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
                __entry->ip     = IP
        ),
 
-       TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
+       TP_printk("state=%p mask=%s caller=%pS", __entry->state,
                  show_gfp_flags(__entry->mask), (void *)__entry->ip)
 );
 
@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
                __entry->ip = IP
        ),
 
-       TP_printk(" state=%p; caller = %pS", __entry->state,
+       TP_printk("state=%p caller=%pS", __entry->state,
                  (void *)__entry->ip)
 );
 
@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
                __entry->normal_work    = &work->normal_work;
        ),
 
-       TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
-                 " ordered_free=%p",
+       TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
+                 "ordered_free=%p",
                  __entry->work, __entry->normal_work, __entry->wq,
                   __entry->func, __entry->ordered_func, __entry->ordered_free)
 );
 
-/* For situiations that the work is freed */
+/*
+ * For situations when the work is freed, we pass fs_info and a tag that
+ * matches the address of the work structure so it can be paired with the
+ * scheduling event.
+ */
 DECLARE_EVENT_CLASS(btrfs__work__done,
 
-       TP_PROTO(struct btrfs_work *work),
+       TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-       TP_ARGS(work),
+       TP_ARGS(fs_info, wtag),
 
        TP_STRUCT__entry_btrfs(
-               __field(        void *, work                    )
+               __field(        void *, wtag                    )
        ),
 
-       TP_fast_assign_btrfs(btrfs_work_owner(work),
-               __entry->work           = work;
+       TP_fast_assign_btrfs(fs_info,
+               __entry->wtag           = wtag;
        ),
 
-       TP_printk_btrfs("work->%p", __entry->work)
+       TP_printk_btrfs("work->%p", __entry->wtag)
 );
 
 DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
 
 DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
 
-       TP_PROTO(struct btrfs_work *work),
+       TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
 
-       TP_ARGS(work)
+       TP_ARGS(fs_info, wtag)
 );
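
A hedged sketch of the pairing this enables (run_and_free_work() is hypothetical): the caller captures fs_info and the work's address as an opaque tag before the item can be freed, then emits the "done" event with the tag alone:

    static void my_finish_work(struct btrfs_work *work)
    {
            struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
            void *wtag = work;      /* tag == address, never dereferenced */

            run_and_free_work(work);        /* hypothetical; 'work' may be gone */
            trace_btrfs_all_work_done(fs_info, wtag);
    }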
 
 DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
                __entry->high           = high;
        ),
 
-       TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
+       TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
                  __print_flags(__entry->high, "",
                                {(WQ_HIGHPRI),  "-high"}),
                  __entry->wq)
@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
                __entry->free_reserved  =       free_reserved;
        ),
 
-       TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
+       TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
                  __entry->rootid, __entry->ino, __entry->free_reserved)
 );
 
@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
                __entry->op             = op;
        ),
 
-       TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+       TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
                  __entry->rootid, __entry->ino, __entry->start, __entry->len,
                  __entry->reserved,
                  __print_flags((unsigned long)__entry->op, "",
@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
                __entry->reserved       = reserved;
        ),
 
-       TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
+       TP_printk_btrfs("root=%llu reserved=%llu op=free",
                  __entry->ref_root, __entry->reserved)
 );
 
@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
                __entry->num_bytes      = rec->num_bytes;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
                  (unsigned long long)__entry->bytenr,
                  (unsigned long long)__entry->num_bytes)
 );
@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
                __entry->nr_new_roots   = nr_new_roots;
        ),
 
-       TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
-                 "nr_new_roots = %llu",
+       TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
+                 "nr_new_roots=%llu",
                  __entry->bytenr,
                  __entry->num_bytes,
                  __entry->nr_old_roots,
@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
                __entry->cur_new_count  = cur_new_count;
        ),
 
-       TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+       TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
                  __entry->qgid,
                  __entry->cur_old_count,
                  __entry->cur_new_count)
index 9e687ca9a307b1344889186e46dd5e62a85f614f..15bf875d0e4a666d91c959bd10b62318b62d61d5 100644 (file)
@@ -47,8 +47,7 @@
        {(unsigned long)__GFP_WRITE,            "__GFP_WRITE"},         \
        {(unsigned long)__GFP_RECLAIM,          "__GFP_RECLAIM"},       \
        {(unsigned long)__GFP_DIRECT_RECLAIM,   "__GFP_DIRECT_RECLAIM"},\
-       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"},\
-       {(unsigned long)__GFP_OTHER_NODE,       "__GFP_OTHER_NODE"}     \
+       {(unsigned long)__GFP_KSWAPD_RECLAIM,   "__GFP_KSWAPD_RECLAIM"}\
 
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
index 9355dd8eff3ba39401dfe37e7fbf7737f0397f11..c97addd08f8c9545314e6d3519dc16aeb6f917d5 100644 (file)
@@ -9,6 +9,7 @@ header-y += i810_drm.h
 header-y += i915_drm.h
 header-y += mga_drm.h
 header-y += nouveau_drm.h
+header-y += omap_drm.h
 header-y += qxl_drm.h
 header-y += r128_drm.h
 header-y += radeon_drm.h
index da32c2f6c3f9a7389471eaa46e0a87c93d4e55aa..57093b455db6895c453b445f8962b3a8049ed06b 100644 (file)
@@ -395,6 +395,7 @@ typedef struct drm_i915_irq_wait {
  * priorities and the driver will attempt to execute batches in priority order.
  */
 #define I915_PARAM_HAS_SCHEDULER        41
+#define I915_PARAM_HUC_STATUS           42
 
 typedef struct drm_i915_getparam {
        __s32 param;
index a8b93e68523941ca5615cbbe0da87f42d779db6d..f330ba4547cfd4c59da3d8f309e4ab76d8bd8b40 100644 (file)
@@ -414,6 +414,7 @@ header-y += telephony.h
 header-y += termios.h
 header-y += thermal.h
 header-y += time.h
+header-y += timerfd.h
 header-y += times.h
 header-y += timex.h
 header-y += tiocl.h
index 6b76e3b0c18eac57491268b5ce395050914cef99..bea982af9cfb80a3081f50af90145a3b9758185c 100644 (file)
@@ -1772,7 +1772,9 @@ enum nl80211_commands {
  *
  * @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
  *     Notification Element based on association request when used with
- *     %NL80211_CMD_NEW_STATION; u8 attribute.
+ *     %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
+ *     %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
+ *     u8 attribute.
  *
  * @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
  *     %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
index cb4bcdc5854372e514865c30315e65ae30080527..a4dcd88ec2718621c2cf8e801566e5264c75c071 100644 (file)
@@ -397,7 +397,7 @@ enum {
        TCA_BPF_NAME,
        TCA_BPF_FLAGS,
        TCA_BPF_FLAGS_GEN,
-       TCA_BPF_DIGEST,
+       TCA_BPF_TAG,
        __TCA_BPF_MAX,
 };
 
index a6b88a6f7f712b271f07e809ee7a79697e6cb8cf..975b50dc8d1d466bb60c6279de7ebde1d05bd54f 100644 (file)
@@ -27,7 +27,7 @@ enum {
        TCA_ACT_BPF_FD,
        TCA_ACT_BPF_NAME,
        TCA_ACT_BPF_PAD,
-       TCA_ACT_BPF_DIGEST,
+       TCA_ACT_BPF_TAG,
        __TCA_ACT_BPF_MAX,
 };
 #define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/timerfd.h b/include/uapi/linux/timerfd.h
new file mode 100644 (file)
index 0000000..6fcfaa8
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ *  include/linux/timerfd.h
+ *
+ *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _UAPI_LINUX_TIMERFD_H
+#define _UAPI_LINUX_TIMERFD_H
+
+#include <linux/types.h>
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ *
+ * Also make sure to update the masks in include/linux/timerfd.h
+ * when adding new flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_IOC_SET_TICKS      _IOW('T', 0, __u64)
+
+#endif /* _UAPI_LINUX_TIMERFD_H */
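
A minimal userspace sketch exercising the flags this header now exports (runnable as-is, modulo error handling):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <unistd.h>

    int main(void)
    {
            /* TFD_CLOEXEC/TFD_NONBLOCK deliberately reuse O_CLOEXEC/O_NONBLOCK. */
            int tfd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC | TFD_NONBLOCK);
            struct itimerspec its = { .it_value = { .tv_sec = 1 } };
            uint64_t expirations;

            timerfd_settime(tfd, 0, &its, NULL);    /* relative one-second timer */
            sleep(2);       /* with O_NONBLOCK, reading before expiry gives EAGAIN */
            if (read(tfd, &expirations, sizeof(expirations)) > 0)
                    printf("expired %llu time(s)\n",
                           (unsigned long long)expirations);
            close(tfd);
            return 0;
    }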
index 223b734abccdc3b7f3baae457261dd66862fcd1d..e1a937348a3ed2bb3a76820e1ffa6a542f6aa9fb 100644 (file)
@@ -1176,6 +1176,10 @@ config CGROUP_DEBUG
 
          Say N.
 
+config SOCK_CGROUP_DATA
+       bool
+       default n
+
 endif # CGROUPS
 
 config CHECKPOINT_RESTORE
index e08b948519223e62aca265284483ee7b1ef1420c..3ec5742b5640f5d265ea8f0d9b8f986cf25b8f1c 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1977,7 +1977,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
                }
 
                rcu_read_lock();
-               sem_lock(sma, sops, nsops);
+               locknum = sem_lock(sma, sops, nsops);
 
                if (!ipc_valid_object(&sma->sem_perm))
                        goto out_unlock_free;
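
For context on the one-line fix above: sem_lock() returns the locking mode it used (the semaphore index for a fine-grained lock, or -1 for the whole-array lock), and locknum is later handed to sem_unlock() at out_unlock_free. When semtimedop() re-takes the lock after sleeping without refreshing locknum, a stale value from before the sleep could release the wrong lock if the locking mode changed in the meantime.
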
index a2ac051c342f87b1372a7aeb33ba816327ee1a43..229a5d5df9770fc66774bf5defea359873946d01 100644 (file)
@@ -56,7 +56,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
            attr->value_size == 0 || attr->map_flags)
                return ERR_PTR(-EINVAL);
 
-       if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+       if (attr->value_size > KMALLOC_MAX_SIZE)
                /* if value_size is bigger, the user space won't be able to
                 * access the elements.
                 */
index 1eb4f1303756164f2893d964198eecd40b0bcd62..503d4211988afe1d3eddd6d39aba520dc4c245ef 100644 (file)
@@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
        vfree(fp);
 }
 
-int bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_tag(struct bpf_prog *fp)
 {
        const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
-       u32 raw_size = bpf_prog_digest_scratch_size(fp);
+       u32 raw_size = bpf_prog_tag_scratch_size(fp);
+       u32 digest[SHA_DIGEST_WORDS];
        u32 ws[SHA_WORKSPACE_WORDS];
        u32 i, bsize, psize, blocks;
        struct bpf_insn *dst;
@@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
        if (!raw)
                return -ENOMEM;
 
-       sha_init(fp->digest);
+       sha_init(digest);
        memset(ws, 0, sizeof(ws));
 
        /* We need to take out the map fd for the digest calculation
@@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
        *bits = cpu_to_be64((psize - 1) << 3);
 
        while (blocks--) {
-               sha_transform(fp->digest, todo, ws);
+               sha_transform(digest, todo, ws);
                todo += SHA_MESSAGE_BYTES;
        }
 
-       result = (__force __be32 *)fp->digest;
+       result = (__force __be32 *)digest;
        for (i = 0; i < SHA_DIGEST_WORDS; i++)
-               result[i] = cpu_to_be32(fp->digest[i]);
+               result[i] = cpu_to_be32(digest[i]);
+       memcpy(fp->tag, result, sizeof(fp->tag));
 
        vfree(raw);
        return 0;
index 34debc1a9641875382b6603820f3e85c4a18dffc..3f2bb58952d8dfa4a2e082f9e6f9d277e19f0577 100644 (file)
@@ -274,7 +274,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
-       if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+       if (htab->map.value_size >= KMALLOC_MAX_SIZE -
            MAX_BPF_STACK - sizeof(struct htab_elem))
                /* if value_size is bigger, the user space won't be able to
                 * access the elements via bpf syscall. This check also makes
index e89acea22ecfc9a625de1924b2e4f997d88d158b..1d6b29e4e2c35ec14cec0a0d68bb7b636102a395 100644 (file)
@@ -688,17 +688,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
 static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
 {
        const struct bpf_prog *prog = filp->private_data;
-       char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
+       char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
 
-       bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
+       bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
        seq_printf(m,
                   "prog_type:\t%u\n"
                   "prog_jited:\t%u\n"
-                  "prog_digest:\t%s\n"
+                  "prog_tag:\t%s\n"
                   "memlock:\t%llu\n",
                   prog->type,
                   prog->jited,
-                  prog_digest,
+                  prog_tag,
                   prog->pages * 1ULL << PAGE_SHIFT);
 }
 #endif
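
The renamed prog_tag line is user-visible via procfs. A hedged sketch of reading it back for a loaded program (the helper name is illustrative; bpf_fd is assumed to be a fd returned by bpf(BPF_PROG_LOAD, ...)):

    #include <stdio.h>

    /* Dump the fdinfo of a BPF program fd, including the prog_tag
     * line emitted by bpf_prog_show_fdinfo().
     */
    static void print_prog_fdinfo(int bpf_fd)
    {
            char path[64], line[256];
            FILE *f;

            snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", bpf_fd);
            f = fopen(path, "r");
            if (!f)
                    return;
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout); /* prog_type, prog_jited, prog_tag, memlock */
            fclose(f);
    }
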
index 83ed2f8f6f228b1ae90f66d89e436b71a5c6a27c..cdc43b899f281ebd01dd8cf8110e6fb966c2a1fa 100644 (file)
@@ -2936,7 +2936,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
        int insn_cnt = env->prog->len;
        int i, j, err;
 
-       err = bpf_prog_calc_digest(env->prog);
+       err = bpf_prog_calc_tag(env->prog);
        if (err)
                return err;
 
index a98e814f216f9c54f878bcfca32e4747be357c07..f97fe77ceb88aa39ff3bd01f497586f778c052a2 100644 (file)
@@ -318,6 +318,7 @@ bool has_capability(struct task_struct *t, int cap)
 {
        return has_ns_capability(t, &init_user_ns, cap);
 }
+EXPORT_SYMBOL(has_capability);
 
 /**
  * has_ns_capability_noaudit - Does a task have a capability (unaudited)
index f75c4d031eeb2152c75182466dbf5ec97e93c008..0a5f630f5c5430c231b2ba8ccb7d671bca09014e 100644 (file)
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
-       bool hasdied = false;
 
        if (num_online_cpus() == 1)
                return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                cpuhp_kick_ap_work(cpu);
        }
 
-       hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
        cpu_hotplug_done();
        return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
  */
 static int cpuhp_reserve_state(enum cpuhp_state state)
 {
-       enum cpuhp_state i;
+       enum cpuhp_state i, end;
+       struct cpuhp_step *step;
 
-       for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
-               if (!cpuhp_ap_states[i].name)
+       switch (state) {
+       case CPUHP_AP_ONLINE_DYN:
+               step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
+               end = CPUHP_AP_ONLINE_DYN_END;
+               break;
+       case CPUHP_BP_PREPARE_DYN:
+               step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
+               end = CPUHP_BP_PREPARE_DYN_END;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       for (i = state; i <= end; i++, step++) {
+               if (!step->name)
                        return i;
        }
        WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
 
        mutex_lock(&cpuhp_state_mutex);
 
-       if (state == CPUHP_AP_ONLINE_DYN) {
+       if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
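
With this change, callers can also reserve a dynamic PREPARE-stage state. A minimal in-kernel sketch, assuming the usual cpuhp_setup_state() entry point (the driver and callback names are illustrative):

    #include <linux/init.h>
    #include <linux/cpuhotplug.h>

    static int mydrv_prepare(unsigned int cpu)
    {
            /* Runs on the control CPU before @cpu is brought up. */
            return 0;
    }

    static int mydrv_dead(unsigned int cpu)
    {
            /* Runs on the control CPU after @cpu has gone down. */
            return 0;
    }

    static int __init mydrv_init(void)
    {
            /* Reserves the next free slot in the BP_PREPARE_DYN range;
             * the allocated state number is returned on success.
             */
            int state = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "mydrv:prepare",
                                          mydrv_prepare, mydrv_dead);
            return state < 0 ? state : 0;
    }
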
index ab15509fab8c0659c3f422036f5649718a9e4437..110b38a58493ee4ba4c19763d2678dae8815e1af 100644 (file)
@@ -2249,7 +2249,7 @@ static int  __perf_install_in_context(void *info)
        struct perf_event_context *ctx = event->ctx;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct perf_event_context *task_ctx = cpuctx->task_ctx;
-       bool activate = true;
+       bool reprogram = true;
        int ret = 0;
 
        raw_spin_lock(&cpuctx->ctx.lock);
@@ -2257,27 +2257,26 @@ static int  __perf_install_in_context(void *info)
                raw_spin_lock(&ctx->lock);
                task_ctx = ctx;
 
-               /* If we're on the wrong CPU, try again */
-               if (task_cpu(ctx->task) != smp_processor_id()) {
-                       ret = -ESRCH;
-                       goto unlock;
-               }
+               reprogram = (ctx->task == current);
 
                /*
-                * If we're on the right CPU, see if the task we target is
-                * current, if not we don't have to activate the ctx, a future
-                * context switch will do that for us.
+                * If the task is running, it must be running on this CPU,
+                * otherwise we cannot reprogram things.
+                *
+                * If it's not running, we don't care; ctx->lock will
+                * serialize against it becoming runnable.
                 */
-               if (ctx->task != current)
-                       activate = false;
-               else
-                       WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+               if (task_curr(ctx->task) && !reprogram) {
+                       ret = -ESRCH;
+                       goto unlock;
+               }
 
+               WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
        } else if (task_ctx) {
                raw_spin_lock(&task_ctx->lock);
        }
 
-       if (activate) {
+       if (reprogram) {
                ctx_sched_out(ctx, cpuctx, EVENT_TIME);
                add_event_to_ctx(event, ctx);
                ctx_resched(cpuctx, task_ctx);
@@ -2328,13 +2327,36 @@ perf_install_in_context(struct perf_event_context *ctx,
        /*
         * Installing events is tricky because we cannot rely on ctx->is_active
         * to be set in case this is the nr_events 0 -> 1 transition.
+        *
+        * Instead we use task_curr(), which tells us if the task is running.
+        * However, since we use task_curr() outside of rq::lock, we can race
+        * against the actual state. This means the result can be wrong.
+        *
+        * If we get a false positive, we retry; this is harmless.
+        *
+        * If we get a false negative, things are complicated. If we are after
+        * perf_event_context_sched_in() ctx::lock will serialize us, and the
+        * value must be correct. If we're before, it doesn't matter since
+        * perf_event_context_sched_in() will program the counter.
+        *
+        * However, this hinges on the remote context switch having observed
+        * our task->perf_event_ctxp[] store, such that it will in fact take
+        * ctx::lock in perf_event_context_sched_in().
+        *
+        * We do this by task_function_call(): if the IPI fails to hit the task,
+        * we know any future context switch of the task must see the
+        * perf_event_ctxp[] store.
         */
-again:
+
        /*
-        * Cannot use task_function_call() because we need to run on the task's
-        * CPU regardless of whether its current or not.
+        * This smp_mb() orders the task->perf_event_ctxp[] store with the
+        * task_cpu() load, such that if the IPI then does not find the task
+        * running, a future context switch of that task must observe the
+        * store.
         */
-       if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+       smp_mb();
+again:
+       if (!task_function_call(task, __perf_install_in_context, event))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
@@ -2348,12 +2370,16 @@ again:
                raw_spin_unlock_irq(&ctx->lock);
                return;
        }
-       raw_spin_unlock_irq(&ctx->lock);
        /*
-        * Since !ctx->is_active doesn't mean anything, we must IPI
-        * unconditionally.
+        * If the task is not running, ctx->lock will avoid it becoming so,
+        * thus we can safely install the event.
         */
-       goto again;
+       if (task_curr(task)) {
+               raw_spin_unlock_irq(&ctx->lock);
+               goto again;
+       }
+       add_event_to_ctx(event, ctx);
+       raw_spin_unlock_irq(&ctx->lock);
 }
 
 /*
@@ -7034,25 +7060,12 @@ static void perf_log_itrace_start(struct perf_event *event)
        perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
-                                  int throttle, struct perf_sample_data *data,
-                                  struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
 {
-       int events = atomic_read(&event->event_limit);
        struct hw_perf_event *hwc = &event->hw;
-       u64 seq;
        int ret = 0;
-
-       /*
-        * Non-sampling counters might still use the PMI to fold short
-        * hardware counters, ignore those.
-        */
-       if (unlikely(!is_sampling_event(event)))
-               return 0;
+       u64 seq;
 
        seq = __this_cpu_read(perf_throttled_seq);
        if (seq != hwc->interrupts_seq) {
@@ -7080,6 +7093,34 @@ static int __perf_event_overflow(struct perf_event *event,
                        perf_adjust_period(event, delta, hwc->last_period, true);
        }
 
+       return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+       return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+                                  int throttle, struct perf_sample_data *data,
+                                  struct pt_regs *regs)
+{
+       int events = atomic_read(&event->event_limit);
+       int ret = 0;
+
+       /*
+        * Non-sampling counters might still use the PMI to fold short
+        * hardware counters, ignore those.
+        */
+       if (unlikely(!is_sampling_event(event)))
+               return 0;
+
+       ret = __perf_event_account_interrupt(event, throttle);
+
        /*
         * XXX event_limit might not quite work as expected on inherited
         * events
@@ -9503,6 +9544,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
        return 0;
 }
 
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+                            struct perf_event_context *ctx)
+{
+       struct perf_event_context *gctx;
+
+again:
+       rcu_read_lock();
+       gctx = READ_ONCE(group_leader->ctx);
+       if (!atomic_inc_not_zero(&gctx->refcount)) {
+               rcu_read_unlock();
+               goto again;
+       }
+       rcu_read_unlock();
+
+       mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+       if (group_leader->ctx != gctx) {
+               mutex_unlock(&ctx->mutex);
+               mutex_unlock(&gctx->mutex);
+               put_ctx(gctx);
+               goto again;
+       }
+
+       return gctx;
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -9746,12 +9818,31 @@ SYSCALL_DEFINE5(perf_event_open,
        }
 
        if (move_group) {
-               gctx = group_leader->ctx;
-               mutex_lock_double(&gctx->mutex, &ctx->mutex);
+               gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
                if (gctx->task == TASK_TOMBSTONE) {
                        err = -ESRCH;
                        goto err_locked;
                }
+
+               /*
+                * Check if we raced against another sys_perf_event_open() call
+                * moving the software group underneath us.
+                */
+               if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
+                       /*
+                        * If someone moved the group out from under us, check
+                        * if this new event wound up on the same ctx, if so
+                        * it's the regular !move_group case; otherwise fail.
+                        */
+                       if (gctx != ctx) {
+                               err = -EINVAL;
+                               goto err_locked;
+                       } else {
+                               perf_event_ctx_unlock(group_leader, gctx);
+                               move_group = 0;
+                       }
+               }
        } else {
                mutex_lock(&ctx->mutex);
        }
@@ -9853,7 +9944,7 @@ SYSCALL_DEFINE5(perf_event_open,
        perf_unpin_context(ctx);
 
        if (move_group)
-               mutex_unlock(&gctx->mutex);
+               perf_event_ctx_unlock(group_leader, gctx);
        mutex_unlock(&ctx->mutex);
 
        if (task) {
@@ -9879,7 +9970,7 @@ SYSCALL_DEFINE5(perf_event_open,
 
 err_locked:
        if (move_group)
-               mutex_unlock(&gctx->mutex);
+               perf_event_ctx_unlock(group_leader, gctx);
        mutex_unlock(&ctx->mutex);
 /* err_file: */
        fput(event_file);
index 93ad6c1fb9b6212e706eb3ae08f7b881192008ec..a9b8cf50059151c17f63d35cf4c622ae8b72f131 100644 (file)
@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
 }
 EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+       STATIC_KEY_CHECK_USE();
+       flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
 void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
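
The new flush helper closes a teardown race for rate-limited keys, whose actual update may still be pending in a delayed work item. A sketch of the intended usage pattern (the driver and key names are illustrative):

    #include <linux/init.h>
    #include <linux/jump_label_ratelimit.h>

    static struct static_key_deferred mydrv_key;

    static int __init mydrv_init(void)
    {
            /* Batch key updates: at most one update per second. */
            jump_label_rate_limit(&mydrv_key, HZ);
            return 0;
    }

    static void mydrv_disable_fastpath(void)
    {
            /* Deferred decrement: the key flip may happen later from
             * the rate-limit work item.
             */
            static_key_slow_dec_deferred(&mydrv_key);
    }

    static void mydrv_exit(void)
    {
            /* Wait for any pending deferred update before the key's
             * storage (e.g. module memory) goes away.
             */
            static_key_deferred_flush(&mydrv_key);
    }
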
index b501e390bb34403c11d6ae09c31ea9fcb769ea8e..9ecedc28b928debb6a5988a5db8b76833133d9e4 100644 (file)
@@ -246,7 +246,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       mem_hotplug_begin();
        arch_remove_memory(align_start, align_size);
+       mem_hotplug_done();
        untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
        pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
@@ -358,7 +360,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (error)
                goto err_pfn_remap;
 
+       mem_hotplug_begin();
        error = arch_add_memory(nid, align_start, align_size, true);
+       mem_hotplug_done();
        if (error)
                goto err_add_memory;
 
index 5088784c0cf9e97c166b7dc06afed9d1c7709d2e..38d4270925d4d13619d725052aa3f9844f23bc96 100644 (file)
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
 
        for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                if (taint_flags[i].module && test_bit(i, &mod->taints))
-                       buf[l++] = taint_flags[i].true;
+                       buf[l++] = taint_flags[i].c_true;
        }
 
        return l;
index c51edaa04fce389bfcf5a62e59fe7a76bf853c6f..08aa88dde7de806d4cb2b14fd93e87be8dd94501 100644 (file)
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
-               pr_emerg("Rebooting in %d seconds..", panic_timeout);
+               pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
                for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                        const struct taint_flag *t = &taint_flags[i];
                        *s++ = test_bit(i, &tainted_mask) ?
-                                       t->true : t->false;
+                                       t->c_true : t->c_false;
                }
                *s = 0;
        } else
index df9e8e9e0be7714fa580b547492c940b11221008..eef2ce9686366a72e9dadaa9056bcda8295ae93d 100644 (file)
@@ -151,8 +151,12 @@ out:
 
 static void delayed_free_pidns(struct rcu_head *p)
 {
-       kmem_cache_free(pid_ns_cachep,
-                       container_of(p, struct pid_namespace, rcu));
+       struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);
+
+       dec_pid_namespaces(ns->ucounts);
+       put_user_ns(ns->user_ns);
+
+       kmem_cache_free(pid_ns_cachep, ns);
 }
 
 static void destroy_pid_namespace(struct pid_namespace *ns)
@@ -162,8 +166,6 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
        ns_free_inum(&ns->ns);
        for (i = 0; i < PIDMAP_ENTRIES; i++)
                kfree(ns->pidmap[i].page);
-       dec_pid_namespaces(ns->ucounts);
-       put_user_ns(ns->user_ns);
        call_rcu(&ns->rcu, delayed_free_pidns);
 }
 
index 80adef7d4c3d01d9ef9ed95c483956d2a858854f..0d6ff3e471be6c1597e0e78fb90d07eb0ce9c546 100644 (file)
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
 #define TPS(x)  tracepoint_string(x)
 
 void rcu_early_boot_tests(void);
+void rcu_test_sync_prims(void);
 
 /*
  * This function really isn't for public consumption, but RCU is special in
index 1898559e6b60ddc52884f6977fca21e57c6f1f90..b23a4d076f3d2c64862172c83c18f21605e87159 100644 (file)
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  * benefits of doing might_sleep() to reduce latency.)
  *
  * Cool, huh?  (Due to Josh Triplett.)
- *
- * But we want to make this a static inline later.  The cond_resched()
- * currently makes this problematic.
  */
 void synchronize_sched(void)
 {
@@ -195,7 +192,6 @@ void synchronize_sched(void)
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_sched() in RCU read-side critical section");
-       cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
index 196f0302e2f4320ebd25dedc31d0ea8a4ab026c1..c64b827ecbca19656395e873ca06da0c92a6298e 100644 (file)
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
  * During boot, we forgive RCU lockdep issues.  After this function is
- * invoked, we start taking RCU lockdep issues seriously.
+ * invoked, we start taking RCU lockdep issues seriously.  Note that unlike
+ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
+ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
+ * The reason for this is that Tiny RCU does not need kthreads, so it does
+ * not have to care about the fact that the scheduler is half-initialized
+ * at a certain phase of the boot process.
  */
 void __init rcu_scheduler_starting(void)
 {
        WARN_ON(nr_context_switches() > 0);
-       rcu_scheduler_active = 1;
+       rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
 }
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
index 96c52e43f7cac0e5d6b41004c0c72d269c351f4a..cb4e2056ccf3cf799bb7c045aca346fedb2ed698 100644 (file)
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 int sysctl_panic_on_rcu_stall __read_mostly;
 
 /*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned.  So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
+ * The rcu_scheduler_active variable is initialized to the value
+ * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
+ * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
+ * RCU can assume that there is but one task, allowing RCU to (for example)
  * optimize synchronize_rcu() to a simple barrier().  When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods.  This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
+ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
+ * to detect real grace periods.  This variable is also used to suppress
+ * boot-time false positives from lockdep-RCU error checking.  Finally, it
+ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
+ * is fully initialized, including all of its kthreads having been spawned.
  */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
 early_initcall(rcu_spawn_gp_kthread);
 
 /*
- * This function is invoked towards the end of the scheduler's initialization
- * process.  Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system).  After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections.  This function also enables RCU lockdep checking.
+ * This function is invoked towards the end of the scheduler's
+ * initialization process.  Before this is called, the idle task might
+ * contain synchronous grace-period primitives (during which time, this idle
+ * task is booting the system, and such primitives are no-ops).  After this
+ * function is called, any synchronous grace-period primitives are run as
+ * expedited, with the requesting task driving the grace period forward.
+ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
+ * runtime RCU functionality.
  */
 void rcu_scheduler_starting(void)
 {
        WARN_ON(num_online_cpus() != 1);
        WARN_ON(nr_context_switches() > 0);
-       rcu_scheduler_active = 1;
+       rcu_test_sync_prims();
+       rcu_scheduler_active = RCU_SCHEDULER_INIT;
+       rcu_test_sync_prims();
 }
 
 /*
index d3053e99fdb67deb01a35a9af998a66658d0ee22..e59e1849b89aca14797999deb3e9e91bdd9b78c2 100644 (file)
@@ -531,6 +531,20 @@ struct rcu_exp_work {
        struct work_struct rew_work;
 };
 
+/*
+ * Common code to drive an expedited grace period forward, used by
+ * workqueues and mid-boot-time tasks.
+ */
+static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
+                                 smp_call_func_t func, unsigned long s)
+{
+       /* Initialize the rcu_node tree in preparation for the wait. */
+       sync_rcu_exp_select_cpus(rsp, func);
+
+       /* Wait and clean up, including waking everyone. */
+       rcu_exp_wait_wake(rsp, s);
+}
+
 /*
  * Work-queue handler to drive an expedited grace period forward.
  */
@@ -538,12 +552,8 @@ static void wait_rcu_exp_gp(struct work_struct *wp)
 {
        struct rcu_exp_work *rewp;
 
-       /* Initialize the rcu_node tree in preparation for the wait. */
        rewp = container_of(wp, struct rcu_exp_work, rew_work);
-       sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
-
-       /* Wait and clean up, including waking everyone. */
-       rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+       rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
 }
 
 /*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */
 
-       /* Marshall arguments and schedule the expedited grace period. */
-       rew.rew_func = func;
-       rew.rew_rsp = rsp;
-       rew.rew_s = s;
-       INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
-       schedule_work(&rew.rew_work);
+       /* Ensure that load happens before action based on it. */
+       if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+               /* Direct call during scheduler init and early_initcalls(). */
+               rcu_exp_sel_wait_wake(rsp, func, s);
+       } else {
+               /* Marshall arguments & schedule the expedited grace period. */
+               rew.rew_func = func;
+               rew.rew_rsp = rsp;
+               rew.rew_s = s;
+               INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+               schedule_work(&rew.rew_work);
+       }
 
        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
 {
        struct rcu_state *rsp = rcu_state_p;
 
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+               return;
        _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Switch to run-time mode once Tree RCU has fully initialized.
+ */
+static int __init rcu_exp_runtime_mode(void)
+{
+       rcu_test_sync_prims();
+       rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+       rcu_test_sync_prims();
+       return 0;
+}
+core_initcall(rcu_exp_runtime_mode);
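
Pulling the RCU changes in this series together: rcu_scheduler_active now steps through three values rather than a boolean. A summary sketch, paraphrasing the semantics described in the hunks above:

    /*
     * RCU_SCHEDULER_INACTIVE: early boot, one task; synchronous
     *     grace-period primitives such as synchronize_rcu() may
     *     simply return.
     * RCU_SCHEDULER_INIT: mid boot, after the first task is spawned;
     *     synchronous grace periods run expedited, driven directly
     *     by the requesting task (no workqueues yet).
     * RCU_SCHEDULER_RUNNING: full runtime behavior, entered from the
     *     rcu_exp_runtime_mode() core_initcall() once Tree RCU is
     *     fully initialized.
     */
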
index 85c5a883c6e31047194a8c74603ce71ab8381f67..56583e764ebf398a7b14f442f63ce6f707f046e4 100644 (file)
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
-       if (!rcu_scheduler_active)
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
index f19271dce0a9784709d7bc7860a2de972e67a070..4f6db7e6a1179ee00c99f62d854a39b00c959d2b 100644 (file)
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
  * Should expedited grace-period primitives always fall back to their
  * non-expedited counterparts?  Intended for use within RCU.  Note
  * that if the user specifies both rcu_expedited and rcu_normal, then
- * rcu_normal wins.
+ * rcu_normal wins.  (Except during the boot period from when the first
+ * task is spawned until the rcu_exp_runtime_mode() core_initcall() is
+ * invoked; throughout that window everything is expedited.)
  */
 bool rcu_gp_is_normal(void)
 {
-       return READ_ONCE(rcu_normal);
+       return READ_ONCE(rcu_normal) &&
+              rcu_scheduler_active != RCU_SCHEDULER_INIT;
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
 /*
  * Should normal grace-period primitives be expedited?  Intended for
  * use within RCU.  Note that this function takes the rcu_expedited
- * sysfs/boot variable into account as well as the rcu_expedite_gp()
- * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
- * returns false is a -really- bad idea.
+ * sysfs/boot variable and rcu_scheduler_active into account as well
+ * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
+ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
  */
 bool rcu_gp_is_expedited(void)
 {
-       return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+       return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
+              rcu_scheduler_active == RCU_SCHEDULER_INIT;
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
 
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
 
 int notrace debug_lockdep_rcu_enabled(void)
 {
-       return rcu_scheduler_active && debug_locks &&
+       return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
               current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
 void synchronize_rcu_tasks(void)
 {
        /* Complain if the scheduler has not started.  */
-       RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+       RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                         "synchronize_rcu_tasks called too soon");
 
        /* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
 
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
+/*
+ * Test each non-SRCU synchronous grace-period wait API.  This is
+ * useful just after a change in mode for these primitives, and
+ * during early boot.
+ */
+void rcu_test_sync_prims(void)
+{
+       if (!IS_ENABLED(CONFIG_PROVE_RCU))
+               return;
+       synchronize_rcu();
+       synchronize_rcu_bh();
+       synchronize_sched();
+       synchronize_rcu_expedited();
+       synchronize_rcu_bh_expedited();
+       synchronize_sched_expedited();
+}
+
 #ifdef CONFIG_PROVE_RCU
 
 /*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
                early_boot_test_call_rcu_bh();
        if (rcu_self_test_sched)
                early_boot_test_call_rcu_sched();
+       rcu_test_sync_prims();
 }
 
 static int rcu_verify_early_boot_tests(void)
index ff046b73ff2d309ce00fbd3bd8949a8f6f5fd297..3603d93a19689be7188a004f2b999b27e0ebdf2f 100644 (file)
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
         * fresh group stop.  Read comment in do_signal_stop() for details.
         */
        if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
-               sig->flags = SIGNAL_STOP_STOPPED;
+               signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
                return true;
        }
        return false;
@@ -843,7 +843,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
                         * will take ->siglock, notice SIGNAL_CLD_MASK, and
                         * notify its parent. See get_signal_to_deliver().
                         */
-                       signal->flags = why | SIGNAL_STOP_CONTINUED;
+                       signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
                        signal->group_stop_count = 0;
                        signal->group_exit_code = 0;
                }
index 8dbaec0e4f7f079b87f50ea67c82341304387783..1aea594a54dbdac604ca950fdaf93508e5b6e6a7 100644 (file)
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                                break;
                        if (neg)
                                continue;
+                       val = convmul * val / convdiv;
                        if ((min && val < *min) || (max && val > *max))
                                continue;
                        *i = val;
index 2c115fdab39765f4ecbeaade9fa74008e711d08a..74e0388cc88d4d17b340d102ec8e053257d6ef55 100644 (file)
@@ -767,7 +767,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        tick = expires;
 
        /* Skip reprogram of event if it's not changed */
-       if (ts->tick_stopped && (expires == dev->next_event))
+       if (ts->tick_stopped && (expires == ts->next_tick))
                goto out;
 
        /*
@@ -787,6 +787,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                trace_tick_stop(1, TICK_DEP_MASK_NONE);
        }
 
+       ts->next_tick = tick;
+
        /*
         * If the expiration time == KTIME_MAX, then we simply stop
         * the tick timer.
@@ -802,7 +804,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
        else
                tick_program_event(tick, 1);
 out:
-       /* Update the estimated sleep length */
+       /*
+        * Update the estimated sleep length until the next timer
+        * (not only the tick).
+        */
        ts->sleep_length = ktime_sub(dev->next_event, now);
        return tick;
 }
index bf38226e5c17c15e276c2e4e8c5f4fe423e071f3..075444e3d48e643549ba5c7fbcc3c792fb90e1db 100644 (file)
@@ -27,6 +27,7 @@ enum tick_nohz_mode {
  *                     timer is modified for nohz sleeps. This is necessary
  *                     to resume the tick timer operation in the timeline
  *                     when the CPU returns from nohz sleep.
+ * @next_tick:         Next tick to be fired when in dynticks mode.
  * @tick_stopped:      Indicator that the idle tick has been stopped
  * @idle_jiffies:      jiffies at the entry to idle for idle time accounting
  * @idle_calls:                Total number of idle calls
@@ -44,6 +45,7 @@ struct tick_sched {
        unsigned long                   check_clocks;
        enum tick_nohz_mode             nohz_mode;
        ktime_t                         last_tick;
+       ktime_t                         next_tick;
        int                             inidle;
        int                             tick_stopped;
        unsigned long                   idle_jiffies;
index 9d20d5dd298af25d0cd95635e217180601703959..4bbd38ec37886d3d104e3d37dc80d101ab3767ac 100644 (file)
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
 
-       spin_lock(&ucounts_lock);
+       spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
        if (!ucounts) {
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irq(&ucounts_lock);
 
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                new->uid = uid;
                atomic_set(&new->count, 0);
 
-               spin_lock(&ucounts_lock);
+               spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
                if (ucounts) {
                        kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        }
        if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
                ucounts = NULL;
-       spin_unlock(&ucounts_lock);
+       spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
 
 static void put_ucounts(struct ucounts *ucounts)
 {
+       unsigned long flags;
+
        if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock(&ucounts_lock);
+               spin_lock_irqsave(&ucounts_lock, flags);
                hlist_del_init(&ucounts->node);
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irqrestore(&ucounts_lock, flags);
 
                kfree(ucounts);
        }
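
The conversion above follows the standard rule for locks that can be taken from more than one context: use spin_lock_irq() where interrupts are known to be enabled, and spin_lock_irqsave() where the caller's interrupt state is unknown. A generic sketch of the pattern, with illustrative names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(obj_lock);

    /* Process context, interrupts known to be enabled. */
    static void obj_update(void)
    {
            spin_lock_irq(&obj_lock);
            /* ... modify the hash table ... */
            spin_unlock_irq(&obj_lock);
    }

    /* May be reached from contexts where interrupts are already
     * disabled, so save and restore the caller's interrupt state.
     */
    static void obj_put(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&obj_lock, flags);
            /* ... remove from the hash table ... */
            spin_unlock_irqrestore(&obj_lock, flags);
    }
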
index d4b0fa01cae39cd720661d7a62f50a7926f9db69..63177be0159e9493f6d6ade90efae743aaf117b7 100644 (file)
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return HRTIMER_NORESTART;
+
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
        int cpu, ret = 0;
 
+       atomic_set(&watchdog_park_in_progress, 1);
+
        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }
 
+       atomic_set(&watchdog_park_in_progress, 0);
+
        return ret;
 }
 
index 84016c8aee6b5d2769495a8c6eee0b4ac559b1a6..12b8dd64078655dd9004d03caa8167da16b57cf5 100644 (file)
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return;
+
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
index b06848a104e6940e128e63a5d3c8c7fb39ffaf8b..eb9e9a7870fa7bdb0f373f858037c1026880c85f 100644 (file)
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
 
 config DEBUG_INFO_SPLIT
        bool "Produce split debuginfo in .dwo files"
-       depends on DEBUG_INFO
+       depends on DEBUG_INFO && !FRV
        help
          Generate debug info into separate .dwo files. This significantly
          reduces the build directory size for builds with DEBUG_INFO,
index 86c8911b0e3a6fff02b9e52faa11816cfe508362..a3e14ce92a5684a662c2c8f80f97e6fef95943b7 100644 (file)
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
index 25f57230380104f419257ea43c1cd3d2e31d7e65..e68604ae3cedf41ce98bc06de2142629fa115cbd 100644 (file)
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
+static inline void pipe_truncate(struct iov_iter *i)
+{
+       struct pipe_inode_info *pipe = i->pipe;
+       if (pipe->nrbufs) {
+               size_t off = i->iov_offset;
+               int idx = i->idx;
+               int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+               if (off) {
+                       pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+                       idx = next_idx(idx, pipe);
+                       nrbufs++;
+               }
+               while (pipe->nrbufs > nrbufs) {
+                       pipe_buf_release(pipe, &pipe->bufs[idx]);
+                       idx = next_idx(idx, pipe);
+                       pipe->nrbufs--;
+               }
+       }
+}
+
 static void pipe_advance(struct iov_iter *i, size_t size)
 {
        struct pipe_inode_info *pipe = i->pipe;
-       struct pipe_buffer *buf;
-       int idx = i->idx;
-       size_t off = i->iov_offset, orig_sz;
-       
        if (unlikely(i->count < size))
                size = i->count;
-       orig_sz = size;
-
        if (size) {
+               struct pipe_buffer *buf;
+               size_t off = i->iov_offset, left = size;
+               int idx = i->idx;
                if (off) /* make it relative to the beginning of buffer */
-                       size += off - pipe->bufs[idx].offset;
+                       left += off - pipe->bufs[idx].offset;
                while (1) {
                        buf = &pipe->bufs[idx];
-                       if (size <= buf->len)
+                       if (left <= buf->len)
                                break;
-                       size -= buf->len;
+                       left -= buf->len;
                        idx = next_idx(idx, pipe);
                }
-               buf->len = size;
                i->idx = idx;
-               off = i->iov_offset = buf->offset + size;
-       }
-       if (off)
-               idx = next_idx(idx, pipe);
-       if (pipe->nrbufs) {
-               int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
-               /* [curbuf,unused) is in use.  Free [idx,unused) */
-               while (idx != unused) {
-                       pipe_buf_release(pipe, &pipe->bufs[idx]);
-                       idx = next_idx(idx, pipe);
-                       pipe->nrbufs--;
-               }
+               i->iov_offset = buf->offset + left;
        }
-       i->count -= orig_sz;
+       i->count -= size;
+       /* ... and discard everything past that point */
+       pipe_truncate(i);
 }
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
                        size_t count)
 {
        BUG_ON(direction != ITER_PIPE);
+       WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = direction;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
index 0b92d605fb69cc805a96c8333dab36174f755e22..84812a9fb16fbbd1409315ea3752fb9a1e3e39ef 100644 (file)
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
                        struct radix_tree_node *old = child;
                        offset = child->offset + 1;
                        child = child->parent;
-                       WARN_ON_ONCE(!list_empty(&node->private_list));
+                       WARN_ON_ONCE(!list_empty(&old->private_list));
                        radix_tree_node_free(old);
                        if (old == entry_to_node(node))
                                return;
index 975b8fc4f1e1143dcdd295731cf1fc18cf0561fe..a8d74a733a38b54a912f5e292f0a15a2cfff4a95 100644 (file)
@@ -483,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
        /*
-        * For mappings greater than a page, we limit the stride (and
-        * hence alignment) to a page size.
+        * For mappings greater than or equal to a page, we limit the stride
+        * (and hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
-       if (size > PAGE_SIZE)
+       if (size >= PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;
index d0e4d1002059360e50254ae2c87dc8f7a87a2dff..b772a33ef640ab0d6770bb3d249a6fe6f16eeebc 100644 (file)
@@ -138,7 +138,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
                                dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
                        /* Wakeup waiters for exceptional entry lock */
                        dax_wake_mapping_entry_waiter(mapping, page->index, p,
-                                                     false);
+                                                     true);
                }
        }
        __radix_tree_replace(&mapping->page_tree, node, slot, page,
index 10eedbf14421f29675d18e80569bafc9efd60763..5f3ad65c85de01fa6e4c8a07ef9494410bf2b133 100644 (file)
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
+       /*
+        * When we COW a devmap PMD entry, we split it into PTEs, so we should
+        * not be in this function with `flags & FOLL_COW` set.
+        */
+       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
@@ -883,15 +889,17 @@ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
 {
        pmd_t entry;
        unsigned long haddr;
+       bool write = vmf->flags & FAULT_FLAG_WRITE;
 
        vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
        if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
                goto unlock;
 
        entry = pmd_mkyoung(orig_pmd);
+       if (write)
+               entry = pmd_mkdirty(entry);
        haddr = vmf->address & HPAGE_PMD_MASK;
-       if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
-                               vmf->flags & FAULT_FLAG_WRITE))
+       if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
                update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
 
 unlock:
@@ -919,8 +927,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
        }
 
        for (i = 0; i < HPAGE_PMD_NR; i++) {
-               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
-                                              __GFP_OTHER_NODE, vma,
+               pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
                                               vmf->address, page_to_nid(page));
                if (unlikely(!pages[i] ||
                             mem_cgroup_try_charge(pages[i], vma->vm_mm,
@@ -1127,6 +1134,16 @@ out_unlock:
        return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+       return pmd_write(pmd) ||
+              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
@@ -1137,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
                goto out;
 
        /* Avoid dumping huge zero page */
index 3edb759c5c7d15dfeacb4eb18e4a901602a0ee23..c7025c132670a4d8e3279d1ebb7730718fb6aa8a 100644 (file)
@@ -1773,23 +1773,32 @@ free:
 }
 
 /*
- * When releasing a hugetlb pool reservation, any surplus pages that were
- * allocated to satisfy the reservation must be explicitly freed if they were
- * never used.
- * Called with hugetlb_lock held.
+ * This routine has two main purposes:
+ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
+ *    in unused_resv_pages.  This corresponds to the prior adjustments made
+ *    to the associated reservation map.
+ * 2) Free any unused surplus pages that may have been allocated to satisfy
+ *    the reservation.  As many as unused_resv_pages may be freed.
+ *
+ * Called with hugetlb_lock held.  However, the lock could be dropped (and
+ * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
+ * we must make sure nobody else can claim pages we are in the process of
+ * freeing.  Do this by ensuring resv_huge_pages is always greater than the
+ * number of huge pages we plan to free when dropping the lock.
  */
 static void return_unused_surplus_pages(struct hstate *h,
                                        unsigned long unused_resv_pages)
 {
        unsigned long nr_pages;
 
-       /* Uncommit the reservation */
-       h->resv_huge_pages -= unused_resv_pages;
-
        /* Cannot return gigantic pages currently */
        if (hstate_is_gigantic(h))
-               return;
+               goto out;
 
+       /*
+        * Part (or even all) of the reservation could have been backed
+        * by pre-allocated pages. Only free surplus pages.
+        */
        nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
 
        /*
@@ -1799,12 +1808,22 @@ static void return_unused_surplus_pages(struct hstate *h,
         * when the nodes with surplus pages have no free pages.
         * free_pool_huge_page() will balance the freed pages across the
         * on-line nodes with memory and will handle the hstate accounting.
+        *
+        * Note that we decrement resv_huge_pages as we free the pages.  If
+        * we drop the lock, resv_huge_pages will still be sufficiently large
+        * to cover subsequent pages we may free.
         */
        while (nr_pages--) {
+               h->resv_huge_pages--;
+               unused_resv_pages--;
                if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
-                       break;
+                       goto out;
                cond_resched_lock(&hugetlb_lock);
        }
+
+out:
+       /* Fully uncommit the reservation */
+       h->resv_huge_pages -= unused_resv_pages;
 }
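
A worked illustration of the new accounting (numbers are illustrative): with unused_resv_pages == 4 and surplus_huge_pages == 2, nr_pages is min(4, 2) = 2. The loop frees at most two surplus pages, decrementing both counters as it goes, and the statement at out: then subtracts the remaining two reservations. The total decrement of resv_huge_pages is still 4, but at every point where the lock may be dropped, resv_huge_pages still covers the pages not yet freed.
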
 
 
index e32389a970305eaf60b36da896e2226a8ce18a0d..77ae3239c3de17bfbf7ba29b56a5cb270611cfd8 100644 (file)
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
        /* Only allocate from the target node */
-       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
+       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        /*
         * Before allocating the hugepage, release the mmap_sem read lock.
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
        struct vm_area_struct *vma;
        unsigned long addr;
        pmd_t *pmd, _pmd;
-       bool deposited = false;
 
        i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
                        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
                        /* assume page table is clear */
                        _pmd = pmdp_collapse_flush(vma, addr, pmd);
-                       /*
-                        * now deposit the pgtable for arch that need it
-                        * otherwise free it.
-                        */
-                       if (arch_needs_pgtable_deposit()) {
-                               /*
-                                * The deposit should be visibile only after
-                                * collapse is seen by others.
-                                */
-                               smp_wmb();
-                               pgtable_trans_huge_deposit(vma->vm_mm, pmd,
-                                                          pmd_pgtable(_pmd));
-                               deposited = true;
-                       }
                        spin_unlock(ptl);
                        up_write(&vma->vm_mm->mmap_sem);
-                       if (!deposited) {
-                               atomic_long_dec(&vma->vm_mm->nr_ptes);
-                               pte_free(vma->vm_mm, pmd_pgtable(_pmd));
-                       }
+                       atomic_long_dec(&vma->vm_mm->nr_ptes);
+                       pte_free(vma->vm_mm, pmd_pgtable(_pmd));
                }
        }
        i_mmap_unlock_write(mapping);
@@ -1326,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
        VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
 
        /* Only allocate from the target node */
-       gfp = alloc_hugepage_khugepaged_gfpmask() |
-               __GFP_OTHER_NODE | __GFP_THISNODE;
+       gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
 
        new_page = khugepaged_alloc_page(hpage, gfp, node);
        if (!new_page) {
index 4048897e7b01a6e5e82333c0852629eded927ff3..b822e158b319e8f2f02ecbfe76c31b6466be51f1 100644 (file)
@@ -625,8 +625,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
 unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
                                           int nid, unsigned int lru_mask)
 {
+       struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
        unsigned long nr = 0;
-       struct mem_cgroup_per_node *mz;
        enum lru_list lru;
 
        VM_BUG_ON((unsigned)nid >= nr_node_ids);
@@ -634,8 +634,7 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
        for_each_lru(lru) {
                if (!(BIT(lru) & lru_mask))
                        continue;
-               mz = mem_cgroup_nodeinfo(memcg, nid);
-               nr += mz->lru_size[lru];
+               nr += mem_cgroup_get_lru_size(lruvec, lru);
        }
        return nr;
 }
@@ -1002,6 +1001,7 @@ out:
  * mem_cgroup_update_lru_size - account for adding or removing an lru page
  * @lruvec: mem_cgroup per zone lru vector
  * @lru: index of lru list the page is sitting on
+ * @zid: zone id of the accounted pages
  * @nr_pages: positive when adding or negative when removing
  *
  * This function must be called under lru_lock, just before a page is added
@@ -1009,27 +1009,25 @@ out:
  * so as to allow it to check that lru_size 0 is consistent with list_empty).
  */
 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
-                               int nr_pages)
+                               int zid, int nr_pages)
 {
        struct mem_cgroup_per_node *mz;
        unsigned long *lru_size;
        long size;
-       bool empty;
 
        if (mem_cgroup_disabled())
                return;
 
        mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
-       lru_size = mz->lru_size + lru;
-       empty = list_empty(lruvec->lists + lru);
+       lru_size = &mz->lru_zone_size[zid][lru];
 
        if (nr_pages < 0)
                *lru_size += nr_pages;
 
        size = *lru_size;
-       if (WARN_ONCE(size < 0 || empty != !size,
-               "%s(%p, %d, %d): lru_size %ld but %sempty\n",
-               __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
+       if (WARN_ONCE(size < 0,
+               "%s(%p, %d, %d): lru_size %ld\n",
+               __func__, lruvec, lru, nr_pages, size)) {
                VM_BUG_ON(1);
                *lru_size = 0;
        }
@@ -4355,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
                return ret;
        }
 
-       /* Try charges one by one with reclaim */
+       /* Try charges one by one with reclaim, but do not retry */
        while (count--) {
-               ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+               ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
                if (ret)
                        return ret;
                mc.precharge++;
index 9f2c15cdb32c6327c0d2e8992c5c269ba573f698..6bf2b471e30ca566a55160e4131bf7e7b9c3c4ea 100644 (file)
@@ -3772,8 +3772,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-static int __follow_pte(struct mm_struct *mm, unsigned long address,
-               pte_t **ptepp, spinlock_t **ptlp)
+static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+               pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -3790,11 +3790,20 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
 
        pmd = pmd_offset(pud, address);
        VM_BUG_ON(pmd_trans_huge(*pmd));
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto out;
 
-       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
-       if (pmd_huge(*pmd))
+       if (pmd_huge(*pmd)) {
+               if (!pmdpp)
+                       goto out;
+
+               *ptlp = pmd_lock(mm, pmd);
+               if (pmd_huge(*pmd)) {
+                       *pmdpp = pmd;
+                       return 0;
+               }
+               spin_unlock(*ptlp);
+       }
+
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
                goto out;
 
        ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
@@ -3810,16 +3819,30 @@ out:
        return -EINVAL;
 }
 
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
-              spinlock_t **ptlp)
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, spinlock_t **ptlp)
+{
+       int res;
+
+       /* (void) is needed to make gcc happy */
+       (void) __cond_lock(*ptlp,
+                          !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
+                                          ptlp)));
+       return res;
+}
+
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+                            pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
 {
        int res;
 
        /* (void) is needed to make gcc happy */
        (void) __cond_lock(*ptlp,
-                          !(res = __follow_pte(mm, address, ptepp, ptlp)));
+                          !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
+                                          ptlp)));
        return res;
 }
+EXPORT_SYMBOL(follow_pte_pmd);
 
 /**
  * follow_pfn - look up PFN at a user virtual address
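
follow_pte_pmd() is exported above so that callers (the DAX mkclean path is the user in this series) can also see huge mappings; on success exactly one of the two out-pointers is set and *ptlp is returned held. A hedged kernel-C sketch of the calling convention, not the actual DAX code:

    pte_t *ptep = NULL;
    pmd_t *pmdp = NULL;
    spinlock_t *ptl;

    if (!follow_pte_pmd(mm, address, &ptep, &pmdp, &ptl)) {
        if (pmdp) {
            /* huge mapping: *pmdp is valid and the pmd lock is held */
            spin_unlock(ptl);
        } else {
            /* 4K mapping: *ptep is mapped and the pte lock is held */
            pte_unmap_unlock(ptep, ptl);
        }
    }
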
index e43142c15631fefdf5a605ced247a6429825252f..ca2723d4733849eab01b323a50e6b1bc609e308c 100644 (file)
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
        node_set_state(node, N_MEMORY);
 }
 
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                  enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                  enum zone_type target, int *zone_shift)
 {
        struct zone *zone = page_zone(pfn_to_page(pfn));
        enum zone_type idx = zone_idx(zone);
        int i;
 
+       *zone_shift = 0;
+
        if (idx < target) {
                /* pages must be at end of current zone */
                if (pfn + nr_pages != zone_end_pfn(zone))
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
        if (target < idx) {
                /* pages must be at beginning of current zone */
                if (pfn != zone->zone_start_pfn)
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
-       return target - idx;
+       *zone_shift = target - idx;
+       return true;
 }
 
 /* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
            !can_online_high_movable(zone))
                return -EINVAL;
 
-       if (online_type == MMOP_ONLINE_KERNEL)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
-       else if (online_type == MMOP_ONLINE_MOVABLE)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+       if (online_type == MMOP_ONLINE_KERNEL) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+                       return -EINVAL;
+       } else if (online_type == MMOP_ONLINE_MOVABLE) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+                       return -EINVAL;
+       }
 
        zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
        if (!zone)
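
The zone_can_shift() rework fixes a classic ambiguous-return bug: the old int return used 0 both for "shift not allowed" and for a legitimate shift of 0 (pfn already in the target zone), so online_pages() could not reject invalid requests. Splitting validity (bool return) from the value (out-parameter) removes the ambiguity. A runnable toy with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy version of the zone_can_shift() signature change. */
    static bool can_shift(int idx, int target, bool at_boundary, int *shift)
    {
        *shift = 0;
        if (idx != target && !at_boundary)
            return false;   /* old API: "return 0", colliding with... */
        *shift = target - idx;
        return true;        /* ...a valid shift of 0 when idx == target */
    }

    int main(void)
    {
        int shift;

        if (can_shift(2, 2, false, &shift))
            printf("ok, shift=%d\n", shift);   /* valid, shift=0 */
        if (!can_shift(1, 2, false, &shift))
            printf("rejected\n");   /* the old int API also returned 0 here */
        return 0;
    }
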
index 2e346645eb80d6bb8f97761c30aed6a512017e59..1e7873e40c9a16e922d4800e6dc41486eee23540 100644 (file)
@@ -2017,8 +2017,8 @@ retry_cpuset:
 
        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
-       mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+       mpol_cond_put(pol);
 out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
index 2c6d5f64feca409e9fdca4b551a240882e421476..f3e0c69a97b76997d9fa65cda0b7e1b1fb8fa29a 100644 (file)
@@ -1864,14 +1864,14 @@ int move_freepages(struct zone *zone,
 #endif
 
        for (page = start_page; page <= end_page;) {
-               /* Make sure we are not inadvertently changing nodes */
-               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }
 
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
                if (!PageBuddy(page)) {
                        page++;
                        continue;
@@ -2583,30 +2583,22 @@ int __isolate_free_page(struct page *page, unsigned int order)
  * Update NUMA hit/miss statistics
  *
  * Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
  */
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
-                                                               gfp_t flags)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
 #ifdef CONFIG_NUMA
-       int local_nid = numa_node_id();
        enum zone_stat_item local_stat = NUMA_LOCAL;
 
-       if (unlikely(flags & __GFP_OTHER_NODE)) {
+       if (z->node != numa_node_id())
                local_stat = NUMA_OTHER;
-               local_nid = preferred_zone->node;
-       }
 
-       if (z->node == local_nid) {
+       if (z->node == preferred_zone->node)
                __inc_zone_state(z, NUMA_HIT);
-               __inc_zone_state(z, local_stat);
-       } else {
+       else {
                __inc_zone_state(z, NUMA_MISS);
                __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
+       __inc_zone_state(z, local_stat);
 #endif
 }
 
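With __GFP_OTHER_NODE gone, the rework classifies every allocation twice and independently: HIT or MISS by comparing the allocated zone's node with the preferred zone's node, and LOCAL or OTHER by comparing it with the node the caller is running on. A plain-C model of the decision (node ids are invented; running_nid stands in for numa_node_id()):

    #include <stdio.h>

    static void classify(int alloc_nid, int preferred_nid, int running_nid)
    {
        const char *placement = (alloc_nid == preferred_nid)
            ? "NUMA_HIT" : "NUMA_MISS (+NUMA_FOREIGN on preferred)";
        const char *locality = (alloc_nid == running_nid)
            ? "NUMA_LOCAL" : "NUMA_OTHER";

        printf("alloc=%d pref=%d run=%d -> %s, %s\n",
               alloc_nid, preferred_nid, running_nid, placement, locality);
    }

    int main(void)
    {
        classify(0, 0, 0);   /* local hit */
        classify(1, 0, 0);   /* fell back to a remote node */
        classify(1, 1, 0);   /* hit for a request preferring a remote node */
        return 0;
    }
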
@@ -2674,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
        }
 
        __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
-       zone_statistics(preferred_zone, zone, gfp_flags);
+       zone_statistics(preferred_zone, zone);
        local_irq_restore(flags);
 
        VM_BUG_ON_PAGE(bad_range(zone, page), page);
@@ -3531,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
-       enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+       enum compact_priority compact_priority;
        enum compact_result compact_result;
-       int compaction_retries = 0;
-       int no_progress_loops = 0;
+       int compaction_retries;
+       int no_progress_loops;
        unsigned long alloc_start = jiffies;
        unsigned int stall_timeout = 10 * HZ;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -3557,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                gfp_mask &= ~__GFP_ATOMIC;
 
+retry_cpuset:
+       compaction_retries = 0;
+       no_progress_loops = 0;
+       compact_priority = DEF_COMPACT_PRIORITY;
+       cpuset_mems_cookie = read_mems_allowed_begin();
+       /*
+        * We need to recalculate the starting point for the zonelist iterator
+        * because we might have used different nodemask in the fast path, or
+        * there was a cpuset modification and we are retrying - otherwise we
+        * could end up iterating over non-eligible zones endlessly.
+        */
+       ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       if (!ac->preferred_zoneref->zone)
+               goto nopage;
+
+
        /*
         * The fast path uses conservative alloc_flags to succeed only until
         * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3716,6 +3726,13 @@ retry:
                                &compaction_retries))
                goto retry;
 
+       /*
+        * It's possible we raced with cpuset update so the OOM would be
+        * premature (see below the nopage: label for full explanation).
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
        if (page)
@@ -3728,6 +3745,16 @@ retry:
        }
 
 nopage:
+       /*
+        * When updating a task's mems_allowed or mempolicy nodemask, it is
+        * possible to race with parallel threads in such a way that our
+        * allocation can fail while the mask is being updated. If we are about
+        * to fail, check if the cpuset changed during allocation and if so,
+        * retry.
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        warn_alloc(gfp_mask,
                        "page allocation failure: order:%u", order);
 got_pg:
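
The retry_cpuset logic above is the usual sequence-count pattern: take a cookie before the attempt, and retry a failure only if a concurrent mems_allowed or mempolicy update bumped the counter in the meantime, so genuine failures are not masked and races do not cause spurious ones. A single-threaded userspace model (the real read_mems_allowed_begin/retry helpers are per-task and properly fenced):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned mems_seq;   /* bumped by the simulated cpuset-update side */

    static unsigned mems_begin(void) { return mems_seq; }
    static bool mems_retry(unsigned cookie) { return cookie != mems_seq; }

    static bool try_alloc(void)
    {
        static int calls;

        if (calls++ == 0) {   /* first attempt races a mask update */
            mems_seq++;
            return false;
        }
        return true;
    }

    int main(void)
    {
        unsigned cookie;
        bool ok;

    retry:
        cookie = mems_begin();
        ok = try_alloc();
        if (!ok && mems_retry(cookie))
            goto retry;   /* failure may be spurious: mask changed under us */
        printf("allocated: %s\n", ok ? "yes" : "no");
        return 0;
    }
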
@@ -3742,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        struct page *page;
-       unsigned int cpuset_mems_cookie;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = {
@@ -3779,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
 
-retry_cpuset:
-       cpuset_mems_cookie = read_mems_allowed_begin();
-
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -3792,8 +3815,13 @@ retry_cpuset:
         */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
-       if (!ac.preferred_zoneref) {
+       if (!ac.preferred_zoneref->zone) {
                page = NULL;
+               /*
+                * This might be due to race with cpuset_current_mems_allowed
+                * update, so make sure we retry with original nodemask in the
+                * slow path.
+                */
                goto no_zone;
        }
 
@@ -3802,6 +3830,7 @@ retry_cpuset:
        if (likely(page))
                goto out;
 
+no_zone:
        /*
         * Runtime PM, block IO and its error handling path can deadlock
         * because I/O on the device might not complete.
@@ -3813,21 +3842,10 @@ retry_cpuset:
         * Restore the original nodemask if it was potentially replaced with
         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
         */
-       if (cpusets_enabled())
+       if (unlikely(ac.nodemask != nodemask))
                ac.nodemask = nodemask;
-       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-no_zone:
-       /*
-        * When updating a task's mems_allowed, it is possible to race with
-        * parallel threads in such a way that an allocation can fail while
-        * the mask is being updated. If a page allocation is about to fail,
-        * check if the cpuset changed during allocation and if so, retry.
-        */
-       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
-               alloc_mask = gfp_mask;
-               goto retry_cpuset;
-       }
+       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -3904,8 +3922,8 @@ EXPORT_SYMBOL(free_pages);
  * drivers to provide a backing region of memory for use as either an
  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  */
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
-                                      gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+                                            gfp_t gfp_mask)
 {
        struct page *page = NULL;
        gfp_t gfp = gfp_mask;
@@ -3925,22 +3943,23 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
        return page;
 }
 
-void __page_frag_drain(struct page *page, unsigned int order,
-                      unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
        VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
        if (page_ref_sub_and_test(page, count)) {
+               unsigned int order = compound_order(page);
+
                if (order == 0)
                        free_hot_cold_page(page, false);
                else
                        __free_pages_ok(page, order);
        }
 }
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
 
-void *__alloc_page_frag(struct page_frag_cache *nc,
-                       unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc(struct page_frag_cache *nc,
+                     unsigned int fragsz, gfp_t gfp_mask)
 {
        unsigned int size = PAGE_SIZE;
        struct page *page;
@@ -3948,7 +3967,7 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
 
        if (unlikely(!nc->va)) {
 refill:
-               page = __page_frag_refill(nc, gfp_mask);
+               page = __page_frag_cache_refill(nc, gfp_mask);
                if (!page)
                        return NULL;
 
@@ -3991,19 +4010,19 @@ refill:
 
        return nc->va + offset;
 }
-EXPORT_SYMBOL(__alloc_page_frag);
+EXPORT_SYMBOL(page_frag_alloc);
 
 /*
  * Frees a page fragment allocated out of either a compound or order 0 page.
  */
-void __free_page_frag(void *addr)
+void page_frag_free(void *addr)
 {
        struct page *page = virt_to_head_page(addr);
 
        if (unlikely(put_page_testzero(page)))
                __free_pages_ok(page, compound_order(page));
 }
-EXPORT_SYMBOL(__free_page_frag);
+EXPORT_SYMBOL(page_frag_free);
 
 static void *make_alloc_exact(unsigned long addr, unsigned int order,
                size_t size)
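
For reference, the renamed page-fragment API from a consumer's point of view; a hedged kernel-C fragment, not a complete driver (real users such as the netdev/napi allocation caches keep the cache per-CPU with IRQ context accounted for). The companion __page_frag_cache_drain() now derives the page order via compound_order() instead of trusting a caller-supplied value.

    struct page_frag_cache nc = {};
    void *buf;

    buf = page_frag_alloc(&nc, 256, GFP_ATOMIC);   /* was __alloc_page_frag() */
    if (buf) {
        /* ... use the 256-byte fragment, e.g. as an skb head ... */
        page_frag_free(buf);                       /* was __free_page_frag() */
    }
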
@@ -7255,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
+               .gfp_mask = GFP_KERNEL,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index 29bc6c0dedd07020e9f433332f907620239343a5..4f2ec6bb46ebe949d2e19ff154faddc7e2526f02 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2457,7 +2457,6 @@ union freelist_init_state {
                unsigned int pos;
                unsigned int *list;
                unsigned int count;
-               unsigned int rand;
        };
        struct rnd_state rnd_state;
 };
@@ -2483,8 +2482,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
        } else {
                state->list = cachep->random_seq;
                state->count = count;
-               state->pos = 0;
-               state->rand = rand;
+               state->pos = rand % count;
                ret = true;
        }
        return ret;
@@ -2493,7 +2491,9 @@ static bool freelist_state_initialize(union freelist_init_state *state,
 /* Get the next entry on the list and randomize it using a random shift */
 static freelist_idx_t next_random_slot(union freelist_init_state *state)
 {
-       return (state->list[state->pos++] + state->rand) % state->count;
+       if (state->pos >= state->count)
+               state->pos = 0;
+       return state->list[state->pos++];
 }
 
 /* Swap two freelist entries */
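
The slab change turns per-slab randomization into a rotation: instead of folding a random offset into every entry ((list[pos++] + rand) % count), it starts at rand % count into the precomputed shuffled sequence and wraps, so each precomputed slot is consumed exactly once per slab. A runnable model with an invented 8-entry sequence:

    #include <stdio.h>

    #define COUNT 8

    static unsigned pos;
    static const unsigned shuffled[COUNT] = { 3, 6, 0, 5, 2, 7, 1, 4 };

    /* Mirrors the new next_random_slot(): wrap instead of re-randomizing. */
    static unsigned next_random_slot(void)
    {
        if (pos >= COUNT)
            pos = 0;
        return shuffled[pos++];
    }

    int main(void)
    {
        pos = 13 % COUNT;   /* "state->pos = rand % count" from the patch */
        for (int i = 0; i < COUNT; i++)
            printf("%u ", next_random_slot());   /* a rotation: 7 1 4 3 6 0 5 2 */
        printf("\n");
        return 0;
    }
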
index 067598a008493fabb68d48120a904943fff4e08c..7aa6f433f4de554d308e774d9e9b40507c6ab48a 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
        return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+                         unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
        metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+                             s->red_left_pad);
        else if (p > addr + 16)
-               print_section("Bytes b4 ", p - 16, 16);
+               print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section("Object ", p, min_t(unsigned long, s->object_size,
-                               PAGE_SIZE));
+       print_section(KERN_ERR, "Object ", p,
+                     min_t(unsigned long, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
        if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section("Padding ", p + off, size_from_object(s) - off);
+               print_section(KERN_ERR, "Padding ", p + off,
+                             size_from_object(s) - off);
 
        dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding ", end - remainder, remainder);
+       print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object,
+                       print_section(KERN_INFO, "Object ", (void *)object,
                                        s->object_size);
 
                dump_stack();
index 1c6e0321205dd2d34abc7f39a0753c128eb7ae53..4761701d1721e63fb8334e3a63fcc11b9e5e6443 100644 (file)
@@ -943,11 +943,25 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
        count = page_trans_huge_mapcount(page, total_mapcount);
        if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
-               if (count == 1 && !PageWriteback(page)) {
+               if (count != 1)
+                       goto out;
+               if (!PageWriteback(page)) {
                        delete_from_swap_cache(page);
                        SetPageDirty(page);
+               } else {
+                       swp_entry_t entry;
+                       struct swap_info_struct *p;
+
+                       entry.val = page_private(page);
+                       p = swap_info_get(entry);
+                       if (p->flags & SWP_STABLE_WRITES) {
+                               spin_unlock(&p->lock);
+                               return false;
+                       }
+                       spin_unlock(&p->lock);
                }
        }
+out:
        return count <= 1;
 }
 
@@ -2448,6 +2462,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
                error = -ENOMEM;
                goto bad_swap;
        }
+
+       if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+               p->flags |= SWP_STABLE_WRITES;
+
        if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
                int cpu;
 
index 6aa5b01d3e757b9b462993f3421050fd8831ceea..532a2a750952daffed4e4242d36a449eb9ba8837 100644 (file)
@@ -242,6 +242,16 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
        return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 }
 
+unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
+                                  int zone_idx)
+{
+       if (!mem_cgroup_disabled())
+               return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
+
+       return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
+                              NR_ZONE_LRU_BASE + lru);
+}
+
 /*
  * Add a shrinker callback to be called from the vm.
  */
@@ -1382,8 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
  * be complete before mem_cgroup_update_lru_size due to a sanity check.
  */
 static __always_inline void update_lru_sizes(struct lruvec *lruvec,
-                       enum lru_list lru, unsigned long *nr_zone_taken,
-                       unsigned long nr_taken)
+                       enum lru_list lru, unsigned long *nr_zone_taken)
 {
        int zid;
 
@@ -1392,11 +1401,11 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
                        continue;
 
                __update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
-       }
-
 #ifdef CONFIG_MEMCG
-       mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
+               mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
 #endif
+       }
+
 }
 
 /*
@@ -1501,7 +1510,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        *nr_scanned = scan;
        trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
                                    nr_taken, mode, is_file_lru(lru));
-       update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
+       update_lru_sizes(lruvec, lru, nr_zone_taken);
        return nr_taken;
 }
 
@@ -2047,10 +2056,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
                if (!managed_zone(zone))
                        continue;
 
-               inactive_zone = zone_page_state(zone,
-                               NR_ZONE_LRU_BASE + (file * LRU_FILE));
-               active_zone = zone_page_state(zone,
-                               NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
+               inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
+               active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
 
                inactive -= min(inactive, inactive_zone);
                active -= min(active, active_zone);
index a1005007224ca04ee673fb948776107d6ba075c4..a29bb4b41c50e3c55463eaee2a5dda1292b757f6 100644 (file)
@@ -258,10 +258,6 @@ config XPS
 config HWBM
        bool
 
-config SOCK_CGROUP_DATA
-       bool
-       default n
-
 config CGROUP_NET_PRIO
        bool "Network priority cgroup"
        depends on CGROUPS
index 4855d18a8511072b499043d1160ad1ff4b2f9f3e..038b109b2be70e8da91589a1bf3c82e73a3903ca 100644 (file)
@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
 {
        ax25_clear_queues(ax25);
 
-       if (!sock_flag(ax25->sk, SOCK_DESTROY))
+       if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
                ax25_stop_heartbeat(ax25);
        ax25_stop_t1timer(ax25);
        ax25_stop_t2timer(ax25);
index 8ca6a929bf1255cb432fd4bd59d34345d62e09c4..95087e6e8258366af95579bb308d1a6e18266f0e 100644 (file)
@@ -399,7 +399,7 @@ bridged_dnat:
                                br_nf_hook_thresh(NF_BR_PRE_ROUTING,
                                                  net, sk, skb, skb->dev,
                                                  NULL,
-                                                 br_nf_pre_routing_finish);
+                                                 br_nf_pre_routing_finish_bridge);
                                return 0;
                        }
                        ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
index 3949ce70be07bc90b1ee7e67ca95f09c1d5258f3..292e33bd916e650c0317ab630a0c60a400d21c7d 100644 (file)
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
        SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
-       char iv[AES_BLOCK_SIZE];
+       char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;
index 8db5a0b4b52061afe9cd44dead33cc69523d15df..07b307b0b414730688b64fdb2295b0fa1b721e51 100644 (file)
@@ -4441,7 +4441,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
            pinfo->nr_frags &&
            !PageHighMem(skb_frag_page(frag0))) {
                NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
-               NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+               NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+                                                   skb_frag_size(frag0),
+                                                   skb->end - skb->tail);
        }
 }
 
index fe4e1531976c3a36127b6ad4af33f24534af4c52..1b7673aac59d51a5f8b5ef3f2076f1440c017fae 100644 (file)
@@ -67,8 +67,8 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
  * The function will try to retrieve a be32 entity at
  * offset poff
  */
-__be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, void *data,
-                        int hlen)
+static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
+                               void *data, int hlen)
 {
        __be16 *u, _u;
 
index 5a03730fbc1a84376985d4d68d8b80d8f38b0985..734c71468b013838516cfe8c744dcd0e797a6e2b 100644 (file)
@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 
        local_irq_save(flags);
        nc = this_cpu_ptr(&netdev_alloc_cache);
-       data = __alloc_page_frag(nc, fragsz, gfp_mask);
+       data = page_frag_alloc(nc, fragsz, gfp_mask);
        local_irq_restore(flags);
        return data;
 }
@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-       return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+       return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
        local_irq_save(flags);
 
        nc = this_cpu_ptr(&netdev_alloc_cache);
-       data = __alloc_page_frag(nc, len, gfp_mask);
+       data = page_frag_alloc(nc, len, gfp_mask);
        pfmemalloc = nc->pfmemalloc;
 
        local_irq_restore(flags);
@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;
 
-       data = __alloc_page_frag(&nc->page, len, gfp_mask);
+       data = page_frag_alloc(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;
 
index f560e0826009851e79a1f8a8b90f4ded3cfc382d..4eca27dc5c9478e36120a5128a7c11d6208b45a9 100644 (file)
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
   "sk_lock-AF_NFC"   , "sk_lock-AF_VSOCK"    , "sk_lock-AF_KCM"      ,
-  "sk_lock-AF_MAX"
+  "sk_lock-AF_QIPCRTR", "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
   "slock-AF_NFC"   , "slock-AF_VSOCK"    ,"slock-AF_KCM"       ,
-  "slock-AF_MAX"
+  "slock-AF_QIPCRTR", "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
   "clock-AF_NFC"   , "clock-AF_VSOCK"    , "clock-AF_KCM"      ,
-  "clock-AF_MAX"
+  "clock-AF_QIPCRTR", "clock-AF_MAX"
 };
 
 /*
index 5fff951a0a4928ccf28fb681be86c7df6a05e94f..da38621245458bae2506b0c030d92315f1be5775 100644 (file)
@@ -394,9 +394,11 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
                        return err;
        }
 
-       err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
-       if (err)
-               return err;
+       if (dst->ds[0]) {
+               err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
+               if (err)
+                       return err;
+       }
 
        /* If we use a tagging format that doesn't have an ethertype
         * field, make sure that all packets from this point on get
@@ -433,7 +435,8 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
                dsa_ds_unapply(dst, ds);
        }
 
-       dsa_cpu_port_ethtool_restore(dst->ds[0]);
+       if (dst->ds[0])
+               dsa_cpu_port_ethtool_restore(dst->ds[0]);
 
        pr_info("DSA: tree %d unapplied\n", dst->tree);
        dst->applied = false;
index 7a5b4c7d9a87b18051a94aa2e16e6b85c8d85aa6..9a375b908d01fae08b46b46df2e2579a4c268027 100644 (file)
@@ -1279,8 +1279,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                    nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
                        goto nla_put_failure;
 #endif
-               if (fi->fib_nh->nh_lwtstate)
-                       lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
+               if (fi->fib_nh->nh_lwtstate &&
+                   lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
+                       goto nla_put_failure;
        }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
        if (fi->fib_nhs > 1) {
@@ -1316,8 +1317,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
                            nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
                                goto nla_put_failure;
 #endif
-                       if (nh->nh_lwtstate)
-                               lwtunnel_fill_encap(skb, nh->nh_lwtstate);
+                       if (nh->nh_lwtstate &&
+                           lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
+                               goto nla_put_failure;
+
                        /* length of rtnetlink header + attributes */
                        rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
                } endfor_nexthops(fi);
@@ -1618,8 +1621,13 @@ void fib_select_multipath(struct fib_result *res, int hash)
 void fib_select_path(struct net *net, struct fib_result *res,
                     struct flowi4 *fl4, int mp_hash)
 {
+       bool oif_check;
+
+       oif_check = (fl4->flowi4_oif == 0 ||
+                    fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);
+
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
-       if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+       if (res->fi->fib_nhs > 1 && oif_check) {
                if (mp_hash < 0)
                        mp_hash = get_hash_from_flowi4(fl4) >> 1;
 
@@ -1629,7 +1637,7 @@ void fib_select_path(struct net *net, struct fib_result *res,
 #endif
        if (!res->prefixlen &&
            res->table->tb_num_default > 1 &&
-           res->type == RTN_UNICAST && !fl4->flowi4_oif)
+           res->type == RTN_UNICAST && oif_check)
                fib_select_default(fl4, res);
 
        if (!fl4->saddr)
index 21db00d0362bb60d48aed2c900b857f86cef5793..a6b8c1a4102ba7ab07efbcf504fa7ca4025c6f19 100644 (file)
@@ -144,7 +144,7 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
        rcu_read_lock_bh();
        c = __clusterip_config_find(net, clusterip);
        if (c) {
-               if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+               if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
                        atomic_inc(&c->entries);
@@ -166,14 +166,15 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 
 static struct clusterip_config *
 clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
-                       struct net_device *dev)
+                     struct net_device *dev)
 {
+       struct net *net = dev_net(dev);
        struct clusterip_config *c;
-       struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+       struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        c->dev = dev;
        c->clusterip = ip;
@@ -185,6 +186,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
        atomic_set(&c->refcount, 1);
        atomic_set(&c->entries, 1);
 
+       spin_lock_bh(&cn->lock);
+       if (__clusterip_config_find(net, ip)) {
+               spin_unlock_bh(&cn->lock);
+               kfree(c);
+
+               return ERR_PTR(-EBUSY);
+       }
+
+       list_add_rcu(&c->list, &cn->configs);
+       spin_unlock_bh(&cn->lock);
+
 #ifdef CONFIG_PROC_FS
        {
                char buffer[16];
@@ -195,16 +207,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
                                          cn->procdir,
                                          &clusterip_proc_fops, c);
                if (!c->pde) {
+                       spin_lock_bh(&cn->lock);
+                       list_del_rcu(&c->list);
+                       spin_unlock_bh(&cn->lock);
                        kfree(c);
-                       return NULL;
+
+                       return ERR_PTR(-ENOMEM);
                }
        }
 #endif
 
-       spin_lock_bh(&cn->lock);
-       list_add_rcu(&c->list, &cn->configs);
-       spin_unlock_bh(&cn->lock);
-
        return c;
 }
 
@@ -410,9 +422,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 
                        config = clusterip_config_init(cipinfo,
                                                        e->ip.dst.s_addr, dev);
-                       if (!config) {
+                       if (IS_ERR(config)) {
                                dev_put(dev);
-                               return -ENOMEM;
+                               return PTR_ERR(config);
                        }
                        dev_mc_add(config->dev, config->clustermac);
                }
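
The clusterip fix leans on the kernel's ERR_PTR convention so clusterip_config_init() can report why it failed (-ENOMEM vs. -EBUSY for the lost insertion race) through a single pointer return. A self-contained userspace model of the encoding (the kernel's versions live in <linux/err.h>):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        /* errors occupy the top 4095 values of the address space */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *config_init(int lost_race)
    {
        if (lost_race)
            return ERR_PTR(-EBUSY);   /* someone inserted first */
        return "config";              /* any valid pointer */
    }

    int main(void)
    {
        void *c = config_init(1);

        if (IS_ERR(c))
            printf("error: %ld\n", PTR_ERR(c));   /* -16 == -EBUSY */
        return 0;
    }
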
index 0fcac8e7a2b2fb9fdb9f74bdcadf32bd177ceb39..709ffe67d1de1609be7d3e4a98d9314b01e5f265 100644 (file)
@@ -2472,7 +2472,7 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
        r->rtm_dst_len  = 32;
        r->rtm_src_len  = 0;
        r->rtm_tos      = fl4->flowi4_tos;
-       r->rtm_table    = table_id;
+       r->rtm_table    = table_id < 256 ? table_id : RT_TABLE_COMPAT;
        if (nla_put_u32(skb, RTA_TABLE, table_id))
                goto nla_put_failure;
        r->rtm_type     = rt->rt_type;
index 22cbd61079b5a9d2661583b7d96eea46eddb685d..b2fa498b15d173739d0ebc5b6dd0577bf8dc4c08 100644 (file)
@@ -951,7 +951,7 @@ static struct ctl_table ipv4_net_table[] = {
                .data           = &init_net.ipv4.sysctl_tcp_notsent_lowat,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_douintvec,
        },
        {
                .procname       = "tcp_tw_reuse",
index 4e777a3243f94457d9928e3967bb83947da563f6..f51919535ca763d54c25a48534256150de7b66a7 100644 (file)
@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
                struct tcp_fastopen_cookie tmp;
 
                if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
-                       struct in6_addr *buf = (struct in6_addr *) tmp.val;
+                       struct in6_addr *buf = &tmp.addr;
                        int i;
 
                        for (i = 0; i < 4; i++)
index d46f4d5b1c62edf95791e9d47d966c3bc61e1888..ba8f02d0f283c6eaaf14ed89103adea135093353 100644 (file)
@@ -606,7 +606,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
 
 void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
 {
index 89c59e656f44939863ceada610d3442d2de666ba..fc7b4017ba241f9dd39d49bd6258ecd4a16e3a3a 100644 (file)
@@ -191,6 +191,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
        ops = rcu_dereference(inet6_offloads[proto]);
        if (!ops || !ops->callbacks.gro_receive) {
                __pskb_pull(skb, skb_gro_offset(skb));
+               skb_gro_frag0_invalidate(skb);
                proto = ipv6_gso_pull_exthdrs(skb, proto);
                skb_gro_pull(skb, -skb_transport_offset(skb));
                skb_reset_transport_header(skb);
index 36d2921809428eb520256dffb39393ed023b7ec7..753d6d0860fb14c100ab8b20799782ab81602635 100644 (file)
@@ -1108,7 +1108,7 @@ route_lookup:
                                     t->parms.name);
                goto tx_err_dst_release;
        }
-       mtu = dst_mtu(dst) - psh_hlen;
+       mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
        if (encap_limit >= 0) {
                max_headroom += 8;
                mtu -= 8;
@@ -1117,7 +1117,7 @@ route_lookup:
                mtu = IPV6_MIN_MTU;
        if (skb_dst(skb) && !t->parms.collect_md)
                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
-       if (skb->len > mtu && !skb_is_gso(skb)) {
+       if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
                *pmtu = mtu;
                err = -EMSGSIZE;
                goto tx_err_dst_release;
index f4b4a4a5f4ba740f4a290e2394034fe335941abe..d82042c8d8fd4b38eac12a58eb634438aab726a7 100644 (file)
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
        int err;
 
+       dev->rtnl_link_ops = &vti6_link_ops;
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
 
        strcpy(t->parms.name, dev->name);
-       dev->rtnl_link_ops = &vti6_link_ops;
 
        dev_hold(dev);
        vti6_tnl_link(ip6n, t);
index 14a3903f1c82d83d44c39befdfe827833d09b13c..7139fffd61b6f764a9d0ae02ed41365afa3ab55c 100644 (file)
@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
 static void mld_ifc_timer_expire(unsigned long data);
 static void mld_ifc_event(struct inet6_dev *idev);
 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
 static void mld_clear_delrec(struct inet6_dev *idev);
 static bool mld_in_v1_mode(const struct inet6_dev *idev);
 static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
                        dev_mc_del(dev, buf);
        }
 
-       if (mc->mca_flags & MAF_NOREPORT)
-               goto done;
        spin_unlock_bh(&mc->mca_lock);
+       if (mc->mca_flags & MAF_NOREPORT)
+               return;
 
        if (!mc->idev->dead)
                igmp6_leave_group(mc);
@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
        spin_lock_bh(&mc->mca_lock);
        if (del_timer(&mc->mca_timer))
                atomic_dec(&mc->mca_refcnt);
-done:
-       ip6_mc_clear_src(mc);
        spin_unlock_bh(&mc->mca_lock);
 }
 
@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
        spin_unlock_bh(&idev->mc_lock);
 }
 
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 {
        struct ifmcaddr6 *pmc, *pmc_prev;
-       struct ip6_sf_list *psf, *psf_next;
+       struct ip6_sf_list *psf;
+       struct in6_addr *pmca = &im->mca_addr;
 
        spin_lock_bh(&idev->mc_lock);
        pmc_prev = NULL;
@@ -768,14 +767,20 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
        }
        spin_unlock_bh(&idev->mc_lock);
 
+       spin_lock_bh(&im->mca_lock);
        if (pmc) {
-               for (psf = pmc->mca_tomb; psf; psf = psf_next) {
-                       psf_next = psf->sf_next;
-                       kfree(psf);
+               im->idev = pmc->idev;
+               im->mca_crcount = idev->mc_qrv;
+               im->mca_sfmode = pmc->mca_sfmode;
+               if (pmc->mca_sfmode == MCAST_INCLUDE) {
+                       im->mca_tomb = pmc->mca_tomb;
+                       im->mca_sources = pmc->mca_sources;
+                       for (psf = im->mca_sources; psf; psf = psf->sf_next)
+                               psf->sf_crcount = im->mca_crcount;
                }
                in6_dev_put(pmc->idev);
-               kfree(pmc);
        }
+       spin_unlock_bh(&im->mca_lock);
 }
 
 static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +909,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
        mca_get(mc);
        write_unlock_bh(&idev->lock);
 
-       mld_del_delrec(idev, &mc->mca_addr);
+       mld_del_delrec(idev, mc);
        igmp6_group_added(mc);
        ma_put(mc);
        return 0;
@@ -927,6 +932,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
                                write_unlock_bh(&idev->lock);
 
                                igmp6_group_dropped(ma);
+                               ip6_mc_clear_src(ma);
 
                                ma_put(ma);
                                return 0;
@@ -2501,15 +2507,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
        /* Withdraw multicast list */
 
        read_lock_bh(&idev->lock);
-       mld_ifc_stop_timer(idev);
-       mld_gq_stop_timer(idev);
-       mld_dad_stop_timer(idev);
 
        for (i = idev->mc_list; i; i = i->next)
                igmp6_group_dropped(i);
-       read_unlock_bh(&idev->lock);
 
-       mld_clear_delrec(idev);
+       /* Should stop the timers after the group drop, or we will
+        * start a timer again in mld_ifc_event()
+        */
+       mld_ifc_stop_timer(idev);
+       mld_gq_stop_timer(idev);
+       mld_dad_stop_timer(idev);
+       read_unlock_bh(&idev->lock);
 }
 
 static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2539,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
 
        read_lock_bh(&idev->lock);
        ipv6_mc_reset(idev);
-       for (i = idev->mc_list; i; i = i->next)
+       for (i = idev->mc_list; i; i = i->next) {
+               mld_del_delrec(idev, i);
                igmp6_group_added(i);
+       }
        read_unlock_bh(&idev->lock);
 }
 
@@ -2565,6 +2575,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
 
        /* Deactivate timers */
        ipv6_mc_down(idev);
+       mld_clear_delrec(idev);
 
        /* Delete all-nodes address. */
        /* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2590,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
        write_lock_bh(&idev->lock);
        while ((i = idev->mc_list) != NULL) {
                idev->mc_list = i->next;
-               write_unlock_bh(&idev->lock);
 
-               igmp6_group_dropped(i);
+               write_unlock_bh(&idev->lock);
                ma_put(i);
-
                write_lock_bh(&idev->lock);
        }
        write_unlock_bh(&idev->lock);
index 8417c41d8ec8398a72c56f9b37b16d94702cbbf3..4f6b067c8753a541bc20d6d9f6f4c5009c8956cb 100644 (file)
@@ -1464,7 +1464,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
        struct fib6_node *fn;
 
        /* Get the "current" route for this destination and
-        * check if the redirect has come from approriate router.
+        * check if the redirect has come from appropriate router.
         *
         * RFC 4861 specifies that redirects should only be
         * accepted if they come from the nexthop to the target.
@@ -2768,7 +2768,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
           old MTU is the lowest MTU in the path, update the route PMTU
           to reflect the increase. In this case if the other nodes' MTU
           also have the lowest MTU, a TOO BIG MESSAGE will lead to
-          PMTU discouvery.
+          PMTU discovery.
         */
        if (rt->dst.dev == arg->dev &&
            dst_metric_raw(&rt->dst, RTAX_MTU) &&
@@ -3317,7 +3317,8 @@ static int rt6_fill_node(struct net *net,
        if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
                goto nla_put_failure;
 
-       lwtunnel_fill_encap(skb, rt->dst.lwtstate);
+       if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
+               goto nla_put_failure;
 
        nlmsg_end(skb, nlh);
        return 0;
index ef1c8a46e7aceee45b2044d4b4338dc3aed88807..03a064803626890ade73073cc12735aec777f9e5 100644 (file)
@@ -400,7 +400,7 @@ static int seg6_hmac_init_algo(void)
                        *p_tfm = tfm;
                }
 
-               p_tfm = this_cpu_ptr(algo->tfms);
+               p_tfm = raw_cpu_ptr(algo->tfms);
                tfm = *p_tfm;
 
                shsize = sizeof(*shash) + crypto_shash_descsize(tfm);
index bbfca22c34aeec5c63e9494e672f959c5a06b380..1d60cb132835c9f9089510f035a1ca95e5b1e1a7 100644 (file)
@@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
        slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
 
 #ifdef CONFIG_DST_CACHE
+       preempt_disable();
        dst = dst_cache_get(&slwt->cache);
+       preempt_enable();
 #endif
 
        if (unlikely(!dst)) {
@@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
                }
 
 #ifdef CONFIG_DST_CACHE
+               preempt_disable();
                dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+               preempt_enable();
 #endif
        }
 
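Both seg6 fixes are about per-CPU access rules: dst_cache_get() and dst_cache_set_ip6() dereference per-CPU storage via this_cpu_ptr(), which is only valid with preemption disabled, hence the preempt_disable()/preempt_enable() pairs; the seg6_hmac hunk instead switches to raw_cpu_ptr(), which skips the preemptibility check on a path where touching whichever CPU's slot happens to be current is harmless. The general shape, as a hedged kernel-C sketch (my_entry and my_cache are hypothetical):

    static DEFINE_PER_CPU(struct my_entry, my_cache);

    preempt_disable();   /* pin the task: no CPU migration mid-access */
    struct my_entry *e = this_cpu_ptr(&my_cache);
    /* ... read or update *e ... */
    preempt_enable();
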
index cfb9e5f4e28f52c699072eaf41f12f3a6e667160..13190b38f22ee5116fb7701feed22ec436031a4e 100644 (file)
@@ -1044,7 +1044,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct iucv_sock *iucv = iucv_sk(sk);
-       size_t headroom, linear;
+       size_t headroom = 0;
+       size_t linear;
        struct sk_buff *skb;
        struct iucv_message txmsg = {0};
        struct cmsghdr *cmsg;
@@ -1122,18 +1123,20 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
         * this is fine for SOCK_SEQPACKET (unless we want to support
         * segmented records using the MSG_EOR flag), but
         * for SOCK_STREAM we might want to improve it in future */
-       headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
-                  ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
-       if (headroom + len < PAGE_SIZE) {
+       if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+               headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
                linear = len;
        } else {
-               /* In nonlinear "classic" iucv skb,
-                * reserve space for iucv_array
-                */
-               if (iucv->transport != AF_IUCV_TRANS_HIPER)
-                       headroom += sizeof(struct iucv_array) *
-                                   (MAX_SKB_FRAGS + 1);
-               linear = PAGE_SIZE - headroom;
+               if (len < PAGE_SIZE) {
+                       linear = len;
+               } else {
+                       /* In nonlinear "classic" iucv skb,
+                        * reserve space for iucv_array
+                        */
+                       headroom = sizeof(struct iucv_array) *
+                                  (MAX_SKB_FRAGS + 1);
+                       linear = PAGE_SIZE - headroom;
+               }
        }
        skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
                                   noblock, &err, 0);
index e75cbf6ecc26e4ec7bde777c385e001c0e49e371..a0d901d8992ea892bbb44890e08d6f782c68b4e3 100644 (file)
@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
                    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
                        continue;
 
-               if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
-                       continue;
-
                max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
        }
        rcu_read_unlock();
index 41497b670e2bde0e55a2a35f5fbbdb60148ddd54..d37ae7dc114b2c2eb5b8dc4773c6a688b047e0a2 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright (c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1295,6 +1296,26 @@ static void ieee80211_iface_work(struct work_struct *work)
                } else if (ieee80211_is_action(mgmt->frame_control) &&
                           mgmt->u.action.category == WLAN_CATEGORY_VHT) {
                        switch (mgmt->u.action.u.vht_group_notif.action_code) {
+                       case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+                               struct ieee80211_rx_status *status;
+                               enum nl80211_band band;
+                               u8 opmode;
+
+                               status = IEEE80211_SKB_RXCB(skb);
+                               band = status->band;
+                               opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+                               mutex_lock(&local->sta_mtx);
+                               sta = sta_info_get_bss(sdata, mgmt->sa);
+
+                               if (sta)
+                                       ieee80211_vht_handle_opmode(sdata, sta,
+                                                                   opmode,
+                                                                   band);
+
+                               mutex_unlock(&local->sta_mtx);
+                               break;
+                       }
                        case WLAN_VHT_ACTION_GROUPID_MGMT:
                                ieee80211_process_mu_groups(sdata, mgmt);
                                break;
index 1822c77f2b1c3125ec1bc63fa6b7de085a26bbed..56fb47953b72420b3ceb4e3f5d27dc9c4d23928b 100644 (file)
@@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                supp_ht = supp_ht || sband->ht_cap.ht_supported;
                supp_vht = supp_vht || sband->vht_cap.vht_supported;
 
-               if (sband->ht_cap.ht_supported)
-                       local->rx_chains =
-                               max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
-                                   local->rx_chains);
+               if (!sband->ht_cap.ht_supported)
+                       continue;
 
                /* TODO: consider VHT for RX chains, hopefully it's the same */
+               local->rx_chains =
+                       max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+                           local->rx_chains);
+
+               /* no need to mask, SM_PS_DISABLED has all bits set */
+               sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+                                    IEEE80211_HT_CAP_SM_PS_SHIFT;
        }
 
        /* if low-level driver supports AP, we also support VLAN */
index 206698bc93f406939bb5d883b6ab2f04bc1a3bed..9e2641d4558753b7bf746388a971ac337cef9349 100644 (file)
@@ -40,6 +40,8 @@ void rate_control_rate_init(struct sta_info *sta)
 
        ieee80211_sta_set_rx_nss(sta);
 
+       ieee80211_recalc_min_chandef(sta->sdata);
+
        if (!ref)
                return;
 
index 3e289a64ed4317a8a495cb7cac8197fb9ce10c3e..3090dd4342f6eee6f2d3bdba67493849622ac1a7 100644 (file)
@@ -2472,7 +2472,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        if (!ifmsh->mshcfg.dot11MeshForwarding)
                goto out;
 
-       fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC);
+       fwd_skb = skb_copy_expand(skb, local->tx_headroom +
+                                      sdata->encrypt_headroom, 0, GFP_ATOMIC);
        if (!fwd_skb) {
                net_info_ratelimited("%s: failed to clone mesh frame\n",
                                    sdata->name);
@@ -2880,17 +2881,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 
                switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
                case WLAN_VHT_ACTION_OPMODE_NOTIF: {
-                       u8 opmode;
-
                        /* verify opmode is present */
                        if (len < IEEE80211_MIN_ACTION_SIZE + 2)
                                goto invalid;
-
-                       opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
-
-                       ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
-                                                   opmode, status->band);
-                       goto handled;
+                       goto queue;
                }
                case WLAN_VHT_ACTION_GROUPID_MGMT: {
                        if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@@ -3942,21 +3936,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
        u64_stats_update_end(&stats->syncp);
 
        if (fast_rx->internal_forward) {
-               struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
+               struct sk_buff *xmit_skb = NULL;
+               bool multicast = is_multicast_ether_addr(skb->data);
+
+               if (multicast) {
+                       xmit_skb = skb_copy(skb, GFP_ATOMIC);
+               } else if (sta_info_get(rx->sdata, skb->data)) {
+                       xmit_skb = skb;
+                       skb = NULL;
+               }
 
-               if (dsta) {
+               if (xmit_skb) {
                        /*
                         * Send to wireless media and increase priority by 256
                         * to keep the received priority instead of
                         * reclassifying the frame (see cfg80211_classify8021d).
                         */
-                       skb->priority += 256;
-                       skb->protocol = htons(ETH_P_802_3);
-                       skb_reset_network_header(skb);
-                       skb_reset_mac_header(skb);
-                       dev_queue_xmit(skb);
-                       return true;
+                       xmit_skb->priority += 256;
+                       xmit_skb->protocol = htons(ETH_P_802_3);
+                       skb_reset_network_header(xmit_skb);
+                       skb_reset_mac_header(xmit_skb);
+                       dev_queue_xmit(xmit_skb);
                }
+
+               if (!skb)
+                       return true;
        }
 
        /* deliver to local stack */
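
The fast-rx forwarding rework splits the bridging decision: multicast frames are copied so both the wireless medium and the local stack receive them, while unicast frames destined to a known peer are handed off wholesale (setting skb to NULL skips local delivery). A simplified user-space sketch of that ownership logic, with a stubbed peer lookup standing in for sta_info_get():

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf { unsigned char data[64]; };

    static bool is_multicast(const unsigned char *dst) { return dst[0] & 0x01; }
    /* stub for sta_info_get(): pretend the peer is associated */
    static bool peer_known(const unsigned char *dst) { (void)dst; return true; }

    /* Returns the buffer to transmit back onto the wireless medium (or
     * NULL); clears *skb when ownership moved so the caller skips the
     * local-stack delivery that normally follows. */
    static struct buf *forward_decision(struct buf **skb)
    {
        struct buf *xmit = NULL;
        const unsigned char *dst = (*skb)->data;

        if (is_multicast(dst)) {
            xmit = malloc(sizeof(**skb));      /* copy: stack gets it too */
            if (xmit)
                memcpy(xmit, *skb, sizeof(**skb));
        } else if (peer_known(dst)) {
            xmit = *skb;                       /* move: no local delivery */
            *skb = NULL;
        }
        return xmit;
    }

    int main(void)
    {
        struct buf *pkt = malloc(sizeof(*pkt));
        pkt->data[0] = 0x01;                   /* multicast bit set */
        struct buf *xmit = forward_decision(&pkt);
        free(xmit);
        free(pkt);                             /* still owned: local delivery */
        return 0;
    }
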
index b6cfcf038c11fa529e00da2eed70f6ff48426a37..50c309094c37ba5e73160613f6e316c0a6a161f2 100644 (file)
@@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
 
                /* This will evaluate to 1, 3, 5 or 7. */
                for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
-                       if (ignored_acs & BIT(ac))
-                               continue;
+                       if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
+                               break;
                tid = 7 - 2 * ac;
 
                ieee80211_send_null_response(sta, tid, reason, true, false);
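
The loop fix selects the first access category that is *not* ignored; the old `continue` form always ran the loop to completion, leaving ac == IEEE80211_NUM_ACS. The representative TID then follows from tid = 7 - 2*ac. A sketch of the mapping, using a plain per-AC bit where the patch consults the ieee80211_ac_to_qos_mask[] table:

    #include <stdio.h>

    enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };

    int main(void)
    {
        unsigned ignored_acs = 1u << AC_VO;  /* example: VO ignored */
        int ac;

        /* Stop at the first AC that is NOT ignored; the old 'continue'
         * variant fell off the end of the loop with ac == NUM_ACS. */
        for (ac = AC_VO; ac < NUM_ACS; ac++)
            if (!(ignored_acs & (1u << ac)))
                break;

        /* tid = 7 - 2*ac -> 7 (VO), 5 (VI), 3 (BE), 1 (BK) */
        printf("ac=%d tid=%d\n", ac, 7 - 2 * ac);
        return 0;
    }
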
index 0d8b716e509edca48fbc65ceaf06e6b7d6a2c189..797e847cbc49a1a5f1a515f01aa68c0212aed997 100644 (file)
@@ -1243,7 +1243,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
 static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
                                          struct ieee80211_vif *vif,
-                                         struct ieee80211_sta *pubsta,
+                                         struct sta_info *sta,
                                          struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1257,10 +1257,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
        if (!ieee80211_is_data(hdr->frame_control))
                return NULL;
 
-       if (pubsta) {
+       if (sta) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 
-               txq = pubsta->txq[tid];
+               if (!sta->uploaded)
+                       return NULL;
+
+               txq = sta->sta.txq[tid];
        } else if (vif) {
                txq = vif->txq;
        }
@@ -1503,23 +1506,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
        struct fq *fq = &local->fq;
        struct ieee80211_vif *vif;
        struct txq_info *txqi;
-       struct ieee80211_sta *pubsta;
 
        if (!local->ops->wake_tx_queue ||
            sdata->vif.type == NL80211_IFTYPE_MONITOR)
                return false;
 
-       if (sta && sta->uploaded)
-               pubsta = &sta->sta;
-       else
-               pubsta = NULL;
-
        if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
                sdata = container_of(sdata->bss,
                                     struct ieee80211_sub_if_data, u.ap);
 
        vif = &sdata->vif;
-       txqi = ieee80211_get_txq(local, vif, pubsta, skb);
+       txqi = ieee80211_get_txq(local, vif, sta, skb);
 
        if (!txqi)
                return false;
index 6832bf6ab69fe012ea4eeb3c02b79523083cdc58..43e45bb660bcde02af964a31a71efd64f07ba448 100644 (file)
@@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
 
        u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
 
-       if (changed > 0)
+       if (changed > 0) {
+               ieee80211_recalc_min_chandef(sdata);
                rate_control_rate_update(local, sband, sta, changed);
+       }
 }
 
 void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
index a019a87e58ee8151620eb3d8500979d02dba54c1..0db5f9782265ebb033f10d07da815495e8a7d278 100644 (file)
@@ -2115,7 +2115,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
         * is called on error from nf_tables_newrule().
         */
        expr = nft_expr_first(rule);
-       while (expr->ops && expr != nft_expr_last(rule)) {
+       while (expr != nft_expr_last(rule) && expr->ops) {
                nf_tables_expr_destroy(ctx, expr);
                expr = nft_expr_next(expr);
        }
index 36d2b10965464cd8abc3ad57326aefb00b003971..7d699bbd45b0eaae1574a094f54a85d8219d390a 100644 (file)
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
        return 0;
 }
 
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+                                __wsum fsum, __wsum tsum, int csum_offset)
+{
+       __sum16 sum;
+
+       if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               return -1;
+
+       nft_csum_replace(&sum, fsum, tsum);
+       if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+           skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               return -1;
+
+       return 0;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;
-       __sum16 sum;
 
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;
 
-       if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+       if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
-               if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
-                       goto err;
-
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);
-               nft_csum_replace(&sum, fsum, tsum);
 
-               if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
-                   skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+                   nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;
 
                if (priv->csum_flags &&
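
The refactor hoists the read/replace/write sequence into nft_payload_csum_inet() so the pseudo-header flag handling can run even when no inet checksum type is configured. The underlying arithmetic is RFC 1624 incremental update: replace the old bytes' contribution (fsum) with that of the new bytes (tsum). A self-contained sketch of that fold-and-replace step:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator into 16 bits of ones-complement sum. */
    static uint16_t csum_fold(uint32_t s)
    {
        while (s >> 16)
            s = (s & 0xffff) + (s >> 16);
        return (uint16_t)s;
    }

    /* RFC 1624 incremental update: HC' = ~(~HC + ~m + m'). */
    static uint16_t csum_replace(uint16_t sum, uint16_t fsum, uint16_t tsum)
    {
        uint32_t acc = (uint16_t)~sum;

        acc += (uint16_t)~fsum;
        acc += tsum;
        return (uint16_t)~csum_fold(acc);
    }

    int main(void)
    {
        /* replace old data summing to 0x1234 with new data summing to 0x4321 */
        printf("0x%04x\n", csum_replace(0xb861, 0x1234, 0x4321));
        return 0;
    }
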
index 3e19fa1230dc6b9274090257be97b91827699485..dbb6aaff67ec5c151f8c8c6333721421f8e85076 100644 (file)
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
        if (priv->queues_total > 1) {
                if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-                       int cpu = smp_processor_id();
+                       int cpu = raw_smp_processor_id();
 
                        queue = priv->queuenum + cpu % priv->queues_total;
                } else {
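
smp_processor_id() warns when called from preemptible context, which nft_queue evaluation can be; raw_smp_processor_id() reads the CPU number without that check. The fanout mapping itself is just a modulo over the configured queue span; a trivial sketch:

    #include <stdio.h>

    /* Map the processing CPU onto one of 'total' queues starting at
     * 'base' -- illustrative stand-in for raw_smp_processor_id(). */
    static unsigned pick_queue(unsigned base, unsigned total, unsigned cpu)
    {
        return base + cpu % total;
    }

    int main(void)
    {
        for (unsigned cpu = 0; cpu < 8; cpu++)
            printf("cpu %u -> queue %u\n", cpu, pick_queue(16, 3, cpu));
        return 0;
    }
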
index bd6efc53f26d01d8c8ac246c36fa4b1cf970ce31..2d6fe3559912674385e7679557fc31ddeb901b38 100644 (file)
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
 static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
                             bool reset)
 {
+       u64 consumed, consumed_cap;
        u32 flags = priv->flags;
-       u64 consumed;
-
-       if (reset) {
-               consumed = atomic64_xchg(&priv->consumed, 0);
-               if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
-                       flags |= NFT_QUOTA_F_DEPLETED;
-       } else {
-               consumed = atomic64_read(&priv->consumed);
-       }
 
        /* Since we unconditionally increment consumed quota for each packet
         * that we see, don't go over the quota boundary in what we send to
         * userspace.
         */
-       if (consumed > priv->quota)
-               consumed = priv->quota;
+       consumed = atomic64_read(&priv->consumed);
+       if (consumed >= priv->quota) {
+               consumed_cap = priv->quota;
+               flags |= NFT_QUOTA_F_DEPLETED;
+       } else {
+               consumed_cap = consumed;
+       }
 
        if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
                         NFTA_QUOTA_PAD) ||
-           nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+           nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
                         NFTA_QUOTA_PAD) ||
            nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
                goto nla_put_failure;
+
+       if (reset) {
+               atomic64_sub(consumed, &priv->consumed);
+               clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+       }
        return 0;
 
 nla_put_failure:
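
The reordered dump logic samples the counter once, caps the reported figure at the quota, and only rolls back after the netlink attributes were written successfully; subtracting the sampled value (instead of xchg-ing to zero) preserves bytes accounted concurrently during the dump. A single-threaded sketch of the bookkeeping, with plain loads and stores standing in for atomic64_read()/atomic64_sub():

    #include <stdint.h>
    #include <stdio.h>

    /* Cap the reported figure at the quota; on reset subtract only the
     * sampled value so concurrently accounted packets are preserved. */
    static uint64_t dump_quota(uint64_t *consumed, uint64_t quota, int reset)
    {
        uint64_t sampled = *consumed;
        uint64_t report = sampled >= quota ? quota : sampled;

        if (reset)
            *consumed -= sampled;
        return report;
    }

    int main(void)
    {
        uint64_t consumed = 1500;
        printf("report=%llu remaining=%llu\n",
               (unsigned long long)dump_quota(&consumed, 1000, 1),
               (unsigned long long)consumed);
        return 0;
    }
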
index 28c56b95fb7ff6ebaf5a99d8cfcbf583a01e73f5..ea7c67050792c9093c28b397e5ae164a570a0e33 100644 (file)
@@ -1502,10 +1502,7 @@ static int __init netlbl_init(void)
        printk(KERN_INFO "NetLabel: Initializing\n");
        printk(KERN_INFO "NetLabel:  domain hash size = %u\n",
               (1 << NETLBL_DOMHSH_BITSIZE));
-       printk(KERN_INFO "NetLabel:  protocols ="
-              " UNLABELED"
-              " CIPSOv4"
-              "\n");
+       printk(KERN_INFO "NetLabel:  protocols = UNLABELED CIPSOv4 CALIPSO\n");
 
        ret_val = netlbl_domhsh_init(NETLBL_DOMHSH_BITSIZE);
        if (ret_val != 0)
index 6b78bab27755b2758f3c8ecd5b9c6d61615af4b6..54253ea5976e694ba93a9145e575197fce9df69a 100644 (file)
@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
        int hooknum, nh_off, err = NF_ACCEPT;
 
        nh_off = skb_network_offset(skb);
-       skb_pull(skb, nh_off);
+       skb_pull_rcsum(skb, nh_off);
 
        /* See HOOK2MANIP(). */
        if (maniptype == NF_NAT_MANIP_SRC)
@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
        err = nf_nat_packet(ct, ctinfo, hooknum, skb);
 push:
        skb_push(skb, nh_off);
+       skb_postpush_rcsum(skb, skb->data, nh_off);
 
        return err;
 }
@@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
 
        /* The conntrack module expects to be working at L3. */
        nh_ofs = skb_network_offset(skb);
-       skb_pull(skb, nh_ofs);
+       skb_pull_rcsum(skb, nh_ofs);
 
        if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
                err = handle_fragments(net, key, info->zone.id, skb);
@@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
                err = ovs_ct_lookup(net, key, info, skb);
 
        skb_push(skb, nh_ofs);
+       skb_postpush_rcsum(skb, skb->data, nh_ofs);
        if (err)
                kfree_skb(skb);
        return err;
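
Plain skb_pull() only moves the data pointer; with CHECKSUM_COMPLETE offloads, skb->csum still covers the stripped L2 header afterwards, which is what the _rcsum variants fix by adjusting the running sum. A simplified sketch of the idea -- the kernel uses proper ones-complement csum_sub()/csum_add(); this hedged version uses plain integer arithmetic over an even-length header:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Sum 16-bit words; assumes len is even for this sketch. */
    static uint32_t sum_words(const uint8_t *p, size_t len)
    {
        uint32_t s = 0;
        for (size_t i = 0; i < len; i += 2)
            s += (uint32_t)(p[i] << 8) | p[i + 1];
        return s;
    }

    /* pull_rcsum analogue: advance past n header bytes and drop their
     * contribution so the running checksum matches the remaining data. */
    static const uint8_t *pull_rcsum(const uint8_t *data, size_t n,
                                     uint32_t *csum)
    {
        *csum -= sum_words(data, n);
        return data + n;
    }

    int main(void)
    {
        uint8_t pkt[] = { 0xde, 0xad, 0xbe, 0xef, 0x12, 0x34 };
        uint32_t csum = sum_words(pkt, sizeof(pkt));
        const uint8_t *l3 = pull_rcsum(pkt, 2, &csum);

        printf("csum=0x%x matches=%d\n", csum,
               csum == sum_words(l3, sizeof(pkt) - 2));
        return 0;
    }
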
index c985ecbe9bd62a81899d43eb802003d6bb4ebe08..ae5ac175b2bef96ffa614bc799db5cd90a7bdc08 100644 (file)
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
        const int pkt_len = 20;
        struct qrtr_hdr *hdr;
        struct sk_buff *skb;
-       u32 *buf;
+       __le32 *buf;
 
        skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
        if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
        hdr->dst_node_id = cpu_to_le32(dst_node);
        hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
 
-       buf = (u32 *)skb_put(skb, pkt_len);
+       buf = (__le32 *)skb_put(skb, pkt_len);
        memset(buf, 0, pkt_len);
        buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
        buf[1] = cpu_to_le32(src_node);
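
The u32 to __le32 change is a sparse annotation fix: the buffer is filled with cpu_to_le32() values, so its element type must carry the endianness. A short user-space sketch of why explicit little-endian serialization matters for a stable wire format:

    #include <stdint.h>
    #include <stdio.h>

    /* cpu_to_le32 analogue: emit a host-order value as little-endian
     * bytes so the on-wire layout is the same on every architecture. */
    static void put_le32(uint8_t *out, uint32_t v)
    {
        out[0] = v; out[1] = v >> 8; out[2] = v >> 16; out[3] = v >> 24;
    }

    int main(void)
    {
        uint8_t buf[4];
        put_le32(buf, 0x12345678);
        printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
    }
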
index 2095c83ce7730d49550103a8efad943d28815617..e10456ef6f7a43c1b1a3c153012805015c51e9f5 100644 (file)
@@ -900,8 +900,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                        goto err;
                }
                act->order = i;
-               if (event == RTM_GETACTION)
-                       act->tcfa_refcnt++;
                list_add_tail(&act->list, &actions);
        }
 
@@ -914,7 +912,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
                return ret;
        }
 err:
-       tcf_action_destroy(&actions, 0);
+       if (event != RTM_GETACTION)
+               tcf_action_destroy(&actions, 0);
        return ret;
 }
 
index 1c60317f01214ad77c17415bb2d7bc194b124615..520baa41cba3a8d24a659b67c5a5ed8a44129f37 100644 (file)
@@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
            nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;
 
-       nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
-                         sizeof(prog->filter->digest));
+       nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;
 
-       memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+       memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
 
        return 0;
 }
index adc776048d1a899f7955d0a92dd48b374178f3bb..d9c97018317dd7e26b6c99dfa83b8d6daeca6d19 100644 (file)
@@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;
 
-       nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
+       nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;
 
-       memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+       memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
 
        return 0;
 }
index e54082699520ae40d8ab4c282c230f3b3787db92..34efaa4ef2f6acfbed9b490f948f214e91a5606c 100644 (file)
@@ -1048,7 +1048,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
                             (new_transport->state == SCTP_PF)))
                                new_transport = asoc->peer.active_path;
                        if (new_transport->state == SCTP_UNCONFIRMED) {
-                               WARN_ONCE(1, "Atempt to send packet on unconfirmed path.");
+                               WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
                                sctp_chunk_fail(chunk, 0);
                                sctp_chunk_free(chunk);
                                continue;
index a8c2307590b87ce5774f275fcbc1397f095d4c27..0758e13754e2faccb257d2f6ba9cca7b2da1baab 100644 (file)
@@ -533,7 +533,7 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
        return used;
 }
 
-int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
 {
        int err = simple_setattr(dentry, iattr);
 
index 886e9d381771ab8c05ed514a399a089c106b2282..1530825985221a1aeb5f77ee81f4251acdef9d96 100644 (file)
@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
        case RPC_GSS_PROC_DESTROY:
                if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
                        goto auth_err;
-               rsci->h.expiry_time = get_seconds();
+               rsci->h.expiry_time = seconds_since_boot();
                set_bit(CACHE_NEGATIVE, &rsci->h.flags);
                if (resv->iov_len + 4 > PAGE_SIZE)
                        goto drop;
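
The sunrpc cache layer compares expiry times against seconds_since_boot(), so stamping the destroyed context with wall-clock get_seconds() made it effectively never expire (boot-relative time is far smaller than the epoch). A user-space illustration of the two clock bases, assuming Linux's CLOCK_BOOTTIME is available:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        struct timespec boot, real;

        /* CLOCK_BOOTTIME counts from boot, is monotonic across suspend,
         * and never jumps; CLOCK_REALTIME (what get_seconds() reported)
         * moves when the wall clock is set, so expiry stamps taken from
         * it can land in the far future relative to a boot-based clock. */
        clock_gettime(CLOCK_BOOTTIME, &boot);
        clock_gettime(CLOCK_REALTIME, &real);
        printf("boottime=%ld realtime=%ld\n",
               (long)boot.tv_sec, (long)real.tv_sec);
        return 0;
    }
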
index 3bc1d61694cbbbf7a094a1849b747b65760550b2..9c9db55a0c1e1735e522e406797c4efb25930b07 100644 (file)
@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 
        if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
                dprintk("svc_recv: found XPT_CLOSE\n");
+               if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
+                       xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
                svc_delete_xprt(xprt);
                /* Leave XPT_BUSY set on the dead xprt: */
                goto out;
@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
                le = to_be_closed.next;
                list_del_init(le);
                xprt = list_entry(le, struct svc_xprt, xpt_list);
-               dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
-               xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
-               svc_close_xprt(xprt);
+               set_bit(XPT_CLOSE, &xprt->xpt_flags);
+               set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
+               dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
+                               xprt);
+               svc_xprt_enqueue(xprt);
        }
 }
 EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
index 57d35fbb1c28570b5c23f5e06974e8b902853846..172b537f8cfc942ef62574b74cff7ac5f421fba9 100644 (file)
@@ -347,8 +347,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
        atomic_inc(&rdma_stat_read);
        return ret;
  err:
-       ib_dma_unmap_sg(xprt->sc_cm_id->device,
-                       frmr->sg, frmr->sg_nents, frmr->direction);
        svc_rdma_put_context(ctxt, 0);
        svc_rdma_put_frmr(xprt, frmr);
        return ret;
index 6b109a808d4c5f43677ff25bd5450d5e8c1dd920..02462d67d1914b6e4b20c4daf7a493b29049a2e4 100644 (file)
@@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
 
        /* Send response, if necessary */
        if (respond && (mtyp == DSC_REQ_MSG)) {
-               rskb = tipc_buf_acquire(MAX_H_SIZE);
+               rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
                if (!rskb)
                        return;
                tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
        req = kmalloc(sizeof(*req), GFP_ATOMIC);
        if (!req)
                return -ENOMEM;
-       req->buf = tipc_buf_acquire(MAX_H_SIZE);
+       req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
        if (!req->buf) {
                kfree(req);
                return -ENOMEM;
index bda89bf9f4ff185f64c68c06d8b88a4385380c58..4e8647aef01c1d070751adc6a24c63769b66e599 100644 (file)
@@ -1395,7 +1395,7 @@ tnl:
                        msg_set_seqno(hdr, seqno++);
                pktlen = msg_size(hdr);
                msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
-               tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+               tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
                if (!tnlskb) {
                        pr_warn("%sunable to send packet\n", link_co_err);
                        return;
index a22be502f1bd06dc8ec23ab44e56d32f4c3dd469..ab02d07424764ad4b269b6ad560167f528454b80 100644 (file)
@@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
  * NOTE: Headroom is reserved to allow prepending of a data link header.
  *       There may also be unrequested tailroom present at the buffer's end.
  */
-struct sk_buff *tipc_buf_acquire(u32 size)
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
 {
        struct sk_buff *skb;
        unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
 
-       skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+       skb = alloc_skb_fclone(buf_size, gfp);
        if (skb) {
                skb_reserve(skb, BUF_HEADROOM);
                skb_put(skb, size);
@@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
        struct tipc_msg *msg;
        struct sk_buff *buf;
 
-       buf = tipc_buf_acquire(hdr_sz + data_sz);
+       buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
        if (unlikely(!buf))
                return NULL;
 
@@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
 
        /* No fragmentation needed? */
        if (likely(msz <= pktmax)) {
-               skb = tipc_buf_acquire(msz);
+               skb = tipc_buf_acquire(msz, GFP_KERNEL);
                if (unlikely(!skb))
                        return -ENOMEM;
                skb_orphan(skb);
@@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
        msg_set_importance(&pkthdr, msg_importance(mhdr));
 
        /* Prepare first fragment */
-       skb = tipc_buf_acquire(pktmax);
+       skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        skb_orphan(skb);
@@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                        pktsz = drem + INT_H_SIZE;
                else
                        pktsz = pktmax;
-               skb = tipc_buf_acquire(pktsz);
+               skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
                if (!skb) {
                        rc = -ENOMEM;
                        goto error;
@@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
        if (msz > (max / 2))
                return false;
 
-       _skb = tipc_buf_acquire(max);
+       _skb = tipc_buf_acquire(max, GFP_ATOMIC);
        if (!_skb)
                return false;
 
@@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node,  struct sk_buff **skb, int err)
 
        /* Never return SHORT header; expand by replacing buffer if necessary */
        if (msg_short(hdr)) {
-               *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+               *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
                if (!*skb)
                        goto exit;
                memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
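
Threading a gfp_t through tipc_buf_acquire() lets the process-context callers in tipc_msg_build() allocate with GFP_KERNEL while interrupt-path callers stay on GFP_ATOMIC. Independently, the function rounds the allocation up to a 4-byte boundary; the same arithmetic in isolation:

    #include <stdio.h>

    /* tipc_buf_acquire() rounds BUF_HEADROOM + size up to a 4-byte
     * boundary with (x + 3) & ~3u. */
    static unsigned int roundup4(unsigned int n) { return (n + 3) & ~3u; }

    int main(void)
    {
        for (unsigned int n = 29; n <= 33; n++)
            printf("%u -> %u\n", n, roundup4(n));
        return 0;
    }
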
index 8d408612ffa490521e310b3c643f2407a6c822f3..2c3dc38abf9c25814c0d9e3c31711706f2bca664 100644 (file)
@@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
        return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
 }
 
-struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
 bool tipc_msg_validate(struct sk_buff *skb);
 bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
index c1cfd92de17aee30a310305707a70ecb87fd2548..23f8899e0f8c3d7bda50bc57801bcc63ceab448c 100644 (file)
@@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
                                         u32 dest)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
-       struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
+       struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
        struct tipc_msg *msg;
 
        if (buf != NULL) {
index 3df85a751a85285f609c5583e7ad864c7c822560..5c1b267e22beefe7cfa83e3541783fab702a092a 100644 (file)
@@ -4615,6 +4615,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
                break;
        }
 
+       /*
+        * Older kernel versions ignored this attribute entirely, so don't
+        * reject attempts to update it but mark it as unused instead so the
+        * driver won't look at the data.
+        */
+       if (statype != CFG80211_STA_AP_CLIENT_UNASSOC &&
+           statype != CFG80211_STA_TDLS_PEER_SETUP)
+               params->opmode_notif_used = false;
+
        return 0;
 }
 EXPORT_SYMBOL(cfg80211_check_station_change);
@@ -4854,6 +4863,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
                params.local_pm = pm;
        }
 
+       if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
+               params.opmode_notif_used = true;
+               params.opmode_notif =
+                       nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
+       }
+
        /* Include parameters for TDLS peer (will check later) */
        err = nl80211_set_station_tdls(info, &params);
        if (err)
@@ -14502,13 +14517,17 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
 
        list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list) {
                bool schedule_destroy_work = false;
-               bool schedule_scan_stop = false;
                struct cfg80211_sched_scan_request *sched_scan_req =
                        rcu_dereference(rdev->sched_scan_req);
 
                if (sched_scan_req && notify->portid &&
-                   sched_scan_req->owner_nlportid == notify->portid)
-                       schedule_scan_stop = true;
+                   sched_scan_req->owner_nlportid == notify->portid) {
+                       sched_scan_req->owner_nlportid = 0;
+
+                       if (rdev->ops->sched_scan_stop &&
+                           rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+                               schedule_work(&rdev->sched_scan_stop_wk);
+               }
 
                list_for_each_entry_rcu(wdev, &rdev->wiphy.wdev_list, list) {
                        cfg80211_mlme_unregister_socket(wdev, notify->portid);
@@ -14539,12 +14558,6 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
                                spin_unlock(&rdev->destroy_list_lock);
                                schedule_work(&rdev->destroy_work);
                        }
-               } else if (schedule_scan_stop) {
-                       sched_scan_req->owner_nlportid = 0;
-
-                       if (rdev->ops->sched_scan_stop &&
-                           rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
-                               schedule_work(&rdev->sched_scan_stop_wk);
                }
        }
 
index 09f7fe7e5fd74c289586711263d99bd9e25b1c2c..d8014065d479f9b85d96ae3d09664b1b14894467 100644 (file)
@@ -4,7 +4,7 @@
 #include <unistd.h>
 #include <string.h>
 #include <errno.h>
-#include <net/ethernet.h>
+#include <linux/if_ether.h>
 #include <net/if.h>
 #include <linux/if_packet.h>
 #include <arpa/inet.h>
index f4fa6af22def9fa9c01df1525c7a61b3aecb8cdd..ccca1e3480171501f01779ca88fc26305bff2491 100644 (file)
@@ -9,7 +9,6 @@
 #include <string.h>
 #include <fcntl.h>
 #include <poll.h>
-#include <sys/ioctl.h>
 #include <linux/perf_event.h>
 #include <linux/bpf.h>
 #include <errno.h>
index 1fc57a5093a7be425406cca55f4eb994e2eb979e..ca495686b9c31786d0df8c16407bd06d28ee1b40 100644 (file)
@@ -1073,7 +1073,7 @@ int mtty_get_region_info(struct mdev_device *mdev,
 {
        unsigned int size = 0;
        struct mdev_state *mdev_state;
-       int bar_index;
+       u32 bar_index;
 
        if (!mdev)
                return -EINVAL;
@@ -1082,8 +1082,11 @@ int mtty_get_region_info(struct mdev_device *mdev,
        if (!mdev_state)
                return -EINVAL;
 
-       mutex_lock(&mdev_state->ops_lock);
        bar_index = region_info->index;
+       if (bar_index >= VFIO_PCI_NUM_REGIONS)
+               return -EINVAL;
+
+       mutex_lock(&mdev_state->ops_lock);
 
        switch (bar_index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
@@ -1180,7 +1183,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
 
                memcpy(&mdev_state->dev_info, &info, sizeof(info));
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               if (copy_to_user((void __user *)arg, &info, minsz))
+                       return -EFAULT;
+
+               return 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
@@ -1201,7 +1207,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
                if (ret)
                        return ret;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               if (copy_to_user((void __user *)arg, &info, minsz))
+                       return -EFAULT;
+
+               return 0;
        }
 
        case VFIO_DEVICE_GET_IRQ_INFO:
@@ -1221,10 +1230,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
                if (ret)
                        return ret;
 
-               if (info.count == -1)
-                       return -EINVAL;
+               if (copy_to_user((void __user *)arg, &info, minsz))
+                       return -EFAULT;
 
-               return copy_to_user((void __user *)arg, &info, minsz);
+               return 0;
        }
        case VFIO_DEVICE_SET_IRQS:
        {
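
copy_to_user() returns the number of bytes it failed to copy, so returning its result directly hands a positive byte count to the ioctl caller instead of an errno; the hunks above switch every exit path to the -EFAULT idiom. A user-space sketch of the fixed pattern, with memcpy standing in for the user copy:

    #include <string.h>
    #include <errno.h>

    /* Stand-in for copy_to_user(): returns bytes NOT copied (0 on success). */
    static unsigned long copy_out(void *dst, const void *src, unsigned long n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    /* The fixed ioctl idiom: map any partial copy to -EFAULT; never return
     * a positive remainder as if it were a success code. */
    static long ioctl_reply(void *dst, const void *src, unsigned long n)
    {
        if (copy_out(dst, src, n))
            return -EFAULT;
        return 0;
    }

    int main(void)
    {
        int info = 42, out = 0;
        return (int)ioctl_reply(&out, &info, sizeof(info));
    }
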
index 9913be8532ab765651ae5166953d9e25281ce83c..8fd745cb3f36c49c09213dccab6b818c6f6a00bf 100644 (file)
@@ -311,9 +311,15 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
 }
 EXPORT_SYMBOL_GPL(snd_hda_get_conn_index);
 
-
-/* return DEVLIST_LEN parameter of the given widget */
-static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
+/**
+ * snd_hda_get_num_devices - get DEVLIST_LEN parameter of the given widget
+ *  @codec: the HDA codec
+ *  @nid: NID of the pin to parse
+ *
+ * Get the device entry number on the given widget. This is a feature of
+ * DP MST audio. Each pin can have several device entries in it.
+ */
+unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid)
 {
        unsigned int wcaps = get_wcaps(codec, nid);
        unsigned int parm;
@@ -327,6 +333,7 @@ static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
                parm = 0;
        return parm & AC_DEV_LIST_LEN_MASK;
 }
+EXPORT_SYMBOL_GPL(snd_hda_get_num_devices);
 
 /**
  * snd_hda_get_devices - copy device list without cache
@@ -344,7 +351,7 @@ int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
        unsigned int parm;
        int i, dev_len, devices;
 
-       parm = get_num_devices(codec, nid);
+       parm = snd_hda_get_num_devices(codec, nid);
        if (!parm)      /* not multi-stream capable */
                return 0;
 
@@ -368,6 +375,63 @@ int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
        return devices;
 }
 
+/**
+ * snd_hda_get_dev_select - get device entry select on the pin
+ * @codec: the HDA codec
+ * @nid: NID of the pin to get device entry select
+ *
+ * Get the device entry select on the pin. Returns the device entry
+ * id currently selected on the pin. A return value of 0 means the
+ * first device entry is selected or MST is not supported.
+ */
+int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid)
+{
+       /* codecs without dp_mst support always return 0: the first dev_entry */
+       if (!codec->dp_mst)
+               return 0;
+
+       return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_DEVICE_SEL, 0);
+}
+EXPORT_SYMBOL_GPL(snd_hda_get_dev_select);
+
+/**
+ * snd_hda_set_dev_select - set device entry select on the pin
+ * @codec: the HDA codec
+ * @nid: NID of the pin to set device entry select
+ * @dev_id: device entry id to be set
+ *
+ * Set the device entry select on the pin nid.
+ */
+int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id)
+{
+       int ret, num_devices;
+
+       /* codecs without dp_mst support always return 0: the first dev_entry */
+       if (!codec->dp_mst)
+               return 0;
+
+       /* AC_PAR_DEVLIST_LEN is 0 based. */
+       num_devices = snd_hda_get_num_devices(codec, nid) + 1;
+       /* If Device List Length is 0 (num_device = 1),
+        * the pin is not multi-stream capable.
+        * Do nothing in this case.
+        */
+       if (num_devices == 1)
+               return 0;
+
+       /* Behavior when setting an index equal to or greater than
+        * the Device List Length is unpredictable
+        */
+       if (num_devices <= dev_id)
+               return -EINVAL;
+
+       ret = snd_hda_codec_write(codec, nid, 0,
+                       AC_VERB_SET_DEVICE_SEL, dev_id);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(snd_hda_set_dev_select);
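
Because the device-entry selector is per-pin state consumed by every verb that follows, code that needs to talk to a different entry must save and restore the selection, as the later patch_hdmi.c hunks do. A hedged kernel-style sketch of that pattern -- read_verb_on_dev_entry() is a hypothetical helper, not part of this patch, built only from calls this diff introduces or already uses:

    /* Query a verb on a specific device entry without disturbing the
     * selection the rest of the driver relies on (hypothetical helper). */
    static int read_verb_on_dev_entry(struct hda_codec *codec, hda_nid_t nid,
                                      int dev_id, unsigned int verb)
    {
        int saved = snd_hda_get_dev_select(codec, nid);
        int val;

        /* point the pin at the wanted device entry, query, then restore */
        snd_hda_set_dev_select(codec, nid, dev_id);
        val = snd_hda_codec_read(codec, nid, 0, verb, 0);
        snd_hda_set_dev_select(codec, nid, saved);
        return val;
    }
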
+
 /*
  * read widget caps for each widget and store in cache
  */
@@ -403,6 +467,10 @@ static int read_pin_defaults(struct hda_codec *codec)
                pin->nid = nid;
                pin->cfg = snd_hda_codec_read(codec, nid, 0,
                                              AC_VERB_GET_CONFIG_DEFAULT, 0);
+               /*
+                * all device entries share the same widget control so far;
+                * fixme: if any codec behaves differently, this needs fixing
+                */
                pin->ctrl = snd_hda_codec_read(codec, nid, 0,
                                               AC_VERB_GET_PIN_WIDGET_CONTROL,
                                               0);
index 373fcad840ea6ff5c18b4c4aa93b6473c7a60f1c..f17f25245e52acdee1a57f3ae5ab7b84fd97a37b 100644 (file)
@@ -347,8 +347,11 @@ int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
                          const hda_nid_t *list);
 int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
                           hda_nid_t nid, int recursive);
+unsigned int snd_hda_get_num_devices(struct hda_codec *codec, hda_nid_t nid);
 int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
                        u8 *dev_list, int max_devices);
+int snd_hda_get_dev_select(struct hda_codec *codec, hda_nid_t nid);
+int snd_hda_set_dev_select(struct hda_codec *codec, hda_nid_t nid, int dev_id);
 
 struct hda_verb {
        hda_nid_t nid;
index cf9bc042fe966361588b8dc92b66e308fd50e657..32105cee56ca770c6969a212b69373cb507637fe 100644 (file)
@@ -76,6 +76,7 @@ struct hdmi_spec_per_cvt {
 
 struct hdmi_spec_per_pin {
        hda_nid_t pin_nid;
+       int dev_id;
        /* pin idx, different device entries on the same pin use the same idx */
        int pin_nid_idx;
        int num_mux_nids;
@@ -130,7 +131,23 @@ struct hdmi_spec {
        struct snd_array cvts; /* struct hdmi_spec_per_cvt */
        hda_nid_t cvt_nids[4]; /* only for haswell fix */
 
+       /*
+        * num_pins is the number of virtual pins
+        * for example, if there are 3 pins and each pin
+        * has 4 device entries, then num_pins is 12
+        */
        int num_pins;
+       /*
+        * num_nids is the number of real pins
+        * In the above example, num_nids is 3
+        */
+       int num_nids;
+       /*
+        * dev_num is the number of device entries
+        * on each pin.
+        * In the above example, dev_num is 4
+        */
+       int dev_num;
        struct snd_array pins; /* struct hdmi_spec_per_pin */
        struct hdmi_pcm pcm_rec[16];
        struct mutex pcm_lock;
@@ -217,14 +234,26 @@ union audio_infoframe {
 /* obtain hda_pcm object assigned to idx */
 #define get_pcm_rec(spec, idx) (get_hdmi_pcm(spec, idx)->pcm)
 
-static int pin_nid_to_pin_index(struct hda_codec *codec, hda_nid_t pin_nid)
+static int pin_id_to_pin_index(struct hda_codec *codec,
+                              hda_nid_t pin_nid, int dev_id)
 {
        struct hdmi_spec *spec = codec->spec;
        int pin_idx;
+       struct hdmi_spec_per_pin *per_pin;
+
+       /*
+        * (dev_id == -1) means it is a non-MST pin;
+        * return the first virtual pin on this port
+        */
+       if (dev_id == -1)
+               dev_id = 0;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++)
-               if (get_pin(spec, pin_idx)->pin_nid == pin_nid)
+       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+               per_pin = get_pin(spec, pin_idx);
+               if ((per_pin->pin_nid == pin_nid) &&
+                       (per_pin->dev_id == dev_id))
                        return pin_idx;
+       }
 
        codec_warn(codec, "HDMI: pin nid %d not registered\n", pin_nid);
        return -EINVAL;
@@ -724,10 +753,11 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
 
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll);
 
-static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
+static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
+                                     int dev_id)
 {
        struct hdmi_spec *spec = codec->spec;
-       int pin_idx = pin_nid_to_pin_index(codec, nid);
+       int pin_idx = pin_id_to_pin_index(codec, nid, dev_id);
 
        if (pin_idx < 0)
                return;
@@ -738,7 +768,8 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
 static void jack_callback(struct hda_codec *codec,
                          struct hda_jack_callback *jack)
 {
-       check_presence_and_report(codec, jack->nid);
+       /* hda_jack doesn't support DP MST */
+       check_presence_and_report(codec, jack->nid, 0);
 }
 
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -747,6 +778,12 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
        struct hda_jack_tbl *jack;
        int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
 
+       /*
+        * assume DP MST uses dyn_pcm_assign and acomp and therefore
+        * never comes here; if DP MST ever supports unsol events, the
+        * code below will need to consider dev_entry
+        */
        jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
        if (!jack)
                return;
@@ -757,7 +794,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
                codec->addr, jack->nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
                !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
 
-       check_presence_and_report(codec, jack->nid);
+       /* hda_jack doesn't support DP MST */
+       check_presence_and_report(codec, jack->nid, 0);
 }
 
 static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -970,28 +1008,60 @@ static int intel_cvt_id_to_mux_idx(struct hdmi_spec *spec,
  * by any other pins.
  */
 static void intel_not_share_assigned_cvt(struct hda_codec *codec,
-                       hda_nid_t pin_nid, int mux_idx)
+                                        hda_nid_t pin_nid,
+                                        int dev_id, int mux_idx)
 {
        struct hdmi_spec *spec = codec->spec;
        hda_nid_t nid;
        int cvt_idx, curr;
        struct hdmi_spec_per_cvt *per_cvt;
+       struct hdmi_spec_per_pin *per_pin;
+       int pin_idx;
+
+       /* configure the pins connections */
+       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+               int dev_id_saved;
+               int dev_num;
 
-       /* configure all pins, including "no physical connection" ones */
-       for_each_hda_codec_node(nid, codec) {
-               unsigned int wid_caps = get_wcaps(codec, nid);
-               unsigned int wid_type = get_wcaps_type(wid_caps);
+               per_pin = get_pin(spec, pin_idx);
+               /*
+                * pin not connected to a monitor,
+                * so no need to operate on it
+                */
+               if (!per_pin->pcm)
+                       continue;
 
-               if (wid_type != AC_WID_PIN)
+               if ((per_pin->pin_nid == pin_nid) &&
+                       (per_pin->dev_id == dev_id))
                        continue;
 
-               if (nid == pin_nid)
+               /*
+                * if per_pin->dev_id >= dev_num,
+                * snd_hda_get_dev_select() will fail,
+                * and the following operation is unpredictable.
+                * So skip this situation.
+                */
+               dev_num = snd_hda_get_num_devices(codec, per_pin->pin_nid) + 1;
+               if (per_pin->dev_id >= dev_num)
                        continue;
 
+               nid = per_pin->pin_nid;
+
+               /*
+                * Calling this function should not impact the
+                * device entry selection, so save the dev_id for
+                * each pin and restore it on return
+                */
+               dev_id_saved = snd_hda_get_dev_select(codec, nid);
+               snd_hda_set_dev_select(codec, nid, per_pin->dev_id);
                curr = snd_hda_codec_read(codec, nid, 0,
                                          AC_VERB_GET_CONNECT_SEL, 0);
-               if (curr != mux_idx)
+               if (curr != mux_idx) {
+                       snd_hda_set_dev_select(codec, nid, dev_id_saved);
                        continue;
+               }
+
                /* choose an unassigned converter. The converters in the
                 * connection list are in the same order as in the codec.
@@ -1008,12 +1078,13 @@ static void intel_not_share_assigned_cvt(struct hda_codec *codec,
                                break;
                        }
                }
+               snd_hda_set_dev_select(codec, nid, dev_id_saved);
        }
 }
 
 /* A wrapper of intel_not_share_assigned_cvt() */
 static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec,
-                       hda_nid_t pin_nid, hda_nid_t cvt_nid)
+                       hda_nid_t pin_nid, int dev_id, hda_nid_t cvt_nid)
 {
        int mux_idx;
        struct hdmi_spec *spec = codec->spec;
@@ -1025,7 +1096,7 @@ static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec,
         */
        mux_idx = intel_cvt_id_to_mux_idx(spec, cvt_nid);
        if (mux_idx >= 0)
-               intel_not_share_assigned_cvt(codec, pin_nid, mux_idx);
+               intel_not_share_assigned_cvt(codec, pin_nid, dev_id, mux_idx);
 }
 
 /* skeleton caller of pin_cvt_fixup ops */
@@ -1140,6 +1211,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        per_pin->cvt_nid = per_cvt->cvt_nid;
        hinfo->nid = per_cvt->cvt_nid;
 
+       snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id);
        snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
                            AC_VERB_SET_CONNECT_SEL,
                            per_pin->mux_idx);
@@ -1198,6 +1270,7 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
                return -EINVAL;
        }
 
+       /* all the device entries on the same pin have the same conn list */
        per_pin->num_mux_nids = snd_hda_get_connections(codec, pin_nid,
                                                        per_pin->mux_nids,
                                                        HDA_MAX_CONNECTIONS);
@@ -1215,13 +1288,13 @@ static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
                return per_pin->pin_nid_idx;
 
        /* have a second try; check the "reserved area" over num_pins */
-       for (i = spec->num_pins; i < spec->pcm_used; i++) {
+       for (i = spec->num_nids; i < spec->pcm_used; i++) {
                if (!test_bit(i, &spec->pcm_bitmap))
                        return i;
        }
 
        /* the last try; check the empty slots in pins */
-       for (i = 0; i < spec->num_pins; i++) {
+       for (i = 0; i < spec->num_nids; i++) {
                if (!test_bit(i, &spec->pcm_bitmap))
                        return i;
        }
@@ -1296,10 +1369,13 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
        per_pin->cvt_nid = hinfo->nid;
 
        mux_idx = hdmi_get_pin_cvt_mux(spec, per_pin, hinfo->nid);
-       if (mux_idx < per_pin->num_mux_nids)
+       if (mux_idx < per_pin->num_mux_nids) {
+               snd_hda_set_dev_select(codec, per_pin->pin_nid,
+                                  per_pin->dev_id);
                snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
                                AC_VERB_SET_CONNECT_SEL,
                                mux_idx);
+       }
        snd_hda_spdif_ctls_assign(codec, per_pin->pcm_idx, hinfo->nid);
 
        non_pcm = check_non_pcm_per_cvt(codec, hinfo->nid);
@@ -1467,6 +1543,11 @@ static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec,
        if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign)
                jack = spec->pcm_rec[per_pin->pcm_idx].jack;
        else if (!spec->dyn_pcm_assign) {
+               /*
+                * the jack tbl doesn't support DP MST; DP MST uses
+                * dyn_pcm_assign, so it never comes here
+                */
                jack_tbl = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
                if (jack_tbl)
                        jack = jack_tbl->jack;
@@ -1485,9 +1566,9 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 
        mutex_lock(&per_pin->lock);
        eld->monitor_present = false;
-       size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid, -1,
-                                     &eld->monitor_present, eld->eld_buffer,
-                                     ELD_MAX_SIZE);
+       size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid,
+                                     per_pin->dev_id, &eld->monitor_present,
+                                     eld->eld_buffer, ELD_MAX_SIZE);
        if (size > 0) {
                size = min(size, ELD_MAX_SIZE);
                if (snd_hdmi_parse_eld(codec, &eld->info,
@@ -1565,38 +1646,81 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
        int pin_idx;
        struct hdmi_spec_per_pin *per_pin;
        int err;
+       int dev_num, i;
 
        caps = snd_hda_query_pin_caps(codec, pin_nid);
        if (!(caps & (AC_PINCAP_HDMI | AC_PINCAP_DP)))
                return 0;
 
+       /*
+        * For DP MST audio, Configuration Default is the same for
+        * all device entries on the same pin
+        */
        config = snd_hda_codec_get_pincfg(codec, pin_nid);
        if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
                return 0;
 
-       if (is_haswell_plus(codec))
-               intel_haswell_fixup_connect_list(codec, pin_nid);
-
-       pin_idx = spec->num_pins;
-       per_pin = snd_array_new(&spec->pins);
-       if (!per_pin)
-               return -ENOMEM;
-
-       per_pin->pin_nid = pin_nid;
-       per_pin->non_pcm = false;
-       if (spec->dyn_pcm_assign)
-               per_pin->pcm_idx = -1;
-       else {
-               per_pin->pcm = get_hdmi_pcm(spec, pin_idx);
-               per_pin->pcm_idx = pin_idx;
+       /*
+        * To simplify the implementation, allocate all the
+        * virtual pins statically at initialization time
+        */
+       if (is_haswell_plus(codec)) {
+               /*
+                * On Intel platforms, the number of device entries
+                * changes dynamically: with a DP MST hub connected
+                * it is 3, otherwise it is 1. Manually set dev_num
+                * to 3 here so that all device entries can be
+                * initialized statically at boot.
+                */
+               dev_num = 3;
+               spec->dev_num = 3;
+       } else if (spec->dyn_pcm_assign && codec->dp_mst) {
+               dev_num = snd_hda_get_num_devices(codec, pin_nid) + 1;
+               /*
+                * spec->dev_num is the maximum number of device entries
+                * among all the pins
+                */
+               spec->dev_num = (spec->dev_num > dev_num) ?
+                       spec->dev_num : dev_num;
+       } else {
+               /*
+                * If the platform doesn't support DP MST,
+                * manually set dev_num to 1. This means
+                * the pin has only one device entry.
+                */
+               dev_num = 1;
+               spec->dev_num = 1;
        }
-       per_pin->pin_nid_idx = pin_idx;
 
-       err = hdmi_read_pin_conn(codec, pin_idx);
-       if (err < 0)
-               return err;
+       for (i = 0; i < dev_num; i++) {
+               pin_idx = spec->num_pins;
+               per_pin = snd_array_new(&spec->pins);
 
-       spec->num_pins++;
+               if (!per_pin)
+                       return -ENOMEM;
+
+               if (spec->dyn_pcm_assign) {
+                       per_pin->pcm = NULL;
+                       per_pin->pcm_idx = -1;
+               } else {
+                       per_pin->pcm = get_hdmi_pcm(spec, pin_idx);
+                       per_pin->pcm_idx = pin_idx;
+               }
+               per_pin->pin_nid = pin_nid;
+               per_pin->pin_nid_idx = spec->num_nids;
+               per_pin->dev_id = i;
+               per_pin->non_pcm = false;
+               snd_hda_set_dev_select(codec, pin_nid, i);
+               if (is_haswell_plus(codec))
+                       intel_haswell_fixup_connect_list(codec, pin_nid);
+               err = hdmi_read_pin_conn(codec, pin_idx);
+               if (err < 0)
+                       return err;
+               spec->num_pins++;
+       }
+       spec->num_nids++;
 
        return 0;
 }
@@ -1744,7 +1868,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
        /* Call sync_audio_rate to set the N/CTS/M manually if necessary */
        /* Todo: add DP1.2 MST audio support later */
        if (codec_has_acomp(codec))
-               snd_hdac_sync_audio_rate(&codec->core, pin_nid, -1,
+               snd_hdac_sync_audio_rate(&codec->core, pin_nid, per_pin->dev_id,
                                         runtime->rate);
 
        non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
@@ -1762,6 +1886,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
                                    pinctl | PIN_OUT);
        }
 
+       /* snd_hda_set_dev_select() has been called before */
        err = spec->ops.setup_stream(codec, cvt_nid, pin_nid,
                                 stream_tag, format);
        mutex_unlock(&spec->pcm_lock);
@@ -1897,17 +2022,23 @@ static bool is_hdmi_pcm_attached(struct hdac_device *hdac, int pcm_idx)
 static int generic_hdmi_build_pcms(struct hda_codec *codec)
 {
        struct hdmi_spec *spec = codec->spec;
-       int pin_idx;
+       int idx;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+       /*
+        * for non-MST mode, the PCM count is the same as before;
+        * for DP MST mode it is (nid count + dev_num - 1), where
+        * dev_num is the number of device entries per pin
+        */
+       for (idx = 0; idx < spec->num_nids + spec->dev_num - 1; idx++) {
                struct hda_pcm *info;
                struct hda_pcm_stream *pstr;
 
-               info = snd_hda_codec_pcm_new(codec, "HDMI %d", pin_idx);
+               info = snd_hda_codec_pcm_new(codec, "HDMI %d", idx);
                if (!info)
                        return -ENOMEM;
 
-               spec->pcm_rec[pin_idx].pcm = info;
+               spec->pcm_rec[idx].pcm = info;
                spec->pcm_used++;
                info->pcm_type = HDA_PCM_TYPE_HDMI;
                info->own_chmap = true;
@@ -1915,6 +2046,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
                pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
                pstr->substreams = 1;
                pstr->ops = generic_ops;
+               /* no more than 16 PCMs (pcm_rec[] holds 16) */
+               if (spec->pcm_used >= 16)
+                       break;
                /* other pstr fields are set in open */
        }
 
@@ -2070,7 +2204,9 @@ static int generic_hdmi_init(struct hda_codec *codec)
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
                hda_nid_t pin_nid = per_pin->pin_nid;
+               int dev_id = per_pin->dev_id;
 
+               snd_hda_set_dev_select(codec, pin_nid, dev_id);
                hdmi_init_pin(codec, pin_nid);
                if (!codec_has_acomp(codec))
                        snd_hda_jack_detect_enable_callback(codec, pin_nid,
@@ -2178,6 +2314,7 @@ static int alloc_generic_hdmi(struct hda_codec *codec)
                return -ENOMEM;
 
        spec->ops = generic_standard_hdmi_ops;
+       spec->dev_num = 1;      /* initialize to 1 */
        mutex_init(&spec->pcm_lock);
        snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
 
@@ -2295,6 +2432,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
 {
        struct hda_codec *codec = audio_ptr;
        int pin_nid;
+       int dev_id = pipe;
 
        /* we assume only from port-B to port-D */
        if (port < 1 || port > 3)
@@ -2321,7 +2459,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
                return;
 
        snd_hdac_i915_set_bclk(&codec->bus->core);
-       check_presence_and_report(codec, pin_nid);
+       check_presence_and_report(codec, pin_nid, dev_id);
 }
 
 /* register i915 component pin_eld_notify callback */
@@ -2354,11 +2492,13 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
                               hda_nid_t cvt_nid)
 {
        if (per_pin) {
+               snd_hda_set_dev_select(codec, per_pin->pin_nid,
+                              per_pin->dev_id);
                intel_verify_pin_cvt_connect(codec, per_pin);
                intel_not_share_assigned_cvt(codec, per_pin->pin_nid,
-                                            per_pin->mux_idx);
+                                    per_pin->dev_id, per_pin->mux_idx);
        } else {
-               intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid);
+               intel_not_share_assigned_cvt_nid(codec, 0, 0, cvt_nid);
        }
 }
 
@@ -2378,6 +2518,8 @@ static int patch_i915_hsw_hdmi(struct hda_codec *codec)
        if (err < 0)
                return err;
        spec = codec->spec;
+       codec->dp_mst = true;
+       spec->dyn_pcm_assign = true;
 
        intel_haswell_enable_all_pins(codec, true);
        intel_haswell_fixup_enable_dp12(codec);
@@ -2389,7 +2531,6 @@ static int patch_i915_hsw_hdmi(struct hda_codec *codec)
                codec->core.link_power_control = 1;
 
        codec->patch_ops.set_power_state = haswell_set_power_state;
-       codec->dp_mst = true;
        codec->depop_delay = 0;
        codec->auto_runtime_pm = 1;
 
index efe3a44658d5a5e6d3a82d5f23ed8f8afd190d5a..4576f987a4a5fec34a1bb780e0ba1ccb71dfeada 100644 (file)
@@ -561,9 +561,9 @@ static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
        nau8825_xtalk_backup(nau8825);
        /* Config IIS as master to output signal by codec */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
-               NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+               NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
                NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
-               (0x2 << NAU8825_I2S_DRV_SFT) | 0x1);
+               (0x2 << NAU8825_I2S_LRC_DIV_SFT) | 0x1);
        /* Ramp up headphone volume to 0dB to get better performance and
         * avoid pop noise in headphone.
         */
@@ -657,7 +657,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
                NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
        /* Recover default value for IIS */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
-               NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+               NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
                NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
        /* Restore value of specific register for cross talk */
        nau8825_xtalk_restore(nau8825);
@@ -2006,7 +2006,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
                        NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
        /* FLL pre-scaler */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
-                       NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
+                       NAU8825_FLL_REF_DIV_MASK,
+                       fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
        /* select divided VCO input */
        regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
                NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
index 5d1704e732415dd04134f44ddab9971156c97de1..514fd13c2f4624bb96b237422dfd63fae52adf17 100644 (file)
 #define NAU8825_FLL_CLK_SRC_FS                 (0x3 << NAU8825_FLL_CLK_SRC_SFT)
 
 /* FLL4 (0x07) */
-#define NAU8825_FLL_REF_DIV_MASK               (0x3 << 10)
+#define NAU8825_FLL_REF_DIV_SFT        10
+#define NAU8825_FLL_REF_DIV_MASK       (0x3 << NAU8825_FLL_REF_DIV_SFT)
 
 /* FLL5 (0x08) */
 #define NAU8825_FLL_PDB_DAC_EN         (0x1 << 15)
 
 /* I2S_PCM_CTRL2 (0x1d) */
 #define NAU8825_I2S_TRISTATE   (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
-#define NAU8825_I2S_DRV_SFT    12
-#define NAU8825_I2S_DRV_MASK   (0x3 << NAU8825_I2S_DRV_SFT)
+#define NAU8825_I2S_LRC_DIV_SFT        12
+#define NAU8825_I2S_LRC_DIV_MASK       (0x3 << NAU8825_I2S_LRC_DIV_SFT)
 #define NAU8825_I2S_MS_SFT     3
 #define NAU8825_I2S_MS_MASK    (1 << NAU8825_I2S_MS_SFT)
 #define NAU8825_I2S_MS_MASTER  (1 << NAU8825_I2S_MS_SFT)
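
The FLL4 fix is a regmap field-position bug: regmap_update_bits() ANDs the value against the field mask, so a clk_ref_div written unshifted (the field lives at bits 11:10) was silently truncated to zero. A user-space sketch reproducing the mask/shift behaviour with the constants from this hunk:

    #include <stdio.h>
    #include <stdint.h>

    #define FLL_REF_DIV_SFT  10
    #define FLL_REF_DIV_MASK (0x3u << FLL_REF_DIV_SFT)

    /* regmap_update_bits() analogue: only bits inside 'mask' are written,
     * so a value not shifted into the field position is silently lost. */
    static uint16_t update_bits(uint16_t reg, uint16_t mask, uint16_t val)
    {
        return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint16_t reg = 0;
        printf("unshifted: 0x%04x\n", update_bits(reg, FLL_REF_DIV_MASK, 0x2));
        printf("shifted:   0x%04x\n",
               update_bits(reg, FLL_REF_DIV_MASK, 0x2 << FLL_REF_DIV_SFT));
        return 0;
    }
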
index 10c2a564a715dc82e198a4bb50c5691662685c7a..1ac96ef9ee2077dc70ba940f939e8162ce1dec26 100644 (file)
@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
                }
        }
 
+       regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
+               RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
+
        if (rt5645->pdata.jd_invert) {
                regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
                        RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
index 8877b74b0510fedefe81adc6637c9d72cf9c722f..bb94d50052d7a7233282b50afca134e4f2f7cf3f 100644 (file)
@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
        { 108, 0x00 }, { 109, 0x00 },
 };
 
+static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case AIC3X_RESET:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static const struct regmap_config aic3x_regmap = {
        .reg_bits = 8,
        .val_bits = 8,
@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
        .max_register = DAC_ICC_ADJ,
        .reg_defaults = aic3x_reg,
        .num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+
+       .volatile_reg = aic3x_volatile_reg,
+
        .cache_type = REGCACHE_RBTREE,
 };
 
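
Marking AIC3X_RESET volatile makes regcache bypass the register cache for it, so a reset write always reaches the hardware instead of being suppressed as a redundant cached value. A rough sketch of that cache-bypass decision (the register index and cache layout are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    #define AIC3X_RESET 1   /* hypothetical register index for this sketch */

    static bool aic3x_volatile_reg(unsigned int reg)
    {
            return reg == AIC3X_RESET;
    }

    /* Mimics a regcache-style write: skip the bus when the cached value
     * already matches, unless the register is volatile. */
    static void cached_write(unsigned int *cache, unsigned int reg,
                             unsigned int val)
    {
            if (!aic3x_volatile_reg(reg) && cache[reg] == val) {
                    printf("reg %u: cache hit, bus write skipped\n", reg);
                    return;
            }
            cache[reg] = val;
            printf("reg %u: written to hardware\n", reg);
    }

    int main(void)
    {
            unsigned int cache[2] = { 0, 0 };

            cached_write(cache, 0, 0);           /* cached: skipped */
            cached_write(cache, AIC3X_RESET, 0); /* volatile: always written */
            return 0;
    }
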
index 593b7d1aed4695bbf6e538fa924bc6263901041f..d72ccef9e238d39807657597d1c8496022718bb5 100644 (file)
@@ -1551,7 +1551,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
        const struct wmfw_region *region;
        const struct wm_adsp_region *mem;
        const char *region_name;
-       char *file, *text;
+       char *file, *text = NULL;
        struct wm_adsp_buf *buf;
        unsigned int reg;
        int regions = 0;
@@ -1700,10 +1700,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
                         regions, le32_to_cpu(region->len), offset,
                         region_name);
 
+               if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+                   firmware->size) {
+                       adsp_err(dsp,
+                                "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+                                file, regions, region_name,
+                                le32_to_cpu(region->len), firmware->size);
+                       ret = -EINVAL;
+                       goto out_fw;
+               }
+
                if (text) {
                        memcpy(text, region->data, le32_to_cpu(region->len));
                        adsp_info(dsp, "%s: %s\n", file, text);
                        kfree(text);
+                       text = NULL;
                }
 
                if (reg) {
@@ -1748,6 +1759,7 @@ out_fw:
        regmap_async_complete(regmap);
        wm_adsp_buf_free(&buf_list);
        release_firmware(firmware);
+       kfree(text);
 out:
        kfree(file);
 
@@ -2233,6 +2245,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
                }
 
                if (reg) {
+                       if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+                           firmware->size) {
+                               adsp_err(dsp,
+                                        "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+                                        file, blocks, region_name,
+                                        le32_to_cpu(blk->len),
+                                        firmware->size);
+                               ret = -EINVAL;
+                               goto out_fw;
+                       }
+
                        buf = wm_adsp_buf_alloc(blk->data,
                                                le32_to_cpu(blk->len),
                                                &buf_list);
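
Both hunks add the same defensive check: before using a region, verify that its declared length plus the record header still fits inside the firmware blob. A minimal standalone version of the arithmetic, with a simplified record type standing in for struct wmfw_region and the coeff block:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-in for a length-prefixed firmware record. */
    struct region {
            unsigned int len;
            unsigned char data[];
    };

    /* True when the record at offset 'pos' lies entirely inside the blob. */
    static bool region_fits(size_t pos, unsigned int len, size_t fw_size)
    {
            return pos + (size_t)len + sizeof(struct region) <= fw_size;
    }

    int main(void)
    {
            printf("%d\n", region_fits(0, 16, 1024));    /* 1: fits */
            printf("%d\n", region_fits(1020, 16, 1024)); /* 0: runs past end */
            return 0;
    }
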
index 2998954a1c7459e02c3397956e690a276120f06d..bdf8398cbc81b68b445f202636e8949db5e5cae5 100644 (file)
@@ -681,22 +681,19 @@ static int dw_i2s_probe(struct platform_device *pdev)
        }
 
        if (!pdata) {
-               ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
-               if (ret == -EPROBE_DEFER) {
-                       dev_err(&pdev->dev,
-                               "failed to register PCM, deferring probe\n");
-                       return ret;
-               } else if (ret) {
-                       dev_err(&pdev->dev,
-                               "Could not register DMA PCM: %d\n"
-                               "falling back to PIO mode\n", ret);
+               if (irq >= 0) {
                        ret = dw_pcm_register(pdev);
-                       if (ret) {
-                               dev_err(&pdev->dev,
-                                       "Could not register PIO PCM: %d\n",
+                       dev->use_pio = true;
+               } else {
+                       ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+                                       0);
+                       dev->use_pio = false;
+               }
+
+               if (ret) {
+                       dev_err(&pdev->dev, "could not register pcm: %d\n",
                                        ret);
-                               goto err_clk_disable;
-                       }
+                       goto err_clk_disable;
                }
        }
 
index 50349437d9615107a3a08fa481b6b2d61612e51c..fde08660b63be270f95436424e32e443bdb8a0de 100644 (file)
@@ -224,6 +224,12 @@ struct fsl_ssi_soc_data {
  * @dbg_stats: Debugging statistics
  *
  * @soc: SoC specific data
+ *
+ * @fifo_watermark: the FIFO watermark setting.  Notifies DMA when
+ *             there are @fifo_watermark or fewer words in the TX FIFO
+ *             or @fifo_watermark or more empty words in the RX FIFO.
+ * @dma_maxburst: max number of words to transfer in one go.  So far,
+ *             this is always the same as fifo_watermark.
  */
 struct fsl_ssi_private {
        struct regmap *regs;
@@ -263,6 +269,9 @@ struct fsl_ssi_private {
 
        const struct fsl_ssi_soc_data *soc;
        struct device *dev;
+
+       u32 fifo_watermark;
+       u32 dma_maxburst;
 };
 
 /*
@@ -1051,21 +1060,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
        regmap_write(regs, CCSR_SSI_SRCR, srcr);
        regmap_write(regs, CCSR_SSI_SCR, scr);
 
-       /*
-        * Set the watermark for transmit FIFI 0 and receive FIFO 0. We don't
-        * use FIFO 1. We program the transmit water to signal a DMA transfer
-        * if there are only two (or fewer) elements left in the FIFO. Two
-        * elements equals one frame (left channel, right channel). This value,
-        * however, depends on the depth of the transmit buffer.
-        *
-        * We set the watermark on the same level as the DMA burstsize.  For
-        * fiq it is probably better to use the biggest possible watermark
-        * size.
-        */
-       if (ssi_private->use_dma)
-               wm = ssi_private->fifo_depth - 2;
-       else
-               wm = ssi_private->fifo_depth;
+       wm = ssi_private->fifo_watermark;
 
        regmap_write(regs, CCSR_SSI_SFCSR,
                        CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
@@ -1373,12 +1368,8 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
                dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
                         PTR_ERR(ssi_private->baudclk));
 
-       /*
-        * We have burstsize be "fifo_depth - 2" to match the SSI
-        * watermark setting in fsl_ssi_startup().
-        */
-       ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
-       ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+       ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
+       ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
        ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
        ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
 
@@ -1543,6 +1534,47 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                 /* Older 8610 DTs didn't have the fifo-depth property */
                ssi_private->fifo_depth = 8;
 
+       /*
+        * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
+        * use FIFO 1 but set the watermark appropriately nonetheless.
+        * We program the transmit watermark to signal a DMA transfer
+        * if there are N elements left in the FIFO. For chips with 15-deep
+        * FIFOs, set the watermark to 8.  This allows the SSI to operate at a
+        * high data rate without channel slipping. Behavior is unchanged
+        * for the older chips with a FIFO depth of only 8.  A value of 4
+        * might be appropriate for those chips, but is left at
+        * fifo_depth - 2 until somebody has a chance to test.
+        *
+        * We set the watermark on the same level as the DMA burstsize.  For
+        * fiq it is probably better to use the biggest possible watermark
+        * size.
+        */
+       switch (ssi_private->fifo_depth) {
+       case 15:
+               /*
+                * Two samples are not enough when running at high data
+                * rates (e.g. 48 kHz, 16 bits/channel, 16 channels).
+                * Eight splits things evenly and leaves the DMA enough
+                * time to fill the FIFO before it over- or underruns.
+                */
+               ssi_private->fifo_watermark = 8;
+               ssi_private->dma_maxburst = 8;
+               break;
+       case 8:
+       default:
+               /*
+                * Maintain the old behavior for older chips; it is kept
+                * the same because no older board was available to test
+                * with. This could probably be changed to leave some
+                * more space in the FIFO.
+                */
+               ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
+               ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+               break;
+       }
+
        dev_set_drvdata(&pdev->dev, ssi_private);
 
        if (ssi_private->soc->imx) {
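
A collapsed view of the new watermark policy, mirroring the switch above (values taken directly from the hunk; this is only a sketch of the selection, not driver code):

    #include <stdio.h>

    /* Both outputs are kept equal, since the watermark is set at the same
     * level as the DMA burst size. */
    static void pick_watermark(unsigned int fifo_depth,
                               unsigned int *watermark, unsigned int *maxburst)
    {
            switch (fifo_depth) {
            case 15:
                    *watermark = 8;
                    *maxburst = 8;
                    break;
            case 8:
            default:
                    *watermark = fifo_depth - 2;
                    *maxburst = fifo_depth - 2;
                    break;
            }
    }

    int main(void)
    {
            unsigned int wm, mb;

            pick_watermark(15, &wm, &mb);
            printf("depth 15: watermark %u, maxburst %u\n", wm, mb);
            pick_watermark(8, &wm, &mb);
            printf("depth  8: watermark %u, maxburst %u\n", wm, mb);
            return 0;
    }
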
index 507a86a5eafe5646bb9731f865b85571eb6ffdfc..8d2fb2d6f532c833726e5dfbac2fe191eb95a15e 100644 (file)
@@ -142,7 +142,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
                 * for Jack detection and button press
                 */
                ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
-                                            0,
+                                            48000 * 512,
                                             SND_SOC_CLOCK_IN);
                if (!ret) {
                        if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
@@ -825,10 +825,20 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
        if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
                priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
                if (IS_ERR(priv->mclk)) {
+                       ret_val = PTR_ERR(priv->mclk);
+
                        dev_err(&pdev->dev,
-                               "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
-                               PTR_ERR(priv->mclk));
-                       return PTR_ERR(priv->mclk);
+                               "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+                               ret_val);
+
+                       /*
+                        * Fall back to bit clock usage for -ENOENT (clock not
+                        * available, likely due to missing dependencies); bail
+                        * for all other errors, including -EPROBE_DEFER.
+                        */
+                       if (ret_val != -ENOENT)
+                               return ret_val;
+                       byt_rt5640_quirk &= ~BYT_RT5640_MCLK_EN;
                }
        }
 
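
A hedged sketch of the fallback policy introduced above: -ENOENT means the PMC clock is simply not described on this platform, so the MCLK quirk is dropped and the codec runs from the bit clock; any other error, including -EPROBE_DEFER, is propagated. handle_mclk_result() is an invented helper, and EPROBE_DEFER is a kernel-internal errno reproduced here only so the sketch compiles:

    #include <errno.h>
    #include <stdio.h>

    #define EPROBE_DEFER 517 /* kernel-internal errno, shown for the sketch */

    /* Decide what probe should do with the result of a clock lookup. */
    static int handle_mclk_result(int err, int *use_mclk)
    {
            if (err == 0) {
                    *use_mclk = 1;  /* clock found, keep the MCLK quirk */
                    return 0;
            }
            if (err != -ENOENT)
                    return err;     /* real failure, incl. -EPROBE_DEFER */
            *use_mclk = 0;          /* clock absent: fall back to bit clock */
            return 0;
    }

    int main(void)
    {
            int use_mclk;

            printf("ENOENT      -> ret %d, use_mclk %d\n",
                   handle_mclk_result(-ENOENT, &use_mclk), use_mclk);
            printf("PROBE_DEFER -> ret %d\n",
                   handle_mclk_result(-EPROBE_DEFER, &use_mclk));
            return 0;
    }
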
index 84b5101e6ca691df0e0896b59a95e07a55e3b5e7..6c6b63a6b338f50f0eb218f231506f0017e56241 100644 (file)
@@ -180,6 +180,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
        snd_pcm_set_sync(substream);
 
        mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+       if (!mconfig)
+               return -EINVAL;
+
        skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
 
        return 0;
index 8fc3178bc79cf975809d7c808c0b03cb1d91f846..b30bd384c8d38338d50994e9a92673aba123824d 100644 (file)
@@ -515,6 +515,9 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw);
 
 void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
 {
+
+       if (ctx->dsp->fw)
+               release_firmware(ctx->dsp->fw);
        skl_clear_module_table(ctx->dsp);
        skl_freeup_uuid_list(ctx);
        skl_ipc_free(&ctx->ipc);
index 4bd68de761309b57285bf203aa2ee46cabc440cc..99b5b0835c1e840c66e45676ff81cd57a4c1875c 100644 (file)
@@ -1030,10 +1030,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
                return -ENOMEM;
 
        ret = snd_ctl_add(card, kctrl);
-       if (ret < 0) {
-               snd_ctl_free_one(kctrl);
+       if (ret < 0)
                return ret;
-       }
 
        cfg->update = update;
        cfg->card = card;
index f1901bb1466ec67b12189713c66ad040f1ae75c7..baa1afa41e3dd57fdc36655b7d3bbd147ade820f 100644 (file)
@@ -1748,6 +1748,7 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
 
        component->init = aux_dev->init;
        component->auxiliary = 1;
+       list_add(&component->card_aux_list, &card->aux_comp_list);
 
        return 0;
 
@@ -1758,16 +1759,14 @@ err_defer:
 
 static int soc_probe_aux_devices(struct snd_soc_card *card)
 {
-       struct snd_soc_component *comp;
+       struct snd_soc_component *comp, *tmp;
        int order;
        int ret;
 
        for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
                order++) {
-               list_for_each_entry(comp, &card->component_dev_list, card_list) {
-                       if (!comp->auxiliary)
-                               continue;
-
+               list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
+                                        card_aux_list) {
                        if (comp->driver->probe_order == order) {
                                ret = soc_probe_component(card, comp);
                                if (ret < 0) {
@@ -1776,6 +1775,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
                                                comp->name, ret);
                                        return ret;
                                }
+                               list_del(&comp->card_aux_list);
                        }
                }
        }
index e7a1eaa2772f4418534d094e576b424a76aaa844..6aba14009c92abc853d72575ed447e934ed5e458 100644 (file)
@@ -2184,9 +2184,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
                break;
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
-       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
                break;
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
+               break;
        }
 
 out:
index 65670b2b408cca0d4bffb2931cb7443cd4ba75b1..fbfb1fab88d5be60de7c40ed8fa1dd78d6fae727 100644 (file)
@@ -514,13 +514,12 @@ static void remove_widget(struct snd_soc_component *comp,
                            == SND_SOC_TPLG_TYPE_MIXER)
                                kfree(kcontrol->tlv.p);
 
-                       snd_ctl_remove(card, kcontrol);
-
                        /* Private value is used as struct soc_mixer_control
                         * for volume mixers or soc_bytes_ext for bytes
                         * controls.
                         */
                        kfree((void *)kcontrol->private_value);
+                       snd_ctl_remove(card, kcontrol);
                }
                kfree(w->kcontrol_news);
        }
index b3fd2382fdd9ed62f102e210489eadd17f4e3b8f..eb4b9f7a571e0f154fd7e00b81ef2c5ff7a17130 100644 (file)
@@ -1135,6 +1135,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+       case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
        case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
index 3284bb14ae789bcb0513cba80e8ef0dd43e0812d..8aad81151d5068bacbc1e6d3c114248e9847bf84 100644 (file)
@@ -213,6 +213,9 @@ static int get_value(struct parse_opt_ctx_t *p,
                else
                        err = get_arg(p, opt, flags, (const char **)opt->value);
 
+               if (opt->set)
+                       *(bool *)opt->set = true;
+
                /* PARSE_OPT_NOEMPTY: Allow NULL but disallow empty string. */
                if (opt->flags & PARSE_OPT_NOEMPTY) {
                        const char *val = *(const char **)opt->value;
index 8866ac438b3441d9e82c739da41273e93466e48b..11c3be3bcce79598bd758d436270399f0f240842 100644 (file)
@@ -137,6 +137,11 @@ struct option {
        { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), \
          .value = check_vtype(v, const char **), (a), .help = (h), \
          .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
+#define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \
+       { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
+         .value = check_vtype(v, const char **), (a), .help = (h), \
+         .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d), \
+         .set = check_vtype(os, bool *)}
 #define OPT_STRING_NOEMPTY(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
 #define OPT_DATE(s, l, v, h) \
        { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
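
The new macro pairs a string option with a bool that get_value() now sets whenever the option appears on the command line, so callers can distinguish "flag given with no argument" from "flag absent". The runnable sketch below mimics that semantics with simplified stand-ins for perf's struct option and get_value(); the option name follows the in-tree --switch-output user, but the wiring here is illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct option {
            const char *long_name;
            const char **value;
            bool *set;
    };

    /* Mimics what get_value() now does for options carrying a 'set'
     * pointer: the bool flips as soon as the option is seen, whether or
     * not an optional argument was supplied. */
    static void apply_option(struct option *opt, const char *arg)
    {
            if (opt->set)
                    *opt->set = true;
            if (arg)
                    *opt->value = arg;
    }

    int main(void)
    {
            const char *switch_output = NULL;
            bool switch_output_set = false;
            struct option opt = { "switch-output", &switch_output,
                                  &switch_output_set };

            apply_option(&opt, NULL);   /* --switch-output, no argument */
            printf("set=%d value=%s\n", switch_output_set,
                   switch_output ? switch_output : "(default)");
            return 0;
    }
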
index f1ce6006525886169fa239bfaa5cabffa33e3c9f..ec30c2fcbac05c2bef0268ac9494c0c5b4254a23 100644 (file)
@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
        trace_seq_printf(s, "%lld ", val);
 
        if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
-               trace_seq_printf(s, "[%lld] ", val);
+               trace_seq_printf(s, "[%d] ", (int) val);
 
        if (pevent_get_field_val(s,  event, "prev_state", record, &val, 0) == 0)
                write_state(s, val);
@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
        trace_seq_printf(s, "%lld", val);
 
        if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
-               trace_seq_printf(s, " [%lld]", val);
+               trace_seq_printf(s, " [%d]", (int) val);
 
        return 0;
 }
index 27fc3617c6a42066bf7a64bb2ace9961a350183b..5054d9147f0f03122f96338f96e9acd08cc36992 100644 (file)
@@ -430,6 +430,10 @@ that gets then processed, possibly via a perf script, to decide if that
 particular perf.data snapshot should be kept or not.
 
 Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
+The latter two are disabled to reduce the data file switching
+overhead. You can still turn them back on with:
+
+  --switch-output --no-no-buildid --no-no-buildid-cache
 
 --dry-run::
 Parse options then exit. --dry-run can be used to detect errors in cmdline
index 8fc24824705e0bfecd696bfc50a53ec4924cf9d7..8bb16aa9d661d0e5b8104adc0ef3d14a1fc296f9 100644 (file)
@@ -704,9 +704,9 @@ install-tests: all install-gtk
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
                $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
 
-install-bin: install-tools install-tests
+install-bin: install-tools install-tests install-traceevent-plugins
 
-install: install-bin try-install-man install-traceevent-plugins
+install: install-bin try-install-man
 
 install-python_ext:
        $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
index 35a02f8e5a4aa90548c643c0de80e85b07a0b3bb..915869e00d863af03ca409f0f9ffa98147c45385 100644 (file)
@@ -655,7 +655,6 @@ static const struct {
        { "__GFP_RECLAIM",              "R" },
        { "__GFP_DIRECT_RECLAIM",       "DR" },
        { "__GFP_KSWAPD_RECLAIM",       "KR" },
-       { "__GFP_OTHER_NODE",           "ON" },
 };
 
 static size_t max_gfp_len;
index 74d6a035133a96a7303287e130abe73af3fc4622..4ec10e9427d915a041e0510eaac2ded2daf9fc07 100644 (file)
@@ -1405,7 +1405,7 @@ static bool dry_run;
  * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
  * using pipes, etc.
  */
-struct option __record_options[] = {
+static struct option __record_options[] = {
        OPT_CALLBACK('e', "event", &record.evlist, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events_option),
@@ -1636,7 +1636,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                 * overhead. Still generate buildid if they are required
                 * explicitly using
                 *
-                *  perf record --signal-trigger --no-no-buildid \
+                *  perf record --switch-output --no-no-buildid \
                 *              --no-no-buildid-cache
                 *
                 * Following code equals to:
index d53e706a6f1706d2d69caa93132521f55d89d072..5b134b0d1ff37e42a1b82b5973528c4003bb80f3 100644 (file)
@@ -209,6 +209,7 @@ struct perf_sched {
        u64             skipped_samples;
        const char      *time_str;
        struct perf_time_interval ptime;
+       struct perf_time_interval hist_time;
 };
 
 /* per thread run time data */
@@ -2460,6 +2461,11 @@ static int timehist_sched_change_event(struct perf_tool *tool,
                timehist_print_sample(sched, sample, &al, thread, t);
 
 out:
+       if (sched->hist_time.start == 0 && t >= ptime->start)
+               sched->hist_time.start = t;
+       if (ptime->end == 0 || t <= ptime->end)
+               sched->hist_time.end = t;
+
        if (tr) {
                /* time of this sched_switch event becomes last time task seen */
                tr->last_time = sample->time;
@@ -2624,6 +2630,7 @@ static void timehist_print_summary(struct perf_sched *sched,
        struct thread *t;
        struct thread_runtime *r;
        int i;
+       u64 hist_time = sched->hist_time.end - sched->hist_time.start;
 
        memset(&totals, 0, sizeof(totals));
 
@@ -2665,7 +2672,7 @@ static void timehist_print_summary(struct perf_sched *sched,
                        totals.sched_count += r->run_stats.n;
                        printf("    CPU %2d idle for ", i);
                        print_sched_time(r->total_run_time, 6);
-                       printf(" msec\n");
+                       printf(" msec  (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
                } else
                        printf("    CPU %2d idle entire time window\n", i);
        }
@@ -2701,12 +2708,16 @@ static void timehist_print_summary(struct perf_sched *sched,
 
        printf("\n"
               "    Total number of unique tasks: %" PRIu64 "\n"
-              "Total number of context switches: %" PRIu64 "\n"
-              "           Total run time (msec): ",
+              "Total number of context switches: %" PRIu64 "\n",
               totals.task_count, totals.sched_count);
 
+       printf("           Total run time (msec): ");
        print_sched_time(totals.total_run_time, 2);
        printf("\n");
+
+       printf("    Total scheduling time (msec): ");
+       print_sched_time(hist_time, 2);
+       printf(" (x %d)\n", sched->max_cpu);
 }
 
 typedef int (*sched_handler)(struct perf_tool *tool,
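
For intuition about the new percentages: hist_time is the span between the first and last scheduling events inside the requested window, and each CPU's idle time is reported relative to it; the "(x %d)" suffix notes that per-CPU times can sum to max_cpu times the window. A worked example with invented numbers:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* Invented sample: a 10 s window (in ns) observed on 4 CPUs. */
            uint64_t hist_time = 10000000000ULL;
            uint64_t cpu_idle_time = 2500000000ULL; /* one CPU idle 2.5 s */
            int max_cpu = 4;

            printf("CPU idle: %6.2f%%\n",
                   100.0 * cpu_idle_time / hist_time);     /* 25.00% */
            printf("window: %.3f msec (x %d)\n",
                   hist_time / 1e6, max_cpu);
            return 0;
    }
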
index d281ae2b54e8fef7b85574a8b799dc33c21e9e44..6a6f44dd594bc4c6275694335ebaa02b22118982 100644 (file)
@@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module)
 
        /* A file path -- this is an offline module */
        if (module && strchr(module, '/'))
-               return machine__findnew_module_map(host_machine, 0, module);
+               return dso__new_map(module);
 
        if (!module)
                module = "kernel";
@@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
                if (strncmp(pos->dso->short_name + 1, module,
                            pos->dso->short_name_len - 2) == 0 &&
                    module[pos->dso->short_name_len - 2] == '\0') {
+                       map__get(pos);
                        return pos;
                }
        }
@@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user)
                return kernel_get_module_map(target);
 }
 
-static void put_target_map(struct map *map, bool user)
-{
-       if (map && user) {
-               /* Only the user map needs to be released */
-               map__put(map);
-       }
-}
-
-
 static int convert_exec_to_group(const char *exec, char **result)
 {
        char *ptr1, *ptr2, *exec_copy;
@@ -267,21 +259,6 @@ static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
        return true;
 }
 
-/*
- * NOTE:
- * '.gnu.linkonce.this_module' section of kernel module elf directly
- * maps to 'struct module' from linux/module.h. This section contains
- * actual module name which will be used by kernel after loading it.
- * But, we cannot use 'struct module' here since linux/module.h is not
- * exposed to user-space. Offset of 'name' has remained same from long
- * time, so hardcoding it here.
- */
-#ifdef __LP64__
-#define MOD_NAME_OFFSET 24
-#else
-#define MOD_NAME_OFFSET 12
-#endif
-
 /*
  * @module can be module name of module file path. In case of path,
  * inspect elf and find out what is actual module name.
@@ -296,6 +273,7 @@ static char *find_module_name(const char *module)
        Elf_Data *data;
        Elf_Scn *sec;
        char *mod_name = NULL;
+       int name_offset;
 
        fd = open(module, O_RDONLY);
        if (fd < 0)
@@ -317,7 +295,21 @@ static char *find_module_name(const char *module)
        if (!data || !data->d_buf)
                goto ret_err;
 
-       mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET);
+       /*
+        * NOTE:
+        * The '.gnu.linkonce.this_module' section of a kernel module ELF
+        * maps directly to 'struct module' from linux/module.h. This section
+        * contains the actual module name, which the kernel uses after
+        * loading the module. We cannot use 'struct module' here since
+        * linux/module.h is not exposed to user-space. The offset of 'name'
+        * has remained the same for a long time, so it is hardcoded here.
+        */
+       if (ehdr.e_ident[EI_CLASS] == ELFCLASS32)
+               name_offset = 12;
+       else    /* expect ELFCLASS64 by default */
+               name_offset = 24;
+
+       mod_name = strdup((char *)data->d_buf + name_offset);
 
 ret_err:
        elf_end(elf);
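
The two hardcoded offsets come from the layout of 'struct module' at the start of that section: 'name' begins 24 bytes in on 64-bit kernels and 12 bytes in on 32-bit ones. A small sketch of the class-based selection, mirroring the hunk (reading the section contents is elided):

    #include <elf.h>
    #include <stdio.h>

    /* Pick the hardcoded offset of 'name' inside the
     * .gnu.linkonce.this_module section for the module's ELF class,
     * as find_module_name() now does. */
    static int mod_name_offset(unsigned char ei_class)
    {
            if (ei_class == ELFCLASS32)
                    return 12;
            return 24;      /* expect ELFCLASS64 by default */
    }

    int main(void)
    {
            printf("ELFCLASS32: offset %d\n", mod_name_offset(ELFCLASS32));
            printf("ELFCLASS64: offset %d\n", mod_name_offset(ELFCLASS64));
            return 0;
    }
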
@@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
        }
 
 out:
-       put_target_map(map, uprobes);
+       map__put(map);
        return ret;
 
 }
@@ -618,6 +610,67 @@ error:
        return ret ? : -ENOENT;
 }
 
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+                                          struct map *map, unsigned long offs)
+{
+       struct symbol *sym;
+       u64 addr = tp->address + tp->offset - offs;
+
+       sym = map__find_symbol(map, addr);
+       if (!sym)
+               return -ENOENT;
+
+       if (strcmp(sym->name, tp->symbol)) {
+               /* If we have no realname, use symbol for it */
+               if (!tp->realname)
+                       tp->realname = tp->symbol;
+               else
+                       free(tp->symbol);
+               tp->symbol = strdup(sym->name);
+               if (!tp->symbol)
+                       return -ENOMEM;
+       }
+       tp->offset = addr - sym->start;
+       tp->address -= offs;
+
+       return 0;
+}
+
+/*
+ * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
+ * and generates new symbols with suffixes such as .constprop.N or .isra.N.
+ * Since those symbols are not recorded in DWARF, we have to find the
+ * correct generated symbols in the offline ELF binary.
+ * For an online kernel or uprobes we don't need this because those are
+ * rebased on _text or are already section-relative addresses.
+ */
+static int
+post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
+                                       int ntevs, const char *pathname)
+{
+       struct map *map;
+       unsigned long stext = 0;
+       int i, ret = 0;
+
+       /* Prepare a map for offline binary */
+       map = dso__new_map(pathname);
+       if (!map || get_text_start_address(pathname, &stext) < 0) {
+               pr_warning("Failed to get ELF symbols for %s\n", pathname);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ntevs; i++) {
+               ret = post_process_probe_trace_point(&tevs[i].point,
+                                                    map, stext);
+               if (ret < 0)
+                       break;
+       }
+       map__put(map);
+
+       return ret;
+}
+
 static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
                                          int ntevs, const char *exec)
 {
@@ -645,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
        return ret;
 }
 
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
-                                           int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+                                      int ntevs, const char *module,
+                                      struct debuginfo *dinfo)
 {
+       Dwarf_Addr text_offs = 0;
        int i, ret = 0;
        char *mod_name = NULL;
+       struct map *map;
 
        if (!module)
                return 0;
 
-       mod_name = find_module_name(module);
+       map = get_target_map(module, false);
+       if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+               pr_warning("Failed to get ELF symbols for %s\n", module);
+               return -EINVAL;
+       }
 
+       mod_name = find_module_name(module);
        for (i = 0; i < ntevs; i++) {
+               ret = post_process_probe_trace_point(&tevs[i].point,
+                                               map, (unsigned long)text_offs);
+               if (ret < 0)
+                       break;
                tevs[i].point.module =
                        strdup(mod_name ? mod_name : module);
                if (!tevs[i].point.module) {
@@ -666,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        free(mod_name);
+       map__put(map);
+
        return ret;
 }
 
@@ -679,7 +747,8 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
 
        /* Skip post process if the target is an offline kernel */
        if (symbol_conf.ignore_vmlinux_buildid)
-               return 0;
+               return post_process_offline_probe_trace_events(tevs, ntevs,
+                                               symbol_conf.vmlinux_name);
 
        reloc_sym = kernel_get_ref_reloc_sym();
        if (!reloc_sym) {
@@ -722,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
 static int post_process_probe_trace_events(struct perf_probe_event *pev,
                                           struct probe_trace_event *tevs,
                                           int ntevs, const char *module,
-                                          bool uprobe)
+                                          bool uprobe, struct debuginfo *dinfo)
 {
        int ret;
 
@@ -730,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
                ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
        else if (module)
                /* Currently ref_reloc_sym based probe is not for drivers */
-               ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+               ret = post_process_module_probe_trace_events(tevs, ntevs,
+                                                            module, dinfo);
        else
                ret = post_process_kernel_probe_trace_events(tevs, ntevs);
 
@@ -774,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                }
        }
 
-       debuginfo__delete(dinfo);
-
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("Found %d probe_trace_events.\n", ntevs);
                ret = post_process_probe_trace_events(pev, *tevs, ntevs,
-                                               pev->target, pev->uprobes);
+                                       pev->target, pev->uprobes, dinfo);
                if (ret < 0 || ret == ntevs) {
+                       pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
                        clear_probe_trace_events(*tevs, ntevs);
                        zfree(tevs);
+                       ntevs = 0;
                }
-               if (ret != ntevs)
-                       return ret < 0 ? ret : ntevs;
-               ntevs = 0;
-               /* Fall through */
        }
 
+       debuginfo__delete(dinfo);
+
        if (ntevs == 0) {       /* No error but failed to find probe point. */
                pr_warning("Probe point '%s' not found.\n",
                           synthesize_perf_probe_point(&pev->point));
                return -ENOENT;
-       }
-       /* Error path : ntevs < 0 */
-       pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
-       if (ntevs < 0) {
+       } else if (ntevs < 0) {
+               /* Error path: ntevs < 0 */
+               pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
                if (ntevs == -EBADF)
                        pr_warning("Warning: No dwarf info found in the vmlinux - "
                                "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
@@ -2869,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
        }
 
 out:
-       put_target_map(map, pev->uprobes);
+       map__put(map);
        free(syms);
        return ret;
 
@@ -3362,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
                return ret;
 
        /* Get a symbol map */
-       if (user)
-               map = dso__new_map(target);
-       else
-               map = kernel_get_module_map(target);
+       map = get_target_map(target, user);
        if (!map) {
                pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
                return -EINVAL;
@@ -3397,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
         }
 
 end:
-       if (user) {
-               map__put(map);
-       }
+       map__put(map);
        exit_probe_symbol_maps();
 
        return ret;
index df4debe564daabce2e739289b14360d49185bc6f..0d9d6e0803b88b6fe3909c0b8b83f24fa47580f6 100644 (file)
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
 }
 
 /* For the kernel module, we need a special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                               bool adjust_offset)
 {
        int n, i;
        Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
                        if (!shdr)
                                return -ENOENT;
                        *offs = shdr->sh_addr;
+                       if (adjust_offset)
+                               *offs -= shdr->sh_offset;
                }
        }
        return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
        Dwarf_Addr _addr = 0, baseaddr = 0;
        const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
        int baseline = 0, lineno = 0, ret = 0;
-       bool reloc = false;
 
-retry:
+       /* We always need to relocate the address for aranges */
+       if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+               addr += baseaddr;
        /* Find cu die */
        if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
-               if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
-                       addr += baseaddr;
-                       reloc = true;
-                       goto retry;
-               }
                pr_warning("Failed to find debug information for address %lx\n",
                           addr);
                ret = -EINVAL;
index f1d8558f498e96771c13b3f42046a757e888bdf2..2956c51986529ee7481f922d488a449c0a7619a0 100644 (file)
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
                                struct perf_probe_point *ppt);
 
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                              bool adjust_offset);
+
 /* Find a line range */
 int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
 
index 99400b0e8f2a892dcfbfe2572758ef3eae73be59..adbc6c02c3aaac757028e6bbe1ae63cd11ee437a 100644 (file)
@@ -537,6 +537,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
                                break;
                } else {
                        int n = namesz + descsz;
+
+                       if (n > (int)sizeof(bf)) {
+                               n = sizeof(bf);
+                               pr_debug("%s: truncating read of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
+                                        __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
+                       }
                        if (read(fd, bf, n) != n)
                                break;
                }
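
The added clamp simply refuses to read more note payload than the on-stack scratch buffer can hold, logging the original sizes instead of overrunning. A minimal standalone version of the guard (buffer and note sizes invented):

    #include <stdio.h>

    int main(void)
    {
            char bf[16];                          /* scratch buffer */
            unsigned int namesz = 4, descsz = 64; /* hypothetical note sizes */
            int n = (int)(namesz + descsz);

            if (n > (int)sizeof(bf)) {
                    fprintf(stderr,
                            "truncating note read: n_namesz=%u, n_descsz=%u\n",
                            namesz, descsz);
                    n = (int)sizeof(bf);
            }
            printf("reading %d of %u bytes\n", n, namesz + descsz);
            return 0;
    }
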
index 71b05891a6a14adfe7537e70333352b844966134..831022b12848de68695065040b38bd7ec55f7577 100644 (file)
@@ -90,7 +90,7 @@ ifdef INSTALL_PATH
        done;
 
        @# Ask all targets to emit their test scripts
-       echo "#!/bin/bash" > $(ALL_SCRIPT)
+       echo "#!/bin/sh" > $(ALL_SCRIPT)
        echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
        echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
 
index 92e627adf3540f5c79ca4a2d372f81c74fa3bf8b..6d58cca8e23574a1fe15993d1da8f1558ecf8e06 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 SRC_TREE=../../../../
 
index c09a682df56ae9fb3cc91214ae9d79da19714633..16058bbea7a8501324ce6c9c639f5219d69979ba 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 
 echo "--------------------"
 echo "running socket test"
index c22860ab973378f76417d2bc85f1daf2c828e0c7..30e1ac62e8cb4249350c89aa64163e3d4ee3bedd 100644 (file)
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
 
        FAIL_IF(ebb_event_enable(&event));
 
-       mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+       mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
        mtspr(SPRN_PMC5, 0);
        mtspr(SPRN_PMC6, 0);
 
index bdd58c78902e96948f92475f4898de1ec0c14a57..df9e0a0cdf294e1e334c6558c9c4f87a38387e1d 100644 (file)
@@ -1367,7 +1367,7 @@ void run_tests_once(void)
                tracing_off();
                close_test_fds();
 
-               printf("test %2d PASSED (itertation %d)\n", test_nr, iteration_nr);
+               printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
                dprintf1("======================\n\n");
        }
        iteration_nr++;
index 34e63cc4c572bfcafe6fecb4784fc4ba5079bf8a..14142faf040b7e81a1c38a983aa76d9ae50ee4e1 100644 (file)
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
 #define VMEXIT_CYCLES 500
 #define VMENTRY_CYCLES 500
 
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+       asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
 #else
 static inline void wait_cycles(unsigned long long cycles)
 {
@@ -81,6 +91,8 @@ extern unsigned ring_size;
 /* Is there a portable way to do this? */
 #if defined(__x86_64__) || defined(__i386__)
 #define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
 #else
 #define cpu_relax() assert(0)
 #endif
index 2e69ca812b4cf4b39d653cd774286587fcc87bc7..29b0d3920bfc412a049b1478a7074869cbd113df 100755 (executable)
@@ -1,12 +1,13 @@
 #!/bin/sh
 
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
 #use last CPU for host. Why not the first?
 #many devices tend to use cpu0 by default so
 #it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
 
 #run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
 do
        #Don't run guest and host on same CPU
        #It actually works ok if using signalling
index a2dbbccbb6a3fe96751fafde8cd01658aa1301a5..6a084cd57b883e1c4bb8420bee37ed0a74321d98 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        struct kvm_vcpu *vcpu;
 
        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-       vcpu->arch.timer_cpu.armed = false;
-
-       WARN_ON(!kvm_timer_should_fire(vcpu));
 
        /*
         * If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
 {
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
+
+/*
+ * On a VHE system, we only need to configure trapping of physical timer and
+ * counter accesses in EL0 and EL1 once, not for every world switch.
+ * The host kernel runs at EL2 with HCR_EL2.TGE == 1,
+ * which makes those bits have no effect on host kernel execution.
+ */
+void kvm_timer_init_vhe(void)
+{
+       /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted by 10 */
+       u32 cnthctl_shift = 10;
+       u64 val;
+
+       /*
+        * Disallow physical timer access for the guest.
+        * Physical counter access is allowed.
+        */
+       val = read_sysreg(cnthctl_el2);
+       val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+       val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+       write_sysreg(val, cnthctl_el2);
+}
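
The resulting cnthctl_el2 manipulation can be checked in isolation. The sketch below assumes the architectural bit positions EL1PCTEN = bit 0 and EL1PCEN = bit 1, which the E2H == 1 layout shifts up by 10:

    #include <stdio.h>
    #include <stdint.h>

    #define CNTHCTL_EL1PCTEN (1u << 0)  /* EL1/EL0 physical counter access */
    #define CNTHCTL_EL1PCEN  (1u << 1)  /* EL1/EL0 physical timer access */

    int main(void)
    {
            const unsigned int cnthctl_shift = 10; /* E2H == 1 layout */
            uint64_t val = CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; /* arbitrary */

            /* Trap physical timer accesses, allow counter accesses. */
            val &= ~((uint64_t)CNTHCTL_EL1PCEN << cnthctl_shift);
            val |= (uint64_t)CNTHCTL_EL1PCTEN << cnthctl_shift;

            printf("cnthctl_el2 = 0x%llx\n", (unsigned long long)val);
            return 0;
    }
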
index 798866a8d8756b07dd815d7a8bc8dec8f6019e13..63e28dd18bb09755b035f6e30bb4aaa6eca082fc 100644 (file)
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
 
-       /* Allow physical timer/counter access for the host */
-       val = read_sysreg(cnthctl_el2);
-       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-       write_sysreg(val, cnthctl_el2);
+       /*
+        * We don't need to do this for VHE since the host kernel runs in EL2
+        * with HCR_EL2.TGE ==1, which makes those bits have no impact.
+        */
+       if (!has_vhe()) {
+               /* Allow physical timer/counter access for the host */
+               val = read_sysreg(cnthctl_el2);
+               val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 
        /* Clear cntvoff for the host */
        write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        u64 val;
 
-       /*
-        * Disallow physical timer access for the guest
-        * Physical counter access is allowed
-        */
-       val = read_sysreg(cnthctl_el2);
-       val &= ~CNTHCTL_EL1PCEN;
-       val |= CNTHCTL_EL1PCTEN;
-       write_sysreg(val, cnthctl_el2);
+       /* Those bits are already configured at boot on a VHE system */
+       if (!has_vhe()) {
+               /*
+                * Disallow physical timer access for the guest
+                * Physical counter access is allowed
+                */
+               val = read_sysreg(cnthctl_el2);
+               val &= ~CNTHCTL_EL1PCEN;
+               val |= CNTHCTL_EL1PCTEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 
        if (timer->enabled) {
                write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
index 5114391b7e5af52ee5f815baead6b4561243a31a..c737ea0a310a732cc6f878c57877aa3086e67280 100644 (file)
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
 
-       mutex_lock(&kvm->lock);
-
        dist->ready = false;
        dist->initialized = false;
 
        kfree(dist->spis);
        dist->nr_spis = 0;
-
-       mutex_unlock(&kvm->lock);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
 {
        struct kvm_vcpu *vcpu;
        int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
                kvm_vgic_vcpu_destroy(vcpu);
 }
 
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+       mutex_lock(&kvm->lock);
+       __kvm_vgic_destroy(kvm);
+       mutex_unlock(&kvm->lock);
+}
+
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
  * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
                ret = vgic_v2_map_resources(kvm);
        else
                ret = vgic_v3_map_resources(kvm);
+
+       if (ret)
+               __kvm_vgic_destroy(kvm);
+
 out:
        mutex_unlock(&kvm->lock);
        return ret;
index 9bab86757fa4f3613c372fbc0250c146284306ff..834137e7b83ff0c37515a1c36300c24aeadb9925 100644 (file)
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
        dist->ready = true;
 
 out:
-       if (ret)
-               kvm_vgic_destroy(kvm);
        return ret;
 }
 
index 5c9f9745e6cab8284161397c3d810df65304fae8..e6b03fd8c374ca7a4dcb1e272141504f66c697d6 100644 (file)
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
        dist->ready = true;
 
 out:
-       if (ret)
-               kvm_vgic_destroy(kvm);
        return ret;
 }
 
index 52abac4bb6a2532ad55f4ec690530a6a7b593df0..6d2fcd6fcb2509e801c84b88e6c18e3e7a1d1fa0 100644 (file)
@@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
        mutex_lock(&lock);
 
        list_for_each_entry(tmp, &consumers, node) {
-               if (tmp->token == consumer->token) {
+               if (tmp->token == consumer->token || tmp == consumer) {
                        mutex_unlock(&lock);
                        module_put(THIS_MODULE);
                        return -EBUSY;
@@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
        mutex_lock(&lock);
 
        list_for_each_entry(tmp, &consumers, node) {
-               if (tmp->token != consumer->token)
+               if (tmp != consumer)
                        continue;
 
                list_for_each_entry(producer, &producers, node) {